Merge origin/main into codex/provider-profile-recommendations

Preserve provider recommendation workflows while integrating Codex profile support, safer launch isolation, and updated docs/scripts from upstream main.
Vasanthdev2004
2026-04-01 17:33:07 +05:30
21 changed files with 2141 additions and 188 deletions

View File

@@ -2,7 +2,7 @@
Use Claude Code with **any LLM** — not just Claude.
OpenClaude is a fork of the [Claude Code source leak](https://gitlawb.com/node/repos/z6MkgKkb/instructkr-claude-code) (exposed via npm source maps on March 31, 2026). We added an OpenAI-compatible provider shim so you can plug in GPT-4o, DeepSeek, Gemini, Llama, Mistral, or any model that speaks the OpenAI chat completions API.
OpenClaude is a fork of the [Claude Code source leak](https://gitlawb.com/node/repos/z6MkgKkb/instructkr-claude-code) (exposed via npm source maps on March 31, 2026). We added an OpenAI-compatible provider shim so you can plug in GPT-4o, DeepSeek, Gemini, Llama, Mistral, or any model that speaks the OpenAI chat completions API. It now also supports the ChatGPT Codex backend for `codexplan` and `codexspark`.
All of Claude Code's tools work — bash, file read/write/edit, grep, glob, agents, tasks, MCP — just powered by whatever model you choose.
@@ -82,6 +82,25 @@ export OPENAI_API_KEY=sk-...
export OPENAI_MODEL=gpt-4o
```
### Codex via ChatGPT auth
`codexplan` maps to GPT-5.4 on the Codex backend with high reasoning effort.
`codexspark` maps to GPT-5.3 Codex Spark for faster loops.
If you already use the Codex CLI, OpenClaude will read `~/.codex/auth.json`
automatically. You can also point it elsewhere with `CODEX_AUTH_JSON_PATH` or
override the token directly with `CODEX_API_KEY`.
```bash
export CLAUDE_CODE_USE_OPENAI=1
export OPENAI_MODEL=codexplan
# optional if you do not already have ~/.codex/auth.json
export CODEX_API_KEY=...
openclaude
```
### DeepSeek
```bash
@@ -165,6 +184,9 @@ export OPENAI_MODEL=gpt-4o
| `OPENAI_API_KEY` | Yes* | Your API key (*not needed for local models like Ollama) |
| `OPENAI_MODEL` | Yes | Model name (e.g. `gpt-4o`, `deepseek-chat`, `llama3.3:70b`) |
| `OPENAI_BASE_URL` | No | API endpoint (defaults to `https://api.openai.com/v1`) |
| `CODEX_API_KEY` | Codex only | Codex/ChatGPT access token override |
| `CODEX_AUTH_JSON_PATH` | Codex only | Path to a Codex CLI `auth.json` file |
| `CODEX_HOME` | Codex only | Alternative Codex home directory (`auth.json` will be read from here) |
You can also use `ANTHROPIC_MODEL` to override the model name. `OPENAI_MODEL` takes priority.
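For illustration, the lookup order those Codex variables imply — explicit `CODEX_API_KEY`, then `CODEX_AUTH_JSON_PATH`, then `CODEX_HOME`, then the default `~/.codex/auth.json` — can be sketched as follows. This is a simplification to make the precedence concrete, not the shipped `resolveCodexApiCredentials`:
```ts
import { existsSync, readFileSync } from 'node:fs'
import { homedir } from 'node:os'
import { join } from 'node:path'

// Sketch only: resolve a Codex token using the precedence documented above.
function pickCodexToken(env: NodeJS.ProcessEnv = process.env): string | undefined {
  if (env.CODEX_API_KEY?.trim()) return env.CODEX_API_KEY.trim() // explicit override wins
  const candidates = [
    env.CODEX_AUTH_JSON_PATH,
    env.CODEX_HOME ? join(env.CODEX_HOME, 'auth.json') : undefined,
    join(homedir(), '.codex', 'auth.json'),
  ].filter((p): p is string => Boolean(p))
  for (const path of candidates) {
    if (!existsSync(path)) continue
    try {
      const token = JSON.parse(readFileSync(path, 'utf8'))?.tokens?.access_token
      if (typeof token === 'string' && token) return token
    } catch {
      // unreadable or invalid JSON: fall through to the next candidate
    }
  }
  return undefined
}
```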
@@ -197,21 +219,25 @@ bun run hardening:strict
Notes:
- `doctor:runtime` fails fast when `CLAUDE_CODE_USE_OPENAI=1` is set with a placeholder key (`SUA_CHAVE`) or a missing key for non-local providers.
- Local providers (for example `http://localhost:11434/v1`) can run without `OPENAI_API_KEY`.
- Codex profiles validate `CODEX_API_KEY` or the Codex CLI auth file and probe `POST /responses` instead of `GET /models`; a minimal version of that probe is sketched below.
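The probe amounts to a one-shot `POST /responses` with a trivial input. This sketch mirrors the request the updated `system-check.ts` sends, simplified and assuming the token and account id are already resolved:
```ts
// Sketch of the doctor's Codex reachability probe (simplified).
async function probeCodex(baseUrl: string, model: string, token: string, accountId: string): Promise<boolean> {
  const response = await fetch(`${baseUrl.replace(/\/+$/, '')}/responses`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${token}`,
      'chatgpt-account-id': accountId,
    },
    body: JSON.stringify({
      model,
      instructions: 'Runtime doctor probe.',
      input: [{ type: 'message', role: 'user', content: [{ type: 'input_text', text: 'ping' }] }],
      store: false,
      stream: true,
    }),
  })
  return response.ok
}
```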
### Provider Launch Profiles
Use profile launchers to avoid repeated environment setup:
```bash
# one-time profile bootstrap (best available provider)
# one-time profile bootstrap (prefer viable local Ollama, otherwise OpenAI)
bun run profile:init
# preview the best provider/model for your goal
bun run profile:recommend -- --goal coding --benchmark
# auto-apply the best available provider/model for your goal
# auto-apply the best available local/openai provider/model for your goal
bun run profile:auto -- --goal latency
# codex bootstrap (defaults to codexplan and ~/.codex/auth.json)
bun run profile:codex
# openai bootstrap with explicit key
bun run profile:init -- --provider openai --api-key sk-...
@@ -221,9 +247,15 @@ bun run profile:init -- --provider ollama --model llama3.1:8b
# ollama bootstrap with intelligent model auto-selection
bun run profile:init -- --provider ollama --goal coding
# codex bootstrap with a fast model alias
bun run profile:init -- --provider codex --model codexspark
# launch using persisted profile (.openclaude-profile.json)
bun run dev:profile
# codex profile (uses CODEX_API_KEY or ~/.codex/auth.json)
bun run dev:codex
# OpenAI profile (requires OPENAI_API_KEY in your shell)
bun run dev:openai
@@ -237,7 +269,9 @@ If no profile exists yet, `dev:profile` now uses the same goal-aware defaults wh
Use `--provider ollama` when you want a local-only path. Auto mode falls back to OpenAI when no viable local chat model is installed.
Goal-based Ollama selection only recommends among models that are already installed and reachable from Ollama.
`dev:openai` and `dev:ollama` run `doctor:runtime` first and only launch the app if checks pass.
Use `profile:codex` or `--provider codex` when you want the ChatGPT Codex backend.
`dev:openai`, `dev:ollama`, and `dev:codex` run `doctor:runtime` first and only launch the app if checks pass.
For `dev:ollama`, make sure Ollama is running locally before launch.
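To make "installed and reachable" concrete: discovery boils down to one call against Ollama's local tags endpoint, roughly like this sketch (the real selection logic lives in `providerRecommendation.ts`):
```ts
// Sketch: list chat models already pulled into a local Ollama instance.
async function listInstalledOllamaModels(baseUrl = 'http://localhost:11434'): Promise<string[]> {
  const response = await fetch(`${baseUrl}/api/tags`).catch(() => null)
  if (!response?.ok) return [] // Ollama not running or unreachable
  const data = (await response.json()) as { models?: Array<{ name?: string }> }
  return (data.models ?? []).map(m => m.name ?? '').filter(Boolean)
}
```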
---

bin/import-specifier.mjs Normal file
View File

@@ -0,0 +1,7 @@
import { join } from 'path'
import { pathToFileURL } from 'url'
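// On Windows an absolute path like C:\repo\dist\cli.mjs is not a valid ESM
// import specifier, so build a file:// URL for the dynamic import instead.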
export function getDistImportSpecifier(baseDir) {
const distPath = join(baseDir, '..', 'dist', 'cli.mjs')
return pathToFileURL(distPath).href
}

View File

@@ -0,0 +1,13 @@
import assert from 'node:assert/strict'
import test from 'node:test'
import { getDistImportSpecifier } from './import-specifier.mjs'
test('builds a file URL import specifier for dist/cli.mjs', () => {
const specifier = getDistImportSpecifier('C:\\repo\\bin')
assert.equal(
specifier,
'file:///C:/repo/dist/cli.mjs',
)
})

View File

@@ -10,12 +10,13 @@
import { existsSync } from 'fs'
import { join, dirname } from 'path'
import { fileURLToPath } from 'url'
import { getDistImportSpecifier } from './import-specifier.mjs'
const __dirname = dirname(fileURLToPath(import.meta.url))
const distPath = join(__dirname, '..', 'dist', 'cli.mjs')
if (existsSync(distPath)) {
await import(distPath)
await import(getDistImportSpecifier(__dirname))
} else {
console.error(`
openclaude: dist/cli.mjs not found.

View File

@@ -16,12 +16,14 @@
"dev": "bun run build && node dist/cli.mjs",
"dev:profile": "bun run scripts/provider-launch.ts",
"dev:profile:fast": "bun run scripts/provider-launch.ts auto --fast --bare",
"dev:codex": "bun run scripts/provider-launch.ts codex",
"dev:openai": "bun run scripts/provider-launch.ts openai",
"dev:ollama": "bun run scripts/provider-launch.ts ollama",
"dev:ollama:fast": "bun run scripts/provider-launch.ts ollama --fast --bare",
"profile:init": "bun run scripts/provider-bootstrap.ts",
"profile:recommend": "bun run scripts/provider-recommend.ts",
"profile:auto": "bun run scripts/provider-recommend.ts --apply",
"profile:codex": "bun run profile:init -- --provider codex --model codexplan",
"profile:fast": "bun run profile:init -- --provider ollama --model llama3.2:3b",
"profile:code": "bun run profile:init -- --provider ollama --model qwen2.5-coder:7b",
"dev:fast": "bun run profile:fast && bun run dev:ollama:fast",
@@ -30,6 +32,7 @@
"test:provider-recommendation": "node --test --experimental-strip-types src/utils/providerRecommendation.test.ts src/utils/providerProfile.test.ts",
"typecheck": "tsc --noEmit",
"smoke": "bun run build && node dist/cli.mjs --version",
"test:provider": "bun test src/services/api/*.test.ts",
"doctor:runtime": "bun run scripts/system-check.ts",
"doctor:runtime:json": "bun run scripts/system-check.ts --json",
"doctor:report": "bun run scripts/system-check.ts --out reports/doctor-runtime.json",

View File

@@ -1,12 +1,16 @@
// @ts-nocheck
import { writeFileSync } from 'node:fs'
import { resolve } from 'node:path'
import {
resolveCodexApiCredentials,
} from '../src/services/api/providerConfig.js'
import {
getGoalDefaultOpenAIModel,
normalizeRecommendationGoal,
recommendOllamaModel,
} from '../src/utils/providerRecommendation.ts'
import {
buildCodexProfileEnv,
buildOllamaProfileEnv,
buildOpenAIProfileEnv,
createProfileFile,
@@ -29,7 +33,7 @@ function parseArg(name: string): string | null {
function parseProviderArg(): ProviderProfile | 'auto' {
const p = parseArg('--provider')?.toLowerCase()
if (p === 'openai' || p === 'ollama') return p
if (p === 'openai' || p === 'ollama' || p === 'codex') return p
return 'auto'
}
@@ -82,19 +86,39 @@ async function main(): Promise<void> {
getOllamaChatBaseUrl,
},
)
} else if (selected === 'codex') {
const builtEnv = buildCodexProfileEnv({
model: argModel,
baseUrl: argBaseUrl,
apiKey: argApiKey || process.env.CODEX_API_KEY || null,
processEnv: process.env,
})
if (!builtEnv) {
const credentials = resolveCodexApiCredentials(
argApiKey
? { ...process.env, CODEX_API_KEY: argApiKey }
: process.env,
)
const authHint = credentials.authPath
? ` or make sure ${credentials.authPath} exists`
: ''
if (!credentials.apiKey) {
console.error(`Codex profile requires CODEX_API_KEY${authHint}.`)
} else {
console.error('Codex profile requires CHATGPT_ACCOUNT_ID or an auth.json that includes it.')
}
process.exit(1)
}
env = builtEnv
} else {
const builtEnv = buildOpenAIProfileEnv({
goal,
model:
argModel ||
process.env.OPENAI_MODEL ||
getGoalDefaultOpenAIModel(goal),
model: argModel || null,
baseUrl: argBaseUrl || null,
apiKey: argApiKey || process.env.OPENAI_API_KEY || null,
processEnv: {
...process.env,
OPENAI_BASE_URL:
argBaseUrl || process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1',
},
processEnv: process.env,
})
if (!builtEnv) {

View File

@@ -2,6 +2,9 @@
import { spawn } from 'node:child_process'
import { existsSync, readFileSync } from 'node:fs'
import { resolve } from 'node:path'
import {
resolveCodexApiCredentials,
} from '../src/services/api/providerConfig.js'
import {
normalizeRecommendationGoal,
recommendOllamaModel,
@@ -45,7 +48,7 @@ function parseLaunchOptions(argv: string[]): LaunchOptions {
continue
}
if ((lower === 'auto' || lower === 'openai' || lower === 'ollama') && requestedProfile === 'auto') {
if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex') && requestedProfile === 'auto') {
requestedProfile = lower as ProviderProfile | 'auto'
continue
}
@@ -76,7 +79,7 @@ function loadPersistedProfile(): ProfileFile | null {
if (!existsSync(path)) return null
try {
const parsed = JSON.parse(readFileSync(path, 'utf8')) as ProfileFile
if (parsed.profile === 'openai' || parsed.profile === 'ollama') {
if (parsed.profile === 'openai' || parsed.profile === 'ollama' || parsed.profile === 'codex') {
return parsed
}
return null
@@ -123,18 +126,22 @@ function quoteArg(arg: string): string {
}
function printSummary(profile: ProviderProfile, env: NodeJS.ProcessEnv): void {
const keySet = Boolean(env.OPENAI_API_KEY)
const keySet = profile === 'codex'
? Boolean(resolveCodexApiCredentials(env).apiKey)
: Boolean(env.OPENAI_API_KEY)
console.log(`Launching profile: ${profile}`)
console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`)
console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`)
console.log(`OPENAI_API_KEY_SET=${keySet}`)
console.log(
`${profile === 'codex' ? 'CODEX_API_KEY_SET' : 'OPENAI_API_KEY_SET'}=${keySet}`,
)
}
async function main(): Promise<void> {
const options = parseLaunchOptions(process.argv.slice(2))
const requestedProfile = options.requestedProfile
if (!requestedProfile) {
console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|auto] [--fast] [-- <cli args>]')
console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|auto] [--fast] [--goal <latency|balanced|coding>] [-- <cli args>]')
process.exit(1)
}
@@ -155,7 +162,10 @@ async function main(): Promise<void> {
profile = requestedProfile
}
if (profile === 'ollama' && persisted?.profile !== 'ollama') {
if (
profile === 'ollama' &&
(persisted?.profile !== 'ollama' || !persisted?.env?.OPENAI_MODEL)
) {
resolvedOllamaModel ??= await resolveOllamaDefaultModel(options.goal)
if (!resolvedOllamaModel) {
console.error('No viable Ollama chat model was discovered. Pull a chat model first or save one with `bun run profile:init -- --provider ollama --model <model>`.')
@@ -179,6 +189,22 @@ async function main(): Promise<void> {
process.exit(1)
}
if (profile === 'codex') {
const credentials = resolveCodexApiCredentials(env)
if (!credentials.apiKey) {
const authHint = credentials.authPath
? ` or make sure ${credentials.authPath} exists`
: ''
console.error(`CODEX_API_KEY is required for codex profile${authHint}. Run: bun run profile:init -- --provider codex --model codexplan`)
process.exit(1)
}
if (!credentials.accountId) {
console.error('CHATGPT_ACCOUNT_ID is required for codex profile. Set CHATGPT_ACCOUNT_ID/CODEX_ACCOUNT_ID or use an auth.json that includes it.')
process.exit(1)
}
}
printSummary(profile, env)
const doctorCode = await runCommand('bun run scripts/system-check.ts', env)

View File

@@ -2,6 +2,11 @@
import { existsSync, mkdirSync, writeFileSync } from 'node:fs'
import { dirname, join, resolve } from 'node:path'
import { spawnSync } from 'node:child_process'
import {
resolveCodexApiCredentials,
resolveProviderRequest,
isLocalProviderUrl as isProviderLocalUrl,
} from '../src/services/api/providerConfig.js'
type CheckResult = {
ok: boolean
@@ -84,12 +89,7 @@ function checkBuildArtifacts(): CheckResult {
}
function isLocalBaseUrl(baseUrl: string): boolean {
try {
const url = new URL(baseUrl)
return url.hostname === 'localhost' || url.hostname === '127.0.0.1' || url.hostname === '::1'
} catch {
return false
}
return isProviderLocalUrl(baseUrl)
}
function currentBaseUrl(): string {
@@ -105,23 +105,50 @@ function checkOpenAIEnv(): CheckResult[] {
return results
}
const baseUrl = process.env.OPENAI_BASE_URL ?? 'https://api.openai.com/v1'
const model = process.env.OPENAI_MODEL
const key = process.env.OPENAI_API_KEY
const request = resolveProviderRequest({
model: process.env.OPENAI_MODEL,
baseUrl: process.env.OPENAI_BASE_URL,
})
results.push(pass('Provider mode', 'OpenAI-compatible provider enabled.'))
results.push(
pass(
'Provider mode',
request.transport === 'codex_responses'
? 'Codex responses backend enabled.'
: 'OpenAI-compatible provider enabled.',
),
)
if (!model) {
if (!process.env.OPENAI_MODEL) {
results.push(pass('OPENAI_MODEL', 'Not set. Runtime fallback model will be used.'))
} else {
results.push(pass('OPENAI_MODEL', model))
results.push(pass('OPENAI_MODEL', process.env.OPENAI_MODEL))
}
results.push(pass('OPENAI_BASE_URL', baseUrl))
results.push(pass('OPENAI_BASE_URL', request.baseUrl))
if (request.transport === 'codex_responses') {
const credentials = resolveCodexApiCredentials(process.env)
if (!credentials.apiKey) {
const authHint = credentials.authPath
? `Missing CODEX_API_KEY and no usable auth.json at ${credentials.authPath}.`
: 'Missing CODEX_API_KEY and auth.json fallback.'
results.push(fail('CODEX auth', authHint))
} else if (!credentials.accountId) {
results.push(fail('CHATGPT_ACCOUNT_ID', 'Missing chatgpt_account_id in Codex auth.'))
} else {
const detail = credentials.source === 'env'
? 'Using CODEX_API_KEY.'
: `Using ${credentials.authPath}.`
results.push(pass('CODEX auth', detail))
}
return results
}
const key = process.env.OPENAI_API_KEY
if (key === 'SUA_CHAVE') {
results.push(fail('OPENAI_API_KEY', 'Placeholder value detected: SUA_CHAVE.'))
} else if (!key && !isLocalBaseUrl(baseUrl)) {
} else if (!key && !isLocalBaseUrl(request.baseUrl)) {
results.push(fail('OPENAI_API_KEY', 'Missing key for non-local provider URL.'))
} else if (!key) {
results.push(pass('OPENAI_API_KEY', 'Not set (allowed for local providers like Ollama/LM Studio).'))
@@ -137,22 +164,53 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
return pass('Provider reachability', 'Skipped (OpenAI-compatible mode disabled).')
}
const baseUrl = currentBaseUrl()
const key = process.env.OPENAI_API_KEY
const endpoint = `${baseUrl.replace(/\/$/, '')}/models`
const request = resolveProviderRequest({
model: process.env.OPENAI_MODEL,
baseUrl: process.env.OPENAI_BASE_URL,
})
const endpoint = request.transport === 'codex_responses'
? `${request.baseUrl}/responses`
: `${request.baseUrl}/models`
const controller = new AbortController()
const timeout = setTimeout(() => controller.abort(), 4000)
try {
const headers: Record<string, string> = {}
if (key) {
headers.Authorization = `Bearer ${key}`
let method = 'GET'
let body: string | undefined
if (request.transport === 'codex_responses') {
const credentials = resolveCodexApiCredentials(process.env)
if (credentials.apiKey) {
headers.Authorization = `Bearer ${credentials.apiKey}`
}
if (credentials.accountId) {
headers['chatgpt-account-id'] = credentials.accountId
}
headers['Content-Type'] = 'application/json'
method = 'POST'
body = JSON.stringify({
model: request.resolvedModel,
instructions: 'Runtime doctor probe.',
input: [
{
type: 'message',
role: 'user',
content: [{ type: 'input_text', text: 'ping' }],
},
],
store: false,
stream: true,
})
} else if (process.env.OPENAI_API_KEY) {
headers.Authorization = `Bearer ${process.env.OPENAI_API_KEY}`
}
const response = await fetch(endpoint, {
method: 'GET',
method,
headers,
body,
signal: controller.signal,
})
@@ -209,11 +267,16 @@ function checkOllamaProcessorMode(): CheckResult {
}
function serializeSafeEnvSummary(): Record<string, string | boolean> {
const request = resolveProviderRequest({
model: process.env.OPENAI_MODEL,
baseUrl: process.env.OPENAI_BASE_URL,
})
return {
CLAUDE_CODE_USE_OPENAI: isTruthy(process.env.CLAUDE_CODE_USE_OPENAI),
OPENAI_MODEL: process.env.OPENAI_MODEL ?? '(unset)',
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL ?? 'https://api.openai.com/v1',
OPENAI_BASE_URL: request.baseUrl,
OPENAI_API_KEY_SET: Boolean(process.env.OPENAI_API_KEY),
CODEX_API_KEY_SET: Boolean(resolveCodexApiCredentials(process.env).apiKey),
}
}

View File

@@ -1,4 +1,8 @@
import { feature } from 'bun:bundle';
import {
resolveCodexApiCredentials,
resolveProviderRequest,
} from '../services/api/providerConfig.js'
// Bugfix for corepack auto-pinning, which adds yarnpkg to people's package.jsons
// eslint-disable-next-line custom-rules/no-top-level-side-effects
@@ -46,15 +50,33 @@ function validateProviderEnvOrExit(): void {
return
}
const apiKey = process.env.OPENAI_API_KEY
const baseUrl = process.env.OPENAI_BASE_URL
const request = resolveProviderRequest({
model: process.env.OPENAI_MODEL,
baseUrl: process.env.OPENAI_BASE_URL,
})
if (apiKey === 'SUA_CHAVE') {
if (process.env.OPENAI_API_KEY === 'SUA_CHAVE') {
console.error('Invalid OPENAI_API_KEY: placeholder value SUA_CHAVE detected. Set a real key or unset for local providers.')
process.exit(1)
}
if (!apiKey && !isLocalProviderUrl(baseUrl)) {
if (request.transport === 'codex_responses') {
const credentials = resolveCodexApiCredentials()
if (!credentials.apiKey) {
const authHint = credentials.authPath
? ` or put auth.json at ${credentials.authPath}`
: ''
console.error(`Codex auth is required for ${request.requestedModel}. Set CODEX_API_KEY${authHint}.`)
process.exit(1)
}
if (!credentials.accountId) {
console.error('Codex auth is missing chatgpt_account_id. Re-login with Codex or set CHATGPT_ACCOUNT_ID/CODEX_ACCOUNT_ID.')
process.exit(1)
}
return
}
if (!process.env.OPENAI_API_KEY && !isLocalProviderUrl(request.baseUrl)) {
console.error('OPENAI_API_KEY is required when CLAUDE_CODE_USE_OPENAI=1 and OPENAI_BASE_URL is not local.')
process.exit(1)
}

View File

@@ -103,13 +103,16 @@ export async function renderAndRun(root: Root, element: React.ReactNode): Promis
}
export async function showSetupScreens(root: Root, permissionMode: PermissionMode, allowDangerouslySkipPermissions: boolean, commands?: Command[], claudeInChrome?: boolean, devChannels?: ChannelEntry[]): Promise<boolean> {
if ("production" === 'test' || isEnvTruthy(false) || process.env.IS_DEMO // Skip onboarding in demo mode
|| isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) // Skip onboarding for OpenAI provider
) {
return false;
}
const isOpenAIProvider = isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI);
const config = getGlobalConfig();
let onboardingShown = false;
if (!config.theme || !config.hasCompletedOnboarding // always show onboarding at least once
// Skip onboarding dialog for OpenAI provider (no Anthropic account needed)
if (!isOpenAIProvider && (!config.theme || !config.hasCompletedOnboarding) // always show onboarding at least once
) {
onboardingShown = true;
const {
@@ -130,10 +133,9 @@ export async function showSetupScreens(root: Root, permissionMode: PermissionMod
// Note: non-interactive sessions (CI/CD with -p) never reach showSetupScreens at all.
// Skip permission checks in claubbit
if (!isEnvTruthy(process.env.CLAUBBIT)) {
// Fast-path: skip TrustDialog import+render when CWD is already trusted.
// If it returns true, the TrustDialog would auto-resolve regardless of
// security features, so we can skip the dynamic import and render cycle.
if (!checkHasTrustDialogAccepted()) {
// Skip trust dialog UI for OpenAI provider (no Anthropic auth), but still
// run trust state initialization below so the REPL mounts correctly.
if (!isOpenAIProvider && !checkHasTrustDialogAccepted()) {
const {
TrustDialog
} = await import('./components/TrustDialog/TrustDialog.js');
@@ -142,6 +144,8 @@ export async function showSetupScreens(root: Root, permissionMode: PermissionMod
// Signal that trust has been verified for this session.
// GrowthBook checks this to decide whether to include auth headers.
// Critical for OpenAI provider: without this, downstream config lookups
// may fail silently, preventing the REPL from mounting (frozen terminal).
setSessionTrustAccepted(true);
// Reset and reinitialize GrowthBook after trust is established.
@@ -153,6 +157,8 @@ export async function showSetupScreens(root: Root, permissionMode: PermissionMod
// Now that trust is established, prefetch system context if it wasn't already
void getSystemContext();
// Skip MCP approval dialogs for OpenAI provider (no interactive auth prompts)
if (!isOpenAIProvider) {
// If settings are valid, check for any mcp.json servers that need approval
const {
errors: allErrors
@@ -170,6 +176,7 @@ export async function showSetupScreens(root: Root, permissionMode: PermissionMod
await showSetupDialog(root, done => <ClaudeMdExternalIncludesDialog onDone={done} isStandaloneDialog externalIncludes={externalIncludes} />);
}
}
}
// Track current repo path for teleport directory switching (fire-and-forget)
// This must happen AFTER trust to prevent untrusted directories from poisoning the mapping

View File

@@ -0,0 +1,172 @@
import { afterEach, describe, expect, test } from 'bun:test'
import { mkdtempSync, rmSync, writeFileSync } from 'node:fs'
import { join } from 'node:path'
import { tmpdir } from 'node:os'
import {
codexStreamToAnthropic,
convertAnthropicMessagesToResponsesInput,
convertCodexResponseToAnthropicMessage,
} from './codexShim.js'
import {
resolveCodexApiCredentials,
resolveProviderRequest,
} from './providerConfig.js'
const tempDirs: string[] = []
afterEach(() => {
while (tempDirs.length > 0) {
const dir = tempDirs.pop()
if (dir) rmSync(dir, { recursive: true, force: true })
}
})
function createTempAuthJson(payload: Record<string, unknown>): string {
const dir = mkdtempSync(join(tmpdir(), 'openclaude-codex-'))
tempDirs.push(dir)
const authPath = join(dir, 'auth.json')
writeFileSync(authPath, JSON.stringify(payload), 'utf8')
return authPath
}
async function collectStreamEventTypes(responseText: string): Promise<string[]> {
const stream = new ReadableStream({
start(controller) {
controller.enqueue(new TextEncoder().encode(responseText))
controller.close()
},
})
const events: string[] = []
for await (const event of codexStreamToAnthropic(new Response(stream), 'gpt-5.4')) {
events.push(event.type)
}
return events
}
describe('Codex provider config', () => {
test('resolves codexplan alias to Codex transport with reasoning', () => {
const resolved = resolveProviderRequest({ model: 'codexplan' })
expect(resolved.transport).toBe('codex_responses')
expect(resolved.resolvedModel).toBe('gpt-5.4')
expect(resolved.reasoning).toEqual({ effort: 'high' })
})
test('loads Codex credentials from auth.json fallback', () => {
const authPath = createTempAuthJson({
tokens: {
access_token: 'header.payload.signature',
account_id: 'acct_test',
},
})
const credentials = resolveCodexApiCredentials({
CODEX_AUTH_JSON_PATH: authPath,
} as NodeJS.ProcessEnv)
expect(credentials.apiKey).toBe('header.payload.signature')
expect(credentials.accountId).toBe('acct_test')
expect(credentials.source).toBe('auth.json')
})
})
describe('Codex request translation', () => {
test('converts assistant tool use and user tool result into Responses items', () => {
const items = convertAnthropicMessagesToResponsesInput([
{
role: 'assistant',
content: [
{ type: 'text', text: 'Working...' },
{ type: 'tool_use', id: 'call_123', name: 'search', input: { q: 'x' } },
],
},
{
role: 'user',
content: [
{ type: 'tool_result', tool_use_id: 'call_123', content: 'done' },
],
},
])
expect(items).toEqual([
{
type: 'message',
role: 'assistant',
content: [{ type: 'output_text', text: 'Working...' }],
},
{
type: 'function_call',
id: 'fc_123',
call_id: 'call_123',
name: 'search',
arguments: '{"q":"x"}',
},
{
type: 'function_call_output',
call_id: 'call_123',
output: 'done',
},
])
})
test('converts completed Codex tool response into Anthropic message', () => {
const message = convertCodexResponseToAnthropicMessage(
{
id: 'resp_1',
model: 'gpt-5.3-codex-spark',
output: [
{
type: 'function_call',
id: 'fc_1',
call_id: 'call_1',
name: 'ping',
arguments: '{"value":"ping"}',
},
],
usage: { input_tokens: 12, output_tokens: 4 },
},
'gpt-5.3-codex-spark',
)
expect(message.stop_reason).toBe('tool_use')
expect(message.content).toEqual([
{
type: 'tool_use',
id: 'call_1',
name: 'ping',
input: { value: 'ping' },
},
])
})
test('translates Codex SSE text stream into Anthropic events', async () => {
const responseText = [
'event: response.output_item.added',
'data: {"type":"response.output_item.added","item":{"id":"msg_1","type":"message","status":"in_progress","content":[],"role":"assistant"},"output_index":0,"sequence_number":0}',
'',
'event: response.content_part.added',
'data: {"type":"response.content_part.added","content_index":0,"item_id":"msg_1","output_index":0,"part":{"type":"output_text","text":""},"sequence_number":1}',
'',
'event: response.output_text.delta',
'data: {"type":"response.output_text.delta","content_index":0,"delta":"ok","item_id":"msg_1","output_index":0,"sequence_number":2}',
'',
'event: response.output_item.done',
'data: {"type":"response.output_item.done","item":{"id":"msg_1","type":"message","status":"completed","content":[{"type":"output_text","text":"ok"}],"role":"assistant"},"output_index":0,"sequence_number":3}',
'',
'event: response.completed',
'data: {"type":"response.completed","response":{"id":"resp_1","status":"completed","model":"gpt-5.4","output":[{"type":"message","role":"assistant","content":[{"type":"output_text","text":"ok"}]}],"usage":{"input_tokens":2,"output_tokens":1}},"sequence_number":4}',
'',
].join('\n')
const eventTypes = await collectStreamEventTypes(responseText)
expect(eventTypes).toEqual([
'message_start',
'content_block_start',
'content_block_delta',
'content_block_stop',
'message_delta',
'message_stop',
])
})
})

View File

@@ -0,0 +1,740 @@
import type {
ResolvedCodexCredentials,
ResolvedProviderRequest,
} from './providerConfig.js'
export interface AnthropicUsage {
input_tokens: number
output_tokens: number
cache_creation_input_tokens: number
cache_read_input_tokens: number
}
export interface AnthropicStreamEvent {
type: string
message?: Record<string, unknown>
index?: number
content_block?: Record<string, unknown>
delta?: Record<string, unknown>
usage?: Partial<AnthropicUsage>
}
export interface ShimCreateParams {
model: string
messages: Array<Record<string, unknown>>
system?: unknown
tools?: Array<Record<string, unknown>>
max_tokens: number
stream?: boolean
temperature?: number
top_p?: number
tool_choice?: unknown
metadata?: unknown
[key: string]: unknown
}
type ResponsesInputPart =
| { type: 'input_text'; text: string }
| { type: 'output_text'; text: string }
| { type: 'input_image'; image_url: string }
type ResponsesInputItem =
| {
type: 'message'
role: 'user' | 'assistant'
content: ResponsesInputPart[]
}
| {
type: 'function_call'
id: string
call_id: string
name: string
arguments: string
}
| {
type: 'function_call_output'
call_id: string
output: string
}
type ResponsesTool = {
type: 'function'
name: string
description: string
parameters: Record<string, unknown>
strict?: boolean
}
type CodexSseEvent = {
event: string
data: Record<string, any>
}
function makeUsage(usage?: {
input_tokens?: number
output_tokens?: number
}): AnthropicUsage {
return {
input_tokens: usage?.input_tokens ?? 0,
output_tokens: usage?.output_tokens ?? 0,
cache_creation_input_tokens: 0,
cache_read_input_tokens: 0,
}
}
function makeMessageId(): string {
return `msg_${Math.random().toString(36).slice(2)}${Date.now().toString(36)}`
}
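// The Responses API pairs a function_call item id (`fc_*`) with a call id
// (`call_*`), while Anthropic tool_use blocks carry a single id. Recover
// whichever half is missing from the prefix of the id we were given.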
function normalizeToolUseId(toolUseId: string | undefined): {
id: string
callId: string
} {
const value = (toolUseId || '').trim()
if (!value) {
return {
id: 'fc_unknown',
callId: 'call_unknown',
}
}
if (value.startsWith('call_')) {
return {
id: `fc_${value.slice('call_'.length)}`,
callId: value,
}
}
if (value.startsWith('fc_')) {
return {
id: value,
callId: `call_${value.slice('fc_'.length)}`,
}
}
return {
id: `fc_${value}`,
callId: value,
}
}
function convertSystemPrompt(system: unknown): string {
if (!system) return ''
if (typeof system === 'string') return system
if (Array.isArray(system)) {
return system
.map((block: { type?: string; text?: string }) =>
block.type === 'text' ? (block.text ?? '') : '',
)
.join('\n\n')
}
return String(system)
}
function convertToolResultToText(content: unknown): string {
if (typeof content === 'string') return content
if (!Array.isArray(content)) return JSON.stringify(content ?? '')
const chunks: string[] = []
for (const block of content) {
if (block?.type === 'text' && typeof block.text === 'string') {
chunks.push(block.text)
continue
}
if (block?.type === 'image') {
const src = block.source
if (src?.type === 'url' && src.url) {
chunks.push(`[Image](${src.url})`)
}
continue
}
if (typeof block?.text === 'string') {
chunks.push(block.text)
}
}
return chunks.join('\n')
}
function convertContentBlocksToResponsesParts(
content: unknown,
role: 'user' | 'assistant',
): ResponsesInputPart[] {
const textType = role === 'assistant' ? 'output_text' : 'input_text'
if (typeof content === 'string') {
return [{ type: textType, text: content }]
}
if (!Array.isArray(content)) {
return [{ type: textType, text: String(content ?? '') }]
}
const parts: ResponsesInputPart[] = []
for (const block of content) {
switch (block?.type) {
case 'text':
parts.push({ type: textType, text: block.text ?? '' })
break
case 'image': {
if (role === 'assistant') break
const source = block.source
if (source?.type === 'base64') {
parts.push({
type: 'input_image',
image_url: `data:${source.media_type};base64,${source.data}`,
})
} else if (source?.type === 'url' && source.url) {
parts.push({
type: 'input_image',
image_url: source.url,
})
}
break
}
case 'thinking':
if (block.thinking) {
parts.push({
type: textType,
text: `<thinking>${block.thinking}</thinking>`,
})
}
break
case 'tool_use':
case 'tool_result':
break
default:
if (typeof block?.text === 'string') {
parts.push({ type: textType, text: block.text })
}
}
}
return parts
}
export function convertAnthropicMessagesToResponsesInput(
messages: Array<{ role?: string; message?: { role?: string; content?: unknown }; content?: unknown }>,
): ResponsesInputItem[] {
const items: ResponsesInputItem[] = []
for (const message of messages) {
const inner = message.message ?? message
const role = (inner as { role?: string }).role ?? message.role
const content = (inner as { content?: unknown }).content
if (role === 'user') {
if (Array.isArray(content)) {
const toolResults = content.filter(
(block: { type?: string }) => block.type === 'tool_result',
)
const otherContent = content.filter(
(block: { type?: string }) => block.type !== 'tool_result',
)
for (const toolResult of toolResults) {
const { callId } = normalizeToolUseId(toolResult.tool_use_id)
items.push({
type: 'function_call_output',
call_id: callId,
output: convertToolResultToText(toolResult.content),
})
}
const parts = convertContentBlocksToResponsesParts(otherContent, 'user')
if (parts.length > 0) {
items.push({
type: 'message',
role: 'user',
content: parts,
})
}
continue
}
items.push({
type: 'message',
role: 'user',
content: convertContentBlocksToResponsesParts(content, 'user'),
})
continue
}
if (role === 'assistant') {
const textBlocks = Array.isArray(content)
? content.filter((block: { type?: string }) => block.type !== 'tool_use')
: content
const parts = convertContentBlocksToResponsesParts(textBlocks, 'assistant')
if (parts.length > 0) {
items.push({
type: 'message',
role: 'assistant',
content: parts,
})
}
if (Array.isArray(content)) {
for (const toolUse of content.filter(
(block: { type?: string }) => block.type === 'tool_use',
)) {
const normalized = normalizeToolUseId(toolUse.id)
items.push({
type: 'function_call',
id: normalized.id,
call_id: normalized.callId,
name: toolUse.name ?? 'tool',
arguments:
typeof toolUse.input === 'string'
? toolUse.input
: JSON.stringify(toolUse.input ?? {}),
})
}
}
}
}
return items.filter(item =>
item.type !== 'message' || item.content.length > 0,
)
}
export function convertToolsToResponsesTools(
tools: Array<{ name?: string; description?: string; input_schema?: Record<string, unknown> }>,
): ResponsesTool[] {
return tools
.filter(tool => tool.name && tool.name !== 'ToolSearchTool')
.map(tool => ({
type: 'function',
name: tool.name ?? 'tool',
description: tool.description ?? '',
parameters: tool.input_schema ?? { type: 'object', properties: {} },
strict: true,
}))
}
function convertToolChoice(toolChoice: unknown): unknown {
const choice = toolChoice as { type?: string; name?: string } | undefined
if (!choice?.type) return undefined
if (choice.type === 'auto') return 'auto'
if (choice.type === 'any') return 'required'
if (choice.type === 'tool' && choice.name) {
return {
type: 'function',
name: choice.name,
}
}
return undefined
}
export async function performCodexRequest(options: {
request: ResolvedProviderRequest
credentials: ResolvedCodexCredentials
params: ShimCreateParams
defaultHeaders: Record<string, string>
signal?: AbortSignal
}): Promise<Response> {
const input = convertAnthropicMessagesToResponsesInput(
options.params.messages as Array<{
role?: string
message?: { role?: string; content?: unknown }
content?: unknown
}>,
)
const body: Record<string, unknown> = {
model: options.request.resolvedModel,
input: input.length > 0
? input
: [
{
type: 'message',
role: 'user',
content: [{ type: 'input_text', text: '' }],
},
],
store: false,
stream: true,
}
const instructions = convertSystemPrompt(options.params.system)
if (instructions) {
body.instructions = instructions
}
const toolChoice = convertToolChoice(options.params.tool_choice)
if (toolChoice) {
body.tool_choice = toolChoice
}
if (options.params.tools && options.params.tools.length > 0) {
const convertedTools = convertToolsToResponsesTools(
options.params.tools as Array<{
name?: string
description?: string
input_schema?: Record<string, unknown>
}>,
)
if (convertedTools.length > 0) {
body.tools = convertedTools
body.parallel_tool_calls = true
body.tool_choice ??= 'auto'
}
}
if (options.request.reasoning) {
body.reasoning = options.request.reasoning
}
if (options.params.temperature !== undefined) {
body.temperature = options.params.temperature
}
if (options.params.top_p !== undefined) {
body.top_p = options.params.top_p
}
const headers: Record<string, string> = {
'Content-Type': 'application/json',
...options.defaultHeaders,
Authorization: `Bearer ${options.credentials.apiKey}`,
}
if (options.credentials.accountId) {
headers['chatgpt-account-id'] = options.credentials.accountId
}
headers.originator ??= 'openclaude'
const response = await fetch(`${options.request.baseUrl}/responses`, {
method: 'POST',
headers,
body: JSON.stringify(body),
signal: options.signal,
})
if (!response.ok) {
const errorBody = await response.text().catch(() => 'unknown error')
throw new Error(`Codex API error ${response.status}: ${errorBody}`)
}
return response
}
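// Incrementally parse the SSE body: events are separated by blank lines,
// multiple `data:` lines are joined before JSON parsing, and the
// standalone `[DONE]` sentinel is ignored.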
async function* readSseEvents(response: Response): AsyncGenerator<CodexSseEvent> {
const reader = response.body?.getReader()
if (!reader) return
const decoder = new TextDecoder()
let buffer = ''
while (true) {
const { done, value } = await reader.read()
if (done) break
buffer += decoder.decode(value, { stream: true })
const chunks = buffer.split('\n\n')
buffer = chunks.pop() ?? ''
for (const chunk of chunks) {
const lines = chunk
.split('\n')
.map(line => line.trim())
.filter(Boolean)
if (lines.length === 0) continue
const eventLine = lines.find(line => line.startsWith('event: '))
const dataLines = lines.filter(line => line.startsWith('data: '))
if (!eventLine || dataLines.length === 0) continue
const event = eventLine.slice(7).trim()
const rawData = dataLines.map(line => line.slice(6)).join('\n')
if (rawData === '[DONE]') continue
let data: Record<string, any>
try {
const parsed = JSON.parse(rawData)
if (!parsed || typeof parsed !== 'object') continue
data = parsed as Record<string, any>
} catch {
continue
}
yield { event, data }
}
}
}
function determineStopReason(
response: Record<string, any> | undefined,
sawToolUse: boolean,
): 'end_turn' | 'tool_use' | 'max_tokens' {
const output = Array.isArray(response?.output) ? response.output : []
if (
sawToolUse ||
output.some((item: { type?: string }) => item?.type === 'function_call')
) {
return 'tool_use'
}
const incompleteReason = response?.incomplete_details?.reason
if (
typeof incompleteReason === 'string' &&
incompleteReason.includes('max_output_tokens')
) {
return 'max_tokens'
}
return 'end_turn'
}
export async function collectCodexCompletedResponse(
response: Response,
): Promise<Record<string, any>> {
let completedResponse: Record<string, any> | undefined
for await (const event of readSseEvents(response)) {
if (event.event === 'response.failed') {
throw new Error(
event.data?.response?.error?.message ??
event.data?.error?.message ??
'Codex response failed',
)
}
if (
event.event === 'response.completed' ||
event.event === 'response.incomplete'
) {
completedResponse = event.data?.response
break
}
}
if (!completedResponse) {
throw new Error('Codex response ended without a completed payload')
}
return completedResponse
}
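// Translate a Codex /responses SSE stream into Anthropic-style events:
// one message_start, interleaved text and tool_use content blocks, then a
// message_delta carrying stop_reason plus usage, and a final message_stop.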
export async function* codexStreamToAnthropic(
response: Response,
model: string,
): AsyncGenerator<AnthropicStreamEvent> {
const messageId = makeMessageId()
const toolBlocksByItemId = new Map<
string,
{ index: number; toolUseId: string }
>()
let activeTextBlockIndex: number | null = null
let nextContentBlockIndex = 0
let sawToolUse = false
let finalResponse: Record<string, any> | undefined
const closeActiveTextBlock = async function* () {
if (activeTextBlockIndex === null) return
yield {
type: 'content_block_stop',
index: activeTextBlockIndex,
}
activeTextBlockIndex = null
}
const startTextBlockIfNeeded = async function* () {
if (activeTextBlockIndex !== null) return
activeTextBlockIndex = nextContentBlockIndex++
yield {
type: 'content_block_start',
index: activeTextBlockIndex,
content_block: { type: 'text', text: '' },
}
}
yield {
type: 'message_start',
message: {
id: messageId,
type: 'message',
role: 'assistant',
content: [],
model,
stop_reason: null,
stop_sequence: null,
usage: makeUsage(),
},
}
for await (const event of readSseEvents(response)) {
const payload = event.data
if (event.event === 'response.output_item.added') {
const item = payload.item
if (item?.type === 'function_call') {
yield* closeActiveTextBlock()
const blockIndex = nextContentBlockIndex++
const toolUseId = item.call_id ?? item.id ?? `call_${blockIndex}`
toolBlocksByItemId.set(String(item.id ?? toolUseId), {
index: blockIndex,
toolUseId,
})
sawToolUse = true
yield {
type: 'content_block_start',
index: blockIndex,
content_block: {
type: 'tool_use',
id: toolUseId,
name: item.name ?? 'tool',
input: {},
},
}
if (item.arguments) {
yield {
type: 'content_block_delta',
index: blockIndex,
delta: {
type: 'input_json_delta',
partial_json: item.arguments,
},
}
}
}
continue
}
if (event.event === 'response.content_part.added') {
if (payload.part?.type === 'output_text') {
yield* startTextBlockIfNeeded()
}
continue
}
if (event.event === 'response.output_text.delta') {
yield* startTextBlockIfNeeded()
if (activeTextBlockIndex !== null) {
yield {
type: 'content_block_delta',
index: activeTextBlockIndex,
delta: {
type: 'text_delta',
text: payload.delta ?? '',
},
}
}
continue
}
if (event.event === 'response.function_call_arguments.delta') {
const toolBlock = toolBlocksByItemId.get(String(payload.item_id ?? ''))
if (toolBlock) {
yield {
type: 'content_block_delta',
index: toolBlock.index,
delta: {
type: 'input_json_delta',
partial_json: payload.delta ?? '',
},
}
}
continue
}
if (event.event === 'response.output_item.done') {
const item = payload.item
if (item?.type === 'function_call') {
const toolBlock = toolBlocksByItemId.get(String(item.id ?? ''))
if (toolBlock) {
yield {
type: 'content_block_stop',
index: toolBlock.index,
}
toolBlocksByItemId.delete(String(item.id))
}
} else if (item?.type === 'message') {
yield* closeActiveTextBlock()
}
continue
}
if (
event.event === 'response.completed' ||
event.event === 'response.incomplete'
) {
finalResponse = payload.response
break
}
if (event.event === 'response.failed') {
throw new Error(
payload?.response?.error?.message ??
payload?.error?.message ??
'Codex response failed',
)
}
}
yield* closeActiveTextBlock()
for (const toolBlock of toolBlocksByItemId.values()) {
yield {
type: 'content_block_stop',
index: toolBlock.index,
}
}
yield {
type: 'message_delta',
delta: {
stop_reason: determineStopReason(finalResponse, sawToolUse),
stop_sequence: null,
},
usage: {
input_tokens: finalResponse?.usage?.input_tokens ?? 0,
output_tokens: finalResponse?.usage?.output_tokens ?? 0,
},
}
yield { type: 'message_stop' }
}
export function convertCodexResponseToAnthropicMessage(
data: Record<string, any>,
model: string,
): Record<string, unknown> {
const content: Array<Record<string, unknown>> = []
const output = Array.isArray(data.output) ? data.output : []
for (const item of output) {
if (item?.type === 'message' && Array.isArray(item.content)) {
for (const part of item.content) {
if (part?.type === 'output_text') {
content.push({
type: 'text',
text: part.text ?? '',
})
}
}
continue
}
if (item?.type === 'function_call') {
let input: unknown
try {
input = JSON.parse(item.arguments ?? '{}')
} catch {
input = { raw: item.arguments ?? '' }
}
content.push({
type: 'tool_use',
id: item.call_id ?? item.id ?? makeMessageId(),
name: item.name ?? 'tool',
input,
})
}
}
return {
id: data.id ?? makeMessageId(),
type: 'message',
role: 'assistant',
content,
model: data.model ?? model,
stop_reason: determineStopReason(data, content.some(item => item.type === 'tool_use')),
stop_sequence: null,
usage: makeUsage(data.usage),
}
}

View File

@@ -0,0 +1,135 @@
import { afterEach, beforeEach, expect, test } from 'bun:test'
import { createOpenAIShimClient } from './openaiShim.ts'
type FetchType = typeof globalThis.fetch
const originalEnv = {
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
OPENAI_API_KEY: process.env.OPENAI_API_KEY,
}
const originalFetch = globalThis.fetch
function makeSseResponse(lines: string[]): Response {
const encoder = new TextEncoder()
return new Response(
new ReadableStream({
start(controller) {
for (const line of lines) {
controller.enqueue(encoder.encode(line))
}
controller.close()
},
}),
{
headers: {
'Content-Type': 'text/event-stream',
},
},
)
}
function makeStreamChunks(chunks: unknown[]): string[] {
return [
...chunks.map(chunk => `data: ${JSON.stringify(chunk)}\n\n`),
'data: [DONE]\n\n',
]
}
beforeEach(() => {
process.env.OPENAI_BASE_URL = 'http://example.test/v1'
process.env.OPENAI_API_KEY = 'test-key'
})
afterEach(() => {
process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
process.env.OPENAI_API_KEY = originalEnv.OPENAI_API_KEY
globalThis.fetch = originalFetch
})
test('preserves usage from final OpenAI stream chunk with empty choices', async () => {
globalThis.fetch = (async (_input, init) => {
const url = typeof _input === 'string' ? _input : _input.url
expect(url).toBe('http://example.test/v1/chat/completions')
const body = JSON.parse(String(init?.body))
expect(body.stream).toBe(true)
expect(body.stream_options).toEqual({ include_usage: true })
const chunks = makeStreamChunks([
{
id: 'chatcmpl-1',
object: 'chat.completion.chunk',
model: 'fake-model',
choices: [
{
index: 0,
delta: { role: 'assistant', content: 'hello world' },
finish_reason: null,
},
],
},
{
id: 'chatcmpl-1',
object: 'chat.completion.chunk',
model: 'fake-model',
choices: [
{
index: 0,
delta: {},
finish_reason: 'stop',
},
],
},
{
id: 'chatcmpl-1',
object: 'chat.completion.chunk',
model: 'fake-model',
choices: [],
usage: {
prompt_tokens: 123,
completion_tokens: 45,
total_tokens: 168,
},
},
])
return makeSseResponse(chunks)
}) as FetchType
const client = createOpenAIShimClient({}) as {
beta: {
messages: {
create: (
params: Record<string, unknown>,
options?: Record<string, unknown>,
) => Promise<unknown> & {
withResponse: () => Promise<{ data: AsyncIterable<Record<string, unknown>> }>
}
}
}
}
const result = await client.beta.messages
.create({
model: 'fake-model',
system: 'test system',
messages: [{ role: 'user', content: 'hello' }],
max_tokens: 64,
stream: true,
})
.withResponse()
const events: Array<Record<string, unknown>> = []
for await (const event of result.data) {
events.push(event)
}
const usageEvent = events.find(
event => event.type === 'message_delta' && typeof event.usage === 'object' && event.usage !== null,
) as { usage?: { input_tokens?: number; output_tokens?: number } } | undefined
expect(usageEvent).toBeDefined()
expect(usageEvent?.usage?.input_tokens).toBe(123)
expect(usageEvent?.usage?.output_tokens).toBe(45)
})

View File

@@ -13,28 +13,26 @@
* OPENAI_API_KEY=sk-... — API key (optional for local models)
* OPENAI_BASE_URL=http://... — base URL (default: https://api.openai.com/v1)
* OPENAI_MODEL=gpt-4o — default model override
* CODEX_API_KEY / ~/.codex/auth.json — Codex auth for codexplan/codexspark
*/
import {
codexStreamToAnthropic,
collectCodexCompletedResponse,
convertCodexResponseToAnthropicMessage,
performCodexRequest,
type AnthropicStreamEvent,
type ShimCreateParams,
} from './codexShim.js'
import {
resolveCodexApiCredentials,
resolveProviderRequest,
} from './providerConfig.js'
// ---------------------------------------------------------------------------
// Types — minimal subset of Anthropic SDK types we need to produce
// ---------------------------------------------------------------------------
interface AnthropicUsage {
input_tokens: number
output_tokens: number
cache_creation_input_tokens: number
cache_read_input_tokens: number
}
interface AnthropicStreamEvent {
type: string
message?: Record<string, unknown>
index?: number
content_block?: Record<string, unknown>
delta?: Record<string, unknown>
usage?: Partial<AnthropicUsage>
}
// ---------------------------------------------------------------------------
// Message format conversion: Anthropic → OpenAI
// ---------------------------------------------------------------------------
@@ -267,6 +265,19 @@ function makeMessageId(): string {
return `msg_${Math.random().toString(36).slice(2)}${Date.now().toString(36)}`
}
function convertChunkUsage(
usage: OpenAIStreamChunk['usage'] | undefined,
): Partial<AnthropicUsage> | undefined {
if (!usage) return undefined
return {
input_tokens: usage.prompt_tokens ?? 0,
output_tokens: usage.completion_tokens ?? 0,
cache_creation_input_tokens: 0,
cache_read_input_tokens: 0,
}
}
/**
* Async generator that transforms an OpenAI SSE stream into
* Anthropic-format BetaRawMessageStreamEvent objects.
@@ -279,6 +290,8 @@ async function* openaiStreamToAnthropic(
let contentBlockIndex = 0
const activeToolCalls = new Map<number, { id: string; name: string; index: number }>()
let hasEmittedContentStart = false
let lastStopReason: 'tool_use' | 'max_tokens' | 'end_turn' | null = null
let hasEmittedFinalUsage = false
// Emit message_start
yield {
@@ -326,6 +339,8 @@ async function* openaiStreamToAnthropic(
continue
}
const chunkUsage = convertChunkUsage(chunk.usage)
for (const choice of chunk.choices ?? []) {
const delta = choice.delta
@@ -427,16 +442,31 @@ async function* openaiStreamToAnthropic(
: choice.finish_reason === 'length'
? 'max_tokens'
: 'end_turn'
lastStopReason = stopReason
yield {
type: 'message_delta',
delta: { stop_reason: stopReason, stop_sequence: null },
usage: {
output_tokens: chunk.usage?.completion_tokens ?? 0,
},
...(chunkUsage ? { usage: chunkUsage } : {}),
}
if (chunkUsage) {
hasEmittedFinalUsage = true
}
}
}
if (
!hasEmittedFinalUsage &&
chunkUsage &&
(chunk.choices?.length ?? 0) === 0
) {
yield {
type: 'message_delta',
delta: { stop_reason: lastStopReason, stop_sequence: null },
usage: chunkUsage,
}
hasEmittedFinalUsage = true
}
}
}
@@ -447,20 +477,6 @@ async function* openaiStreamToAnthropic(
// The shim client — duck-types as Anthropic SDK
// ---------------------------------------------------------------------------
interface ShimCreateParams {
model: string
messages: Array<Record<string, unknown>>
system?: unknown
tools?: Array<Record<string, unknown>>
max_tokens: number
stream?: boolean
temperature?: number
top_p?: number
tool_choice?: unknown
metadata?: unknown
[key: string]: unknown
}
class OpenAIShimStream {
private generator: AsyncGenerator<AnthropicStreamEvent>
// The controller property is checked by claude.ts to distinguish streams from error messages
@@ -476,17 +492,9 @@ class OpenAIShimStream {
}
class OpenAIShimMessages {
private baseUrl: string
private apiKey: string
private defaultHeaders: Record<string, string>
constructor(
baseUrl: string,
apiKey: string,
defaultHeaders: Record<string, string>,
) {
this.baseUrl = baseUrl
this.apiKey = apiKey
constructor(defaultHeaders: Record<string, string>) {
this.defaultHeaders = defaultHeaders
}
@@ -496,20 +504,30 @@ class OpenAIShimMessages {
) {
const self = this
// Return a thenable that also has .withResponse()
const promise = (async () => {
const response = await self._doRequest(params, options)
const request = resolveProviderRequest({ model: params.model })
const response = await self._doRequest(request, params, options)
if (params.stream) {
return new OpenAIShimStream(
openaiStreamToAnthropic(response, params.model),
request.transport === 'codex_responses'
? codexStreamToAnthropic(response, request.resolvedModel)
: openaiStreamToAnthropic(response, request.resolvedModel),
)
}
// Non-streaming: parse the full response and convert
if (request.transport === 'codex_responses') {
const data = await collectCodexCompletedResponse(response)
return convertCodexResponseToAnthropicMessage(
data,
request.resolvedModel,
)
}
const data = await response.json()
return self._convertNonStreamingResponse(data, params.model)
return self._convertNonStreamingResponse(data, request.resolvedModel)
})()
// Add .withResponse() for streaming path (claude.ts uses this)
;(promise as unknown as Record<string, unknown>).withResponse =
async () => {
const data = await promise
@@ -524,6 +542,43 @@ class OpenAIShimMessages {
}
private async _doRequest(
request: ReturnType<typeof resolveProviderRequest>,
params: ShimCreateParams,
options?: { signal?: AbortSignal; headers?: Record<string, string> },
): Promise<Response> {
if (request.transport === 'codex_responses') {
const credentials = resolveCodexApiCredentials()
if (!credentials.apiKey) {
const authHint = credentials.authPath
? ` or place a Codex auth.json at ${credentials.authPath}`
: ''
throw new Error(
`Codex auth is required for ${request.requestedModel}. Set CODEX_API_KEY${authHint}.`,
)
}
if (!credentials.accountId) {
throw new Error(
'Codex auth is missing chatgpt_account_id. Re-login with the Codex CLI or set CHATGPT_ACCOUNT_ID/CODEX_ACCOUNT_ID.',
)
}
return performCodexRequest({
request,
credentials,
params,
defaultHeaders: {
...this.defaultHeaders,
...(options?.headers ?? {}),
},
signal: options?.signal,
})
}
return this._doOpenAIRequest(request, params, options)
}
private async _doOpenAIRequest(
request: ReturnType<typeof resolveProviderRequest>,
params: ShimCreateParams,
options?: { signal?: AbortSignal; headers?: Record<string, string> },
): Promise<Response> {
@@ -537,7 +592,7 @@ class OpenAIShimMessages {
)
const body: Record<string, unknown> = {
model: params.model,
model: request.resolvedModel,
messages: openaiMessages,
max_tokens: params.max_tokens,
stream: params.stream ?? false,
@@ -550,7 +605,6 @@ class OpenAIShimMessages {
if (params.temperature !== undefined) body.temperature = params.temperature
if (params.top_p !== undefined) body.top_p = params.top_p
// Convert tools
if (params.tools && params.tools.length > 0) {
const converted = convertTools(
params.tools as Array<{
@@ -561,7 +615,6 @@ class OpenAIShimMessages {
)
if (converted.length > 0) {
body.tools = converted
// Convert tool_choice
if (params.tool_choice) {
const tc = params.tool_choice as { type?: string; name?: string }
if (tc.type === 'auto') {
@@ -578,18 +631,18 @@ class OpenAIShimMessages {
}
}
const url = `${this.baseUrl}/chat/completions`
const headers: Record<string, string> = {
'Content-Type': 'application/json',
...this.defaultHeaders,
...(options?.headers ?? {}),
}
if (this.apiKey) {
headers['Authorization'] = `Bearer ${this.apiKey}`
const apiKey = process.env.OPENAI_API_KEY ?? ''
if (apiKey) {
headers.Authorization = `Bearer ${apiKey}`
}
const response = await fetch(url, {
const response = await fetch(`${request.baseUrl}/chat/completions`, {
method: 'POST',
headers,
body: JSON.stringify(body),
@@ -598,9 +651,7 @@ class OpenAIShimMessages {
if (!response.ok) {
const errorBody = await response.text().catch(() => 'unknown error')
throw new Error(
`OpenAI API error ${response.status}: ${errorBody}`,
)
throw new Error(`OpenAI API error ${response.status}: ${errorBody}`)
}
return response
@@ -680,45 +731,22 @@ class OpenAIShimMessages {
class OpenAIShimBeta {
messages: OpenAIShimMessages
constructor(
baseUrl: string,
apiKey: string,
defaultHeaders: Record<string, string>,
) {
this.messages = new OpenAIShimMessages(baseUrl, apiKey, defaultHeaders)
constructor(defaultHeaders: Record<string, string>) {
this.messages = new OpenAIShimMessages(defaultHeaders)
}
}
/**
* Creates an Anthropic SDK-compatible client that routes requests
* to an OpenAI-compatible API endpoint.
*
* Usage:
* CLAUDE_CODE_USE_OPENAI=1 OPENAI_API_KEY=sk-... OPENAI_MODEL=gpt-4o
*/
export function createOpenAIShimClient(options: {
defaultHeaders?: Record<string, string>
maxRetries?: number
timeout?: number
}): unknown {
const baseUrl = (
process.env.OPENAI_BASE_URL ??
process.env.OPENAI_API_BASE ??
'https://api.openai.com/v1'
).replace(/\/+$/, '')
const apiKey = process.env.OPENAI_API_KEY ?? ''
const headers = {
const beta = new OpenAIShimBeta({
...(options.defaultHeaders ?? {}),
}
})
const beta = new OpenAIShimBeta(baseUrl, apiKey, headers)
// Duck-type as Anthropic client
return {
beta,
// Some code paths access .messages directly (non-beta)
messages: beta.messages,
}
}

View File

@@ -0,0 +1,313 @@
import { existsSync, readFileSync } from 'node:fs'
import { homedir } from 'node:os'
import { join } from 'node:path'
export const DEFAULT_OPENAI_BASE_URL = 'https://api.openai.com/v1'
export const DEFAULT_CODEX_BASE_URL = 'https://chatgpt.com/backend-api/codex'
const CODEX_ALIAS_MODELS: Record<
string,
{
model: string
reasoningEffort?: ReasoningEffort
}
> = {
codexplan: {
model: 'gpt-5.4',
reasoningEffort: 'high',
},
codexspark: {
model: 'gpt-5.3-codex-spark',
},
} as const
type CodexAlias = keyof typeof CODEX_ALIAS_MODELS
type ReasoningEffort = 'low' | 'medium' | 'high'
export type ProviderTransport = 'chat_completions' | 'codex_responses'
export type ResolvedProviderRequest = {
transport: ProviderTransport
requestedModel: string
resolvedModel: string
baseUrl: string
reasoning?: {
effort: ReasoningEffort
}
}
export type ResolvedCodexCredentials = {
apiKey: string
accountId?: string
authPath?: string
source: 'env' | 'auth.json' | 'none'
}
type ModelDescriptor = {
raw: string
baseModel: string
reasoning?: {
effort: ReasoningEffort
}
}
const LOCALHOST_HOSTNAMES = new Set(['localhost', '127.0.0.1', '::1'])
function asTrimmedString(value: unknown): string | undefined {
return typeof value === 'string' && value.trim() ? value.trim() : undefined
}
function readNestedString(
value: unknown,
paths: string[][],
): string | undefined {
for (const path of paths) {
let current = value
let valid = true
for (const key of path) {
if (!current || typeof current !== 'object' || !(key in current)) {
valid = false
break
}
current = (current as Record<string, unknown>)[key]
}
if (!valid) continue
const stringValue = asTrimmedString(current)
if (stringValue) return stringValue
}
return undefined
}
function decodeJwtPayload(token: string): Record<string, unknown> | undefined {
const parts = token.split('.')
if (parts.length < 2) return undefined
try {
const normalized = parts[1].replace(/-/g, '+').replace(/_/g, '/')
const padded = normalized + '='.repeat((4 - (normalized.length % 4)) % 4)
const json = Buffer.from(padded, 'base64').toString('utf8')
const parsed = JSON.parse(json)
return parsed && typeof parsed === 'object'
? (parsed as Record<string, unknown>)
: undefined
} catch {
return undefined
}
}
function parseReasoningEffort(value: string | undefined): ReasoningEffort | undefined {
if (!value) return undefined
const normalized = value.trim().toLowerCase()
if (normalized === 'low' || normalized === 'medium' || normalized === 'high') {
return normalized
}
return undefined
}
function parseModelDescriptor(model: string): ModelDescriptor {
const trimmed = model.trim()
const queryIndex = trimmed.indexOf('?')
if (queryIndex === -1) {
const alias = trimmed.toLowerCase() as CodexAlias
const aliasConfig = CODEX_ALIAS_MODELS[alias]
if (aliasConfig) {
return {
raw: trimmed,
baseModel: aliasConfig.model,
reasoning: aliasConfig.reasoningEffort
? { effort: aliasConfig.reasoningEffort }
: undefined,
}
}
return {
raw: trimmed,
baseModel: trimmed,
}
}
const baseModel = trimmed.slice(0, queryIndex).trim()
const params = new URLSearchParams(trimmed.slice(queryIndex + 1))
const alias = baseModel.toLowerCase() as CodexAlias
const aliasConfig = CODEX_ALIAS_MODELS[alias]
const resolvedBaseModel = aliasConfig?.model ?? baseModel
const effort =
parseReasoningEffort(params.get('reasoning') ?? undefined) ??
aliasConfig?.reasoningEffort
return {
raw: trimmed,
baseModel: resolvedBaseModel,
reasoning: effort ? { effort } : undefined,
}
}
function isCodexAlias(model: string): boolean {
const normalized = model.trim().toLowerCase()
const base = normalized.split('?', 1)[0] ?? normalized
return base in CODEX_ALIAS_MODELS
}
export function isLocalProviderUrl(baseUrl: string | undefined): boolean {
if (!baseUrl) return false
try {
return LOCALHOST_HOSTNAMES.has(new URL(baseUrl).hostname)
} catch {
return false
}
}
export function isCodexBaseUrl(baseUrl: string | undefined): boolean {
if (!baseUrl) return false
try {
const parsed = new URL(baseUrl)
return (
parsed.hostname === 'chatgpt.com' &&
parsed.pathname.replace(/\/+$/, '') === '/backend-api/codex'
)
} catch {
return false
}
}
export function resolveProviderRequest(options?: {
model?: string
baseUrl?: string
fallbackModel?: string
}): ResolvedProviderRequest {
const requestedModel =
options?.model?.trim() ||
process.env.OPENAI_MODEL?.trim() ||
options?.fallbackModel?.trim() ||
'gpt-4o'
const descriptor = parseModelDescriptor(requestedModel)
const rawBaseUrl =
options?.baseUrl ??
process.env.OPENAI_BASE_URL ??
process.env.OPENAI_API_BASE ??
undefined
const transport: ProviderTransport =
isCodexAlias(requestedModel) || isCodexBaseUrl(rawBaseUrl)
? 'codex_responses'
: 'chat_completions'
return {
transport,
requestedModel,
resolvedModel: descriptor.baseModel,
baseUrl:
(rawBaseUrl ??
(transport === 'codex_responses'
? DEFAULT_CODEX_BASE_URL
: DEFAULT_OPENAI_BASE_URL)
).replace(/\/+$/, ''),
reasoning: descriptor.reasoning,
}
}
export function resolveCodexAuthPath(
env: NodeJS.ProcessEnv = process.env,
): string {
const explicit = asTrimmedString(env.CODEX_AUTH_JSON_PATH)
if (explicit) return explicit
const codexHome = asTrimmedString(env.CODEX_HOME)
if (codexHome) return join(codexHome, 'auth.json')
return join(homedir(), '.codex', 'auth.json')
}
export function parseChatgptAccountId(
token: string | undefined,
): string | undefined {
if (!token) return undefined
const payload = decodeJwtPayload(token)
const fromClaim = asTrimmedString(
payload?.['https://api.openai.com/auth.chatgpt_account_id'],
)
if (fromClaim) return fromClaim
return asTrimmedString(payload?.chatgpt_account_id)
}
function loadCodexAuthJson(
authPath: string,
): Record<string, unknown> | undefined {
if (!existsSync(authPath)) return undefined
try {
const raw = readFileSync(authPath, 'utf8')
const parsed = JSON.parse(raw)
return parsed && typeof parsed === 'object'
? (parsed as Record<string, unknown>)
: undefined
} catch {
return undefined
}
}
export function resolveCodexApiCredentials(
env: NodeJS.ProcessEnv = process.env,
): ResolvedCodexCredentials {
const envApiKey = asTrimmedString(env.CODEX_API_KEY)
const envAccountId =
asTrimmedString(env.CODEX_ACCOUNT_ID) ??
asTrimmedString(env.CHATGPT_ACCOUNT_ID)
if (envApiKey) {
return {
apiKey: envApiKey,
accountId: envAccountId ?? parseChatgptAccountId(envApiKey),
source: 'env',
}
}
const authPath = resolveCodexAuthPath(env)
const authJson = loadCodexAuthJson(authPath)
if (!authJson) {
return {
apiKey: '',
authPath,
source: 'none',
}
}
const apiKey = readNestedString(authJson, [
['access_token'],
['accessToken'],
['tokens', 'access_token'],
['tokens', 'accessToken'],
['auth', 'access_token'],
['auth', 'accessToken'],
['token', 'access_token'],
['token', 'accessToken'],
['tokens', 'id_token'],
['tokens', 'idToken'],
])
const accountId =
envAccountId ??
readNestedString(authJson, [
['account_id'],
['accountId'],
['tokens', 'account_id'],
['tokens', 'accountId'],
['auth', 'account_id'],
['auth', 'accountId'],
]) ??
parseChatgptAccountId(apiKey)
if (!apiKey) {
return {
apiKey: '',
accountId,
authPath,
source: 'none',
}
}
return {
apiKey,
accountId,
authPath,
source: 'auth.json',
}
}
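
A quick usage sketch for the helpers above. The expected values follow from the alias table and defaults in this file, assuming no OPENAI_BASE_URL or OPENAI_MODEL is set in the environment; the script itself is illustrative:

```ts
import {
  resolveCodexApiCredentials,
  resolveProviderRequest,
} from './providerConfig.ts'

// Plain model name: chat_completions transport against the OpenAI default.
const openai = resolveProviderRequest({ model: 'gpt-4o' })
// openai.transport -> 'chat_completions'
// openai.baseUrl   -> 'https://api.openai.com/v1'

// Codex alias with a query override: the alias expands to its base model,
// and ?reasoning=... takes precedence over the alias default ('high').
const codex = resolveProviderRequest({ model: 'codexplan?reasoning=medium' })
// codex.transport         -> 'codex_responses'
// codex.resolvedModel     -> 'gpt-5.4'
// codex.baseUrl           -> 'https://chatgpt.com/backend-api/codex'
// codex.reasoning?.effort -> 'medium'

// Credential precedence: CODEX_API_KEY first, then the Codex CLI auth file
// (CODEX_AUTH_JSON_PATH, CODEX_HOME/auth.json, or ~/.codex/auth.json).
const creds = resolveCodexApiCredentials()
if (creds.source === 'none') {
  console.error(`no Codex credentials (checked ${creds.authPath})`)
}
```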

View File

@@ -6,6 +6,8 @@ export const MODEL_ALIASES = [
'sonnet[1m]',
'opus[1m]',
'opusplan',
'codexplan',
'codexspark',
] as const
export type ModelAlias = (typeof MODEL_ALIASES)[number]
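
Downstream guards can derive their checks from this list; a hypothetical sketch (the real isModelAlias lives elsewhere in the codebase, and the import path is assumed):

```ts
import { MODEL_ALIASES, type ModelAlias } from './modelAliases.ts'

// Narrow a user-supplied string to the alias union derived above.
function isKnownAlias(value: string): value is ModelAlias {
  return (MODEL_ALIASES as readonly string[]).includes(value)
}

isKnownAlias('codexspark') // true
isKnownAlias('gpt-4o')     // false: a concrete model, not an alias
```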

View File

@@ -193,6 +193,11 @@ export function getRuntimeMainLoopModel(params: {
* @returns The default model setting to use
*/
export function getDefaultMainLoopModelSetting(): ModelName | ModelAlias {
// OpenAI provider: always use the configured OpenAI model
if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o'
}
// Ants default to defaultModel from flag config, or Opus 1M if not configured
if (process.env.USER_TYPE === 'ant') {
return (
@@ -318,6 +323,12 @@ export function renderDefaultModelSetting(
if (setting === 'opusplan') {
return 'Opus 4.6 in plan mode, else Sonnet 4.6'
}
if (setting === 'codexplan') {
return 'Codex Plan (GPT-5.4 high reasoning)'
}
if (setting === 'codexspark') {
return 'Codex Spark (GPT-5.3 Codex Spark)'
}
return renderModelName(parseUserSpecifiedModel(setting))
}
@@ -352,6 +363,12 @@ export function renderModelSetting(setting: ModelName | ModelAlias): string {
if (setting === 'opusplan') {
return 'Opus Plan'
}
if (setting === 'codexplan') {
return 'Codex Plan'
}
if (setting === 'codexspark') {
return 'Codex Spark'
}
if (isModelAlias(setting)) {
return capitalize(setting)
}
@@ -364,7 +381,15 @@ export function renderModelSetting(setting: ModelName | ModelAlias): string {
* if the model is not recognized as a public model.
*/
export function getPublicModelDisplayName(model: ModelName): string | null {
// For OpenAI provider, show the actual model name (e.g. 'gpt-4o') not a Claude alias
if (getAPIProvider() === 'openai') {
return null
}
switch (model) {
case 'gpt-5.4':
return 'GPT-5.4'
case 'gpt-5.3-codex-spark':
return 'GPT-5.3 Codex Spark'
case getModelStrings().opus46:
return 'Opus 4.6'
case getModelStrings().opus46 + '[1m]':
@@ -472,6 +497,10 @@ export function parseUserSpecifiedModel(
if (isModelAlias(modelString)) {
switch (modelString) {
case 'codexplan':
case 'codexspark':
return modelInputTrimmed
case 'opusplan':
return getDefaultSonnetModel() + (has1mTag ? '[1m]' : '') // Sonnet is default, Opus in plan mode
case 'sonnet':

View File

@@ -266,6 +266,22 @@ function getOpusPlanOption(): ModelOption {
}
}
function getCodexPlanOption(): ModelOption {
return {
value: 'codexplan',
label: 'Codex Plan',
description: 'GPT-5.4 on the Codex backend with high reasoning',
}
}
function getCodexSparkOption(): ModelOption {
return {
value: 'codexspark',
label: 'Codex Spark',
description: 'GPT-5.3 Codex Spark on the Codex backend for fast tool loops',
}
}
// @[MODEL LAUNCH]: Update the model picker lists below to include/reorder options for the new model.
// Each user tier (ant, Max/Team Premium, Pro/Team Standard/Enterprise, PAYG 1P, PAYG 3P) has its own list.
function getModelOptionsBase(fastMode = false): ModelOption[] {
@@ -344,6 +360,10 @@ function getModelOptionsBase(fastMode = false): ModelOption[] {
// PAYG 3P: Default (Sonnet 4.5) + Sonnet (3P custom) or Sonnet 4.6/1M + Opus (3P custom) or Opus 4.1/Opus 4.6/Opus1M + Haiku + Opus 4.1
const payg3pOptions = [getDefaultOptionForUser(fastMode)]
if (getAPIProvider() === 'openai') {
payg3pOptions.push(getCodexPlanOption(), getCodexSparkOption())
}
const customSonnet = getCustomSonnetOption()
if (customSonnet !== undefined) {
payg3pOptions.push(customSonnet)
@@ -497,6 +517,10 @@ export function getModelOptions(fastMode = false): ModelOption[] {
return filterModelOptionsByAllowlist(options)
} else if (customModel === 'opusplan') {
return filterModelOptionsByAllowlist([...options, getOpusPlanOption()])
} else if (customModel === 'codexplan') {
return filterModelOptionsByAllowlist([...options, getCodexPlanOption()])
} else if (customModel === 'codexspark') {
return filterModelOptionsByAllowlist([...options, getCodexSparkOption()])
} else if (customModel === 'opus' && getAPIProvider() === 'firstParty') {
return filterModelOptionsByAllowlist([
...options,

View File

@@ -1,36 +1,22 @@
export type ModifierKey = 'shift' | 'command' | 'control' | 'option'
/**
* Pre-warm the native module by loading it in advance.
* Call this early to avoid delay on first use.
*
* NOTE: The `modifiers-napi` package is an Anthropic-internal native addon
* that is not shipped with the open-source build. All calls are no-ops here
* to avoid supply-chain risk from unverified npm packages with the same name.
*/
export function prewarmModifiers(): void {
// No-op in open-source build — native modifier detection is not available.
}
/**
* Check if a specific modifier key is currently pressed (synchronous).
*
* Always returns false in the open-source build since the native addon
* is not available.
*/
export function isModifierPressed(_modifier: ModifierKey): boolean {
return false
}
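
Call sites keep the same contract, so nothing upstream changes; a small sketch (import path assumed):

```ts
import { isModifierPressed, prewarmModifiers } from './modifiers.ts'

prewarmModifiers() // safe everywhere: there is nothing to preload

// Existing branches keep compiling and simply never fire, because
// isModifierPressed always returns false without the native addon.
if (isModifierPressed('shift')) {
  // e.g. toggle fast mode
}
```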

View File

@@ -1,9 +1,14 @@
import assert from 'node:assert/strict'
import { mkdtempSync, rmSync, writeFileSync } from 'node:fs'
import { tmpdir } from 'node:os'
import { join } from 'node:path'
import test from 'node:test'
import {
buildCodexProfileEnv,
buildLaunchEnv,
buildOllamaProfileEnv,
buildOpenAIProfileEnv,
selectAutoProfile,
type ProfileFile,
} from './providerProfile.ts'
@@ -16,6 +21,8 @@ function profile(profile: ProfileFile['profile'], env: ProfileFile['env']): Prof
}
}
const missingCodexAuthPath = join(tmpdir(), 'openclaude-missing-codex-auth.json')
test('matching persisted ollama env is reused for ollama launch', async () => {
const env = await buildLaunchEnv({
profile: 'ollama',
@@ -45,6 +52,9 @@ test('ollama launch ignores mismatched persisted openai env and shell model fall
processEnv: {
OPENAI_BASE_URL: 'https://api.deepseek.com/v1',
OPENAI_MODEL: 'gpt-4o-mini',
OPENAI_API_KEY: 'sk-live',
CODEX_API_KEY: 'codex-live',
CHATGPT_ACCOUNT_ID: 'acct_live',
},
getOllamaChatBaseUrl: () => 'http://localhost:11434/v1',
resolveOllamaDefaultModel: async () => 'qwen2.5-coder:7b',
@@ -52,6 +62,9 @@ test('ollama launch ignores mismatched persisted openai env and shell model fall
assert.equal(env.OPENAI_BASE_URL, 'http://localhost:11434/v1')
assert.equal(env.OPENAI_MODEL, 'qwen2.5-coder:7b')
assert.equal(env.OPENAI_API_KEY, undefined)
assert.equal(env.CODEX_API_KEY, undefined)
assert.equal(env.CHATGPT_ACCOUNT_ID, undefined)
})
test('openai launch ignores mismatched persisted ollama env', async () => {
@@ -64,6 +77,8 @@ test('openai launch ignores mismatched persisted ollama env', async () => {
goal: 'latency',
processEnv: {
OPENAI_API_KEY: 'sk-live',
CODEX_API_KEY: 'codex-live',
CHATGPT_ACCOUNT_ID: 'acct_live',
},
getOllamaChatBaseUrl: () => 'http://localhost:11434/v1',
resolveOllamaDefaultModel: async () => 'llama3.1:8b',
@@ -72,6 +87,159 @@ test('openai launch ignores mismatched persisted ollama env', async () => {
assert.equal(env.OPENAI_BASE_URL, 'https://api.openai.com/v1')
assert.equal(env.OPENAI_MODEL, 'gpt-4o-mini')
assert.equal(env.OPENAI_API_KEY, 'sk-live')
assert.equal(env.CODEX_API_KEY, undefined)
assert.equal(env.CHATGPT_ACCOUNT_ID, undefined)
})
test('openai launch ignores codex shell transport hints', async () => {
const env = await buildLaunchEnv({
profile: 'openai',
persisted: null,
goal: 'balanced',
processEnv: {
OPENAI_API_KEY: 'sk-live',
OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
OPENAI_MODEL: 'codexplan',
},
})
assert.equal(env.OPENAI_BASE_URL, 'https://api.openai.com/v1')
assert.equal(env.OPENAI_MODEL, 'gpt-4o')
assert.equal(env.OPENAI_API_KEY, 'sk-live')
})
test('openai launch ignores codex persisted transport hints', async () => {
const env = await buildLaunchEnv({
profile: 'openai',
persisted: profile('openai', {
OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
OPENAI_MODEL: 'codexplan',
OPENAI_API_KEY: 'sk-persisted',
}),
goal: 'balanced',
processEnv: {
OPENAI_API_KEY: 'sk-live',
},
})
assert.equal(env.OPENAI_BASE_URL, 'https://api.openai.com/v1')
assert.equal(env.OPENAI_MODEL, 'gpt-4o')
assert.equal(env.OPENAI_API_KEY, 'sk-live')
})
test('matching persisted codex env is reused for codex launch', async () => {
const env = await buildLaunchEnv({
profile: 'codex',
persisted: profile('codex', {
OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
OPENAI_MODEL: 'codexspark',
CODEX_API_KEY: 'codex-persisted',
CHATGPT_ACCOUNT_ID: 'acct_persisted',
}),
goal: 'balanced',
processEnv: {
CODEX_AUTH_JSON_PATH: missingCodexAuthPath,
},
})
assert.equal(env.OPENAI_BASE_URL, 'https://chatgpt.com/backend-api/codex')
assert.equal(env.OPENAI_MODEL, 'codexspark')
assert.equal(env.CODEX_API_KEY, 'codex-persisted')
assert.equal(env.CHATGPT_ACCOUNT_ID, 'acct_persisted')
})
test('codex launch normalizes poisoned persisted base urls', async () => {
const env = await buildLaunchEnv({
profile: 'codex',
persisted: profile('codex', {
OPENAI_BASE_URL: 'https://api.openai.com/v1',
OPENAI_MODEL: 'codexspark',
CHATGPT_ACCOUNT_ID: 'acct_persisted',
}),
goal: 'balanced',
processEnv: {
CODEX_AUTH_JSON_PATH: missingCodexAuthPath,
},
})
assert.equal(env.OPENAI_BASE_URL, 'https://chatgpt.com/backend-api/codex')
assert.equal(env.OPENAI_MODEL, 'codexspark')
})
test('codex launch ignores mismatched persisted openai env', async () => {
const env = await buildLaunchEnv({
profile: 'codex',
persisted: profile('openai', {
OPENAI_BASE_URL: 'https://api.openai.com/v1',
OPENAI_MODEL: 'gpt-4o',
OPENAI_API_KEY: 'sk-persisted',
}),
goal: 'balanced',
processEnv: {
OPENAI_BASE_URL: 'https://api.openai.com/v1',
OPENAI_MODEL: 'gpt-4o-mini',
OPENAI_API_KEY: 'sk-live',
CODEX_API_KEY: 'codex-live',
CHATGPT_ACCOUNT_ID: 'acct_live',
},
})
assert.equal(env.OPENAI_BASE_URL, 'https://chatgpt.com/backend-api/codex')
assert.equal(env.OPENAI_MODEL, 'codexplan')
assert.equal(env.OPENAI_API_KEY, undefined)
assert.equal(env.CODEX_API_KEY, 'codex-live')
assert.equal(env.CHATGPT_ACCOUNT_ID, 'acct_live')
})
test('codex launch ignores placeholder codex env keys', async () => {
const env = await buildLaunchEnv({
profile: 'codex',
persisted: profile('codex', {
OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
OPENAI_MODEL: 'codexspark',
CODEX_API_KEY: 'codex-persisted',
CHATGPT_ACCOUNT_ID: 'acct_persisted',
}),
goal: 'balanced',
processEnv: {
CODEX_API_KEY: 'SUA_CHAVE',
CODEX_AUTH_JSON_PATH: missingCodexAuthPath,
},
})
assert.equal(env.CODEX_API_KEY, 'codex-persisted')
assert.equal(env.CHATGPT_ACCOUNT_ID, 'acct_persisted')
})
test('codex launch prefers auth account id over stale persisted value', async () => {
const codexHome = mkdtempSync(join(tmpdir(), 'openclaude-codex-'))
try {
writeFileSync(
join(codexHome, 'auth.json'),
JSON.stringify({
access_token: 'codex-live',
account_id: 'acct_auth',
}),
'utf8',
)
const env = await buildLaunchEnv({
profile: 'codex',
persisted: profile('codex', {
OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
OPENAI_MODEL: 'codexspark',
CHATGPT_ACCOUNT_ID: 'acct_persisted',
}),
goal: 'balanced',
processEnv: {
CODEX_HOME: codexHome,
},
})
assert.equal(env.CHATGPT_ACCOUNT_ID, 'acct_auth')
} finally {
rmSync(codexHome, { recursive: true, force: true })
}
})
test('ollama profiles never persist openai api keys', () => {
@@ -86,6 +254,53 @@ test('ollama profiles never persist openai api keys', () => {
assert.equal('OPENAI_API_KEY' in env, false)
})
test('codex profiles accept explicit codex credentials', () => {
const env = buildCodexProfileEnv({
model: 'codexspark',
apiKey: 'codex-live',
processEnv: {
CHATGPT_ACCOUNT_ID: 'acct_123',
},
})
assert.deepEqual(env, {
OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
OPENAI_MODEL: 'codexspark',
CODEX_API_KEY: 'codex-live',
CHATGPT_ACCOUNT_ID: 'acct_123',
})
})
test('codex profiles require a chatgpt account id', () => {
const env = buildCodexProfileEnv({
model: 'codexspark',
apiKey: 'codex-live',
processEnv: {
CODEX_AUTH_JSON_PATH: missingCodexAuthPath,
},
})
assert.equal(env, null)
})
test('openai profiles ignore codex shell transport hints', () => {
const env = buildOpenAIProfileEnv({
goal: 'balanced',
apiKey: 'sk-live',
processEnv: {
OPENAI_BASE_URL: 'https://chatgpt.com/backend-api/codex',
OPENAI_MODEL: 'codexplan',
OPENAI_API_KEY: 'sk-live',
},
})
assert.deepEqual(env, {
OPENAI_BASE_URL: 'https://api.openai.com/v1',
OPENAI_MODEL: 'gpt-4o',
OPENAI_API_KEY: 'sk-live',
})
})
test('auto profile falls back to openai when no viable ollama model exists', () => {
assert.equal(selectAutoProfile(null), 'openai')
assert.equal(selectAutoProfile('qwen2.5-coder:7b'), 'ollama')

View File

@@ -1,14 +1,24 @@
import {
DEFAULT_CODEX_BASE_URL,
DEFAULT_OPENAI_BASE_URL,
isCodexBaseUrl,
resolveCodexApiCredentials,
resolveProviderRequest,
} from '../services/api/providerConfig.ts'
import {
getGoalDefaultOpenAIModel,
type RecommendationGoal,
} from './providerRecommendation.ts'
export type ProviderProfile = 'openai' | 'ollama' | 'codex'
export type ProfileEnv = {
OPENAI_BASE_URL?: string
OPENAI_MODEL?: string
OPENAI_API_KEY?: string
CODEX_API_KEY?: string
CHATGPT_ACCOUNT_ID?: string
CODEX_ACCOUNT_ID?: string
}
export type ProfileFile = {
@@ -40,6 +50,7 @@ export function buildOllamaProfileEnv(
export function buildOpenAIProfileEnv(options: {
goal: RecommendationGoal
model?: string | null
baseUrl?: string | null
apiKey?: string | null
processEnv?: NodeJS.ProcessEnv
}): ProfileEnv | null {
@@ -49,13 +60,57 @@ export function buildOpenAIProfileEnv(options: {
return null
}
const defaultModel = getGoalDefaultOpenAIModel(options.goal)
const shellOpenAIRequest = resolveProviderRequest({
model: processEnv.OPENAI_MODEL,
baseUrl: processEnv.OPENAI_BASE_URL,
fallbackModel: defaultModel,
})
const useShellOpenAIConfig = shellOpenAIRequest.transport === 'chat_completions'
return {
OPENAI_BASE_URL:
options.baseUrl ||
(useShellOpenAIConfig ? processEnv.OPENAI_BASE_URL : undefined) ||
DEFAULT_OPENAI_BASE_URL,
OPENAI_MODEL:
options.model ||
(useShellOpenAIConfig ? processEnv.OPENAI_MODEL : undefined) ||
defaultModel,
OPENAI_API_KEY: key,
}
}
export function buildCodexProfileEnv(options: {
model?: string | null
baseUrl?: string | null
apiKey?: string | null
processEnv?: NodeJS.ProcessEnv
}): ProfileEnv | null {
const processEnv = options.processEnv ?? process.env
const key = sanitizeApiKey(options.apiKey ?? processEnv.CODEX_API_KEY)
const credentialEnv = key
? ({ ...processEnv, CODEX_API_KEY: key } as NodeJS.ProcessEnv)
: processEnv
const credentials = resolveCodexApiCredentials(credentialEnv)
if (!credentials.apiKey || !credentials.accountId) {
return null
}
const env: ProfileEnv = {
OPENAI_BASE_URL: options.baseUrl || DEFAULT_CODEX_BASE_URL,
OPENAI_MODEL: options.model || 'codexplan',
}
if (key) {
env.CODEX_API_KEY = key
}
env.CHATGPT_ACCOUNT_ID = credentials.accountId
return env
}
export function createProfileFile(
profile: ProviderProfile,
env: ProfileEnv,
@@ -103,21 +158,75 @@ export async function buildLaunchEnv(options: {
persistedEnv.OPENAI_MODEL ||
(await resolveOllamaModel(options.goal))
if (!processEnv.OPENAI_API_KEY || processEnv.OPENAI_API_KEY === 'SUA_CHAVE') {
delete env.OPENAI_API_KEY
}
delete env.CODEX_API_KEY
delete env.CHATGPT_ACCOUNT_ID
delete env.CODEX_ACCOUNT_ID
return env
}
if (options.profile === 'codex') {
env.OPENAI_BASE_URL =
persistedEnv.OPENAI_BASE_URL && isCodexBaseUrl(persistedEnv.OPENAI_BASE_URL)
? persistedEnv.OPENAI_BASE_URL
: DEFAULT_CODEX_BASE_URL
env.OPENAI_MODEL = persistedEnv.OPENAI_MODEL || 'codexplan'
delete env.OPENAI_API_KEY
const codexKey =
sanitizeApiKey(processEnv.CODEX_API_KEY) ||
sanitizeApiKey(persistedEnv.CODEX_API_KEY)
const liveCodexCredentials = resolveCodexApiCredentials(processEnv)
const codexAccountId =
processEnv.CHATGPT_ACCOUNT_ID ||
processEnv.CODEX_ACCOUNT_ID ||
liveCodexCredentials.accountId ||
persistedEnv.CHATGPT_ACCOUNT_ID ||
persistedEnv.CODEX_ACCOUNT_ID
if (codexKey) {
env.CODEX_API_KEY = codexKey
} else {
delete env.CODEX_API_KEY
}
if (codexAccountId) {
env.CHATGPT_ACCOUNT_ID = codexAccountId
} else {
delete env.CHATGPT_ACCOUNT_ID
}
delete env.CODEX_ACCOUNT_ID
return env
}
const defaultOpenAIModel = getGoalDefaultOpenAIModel(options.goal)
const shellOpenAIRequest = resolveProviderRequest({
model: processEnv.OPENAI_MODEL,
baseUrl: processEnv.OPENAI_BASE_URL,
fallbackModel: defaultOpenAIModel,
})
const persistedOpenAIRequest = resolveProviderRequest({
model: persistedEnv.OPENAI_MODEL,
baseUrl: persistedEnv.OPENAI_BASE_URL,
fallbackModel: defaultOpenAIModel,
})
const useShellOpenAIConfig = shellOpenAIRequest.transport === 'chat_completions'
const usePersistedOpenAIConfig =
(!persistedEnv.OPENAI_MODEL && !persistedEnv.OPENAI_BASE_URL) ||
persistedOpenAIRequest.transport === 'chat_completions'
env.OPENAI_BASE_URL =
(useShellOpenAIConfig ? processEnv.OPENAI_BASE_URL : undefined) ||
(usePersistedOpenAIConfig ? persistedEnv.OPENAI_BASE_URL : undefined) ||
DEFAULT_OPENAI_BASE_URL
env.OPENAI_MODEL =
(useShellOpenAIConfig ? processEnv.OPENAI_MODEL : undefined) ||
(usePersistedOpenAIConfig ? persistedEnv.OPENAI_MODEL : undefined) ||
defaultOpenAIModel
env.OPENAI_API_KEY = processEnv.OPENAI_API_KEY || persistedEnv.OPENAI_API_KEY
delete env.CODEX_API_KEY
delete env.CHATGPT_ACCOUNT_ID
delete env.CODEX_ACCOUNT_ID
return env
}
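
Putting the pieces together, a launch-time sketch that mirrors the tests above (the error message and the spawn step are illustrative):

```ts
import { buildCodexProfileEnv, buildLaunchEnv } from './providerProfile.ts'

// Profile bootstrap: null unless CODEX_API_KEY or the Codex CLI auth.json
// yields both an access token and a ChatGPT account id.
const codexEnv = buildCodexProfileEnv({ model: 'codexspark' })
if (!codexEnv) {
  throw new Error('set CODEX_API_KEY or log in with the Codex CLI first')
}
// codexEnv would normally be persisted to the profile file here.

// Launch: resolve the effective env for the requested profile, scrubbing
// credentials that belong to other providers (see the tests above).
const launchEnv = await buildLaunchEnv({
  profile: 'codex',
  persisted: null,
  goal: 'balanced',
  processEnv: process.env,
})
// spawn openclaude with { ...process.env, ...launchEnv }
```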