Merge pull request #11 from strato-space/feat/codexplan-codexspark

Add Codex plan/spark provider support
This commit is contained in:
Kevin Codex
2026-04-01 19:28:55 +08:00
committed by GitHub
13 changed files with 1560 additions and 117 deletions

View File

@@ -2,7 +2,7 @@
Use Claude Code with **any LLM** — not just Claude.
OpenClaude is a fork of the [Claude Code source leak](https://gitlawb.com/node/repos/z6MkgKkb/instructkr-claude-code) (exposed via npm source maps on March 31, 2026). We added an OpenAI-compatible provider shim so you can plug in GPT-4o, DeepSeek, Gemini, Llama, Mistral, or any model that speaks the OpenAI chat completions API.
OpenClaude is a fork of the [Claude Code source leak](https://gitlawb.com/node/repos/z6MkgKkb/instructkr-claude-code) (exposed via npm source maps on March 31, 2026). We added an OpenAI-compatible provider shim so you can plug in GPT-4o, DeepSeek, Gemini, Llama, Mistral, or any model that speaks the OpenAI chat completions API. It now also supports the ChatGPT Codex backend for `codexplan` and `codexspark`.
All of Claude Code's tools work — bash, file read/write/edit, grep, glob, agents, tasks, MCP — just powered by whatever model you choose.
@@ -82,6 +82,25 @@ export OPENAI_API_KEY=sk-...
export OPENAI_MODEL=gpt-4o
```
### Codex via ChatGPT auth
`codexplan` maps to GPT-5.4 on the Codex backend with high reasoning effort.
`codexspark` maps to GPT-5.3 Codex Spark for faster loops.
If you already use the Codex CLI, OpenClaude will read `~/.codex/auth.json`
automatically. You can also point it elsewhere with `CODEX_AUTH_JSON_PATH` or
override the token directly with `CODEX_API_KEY`.
```bash
export CLAUDE_CODE_USE_OPENAI=1
export OPENAI_MODEL=codexplan
# optional if you do not already have ~/.codex/auth.json
export CODEX_API_KEY=...
openclaude
```
### DeepSeek
```bash
@@ -165,6 +184,9 @@ export OPENAI_MODEL=gpt-4o
| `OPENAI_API_KEY` | Yes* | Your API key (*not needed for local models like Ollama) |
| `OPENAI_MODEL` | Yes | Model name (e.g. `gpt-4o`, `deepseek-chat`, `llama3.3:70b`) |
| `OPENAI_BASE_URL` | No | API endpoint (defaults to `https://api.openai.com/v1`) |
| `CODEX_API_KEY` | Codex only | Codex/ChatGPT access token override |
| `CODEX_AUTH_JSON_PATH` | Codex only | Path to a Codex CLI `auth.json` file |
| `CODEX_HOME` | Codex only | Alternative Codex home directory (`auth.json` will be read from here) |
You can also use `ANTHROPIC_MODEL` to override the model name. `OPENAI_MODEL` takes priority.
@@ -197,6 +219,7 @@ bun run hardening:strict
Notes:
- `doctor:runtime` fails fast if `CLAUDE_CODE_USE_OPENAI=1` with a placeholder key (`SUA_CHAVE`) or a missing key for non-local providers.
- Local providers (for example `http://localhost:11434/v1`) can run without `OPENAI_API_KEY`.
- Codex profiles validate `CODEX_API_KEY` or the Codex CLI auth file and probe `POST /responses` instead of `GET /models`.
### Provider Launch Profiles
@@ -206,15 +229,24 @@ Use profile launchers to avoid repeated environment setup:
# one-time profile bootstrap (auto-detect ollama, otherwise openai)
bun run profile:init
# codex bootstrap (defaults to codexplan and ~/.codex/auth.json)
bun run profile:codex
# openai bootstrap with explicit key
bun run profile:init -- --provider openai --api-key sk-...
# ollama bootstrap with custom model
bun run profile:init -- --provider ollama --model llama3.1:8b
# codex bootstrap with a fast model alias
bun run profile:init -- --provider codex --model codexspark
# launch using persisted profile (.openclaude-profile.json)
bun run dev:profile
# codex profile (uses CODEX_API_KEY or ~/.codex/auth.json)
bun run dev:codex
# OpenAI profile (requires OPENAI_API_KEY in your shell)
bun run dev:openai
@@ -222,7 +254,7 @@ bun run dev:openai
bun run dev:ollama
```
`dev:openai` and `dev:ollama` run `doctor:runtime` first and only launch the app if checks pass.
`dev:openai`, `dev:ollama`, and `dev:codex` run `doctor:runtime` first and only launch the app if checks pass.
For `dev:ollama`, make sure Ollama is running locally before launch.
---

View File

@@ -16,10 +16,12 @@
"dev": "bun run build && node dist/cli.mjs",
"dev:profile": "bun run scripts/provider-launch.ts",
"dev:profile:fast": "bun run scripts/provider-launch.ts auto --fast --bare",
"dev:codex": "bun run scripts/provider-launch.ts codex",
"dev:openai": "bun run scripts/provider-launch.ts openai",
"dev:ollama": "bun run scripts/provider-launch.ts ollama",
"dev:ollama:fast": "bun run scripts/provider-launch.ts ollama --fast --bare",
"profile:init": "bun run scripts/provider-bootstrap.ts",
"profile:codex": "bun run profile:init -- --provider codex --model codexplan",
"profile:fast": "bun run profile:init -- --provider ollama --model llama3.2:3b",
"profile:code": "bun run profile:init -- --provider ollama --model qwen2.5-coder:7b",
"dev:fast": "bun run profile:fast && bun run dev:ollama:fast",
@@ -27,6 +29,7 @@
"start": "node dist/cli.mjs",
"typecheck": "tsc --noEmit",
"smoke": "bun run build && node dist/cli.mjs --version",
"test:provider": "bun test src/services/api/*.test.ts",
"doctor:runtime": "bun run scripts/system-check.ts",
"doctor:runtime:json": "bun run scripts/system-check.ts --json",
"doctor:report": "bun run scripts/system-check.ts --out reports/doctor-runtime.json",

View File

@@ -1,8 +1,12 @@
// @ts-nocheck
import { writeFileSync } from 'node:fs'
import { resolve } from 'node:path'
import {
DEFAULT_CODEX_BASE_URL,
resolveCodexApiCredentials,
} from '../src/services/api/providerConfig.js'
type ProviderProfile = 'openai' | 'ollama'
type ProviderProfile = 'openai' | 'ollama' | 'codex'
type ProfileFile = {
profile: ProviderProfile
@@ -10,6 +14,7 @@ type ProfileFile = {
OPENAI_BASE_URL?: string
OPENAI_MODEL?: string
OPENAI_API_KEY?: string
CODEX_API_KEY?: string
}
createdAt: string
}
@@ -23,7 +28,7 @@ function parseArg(name: string): string | null {
function parseProviderArg(): ProviderProfile | 'auto' {
const p = parseArg('--provider')?.toLowerCase()
if (p === 'openai' || p === 'ollama') return p
if (p === 'openai' || p === 'ollama' || p === 'codex') return p
return 'auto'
}
@@ -69,6 +74,23 @@ async function main(): Promise<void> {
env.OPENAI_MODEL = argModel || process.env.OPENAI_MODEL || 'llama3.1:8b'
const key = sanitizeApiKey(argApiKey || process.env.OPENAI_API_KEY || null)
if (key) env.OPENAI_API_KEY = key
} else if (selected === 'codex') {
env.OPENAI_BASE_URL =
argBaseUrl || process.env.OPENAI_BASE_URL || DEFAULT_CODEX_BASE_URL
env.OPENAI_MODEL = argModel || process.env.OPENAI_MODEL || 'codexplan'
const key = sanitizeApiKey(argApiKey || process.env.CODEX_API_KEY || null)
if (key) {
env.CODEX_API_KEY = key
} else {
const credentials = resolveCodexApiCredentials(process.env)
if (!credentials.apiKey) {
const authHint = credentials.authPath
? ` or make sure ${credentials.authPath} exists`
: ''
console.error(`Codex profile requires CODEX_API_KEY${authHint}.`)
process.exit(1)
}
}
} else {
env.OPENAI_BASE_URL = argBaseUrl || process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1'
env.OPENAI_MODEL = argModel || process.env.OPENAI_MODEL || 'gpt-4o'

View File

@@ -2,8 +2,12 @@
import { spawn } from 'node:child_process'
import { existsSync, readFileSync } from 'node:fs'
import { resolve } from 'node:path'
import {
DEFAULT_CODEX_BASE_URL,
resolveCodexApiCredentials,
} from '../src/services/api/providerConfig.js'
type ProviderProfile = 'openai' | 'ollama'
type ProviderProfile = 'openai' | 'ollama' | 'codex'
type ProfileFile = {
profile: ProviderProfile
@@ -11,6 +15,7 @@ type ProfileFile = {
OPENAI_BASE_URL?: string
OPENAI_MODEL?: string
OPENAI_API_KEY?: string
CODEX_API_KEY?: string
}
}
@@ -32,7 +37,7 @@ function parseLaunchOptions(argv: string[]): LaunchOptions {
continue
}
if ((lower === 'auto' || lower === 'openai' || lower === 'ollama') && requestedProfile === 'auto') {
if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex') && requestedProfile === 'auto') {
requestedProfile = lower as ProviderProfile | 'auto'
continue
}
@@ -62,7 +67,7 @@ function loadPersistedProfile(): ProfileFile | null {
if (!existsSync(path)) return null
try {
const parsed = JSON.parse(readFileSync(path, 'utf8')) as ProfileFile
if (parsed.profile === 'openai' || parsed.profile === 'ollama') {
if (parsed.profile === 'openai' || parsed.profile === 'ollama' || parsed.profile === 'codex') {
return parsed
}
return null
@@ -115,6 +120,20 @@ function buildEnv(profile: ProviderProfile, persisted: ProfileFile | null): Node
return env
}
if (profile === 'codex') {
env.OPENAI_BASE_URL =
process.env.OPENAI_BASE_URL ||
persistedEnv.OPENAI_BASE_URL ||
DEFAULT_CODEX_BASE_URL
env.OPENAI_MODEL =
process.env.OPENAI_MODEL ||
persistedEnv.OPENAI_MODEL ||
'codexplan'
env.CODEX_API_KEY =
process.env.CODEX_API_KEY || persistedEnv.CODEX_API_KEY
return env
}
env.OPENAI_BASE_URL = process.env.OPENAI_BASE_URL || persistedEnv.OPENAI_BASE_URL || 'https://api.openai.com/v1'
env.OPENAI_MODEL = process.env.OPENAI_MODEL || persistedEnv.OPENAI_MODEL || 'gpt-4o'
env.OPENAI_API_KEY = process.env.OPENAI_API_KEY || persistedEnv.OPENAI_API_KEY
@@ -137,18 +156,22 @@ function quoteArg(arg: string): string {
}
function printSummary(profile: ProviderProfile, env: NodeJS.ProcessEnv): void {
const keySet = Boolean(env.OPENAI_API_KEY)
const keySet = profile === 'codex'
? Boolean(resolveCodexApiCredentials(env).apiKey)
: Boolean(env.OPENAI_API_KEY)
console.log(`Launching profile: ${profile}`)
console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`)
console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`)
console.log(`OPENAI_API_KEY_SET=${keySet}`)
console.log(
`${profile === 'codex' ? 'CODEX_API_KEY_SET' : 'OPENAI_API_KEY_SET'}=${keySet}`,
)
}
async function main(): Promise<void> {
const options = parseLaunchOptions(process.argv.slice(2))
const requestedProfile = options.requestedProfile
if (!requestedProfile) {
console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|auto] [--fast] [-- <cli args>]')
console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|auto] [--fast] [-- <cli args>]')
process.exit(1)
}
@@ -175,6 +198,17 @@ async function main(): Promise<void> {
process.exit(1)
}
if (profile === 'codex') {
const credentials = resolveCodexApiCredentials(env)
if (!credentials.apiKey) {
const authHint = credentials.authPath
? ` or make sure ${credentials.authPath} exists`
: ''
console.error(`CODEX_API_KEY is required for codex profile${authHint}. Run: bun run profile:init -- --provider codex --model codexplan`)
process.exit(1)
}
}
printSummary(profile, env)
const doctorCode = await runCommand('bun run scripts/system-check.ts', env)

View File

@@ -2,6 +2,11 @@
import { existsSync, mkdirSync, writeFileSync } from 'node:fs'
import { dirname, join, resolve } from 'node:path'
import { spawnSync } from 'node:child_process'
import {
resolveCodexApiCredentials,
resolveProviderRequest,
isLocalProviderUrl as isProviderLocalUrl,
} from '../src/services/api/providerConfig.js'
type CheckResult = {
ok: boolean
@@ -84,12 +89,7 @@ function checkBuildArtifacts(): CheckResult {
}
function isLocalBaseUrl(baseUrl: string): boolean {
try {
const url = new URL(baseUrl)
return url.hostname === 'localhost' || url.hostname === '127.0.0.1' || url.hostname === '::1'
} catch {
return false
}
return isProviderLocalUrl(baseUrl)
}
function currentBaseUrl(): string {
@@ -105,23 +105,50 @@ function checkOpenAIEnv(): CheckResult[] {
return results
}
const baseUrl = process.env.OPENAI_BASE_URL ?? 'https://api.openai.com/v1'
const model = process.env.OPENAI_MODEL
const key = process.env.OPENAI_API_KEY
const request = resolveProviderRequest({
model: process.env.OPENAI_MODEL,
baseUrl: process.env.OPENAI_BASE_URL,
})
results.push(pass('Provider mode', 'OpenAI-compatible provider enabled.'))
results.push(
pass(
'Provider mode',
request.transport === 'codex_responses'
? 'Codex responses backend enabled.'
: 'OpenAI-compatible provider enabled.',
),
)
if (!model) {
if (!process.env.OPENAI_MODEL) {
results.push(pass('OPENAI_MODEL', 'Not set. Runtime fallback model will be used.'))
} else {
results.push(pass('OPENAI_MODEL', model))
results.push(pass('OPENAI_MODEL', process.env.OPENAI_MODEL))
}
results.push(pass('OPENAI_BASE_URL', baseUrl))
results.push(pass('OPENAI_BASE_URL', request.baseUrl))
if (request.transport === 'codex_responses') {
const credentials = resolveCodexApiCredentials(process.env)
if (!credentials.apiKey) {
const authHint = credentials.authPath
? `Missing CODEX_API_KEY and no usable auth.json at ${credentials.authPath}.`
: 'Missing CODEX_API_KEY and auth.json fallback.'
results.push(fail('CODEX auth', authHint))
} else if (!credentials.accountId) {
results.push(fail('CHATGPT_ACCOUNT_ID', 'Missing chatgpt_account_id in Codex auth.'))
} else {
const detail = credentials.source === 'env'
? 'Using CODEX_API_KEY.'
: `Using ${credentials.authPath}.`
results.push(pass('CODEX auth', detail))
}
return results
}
const key = process.env.OPENAI_API_KEY
if (key === 'SUA_CHAVE') {
results.push(fail('OPENAI_API_KEY', 'Placeholder value detected: SUA_CHAVE.'))
} else if (!key && !isLocalBaseUrl(baseUrl)) {
} else if (!key && !isLocalBaseUrl(request.baseUrl)) {
results.push(fail('OPENAI_API_KEY', 'Missing key for non-local provider URL.'))
} else if (!key) {
results.push(pass('OPENAI_API_KEY', 'Not set (allowed for local providers like Ollama/LM Studio).'))
@@ -137,22 +164,53 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
return pass('Provider reachability', 'Skipped (OpenAI-compatible mode disabled).')
}
const baseUrl = currentBaseUrl()
const key = process.env.OPENAI_API_KEY
const endpoint = `${baseUrl.replace(/\/$/, '')}/models`
const request = resolveProviderRequest({
model: process.env.OPENAI_MODEL,
baseUrl: process.env.OPENAI_BASE_URL,
})
const endpoint = request.transport === 'codex_responses'
? `${request.baseUrl}/responses`
: `${request.baseUrl}/models`
const controller = new AbortController()
const timeout = setTimeout(() => controller.abort(), 4000)
try {
const headers: Record<string, string> = {}
if (key) {
headers.Authorization = `Bearer ${key}`
let method = 'GET'
let body: string | undefined
if (request.transport === 'codex_responses') {
const credentials = resolveCodexApiCredentials(process.env)
if (credentials.apiKey) {
headers.Authorization = `Bearer ${credentials.apiKey}`
}
if (credentials.accountId) {
headers['chatgpt-account-id'] = credentials.accountId
}
headers['Content-Type'] = 'application/json'
method = 'POST'
body = JSON.stringify({
model: request.resolvedModel,
instructions: 'Runtime doctor probe.',
input: [
{
type: 'message',
role: 'user',
content: [{ type: 'input_text', text: 'ping' }],
},
],
store: false,
stream: true,
})
} else if (process.env.OPENAI_API_KEY) {
headers.Authorization = `Bearer ${process.env.OPENAI_API_KEY}`
}
const response = await fetch(endpoint, {
method: 'GET',
method,
headers,
body,
signal: controller.signal,
})
@@ -209,11 +267,16 @@ function checkOllamaProcessorMode(): CheckResult {
}
function serializeSafeEnvSummary(): Record<string, string | boolean> {
const request = resolveProviderRequest({
model: process.env.OPENAI_MODEL,
baseUrl: process.env.OPENAI_BASE_URL,
})
return {
CLAUDE_CODE_USE_OPENAI: isTruthy(process.env.CLAUDE_CODE_USE_OPENAI),
OPENAI_MODEL: process.env.OPENAI_MODEL ?? '(unset)',
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL ?? 'https://api.openai.com/v1',
OPENAI_BASE_URL: request.baseUrl,
OPENAI_API_KEY_SET: Boolean(process.env.OPENAI_API_KEY),
CODEX_API_KEY_SET: Boolean(resolveCodexApiCredentials(process.env).apiKey),
}
}

View File

@@ -1,4 +1,8 @@
import { feature } from 'bun:bundle';
import {
resolveCodexApiCredentials,
resolveProviderRequest,
} from '../services/api/providerConfig.js'
// Bugfix for corepack auto-pinning, which adds yarnpkg to people's package.jsons
// eslint-disable-next-line custom-rules/no-top-level-side-effects
@@ -46,15 +50,33 @@ function validateProviderEnvOrExit(): void {
return
}
const apiKey = process.env.OPENAI_API_KEY
const baseUrl = process.env.OPENAI_BASE_URL
const request = resolveProviderRequest({
model: process.env.OPENAI_MODEL,
baseUrl: process.env.OPENAI_BASE_URL,
})
if (apiKey === 'SUA_CHAVE') {
if (process.env.OPENAI_API_KEY === 'SUA_CHAVE') {
console.error('Invalid OPENAI_API_KEY: placeholder value SUA_CHAVE detected. Set a real key or unset for local providers.')
process.exit(1)
}
if (!apiKey && !isLocalProviderUrl(baseUrl)) {
if (request.transport === 'codex_responses') {
const credentials = resolveCodexApiCredentials()
if (!credentials.apiKey) {
const authHint = credentials.authPath
? ` or put auth.json at ${credentials.authPath}`
: ''
console.error(`Codex auth is required for ${request.requestedModel}. Set CODEX_API_KEY${authHint}.`)
process.exit(1)
}
if (!credentials.accountId) {
console.error('Codex auth is missing chatgpt_account_id. Re-login with Codex or set CHATGPT_ACCOUNT_ID/CODEX_ACCOUNT_ID.')
process.exit(1)
}
return
}
if (!process.env.OPENAI_API_KEY && !isLocalProviderUrl(request.baseUrl)) {
console.error('OPENAI_API_KEY is required when CLAUDE_CODE_USE_OPENAI=1 and OPENAI_BASE_URL is not local.')
process.exit(1)
}

View File

@@ -0,0 +1,172 @@
import { afterEach, describe, expect, test } from 'bun:test'
import { mkdtempSync, rmSync, writeFileSync } from 'node:fs'
import { join } from 'node:path'
import { tmpdir } from 'node:os'
import {
codexStreamToAnthropic,
convertAnthropicMessagesToResponsesInput,
convertCodexResponseToAnthropicMessage,
} from './codexShim.js'
import {
resolveCodexApiCredentials,
resolveProviderRequest,
} from './providerConfig.js'
const tempDirs: string[] = []
afterEach(() => {
while (tempDirs.length > 0) {
const dir = tempDirs.pop()
if (dir) rmSync(dir, { recursive: true, force: true })
}
})
function createTempAuthJson(payload: Record<string, unknown>): string {
const dir = mkdtempSync(join(tmpdir(), 'openclaude-codex-'))
tempDirs.push(dir)
const authPath = join(dir, 'auth.json')
writeFileSync(authPath, JSON.stringify(payload), 'utf8')
return authPath
}
async function collectStreamEventTypes(responseText: string): Promise<string[]> {
const stream = new ReadableStream({
start(controller) {
controller.enqueue(new TextEncoder().encode(responseText))
controller.close()
},
})
const events: string[] = []
for await (const event of codexStreamToAnthropic(new Response(stream), 'gpt-5.4')) {
events.push(event.type)
}
return events
}
describe('Codex provider config', () => {
test('resolves codexplan alias to Codex transport with reasoning', () => {
const resolved = resolveProviderRequest({ model: 'codexplan' })
expect(resolved.transport).toBe('codex_responses')
expect(resolved.resolvedModel).toBe('gpt-5.4')
expect(resolved.reasoning).toEqual({ effort: 'high' })
})
test('loads Codex credentials from auth.json fallback', () => {
const authPath = createTempAuthJson({
tokens: {
access_token: 'header.payload.signature',
account_id: 'acct_test',
},
})
const credentials = resolveCodexApiCredentials({
CODEX_AUTH_JSON_PATH: authPath,
} as NodeJS.ProcessEnv)
expect(credentials.apiKey).toBe('header.payload.signature')
expect(credentials.accountId).toBe('acct_test')
expect(credentials.source).toBe('auth.json')
})
})
describe('Codex request translation', () => {
test('converts assistant tool use and user tool result into Responses items', () => {
const items = convertAnthropicMessagesToResponsesInput([
{
role: 'assistant',
content: [
{ type: 'text', text: 'Working...' },
{ type: 'tool_use', id: 'call_123', name: 'search', input: { q: 'x' } },
],
},
{
role: 'user',
content: [
{ type: 'tool_result', tool_use_id: 'call_123', content: 'done' },
],
},
])
expect(items).toEqual([
{
type: 'message',
role: 'assistant',
content: [{ type: 'output_text', text: 'Working...' }],
},
{
type: 'function_call',
id: 'fc_123',
call_id: 'call_123',
name: 'search',
arguments: '{"q":"x"}',
},
{
type: 'function_call_output',
call_id: 'call_123',
output: 'done',
},
])
})
test('converts completed Codex tool response into Anthropic message', () => {
const message = convertCodexResponseToAnthropicMessage(
{
id: 'resp_1',
model: 'gpt-5.3-codex-spark',
output: [
{
type: 'function_call',
id: 'fc_1',
call_id: 'call_1',
name: 'ping',
arguments: '{"value":"ping"}',
},
],
usage: { input_tokens: 12, output_tokens: 4 },
},
'gpt-5.3-codex-spark',
)
expect(message.stop_reason).toBe('tool_use')
expect(message.content).toEqual([
{
type: 'tool_use',
id: 'call_1',
name: 'ping',
input: { value: 'ping' },
},
])
})
test('translates Codex SSE text stream into Anthropic events', async () => {
const responseText = [
'event: response.output_item.added',
'data: {"type":"response.output_item.added","item":{"id":"msg_1","type":"message","status":"in_progress","content":[],"role":"assistant"},"output_index":0,"sequence_number":0}',
'',
'event: response.content_part.added',
'data: {"type":"response.content_part.added","content_index":0,"item_id":"msg_1","output_index":0,"part":{"type":"output_text","text":""},"sequence_number":1}',
'',
'event: response.output_text.delta',
'data: {"type":"response.output_text.delta","content_index":0,"delta":"ok","item_id":"msg_1","output_index":0,"sequence_number":2}',
'',
'event: response.output_item.done',
'data: {"type":"response.output_item.done","item":{"id":"msg_1","type":"message","status":"completed","content":[{"type":"output_text","text":"ok"}],"role":"assistant"},"output_index":0,"sequence_number":3}',
'',
'event: response.completed',
'data: {"type":"response.completed","response":{"id":"resp_1","status":"completed","model":"gpt-5.4","output":[{"type":"message","role":"assistant","content":[{"type":"output_text","text":"ok"}]}],"usage":{"input_tokens":2,"output_tokens":1}},"sequence_number":4}',
'',
].join('\n')
const eventTypes = await collectStreamEventTypes(responseText)
expect(eventTypes).toEqual([
'message_start',
'content_block_start',
'content_block_delta',
'content_block_stop',
'message_delta',
'message_stop',
])
})
})

View File

@@ -0,0 +1,740 @@
import type {
ResolvedCodexCredentials,
ResolvedProviderRequest,
} from './providerConfig.js'
export interface AnthropicUsage {
input_tokens: number
output_tokens: number
cache_creation_input_tokens: number
cache_read_input_tokens: number
}
export interface AnthropicStreamEvent {
type: string
message?: Record<string, unknown>
index?: number
content_block?: Record<string, unknown>
delta?: Record<string, unknown>
usage?: Partial<AnthropicUsage>
}
export interface ShimCreateParams {
model: string
messages: Array<Record<string, unknown>>
system?: unknown
tools?: Array<Record<string, unknown>>
max_tokens: number
stream?: boolean
temperature?: number
top_p?: number
tool_choice?: unknown
metadata?: unknown
[key: string]: unknown
}
type ResponsesInputPart =
| { type: 'input_text'; text: string }
| { type: 'output_text'; text: string }
| { type: 'input_image'; image_url: string }
type ResponsesInputItem =
| {
type: 'message'
role: 'user' | 'assistant'
content: ResponsesInputPart[]
}
| {
type: 'function_call'
id: string
call_id: string
name: string
arguments: string
}
| {
type: 'function_call_output'
call_id: string
output: string
}
type ResponsesTool = {
type: 'function'
name: string
description: string
parameters: Record<string, unknown>
strict?: boolean
}
type CodexSseEvent = {
event: string
data: Record<string, any>
}
function makeUsage(usage?: {
input_tokens?: number
output_tokens?: number
}): AnthropicUsage {
return {
input_tokens: usage?.input_tokens ?? 0,
output_tokens: usage?.output_tokens ?? 0,
cache_creation_input_tokens: 0,
cache_read_input_tokens: 0,
}
}
function makeMessageId(): string {
return `msg_${Math.random().toString(36).slice(2)}${Date.now().toString(36)}`
}
function normalizeToolUseId(toolUseId: string | undefined): {
id: string
callId: string
} {
const value = (toolUseId || '').trim()
if (!value) {
return {
id: 'fc_unknown',
callId: 'call_unknown',
}
}
if (value.startsWith('call_')) {
return {
id: `fc_${value.slice('call_'.length)}`,
callId: value,
}
}
if (value.startsWith('fc_')) {
return {
id: value,
callId: `call_${value.slice('fc_'.length)}`,
}
}
return {
id: `fc_${value}`,
callId: value,
}
}
function convertSystemPrompt(system: unknown): string {
if (!system) return ''
if (typeof system === 'string') return system
if (Array.isArray(system)) {
return system
.map((block: { type?: string; text?: string }) =>
block.type === 'text' ? (block.text ?? '') : '',
)
.join('\n\n')
}
return String(system)
}
function convertToolResultToText(content: unknown): string {
if (typeof content === 'string') return content
if (!Array.isArray(content)) return JSON.stringify(content ?? '')
const chunks: string[] = []
for (const block of content) {
if (block?.type === 'text' && typeof block.text === 'string') {
chunks.push(block.text)
continue
}
if (block?.type === 'image') {
const src = block.source
if (src?.type === 'url' && src.url) {
chunks.push(`[Image](${src.url})`)
}
continue
}
if (typeof block?.text === 'string') {
chunks.push(block.text)
}
}
return chunks.join('\n')
}
function convertContentBlocksToResponsesParts(
content: unknown,
role: 'user' | 'assistant',
): ResponsesInputPart[] {
const textType = role === 'assistant' ? 'output_text' : 'input_text'
if (typeof content === 'string') {
return [{ type: textType, text: content }]
}
if (!Array.isArray(content)) {
return [{ type: textType, text: String(content ?? '') }]
}
const parts: ResponsesInputPart[] = []
for (const block of content) {
switch (block?.type) {
case 'text':
parts.push({ type: textType, text: block.text ?? '' })
break
case 'image': {
if (role === 'assistant') break
const source = block.source
if (source?.type === 'base64') {
parts.push({
type: 'input_image',
image_url: `data:${source.media_type};base64,${source.data}`,
})
} else if (source?.type === 'url' && source.url) {
parts.push({
type: 'input_image',
image_url: source.url,
})
}
break
}
case 'thinking':
if (block.thinking) {
parts.push({
type: textType,
text: `<thinking>${block.thinking}</thinking>`,
})
}
break
case 'tool_use':
case 'tool_result':
break
default:
if (typeof block?.text === 'string') {
parts.push({ type: textType, text: block.text })
}
}
}
return parts
}
export function convertAnthropicMessagesToResponsesInput(
messages: Array<{ role?: string; message?: { role?: string; content?: unknown }; content?: unknown }>,
): ResponsesInputItem[] {
const items: ResponsesInputItem[] = []
for (const message of messages) {
const inner = message.message ?? message
const role = (inner as { role?: string }).role ?? message.role
const content = (inner as { content?: unknown }).content
if (role === 'user') {
if (Array.isArray(content)) {
const toolResults = content.filter(
(block: { type?: string }) => block.type === 'tool_result',
)
const otherContent = content.filter(
(block: { type?: string }) => block.type !== 'tool_result',
)
for (const toolResult of toolResults) {
const { callId } = normalizeToolUseId(toolResult.tool_use_id)
items.push({
type: 'function_call_output',
call_id: callId,
output: convertToolResultToText(toolResult.content),
})
}
const parts = convertContentBlocksToResponsesParts(otherContent, 'user')
if (parts.length > 0) {
items.push({
type: 'message',
role: 'user',
content: parts,
})
}
continue
}
items.push({
type: 'message',
role: 'user',
content: convertContentBlocksToResponsesParts(content, 'user'),
})
continue
}
if (role === 'assistant') {
const textBlocks = Array.isArray(content)
? content.filter((block: { type?: string }) => block.type !== 'tool_use')
: content
const parts = convertContentBlocksToResponsesParts(textBlocks, 'assistant')
if (parts.length > 0) {
items.push({
type: 'message',
role: 'assistant',
content: parts,
})
}
if (Array.isArray(content)) {
for (const toolUse of content.filter(
(block: { type?: string }) => block.type === 'tool_use',
)) {
const normalized = normalizeToolUseId(toolUse.id)
items.push({
type: 'function_call',
id: normalized.id,
call_id: normalized.callId,
name: toolUse.name ?? 'tool',
arguments:
typeof toolUse.input === 'string'
? toolUse.input
: JSON.stringify(toolUse.input ?? {}),
})
}
}
}
}
return items.filter(item =>
item.type !== 'message' || item.content.length > 0,
)
}
export function convertToolsToResponsesTools(
tools: Array<{ name?: string; description?: string; input_schema?: Record<string, unknown> }>,
): ResponsesTool[] {
return tools
.filter(tool => tool.name && tool.name !== 'ToolSearchTool')
.map(tool => ({
type: 'function',
name: tool.name ?? 'tool',
description: tool.description ?? '',
parameters: tool.input_schema ?? { type: 'object', properties: {} },
strict: true,
}))
}
function convertToolChoice(toolChoice: unknown): unknown {
const choice = toolChoice as { type?: string; name?: string } | undefined
if (!choice?.type) return undefined
if (choice.type === 'auto') return 'auto'
if (choice.type === 'any') return 'required'
if (choice.type === 'tool' && choice.name) {
return {
type: 'function',
name: choice.name,
}
}
return undefined
}
export async function performCodexRequest(options: {
request: ResolvedProviderRequest
credentials: ResolvedCodexCredentials
params: ShimCreateParams
defaultHeaders: Record<string, string>
signal?: AbortSignal
}): Promise<Response> {
const input = convertAnthropicMessagesToResponsesInput(
options.params.messages as Array<{
role?: string
message?: { role?: string; content?: unknown }
content?: unknown
}>,
)
const body: Record<string, unknown> = {
model: options.request.resolvedModel,
input: input.length > 0
? input
: [
{
type: 'message',
role: 'user',
content: [{ type: 'input_text', text: '' }],
},
],
store: false,
stream: true,
}
const instructions = convertSystemPrompt(options.params.system)
if (instructions) {
body.instructions = instructions
}
const toolChoice = convertToolChoice(options.params.tool_choice)
if (toolChoice) {
body.tool_choice = toolChoice
}
if (options.params.tools && options.params.tools.length > 0) {
const convertedTools = convertToolsToResponsesTools(
options.params.tools as Array<{
name?: string
description?: string
input_schema?: Record<string, unknown>
}>,
)
if (convertedTools.length > 0) {
body.tools = convertedTools
body.parallel_tool_calls = true
body.tool_choice ??= 'auto'
}
}
if (options.request.reasoning) {
body.reasoning = options.request.reasoning
}
if (options.params.temperature !== undefined) {
body.temperature = options.params.temperature
}
if (options.params.top_p !== undefined) {
body.top_p = options.params.top_p
}
const headers: Record<string, string> = {
'Content-Type': 'application/json',
...options.defaultHeaders,
Authorization: `Bearer ${options.credentials.apiKey}`,
}
if (options.credentials.accountId) {
headers['chatgpt-account-id'] = options.credentials.accountId
}
headers.originator ??= 'openclaude'
const response = await fetch(`${options.request.baseUrl}/responses`, {
method: 'POST',
headers,
body: JSON.stringify(body),
signal: options.signal,
})
if (!response.ok) {
const errorBody = await response.text().catch(() => 'unknown error')
throw new Error(`Codex API error ${response.status}: ${errorBody}`)
}
return response
}
/**
 * Parses a server-sent-events (SSE) response body into { event, data } pairs.
 *
 * Only chunks that carry an `event:` field and at least one `data:` field
 * whose joined payload parses to a JSON object are yielded; `[DONE]`
 * sentinels and malformed payloads are skipped silently.
 *
 * Fixes over the previous version:
 * - normalizes CRLF so streams delimited by `\r\n\r\n` still split into events
 * - accepts `event:`/`data:` with or without the optional single space after
 *   the colon, per the SSE specification
 * - flushes the decoder and any trailing unterminated event when the stream
 *   ends, and cancels the reader if the consumer stops iterating early
 */
async function* readSseEvents(
  response: Response,
): AsyncGenerator<{ event: string; data: Record<string, any> }> {
  const reader = response.body?.getReader()
  if (!reader) return
  const decoder = new TextDecoder()
  let buffer = ''
  // Extracts an SSE field value, stripping the one optional leading space.
  const fieldValue = (line: string, field: string): string | undefined => {
    if (!line.startsWith(field + ':')) return undefined
    const raw = line.slice(field.length + 1)
    return raw.startsWith(' ') ? raw.slice(1) : raw
  }
  // Parses one blank-line-delimited SSE chunk into at most one event.
  function* parseChunk(
    chunk: string,
  ): Generator<{ event: string; data: Record<string, any> }> {
    const lines = chunk
      .split('\n')
      .map(line => line.trim())
      .filter(Boolean)
    if (lines.length === 0) return
    let event: string | undefined
    const dataParts: string[] = []
    for (const line of lines) {
      const eventValue = fieldValue(line, 'event')
      if (eventValue !== undefined) {
        event = eventValue.trim()
        continue
      }
      const dataValue = fieldValue(line, 'data')
      if (dataValue !== undefined) dataParts.push(dataValue)
    }
    if (!event || dataParts.length === 0) return
    const rawData = dataParts.join('\n')
    if (rawData === '[DONE]') return
    let data: Record<string, any>
    try {
      const parsed = JSON.parse(rawData)
      if (!parsed || typeof parsed !== 'object') return
      data = parsed as Record<string, any>
    } catch {
      return
    }
    yield { event, data }
  }
  try {
    while (true) {
      const { done, value } = await reader.read()
      if (done) break
      // Normalize CRLF so '\r\n\r\n' separators split the same as '\n\n'.
      buffer += decoder.decode(value, { stream: true }).replace(/\r\n/g, '\n')
      const chunks = buffer.split('\n\n')
      buffer = chunks.pop() ?? ''
      for (const chunk of chunks) yield* parseChunk(chunk)
    }
    // Flush bytes the decoder held back plus a final unterminated event.
    buffer += decoder.decode().replace(/\r\n/g, '\n')
    if (buffer.trim()) yield* parseChunk(buffer)
  } finally {
    // Release the connection if the consumer breaks out of iteration early.
    reader.cancel().catch(() => {})
  }
}
/**
 * Maps a Codex `response` payload onto an Anthropic-style stop reason.
 *
 * Tool use (either observed while streaming or present in the final output
 * array) takes priority; a token-limit truncation reported via
 * `incomplete_details.reason` maps to 'max_tokens'; everything else is a
 * normal 'end_turn'.
 */
function determineStopReason(
  response: Record<string, any> | undefined,
  sawToolUse: boolean,
): 'end_turn' | 'tool_use' | 'max_tokens' {
  const outputItems: Array<{ type?: string }> = Array.isArray(response?.output)
    ? response.output
    : []
  const hasFunctionCall = outputItems.some(
    item => item?.type === 'function_call',
  )
  if (sawToolUse || hasFunctionCall) {
    return 'tool_use'
  }
  const reason = response?.incomplete_details?.reason
  const truncated =
    typeof reason === 'string' && reason.includes('max_output_tokens')
  return truncated ? 'max_tokens' : 'end_turn'
}
/**
 * Drains a Codex SSE response and returns the final `response` payload.
 *
 * Throws on a `response.failed` event (preferring the most specific error
 * message available) or when the stream ends without ever delivering a
 * completed/incomplete payload.
 */
export async function collectCodexCompletedResponse(
  response: Response,
): Promise<Record<string, any>> {
  for await (const { event, data } of readSseEvents(response)) {
    switch (event) {
      case 'response.failed':
        throw new Error(
          data?.response?.error?.message ??
            data?.error?.message ??
            'Codex response failed',
        )
      case 'response.completed':
      case 'response.incomplete': {
        const payload = data?.response
        if (payload) return payload
        throw new Error('Codex response ended without a completed payload')
      }
      default:
        break
    }
  }
  throw new Error('Codex response ended without a completed payload')
}
/**
 * Adapts a Codex /responses SSE stream into Anthropic Messages API stream
 * events (message_start → content_block_* → message_delta → message_stop).
 *
 * Text deltas and tool calls are interleaved: any open text block is closed
 * before a tool_use block starts, and content block indices increase
 * monotonically across both kinds. Tool blocks are keyed by the Codex output
 * item id so later argument deltas can be routed to the right block.
 *
 * @param response - fetch Response whose body is a Codex SSE stream.
 * @param model - model name echoed into the synthetic message_start event.
 * @throws Error when a `response.failed` event arrives mid-stream.
 */
export async function* codexStreamToAnthropic(
  response: Response,
  model: string,
): AsyncGenerator<AnthropicStreamEvent> {
  const messageId = makeMessageId()
  // Codex output item id -> { Anthropic content block index, tool_use id }.
  const toolBlocksByItemId = new Map<
    string,
    { index: number; toolUseId: string }
  >()
  // Index of the currently-open text content block, or null when none is open.
  let activeTextBlockIndex: number | null = null
  let nextContentBlockIndex = 0
  let sawToolUse = false
  let finalResponse: Record<string, any> | undefined
  // Emits content_block_stop for the open text block, if any (idempotent).
  const closeActiveTextBlock = async function* () {
    if (activeTextBlockIndex === null) return
    yield {
      type: 'content_block_stop',
      index: activeTextBlockIndex,
    }
    activeTextBlockIndex = null
  }
  // Opens a fresh text content block unless one is already active.
  const startTextBlockIfNeeded = async function* () {
    if (activeTextBlockIndex !== null) return
    activeTextBlockIndex = nextContentBlockIndex++
    yield {
      type: 'content_block_start',
      index: activeTextBlockIndex,
      content_block: { type: 'text', text: '' },
    }
  }
  // Anthropic streams always begin with a message_start envelope.
  yield {
    type: 'message_start',
    message: {
      id: messageId,
      type: 'message',
      role: 'assistant',
      content: [],
      model,
      stop_reason: null,
      stop_sequence: null,
      usage: makeUsage(),
    },
  }
  for await (const event of readSseEvents(response)) {
    const payload = event.data
    // A new output item: only function calls need an eager block here;
    // text items are opened lazily by content_part/output_text events.
    if (event.event === 'response.output_item.added') {
      const item = payload.item
      if (item?.type === 'function_call') {
        yield* closeActiveTextBlock()
        const blockIndex = nextContentBlockIndex++
        const toolUseId = item.call_id ?? item.id ?? `call_${blockIndex}`
        toolBlocksByItemId.set(String(item.id ?? toolUseId), {
          index: blockIndex,
          toolUseId,
        })
        sawToolUse = true
        yield {
          type: 'content_block_start',
          index: blockIndex,
          content_block: {
            type: 'tool_use',
            id: toolUseId,
            name: item.name ?? 'tool',
            input: {},
          },
        }
        // Some items arrive with their arguments already attached; forward
        // them as an initial JSON delta so nothing is lost.
        if (item.arguments) {
          yield {
            type: 'content_block_delta',
            index: blockIndex,
            delta: {
              type: 'input_json_delta',
              partial_json: item.arguments,
            },
          }
        }
      }
      continue
    }
    // A text part announced: make sure a text block is open before deltas.
    if (event.event === 'response.content_part.added') {
      if (payload.part?.type === 'output_text') {
        yield* startTextBlockIfNeeded()
      }
      continue
    }
    // Incremental assistant text.
    if (event.event === 'response.output_text.delta') {
      yield* startTextBlockIfNeeded()
      if (activeTextBlockIndex !== null) {
        yield {
          type: 'content_block_delta',
          index: activeTextBlockIndex,
          delta: {
            type: 'text_delta',
            text: payload.delta ?? '',
          },
        }
      }
      continue
    }
    // Incremental tool-call arguments, routed by the originating item id.
    if (event.event === 'response.function_call_arguments.delta') {
      const toolBlock = toolBlocksByItemId.get(String(payload.item_id ?? ''))
      if (toolBlock) {
        yield {
          type: 'content_block_delta',
          index: toolBlock.index,
          delta: {
            type: 'input_json_delta',
            partial_json: payload.delta ?? '',
          },
        }
      }
      continue
    }
    // An output item finished: close its corresponding content block.
    if (event.event === 'response.output_item.done') {
      const item = payload.item
      if (item?.type === 'function_call') {
        const toolBlock = toolBlocksByItemId.get(String(item.id ?? ''))
        if (toolBlock) {
          yield {
            type: 'content_block_stop',
            index: toolBlock.index,
          }
          toolBlocksByItemId.delete(String(item.id))
        }
      } else if (item?.type === 'message') {
        yield* closeActiveTextBlock()
      }
      continue
    }
    // Terminal success events carry the final response (usage, output list).
    if (
      event.event === 'response.completed' ||
      event.event === 'response.incomplete'
    ) {
      finalResponse = payload.response
      break
    }
    if (event.event === 'response.failed') {
      throw new Error(
        payload?.response?.error?.message ??
          payload?.error?.message ??
          'Codex response failed',
      )
    }
  }
  // Close anything still open (stream may end without explicit done events).
  yield* closeActiveTextBlock()
  for (const toolBlock of toolBlocksByItemId.values()) {
    yield {
      type: 'content_block_stop',
      index: toolBlock.index,
    }
  }
  // Final stop reason + token usage, then the message_stop terminator.
  yield {
    type: 'message_delta',
    delta: {
      stop_reason: determineStopReason(finalResponse, sawToolUse),
      stop_sequence: null,
    },
    usage: {
      input_tokens: finalResponse?.usage?.input_tokens ?? 0,
      output_tokens: finalResponse?.usage?.output_tokens ?? 0,
    },
  }
  yield { type: 'message_stop' }
}
/**
 * Converts a completed (non-streaming) Codex response into an
 * Anthropic-shaped assistant message.
 *
 * Text parts become `text` content blocks; function calls become `tool_use`
 * blocks with their JSON arguments parsed (falling back to `{ raw }` when
 * the arguments are not valid JSON).
 */
export function convertCodexResponseToAnthropicMessage(
  data: Record<string, any>,
  model: string,
): Record<string, unknown> {
  const output: Array<Record<string, any>> = Array.isArray(data.output)
    ? data.output
    : []
  const content: Array<Record<string, unknown>> = []
  for (const item of output) {
    switch (item?.type) {
      case 'message': {
        if (!Array.isArray(item.content)) break
        for (const part of item.content) {
          if (part?.type !== 'output_text') continue
          content.push({ type: 'text', text: part.text ?? '' })
        }
        break
      }
      case 'function_call': {
        let parsedInput: unknown
        try {
          parsedInput = JSON.parse(item.arguments ?? '{}')
        } catch {
          // Preserve unparseable arguments instead of dropping them.
          parsedInput = { raw: item.arguments ?? '' }
        }
        content.push({
          type: 'tool_use',
          id: item.call_id ?? item.id ?? makeMessageId(),
          name: item.name ?? 'tool',
          input: parsedInput,
        })
        break
      }
      default:
        break
    }
  }
  const hasToolUse = content.some(block => block.type === 'tool_use')
  return {
    id: data.id ?? makeMessageId(),
    type: 'message',
    role: 'assistant',
    content,
    model: data.model ?? model,
    stop_reason: determineStopReason(data, hasToolUse),
    stop_sequence: null,
    usage: makeUsage(data.usage),
  }
}

View File

@@ -13,28 +13,26 @@
* OPENAI_API_KEY=sk-... — API key (optional for local models)
* OPENAI_BASE_URL=http://... — base URL (default: https://api.openai.com/v1)
* OPENAI_MODEL=gpt-4o — default model override
* CODEX_API_KEY / ~/.codex/auth.json — Codex auth for codexplan/codexspark
*/
import {
codexStreamToAnthropic,
collectCodexCompletedResponse,
convertCodexResponseToAnthropicMessage,
performCodexRequest,
type AnthropicStreamEvent,
type ShimCreateParams,
} from './codexShim.js'
import {
resolveCodexApiCredentials,
resolveProviderRequest,
} from './providerConfig.js'
// ---------------------------------------------------------------------------
// Types — minimal subset of Anthropic SDK types we need to produce
// ---------------------------------------------------------------------------
interface AnthropicUsage {
input_tokens: number
output_tokens: number
cache_creation_input_tokens: number
cache_read_input_tokens: number
}
interface AnthropicStreamEvent {
type: string
message?: Record<string, unknown>
index?: number
content_block?: Record<string, unknown>
delta?: Record<string, unknown>
usage?: Partial<AnthropicUsage>
}
// ---------------------------------------------------------------------------
// Message format conversion: Anthropic → OpenAI
// ---------------------------------------------------------------------------
@@ -447,20 +445,6 @@ async function* openaiStreamToAnthropic(
// The shim client — duck-types as Anthropic SDK
// ---------------------------------------------------------------------------
interface ShimCreateParams {
model: string
messages: Array<Record<string, unknown>>
system?: unknown
tools?: Array<Record<string, unknown>>
max_tokens: number
stream?: boolean
temperature?: number
top_p?: number
tool_choice?: unknown
metadata?: unknown
[key: string]: unknown
}
class OpenAIShimStream {
private generator: AsyncGenerator<AnthropicStreamEvent>
// The controller property is checked by claude.ts to distinguish streams from error messages
@@ -476,17 +460,9 @@ class OpenAIShimStream {
}
class OpenAIShimMessages {
private baseUrl: string
private apiKey: string
private defaultHeaders: Record<string, string>
constructor(
baseUrl: string,
apiKey: string,
defaultHeaders: Record<string, string>,
) {
this.baseUrl = baseUrl
this.apiKey = apiKey
constructor(defaultHeaders: Record<string, string>) {
this.defaultHeaders = defaultHeaders
}
@@ -496,20 +472,30 @@ class OpenAIShimMessages {
) {
const self = this
// Return a thenable that also has .withResponse()
const promise = (async () => {
const response = await self._doRequest(params, options)
const request = resolveProviderRequest({ model: params.model })
const response = await self._doRequest(request, params, options)
if (params.stream) {
return new OpenAIShimStream(
openaiStreamToAnthropic(response, params.model),
request.transport === 'codex_responses'
? codexStreamToAnthropic(response, request.resolvedModel)
: openaiStreamToAnthropic(response, request.resolvedModel),
)
}
// Non-streaming: parse the full response and convert
if (request.transport === 'codex_responses') {
const data = await collectCodexCompletedResponse(response)
return convertCodexResponseToAnthropicMessage(
data,
request.resolvedModel,
)
}
const data = await response.json()
return self._convertNonStreamingResponse(data, params.model)
return self._convertNonStreamingResponse(data, request.resolvedModel)
})()
// Add .withResponse() for streaming path (claude.ts uses this)
;(promise as unknown as Record<string, unknown>).withResponse =
async () => {
const data = await promise
@@ -524,6 +510,43 @@ class OpenAIShimMessages {
}
private async _doRequest(
request: ReturnType<typeof resolveProviderRequest>,
params: ShimCreateParams,
options?: { signal?: AbortSignal; headers?: Record<string, string> },
): Promise<Response> {
if (request.transport === 'codex_responses') {
const credentials = resolveCodexApiCredentials()
if (!credentials.apiKey) {
const authHint = credentials.authPath
? ` or place a Codex auth.json at ${credentials.authPath}`
: ''
throw new Error(
`Codex auth is required for ${request.requestedModel}. Set CODEX_API_KEY${authHint}.`,
)
}
if (!credentials.accountId) {
throw new Error(
'Codex auth is missing chatgpt_account_id. Re-login with the Codex CLI or set CHATGPT_ACCOUNT_ID/CODEX_ACCOUNT_ID.',
)
}
return performCodexRequest({
request,
credentials,
params,
defaultHeaders: {
...this.defaultHeaders,
...(options?.headers ?? {}),
},
signal: options?.signal,
})
}
return this._doOpenAIRequest(request, params, options)
}
private async _doOpenAIRequest(
request: ReturnType<typeof resolveProviderRequest>,
params: ShimCreateParams,
options?: { signal?: AbortSignal; headers?: Record<string, string> },
): Promise<Response> {
@@ -537,7 +560,7 @@ class OpenAIShimMessages {
)
const body: Record<string, unknown> = {
model: params.model,
model: request.resolvedModel,
messages: openaiMessages,
max_tokens: params.max_tokens,
stream: params.stream ?? false,
@@ -550,7 +573,6 @@ class OpenAIShimMessages {
if (params.temperature !== undefined) body.temperature = params.temperature
if (params.top_p !== undefined) body.top_p = params.top_p
// Convert tools
if (params.tools && params.tools.length > 0) {
const converted = convertTools(
params.tools as Array<{
@@ -561,7 +583,6 @@ class OpenAIShimMessages {
)
if (converted.length > 0) {
body.tools = converted
// Convert tool_choice
if (params.tool_choice) {
const tc = params.tool_choice as { type?: string; name?: string }
if (tc.type === 'auto') {
@@ -578,18 +599,18 @@ class OpenAIShimMessages {
}
}
const url = `${this.baseUrl}/chat/completions`
const headers: Record<string, string> = {
'Content-Type': 'application/json',
...this.defaultHeaders,
...(options?.headers ?? {}),
}
if (this.apiKey) {
headers['Authorization'] = `Bearer ${this.apiKey}`
const apiKey = process.env.OPENAI_API_KEY ?? ''
if (apiKey) {
headers.Authorization = `Bearer ${apiKey}`
}
const response = await fetch(url, {
const response = await fetch(`${request.baseUrl}/chat/completions`, {
method: 'POST',
headers,
body: JSON.stringify(body),
@@ -598,9 +619,7 @@ class OpenAIShimMessages {
if (!response.ok) {
const errorBody = await response.text().catch(() => 'unknown error')
throw new Error(
`OpenAI API error ${response.status}: ${errorBody}`,
)
throw new Error(`OpenAI API error ${response.status}: ${errorBody}`)
}
return response
@@ -680,45 +699,22 @@ class OpenAIShimMessages {
class OpenAIShimBeta {
messages: OpenAIShimMessages
constructor(
baseUrl: string,
apiKey: string,
defaultHeaders: Record<string, string>,
) {
this.messages = new OpenAIShimMessages(baseUrl, apiKey, defaultHeaders)
constructor(defaultHeaders: Record<string, string>) {
this.messages = new OpenAIShimMessages(defaultHeaders)
}
}
/**
* Creates an Anthropic SDK-compatible client that routes requests
* to an OpenAI-compatible API endpoint.
*
* Usage:
* CLAUDE_CODE_USE_OPENAI=1 OPENAI_API_KEY=sk-... OPENAI_MODEL=gpt-4o
*/
export function createOpenAIShimClient(options: {
defaultHeaders?: Record<string, string>
maxRetries?: number
timeout?: number
}): unknown {
const baseUrl = (
process.env.OPENAI_BASE_URL ??
process.env.OPENAI_API_BASE ??
'https://api.openai.com/v1'
).replace(/\/+$/, '')
const apiKey = process.env.OPENAI_API_KEY ?? ''
const headers = {
const beta = new OpenAIShimBeta({
...(options.defaultHeaders ?? {}),
}
})
const beta = new OpenAIShimBeta(baseUrl, apiKey, headers)
// Duck-type as Anthropic client
return {
beta,
// Some code paths access .messages directly (non-beta)
messages: beta.messages,
}
}

View File

@@ -0,0 +1,313 @@
import { existsSync, readFileSync } from 'node:fs'
import { homedir } from 'node:os'
import { join } from 'node:path'
export const DEFAULT_OPENAI_BASE_URL = 'https://api.openai.com/v1'
export const DEFAULT_CODEX_BASE_URL = 'https://chatgpt.com/backend-api/codex'
/** Reasoning effort levels accepted on Codex models. */
type ReasoningEffort = 'low' | 'medium' | 'high'
/** Model aliases that route to the ChatGPT Codex backend. */
type CodexAlias = 'codexplan' | 'codexspark'
/**
 * Alias → concrete Codex model plus an optional default reasoning effort.
 *
 * Typed as `Record<CodexAlias, …>` instead of `Record<string, …>`: the old
 * annotation widened `keyof typeof CODEX_ALIAS_MODELS` to `string`, so the
 * derived alias type carried no key information (and the trailing `as const`
 * was inert behind the annotation). Keys are now checked exhaustively
 * against the explicit `CodexAlias` union above.
 */
const CODEX_ALIAS_MODELS: Record<
  CodexAlias,
  {
    model: string
    reasoningEffort?: ReasoningEffort
  }
> = {
  codexplan: {
    model: 'gpt-5.4',
    reasoningEffort: 'high',
  },
  codexspark: {
    model: 'gpt-5.3-codex-spark',
  },
}
/** Which upstream wire protocol a request uses. */
export type ProviderTransport = 'chat_completions' | 'codex_responses'
/** Fully-resolved routing decision for one outgoing request. */
export type ResolvedProviderRequest = {
  transport: ProviderTransport
  /** The model string exactly as the caller or env supplied it. */
  requestedModel: string
  /** Concrete upstream model name after alias expansion. */
  resolvedModel: string
  /** Base URL with trailing slashes stripped. */
  baseUrl: string
  /** Present when an alias default or `?reasoning=` override set an effort. */
  reasoning?: {
    effort: ReasoningEffort
  }
}
/** Codex credentials plus where they were found. */
export type ResolvedCodexCredentials = {
  apiKey: string
  accountId?: string
  /** auth.json path consulted (absent when env vars supplied the key). */
  authPath?: string
  source: 'env' | 'auth.json' | 'none'
}
/** Parsed form of a user-supplied model string (alias/query expanded). */
type ModelDescriptor = {
  raw: string
  baseModel: string
  reasoning?: {
    effort: ReasoningEffort
  }
}
/** Hostnames treated as a local provider (no API key required). */
const LOCALHOST_HOSTNAMES = new Set(['localhost', '127.0.0.1', '::1'])
/**
 * Returns the trimmed string when `value` is a non-blank string,
 * otherwise `undefined`.
 */
function asTrimmedString(value: unknown): string | undefined {
  if (typeof value !== 'string') return undefined
  const trimmed = value.trim()
  return trimmed === '' ? undefined : trimmed
}
/**
 * Walks each candidate key path into `value` and returns the first
 * non-blank trimmed string found. Paths that traverse a non-object or a
 * missing key are skipped.
 */
function readNestedString(
  value: unknown,
  paths: string[][],
): string | undefined {
  // Follows one key path; undefined when any hop is missing or non-object.
  const walk = (root: unknown, path: string[]): unknown => {
    let node = root
    for (const key of path) {
      if (!node || typeof node !== 'object' || !(key in node)) return undefined
      node = (node as Record<string, unknown>)[key]
    }
    return node
  }
  for (const path of paths) {
    const found = asTrimmedString(walk(value, path))
    if (found) return found
  }
  return undefined
}
/**
 * Best-effort decode of a JWT's payload segment (base64url → JSON object).
 * Returns `undefined` for anything that is not a well-formed token with an
 * object payload; never throws.
 */
function decodeJwtPayload(token: string): Record<string, unknown> | undefined {
  const segments = token.split('.')
  const payloadSegment = segments[1]
  if (payloadSegment === undefined) return undefined
  try {
    // base64url → base64, re-padding to a multiple of four characters.
    const base64 = payloadSegment.replace(/-/g, '+').replace(/_/g, '/')
    const padding = '='.repeat((4 - (base64.length % 4)) % 4)
    const decoded = Buffer.from(base64 + padding, 'base64').toString('utf8')
    const payload: unknown = JSON.parse(decoded)
    if (payload && typeof payload === 'object') {
      return payload as Record<string, unknown>
    }
    return undefined
  } catch {
    return undefined
  }
}
/** Narrows an arbitrary string to a known reasoning-effort level. */
function parseReasoningEffort(value: string | undefined): ReasoningEffort | undefined {
  const normalized = value?.trim().toLowerCase()
  switch (normalized) {
    case 'low':
    case 'medium':
    case 'high':
      return normalized
    default:
      return undefined
  }
}
/**
 * Parses a user-supplied model string into its concrete base model plus an
 * optional reasoning-effort override.
 *
 * Supported forms:
 *   - plain model name:       "gpt-4o"
 *   - Codex alias:            "codexplan" (expands model + default effort)
 *   - query-string override:  "codexplan?reasoning=medium"
 *
 * An explicit, valid `?reasoning=` value wins over the alias default.
 */
function parseModelDescriptor(model: string): ModelDescriptor {
  const trimmed = model.trim()
  const queryIndex = trimmed.indexOf('?')
  const namePart =
    queryIndex === -1 ? trimmed : trimmed.slice(0, queryIndex).trim()
  const query =
    queryIndex === -1
      ? undefined
      : new URLSearchParams(trimmed.slice(queryIndex + 1))
  const aliasConfig = CODEX_ALIAS_MODELS[namePart.toLowerCase() as CodexAlias]
  const baseModel = aliasConfig?.model ?? namePart
  // Explicit override beats the alias default; invalid values are ignored.
  const explicitEffort = parseReasoningEffort(query?.get('reasoning') ?? undefined)
  const effort = explicitEffort ?? aliasConfig?.reasoningEffort
  return {
    raw: trimmed,
    baseModel,
    reasoning: effort ? { effort } : undefined,
  }
}
/** True when the model string (ignoring any `?query` suffix) is a Codex alias. */
function isCodexAlias(model: string): boolean {
  const [base = ''] = model.trim().toLowerCase().split('?', 1)
  return base in CODEX_ALIAS_MODELS
}
/**
 * True when `baseUrl` points at a local host (localhost / loopback).
 *
 * WHATWG `URL.hostname` serializes IPv6 hosts *with* brackets (e.g. `[::1]`),
 * so brackets are stripped before comparing against LOCALHOST_HOSTNAMES —
 * previously `http://[::1]:1234` was never recognized as local.
 *
 * Missing or unparseable URLs are treated as non-local.
 */
export function isLocalProviderUrl(baseUrl: string | undefined): boolean {
  if (!baseUrl) return false
  try {
    let hostname = new URL(baseUrl).hostname
    if (hostname.startsWith('[') && hostname.endsWith(']')) {
      hostname = hostname.slice(1, -1)
    }
    return LOCALHOST_HOSTNAMES.has(hostname)
  } catch {
    return false
  }
}
/**
 * True when `baseUrl` is the ChatGPT Codex backend endpoint
 * (https://chatgpt.com/backend-api/codex, trailing slashes ignored).
 * Missing or unparseable URLs yield false.
 */
export function isCodexBaseUrl(baseUrl: string | undefined): boolean {
  if (!baseUrl) return false
  let parsed: URL
  try {
    parsed = new URL(baseUrl)
  } catch {
    return false
  }
  const path = parsed.pathname.replace(/\/+$/, '')
  return parsed.hostname === 'chatgpt.com' && path === '/backend-api/codex'
}
/**
 * Resolves which transport, model, and base URL a request should use.
 *
 * Model precedence: explicit option → OPENAI_MODEL env → fallback → 'gpt-4o'
 * (blank strings are skipped). Base URL precedence: explicit option →
 * OPENAI_BASE_URL → OPENAI_API_BASE → the chosen transport's default.
 * The Codex transport is selected when either the model is a Codex alias or
 * the base URL already targets the Codex backend.
 */
export function resolveProviderRequest(options?: {
  model?: string
  baseUrl?: string
  fallbackModel?: string
}): ResolvedProviderRequest {
  const requestedModel =
    options?.model?.trim() ||
    process.env.OPENAI_MODEL?.trim() ||
    options?.fallbackModel?.trim() ||
    'gpt-4o'
  const descriptor = parseModelDescriptor(requestedModel)
  const rawBaseUrl =
    options?.baseUrl ??
    process.env.OPENAI_BASE_URL ??
    process.env.OPENAI_API_BASE ??
    undefined
  const useCodex = isCodexAlias(requestedModel) || isCodexBaseUrl(rawBaseUrl)
  const defaultBaseUrl = useCodex
    ? DEFAULT_CODEX_BASE_URL
    : DEFAULT_OPENAI_BASE_URL
  return {
    transport: useCodex ? 'codex_responses' : 'chat_completions',
    requestedModel,
    resolvedModel: descriptor.baseModel,
    baseUrl: (rawBaseUrl ?? defaultBaseUrl).replace(/\/+$/, ''),
    reasoning: descriptor.reasoning,
  }
}
/**
 * Picks the Codex auth.json location, in precedence order:
 * CODEX_AUTH_JSON_PATH → $CODEX_HOME/auth.json → ~/.codex/auth.json.
 * Blank env values are ignored; returned paths are trimmed.
 */
export function resolveCodexAuthPath(
  env: NodeJS.ProcessEnv = process.env,
): string {
  const explicitPath = asTrimmedString(env.CODEX_AUTH_JSON_PATH)
  if (explicitPath) return explicitPath
  const codexHome = asTrimmedString(env.CODEX_HOME)
  return codexHome
    ? join(codexHome, 'auth.json')
    : join(homedir(), '.codex', 'auth.json')
}
/**
 * Extracts the ChatGPT account id from a JWT access token, preferring the
 * namespaced OpenAI auth claim and falling back to a bare
 * `chatgpt_account_id` claim. Returns undefined for missing or non-JWT
 * tokens and for blank claims.
 */
export function parseChatgptAccountId(
  token: string | undefined,
): string | undefined {
  if (!token) return undefined
  const claims = decodeJwtPayload(token)
  if (!claims) return undefined
  return (
    asTrimmedString(claims['https://api.openai.com/auth.chatgpt_account_id']) ??
    asTrimmedString(claims.chatgpt_account_id)
  )
}
/**
 * Reads and parses auth.json, returning undefined when the file is missing,
 * unreadable, or does not contain a JSON object. Never throws.
 */
function loadCodexAuthJson(
  authPath: string,
): Record<string, unknown> | undefined {
  if (!existsSync(authPath)) return undefined
  try {
    const parsed: unknown = JSON.parse(readFileSync(authPath, 'utf8'))
    if (parsed && typeof parsed === 'object') {
      return parsed as Record<string, unknown>
    }
  } catch {
    // Treat unreadable or corrupt files the same as absent ones.
  }
  return undefined
}
/**
 * Resolves the Codex API key and ChatGPT account id.
 *
 * Precedence:
 *  1. CODEX_API_KEY env var — account id from CODEX_ACCOUNT_ID /
 *     CHATGPT_ACCOUNT_ID, else decoded from the token's JWT claims.
 *  2. The Codex CLI's auth.json on disk — several candidate key locations
 *     are probed in order (see list below).
 *
 * Never throws: when no usable key is found the result has an empty apiKey,
 * `source: 'none'`, and the authPath that was checked so callers can build
 * a helpful error message.
 */
export function resolveCodexApiCredentials(
  env: NodeJS.ProcessEnv = process.env,
): ResolvedCodexCredentials {
  const envApiKey = asTrimmedString(env.CODEX_API_KEY)
  // Env account-id overrides apply to both the env-var and auth.json paths.
  const envAccountId =
    asTrimmedString(env.CODEX_ACCOUNT_ID) ??
    asTrimmedString(env.CHATGPT_ACCOUNT_ID)
  if (envApiKey) {
    return {
      apiKey: envApiKey,
      // Fall back to the account id embedded in the token's JWT claims.
      accountId: envAccountId ?? parseChatgptAccountId(envApiKey),
      source: 'env',
    }
  }
  const authPath = resolveCodexAuthPath(env)
  const authJson = loadCodexAuthJson(authPath)
  if (!authJson) {
    // No file (or unparsable file): report where we looked.
    return {
      apiKey: '',
      authPath,
      source: 'none',
    }
  }
  // Candidate access-token locations, probed in order; id_token variants are
  // the last resort. Presumably these cover multiple auth.json layouts
  // produced by different Codex CLI versions — TODO confirm.
  const apiKey = readNestedString(authJson, [
    ['access_token'],
    ['accessToken'],
    ['tokens', 'access_token'],
    ['tokens', 'accessToken'],
    ['auth', 'access_token'],
    ['auth', 'accessToken'],
    ['token', 'access_token'],
    ['token', 'accessToken'],
    ['tokens', 'id_token'],
    ['tokens', 'idToken'],
  ])
  // Account id: env override → explicit field in auth.json → JWT claim.
  const accountId =
    envAccountId ??
    readNestedString(authJson, [
      ['account_id'],
      ['accountId'],
      ['tokens', 'account_id'],
      ['tokens', 'accountId'],
      ['auth', 'account_id'],
      ['auth', 'accountId'],
    ]) ??
    parseChatgptAccountId(apiKey)
  if (!apiKey) {
    // The file existed but held no recognizable token.
    return {
      apiKey: '',
      accountId,
      authPath,
      source: 'none',
    }
  }
  return {
    apiKey,
    accountId,
    authPath,
    source: 'auth.json',
  }
}

View File

@@ -6,6 +6,8 @@ export const MODEL_ALIASES = [
'sonnet[1m]',
'opus[1m]',
'opusplan',
'codexplan',
'codexspark',
] as const
export type ModelAlias = (typeof MODEL_ALIASES)[number]

View File

@@ -323,6 +323,12 @@ export function renderDefaultModelSetting(
if (setting === 'opusplan') {
return 'Opus 4.6 in plan mode, else Sonnet 4.6'
}
if (setting === 'codexplan') {
return 'Codex Plan (GPT-5.4 high reasoning)'
}
if (setting === 'codexspark') {
return 'Codex Spark (GPT-5.3 Codex Spark)'
}
return renderModelName(parseUserSpecifiedModel(setting))
}
@@ -357,6 +363,12 @@ export function renderModelSetting(setting: ModelName | ModelAlias): string {
if (setting === 'opusplan') {
return 'Opus Plan'
}
if (setting === 'codexplan') {
return 'Codex Plan'
}
if (setting === 'codexspark') {
return 'Codex Spark'
}
if (isModelAlias(setting)) {
return capitalize(setting)
}
@@ -374,6 +386,10 @@ export function getPublicModelDisplayName(model: ModelName): string | null {
return null
}
switch (model) {
case 'gpt-5.4':
return 'GPT-5.4'
case 'gpt-5.3-codex-spark':
return 'GPT-5.3 Codex Spark'
case getModelStrings().opus46:
return 'Opus 4.6'
case getModelStrings().opus46 + '[1m]':
@@ -481,6 +497,10 @@ export function parseUserSpecifiedModel(
if (isModelAlias(modelString)) {
switch (modelString) {
case 'codexplan':
return modelInputTrimmed
case 'codexspark':
return modelInputTrimmed
case 'opusplan':
return getDefaultSonnetModel() + (has1mTag ? '[1m]' : '') // Sonnet is default, Opus in plan mode
case 'sonnet':

View File

@@ -266,6 +266,22 @@ function getOpusPlanOption(): ModelOption {
}
}
function getCodexPlanOption(): ModelOption {
return {
value: 'codexplan',
label: 'Codex Plan',
description: 'GPT-5.4 on the Codex backend with high reasoning',
}
}
function getCodexSparkOption(): ModelOption {
return {
value: 'codexspark',
label: 'Codex Spark',
description: 'GPT-5.3 Codex Spark on the Codex backend for fast tool loops',
}
}
// @[MODEL LAUNCH]: Update the model picker lists below to include/reorder options for the new model.
// Each user tier (ant, Max/Team Premium, Pro/Team Standard/Enterprise, PAYG 1P, PAYG 3P) has its own list.
function getModelOptionsBase(fastMode = false): ModelOption[] {
@@ -344,6 +360,10 @@ function getModelOptionsBase(fastMode = false): ModelOption[] {
// PAYG 3P: Default (Sonnet 4.5) + Sonnet (3P custom) or Sonnet 4.6/1M + Opus (3P custom) or Opus 4.1/Opus 4.6/Opus1M + Haiku + Opus 4.1
const payg3pOptions = [getDefaultOptionForUser(fastMode)]
if (getAPIProvider() === 'openai') {
payg3pOptions.push(getCodexPlanOption(), getCodexSparkOption())
}
const customSonnet = getCustomSonnetOption()
if (customSonnet !== undefined) {
payg3pOptions.push(customSonnet)
@@ -497,6 +517,10 @@ export function getModelOptions(fastMode = false): ModelOption[] {
return filterModelOptionsByAllowlist(options)
} else if (customModel === 'opusplan') {
return filterModelOptionsByAllowlist([...options, getOpusPlanOption()])
} else if (customModel === 'codexplan') {
return filterModelOptionsByAllowlist([...options, getCodexPlanOption()])
} else if (customModel === 'codexspark') {
return filterModelOptionsByAllowlist([...options, getCodexSparkOption()])
} else if (customModel === 'opus' && getAPIProvider() === 'firstParty') {
return filterModelOptionsByAllowlist([
...options,