Merge origin/main into provider-profile-recommendations

This commit is contained in:
Vasanthdev2004
2026-04-01 18:38:59 +05:30
16 changed files with 581 additions and 49 deletions

View File

@@ -9,14 +9,13 @@
import { existsSync } from 'fs' import { existsSync } from 'fs'
import { join, dirname } from 'path' import { join, dirname } from 'path'
import { fileURLToPath } from 'url' import { fileURLToPath, pathToFileURL } from 'url'
import { getDistImportSpecifier } from './import-specifier.mjs'
const __dirname = dirname(fileURLToPath(import.meta.url)) const __dirname = dirname(fileURLToPath(import.meta.url))
const distPath = join(__dirname, '..', 'dist', 'cli.mjs') const distPath = join(__dirname, '..', 'dist', 'cli.mjs')
if (existsSync(distPath)) { if (existsSync(distPath)) {
await import(getDistImportSpecifier(__dirname)) await import(pathToFileURL(distPath).href)
} else { } else {
console.error(` console.error(`
openclaude: dist/cli.mjs not found. openclaude: dist/cli.mjs not found.

View File

@@ -1,6 +1,6 @@
{ {
"name": "@gitlawb/openclaude", "name": "@gitlawb/openclaude",
"version": "0.1.2", "version": "0.1.4",
"description": "Claude Code opened to any LLM — OpenAI, Gemini, DeepSeek, Ollama, and 200+ models", "description": "Claude Code opened to any LLM — OpenAI, Gemini, DeepSeek, Ollama, and 200+ models",
"type": "module", "type": "module",
"bin": { "bin": {
@@ -18,6 +18,7 @@
"dev:profile:fast": "bun run scripts/provider-launch.ts auto --fast --bare", "dev:profile:fast": "bun run scripts/provider-launch.ts auto --fast --bare",
"dev:codex": "bun run scripts/provider-launch.ts codex", "dev:codex": "bun run scripts/provider-launch.ts codex",
"dev:openai": "bun run scripts/provider-launch.ts openai", "dev:openai": "bun run scripts/provider-launch.ts openai",
"dev:gemini": "bun run scripts/provider-launch.ts gemini",
"dev:ollama": "bun run scripts/provider-launch.ts ollama", "dev:ollama": "bun run scripts/provider-launch.ts ollama",
"dev:ollama:fast": "bun run scripts/provider-launch.ts ollama --fast --bare", "dev:ollama:fast": "bun run scripts/provider-launch.ts ollama --fast --bare",
"profile:init": "bun run scripts/provider-bootstrap.ts", "profile:init": "bun run scripts/provider-bootstrap.ts",

View File

@@ -65,6 +65,39 @@ const result = await Bun.build({
{ {
name: 'bun-bundle-shim', name: 'bun-bundle-shim',
setup(build) { setup(build) {
const internalFeatureStubModules = new Map([
[
'../daemon/workerRegistry.js',
'export async function runDaemonWorker() { throw new Error("Daemon worker is unavailable in the open build."); }',
],
[
'../daemon/main.js',
'export async function daemonMain() { throw new Error("Daemon mode is unavailable in the open build."); }',
],
[
'../cli/bg.js',
`
export async function psHandler() { throw new Error("Background sessions are unavailable in the open build."); }
export async function logsHandler() { throw new Error("Background sessions are unavailable in the open build."); }
export async function attachHandler() { throw new Error("Background sessions are unavailable in the open build."); }
export async function killHandler() { throw new Error("Background sessions are unavailable in the open build."); }
export async function handleBgFlag() { throw new Error("Background sessions are unavailable in the open build."); }
`,
],
[
'../cli/handlers/templateJobs.js',
'export async function templatesMain() { throw new Error("Template jobs are unavailable in the open build."); }',
],
[
'../environment-runner/main.js',
'export async function environmentRunnerMain() { throw new Error("Environment runner is unavailable in the open build."); }',
],
[
'../self-hosted-runner/main.js',
'export async function selfHostedRunnerMain() { throw new Error("Self-hosted runner is unavailable in the open build."); }',
],
] as const)
// Resolve `import { feature } from 'bun:bundle'` to a shim // Resolve `import { feature } from 'bun:bundle'` to a shim
build.onResolve({ filter: /^bun:bundle$/ }, () => ({ build.onResolve({ filter: /^bun:bundle$/ }, () => ({
path: 'bun:bundle', path: 'bun:bundle',
@@ -78,6 +111,26 @@ const result = await Bun.build({
}), }),
) )
build.onResolve(
{ filter: /^\.\.\/(daemon\/workerRegistry|daemon\/main|cli\/bg|cli\/handlers\/templateJobs|environment-runner\/main|self-hosted-runner\/main)\.js$/ },
args => {
if (!internalFeatureStubModules.has(args.path)) return null
return {
path: args.path,
namespace: 'internal-feature-stub',
}
},
)
build.onLoad(
{ filter: /.*/, namespace: 'internal-feature-stub' },
args => ({
contents:
internalFeatureStubModules.get(args.path) ??
'export {}',
loader: 'js',
}),
)
// Resolve react/compiler-runtime to the standalone package // Resolve react/compiler-runtime to the standalone package
build.onResolve({ filter: /^react\/compiler-runtime$/ }, () => ({ build.onResolve({ filter: /^react\/compiler-runtime$/ }, () => ({
path: 'react/compiler-runtime', path: 'react/compiler-runtime',

View File

@@ -11,6 +11,7 @@ import {
} from '../src/utils/providerRecommendation.ts' } from '../src/utils/providerRecommendation.ts'
import { import {
buildCodexProfileEnv, buildCodexProfileEnv,
buildGeminiProfileEnv,
buildOllamaProfileEnv, buildOllamaProfileEnv,
buildOpenAIProfileEnv, buildOpenAIProfileEnv,
createProfileFile, createProfileFile,
@@ -33,7 +34,7 @@ function parseArg(name: string): string | null {
function parseProviderArg(): ProviderProfile | 'auto' { function parseProviderArg(): ProviderProfile | 'auto' {
const p = parseArg('--provider')?.toLowerCase() const p = parseArg('--provider')?.toLowerCase()
if (p === 'openai' || p === 'ollama' || p === 'codex') return p if (p === 'openai' || p === 'ollama' || p === 'codex' || p === 'gemini') return p
return 'auto' return 'auto'
} }
@@ -72,7 +73,22 @@ async function main(): Promise<void> {
} }
let env: ProfileFile['env'] let env: ProfileFile['env']
if (selected === 'ollama') { if (selected === 'gemini') {
const builtEnv = buildGeminiProfileEnv({
model: argModel || null,
baseUrl: argBaseUrl || null,
apiKey: argApiKey || null,
processEnv: process.env,
})
if (!builtEnv) {
console.error('Gemini profile requires an API key. Use --api-key or set GEMINI_API_KEY.')
console.error('Get a free key at: https://aistudio.google.com/apikey')
process.exit(1)
}
env = builtEnv
} else if (selected === 'ollama') {
resolvedOllamaModel ??= await resolveOllamaModel(argModel, argBaseUrl, goal) resolvedOllamaModel ??= await resolveOllamaModel(argModel, argBaseUrl, goal)
if (!resolvedOllamaModel) { if (!resolvedOllamaModel) {
console.error('No viable Ollama chat model was discovered. Pull a chat model first or pass --model explicitly.') console.error('No viable Ollama chat model was discovered. Pull a chat model first or pass --model explicitly.')
@@ -136,7 +152,7 @@ async function main(): Promise<void> {
console.log(`Saved profile: ${selected}`) console.log(`Saved profile: ${selected}`)
console.log(`Goal: ${goal}`) console.log(`Goal: ${goal}`)
console.log(`Model: ${profile.env.OPENAI_MODEL}`) console.log(`Model: ${profile.env.GEMINI_MODEL || profile.env.OPENAI_MODEL || getGoalDefaultOpenAIModel(goal)}`)
console.log(`Path: ${outputPath}`) console.log(`Path: ${outputPath}`)
console.log('Next: bun run dev:profile') console.log('Next: bun run dev:profile')
} }

View File

@@ -48,7 +48,7 @@ function parseLaunchOptions(argv: string[]): LaunchOptions {
continue continue
} }
if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex') && requestedProfile === 'auto') { if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini') && requestedProfile === 'auto') {
requestedProfile = lower as ProviderProfile | 'auto' requestedProfile = lower as ProviderProfile | 'auto'
continue continue
} }
@@ -79,7 +79,7 @@ function loadPersistedProfile(): ProfileFile | null {
if (!existsSync(path)) return null if (!existsSync(path)) return null
try { try {
const parsed = JSON.parse(readFileSync(path, 'utf8')) as ProfileFile const parsed = JSON.parse(readFileSync(path, 'utf8')) as ProfileFile
if (parsed.profile === 'openai' || parsed.profile === 'ollama' || parsed.profile === 'codex') { if (parsed.profile === 'openai' || parsed.profile === 'ollama' || parsed.profile === 'codex' || parsed.profile === 'gemini') {
return parsed return parsed
} }
return null return null
@@ -126,22 +126,26 @@ function quoteArg(arg: string): string {
} }
function printSummary(profile: ProviderProfile, env: NodeJS.ProcessEnv): void { function printSummary(profile: ProviderProfile, env: NodeJS.ProcessEnv): void {
const keySet = profile === 'codex'
? Boolean(resolveCodexApiCredentials(env).apiKey)
: Boolean(env.OPENAI_API_KEY)
console.log(`Launching profile: ${profile}`) console.log(`Launching profile: ${profile}`)
if (profile === 'gemini') {
console.log(`GEMINI_MODEL=${env.GEMINI_MODEL}`)
console.log(`GEMINI_API_KEY_SET=${Boolean(env.GEMINI_API_KEY)}`)
} else if (profile === 'codex') {
console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`) console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`)
console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`) console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`)
console.log( console.log(`CODEX_API_KEY_SET=${Boolean(resolveCodexApiCredentials(env).apiKey)}`)
`${profile === 'codex' ? 'CODEX_API_KEY_SET' : 'OPENAI_API_KEY_SET'}=${keySet}`, } else {
) console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`)
console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`)
console.log(`OPENAI_API_KEY_SET=${Boolean(env.OPENAI_API_KEY)}`)
}
} }
async function main(): Promise<void> { async function main(): Promise<void> {
const options = parseLaunchOptions(process.argv.slice(2)) const options = parseLaunchOptions(process.argv.slice(2))
const requestedProfile = options.requestedProfile const requestedProfile = options.requestedProfile
if (!requestedProfile) { if (!requestedProfile) {
console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|auto] [--fast] [--goal <latency|balanced|coding>] [-- <cli args>]') console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|auto] [--fast] [--goal <latency|balanced|coding>] [-- <cli args>]')
process.exit(1) process.exit(1)
} }
@@ -184,6 +188,11 @@ async function main(): Promise<void> {
applyFastFlags(env) applyFastFlags(env)
} }
if (profile === 'gemini' && !env.GEMINI_API_KEY) {
console.error('GEMINI_API_KEY is required for gemini profile. Run: bun run profile:init -- --provider gemini --api-key <key>')
process.exit(1)
}
if (profile === 'openai' && (!env.OPENAI_API_KEY || env.OPENAI_API_KEY === 'SUA_CHAVE')) { if (profile === 'openai' && (!env.OPENAI_API_KEY || env.OPENAI_API_KEY === 'SUA_CHAVE')) {
console.error('OPENAI_API_KEY is required for openai profile and cannot be SUA_CHAVE. Run: bun run profile:init -- --provider openai --api-key <key>') console.error('OPENAI_API_KEY is required for openai profile and cannot be SUA_CHAVE. Run: bun run profile:init -- --provider openai --api-key <key>')
process.exit(1) process.exit(1)

View File

@@ -92,14 +92,49 @@ function isLocalBaseUrl(baseUrl: string): boolean {
return isProviderLocalUrl(baseUrl) return isProviderLocalUrl(baseUrl)
} }
const GEMINI_DEFAULT_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/openai'
function currentBaseUrl(): string { function currentBaseUrl(): string {
if (isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) {
return process.env.GEMINI_BASE_URL ?? GEMINI_DEFAULT_BASE_URL
}
return process.env.OPENAI_BASE_URL ?? 'https://api.openai.com/v1' return process.env.OPENAI_BASE_URL ?? 'https://api.openai.com/v1'
} }
function checkGeminiEnv(): CheckResult[] {
const results: CheckResult[] = []
const model = process.env.GEMINI_MODEL
const key = process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY
const baseUrl = process.env.GEMINI_BASE_URL ?? GEMINI_DEFAULT_BASE_URL
results.push(pass('Provider mode', 'Google Gemini provider enabled.'))
if (!model) {
results.push(pass('GEMINI_MODEL', 'Not set. Default gemini-2.0-flash will be used.'))
} else {
results.push(pass('GEMINI_MODEL', model))
}
results.push(pass('GEMINI_BASE_URL', baseUrl))
if (!key) {
results.push(fail('GEMINI_API_KEY', 'Missing. Set GEMINI_API_KEY or GOOGLE_API_KEY.'))
} else {
results.push(pass('GEMINI_API_KEY', 'Configured.'))
}
return results
}
function checkOpenAIEnv(): CheckResult[] { function checkOpenAIEnv(): CheckResult[] {
const results: CheckResult[] = [] const results: CheckResult[] = []
const useGemini = isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
if (useGemini) {
return checkGeminiEnv()
}
if (!useOpenAI) { if (!useOpenAI) {
results.push(pass('Provider mode', 'Anthropic login flow enabled (CLAUDE_CODE_USE_OPENAI is off).')) results.push(pass('Provider mode', 'Anthropic login flow enabled (CLAUDE_CODE_USE_OPENAI is off).'))
return results return results
@@ -160,13 +195,20 @@ function checkOpenAIEnv(): CheckResult[] {
} }
async function checkBaseUrlReachability(): Promise<CheckResult> { async function checkBaseUrlReachability(): Promise<CheckResult> {
if (!isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)) { const useGemini = isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
if (!useGemini && !useOpenAI) {
return pass('Provider reachability', 'Skipped (OpenAI-compatible mode disabled).') return pass('Provider reachability', 'Skipped (OpenAI-compatible mode disabled).')
} }
const geminiBaseUrl = 'https://generativelanguage.googleapis.com/v1beta/openai'
const resolvedBaseUrl = useGemini
? (process.env.GEMINI_BASE_URL ?? geminiBaseUrl)
: undefined
const request = resolveProviderRequest({ const request = resolveProviderRequest({
model: process.env.OPENAI_MODEL, model: process.env.OPENAI_MODEL,
baseUrl: process.env.OPENAI_BASE_URL, baseUrl: resolvedBaseUrl ?? process.env.OPENAI_BASE_URL,
}) })
const endpoint = request.transport === 'codex_responses' const endpoint = request.transport === 'codex_responses'
? `${request.baseUrl}/responses` ? `${request.baseUrl}/responses`
@@ -203,6 +245,8 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
store: false, store: false,
stream: true, stream: true,
}) })
} else if (useGemini && (process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY)) {
headers.Authorization = `Bearer ${process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY}`
} else if (process.env.OPENAI_API_KEY) { } else if (process.env.OPENAI_API_KEY) {
headers.Authorization = `Bearer ${process.env.OPENAI_API_KEY}` headers.Authorization = `Bearer ${process.env.OPENAI_API_KEY}`
} }
@@ -228,7 +272,7 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
} }
function checkOllamaProcessorMode(): CheckResult { function checkOllamaProcessorMode(): CheckResult {
if (!isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)) { if (!isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) || isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) {
return pass('Ollama processor mode', 'Skipped (OpenAI-compatible mode disabled).') return pass('Ollama processor mode', 'Skipped (OpenAI-compatible mode disabled).')
} }
@@ -267,6 +311,14 @@ function checkOllamaProcessorMode(): CheckResult {
} }
function serializeSafeEnvSummary(): Record<string, string | boolean> { function serializeSafeEnvSummary(): Record<string, string | boolean> {
if (isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) {
return {
CLAUDE_CODE_USE_GEMINI: true,
GEMINI_MODEL: process.env.GEMINI_MODEL ?? '(unset, default: gemini-2.0-flash)',
GEMINI_BASE_URL: process.env.GEMINI_BASE_URL ?? 'https://generativelanguage.googleapis.com/v1beta/openai',
GEMINI_API_KEY_SET: Boolean(process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY),
}
}
const request = resolveProviderRequest({ const request = resolveProviderRequest({
model: process.env.OPENAI_MODEL, model: process.env.OPENAI_MODEL,
baseUrl: process.env.OPENAI_BASE_URL, baseUrl: process.env.OPENAI_BASE_URL,

View File

@@ -265,13 +265,14 @@ export const setAttribute = (
markDirty(node) markDirty(node)
} }
export const setStyle = (node: DOMNode, style: Styles): void => { export const setStyle = (node: DOMNode, style: Styles | undefined): void => {
const nextStyle = style ?? {}
// Compare style properties to avoid marking dirty unnecessarily. // Compare style properties to avoid marking dirty unnecessarily.
// React creates new style objects on every render even when unchanged. // React creates new style objects on every render even when unchanged.
if (stylesEqual(node.style, style)) { if (stylesEqual(node.style, nextStyle)) {
return return
} }
node.style = style node.style = nextStyle
markDirty(node) markDirty(node)
} }

View File

@@ -59,6 +59,12 @@ $ npm install --save-dev react-devtools-core
type AnyObject = Record<string, unknown> type AnyObject = Record<string, unknown>
type UpdatePayload = {
props?: AnyObject
style?: AnyObject
nextStyle?: Styles | undefined
}
const diff = (before: AnyObject, after: AnyObject): AnyObject | undefined => { const diff = (before: AnyObject, after: AnyObject): AnyObject | undefined => {
if (before === after) { if (before === after) {
return return
@@ -232,7 +238,7 @@ const reconciler = createReconciler<
unknown, unknown,
DOMElement, DOMElement,
HostContext, HostContext,
boolean, UpdatePayload | null,
NodeJS.Timeout, NodeJS.Timeout,
-1, -1,
null null
@@ -403,8 +409,19 @@ const reconciler = createReconciler<
_type: ElementNames, _type: ElementNames,
oldProps: Props, oldProps: Props,
newProps: Props, newProps: Props,
): boolean { ): UpdatePayload | null {
return oldProps !== newProps const props = diff(oldProps, newProps)
const style = diff(oldProps['style'] as Styles, newProps['style'] as Styles)
if (!props && !style) {
return null
}
return {
props,
style,
nextStyle: newProps['style'] as Styles | undefined,
}
}, },
commitMount(node: DOMElement): void { commitMount(node: DOMElement): void {
getFocusManager(node).handleAutoFocus(node) getFocusManager(node).handleAutoFocus(node)
@@ -432,13 +449,16 @@ const reconciler = createReconciler<
}, },
commitUpdate( commitUpdate(
node: DOMElement, node: DOMElement,
_updatePayload: boolean, updatePayload: UpdatePayload | null,
_type: ElementNames, _type: ElementNames,
oldProps: Props, _oldProps: Props,
newProps: Props, _newProps: Props,
): void { ): void {
const props = diff(oldProps, newProps) if (!updatePayload) {
const style = diff(oldProps['style'] as Styles, newProps['style'] as Styles) return
}
const { props, style, nextStyle } = updatePayload
if (props) { if (props) {
for (const [key, value] of Object.entries(props)) { for (const [key, value] of Object.entries(props)) {
@@ -462,7 +482,7 @@ const reconciler = createReconciler<
} }
if (style && node.yogaNode) { if (style && node.yogaNode) {
applyStyles(node.yogaNode, style, newProps['style'] as Styles) applyStyles(node.yogaNode, style, nextStyle)
} }
}, },
commitTextUpdate(node: TextNode, _oldText: string, newText: string): void { commitTextUpdate(node: TextNode, _oldText: string, newText: string): void {

View File

@@ -292,6 +292,7 @@ async function* openaiStreamToAnthropic(
let hasEmittedContentStart = false let hasEmittedContentStart = false
let lastStopReason: 'tool_use' | 'max_tokens' | 'end_turn' | null = null let lastStopReason: 'tool_use' | 'max_tokens' | 'end_turn' | null = null
let hasEmittedFinalUsage = false let hasEmittedFinalUsage = false
let hasProcessedFinishReason = false
// Emit message_start // Emit message_start
yield { yield {
@@ -422,8 +423,11 @@ async function* openaiStreamToAnthropic(
} }
} }
// Finish // Finish — guard ensures we only process finish_reason once even if
if (choice.finish_reason) { // multiple chunks arrive with finish_reason set (some providers do this)
if (choice.finish_reason && !hasProcessedFinishReason) {
hasProcessedFinishReason = true
// Close any open content blocks // Close any open content blocks
if (hasEmittedContentStart) { if (hasEmittedContentStart) {
yield { yield {
@@ -741,6 +745,22 @@ export function createOpenAIShimClient(options: {
maxRetries?: number maxRetries?: number
timeout?: number timeout?: number
}): unknown { }): unknown {
// When Gemini provider is active, map Gemini env vars to OpenAI-compatible ones
// so the existing providerConfig.ts infrastructure picks them up correctly.
if (
process.env.CLAUDE_CODE_USE_GEMINI === '1' ||
process.env.CLAUDE_CODE_USE_GEMINI === 'true'
) {
process.env.OPENAI_BASE_URL ??=
process.env.GEMINI_BASE_URL ??
'https://generativelanguage.googleapis.com/v1beta/openai'
process.env.OPENAI_API_KEY ??=
process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY ?? ''
if (process.env.GEMINI_MODEL && !process.env.OPENAI_MODEL) {
process.env.OPENAI_MODEL = process.env.GEMINI_MODEL
}
}
const beta = new OpenAIShimBeta({ const beta = new OpenAIShimBeta({
...(options.defaultHeaders ?? {}), ...(options.defaultHeaders ?? {}),
}) })

View File

@@ -4,6 +4,7 @@ import { getGlobalConfig } from './config.js'
import { isEnvTruthy } from './envUtils.js' import { isEnvTruthy } from './envUtils.js'
import { getCanonicalName } from './model/model.js' import { getCanonicalName } from './model/model.js'
import { getModelCapability } from './model/modelCapabilities.js' import { getModelCapability } from './model/modelCapabilities.js'
import { getOpenAIContextWindow, getOpenAIMaxOutputTokens } from './model/openaiContextWindows.js'
// Model context window size (200k tokens for all models right now) // Model context window size (200k tokens for all models right now)
export const MODEL_CONTEXT_WINDOW_DEFAULT = 200_000 export const MODEL_CONTEXT_WINDOW_DEFAULT = 200_000
@@ -71,6 +72,19 @@ export function getContextWindowForModel(
return 1_000_000 return 1_000_000
} }
// OpenAI-compatible provider — use known context windows for the model
if (
process.env.CLAUDE_CODE_USE_OPENAI === '1' ||
process.env.CLAUDE_CODE_USE_OPENAI === 'true' ||
process.env.CLAUDE_CODE_USE_GEMINI === '1' ||
process.env.CLAUDE_CODE_USE_GEMINI === 'true'
) {
const openaiWindow = getOpenAIContextWindow(model)
if (openaiWindow !== undefined) {
return openaiWindow
}
}
const cap = getModelCapability(model) const cap = getModelCapability(model)
if (cap?.max_input_tokens && cap.max_input_tokens >= 100_000) { if (cap?.max_input_tokens && cap.max_input_tokens >= 100_000) {
if ( if (
@@ -162,6 +176,19 @@ export function getModelMaxOutputTokens(model: string): {
} }
} }
// OpenAI-compatible provider — use known output limits to avoid 400 errors
if (
process.env.CLAUDE_CODE_USE_OPENAI === '1' ||
process.env.CLAUDE_CODE_USE_OPENAI === 'true' ||
process.env.CLAUDE_CODE_USE_GEMINI === '1' ||
process.env.CLAUDE_CODE_USE_GEMINI === 'true'
) {
const openaiMax = getOpenAIMaxOutputTokens(model)
if (openaiMax !== undefined) {
return { default: openaiMax, upperLimit: openaiMax }
}
}
const m = getCanonicalName(model) const m = getCanonicalName(model)
if (m.includes('opus-4-6')) { if (m.includes('opus-4-6')) {

View File

@@ -14,6 +14,17 @@ export const OPENAI_MODEL_DEFAULTS = {
haiku: 'gpt-4o-mini', // fast & cheap haiku: 'gpt-4o-mini', // fast & cheap
} as const } as const
// ---------------------------------------------------------------------------
// Gemini model mappings
// Maps Claude model tiers to Google Gemini equivalents.
// Override with GEMINI_MODEL env var.
// ---------------------------------------------------------------------------
export const GEMINI_MODEL_DEFAULTS = {
opus: 'gemini-2.5-pro-preview-03-25', // most capable
sonnet: 'gemini-2.0-flash', // balanced
haiku: 'gemini-2.0-flash-lite', // fast & cheap
} as const
// @[MODEL LAUNCH]: Add a new CLAUDE_*_CONFIG constant here. Double check the correct model strings // @[MODEL LAUNCH]: Add a new CLAUDE_*_CONFIG constant here. Double check the correct model strings
// here since the pattern may change. // here since the pattern may change.
@@ -23,6 +34,7 @@ export const CLAUDE_3_7_SONNET_CONFIG = {
vertex: 'claude-3-7-sonnet@20250219', vertex: 'claude-3-7-sonnet@20250219',
foundry: 'claude-3-7-sonnet', foundry: 'claude-3-7-sonnet',
openai: 'gpt-4o-mini', openai: 'gpt-4o-mini',
gemini: 'gemini-2.0-flash',
} as const satisfies ModelConfig } as const satisfies ModelConfig
export const CLAUDE_3_5_V2_SONNET_CONFIG = { export const CLAUDE_3_5_V2_SONNET_CONFIG = {
@@ -31,6 +43,7 @@ export const CLAUDE_3_5_V2_SONNET_CONFIG = {
vertex: 'claude-3-5-sonnet-v2@20241022', vertex: 'claude-3-5-sonnet-v2@20241022',
foundry: 'claude-3-5-sonnet', foundry: 'claude-3-5-sonnet',
openai: 'gpt-4o-mini', openai: 'gpt-4o-mini',
gemini: 'gemini-2.0-flash',
} as const satisfies ModelConfig } as const satisfies ModelConfig
export const CLAUDE_3_5_HAIKU_CONFIG = { export const CLAUDE_3_5_HAIKU_CONFIG = {
@@ -39,6 +52,7 @@ export const CLAUDE_3_5_HAIKU_CONFIG = {
vertex: 'claude-3-5-haiku@20241022', vertex: 'claude-3-5-haiku@20241022',
foundry: 'claude-3-5-haiku', foundry: 'claude-3-5-haiku',
openai: 'gpt-4o-mini', openai: 'gpt-4o-mini',
gemini: 'gemini-2.0-flash-lite',
} as const satisfies ModelConfig } as const satisfies ModelConfig
export const CLAUDE_HAIKU_4_5_CONFIG = { export const CLAUDE_HAIKU_4_5_CONFIG = {
@@ -47,6 +61,7 @@ export const CLAUDE_HAIKU_4_5_CONFIG = {
vertex: 'claude-haiku-4-5@20251001', vertex: 'claude-haiku-4-5@20251001',
foundry: 'claude-haiku-4-5', foundry: 'claude-haiku-4-5',
openai: 'gpt-4o-mini', openai: 'gpt-4o-mini',
gemini: 'gemini-2.0-flash-lite',
} as const satisfies ModelConfig } as const satisfies ModelConfig
export const CLAUDE_SONNET_4_CONFIG = { export const CLAUDE_SONNET_4_CONFIG = {
@@ -55,6 +70,7 @@ export const CLAUDE_SONNET_4_CONFIG = {
vertex: 'claude-sonnet-4@20250514', vertex: 'claude-sonnet-4@20250514',
foundry: 'claude-sonnet-4', foundry: 'claude-sonnet-4',
openai: 'gpt-4o-mini', openai: 'gpt-4o-mini',
gemini: 'gemini-2.0-flash',
} as const satisfies ModelConfig } as const satisfies ModelConfig
export const CLAUDE_SONNET_4_5_CONFIG = { export const CLAUDE_SONNET_4_5_CONFIG = {
@@ -63,6 +79,7 @@ export const CLAUDE_SONNET_4_5_CONFIG = {
vertex: 'claude-sonnet-4-5@20250929', vertex: 'claude-sonnet-4-5@20250929',
foundry: 'claude-sonnet-4-5', foundry: 'claude-sonnet-4-5',
openai: 'gpt-4o', openai: 'gpt-4o',
gemini: 'gemini-2.0-flash',
} as const satisfies ModelConfig } as const satisfies ModelConfig
export const CLAUDE_OPUS_4_CONFIG = { export const CLAUDE_OPUS_4_CONFIG = {
@@ -71,6 +88,7 @@ export const CLAUDE_OPUS_4_CONFIG = {
vertex: 'claude-opus-4@20250514', vertex: 'claude-opus-4@20250514',
foundry: 'claude-opus-4', foundry: 'claude-opus-4',
openai: 'gpt-4o', openai: 'gpt-4o',
gemini: 'gemini-2.5-pro-preview-03-25',
} as const satisfies ModelConfig } as const satisfies ModelConfig
export const CLAUDE_OPUS_4_1_CONFIG = { export const CLAUDE_OPUS_4_1_CONFIG = {
@@ -79,6 +97,7 @@ export const CLAUDE_OPUS_4_1_CONFIG = {
vertex: 'claude-opus-4-1@20250805', vertex: 'claude-opus-4-1@20250805',
foundry: 'claude-opus-4-1', foundry: 'claude-opus-4-1',
openai: 'gpt-4o', openai: 'gpt-4o',
gemini: 'gemini-2.5-pro-preview-03-25',
} as const satisfies ModelConfig } as const satisfies ModelConfig
export const CLAUDE_OPUS_4_5_CONFIG = { export const CLAUDE_OPUS_4_5_CONFIG = {
@@ -87,6 +106,7 @@ export const CLAUDE_OPUS_4_5_CONFIG = {
vertex: 'claude-opus-4-5@20251101', vertex: 'claude-opus-4-5@20251101',
foundry: 'claude-opus-4-5', foundry: 'claude-opus-4-5',
openai: 'gpt-4o', openai: 'gpt-4o',
gemini: 'gemini-2.5-pro-preview-03-25',
} as const satisfies ModelConfig } as const satisfies ModelConfig
export const CLAUDE_OPUS_4_6_CONFIG = { export const CLAUDE_OPUS_4_6_CONFIG = {
@@ -95,6 +115,7 @@ export const CLAUDE_OPUS_4_6_CONFIG = {
vertex: 'claude-opus-4-6', vertex: 'claude-opus-4-6',
foundry: 'claude-opus-4-6', foundry: 'claude-opus-4-6',
openai: 'gpt-4o', openai: 'gpt-4o',
gemini: 'gemini-2.5-pro-preview-03-25',
} as const satisfies ModelConfig } as const satisfies ModelConfig
export const CLAUDE_SONNET_4_6_CONFIG = { export const CLAUDE_SONNET_4_6_CONFIG = {
@@ -103,6 +124,7 @@ export const CLAUDE_SONNET_4_6_CONFIG = {
vertex: 'claude-sonnet-4-6', vertex: 'claude-sonnet-4-6',
foundry: 'claude-sonnet-4-6', foundry: 'claude-sonnet-4-6',
openai: 'gpt-4o', openai: 'gpt-4o',
gemini: 'gemini-2.0-flash',
} as const satisfies ModelConfig } as const satisfies ModelConfig
// @[MODEL LAUNCH]: Register the new config here. // @[MODEL LAUNCH]: Register the new config here.

View File

@@ -35,6 +35,10 @@ export type ModelSetting = ModelName | ModelAlias | null
export function getSmallFastModel(): ModelName { export function getSmallFastModel(): ModelName {
if (process.env.ANTHROPIC_SMALL_FAST_MODEL) return process.env.ANTHROPIC_SMALL_FAST_MODEL if (process.env.ANTHROPIC_SMALL_FAST_MODEL) return process.env.ANTHROPIC_SMALL_FAST_MODEL
// For Gemini provider, use a fast model
if (getAPIProvider() === 'gemini') {
return process.env.GEMINI_MODEL || 'gemini-2.0-flash-lite'
}
// For OpenAI provider, use OPENAI_MODEL or a sensible default // For OpenAI provider, use OPENAI_MODEL or a sensible default
if (getAPIProvider() === 'openai') { if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o-mini' return process.env.OPENAI_MODEL || 'gpt-4o-mini'
@@ -71,7 +75,7 @@ export function getUserSpecifiedModelSetting(): ModelSetting | undefined {
specifiedModel = modelOverride specifiedModel = modelOverride
} else { } else {
const settings = getSettings_DEPRECATED() || {} const settings = getSettings_DEPRECATED() || {}
specifiedModel = process.env.ANTHROPIC_MODEL || process.env.OPENAI_MODEL || settings.model || undefined specifiedModel = process.env.ANTHROPIC_MODEL || process.env.GEMINI_MODEL || process.env.OPENAI_MODEL || settings.model || undefined
} }
// Ignore the user-specified model if it's not in the availableModels allowlist. // Ignore the user-specified model if it's not in the availableModels allowlist.
@@ -111,6 +115,10 @@ export function getDefaultOpusModel(): ModelName {
if (process.env.ANTHROPIC_DEFAULT_OPUS_MODEL) { if (process.env.ANTHROPIC_DEFAULT_OPUS_MODEL) {
return process.env.ANTHROPIC_DEFAULT_OPUS_MODEL return process.env.ANTHROPIC_DEFAULT_OPUS_MODEL
} }
// Gemini provider
if (getAPIProvider() === 'gemini') {
return process.env.GEMINI_MODEL || 'gemini-2.5-pro-preview-03-25'
}
// OpenAI provider: use user-specified model or default // OpenAI provider: use user-specified model or default
if (getAPIProvider() === 'openai') { if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o' return process.env.OPENAI_MODEL || 'gpt-4o'
@@ -129,6 +137,10 @@ export function getDefaultSonnetModel(): ModelName {
if (process.env.ANTHROPIC_DEFAULT_SONNET_MODEL) { if (process.env.ANTHROPIC_DEFAULT_SONNET_MODEL) {
return process.env.ANTHROPIC_DEFAULT_SONNET_MODEL return process.env.ANTHROPIC_DEFAULT_SONNET_MODEL
} }
// Gemini provider
if (getAPIProvider() === 'gemini') {
return process.env.GEMINI_MODEL || 'gemini-2.0-flash'
}
// OpenAI provider // OpenAI provider
if (getAPIProvider() === 'openai') { if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o' return process.env.OPENAI_MODEL || 'gpt-4o'
@@ -145,6 +157,10 @@ export function getDefaultHaikuModel(): ModelName {
if (process.env.ANTHROPIC_DEFAULT_HAIKU_MODEL) { if (process.env.ANTHROPIC_DEFAULT_HAIKU_MODEL) {
return process.env.ANTHROPIC_DEFAULT_HAIKU_MODEL return process.env.ANTHROPIC_DEFAULT_HAIKU_MODEL
} }
// Gemini provider
if (getAPIProvider() === 'gemini') {
return process.env.GEMINI_MODEL || 'gemini-2.0-flash-lite'
}
// OpenAI provider // OpenAI provider
if (getAPIProvider() === 'openai') { if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o-mini' return process.env.OPENAI_MODEL || 'gpt-4o-mini'
@@ -193,6 +209,10 @@ export function getRuntimeMainLoopModel(params: {
* @returns The default model setting to use * @returns The default model setting to use
*/ */
export function getDefaultMainLoopModelSetting(): ModelName | ModelAlias { export function getDefaultMainLoopModelSetting(): ModelName | ModelAlias {
// Gemini provider: always use the configured Gemini model
if (getAPIProvider() === 'gemini') {
return process.env.GEMINI_MODEL || 'gemini-2.0-flash'
}
// OpenAI provider: always use the configured OpenAI model // OpenAI provider: always use the configured OpenAI model
if (getAPIProvider() === 'openai') { if (getAPIProvider() === 'openai') {
return process.env.OPENAI_MODEL || 'gpt-4o' return process.env.OPENAI_MODEL || 'gpt-4o'
@@ -381,8 +401,8 @@ export function renderModelSetting(setting: ModelName | ModelAlias): string {
* if the model is not recognized as a public model. * if the model is not recognized as a public model.
*/ */
export function getPublicModelDisplayName(model: ModelName): string | null { export function getPublicModelDisplayName(model: ModelName): string | null {
// For OpenAI provider, show the actual model name (e.g. 'gpt-4o') not a Claude alias // For OpenAI/Gemini providers, show the actual model name not a Claude alias
if (getAPIProvider() === 'openai') { if (getAPIProvider() === 'openai' || getAPIProvider() === 'gemini') {
return null return null
} }
switch (model) { switch (model) {

View File

@@ -0,0 +1,132 @@
/**
* openaiContextWindows.ts
* Context window sizes for OpenAI-compatible models used via the shim.
* Fixes: auto-compact and warnings using wrong 200k default for OpenAI models.
*
* When CLAUDE_CODE_USE_OPENAI=1, getContextWindowForModel() falls through to
* MODEL_CONTEXT_WINDOW_DEFAULT (200k). This causes the warning and blocking
* thresholds to be set at 200k even for models like gpt-4o (128k) or llama3 (8k),
* meaning users get no warning before hitting a hard API error.
*
 * Values are in tokens and current as of April 2026 — update as needed.
*/
/**
 * Context window (input token limit) per model id.
 * Dated or suffixed variants resolve to these base entries through
 * lookupByModel's prefix fallback.
 */
const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
  // OpenAI hosted models
  'gpt-4o': 128_000,
  'gpt-4o-mini': 128_000,
  'gpt-4.1': 1_047_576,
  'gpt-4.1-mini': 1_047_576,
  'gpt-4.1-nano': 1_047_576,
  'gpt-4-turbo': 128_000,
  'gpt-4': 8_192,
  'o3-mini': 200_000,
  'o4-mini': 200_000,
  'o3': 200_000,
  // DeepSeek first-party API
  'deepseek-chat': 64_000,
  'deepseek-reasoner': 64_000,
  // Open models hosted on Groq (fast inference)
  'llama-3.3-70b-versatile': 128_000,
  'llama-3.1-8b-instant': 128_000,
  'mixtral-8x7b-32768': 32_768,
  // Mistral first-party API
  'mistral-large-latest': 131_072,
  'mistral-small-latest': 131_072,
  // Google models reached via OpenRouter-style ids
  'google/gemini-2.0-flash': 1_048_576,
  'google/gemini-2.5-pro': 1_048_576,
  // Local models served by Ollama
  'llama3.3:70b': 8_192,
  'llama3.1:8b': 8_192,
  'llama3.2:3b': 8_192,
  'qwen2.5-coder:32b': 32_768,
  'qwen2.5-coder:7b': 32_768,
  'deepseek-coder-v2:16b': 163_840,
  'deepseek-r1:14b': 65_536,
  'mistral:7b': 32_768,
  'phi4:14b': 16_384,
  'gemma2:27b': 8_192,
  'codellama:13b': 16_384,
}
/**
 * Max output (completion) token limit per model id.
 *
 * This bound is independent of the context window (the input-side limit).
 * Without it, a default max_tokens larger than the model allows triggers a
 * 400 "max_tokens is too large" error from the API.
 */
const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
  // OpenAI hosted models
  'gpt-4o': 16_384,
  'gpt-4o-mini': 16_384,
  'gpt-4.1': 32_768,
  'gpt-4.1-mini': 32_768,
  'gpt-4.1-nano': 32_768,
  'gpt-4-turbo': 4_096,
  'gpt-4': 4_096,
  'o3-mini': 100_000,
  'o4-mini': 100_000,
  'o3': 100_000,
  // DeepSeek first-party API
  'deepseek-chat': 8_192,
  'deepseek-reasoner': 32_768,
  // Open models hosted on Groq
  'llama-3.3-70b-versatile': 32_768,
  'llama-3.1-8b-instant': 8_192,
  'mixtral-8x7b-32768': 32_768,
  // Mistral first-party API
  'mistral-large-latest': 32_768,
  'mistral-small-latest': 32_768,
  // Google models reached via OpenRouter-style ids
  'google/gemini-2.0-flash': 8_192,
  'google/gemini-2.5-pro': 32_768,
  // Local models served by Ollama — deliberately conservative values
  'llama3.3:70b': 4_096,
  'llama3.1:8b': 4_096,
  'llama3.2:3b': 4_096,
  'qwen2.5-coder:32b': 8_192,
  'qwen2.5-coder:7b': 8_192,
  'deepseek-coder-v2:16b': 8_192,
  'deepseek-r1:14b': 8_192,
  'mistral:7b': 4_096,
  'phi4:14b': 4_096,
  'gemma2:27b': 4_096,
  'codellama:13b': 4_096,
}
/**
 * Resolve a model id against a lookup table.
 *
 * Tries an exact key match first, then falls back to prefix matching so
 * dated variants like "gpt-4o-2024-11-20" resolve to the base "gpt-4o"
 * entry. When several keys are prefixes of the model id (e.g. "gpt-4.1"
 * and "gpt-4.1-mini"), the LONGEST key wins — the most specific entry is
 * used and the result no longer depends on table insertion order.
 *
 * @param table Model-id-keyed lookup table.
 * @param model Model id to resolve.
 * @returns The matched value, or undefined when no key matches.
 */
function lookupByModel<T>(table: Record<string, T>, model: string): T | undefined {
  if (table[model] !== undefined) return table[model]
  let bestKey: string | undefined
  for (const key of Object.keys(table)) {
    if (model.startsWith(key) && (bestKey === undefined || key.length > bestKey.length)) {
      bestKey = key
    }
  }
  return bestKey === undefined ? undefined : table[bestKey]
}
/**
 * Look up the context window (input token budget) for an
 * OpenAI-compatible model id.
 *
 * Dated variants such as "gpt-4o-2024-11-20" resolve to their base entry
 * through lookupByModel's prefix fallback. Returns undefined for unknown
 * models so callers can apply their own default.
 */
export function getOpenAIContextWindow(model: string): number | undefined {
  const contextWindow = lookupByModel(OPENAI_CONTEXT_WINDOWS, model)
  return contextWindow
}
/**
 * Look up the max output (completion) token limit for an
 * OpenAI-compatible model id.
 *
 * Distinct from the context window, which bounds the input side.
 * Returns undefined when the model is not present in the table.
 */
export function getOpenAIMaxOutputTokens(model: string): number | undefined {
  const maxOutput = lookupByModel(OPENAI_MAX_OUTPUT_TOKENS, model)
  return maxOutput
}

View File

@@ -1,10 +1,12 @@
import type { AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS } from '../../services/analytics/index.js' import type { AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS } from '../../services/analytics/index.js'
import { isEnvTruthy } from '../envUtils.js' import { isEnvTruthy } from '../envUtils.js'
export type APIProvider = 'firstParty' | 'bedrock' | 'vertex' | 'foundry' | 'openai' export type APIProvider = 'firstParty' | 'bedrock' | 'vertex' | 'foundry' | 'openai' | 'gemini'
export function getAPIProvider(): APIProvider { export function getAPIProvider(): APIProvider {
return isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) return isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
? 'gemini'
: isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
? 'openai' ? 'openai'
: isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK) : isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK)
? 'bedrock' ? 'bedrock'

View File

@@ -6,6 +6,7 @@ import test from 'node:test'
import { import {
buildCodexProfileEnv, buildCodexProfileEnv,
buildGeminiProfileEnv,
buildLaunchEnv, buildLaunchEnv,
buildOllamaProfileEnv, buildOllamaProfileEnv,
buildOpenAIProfileEnv, buildOpenAIProfileEnv,
@@ -127,6 +128,60 @@ test('openai launch ignores codex persisted transport hints', async () => {
assert.equal(env.OPENAI_API_KEY, 'sk-live') assert.equal(env.OPENAI_API_KEY, 'sk-live')
}) })
// When the persisted profile is already a gemini profile, its saved env
// (model, key, base URL) should be carried into the launch env verbatim.
test('matching persisted gemini env is reused for gemini launch', async () => {
  const env = await buildLaunchEnv({
    profile: 'gemini',
    persisted: profile('gemini', {
      GEMINI_MODEL: 'gemini-2.5-flash',
      GEMINI_API_KEY: 'gem-persisted',
      GEMINI_BASE_URL: 'https://example.test/v1beta/openai',
    }),
    goal: 'balanced',
    // Empty shell env — persisted values have nothing to compete with.
    processEnv: {},
  })
  // Gemini mode is flagged on; the OpenAI flag must be absent.
  assert.equal(env.CLAUDE_CODE_USE_GEMINI, '1')
  assert.equal(env.CLAUDE_CODE_USE_OPENAI, undefined)
  // All three persisted gemini settings survive unchanged.
  assert.equal(env.GEMINI_MODEL, 'gemini-2.5-flash')
  assert.equal(env.GEMINI_API_KEY, 'gem-persisted')
  assert.equal(env.GEMINI_BASE_URL, 'https://example.test/v1beta/openai')
})
// A persisted OPENAI profile must not leak into a gemini launch: its values
// are ignored, live gemini keys from the shell are used, and every other
// provider's secret/flag is stripped from the resulting env.
test('gemini launch ignores mismatched persisted openai env and strips other provider secrets', async () => {
  const env = await buildLaunchEnv({
    profile: 'gemini',
    // Persisted profile is for a DIFFERENT provider — should be discarded.
    persisted: profile('openai', {
      OPENAI_BASE_URL: 'https://api.openai.com/v1',
      OPENAI_MODEL: 'gpt-4o',
      OPENAI_API_KEY: 'sk-persisted',
    }),
    goal: 'balanced',
    // Shell env mixes gemini keys with openai/codex secrets and flags.
    processEnv: {
      GEMINI_API_KEY: 'gem-live',
      GOOGLE_API_KEY: 'google-live',
      OPENAI_API_KEY: 'sk-live',
      OPENAI_BASE_URL: 'https://api.openai.com/v1',
      OPENAI_MODEL: 'gpt-4o-mini',
      CODEX_API_KEY: 'codex-live',
      CHATGPT_ACCOUNT_ID: 'acct_live',
      CLAUDE_CODE_USE_OPENAI: '1',
    },
  })
  // Gemini mode on; shell's OpenAI flag must have been removed.
  assert.equal(env.CLAUDE_CODE_USE_GEMINI, '1')
  assert.equal(env.CLAUDE_CODE_USE_OPENAI, undefined)
  // Model and base URL fall back to the gemini defaults; the live shell
  // GEMINI_API_KEY wins over the mismatched persisted key.
  assert.equal(env.GEMINI_MODEL, 'gemini-2.0-flash')
  assert.equal(env.GEMINI_API_KEY, 'gem-live')
  assert.equal(
    env.GEMINI_BASE_URL,
    'https://generativelanguage.googleapis.com/v1beta/openai',
  )
  // Every other provider's credential is scrubbed from the launch env.
  assert.equal(env.GOOGLE_API_KEY, undefined)
  assert.equal(env.OPENAI_API_KEY, undefined)
  assert.equal(env.CODEX_API_KEY, undefined)
  assert.equal(env.CHATGPT_ACCOUNT_ID, undefined)
})
test('matching persisted codex env is reused for codex launch', async () => { test('matching persisted codex env is reused for codex launch', async () => {
const env = await buildLaunchEnv({ const env = await buildLaunchEnv({
profile: 'codex', profile: 'codex',
@@ -283,6 +338,27 @@ test('codex profiles require a chatgpt account id', () => {
assert.equal(env, null) assert.equal(env, null)
}) })
// GOOGLE_API_KEY is accepted as a stand-in when GEMINI_API_KEY is unset;
// the default gemini model is filled in automatically.
test('gemini profiles accept google api key fallback', () => {
  const env = buildGeminiProfileEnv({
    processEnv: {
      GOOGLE_API_KEY: 'gem-live',
    },
  })
  assert.deepEqual(env, {
    GEMINI_MODEL: 'gemini-2.0-flash',
    GEMINI_API_KEY: 'gem-live',
  })
})
// With no key available in options or the shell env, no gemini profile
// can be built — the builder signals this by returning null.
test('gemini profiles require a key', () => {
  const env = buildGeminiProfileEnv({
    processEnv: {},
  })
  assert.equal(env, null)
})
test('openai profiles ignore codex shell transport hints', () => { test('openai profiles ignore codex shell transport hints', () => {
const env = buildOpenAIProfileEnv({ const env = buildOpenAIProfileEnv({
goal: 'balanced', goal: 'balanced',

View File

@@ -10,7 +10,10 @@ import {
type RecommendationGoal, type RecommendationGoal,
} from './providerRecommendation.ts' } from './providerRecommendation.ts'
export type ProviderProfile = 'openai' | 'ollama' | 'codex' const DEFAULT_GEMINI_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/openai'
const DEFAULT_GEMINI_MODEL = 'gemini-2.0-flash'
export type ProviderProfile = 'openai' | 'ollama' | 'codex' | 'gemini'
export type ProfileEnv = { export type ProfileEnv = {
OPENAI_BASE_URL?: string OPENAI_BASE_URL?: string
@@ -19,6 +22,9 @@ export type ProfileEnv = {
CODEX_API_KEY?: string CODEX_API_KEY?: string
CHATGPT_ACCOUNT_ID?: string CHATGPT_ACCOUNT_ID?: string
CODEX_ACCOUNT_ID?: string CODEX_ACCOUNT_ID?: string
GEMINI_API_KEY?: string
GEMINI_MODEL?: string
GEMINI_BASE_URL?: string
} }
export type ProfileFile = { export type ProfileFile = {
@@ -47,6 +53,36 @@ export function buildOllamaProfileEnv(
} }
} }
/**
 * Build the profile env for a Gemini launch.
 *
 * The API key is resolved from the explicit option first, then
 * GEMINI_API_KEY, then GOOGLE_API_KEY from the process env; without a
 * usable key no profile can be built and null is returned. The model
 * falls back to the shell env and finally to DEFAULT_GEMINI_MODEL, and a
 * base URL is only recorded when one was actually supplied.
 */
export function buildGeminiProfileEnv(options: {
  model?: string | null
  baseUrl?: string | null
  apiKey?: string | null
  processEnv?: NodeJS.ProcessEnv
}): ProfileEnv | null {
  const shellEnv = options.processEnv ?? process.env
  const resolvedKey = sanitizeApiKey(
    options.apiKey ?? shellEnv.GEMINI_API_KEY ?? shellEnv.GOOGLE_API_KEY,
  )
  if (!resolvedKey) {
    return null
  }
  const resolvedBaseUrl = options.baseUrl || shellEnv.GEMINI_BASE_URL
  const profileEnv: ProfileEnv = {
    GEMINI_MODEL: options.model || shellEnv.GEMINI_MODEL || DEFAULT_GEMINI_MODEL,
    GEMINI_API_KEY: resolvedKey,
    // Omit the key entirely (rather than storing undefined) when absent.
    ...(resolvedBaseUrl ? { GEMINI_BASE_URL: resolvedBaseUrl } : {}),
  }
  return profileEnv
}
export function buildOpenAIProfileEnv(options: { export function buildOpenAIProfileEnv(options: {
goal: RecommendationGoal goal: RecommendationGoal
model?: string | null model?: string | null
@@ -142,11 +178,57 @@ export async function buildLaunchEnv(options: {
? options.persisted.env ?? {} ? options.persisted.env ?? {}
: {} : {}
const shellGeminiKey = sanitizeApiKey(
processEnv.GEMINI_API_KEY ?? processEnv.GOOGLE_API_KEY,
)
const persistedGeminiKey = sanitizeApiKey(persistedEnv.GEMINI_API_KEY)
if (options.profile === 'gemini') {
const env: NodeJS.ProcessEnv = {
...processEnv,
CLAUDE_CODE_USE_GEMINI: '1',
}
delete env.CLAUDE_CODE_USE_OPENAI
env.GEMINI_MODEL =
processEnv.GEMINI_MODEL ||
persistedEnv.GEMINI_MODEL ||
DEFAULT_GEMINI_MODEL
env.GEMINI_BASE_URL =
processEnv.GEMINI_BASE_URL ||
persistedEnv.GEMINI_BASE_URL ||
DEFAULT_GEMINI_BASE_URL
const geminiKey = shellGeminiKey || persistedGeminiKey
if (geminiKey) {
env.GEMINI_API_KEY = geminiKey
} else {
delete env.GEMINI_API_KEY
}
delete env.GOOGLE_API_KEY
delete env.OPENAI_BASE_URL
delete env.OPENAI_MODEL
delete env.OPENAI_API_KEY
delete env.CODEX_API_KEY
delete env.CHATGPT_ACCOUNT_ID
delete env.CODEX_ACCOUNT_ID
return env
}
const env: NodeJS.ProcessEnv = { const env: NodeJS.ProcessEnv = {
...processEnv, ...processEnv,
CLAUDE_CODE_USE_OPENAI: '1', CLAUDE_CODE_USE_OPENAI: '1',
} }
delete env.CLAUDE_CODE_USE_GEMINI
delete env.GEMINI_API_KEY
delete env.GEMINI_MODEL
delete env.GEMINI_BASE_URL
delete env.GOOGLE_API_KEY
if (options.profile === 'ollama') { if (options.profile === 'ollama') {
const getOllamaBaseUrl = const getOllamaBaseUrl =
options.getOllamaChatBaseUrl ?? (() => 'http://localhost:11434/v1') options.getOllamaChatBaseUrl ?? (() => 'http://localhost:11434/v1')