Merge origin/main into provider-setup-wizard

Author: Vasanthdev2004
Date: 2026-04-02 18:03:44 +05:30
40 changed files with 1801 additions and 132 deletions

View File

@@ -8,6 +8,7 @@ import {
   recommendOllamaModel,
 } from '../src/utils/providerRecommendation.ts'
 import {
+  buildAtomicChatProfileEnv,
   buildCodexProfileEnv,
   buildGeminiProfileEnv,
   buildOllamaProfileEnv,
@@ -19,8 +20,11 @@ import {
   type ProviderProfile,
 } from '../src/utils/providerProfile.ts'
 import {
+  getAtomicChatChatBaseUrl,
   getOllamaChatBaseUrl,
+  hasLocalAtomicChat,
   hasLocalOllama,
+  listAtomicChatModels,
   listOllamaModels,
 } from './provider-discovery.ts'
@@ -33,7 +37,7 @@ function parseArg(name: string): string | null {
 function parseProviderArg(): ProviderProfile | 'auto' {
   const p = parseArg('--provider')?.toLowerCase()
-  if (p === 'openai' || p === 'ollama' || p === 'codex' || p === 'gemini') return p
+  if (p === 'openai' || p === 'ollama' || p === 'codex' || p === 'gemini' || p === 'atomic-chat') return p
   return 'auto'
 }
@@ -101,6 +105,21 @@ async function main(): Promise<void> {
         getOllamaChatBaseUrl,
       },
     )
+  } else if (selected === 'atomic-chat') {
+    const model = argModel || (await listAtomicChatModels(argBaseUrl || undefined))[0]
+    if (!model) {
+      if (!(await hasLocalAtomicChat(argBaseUrl || undefined))) {
+        console.error('Atomic Chat is not running (could not connect to 127.0.0.1:1337).\n Download from https://atomic.chat/ and launch the application.')
+      } else {
+        console.error('Atomic Chat is running but no model is loaded. Open Atomic Chat and download or start a model first.')
+      }
+      process.exit(1)
+    }
+    env = buildAtomicChatProfileEnv(model, {
+      baseUrl: argBaseUrl,
+      getAtomicChatChatBaseUrl,
+    })
   } else if (selected === 'codex') {
     const builtEnv = buildCodexProfileEnv({
       model: argModel,
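
(For context: the `hasLocalAtomicChat` and `listAtomicChatModels` helpers used above live in `src/utils/providerDiscovery.ts` and are not shown in this diff. A minimal sketch of what they plausibly look like, assuming Atomic Chat serves the standard OpenAI-compatible `GET /v1/models` endpoint on the 127.0.0.1:1337 address named in the error message above; the exact base URL value is an assumption:)

    // Hypothetical sketch — the real implementations are in src/utils/providerDiscovery.ts.
    // The default URL below is a guess based on the 127.0.0.1:1337 error message in this diff.
    const DEFAULT_ATOMIC_CHAT_BASE_URL = 'http://127.0.0.1:1337/v1'

    async function hasLocalAtomicChat(baseUrl = DEFAULT_ATOMIC_CHAT_BASE_URL): Promise<boolean> {
      try {
        const res = await fetch(`${baseUrl}/models`)
        return res.ok
      } catch {
        return false // server not running or not reachable
      }
    }

    async function listAtomicChatModels(baseUrl = DEFAULT_ATOMIC_CHAT_BASE_URL): Promise<string[]> {
      try {
        const res = await fetch(`${baseUrl}/models`)
        if (!res.ok) return []
        // OpenAI-style payload: { data: [{ id: 'model-name' }, ...] }
        const body = (await res.json()) as { data?: Array<{ id: string }> }
        return (body.data ?? []).map((m) => m.id)
      } catch {
        return []
      }
    }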

View File

@@ -1,8 +1,13 @@
 export {
   benchmarkOllamaModel,
+  DEFAULT_ATOMIC_CHAT_BASE_URL,
   DEFAULT_OLLAMA_BASE_URL,
+  getAtomicChatApiBaseUrl,
+  getAtomicChatChatBaseUrl,
   getOllamaApiBaseUrl,
   getOllamaChatBaseUrl,
+  hasLocalAtomicChat,
   hasLocalOllama,
+  listAtomicChatModels,
   listOllamaModels,
 } from '../src/utils/providerDiscovery.ts'

View File

@@ -15,8 +15,11 @@ import {
   type ProviderProfile,
 } from '../src/utils/providerProfile.ts'
 import {
+  getAtomicChatChatBaseUrl,
   getOllamaChatBaseUrl,
+  hasLocalAtomicChat,
   hasLocalOllama,
+  listAtomicChatModels,
   listOllamaModels,
 } from './provider-discovery.ts'
@@ -47,7 +50,7 @@ function parseLaunchOptions(argv: string[]): LaunchOptions {
       continue
     }
-    if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini') && requestedProfile === 'auto') {
+    if ((lower === 'auto' || lower === 'openai' || lower === 'ollama' || lower === 'codex' || lower === 'gemini' || lower === 'atomic-chat') && requestedProfile === 'auto') {
       requestedProfile = lower as ProviderProfile | 'auto'
       continue
     }
@@ -85,6 +88,11 @@ async function resolveOllamaDefaultModel(
   return recommended?.name ?? null
 }
+
+async function resolveAtomicChatDefaultModel(): Promise<string | null> {
+  const models = await listAtomicChatModels()
+  return models[0] ?? null
+}
 
 function runCommand(command: string, env: NodeJS.ProcessEnv): Promise<number> {
   return runProcess(command, [], env)
 }
@@ -121,6 +129,10 @@ function printSummary(profile: ProviderProfile, env: NodeJS.ProcessEnv): void {
     console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`)
     console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`)
     console.log(`CODEX_API_KEY_SET=${Boolean(resolveCodexApiCredentials(env).apiKey)}`)
+  } else if (profile === 'atomic-chat') {
+    console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`)
+    console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`)
+    console.log('OPENAI_API_KEY_SET=false (local provider, no key required)')
   } else {
     console.log(`OPENAI_BASE_URL=${env.OPENAI_BASE_URL}`)
     console.log(`OPENAI_MODEL=${env.OPENAI_MODEL}`)
@@ -132,7 +144,7 @@ async function main(): Promise<void> {
   const options = parseLaunchOptions(process.argv.slice(2))
   const requestedProfile = options.requestedProfile
   if (!requestedProfile) {
-    console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|auto] [--fast] [--goal <latency|balanced|coding>] [-- <cli args>]')
+    console.error('Usage: bun run scripts/provider-launch.ts [openai|ollama|codex|gemini|atomic-chat|auto] [--fast] [--goal <latency|balanced|coding>] [-- <cli args>]')
     process.exit(1)
   }
@@ -164,12 +176,30 @@ async function main(): Promise<void> {
     }
   }
+  let resolvedAtomicChatModel: string | null = null
+  if (
+    profile === 'atomic-chat' &&
+    (persisted?.profile !== 'atomic-chat' || !persisted?.env?.OPENAI_MODEL)
+  ) {
+    if (!(await hasLocalAtomicChat())) {
+      console.error('Atomic Chat is not running (could not connect to 127.0.0.1:1337).\n Download from https://atomic.chat/ and launch the application.')
+      process.exit(1)
+    }
+    resolvedAtomicChatModel = await resolveAtomicChatDefaultModel()
+    if (!resolvedAtomicChatModel) {
+      console.error('Atomic Chat is running but no model is loaded. Open Atomic Chat and download or start a model first.')
+      process.exit(1)
+    }
+  }
   const env = await buildLaunchEnv({
     profile,
     persisted,
     goal: options.goal,
     getOllamaChatBaseUrl,
     resolveOllamaDefaultModel: async () => resolvedOllamaModel || 'llama3.1:8b',
+    getAtomicChatChatBaseUrl,
+    resolveAtomicChatDefaultModel: async () => resolvedAtomicChatModel,
   })
   if (options.fast) {
     applyFastFlags(env)
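
(Taken together with the usage string above, the launcher is invoked like this; the profile, goal, and trailing args here are illustrative, and anything after `--` is passed through to the wrapped CLI:)

    bun run scripts/provider-launch.ts atomic-chat --fast
    bun run scripts/provider-launch.ts auto --goal coding -- --help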

View File

@@ -93,11 +93,15 @@ function isLocalBaseUrl(baseUrl: string): boolean {
 }
 const GEMINI_DEFAULT_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/openai'
+const GITHUB_MODELS_DEFAULT_BASE = 'https://models.github.ai/inference'
 function currentBaseUrl(): string {
   if (isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) {
     return process.env.GEMINI_BASE_URL ?? GEMINI_DEFAULT_BASE_URL
   }
+  if (isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) {
+    return process.env.OPENAI_BASE_URL ?? GITHUB_MODELS_DEFAULT_BASE
+  }
   return process.env.OPENAI_BASE_URL ?? 'https://api.openai.com/v1'
 }
@@ -126,15 +130,47 @@ function checkGeminiEnv(): CheckResult[] {
   return results
 }
+function checkGithubEnv(): CheckResult[] {
+  const results: CheckResult[] = []
+  const baseUrl = process.env.OPENAI_BASE_URL ?? GITHUB_MODELS_DEFAULT_BASE
+  results.push(pass('Provider mode', 'GitHub Models provider enabled.'))
+  const token = process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN
+  if (!token?.trim()) {
+    results.push(fail('GITHUB_TOKEN', 'Missing. Set GITHUB_TOKEN or GH_TOKEN.'))
+  } else {
+    results.push(pass('GITHUB_TOKEN', 'Configured.'))
+  }
+  if (!process.env.OPENAI_MODEL) {
+    results.push(
+      pass(
+        'OPENAI_MODEL',
+        'Not set. Default github:copilot → openai/gpt-4.1 at runtime.',
+      ),
+    )
+  } else {
+    results.push(pass('OPENAI_MODEL', process.env.OPENAI_MODEL))
+  }
+  results.push(pass('OPENAI_BASE_URL', baseUrl))
+  return results
+}
 function checkOpenAIEnv(): CheckResult[] {
   const results: CheckResult[] = []
   const useGemini = isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
+  const useGithub = isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
   const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
   if (useGemini) {
     return checkGeminiEnv()
   }
+  if (useGithub && !useOpenAI) {
+    return checkGithubEnv()
+  }
   if (!useOpenAI) {
     results.push(pass('Provider mode', 'Anthropic login flow enabled (CLAUDE_CODE_USE_OPENAI is off).'))
     return results
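
(`pass`, `fail`, and `CheckResult` are defined earlier in this file and not shown in the diff; a sketch of the shape this code presumably relies on, where the field names are assumptions inferred from usage:)

    // Assumed shape, not part of this diff.
    type CheckResult = { label: string; ok: boolean; detail: string }

    const pass = (label: string, detail: string): CheckResult => ({ label, ok: true, detail })
    const fail = (label: string, detail: string): CheckResult => ({ label, ok: false, detail })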
@@ -181,12 +217,21 @@ function checkOpenAIEnv(): CheckResult[] {
   }
   const key = process.env.OPENAI_API_KEY
+  const githubToken = process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN
   if (key === 'SUA_CHAVE') {
     results.push(fail('OPENAI_API_KEY', 'Placeholder value detected: SUA_CHAVE.'))
-  } else if (!key && !isLocalBaseUrl(request.baseUrl)) {
+  } else if (
+    !key &&
+    !isLocalBaseUrl(request.baseUrl) &&
+    !(useGithub && githubToken?.trim())
+  ) {
     results.push(fail('OPENAI_API_KEY', 'Missing key for non-local provider URL.'))
+  } else if (!key && useGithub && githubToken?.trim()) {
+    results.push(
+      pass('OPENAI_API_KEY', 'Not set; GITHUB_TOKEN/GH_TOKEN will be used for GitHub Models.'),
+    )
   } else if (!key) {
-    results.push(pass('OPENAI_API_KEY', 'Not set (allowed for local providers like Ollama/LM Studio).'))
+    results.push(pass('OPENAI_API_KEY', 'Not set (allowed for local providers like Atomic Chat/Ollama/LM Studio).'))
   } else {
     results.push(pass('OPENAI_API_KEY', 'Configured.'))
   }
@@ -197,11 +242,19 @@ function checkOpenAIEnv(): CheckResult[] {
 async function checkBaseUrlReachability(): Promise<CheckResult> {
   const useGemini = isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
   const useOpenAI = isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
+  const useGithub = isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
-  if (!useGemini && !useOpenAI) {
+  if (!useGemini && !useOpenAI && !useGithub) {
     return pass('Provider reachability', 'Skipped (OpenAI-compatible mode disabled).')
   }
+  if (useGithub) {
+    return pass(
+      'Provider reachability',
+      'Skipped for GitHub Models (inference endpoint differs from OpenAI /models probe).',
+    )
+  }
   const geminiBaseUrl = 'https://generativelanguage.googleapis.com/v1beta/openai'
   const resolvedBaseUrl = useGemini
     ? (process.env.GEMINI_BASE_URL ?? geminiBaseUrl)
@@ -271,8 +324,21 @@ async function checkBaseUrlReachability(): Promise<CheckResult> {
   }
 }
+function isAtomicChatUrl(baseUrl: string): boolean {
+  try {
+    const parsed = new URL(baseUrl)
+    return parsed.port === '1337' && isLocalBaseUrl(baseUrl)
+  } catch {
+    return false
+  }
+}
 function checkOllamaProcessorMode(): CheckResult {
-  if (!isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) || isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) {
+  if (
+    !isTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
+    isTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
+    isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
+  ) {
     return pass('Ollama processor mode', 'Skipped (OpenAI-compatible mode disabled).')
   }
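
(The `isAtomicChatUrl` heuristic above keys off Atomic Chat's default local port, 1337, matching the connection error messages elsewhere in this merge. Roughly how it behaves; the Ollama port in the second line is Ollama's documented default, not something this diff sets:)

    isAtomicChatUrl('http://127.0.0.1:1337/v1')   // true  — local host and port 1337
    isAtomicChatUrl('http://localhost:11434/v1')  // false — Ollama's default port
    isAtomicChatUrl('https://api.openai.com/v1')  // false — not a local base URL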
@@ -281,6 +347,10 @@ function checkOllamaProcessorMode(): CheckResult {
     return pass('Ollama processor mode', 'Skipped (provider URL is not local).')
   }
+  if (isAtomicChatUrl(baseUrl)) {
+    return pass('Ollama processor mode', 'Skipped (Atomic Chat local provider detected, not Ollama).')
+  }
   const result = spawnSync('ollama', ['ps'], {
     cwd: process.cwd(),
     encoding: 'utf8',
@@ -319,6 +389,22 @@ function serializeSafeEnvSummary(): Record<string, string | boolean> {
       GEMINI_API_KEY_SET: Boolean(process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY),
     }
   }
+  if (
+    isTruthy(process.env.CLAUDE_CODE_USE_GITHUB) &&
+    !isTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
+  ) {
+    return {
+      CLAUDE_CODE_USE_GITHUB: true,
+      OPENAI_MODEL:
+        process.env.OPENAI_MODEL ??
+        '(unset, default: github:copilot → openai/gpt-4.1)',
+      OPENAI_BASE_URL:
+        process.env.OPENAI_BASE_URL ?? GITHUB_MODELS_DEFAULT_BASE,
+      GITHUB_TOKEN_SET: Boolean(
+        process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN,
+      ),
+    }
+  }
   const request = resolveProviderRequest({
     model: process.env.OPENAI_MODEL,
     baseUrl: process.env.OPENAI_BASE_URL,
@@ -374,6 +460,13 @@ async function main(): Promise<void> {
   const options = parseOptions(process.argv.slice(2))
   const results: CheckResult[] = []
+  const { enableConfigs } = await import('../src/utils/config.js')
+  enableConfigs()
+  const { applySafeConfigEnvironmentVariables } = await import('../src/utils/managedEnv.js')
+  applySafeConfigEnvironmentVariables()
+  const { hydrateGithubModelsTokenFromSecureStorage } = await import('../src/utils/githubModelsCredentials.js')
+  hydrateGithubModelsTokenFromSecureStorage()
   results.push(checkNodeVersion())
   results.push(checkBunRuntime())
   results.push(checkBuildArtifacts())