From 174eb8ad3b94166e86ab63e95c69d6da46982799 Mon Sep 17 00:00:00 2001
From: Vasanthdev2004
Date: Wed, 1 Apr 2026 11:10:51 +0530
Subject: [PATCH 1/2] feat: add intelligent provider profile recommendation

---
 PLAYBOOK.md                              |  26 ++
 README.md                                |  12 +
 package.json                             |   3 +
 scripts/provider-bootstrap.ts            |  60 +++--
 scripts/provider-discovery.ts            | 129 ++++++++++
 scripts/provider-launch.ts               |  63 +++--
 scripts/provider-recommend.ts            | 277 +++++++++++++++++++++
 src/utils/providerRecommendation.test.ts | 118 +++++++++
 src/utils/providerRecommendation.ts      | 297 +++++++++++++++++++++++
 9 files changed, 945 insertions(+), 40 deletions(-)
 create mode 100644 scripts/provider-discovery.ts
 create mode 100644 scripts/provider-recommend.ts
 create mode 100644 src/utils/providerRecommendation.test.ts
 create mode 100644 src/utils/providerRecommendation.ts

diff --git a/PLAYBOOK.md b/PLAYBOOK.md
index 662ee4dc..dfdaec76 100644
--- a/PLAYBOOK.md
+++ b/PLAYBOOK.md
@@ -37,6 +37,18 @@ If everything is healthy, OpenClaude starts directly.
 bun run profile:init -- --provider ollama --model llama3.1:8b
 ```
+
+Or let OpenClaude recommend the best local model for your goal:
+
+```powershell
+bun run profile:init -- --provider ollama --goal coding
+```
+
+Preview recommendations before saving:
+
+```powershell
+bun run profile:recommend -- --goal coding --benchmark
+```
 
 ### 3.2 Confirm profile file
 
 ```powershell
@@ -171,6 +183,12 @@ Fix:
 bun run profile:init -- --provider ollama --model llama3.1:8b
 ```
+
+Or auto-pick a local profile:
+
+```powershell
+bun run profile:auto -- --goal balanced
+```
 
 ## 6.5 Placeholder key (`SUA_CHAVE`) error
 
 Cause:
@@ -202,6 +220,14 @@
 bun run profile:fast   # llama3.2:3b
 bun run profile:code   # qwen2.5-coder:7b
 ```
+
+Goal-based auto-selection:
+
+```powershell
+bun run profile:auto -- --goal latency
+bun run profile:auto -- --goal balanced
+bun run profile:auto -- --goal coding
+```
 
 ## 8. Practical Prompt Playbook (Copy/Paste)
 
 ## 8.1 Code understanding
diff --git a/README.md b/README.md
index 5d17d276..358bf95d 100644
--- a/README.md
+++ b/README.md
@@ -206,12 +206,21 @@ Use profile launchers to avoid repeated environment setup:
 # one-time profile bootstrap (auto-detect ollama, otherwise openai)
 bun run profile:init
 
+# preview the best provider/model for your goal
+bun run profile:recommend -- --goal coding --benchmark
+
+# auto-apply the best available profile for your goal
+bun run profile:auto -- --goal latency
+
 # openai bootstrap with explicit key
 bun run profile:init -- --provider openai --api-key sk-...
 
 # ollama bootstrap with custom model
 bun run profile:init -- --provider ollama --model llama3.1:8b
 
+# ollama bootstrap with intelligent model auto-selection
+bun run profile:init -- --provider ollama --goal coding
+
 # launch using persisted profile (.openclaude-profile.json)
 bun run dev:profile
 
@@ -222,6 +231,9 @@ bun run dev:openai
 bun run dev:ollama
 ```
 
+`profile:recommend` ranks installed Ollama models for `latency`, `balanced`, or `coding`, and `profile:auto` can persist the recommendation directly.
+If no profile exists yet, `dev:profile` now uses the same goal-aware defaults when picking the initial model.
+
 `dev:openai` and `dev:ollama` run `doctor:runtime` first and only launch the app if checks pass. For `dev:ollama`, make sure Ollama is running locally before launch.
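For reference, the persisted profile that `profile:init` and `profile:auto` write is a small JSON file. A sketch of a local Ollama profile, assuming the `ProfileFile` shape defined in `scripts/provider-bootstrap.ts` (the model name and timestamp values here are illustrative):

```json
{
  "profile": "ollama",
  "env": {
    "OPENAI_BASE_URL": "http://localhost:11434/v1",
    "OPENAI_MODEL": "qwen2.5-coder:7b"
  },
  "createdAt": "2026-04-01T05:40:51.000Z"
}
```

An OpenAI profile additionally carries `OPENAI_API_KEY`.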
diff --git a/package.json b/package.json
index 15f9f348..ab44903f 100644
--- a/package.json
+++ b/package.json
@@ -20,11 +20,14 @@
     "dev:ollama": "bun run scripts/provider-launch.ts ollama",
     "dev:ollama:fast": "bun run scripts/provider-launch.ts ollama --fast --bare",
     "profile:init": "bun run scripts/provider-bootstrap.ts",
+    "profile:recommend": "bun run scripts/provider-recommend.ts",
+    "profile:auto": "bun run scripts/provider-recommend.ts --apply",
     "profile:fast": "bun run profile:init -- --provider ollama --model llama3.2:3b",
     "profile:code": "bun run profile:init -- --provider ollama --model qwen2.5-coder:7b",
     "dev:fast": "bun run profile:fast && bun run dev:ollama:fast",
     "dev:code": "bun run profile:code && bun run dev:profile",
     "start": "node dist/cli.mjs",
+    "test:provider-recommendation": "node --test --experimental-strip-types src/utils/providerRecommendation.test.ts",
     "typecheck": "tsc --noEmit",
     "smoke": "bun run build && node dist/cli.mjs --version",
     "doctor:runtime": "bun run scripts/system-check.ts",
diff --git a/scripts/provider-bootstrap.ts b/scripts/provider-bootstrap.ts
index 7c066a00..31915b39 100644
--- a/scripts/provider-bootstrap.ts
+++ b/scripts/provider-bootstrap.ts
@@ -1,6 +1,16 @@
 // @ts-nocheck
 import { writeFileSync } from 'node:fs'
 import { resolve } from 'node:path'
+import {
+  getGoalDefaultOpenAIModel,
+  normalizeRecommendationGoal,
+  recommendOllamaModel,
+} from '../src/utils/providerRecommendation.ts'
+import {
+  getOllamaChatBaseUrl,
+  hasLocalOllama,
+  listOllamaModels,
+} from './provider-discovery.ts'
 
 type ProviderProfile = 'openai' | 'ollama'
 
@@ -27,51 +37,55 @@ function parseProviderArg(): ProviderProfile | 'auto' {
   return 'auto'
 }
 
-async function hasLocalOllama(): Promise<boolean> {
-  const endpoint = 'http://localhost:11434/api/tags'
-  const controller = new AbortController()
-  const timeout = setTimeout(() => controller.abort(), 1200)
-
-  try {
-    const response = await fetch(endpoint, {
-      method: 'GET',
-      signal: controller.signal,
-    })
-    return response.ok
-  } catch {
-    return false
-  } finally {
-    clearTimeout(timeout)
-  }
-}
-
 function sanitizeApiKey(key: string | null): string | undefined {
   if (!key || key === 'SUA_CHAVE') return undefined
   return key
 }
 
+async function resolveOllamaModel(
+  argModel: string | null,
+  argBaseUrl: string | null,
+  goal: ReturnType<typeof normalizeRecommendationGoal>,
+): Promise<string> {
+  if (argModel) return argModel
+
+  const discovered = await listOllamaModels(argBaseUrl || undefined)
+  const recommended = recommendOllamaModel(discovered, goal)
+  if (recommended) {
+    return recommended.name
+  }
+
+  return process.env.OPENAI_MODEL || 'llama3.1:8b'
+}
+
 async function main(): Promise<void> {
   const provider = parseProviderArg()
   const argModel = parseArg('--model')
   const argBaseUrl = parseArg('--base-url')
   const argApiKey = parseArg('--api-key')
+  const goal = normalizeRecommendationGoal(
+    parseArg('--goal') || process.env.OPENCLAUDE_PROFILE_GOAL,
+  )
 
   let selected: ProviderProfile
   if (provider === 'auto') {
-    selected = (await hasLocalOllama()) ? 'ollama' : 'openai'
+    selected = (await hasLocalOllama(argBaseUrl || undefined)) ? 'ollama' : 'openai'
   } else {
     selected = provider
   }
 
   const env: ProfileFile['env'] = {}
   if (selected === 'ollama') {
-    env.OPENAI_BASE_URL = argBaseUrl || 'http://localhost:11434/v1'
-    env.OPENAI_MODEL = argModel || process.env.OPENAI_MODEL || 'llama3.1:8b'
+    env.OPENAI_BASE_URL = getOllamaChatBaseUrl(argBaseUrl || undefined)
+    env.OPENAI_MODEL = await resolveOllamaModel(argModel, argBaseUrl, goal)
     const key = sanitizeApiKey(argApiKey || process.env.OPENAI_API_KEY || null)
     if (key) env.OPENAI_API_KEY = key
   } else {
     env.OPENAI_BASE_URL = argBaseUrl || process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1'
-    env.OPENAI_MODEL = argModel || process.env.OPENAI_MODEL || 'gpt-4o'
+    env.OPENAI_MODEL =
+      argModel ||
+      process.env.OPENAI_MODEL ||
+      getGoalDefaultOpenAIModel(goal)
     const key = sanitizeApiKey(argApiKey || process.env.OPENAI_API_KEY || null)
     if (!key) {
       console.error('OpenAI profile requires a real API key. Use --api-key or set OPENAI_API_KEY.')
       process.exit(1)
@@ -90,6 +104,8 @@ async function main(): Promise<void> {
   writeFileSync(outputPath, JSON.stringify(profile, null, 2), 'utf8')
 
   console.log(`Saved profile: ${selected}`)
+  console.log(`Goal: ${goal}`)
+  console.log(`Model: ${profile.env.OPENAI_MODEL}`)
   console.log(`Path: ${outputPath}`)
   console.log('Next: bun run dev:profile')
 }
diff --git a/scripts/provider-discovery.ts b/scripts/provider-discovery.ts
new file mode 100644
index 00000000..9e3aacda
--- /dev/null
+++ b/scripts/provider-discovery.ts
@@ -0,0 +1,129 @@
+import type { OllamaModelDescriptor } from '../src/utils/providerRecommendation.ts'
+
+export const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434'
+
+function withTimeoutSignal(timeoutMs: number): {
+  signal: AbortSignal
+  clear: () => void
+} {
+  const controller = new AbortController()
+  const timeout = setTimeout(() => controller.abort(), timeoutMs)
+  return {
+    signal: controller.signal,
+    clear: () => clearTimeout(timeout),
+  }
+}
+
+function trimTrailingSlash(value: string): string {
+  return value.replace(/\/+$/, '')
+}
+
+export function getOllamaApiBaseUrl(baseUrl?: string): string {
+  const parsed = new URL(
+    baseUrl || process.env.OLLAMA_BASE_URL || DEFAULT_OLLAMA_BASE_URL,
+  )
+  const pathname = trimTrailingSlash(parsed.pathname)
+  parsed.pathname = pathname.endsWith('/v1')
+    ? pathname.slice(0, -3) || '/'
+    : pathname || '/'
+  parsed.search = ''
+  parsed.hash = ''
+  return trimTrailingSlash(parsed.toString())
+}
+
+export function getOllamaChatBaseUrl(baseUrl?: string): string {
+  return `${getOllamaApiBaseUrl(baseUrl)}/v1`
+}
+
+export async function hasLocalOllama(baseUrl?: string): Promise<boolean> {
+  const { signal, clear } = withTimeoutSignal(1200)
+  try {
+    const response = await fetch(`${getOllamaApiBaseUrl(baseUrl)}/api/tags`, {
+      method: 'GET',
+      signal,
+    })
+    return response.ok
+  } catch {
+    return false
+  } finally {
+    clear()
+  }
+}
+
+export async function listOllamaModels(
+  baseUrl?: string,
+): Promise<OllamaModelDescriptor[]> {
+  const { signal, clear } = withTimeoutSignal(5000)
+  try {
+    const response = await fetch(`${getOllamaApiBaseUrl(baseUrl)}/api/tags`, {
+      method: 'GET',
+      signal,
+    })
+    if (!response.ok) {
+      return []
+    }
+
+    const data = await response.json() as {
+      models?: Array<{
+        name?: string
+        size?: number
+        details?: {
+          family?: string
+          families?: string[]
+          parameter_size?: string
+          quantization_level?: string
+        }
+      }>
+    }
+
+    return (data.models ?? [])
+      .filter(model => Boolean(model.name))
+      .map(model => ({
+        name: model.name!,
+        sizeBytes: typeof model.size === 'number' ?
model.size : null, + family: model.details?.family ?? null, + families: model.details?.families ?? [], + parameterSize: model.details?.parameter_size ?? null, + quantizationLevel: model.details?.quantization_level ?? null, + })) + } catch { + return [] + } finally { + clear() + } +} + +export async function benchmarkOllamaModel( + modelName: string, + baseUrl?: string, +): Promise { + const start = Date.now() + const { signal, clear } = withTimeoutSignal(20000) + try { + const response = await fetch(`${getOllamaApiBaseUrl(baseUrl)}/api/chat`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + signal, + body: JSON.stringify({ + model: modelName, + stream: false, + messages: [{ role: 'user', content: 'Reply with OK.' }], + options: { + temperature: 0, + num_predict: 8, + }, + }), + }) + if (!response.ok) { + return null + } + await response.json() + return Date.now() - start + } catch { + return null + } finally { + clear() + } +} diff --git a/scripts/provider-launch.ts b/scripts/provider-launch.ts index fa103d53..26666072 100644 --- a/scripts/provider-launch.ts +++ b/scripts/provider-launch.ts @@ -2,6 +2,16 @@ import { spawn } from 'node:child_process' import { existsSync, readFileSync } from 'node:fs' import { resolve } from 'node:path' +import { + getGoalDefaultOpenAIModel, + normalizeRecommendationGoal, + recommendOllamaModel, +} from '../src/utils/providerRecommendation.ts' +import { + getOllamaChatBaseUrl, + hasLocalOllama, + listOllamaModels, +} from './provider-discovery.ts' type ProviderProfile = 'openai' | 'ollama' @@ -18,20 +28,29 @@ type LaunchOptions = { requestedProfile: ProviderProfile | 'auto' | null passthroughArgs: string[] fast: boolean + goal: ReturnType } function parseLaunchOptions(argv: string[]): LaunchOptions { let requestedProfile: ProviderProfile | 'auto' | null = 'auto' const passthroughArgs: string[] = [] let fast = false + let goal = normalizeRecommendationGoal(process.env.OPENCLAUDE_PROFILE_GOAL) - for (const arg of argv) { + for (let i = 0; i < argv.length; i++) { + const arg = argv[i]! const lower = arg.toLowerCase() if (lower === '--fast') { fast = true continue } + if (lower === '--goal') { + goal = normalizeRecommendationGoal(argv[i + 1] ?? 
null) + i++ + continue + } + if ((lower === 'auto' || lower === 'openai' || lower === 'ollama') && requestedProfile === 'auto') { requestedProfile = lower as ProviderProfile | 'auto' continue @@ -54,6 +73,7 @@ function parseLaunchOptions(argv: string[]): LaunchOptions { requestedProfile, passthroughArgs, fast, + goal, } } @@ -71,18 +91,12 @@ function loadPersistedProfile(): ProfileFile | null { } } -async function hasLocalOllama(): Promise { - const endpoint = 'http://localhost:11434/api/tags' - const controller = new AbortController() - const timeout = setTimeout(() => controller.abort(), 1200) - try { - const response = await fetch(endpoint, { signal: controller.signal }) - return response.ok - } catch { - return false - } finally { - clearTimeout(timeout) - } +async function resolveOllamaDefaultModel( + goal: ReturnType, +): Promise { + const models = await listOllamaModels() + const recommended = recommendOllamaModel(models, goal) + return recommended?.name || process.env.OPENAI_MODEL || 'llama3.1:8b' } function runCommand(command: string, env: NodeJS.ProcessEnv): Promise { @@ -99,7 +113,11 @@ function runCommand(command: string, env: NodeJS.ProcessEnv): Promise { }) } -function buildEnv(profile: ProviderProfile, persisted: ProfileFile | null): NodeJS.ProcessEnv { +async function buildEnv( + profile: ProviderProfile, + persisted: ProfileFile | null, + goal: ReturnType, +): Promise { const persistedEnv = persisted?.env ?? {} const env: NodeJS.ProcessEnv = { ...process.env, @@ -107,8 +125,14 @@ function buildEnv(profile: ProviderProfile, persisted: ProfileFile | null): Node } if (profile === 'ollama') { - env.OPENAI_BASE_URL = persistedEnv.OPENAI_BASE_URL || process.env.OPENAI_BASE_URL || 'http://localhost:11434/v1' - env.OPENAI_MODEL = persistedEnv.OPENAI_MODEL || process.env.OPENAI_MODEL || 'llama3.1:8b' + env.OPENAI_BASE_URL = + persistedEnv.OPENAI_BASE_URL || + process.env.OPENAI_BASE_URL || + getOllamaChatBaseUrl() + env.OPENAI_MODEL = + persistedEnv.OPENAI_MODEL || + process.env.OPENAI_MODEL || + await resolveOllamaDefaultModel(goal) if (!process.env.OPENAI_API_KEY || process.env.OPENAI_API_KEY === 'SUA_CHAVE') { delete env.OPENAI_API_KEY } @@ -116,7 +140,10 @@ function buildEnv(profile: ProviderProfile, persisted: ProfileFile | null): Node } env.OPENAI_BASE_URL = process.env.OPENAI_BASE_URL || persistedEnv.OPENAI_BASE_URL || 'https://api.openai.com/v1' - env.OPENAI_MODEL = process.env.OPENAI_MODEL || persistedEnv.OPENAI_MODEL || 'gpt-4o' + env.OPENAI_MODEL = + process.env.OPENAI_MODEL || + persistedEnv.OPENAI_MODEL || + getGoalDefaultOpenAIModel(goal) env.OPENAI_API_KEY = process.env.OPENAI_API_KEY || persistedEnv.OPENAI_API_KEY return env } @@ -165,7 +192,7 @@ async function main(): Promise { profile = requestedProfile } - const env = buildEnv(profile, persisted) + const env = await buildEnv(profile, persisted, options.goal) if (options.fast) { applyFastFlags(env) } diff --git a/scripts/provider-recommend.ts b/scripts/provider-recommend.ts new file mode 100644 index 00000000..8cfdc883 --- /dev/null +++ b/scripts/provider-recommend.ts @@ -0,0 +1,277 @@ +// @ts-nocheck +import { writeFileSync } from 'node:fs' +import { resolve } from 'node:path' + +import { + applyBenchmarkLatency, + getGoalDefaultOpenAIModel, + normalizeRecommendationGoal, + rankOllamaModels, + type BenchmarkedOllamaModel, + type RecommendationGoal, +} from '../src/utils/providerRecommendation.ts' +import { + benchmarkOllamaModel, + getOllamaChatBaseUrl, + hasLocalOllama, + listOllamaModels, +} from 
'./provider-discovery.ts' + +type ProviderProfile = 'openai' | 'ollama' + +type ProfileFile = { + profile: ProviderProfile + env: { + OPENAI_BASE_URL?: string + OPENAI_MODEL?: string + OPENAI_API_KEY?: string + } + createdAt: string +} + +type CliOptions = { + apply: boolean + benchmark: boolean + goal: RecommendationGoal + json: boolean + provider: ProviderProfile | 'auto' + baseUrl: string | null +} + +function parseOptions(argv: string[]): CliOptions { + const options: CliOptions = { + apply: false, + benchmark: false, + goal: normalizeRecommendationGoal(process.env.OPENCLAUDE_PROFILE_GOAL), + json: false, + provider: 'auto', + baseUrl: null, + } + + for (let i = 0; i < argv.length; i++) { + const arg = argv[i]?.toLowerCase() + if (!arg) continue + + if (arg === '--apply') { + options.apply = true + continue + } + if (arg === '--benchmark') { + options.benchmark = true + continue + } + if (arg === '--json') { + options.json = true + continue + } + if (arg === '--goal') { + options.goal = normalizeRecommendationGoal(argv[i + 1] ?? null) + i++ + continue + } + if (arg === '--provider') { + const provider = argv[i + 1]?.toLowerCase() + if ( + provider === 'openai' || + provider === 'ollama' || + provider === 'auto' + ) { + options.provider = provider + } + i++ + continue + } + if (arg === '--base-url') { + options.baseUrl = argv[i + 1] ?? null + i++ + } + } + + return options +} + +function sanitizeApiKey(key: string | undefined): string | undefined { + if (!key || key === 'SUA_CHAVE') return undefined + return key +} + +function printHumanSummary(payload: { + goal: RecommendationGoal + recommendedProfile: ProviderProfile + recommendedModel: string + rankedModels: BenchmarkedOllamaModel[] + benchmarked: boolean + applied: boolean +}): void { + console.log(`Recommendation goal: ${payload.goal}`) + console.log(`Recommended profile: ${payload.recommendedProfile}`) + console.log(`Recommended model: ${payload.recommendedModel}`) + + if (payload.rankedModels.length > 0) { + console.log('\nRanked Ollama models:') + for (const [index, model] of payload.rankedModels.slice(0, 5).entries()) { + const benchmarkPart = + payload.benchmarked && model.benchmarkMs !== null + ? ` | ${Math.round(model.benchmarkMs)}ms` + : '' + console.log( + `${index + 1}. ${model.name} | score=${model.score}${benchmarkPart} | ${model.summary}`, + ) + } + } + + if (payload.applied) { + console.log('\nSaved .openclaude-profile.json with the recommended profile.') + console.log('Next: bun run dev:profile') + } else { + console.log( + '\nTip: run `bun run profile:auto -- --goal ' + + payload.goal + + '` to apply this automatically.', + ) + } +} + +async function maybeApplyProfile( + profile: ProviderProfile, + model: string, + goal: RecommendationGoal, + baseUrl: string | null, +): Promise { + const env: ProfileFile['env'] = {} + if (profile === 'ollama') { + env.OPENAI_BASE_URL = getOllamaChatBaseUrl(baseUrl ?? 
undefined) + env.OPENAI_MODEL = model + const key = sanitizeApiKey(process.env.OPENAI_API_KEY) + if (key) env.OPENAI_API_KEY = key + } else { + const key = sanitizeApiKey(process.env.OPENAI_API_KEY) + if (!key) { + console.error('Cannot apply an OpenAI profile without OPENAI_API_KEY.') + return false + } + env.OPENAI_BASE_URL = + process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1' + env.OPENAI_MODEL = model || getGoalDefaultOpenAIModel(goal) + env.OPENAI_API_KEY = key + } + + const profileFile: ProfileFile = { + profile, + env, + createdAt: new Date().toISOString(), + } + + writeFileSync( + resolve(process.cwd(), '.openclaude-profile.json'), + JSON.stringify(profileFile, null, 2), + 'utf8', + ) + return true +} + +async function main(): Promise { + const options = parseOptions(process.argv.slice(2)) + const ollamaAvailable = + options.provider !== 'openai' && + (await hasLocalOllama(options.baseUrl ?? undefined)) + const ollamaModels = ollamaAvailable + ? await listOllamaModels(options.baseUrl ?? undefined) + : [] + + const heuristicRanked = rankOllamaModels(ollamaModels, options.goal) + const benchmarkInput = options.benchmark ? heuristicRanked.slice(0, 3) : [] + + const benchmarkResults: Record = {} + for (const model of benchmarkInput) { + benchmarkResults[model.name] = await benchmarkOllamaModel( + model.name, + options.baseUrl ?? undefined, + ) + } + + const rankedModels: BenchmarkedOllamaModel[] = options.benchmark + ? applyBenchmarkLatency(heuristicRanked, benchmarkResults, options.goal) + : heuristicRanked.map(model => ({ + ...model, + benchmarkMs: null, + })) + + const recommendedOllama = rankedModels[0] ?? null + const openAIConfigured = Boolean(sanitizeApiKey(process.env.OPENAI_API_KEY)) + + let recommendedProfile: ProviderProfile + let recommendedModel: string + + if (options.provider === 'openai') { + recommendedProfile = 'openai' + recommendedModel = getGoalDefaultOpenAIModel(options.goal) + } else if (options.provider === 'ollama') { + if (!recommendedOllama) { + console.error( + 'No Ollama models were discovered. 
Pull a model first or switch to --provider openai.', + ) + process.exit(1) + } + recommendedProfile = 'ollama' + recommendedModel = recommendedOllama.name + } else if (recommendedOllama) { + recommendedProfile = 'ollama' + recommendedModel = recommendedOllama.name + } else { + recommendedProfile = 'openai' + recommendedModel = getGoalDefaultOpenAIModel(options.goal) + } + + let applied = false + if (options.apply) { + applied = await maybeApplyProfile( + recommendedProfile, + recommendedModel, + options.goal, + options.baseUrl, + ) + if (!applied) { + process.exit(1) + } + } + + const payload = { + goal: options.goal, + provider: options.provider, + ollamaAvailable, + openAIConfigured, + recommendedProfile, + recommendedModel, + benchmarked: options.benchmark, + rankedModels, + applied, + } + + if (options.json) { + console.log(JSON.stringify(payload, null, 2)) + return + } + + printHumanSummary({ + goal: options.goal, + recommendedProfile, + recommendedModel, + rankedModels, + benchmarked: options.benchmark, + applied, + }) + + if (!recommendedOllama && !openAIConfigured) { + console.log( + '\nNo local Ollama model was detected and OPENAI_API_KEY is unset.', + ) + console.log( + 'Next steps: `ollama pull qwen2.5-coder:7b` or set OPENAI_API_KEY.', + ) + } +} + +await main() + +export {} diff --git a/src/utils/providerRecommendation.test.ts b/src/utils/providerRecommendation.test.ts new file mode 100644 index 00000000..986e403f --- /dev/null +++ b/src/utils/providerRecommendation.test.ts @@ -0,0 +1,118 @@ +import assert from 'node:assert/strict' +import test from 'node:test' + +import { + applyBenchmarkLatency, + getGoalDefaultOpenAIModel, + normalizeRecommendationGoal, + rankOllamaModels, + recommendOllamaModel, + type OllamaModelDescriptor, +} from './providerRecommendation.ts' + +function model( + name: string, + overrides: Partial = {}, +): OllamaModelDescriptor { + return { + name, + sizeBytes: null, + family: null, + families: [], + parameterSize: null, + quantizationLevel: null, + ...overrides, + } +} + +test('normalizes recommendation goals safely', () => { + assert.equal(normalizeRecommendationGoal('coding'), 'coding') + assert.equal(normalizeRecommendationGoal(' LATENCY '), 'latency') + assert.equal(normalizeRecommendationGoal('weird'), 'balanced') + assert.equal(normalizeRecommendationGoal(undefined), 'balanced') +}) + +test('coding goal prefers coding-oriented ollama models', () => { + const recommended = recommendOllamaModel( + [ + model('llama3.1:8b', { + parameterSize: '8B', + quantizationLevel: 'Q4_K_M', + }), + model('qwen2.5-coder:7b', { + parameterSize: '7B', + quantizationLevel: 'Q4_K_M', + }), + ], + 'coding', + ) + + assert.equal(recommended?.name, 'qwen2.5-coder:7b') +}) + +test('latency goal prefers smaller models', () => { + const recommended = recommendOllamaModel( + [ + model('llama3.1:70b', { + parameterSize: '70B', + quantizationLevel: 'Q4_K_M', + }), + model('llama3.2:3b', { + parameterSize: '3B', + quantizationLevel: 'Q4_K_M', + }), + ], + 'latency', + ) + + assert.equal(recommended?.name, 'llama3.2:3b') +}) + +test('non-chat embedding models are heavily demoted', () => { + const ranked = rankOllamaModels( + [ + model('nomic-embed-text', { parameterSize: '0.5B' }), + model('mistral:7b-instruct', { + parameterSize: '7B', + quantizationLevel: 'Q4_K_M', + }), + ], + 'balanced', + ) + + assert.equal(ranked[0]?.name, 'mistral:7b-instruct') +}) + +test('benchmark latency can reorder close recommendations', () => { + const ranked = rankOllamaModels( + [ + 
model('llama3.1:8b', { + parameterSize: '8B', + quantizationLevel: 'Q4_K_M', + }), + model('mistral:7b-instruct', { + parameterSize: '7B', + quantizationLevel: 'Q4_K_M', + }), + ], + 'latency', + ) + + const benchmarked = applyBenchmarkLatency( + ranked, + { + 'llama3.1:8b': 2000, + 'mistral:7b-instruct': 350, + }, + 'latency', + ) + + assert.equal(benchmarked[0]?.name, 'mistral:7b-instruct') + assert.equal(benchmarked[0]?.benchmarkMs, 350) +}) + +test('goal defaults choose sensible openai models', () => { + assert.equal(getGoalDefaultOpenAIModel('latency'), 'gpt-4o-mini') + assert.equal(getGoalDefaultOpenAIModel('balanced'), 'gpt-4o') + assert.equal(getGoalDefaultOpenAIModel('coding'), 'gpt-4o') +}) diff --git a/src/utils/providerRecommendation.ts b/src/utils/providerRecommendation.ts new file mode 100644 index 00000000..e49c37aa --- /dev/null +++ b/src/utils/providerRecommendation.ts @@ -0,0 +1,297 @@ +export type RecommendationGoal = 'latency' | 'balanced' | 'coding' + +export type OllamaModelDescriptor = { + name: string + sizeBytes?: number | null + family?: string | null + families?: string[] + parameterSize?: string | null + quantizationLevel?: string | null +} + +export type RankedOllamaModel = OllamaModelDescriptor & { + score: number + reasons: string[] + summary: string +} + +export type BenchmarkedOllamaModel = RankedOllamaModel & { + benchmarkMs: number | null +} + +const CODING_HINTS = [ + 'coder', + 'codellama', + 'codegemma', + 'starcoder', + 'deepseek-coder', + 'qwen2.5-coder', + 'qwen-coder', +] + +const GENERAL_HINTS = [ + 'llama', + 'qwen', + 'mistral', + 'gemma', + 'phi', + 'deepseek', +] + +const INSTRUCT_HINTS = ['instruct', 'chat', 'assistant'] +const NON_CHAT_HINTS = ['embed', 'embedding', 'rerank', 'bge', 'whisper'] + +function modelHaystack(model: OllamaModelDescriptor): string { + return [ + model.name, + model.family ?? '', + ...(model.families ?? []), + model.parameterSize ?? '', + model.quantizationLevel ?? '', + ] + .join(' ') + .toLowerCase() +} + +function includesAny(text: string, needles: string[]): boolean { + return needles.some(needle => text.includes(needle)) +} + +function inferParameterBillions(model: OllamaModelDescriptor): number | null { + const text = `${model.parameterSize ?? ''} ${model.name}`.toLowerCase() + const match = text.match(/(\d+(?:\.\d+)?)\s*b\b/) + if (match?.[1]) { + return Number(match[1]) + } + if (typeof model.sizeBytes === 'number' && model.sizeBytes > 0) { + return Number((model.sizeBytes / 1_000_000_000).toFixed(1)) + } + return null +} + +function quantizationBucket(model: OllamaModelDescriptor): string { + return (model.quantizationLevel ?? model.name).toLowerCase() +} + +function scoreSizeTier( + paramsB: number | null, + goal: RecommendationGoal, + reasons: string[], +): number { + if (paramsB === null) { + reasons.push('unknown size') + return 0 + } + + if (goal === 'latency') { + if (paramsB <= 4) { + reasons.push('tiny model for low latency') + return 32 + } + if (paramsB <= 8) { + reasons.push('small model for fast responses') + return 26 + } + if (paramsB <= 14) { + reasons.push('mid-sized model with acceptable latency') + return 16 + } + if (paramsB <= 24) { + reasons.push('larger model may be slower') + return 8 + } + reasons.push('large model likely slower locally') + return paramsB <= 40 ? 
0 : -8 + } + + if (goal === 'coding') { + if (paramsB >= 7 && paramsB <= 14) { + reasons.push('strong coding size tier') + return 24 + } + if (paramsB > 14 && paramsB <= 34) { + reasons.push('large coding-capable size tier') + return 28 + } + if (paramsB > 34) { + reasons.push('very large model with higher quality potential') + return 18 + } + reasons.push('compact model may trade off coding depth') + return 12 + } + + if (paramsB >= 7 && paramsB <= 14) { + reasons.push('great balanced size tier') + return 26 + } + if (paramsB >= 3 && paramsB < 7) { + reasons.push('compact balanced size tier') + return 18 + } + if (paramsB > 14 && paramsB <= 24) { + reasons.push('high quality balanced size tier') + return 20 + } + if (paramsB > 24) { + reasons.push('large model for quality-first usage') + return 10 + } + reasons.push('very small model for general usage') + return 8 +} + +function scoreQuantization( + model: OllamaModelDescriptor, + goal: RecommendationGoal, + reasons: string[], +): number { + const quant = quantizationBucket(model) + if (quant.includes('q4')) { + reasons.push('efficient Q4 quantization') + return goal === 'latency' ? 8 : 4 + } + if (quant.includes('q5')) { + reasons.push('balanced Q5 quantization') + return goal === 'latency' ? 6 : 5 + } + if (quant.includes('q8')) { + reasons.push('higher quality Q8 quantization') + return goal === 'latency' ? 2 : 5 + } + return 0 +} + +function compareRankedModels( + a: RankedOllamaModel | BenchmarkedOllamaModel, + b: RankedOllamaModel | BenchmarkedOllamaModel, + goal: RecommendationGoal, +): number { + if (b.score !== a.score) { + return b.score - a.score + } + + const aSize = inferParameterBillions(a) ?? Number.POSITIVE_INFINITY + const bSize = inferParameterBillions(b) ?? Number.POSITIVE_INFINITY + + if (goal === 'latency') { + return aSize - bSize + } + + if (goal === 'coding') { + return bSize - aSize + } + + const target = 14 + return Math.abs(aSize - target) - Math.abs(bSize - target) +} + +export function normalizeRecommendationGoal( + goal: string | null | undefined, +): RecommendationGoal { + const normalized = goal?.trim().toLowerCase() + if ( + normalized === 'latency' || + normalized === 'balanced' || + normalized === 'coding' + ) { + return normalized + } + return 'balanced' +} + +export function getGoalDefaultOpenAIModel(goal: RecommendationGoal): string { + switch (goal) { + case 'latency': + return 'gpt-4o-mini' + case 'coding': + return 'gpt-4o' + case 'balanced': + default: + return 'gpt-4o' + } +} + +export function rankOllamaModels( + models: OllamaModelDescriptor[], + goal: RecommendationGoal, +): RankedOllamaModel[] { + return models + .map(model => { + const haystack = modelHaystack(model) + const reasons: string[] = [] + let score = 0 + + if (includesAny(haystack, NON_CHAT_HINTS)) { + score -= 40 + reasons.push('not a chat-first model') + } + + if (includesAny(haystack, CODING_HINTS)) { + score += goal === 'coding' ? 24 : goal === 'balanced' ? 10 : 4 + reasons.push('coding-oriented model family') + } + + if (includesAny(haystack, GENERAL_HINTS)) { + score += goal === 'latency' ? 4 : goal === 'coding' ? 6 : 8 + reasons.push('strong general-purpose model family') + } + + if (includesAny(haystack, INSTRUCT_HINTS)) { + score += goal === 'latency' ? 
2 : 6 + reasons.push('chat/instruct tuned') + } + + if (haystack.includes('vision') || haystack.includes('vl')) { + score -= 2 + reasons.push('vision model adds extra overhead') + } + + score += scoreSizeTier(inferParameterBillions(model), goal, reasons) + score += scoreQuantization(model, goal, reasons) + + const summary = reasons.slice(0, 3).join(', ') + return { + ...model, + score, + reasons, + summary, + } + }) + .sort((a, b) => compareRankedModels(a, b, goal)) +} + +export function recommendOllamaModel( + models: OllamaModelDescriptor[], + goal: RecommendationGoal, +): RankedOllamaModel | null { + return rankOllamaModels(models, goal)[0] ?? null +} + +export function applyBenchmarkLatency( + models: RankedOllamaModel[], + benchmarkMs: Record, + goal: RecommendationGoal, +): BenchmarkedOllamaModel[] { + const divisor = + goal === 'latency' ? 120 : goal === 'coding' ? 500 : 240 + + return models + .map(model => { + const latency = benchmarkMs[model.name] ?? null + const benchmarkPenalty = latency === null ? 0 : latency / divisor + const reasons = + latency === null + ? model.reasons + : [`benchmarked at ${Math.round(latency)}ms`, ...model.reasons] + + return { + ...model, + benchmarkMs: latency, + reasons, + summary: reasons.slice(0, 3).join(', '), + score: Number((model.score - benchmarkPenalty).toFixed(2)), + } + }) + .sort((a, b) => compareRankedModels(a, b, goal)) +} From 8fe03cba57322dd30421fa892adaefe8865b76a4 Mon Sep 17 00:00:00 2001 From: Vasanthdev2004 Date: Wed, 1 Apr 2026 11:55:24 +0530 Subject: [PATCH 2/2] fix: harden provider recommendation safety --- PLAYBOOK.md | 14 +-- README.md | 9 +- package.json | 2 +- scripts/provider-bootstrap.ts | 91 +++++++++-------- scripts/provider-launch.ts | 79 ++++++--------- scripts/provider-recommend.ts | 63 ++++++------ src/utils/providerProfile.test.ts | 92 +++++++++++++++++ src/utils/providerProfile.ts | 123 +++++++++++++++++++++++ src/utils/providerRecommendation.test.ts | 76 ++++++++++++++ src/utils/providerRecommendation.ts | 26 ++++- 10 files changed, 434 insertions(+), 141 deletions(-) create mode 100644 src/utils/providerProfile.test.ts create mode 100644 src/utils/providerProfile.ts diff --git a/PLAYBOOK.md b/PLAYBOOK.md index dfdaec76..076c377f 100644 --- a/PLAYBOOK.md +++ b/PLAYBOOK.md @@ -183,10 +183,10 @@ Fix: bun run profile:init -- --provider ollama --model llama3.1:8b ``` -Or auto-pick a local profile: +Or pick a local Ollama profile automatically by goal: ```powershell -bun run profile:auto -- --goal balanced +bun run profile:init -- --provider ollama --goal balanced ``` ## 6.5 Placeholder key (`SUA_CHAVE`) error @@ -220,14 +220,16 @@ bun run profile:fast # llama3.2:3b bun run profile:code # qwen2.5-coder:7b ``` -Goal-based auto-selection: +Goal-based local auto-selection: ```powershell -bun run profile:auto -- --goal latency -bun run profile:auto -- --goal balanced -bun run profile:auto -- --goal coding +bun run profile:init -- --provider ollama --goal latency +bun run profile:init -- --provider ollama --goal balanced +bun run profile:init -- --provider ollama --goal coding ``` +`profile:auto` is a best-available provider picker, not a local-only command. Use `--provider ollama` when you want to stay on a local model. + ## 8. 
Practical Prompt Playbook (Copy/Paste) ## 8.1 Code understanding diff --git a/README.md b/README.md index 358bf95d..cef1c5cb 100644 --- a/README.md +++ b/README.md @@ -187,7 +187,7 @@ bun run doctor:runtime:json # persist a diagnostics report to reports/doctor-runtime.json bun run doctor:report -# full local hardening check (typecheck + smoke + runtime doctor) +# full local hardening check (smoke + runtime doctor) bun run hardening:check # strict hardening (includes project-wide typecheck) @@ -203,13 +203,13 @@ Notes: Use profile launchers to avoid repeated environment setup: ```bash -# one-time profile bootstrap (auto-detect ollama, otherwise openai) +# one-time profile bootstrap (best available provider) bun run profile:init # preview the best provider/model for your goal bun run profile:recommend -- --goal coding --benchmark -# auto-apply the best available profile for your goal +# auto-apply the best available provider/model for your goal bun run profile:auto -- --goal latency # openai bootstrap with explicit key @@ -234,6 +234,9 @@ bun run dev:ollama `profile:recommend` ranks installed Ollama models for `latency`, `balanced`, or `coding`, and `profile:auto` can persist the recommendation directly. If no profile exists yet, `dev:profile` now uses the same goal-aware defaults when picking the initial model. +Use `--provider ollama` when you want a local-only path. Auto mode falls back to OpenAI when no viable local chat model is installed. +Goal-based Ollama selection only recommends among models that are already installed and reachable from Ollama. + `dev:openai` and `dev:ollama` run `doctor:runtime` first and only launch the app if checks pass. For `dev:ollama`, make sure Ollama is running locally before launch. diff --git a/package.json b/package.json index ab44903f..6e28e367 100644 --- a/package.json +++ b/package.json @@ -27,7 +27,7 @@ "dev:fast": "bun run profile:fast && bun run dev:ollama:fast", "dev:code": "bun run profile:code && bun run dev:profile", "start": "node dist/cli.mjs", - "test:provider-recommendation": "node --test --experimental-strip-types src/utils/providerRecommendation.test.ts", + "test:provider-recommendation": "node --test --experimental-strip-types src/utils/providerRecommendation.test.ts src/utils/providerProfile.test.ts", "typecheck": "tsc --noEmit", "smoke": "bun run build && node dist/cli.mjs --version", "doctor:runtime": "bun run scripts/system-check.ts", diff --git a/scripts/provider-bootstrap.ts b/scripts/provider-bootstrap.ts index 31915b39..7fc3ff55 100644 --- a/scripts/provider-bootstrap.ts +++ b/scripts/provider-bootstrap.ts @@ -6,24 +6,20 @@ import { normalizeRecommendationGoal, recommendOllamaModel, } from '../src/utils/providerRecommendation.ts' +import { + buildOllamaProfileEnv, + buildOpenAIProfileEnv, + createProfileFile, + selectAutoProfile, + type ProfileFile, + type ProviderProfile, +} from '../src/utils/providerProfile.ts' import { getOllamaChatBaseUrl, hasLocalOllama, listOllamaModels, } from './provider-discovery.ts' -type ProviderProfile = 'openai' | 'ollama' - -type ProfileFile = { - profile: ProviderProfile - env: { - OPENAI_BASE_URL?: string - OPENAI_MODEL?: string - OPENAI_API_KEY?: string - } - createdAt: string -} - function parseArg(name: string): string | null { const args = process.argv.slice(2) const idx = args.indexOf(name) @@ -37,25 +33,16 @@ function parseProviderArg(): ProviderProfile | 'auto' { return 'auto' } -function sanitizeApiKey(key: string | null): string | undefined { - if (!key || key === 'SUA_CHAVE') return 
undefined - return key -} - async function resolveOllamaModel( argModel: string | null, argBaseUrl: string | null, goal: ReturnType, -): Promise { +) : Promise { if (argModel) return argModel const discovered = await listOllamaModels(argBaseUrl || undefined) const recommended = recommendOllamaModel(discovered, goal) - if (recommended) { - return recommended.name - } - - return process.env.OPENAI_MODEL || 'llama3.1:8b' + return recommended?.name ?? null } async function main(): Promise { @@ -68,37 +55,57 @@ async function main(): Promise { ) let selected: ProviderProfile + let resolvedOllamaModel: string | null = null if (provider === 'auto') { - selected = (await hasLocalOllama(argBaseUrl || undefined)) ? 'ollama' : 'openai' + if (await hasLocalOllama(argBaseUrl || undefined)) { + resolvedOllamaModel = await resolveOllamaModel(argModel, argBaseUrl, goal) + selected = selectAutoProfile(resolvedOllamaModel) + } else { + selected = 'openai' + } } else { selected = provider } - const env: ProfileFile['env'] = {} + let env: ProfileFile['env'] if (selected === 'ollama') { - env.OPENAI_BASE_URL = getOllamaChatBaseUrl(argBaseUrl || undefined) - env.OPENAI_MODEL = await resolveOllamaModel(argModel, argBaseUrl, goal) - const key = sanitizeApiKey(argApiKey || process.env.OPENAI_API_KEY || null) - if (key) env.OPENAI_API_KEY = key + resolvedOllamaModel ??= await resolveOllamaModel(argModel, argBaseUrl, goal) + if (!resolvedOllamaModel) { + console.error('No viable Ollama chat model was discovered. Pull a chat model first or pass --model explicitly.') + process.exit(1) + } + + env = buildOllamaProfileEnv( + resolvedOllamaModel, + { + baseUrl: argBaseUrl, + getOllamaChatBaseUrl, + }, + ) } else { - env.OPENAI_BASE_URL = argBaseUrl || process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1' - env.OPENAI_MODEL = - argModel || - process.env.OPENAI_MODEL || - getGoalDefaultOpenAIModel(goal) - const key = sanitizeApiKey(argApiKey || process.env.OPENAI_API_KEY || null) - if (!key) { + const builtEnv = buildOpenAIProfileEnv({ + goal, + model: + argModel || + process.env.OPENAI_MODEL || + getGoalDefaultOpenAIModel(goal), + apiKey: argApiKey || process.env.OPENAI_API_KEY || null, + processEnv: { + ...process.env, + OPENAI_BASE_URL: + argBaseUrl || process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1', + }, + }) + + if (!builtEnv) { console.error('OpenAI profile requires a real API key. 
Use --api-key or set OPENAI_API_KEY.') process.exit(1) } - env.OPENAI_API_KEY = key + + env = builtEnv } - const profile: ProfileFile = { - profile: selected, - env, - createdAt: new Date().toISOString(), - } + const profile = createProfileFile(selected, env) const outputPath = resolve(process.cwd(), '.openclaude-profile.json') writeFileSync(outputPath, JSON.stringify(profile, null, 2), 'utf8') diff --git a/scripts/provider-launch.ts b/scripts/provider-launch.ts index 26666072..d4e321c4 100644 --- a/scripts/provider-launch.ts +++ b/scripts/provider-launch.ts @@ -3,27 +3,21 @@ import { spawn } from 'node:child_process' import { existsSync, readFileSync } from 'node:fs' import { resolve } from 'node:path' import { - getGoalDefaultOpenAIModel, normalizeRecommendationGoal, recommendOllamaModel, } from '../src/utils/providerRecommendation.ts' +import { + buildLaunchEnv, + selectAutoProfile, + type ProfileFile, + type ProviderProfile, +} from '../src/utils/providerProfile.ts' import { getOllamaChatBaseUrl, hasLocalOllama, listOllamaModels, } from './provider-discovery.ts' -type ProviderProfile = 'openai' | 'ollama' - -type ProfileFile = { - profile: ProviderProfile - env?: { - OPENAI_BASE_URL?: string - OPENAI_MODEL?: string - OPENAI_API_KEY?: string - } -} - type LaunchOptions = { requestedProfile: ProviderProfile | 'auto' | null passthroughArgs: string[] @@ -93,10 +87,10 @@ function loadPersistedProfile(): ProfileFile | null { async function resolveOllamaDefaultModel( goal: ReturnType, -): Promise { +): Promise { const models = await listOllamaModels() const recommended = recommendOllamaModel(models, goal) - return recommended?.name || process.env.OPENAI_MODEL || 'llama3.1:8b' + return recommended?.name ?? null } function runCommand(command: string, env: NodeJS.ProcessEnv): Promise { @@ -113,41 +107,6 @@ function runCommand(command: string, env: NodeJS.ProcessEnv): Promise { }) } -async function buildEnv( - profile: ProviderProfile, - persisted: ProfileFile | null, - goal: ReturnType, -): Promise { - const persistedEnv = persisted?.env ?? {} - const env: NodeJS.ProcessEnv = { - ...process.env, - CLAUDE_CODE_USE_OPENAI: '1', - } - - if (profile === 'ollama') { - env.OPENAI_BASE_URL = - persistedEnv.OPENAI_BASE_URL || - process.env.OPENAI_BASE_URL || - getOllamaChatBaseUrl() - env.OPENAI_MODEL = - persistedEnv.OPENAI_MODEL || - process.env.OPENAI_MODEL || - await resolveOllamaDefaultModel(goal) - if (!process.env.OPENAI_API_KEY || process.env.OPENAI_API_KEY === 'SUA_CHAVE') { - delete env.OPENAI_API_KEY - } - return env - } - - env.OPENAI_BASE_URL = process.env.OPENAI_BASE_URL || persistedEnv.OPENAI_BASE_URL || 'https://api.openai.com/v1' - env.OPENAI_MODEL = - process.env.OPENAI_MODEL || - persistedEnv.OPENAI_MODEL || - getGoalDefaultOpenAIModel(goal) - env.OPENAI_API_KEY = process.env.OPENAI_API_KEY || persistedEnv.OPENAI_API_KEY - return env -} - function applyFastFlags(env: NodeJS.ProcessEnv): NodeJS.ProcessEnv { env.CLAUDE_CODE_SIMPLE ??= '1' env.CLAUDE_CODE_DISABLE_THINKING ??= '1' @@ -181,18 +140,36 @@ async function main(): Promise { const persisted = loadPersistedProfile() let profile: ProviderProfile + let resolvedOllamaModel: string | null = null if (requestedProfile === 'auto') { if (persisted) { profile = persisted.profile + } else if (await hasLocalOllama()) { + resolvedOllamaModel = await resolveOllamaDefaultModel(options.goal) + profile = selectAutoProfile(resolvedOllamaModel) } else { - profile = (await hasLocalOllama()) ? 
'ollama' : 'openai' + profile = 'openai' } } else { profile = requestedProfile } - const env = await buildEnv(profile, persisted, options.goal) + if (profile === 'ollama' && persisted?.profile !== 'ollama') { + resolvedOllamaModel ??= await resolveOllamaDefaultModel(options.goal) + if (!resolvedOllamaModel) { + console.error('No viable Ollama chat model was discovered. Pull a chat model first or save one with `bun run profile:init -- --provider ollama --model `.') + process.exit(1) + } + } + + const env = await buildLaunchEnv({ + profile, + persisted, + goal: options.goal, + getOllamaChatBaseUrl, + resolveOllamaDefaultModel: async () => resolvedOllamaModel || 'llama3.1:8b', + }) if (options.fast) { applyFastFlags(env) } diff --git a/scripts/provider-recommend.ts b/scripts/provider-recommend.ts index 8cfdc883..eca811e6 100644 --- a/scripts/provider-recommend.ts +++ b/scripts/provider-recommend.ts @@ -5,11 +5,21 @@ import { resolve } from 'node:path' import { applyBenchmarkLatency, getGoalDefaultOpenAIModel, + isViableOllamaChatModel, normalizeRecommendationGoal, rankOllamaModels, + selectRecommendedOllamaModel, type BenchmarkedOllamaModel, type RecommendationGoal, } from '../src/utils/providerRecommendation.ts' +import { + buildOllamaProfileEnv, + buildOpenAIProfileEnv, + createProfileFile, + sanitizeApiKey, + type ProfileFile, + type ProviderProfile, +} from '../src/utils/providerProfile.ts' import { benchmarkOllamaModel, getOllamaChatBaseUrl, @@ -17,18 +27,6 @@ import { listOllamaModels, } from './provider-discovery.ts' -type ProviderProfile = 'openai' | 'ollama' - -type ProfileFile = { - profile: ProviderProfile - env: { - OPENAI_BASE_URL?: string - OPENAI_MODEL?: string - OPENAI_API_KEY?: string - } - createdAt: string -} - type CliOptions = { apply: boolean benchmark: boolean @@ -90,11 +88,6 @@ function parseOptions(argv: string[]): CliOptions { return options } -function sanitizeApiKey(key: string | undefined): string | undefined { - if (!key || key === 'SUA_CHAVE') return undefined - return key -} - function printHumanSummary(payload: { goal: RecommendationGoal recommendedProfile: ProviderProfile @@ -138,29 +131,27 @@ async function maybeApplyProfile( goal: RecommendationGoal, baseUrl: string | null, ): Promise { - const env: ProfileFile['env'] = {} + let env: ProfileFile['env'] | null if (profile === 'ollama') { - env.OPENAI_BASE_URL = getOllamaChatBaseUrl(baseUrl ?? undefined) - env.OPENAI_MODEL = model - const key = sanitizeApiKey(process.env.OPENAI_API_KEY) - if (key) env.OPENAI_API_KEY = key + env = buildOllamaProfileEnv(model, { + baseUrl, + getOllamaChatBaseUrl, + }) } else { - const key = sanitizeApiKey(process.env.OPENAI_API_KEY) - if (!key) { + env = buildOpenAIProfileEnv({ + goal, + model: model || getGoalDefaultOpenAIModel(goal), + apiKey: process.env.OPENAI_API_KEY, + processEnv: process.env, + }) + + if (!env) { console.error('Cannot apply an OpenAI profile without OPENAI_API_KEY.') return false } - env.OPENAI_BASE_URL = - process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1' - env.OPENAI_MODEL = model || getGoalDefaultOpenAIModel(goal) - env.OPENAI_API_KEY = key } - const profileFile: ProfileFile = { - profile, - env, - createdAt: new Date().toISOString(), - } + const profileFile = createProfileFile(profile, env) writeFileSync( resolve(process.cwd(), '.openclaude-profile.json'), @@ -180,7 +171,9 @@ async function main(): Promise { : [] const heuristicRanked = rankOllamaModels(ollamaModels, options.goal) - const benchmarkInput = options.benchmark ? 
heuristicRanked.slice(0, 3) : [] + const benchmarkInput = options.benchmark + ? heuristicRanked.filter(isViableOllamaChatModel).slice(0, 3) + : [] const benchmarkResults: Record = {} for (const model of benchmarkInput) { @@ -197,7 +190,7 @@ async function main(): Promise { benchmarkMs: null, })) - const recommendedOllama = rankedModels[0] ?? null + const recommendedOllama = selectRecommendedOllamaModel(rankedModels) const openAIConfigured = Boolean(sanitizeApiKey(process.env.OPENAI_API_KEY)) let recommendedProfile: ProviderProfile diff --git a/src/utils/providerProfile.test.ts b/src/utils/providerProfile.test.ts new file mode 100644 index 00000000..f549584f --- /dev/null +++ b/src/utils/providerProfile.test.ts @@ -0,0 +1,92 @@ +import assert from 'node:assert/strict' +import test from 'node:test' + +import { + buildLaunchEnv, + buildOllamaProfileEnv, + selectAutoProfile, + type ProfileFile, +} from './providerProfile.ts' + +function profile(profile: ProfileFile['profile'], env: ProfileFile['env']): ProfileFile { + return { + profile, + env, + createdAt: '2026-04-01T00:00:00.000Z', + } +} + +test('matching persisted ollama env is reused for ollama launch', async () => { + const env = await buildLaunchEnv({ + profile: 'ollama', + persisted: profile('ollama', { + OPENAI_BASE_URL: 'http://127.0.0.1:11435/v1', + OPENAI_MODEL: 'mistral:7b-instruct', + }), + goal: 'balanced', + processEnv: {}, + getOllamaChatBaseUrl: () => 'http://localhost:11434/v1', + resolveOllamaDefaultModel: async () => 'llama3.1:8b', + }) + + assert.equal(env.OPENAI_BASE_URL, 'http://127.0.0.1:11435/v1') + assert.equal(env.OPENAI_MODEL, 'mistral:7b-instruct') +}) + +test('ollama launch ignores mismatched persisted openai env and shell model fallback', async () => { + const env = await buildLaunchEnv({ + profile: 'ollama', + persisted: profile('openai', { + OPENAI_BASE_URL: 'https://api.openai.com/v1', + OPENAI_MODEL: 'gpt-4o', + OPENAI_API_KEY: 'sk-persisted', + }), + goal: 'coding', + processEnv: { + OPENAI_BASE_URL: 'https://api.deepseek.com/v1', + OPENAI_MODEL: 'gpt-4o-mini', + }, + getOllamaChatBaseUrl: () => 'http://localhost:11434/v1', + resolveOllamaDefaultModel: async () => 'qwen2.5-coder:7b', + }) + + assert.equal(env.OPENAI_BASE_URL, 'http://localhost:11434/v1') + assert.equal(env.OPENAI_MODEL, 'qwen2.5-coder:7b') +}) + +test('openai launch ignores mismatched persisted ollama env', async () => { + const env = await buildLaunchEnv({ + profile: 'openai', + persisted: profile('ollama', { + OPENAI_BASE_URL: 'http://localhost:11434/v1', + OPENAI_MODEL: 'llama3.1:8b', + }), + goal: 'latency', + processEnv: { + OPENAI_API_KEY: 'sk-live', + }, + getOllamaChatBaseUrl: () => 'http://localhost:11434/v1', + resolveOllamaDefaultModel: async () => 'llama3.1:8b', + }) + + assert.equal(env.OPENAI_BASE_URL, 'https://api.openai.com/v1') + assert.equal(env.OPENAI_MODEL, 'gpt-4o-mini') + assert.equal(env.OPENAI_API_KEY, 'sk-live') +}) + +test('ollama profiles never persist openai api keys', () => { + const env = buildOllamaProfileEnv('llama3.1:8b', { + getOllamaChatBaseUrl: () => 'http://localhost:11434/v1', + }) + + assert.deepEqual(env, { + OPENAI_BASE_URL: 'http://localhost:11434/v1', + OPENAI_MODEL: 'llama3.1:8b', + }) + assert.equal('OPENAI_API_KEY' in env, false) +}) + +test('auto profile falls back to openai when no viable ollama model exists', () => { + assert.equal(selectAutoProfile(null), 'openai') + assert.equal(selectAutoProfile('qwen2.5-coder:7b'), 'ollama') +}) diff --git a/src/utils/providerProfile.ts 
b/src/utils/providerProfile.ts new file mode 100644 index 00000000..2cd7188b --- /dev/null +++ b/src/utils/providerProfile.ts @@ -0,0 +1,123 @@ +import { + getGoalDefaultOpenAIModel, + type RecommendationGoal, +} from './providerRecommendation.ts' + +export type ProviderProfile = 'openai' | 'ollama' + +export type ProfileEnv = { + OPENAI_BASE_URL?: string + OPENAI_MODEL?: string + OPENAI_API_KEY?: string +} + +export type ProfileFile = { + profile: ProviderProfile + env: ProfileEnv + createdAt: string +} + +export function sanitizeApiKey( + key: string | null | undefined, +): string | undefined { + if (!key || key === 'SUA_CHAVE') return undefined + return key +} + +export function buildOllamaProfileEnv( + model: string, + options: { + baseUrl?: string | null + getOllamaChatBaseUrl: (baseUrl?: string) => string + }, +): ProfileEnv { + return { + OPENAI_BASE_URL: options.getOllamaChatBaseUrl(options.baseUrl ?? undefined), + OPENAI_MODEL: model, + } +} + +export function buildOpenAIProfileEnv(options: { + goal: RecommendationGoal + model?: string | null + apiKey?: string | null + processEnv?: NodeJS.ProcessEnv +}): ProfileEnv | null { + const processEnv = options.processEnv ?? process.env + const key = sanitizeApiKey(options.apiKey ?? processEnv.OPENAI_API_KEY) + if (!key) { + return null + } + + return { + OPENAI_BASE_URL: processEnv.OPENAI_BASE_URL || 'https://api.openai.com/v1', + OPENAI_MODEL: options.model || getGoalDefaultOpenAIModel(options.goal), + OPENAI_API_KEY: key, + } +} + +export function createProfileFile( + profile: ProviderProfile, + env: ProfileEnv, +): ProfileFile { + return { + profile, + env, + createdAt: new Date().toISOString(), + } +} + +export function selectAutoProfile( + recommendedOllamaModel: string | null, +): ProviderProfile { + return recommendedOllamaModel ? 'ollama' : 'openai' +} + +export async function buildLaunchEnv(options: { + profile: ProviderProfile + persisted: ProfileFile | null + goal: RecommendationGoal + processEnv?: NodeJS.ProcessEnv + getOllamaChatBaseUrl?: (baseUrl?: string) => string + resolveOllamaDefaultModel?: (goal: RecommendationGoal) => Promise +}): Promise { + const processEnv = options.processEnv ?? process.env + const persistedEnv = + options.persisted?.profile === options.profile + ? options.persisted.env ?? {} + : {} + + const env: NodeJS.ProcessEnv = { + ...processEnv, + CLAUDE_CODE_USE_OPENAI: '1', + } + + if (options.profile === 'ollama') { + const getOllamaBaseUrl = + options.getOllamaChatBaseUrl ?? (() => 'http://localhost:11434/v1') + const resolveOllamaModel = + options.resolveOllamaDefaultModel ?? 
(async () => 'llama3.1:8b') + + env.OPENAI_BASE_URL = persistedEnv.OPENAI_BASE_URL || getOllamaBaseUrl() + env.OPENAI_MODEL = + persistedEnv.OPENAI_MODEL || + (await resolveOllamaModel(options.goal)) + + if (!processEnv.OPENAI_API_KEY || processEnv.OPENAI_API_KEY === 'SUA_CHAVE') { + delete env.OPENAI_API_KEY + } + + return env + } + + env.OPENAI_BASE_URL = + processEnv.OPENAI_BASE_URL || + persistedEnv.OPENAI_BASE_URL || + 'https://api.openai.com/v1' + env.OPENAI_MODEL = + processEnv.OPENAI_MODEL || + persistedEnv.OPENAI_MODEL || + getGoalDefaultOpenAIModel(options.goal) + env.OPENAI_API_KEY = processEnv.OPENAI_API_KEY || persistedEnv.OPENAI_API_KEY + return env +} diff --git a/src/utils/providerRecommendation.test.ts b/src/utils/providerRecommendation.test.ts index 986e403f..f85777bf 100644 --- a/src/utils/providerRecommendation.test.ts +++ b/src/utils/providerRecommendation.test.ts @@ -83,6 +83,19 @@ test('non-chat embedding models are heavily demoted', () => { assert.equal(ranked[0]?.name, 'mistral:7b-instruct') }) +test('auto-pick ignores non-chat ollama models', () => { + const recommended = recommendOllamaModel( + [ + model('nomic-embed-text', { parameterSize: '0.5B' }), + model('bge-reranker-v2', { parameterSize: '1.5B' }), + model('whisper-large-v3', { parameterSize: '1.6B' }), + ], + 'balanced', + ) + + assert.equal(recommended, null) +}) + test('benchmark latency can reorder close recommendations', () => { const ranked = rankOllamaModels( [ @@ -111,6 +124,69 @@ test('benchmark latency can reorder close recommendations', () => { assert.equal(benchmarked[0]?.benchmarkMs, 350) }) +test('unbenchmarked models stay behind benchmarked candidates', () => { + const ranked = rankOllamaModels( + [ + model('phi4-mini:4b', { + parameterSize: '4B', + quantizationLevel: 'Q4_K_M', + }), + model('mistral:7b-instruct', { + parameterSize: '7B', + quantizationLevel: 'Q4_K_M', + }), + model('llama3.1:8b', { + parameterSize: '8B', + quantizationLevel: 'Q4_K_M', + }), + model('qwen2.5:14b', { + parameterSize: '14B', + quantizationLevel: 'Q4_K_M', + }), + ], + 'latency', + ) + + const benchmarked = applyBenchmarkLatency( + ranked, + { + 'phi4-mini:4b': 2400, + 'mistral:7b-instruct': 2200, + 'llama3.1:8b': 2100, + }, + 'latency', + ) + + assert.ok(benchmarked.slice(0, 3).every(item => item.benchmarkMs !== null)) + assert.equal(benchmarked[3]?.name, 'qwen2.5:14b') + assert.equal(benchmarked[3]?.benchmarkMs, null) +}) + +test('coding goal recognizes codestral and devstral families', () => { + const ranked = rankOllamaModels( + [ + model('mistral:7b-instruct', { + parameterSize: '7B', + quantizationLevel: 'Q4_K_M', + }), + model('codestral:22b', { + parameterSize: '22B', + quantizationLevel: 'Q4_K_M', + }), + model('devstral:24b', { + parameterSize: '24B', + quantizationLevel: 'Q4_K_M', + }), + ], + 'coding', + ) + + assert.deepEqual(ranked.slice(0, 2).map(item => item.name), [ + 'devstral:24b', + 'codestral:22b', + ]) +}) + test('goal defaults choose sensible openai models', () => { assert.equal(getGoalDefaultOpenAIModel('latency'), 'gpt-4o-mini') assert.equal(getGoalDefaultOpenAIModel('balanced'), 'gpt-4o') diff --git a/src/utils/providerRecommendation.ts b/src/utils/providerRecommendation.ts index e49c37aa..8bd1e2cd 100644 --- a/src/utils/providerRecommendation.ts +++ b/src/utils/providerRecommendation.ts @@ -23,6 +23,8 @@ const CODING_HINTS = [ 'coder', 'codellama', 'codegemma', + 'codestral', + 'devstral', 'starcoder', 'deepseek-coder', 'qwen2.5-coder', @@ -57,6 +59,16 @@ function includesAny(text: 
string, needles: string[]): boolean {
   return needles.some(needle => text.includes(needle))
 }
 
+export function isViableOllamaChatModel(model: OllamaModelDescriptor): boolean {
+  return !includesAny(modelHaystack(model), NON_CHAT_HINTS)
+}
+
+export function selectRecommendedOllamaModel<
+  T extends OllamaModelDescriptor,
+>(models: T[]): T | null {
+  return models.find(isViableOllamaChatModel) ?? null
+}
+
 function inferParameterBillions(model: OllamaModelDescriptor): number | null {
   const text = `${model.parameterSize ?? ''} ${model.name}`.toLowerCase()
   const match = text.match(/(\d+(?:\.\d+)?)\s*b\b/)
@@ -265,7 +277,7 @@ export function recommendOllamaModel(
   models: OllamaModelDescriptor[],
   goal: RecommendationGoal,
 ): RankedOllamaModel | null {
-  return rankOllamaModels(models, goal)[0] ?? null
+  return selectRecommendedOllamaModel(rankOllamaModels(models, goal))
 }
 
 export function applyBenchmarkLatency(
@@ -276,7 +288,7 @@ export function applyBenchmarkLatency(
   const divisor =
     goal === 'latency' ? 120 : goal === 'coding' ? 500 : 240
 
-  return models
+  const scoredModels = models
     .map(model => {
       const latency = benchmarkMs[model.name] ?? null
       const benchmarkPenalty = latency === null ? 0 : latency / divisor
       const reasons =
         latency === null
           ? model.reasons
           : [`benchmarked at ${Math.round(latency)}ms`, ...model.reasons]
 
       return {
         ...model,
         benchmarkMs: latency,
         reasons,
         summary: reasons.slice(0, 3).join(', '),
         score: Number((model.score - benchmarkPenalty).toFixed(2)),
       }
     })
-    .sort((a, b) => compareRankedModels(a, b, goal))
+
+  const benchmarkedModels = scoredModels.filter(model => model.benchmarkMs !== null)
+  if (benchmarkedModels.length === 0) {
+    return scoredModels.sort((a, b) => compareRankedModels(a, b, goal))
+  }
+
+  const unbenchmarkedModels = scoredModels.filter(model => model.benchmarkMs === null)
+  benchmarkedModels.sort((a, b) => compareRankedModels(a, b, goal))
+  return [...benchmarkedModels, ...unbenchmarkedModels]
 }
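Taken together, the pipeline behind `profile:recommend` is: discover installed models, rank them heuristically for the goal, optionally benchmark the top viable chat candidates, then fold measured latency back into the ranking. A minimal sketch of that flow using the exported helpers, assuming the same relative import paths as `scripts/provider-recommend.ts` (the hard-coded goal is illustrative; the real script parses it from `--goal`):

```ts
import {
  applyBenchmarkLatency,
  isViableOllamaChatModel,
  rankOllamaModels,
  selectRecommendedOllamaModel,
  type RecommendationGoal,
} from '../src/utils/providerRecommendation.ts'
import {
  benchmarkOllamaModel,
  listOllamaModels,
} from './provider-discovery.ts'

const goal: RecommendationGoal = 'coding' // illustrative; normally from --goal

// 1. Discover what is actually installed on the local Ollama instance.
const models = await listOllamaModels()

// 2. Rank heuristically: family hints, size tier, quantization level.
const ranked = rankOllamaModels(models, goal)

// 3. Optionally benchmark the top viable chat candidates, skipping
//    embedding/reranker/speech models (this mirrors --benchmark).
const latencies: Record<string, number | null> = {}
for (const candidate of ranked.filter(isViableOllamaChatModel).slice(0, 3)) {
  latencies[candidate.name] = await benchmarkOllamaModel(candidate.name)
}

// 4. Fold measured latency into the scores; benchmarked models stay ahead
//    of unbenchmarked ones, and non-chat models are never selected.
const benchmarked = applyBenchmarkLatency(ranked, latencies, goal)
const winner = selectRecommendedOllamaModel(benchmarked)

console.log(winner ? winner.name : 'no viable local chat model')
```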