feat: fix open-source build and add Ollama model picker (#302)
* feat: fix open-source build and add Ollama model picker - Fix build failures by stubbing 62+ missing Anthropic-internal modules with a catch-all plugin in scripts/build.ts - Add runtime shim exports (isReplBridgeActive, getReplBridgeHandle) in bootstrap/state.ts for feature-gated code references - Add /model picker support for Ollama: fetches available models from Ollama server at startup and displays them in the model selection menu - Add Ollama model validation against cached server model list Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * fix: address PR review feedback for Ollama integration - Move Ollama validation before enterprise allowlist check in validateModel - Truncate model list in error messages to first 5 entries - Fix isOllamaProvider() to detect OLLAMA_BASE_URL-only configurations - Reuse getOllamaApiBaseUrl() from providerDiscovery instead of duplicating - Reset fetchPromise on failure to allow retry in prefetchOllamaModels - Include Default option in Ollama model picker, prevent Claude model fallthrough - Add file existence check for src/tasks/ stubs in build script Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * fix: use pre-scanned exact-match resolvers to avoid Bun bundler corruption Bun's onResolve plugin corrupts the module graph even when returning null for non-matching imports. This caused lodash-es memoize and zod's util namespace to be incorrectly tree-shaken, producing runtime ReferenceErrors. Replace all pattern-based onResolve hooks with a pre-build scan that identifies missing modules upfront, then registers exact-match resolvers only for confirmed missing imports. This avoids touching any valid module resolution paths. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * fix: move Ollama model prefetch outside startup throttle gate prefetchOllamaModels() was inside the skipStartupPrefetches condition, so it would be skipped on subsequent launches due to the bgRefresh throttle timestamp. 
Ollama model fetch targets a local/remote server and is fast & cheap, so it should always run at startup. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> --------- Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -250,6 +250,103 @@ export const SeverityNumber = {};
|
||||
loader: 'js',
|
||||
}),
|
||||
)
|
||||
|
||||
// Pre-scan: find all missing modules that need stubbing
|
||||
// (Bun's onResolve corrupts module graph even when returning null,
|
||||
// so we use exact-match resolvers instead of catch-all patterns)
|
||||
const fs = require('fs')
|
||||
const pathMod = require('path')
|
||||
const srcDir = pathMod.resolve(__dirname, '..', 'src')
|
||||
const missingModules = new Set<string>()
|
||||
const missingModuleExports = new Map<string, Set<string>>()
|
||||
|
||||
// Known missing external packages
|
||||
for (const pkg of [
|
||||
'@ant/computer-use-mcp',
|
||||
'@ant/computer-use-mcp/sentinelApps',
|
||||
'@ant/computer-use-mcp/types',
|
||||
'@ant/computer-use-swift',
|
||||
'@ant/computer-use-input',
|
||||
]) {
|
||||
missingModules.add(pkg)
|
||||
}
|
||||
|
||||
// Scan source to find imports that can't resolve
|
||||
function scanForMissingImports() {
|
||||
function walk(dir: string) {
|
||||
for (const ent of fs.readdirSync(dir, { withFileTypes: true })) {
|
||||
const full = pathMod.join(dir, ent.name)
|
||||
if (ent.isDirectory()) { walk(full); continue }
|
||||
if (!/\.(ts|tsx)$/.test(ent.name)) continue
|
||||
const code: string = fs.readFileSync(full, 'utf-8')
|
||||
// Collect all imports
|
||||
for (const m of code.matchAll(/import\s+(?:\{([^}]*)\}|(\w+))?\s*(?:,\s*\{([^}]*)\})?\s*from\s+['"](.*?)['"]/g)) {
|
||||
const specifier = m[4]
|
||||
const namedPart = m[1] || m[3] || ''
|
||||
const names = namedPart.split(',')
|
||||
.map((s: string) => s.trim().replace(/^type\s+/, ''))
|
||||
.filter((s: string) => s && !s.startsWith('type '))
|
||||
|
||||
// Check src/tasks/ non-relative imports
|
||||
if (specifier.startsWith('src/tasks/')) {
|
||||
const resolved = pathMod.resolve(__dirname, '..', specifier)
|
||||
const candidates = [
|
||||
resolved,
|
||||
`${resolved}.ts`, `${resolved}.tsx`,
|
||||
resolved.replace(/\.js$/, '.ts'), resolved.replace(/\.js$/, '.tsx'),
|
||||
pathMod.join(resolved, 'index.ts'), pathMod.join(resolved, 'index.tsx'),
|
||||
]
|
||||
if (!candidates.some((c: string) => fs.existsSync(c))) {
|
||||
missingModules.add(specifier)
|
||||
}
|
||||
}
|
||||
// Check relative .js imports
|
||||
else if (specifier.endsWith('.js') && (specifier.startsWith('./') || specifier.startsWith('../'))) {
|
||||
const dir2 = pathMod.dirname(full)
|
||||
const resolved = pathMod.resolve(dir2, specifier)
|
||||
const tsVariant = resolved.replace(/\.js$/, '.ts')
|
||||
const tsxVariant = resolved.replace(/\.js$/, '.tsx')
|
||||
if (!fs.existsSync(resolved) && !fs.existsSync(tsVariant) && !fs.existsSync(tsxVariant)) {
|
||||
missingModules.add(specifier)
|
||||
}
|
||||
}
|
||||
|
||||
// Track named exports for missing modules
|
||||
if (names.length > 0) {
|
||||
if (!missingModuleExports.has(specifier)) missingModuleExports.set(specifier, new Set())
|
||||
for (const n of names) missingModuleExports.get(specifier)!.add(n)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
walk(srcDir)
|
||||
}
|
||||
scanForMissingImports()
|
||||
|
||||
// Register exact-match resolvers for each missing module
|
||||
for (const mod of missingModules) {
|
||||
const escaped = mod.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')
|
||||
build.onResolve({ filter: new RegExp(`^${escaped}$`) }, () => ({
|
||||
path: mod,
|
||||
namespace: 'missing-module-stub',
|
||||
}))
|
||||
}
|
||||
|
||||
build.onLoad(
|
||||
{ filter: /.*/, namespace: 'missing-module-stub' },
|
||||
(args) => {
|
||||
const names = missingModuleExports.get(args.path) ?? new Set()
|
||||
const exports = [...names].map(n => `export const ${n} = noop;`).join('\n')
|
||||
return {
|
||||
contents: `
|
||||
const noop = () => null;
|
||||
export default noop;
|
||||
${exports}
|
||||
`,
|
||||
loader: 'js',
|
||||
}
|
||||
},
|
||||
)
|
||||
},
|
||||
},
|
||||
],
|
||||
|
||||
@@ -1756,3 +1756,12 @@ export function setPromptId(id: string | null): void {
|
||||
STATE.promptId = id
|
||||
}
|
||||
|
||||
// Stub for feature-gated REPL bridge (not available in open build)
|
||||
export function isReplBridgeActive(): boolean {
|
||||
return false
|
||||
}
|
||||
|
||||
export function getReplBridgeHandle(): null {
|
||||
return null
|
||||
}
|
||||
|
||||
|
||||
@@ -35,6 +35,7 @@ import type { Root } from './ink.js';
|
||||
import { launchRepl } from './replLauncher.js';
|
||||
import { hasGrowthBookEnvOverride, initializeGrowthBook, refreshGrowthBookAfterAuthChange } from './services/analytics/growthbook.js';
|
||||
import { fetchBootstrapData } from './services/api/bootstrap.js';
|
||||
import { prefetchOllamaModels } from './utils/model/ollamaModels.js';
|
||||
import { type DownloadResult, downloadSessionFiles, type FilesApiConfig, parseFileSpecs } from './services/api/filesApi.js';
|
||||
import { prefetchPassesEligibility } from './services/api/referral.js';
|
||||
import { prefetchOfficialMcpUrls } from './services/mcp/officialRegistry.js';
|
||||
@@ -2333,6 +2334,9 @@ async function run(): Promise<CommanderCommand> {
|
||||
const bgRefreshThrottleMs = getFeatureValue_CACHED_MAY_BE_STALE('tengu_cicada_nap_ms', 0);
|
||||
const lastPrefetched = getGlobalConfig().startupPrefetchedAt ?? 0;
|
||||
const skipStartupPrefetches = isBareMode() || bgRefreshThrottleMs > 0 && Date.now() - lastPrefetched < bgRefreshThrottleMs;
|
||||
// Always prefetch Ollama models (not gated by throttle — local server, fast & cheap)
|
||||
prefetchOllamaModels();
|
||||
|
||||
if (!skipStartupPrefetches) {
|
||||
const lastPrefetchedInfo = lastPrefetched > 0 ? ` last ran ${Math.round((Date.now() - lastPrefetched) / 1000)}s ago` : '';
|
||||
logForDebugging(`Starting background startup prefetches${lastPrefetchedInfo}`);
|
||||
|
||||
@@ -32,6 +32,7 @@ import {
|
||||
} from './model.js'
|
||||
import { has1mContext } from '../context.js'
|
||||
import { getGlobalConfig } from '../config.js'
|
||||
import { getCachedOllamaModelOptions, isOllamaProvider } from './ollamaModels.js'
|
||||
|
||||
// @[MODEL LAUNCH]: Update all the available and default model option strings below.
|
||||
|
||||
@@ -330,6 +331,28 @@ function getCodexModelOptions(): ModelOption[] {
|
||||
// @[MODEL LAUNCH]: Update the model picker lists below to include/reorder options for the new model.
|
||||
// Each user tier (ant, Max/Team Premium, Pro/Team Standard/Enterprise, PAYG 1P, PAYG 3P) has its own list.
|
||||
function getModelOptionsBase(fastMode = false): ModelOption[] {
|
||||
// When using Ollama, show models from the Ollama server instead of Claude models
|
||||
if (getAPIProvider() === 'openai' && isOllamaProvider()) {
|
||||
const defaultOption = getDefaultOptionForUser(fastMode)
|
||||
const ollamaModels = getCachedOllamaModelOptions()
|
||||
if (ollamaModels.length > 0) {
|
||||
return [defaultOption, ...ollamaModels]
|
||||
}
|
||||
// Fallback: if models not yet fetched, show current model instead of Claude models
|
||||
const currentModel = getUserSpecifiedModelSetting() ?? getInitialMainLoopModel()
|
||||
if (currentModel != null) {
|
||||
return [
|
||||
defaultOption,
|
||||
{
|
||||
value: currentModel,
|
||||
label: currentModel,
|
||||
description: 'Currently configured Ollama model',
|
||||
},
|
||||
]
|
||||
}
|
||||
return [defaultOption]
|
||||
}
|
||||
|
||||
if (process.env.USER_TYPE === 'ant') {
|
||||
// Build options from antModels config
|
||||
const antModelOptions: ModelOption[] = getAntModels().map(m => ({
|
||||
|
||||
104
src/utils/model/ollamaModels.ts
Normal file
104
src/utils/model/ollamaModels.ts
Normal file
@@ -0,0 +1,104 @@
|
||||
/**
|
||||
* Ollama model discovery for the /model picker.
|
||||
* Fetches available models from the Ollama API and caches them
|
||||
* so the synchronous getModelOptions() can use them.
|
||||
*/
|
||||
|
||||
import type { ModelOption } from './modelOptions.js'
|
||||
import { getOllamaApiBaseUrl } from '../providerDiscovery.js'
|
||||
|
||||
// Module-level cache of model options fetched from the Ollama server;
// null until the first prefetch completes.
let cachedOllamaOptions: ModelOption[] | null = null
// In-flight fetch used to dedupe concurrent prefetch calls; reset when settled.
let fetchPromise: Promise<ModelOption[]> | null = null
|
||||
|
||||
/**
|
||||
* Returns true when the current OPENAI_BASE_URL points at an Ollama instance.
|
||||
* Detects OLLAMA_BASE_URL presence, /v1 suffixed URLs, and the raw base URL.
|
||||
*/
|
||||
export function isOllamaProvider(): boolean {
|
||||
// Explicit OLLAMA_BASE_URL is always sufficient
|
||||
if (process.env.OLLAMA_BASE_URL) return true
|
||||
if (!process.env.OPENAI_BASE_URL) return false
|
||||
const baseUrl = process.env.OPENAI_BASE_URL
|
||||
// Match common Ollama port
|
||||
try {
|
||||
const parsed = new URL(baseUrl)
|
||||
if (parsed.port === '11434') return true
|
||||
} catch {
|
||||
// ignore
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch models from the Ollama /api/tags endpoint.
|
||||
*/
|
||||
export async function fetchOllamaModels(): Promise<ModelOption[]> {
|
||||
const apiUrl = getOllamaApiBaseUrl()
|
||||
if (!apiUrl) return []
|
||||
|
||||
const controller = new AbortController()
|
||||
const timeout = setTimeout(() => controller.abort(), 5000)
|
||||
|
||||
try {
|
||||
const response = await fetch(`${apiUrl}/api/tags`, {
|
||||
method: 'GET',
|
||||
signal: controller.signal,
|
||||
})
|
||||
if (!response.ok) return []
|
||||
|
||||
const data = (await response.json()) as {
|
||||
models?: Array<{
|
||||
name?: string
|
||||
size?: number
|
||||
details?: {
|
||||
parameter_size?: string
|
||||
quantization_level?: string
|
||||
family?: string
|
||||
}
|
||||
}>
|
||||
}
|
||||
|
||||
return (data.models ?? [])
|
||||
.filter(m => Boolean(m.name))
|
||||
.map(m => {
|
||||
const paramSize = m.details?.parameter_size ?? ''
|
||||
const quant = m.details?.quantization_level ?? ''
|
||||
const sizeGB = m.size ? `${(m.size / 1e9).toFixed(1)}GB` : ''
|
||||
const parts = [paramSize, quant, sizeGB].filter(Boolean).join(' · ')
|
||||
return {
|
||||
value: m.name!,
|
||||
label: m.name!,
|
||||
description: parts ? `Ollama · ${parts}` : 'Ollama model',
|
||||
}
|
||||
})
|
||||
} catch {
|
||||
return []
|
||||
} finally {
|
||||
clearTimeout(timeout)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Prefetch and cache Ollama models. Call during startup.
|
||||
*/
|
||||
export function prefetchOllamaModels(): void {
|
||||
if (!isOllamaProvider()) return
|
||||
if (cachedOllamaOptions && cachedOllamaOptions.length > 0) return
|
||||
if (fetchPromise) return
|
||||
fetchPromise = fetchOllamaModels()
|
||||
.then(options => {
|
||||
cachedOllamaOptions = options
|
||||
return options
|
||||
})
|
||||
.finally(() => {
|
||||
fetchPromise = null
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Get cached Ollama model options (synchronous).
|
||||
* Returns empty array if not yet fetched.
|
||||
*/
|
||||
export function getCachedOllamaModelOptions(): ModelOption[] {
|
||||
return cachedOllamaOptions ?? []
|
||||
}
|
||||
@@ -10,6 +10,7 @@ import {
|
||||
AuthenticationError,
|
||||
} from '@anthropic-ai/sdk'
|
||||
import { getModelStrings } from './modelStrings.js'
|
||||
import { getCachedOllamaModelOptions, isOllamaProvider } from './ollamaModels.js'
|
||||
|
||||
// Cache valid models to avoid repeated API calls
|
||||
const validModelCache = new Map<string, boolean>()
|
||||
@@ -27,6 +28,25 @@ export async function validateModel(
|
||||
return { valid: false, error: 'Model name cannot be empty' }
|
||||
}
|
||||
|
||||
// For Ollama provider, validate against cached model list instead of API call
|
||||
// (skip enterprise allowlist since Ollama models are user-managed)
|
||||
if (getAPIProvider() === 'openai' && isOllamaProvider()) {
|
||||
const ollamaModels = getCachedOllamaModelOptions()
|
||||
const found = ollamaModels.some(m => m.value === normalizedModel)
|
||||
if (found) {
|
||||
validModelCache.set(normalizedModel, true)
|
||||
return { valid: true }
|
||||
}
|
||||
if (ollamaModels.length > 0) {
|
||||
const MAX_SHOWN = 5
|
||||
const names = ollamaModels.map(m => m.value)
|
||||
const shown = names.slice(0, MAX_SHOWN).join(', ')
|
||||
const suffix = names.length > MAX_SHOWN ? ` and ${names.length - MAX_SHOWN} more` : ''
|
||||
return { valid: false, error: `Model '${normalizedModel}' not found on Ollama server. Available: ${shown}${suffix}` }
|
||||
}
|
||||
// If cache is empty, fall through to API validation
|
||||
}
|
||||
|
||||
// Check against availableModels allowlist before any API call
|
||||
if (!isModelAllowed(normalizedModel)) {
|
||||
return {
|
||||
|
||||
Reference in New Issue
Block a user