Compare commits: fix/386-wi ... v0.1.8
8 commits
| Author | SHA1 | Date |
|---|---|---|
| | 94de37d44f | |
| | 3b3aca716d | |
| | d5852ca73d | |
| | c534aa5771 | |
| | 60d3d8961a | |
| | 3b9893b586 | |
| | daf2c90b6d | |
| | 4ac7367733 | |
bun.lock (9 changes)
@@ -51,7 +51,7 @@
     "ignore": "7.0.5",
     "indent-string": "5.0.0",
     "jsonc-parser": "3.3.1",
-    "lodash-es": "4.18.0",
+    "lodash-es": "4.18.1",
     "lru-cache": "11.2.7",
     "marked": "15.0.12",
     "p-map": "7.0.4",
@@ -88,6 +88,9 @@
       },
     },
   },
+  "overrides": {
+    "lodash-es": "4.18.1",
+  },
   "packages": {
     "@alcalzone/ansi-tokenize": ["@alcalzone/ansi-tokenize@0.3.0", "", { "dependencies": { "ansi-styles": "^6.2.1", "is-fullwidth-code-point": "^5.0.0" } }, "sha512-p+CMKJ93HFmLkjXKlXiVGlMQEuRb6H0MokBSwUsX+S6BRX8eV5naFZpQJFfJHjRZY0Hmnqy1/r6UWl3x+19zYA=="],
 
@@ -657,7 +660,7 @@
 
     "locate-path": ["locate-path@5.0.0", "", { "dependencies": { "p-locate": "^4.1.0" } }, "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g=="],
 
-    "lodash-es": ["lodash-es@4.18.0", "", {}, "sha512-koAgswPPA+UTaPN64Etp+PGP+WT6oqOS2NMi5yDkMaiGw9qY4VxQbQF0mtKMyr4BlTznWyzePV5UpECTJQmSUA=="],
+    "lodash-es": ["lodash-es@4.18.1", "", {}, "sha512-J8xewKD/Gk22OZbhpOVSwcs60zhd95ESDwezOFuA3/099925PdHJ7OFHNTGtajL3AlZkykD32HykiMo+BIBI8A=="],
 
     "lodash.camelcase": ["lodash.camelcase@4.3.0", "", {}, "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA=="],
 
@@ -891,8 +894,6 @@
 
     "zod-to-json-schema": ["zod-to-json-schema@3.25.2", "", { "peerDependencies": { "zod": "^3.25.28 || ^4" } }, "sha512-O/PgfnpT1xKSDeQYSCfRI5Gy3hPf91mKVDuYLUHZJMiDFptvP41MSnWofm8dnCm0256ZNfZIM7DSzuSMAFnjHA=="],
 
-    "@anthropic-ai/sandbox-runtime/lodash-es": ["lodash-es@4.17.23", "", {}, "sha512-kVI48u3PZr38HdYz98UmfPnXl2DXrpdctLrFLCd3kOx1xUkOmpFPx7gCWWM5MPkL/fD8zb+Ph0QzjGFs4+hHWg=="],
-
     "@aws-crypto/crc32/@aws-crypto/util": ["@aws-crypto/util@5.2.0", "", { "dependencies": { "@aws-sdk/types": "^3.222.0", "@smithy/util-utf8": "^2.0.0", "tslib": "^2.6.2" } }, "sha512-4RkU9EsI6ZpBve5fseQlGNUWKMa1RLPQ1dnjnQoe07ldfIzcsGb5hC5W0Dm7u423KWzawlrpbjXBrXCEv9zazQ=="],
 
     "@aws-crypto/crc32/tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],
package.json

@@ -1,6 +1,6 @@
 {
   "name": "@gitlawb/openclaude",
-  "version": "0.1.7",
+  "version": "0.1.8",
   "description": "Claude Code opened to any LLM — OpenAI, Gemini, DeepSeek, Ollama, and 200+ models",
   "type": "module",
   "bin": {
@@ -95,7 +95,7 @@
     "ignore": "7.0.5",
     "indent-string": "5.0.0",
     "jsonc-parser": "3.3.1",
-    "lodash-es": "4.18.0",
+    "lodash-es": "4.18.1",
     "lru-cache": "11.2.7",
     "marked": "15.0.12",
     "p-map": "7.0.4",
@@ -150,5 +150,8 @@
   "license": "SEE LICENSE FILE",
   "publishConfig": {
     "access": "public"
-  }
+  },
+  "overrides": {
+    "lodash-es": "4.18.1"
+  }
 }
src/commands/model/model.test.tsx (new file, 42 lines)
@@ -0,0 +1,42 @@
+import { afterEach, expect, mock, test } from 'bun:test'
+
+const originalEnv = {
+  CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
+  OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
+  OPENAI_MODEL: process.env.OPENAI_MODEL,
+}
+
+afterEach(() => {
+  mock.restore()
+  process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
+  process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
+  process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
+})
+
+test('opens the model picker without awaiting local model discovery refresh', async () => {
+  process.env.CLAUDE_CODE_USE_OPENAI = '1'
+  process.env.OPENAI_BASE_URL = 'http://127.0.0.1:8080/v1'
+  process.env.OPENAI_MODEL = 'qwen2.5-coder-7b-instruct'
+
+  let resolveDiscovery: (() => void) | undefined
+  const discoverOpenAICompatibleModelOptions = mock(
+    () =>
+      new Promise<void>(resolve => {
+        resolveDiscovery = resolve
+      }),
+  )
+
+  mock.module('../../utils/model/openaiModelDiscovery.js', () => ({
+    discoverOpenAICompatibleModelOptions,
+  }))
+
+  const { call } = await import(`./model.js?ts=${Date.now()}-${Math.random()}`)
+  const result = await Promise.race([
+    call(() => {}, {} as never, ''),
+    new Promise(resolve => setTimeout(() => resolve('timeout'), 50)),
+  ])
+
+  resolveDiscovery?.()
+
+  expect(result).not.toBe('timeout')
+})
@@ -4,6 +4,7 @@ import * as React from 'react';
 import type { CommandResultDisplay } from '../../commands.js';
 import { ModelPicker } from '../../components/ModelPicker.js';
 import { COMMON_HELP_ARGS, COMMON_INFO_ARGS } from '../../constants/xml.js';
+import { fetchBootstrapData } from '../../services/api/bootstrap.js';
 import { type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS, logEvent } from '../../services/analytics/index.js';
 import { useAppState, useSetAppState } from '../../state/AppState.js';
 import type { LocalJSXCommandCall } from '../../types/command.js';
@@ -19,6 +20,7 @@ import { getActiveOpenAIModelOptionsCache, setActiveOpenAIModelOptionsCache } fr
 import { getDefaultMainLoopModelSetting, isOpus1mMergeEnabled, renderDefaultModelSetting } from '../../utils/model/model.js';
 import { isModelAllowed } from '../../utils/model/modelAllowlist.js';
 import { validateModel } from '../../utils/model/validateModel.js';
+import { getAdditionalModelOptionsCacheScope } from '../../services/api/providerConfig.js';
 function ModelPickerWrapper(t0) {
   const $ = _c(17);
   const {
@@ -319,7 +321,9 @@ export const call: LocalJSXCommandCall = async (onDone, _context, args) => {
     });
     return <SetModelAndClose args={args} onDone={onDone} />;
   }
-  await refreshOpenAIModelOptionsCache();
+  if (getAdditionalModelOptionsCacheScope()?.startsWith('openai:')) {
+    void refreshOpenAIModelOptionsCache();
+  }
   return <ModelPickerWrapper onDone={onDone} />;
 };

 function renderModelLabel(model: string | null): string {
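This hunk is what the new model.test.tsx above pins down: `/model` must render the picker immediately and let local model discovery settle in the background. A minimal sketch of the same fire-and-forget pattern, with illustrative stand-in names rather than the project's real APIs:

```ts
// Sketch: open a UI immediately and let a slow refresh settle in the background.
// `refreshModels` and `openPicker` are illustrative stand-ins, not project APIs.
async function openModelPicker(
  refreshModels: () => Promise<void>,
  openPicker: () => void,
): Promise<void> {
  // `void` drops the promise on purpose: a slow or hung /models endpoint
  // must not block the picker from rendering.
  void refreshModels().catch(() => {
    // Discovery failures are non-fatal; the picker falls back to cached options.
  })
  openPicker()
}

// Usage: the picker opens even though the refresh never resolves.
const neverResolves = () => new Promise<void>(() => {})
openModelPicker(neverResolves, () => console.log('picker open'))
```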
@@ -197,6 +197,21 @@ test('buildProfileSaveMessage maps provider fields without echoing secrets', ()
   expect(message).not.toContain('sk-secret-12345678')
 })
 
+test('buildProfileSaveMessage labels local openai-compatible profiles consistently', () => {
+  const message = buildProfileSaveMessage(
+    'openai',
+    {
+      OPENAI_MODEL: 'gpt-5.4',
+      OPENAI_BASE_URL: 'http://127.0.0.1:8080/v1',
+    },
+    'D:/codings/Opensource/openclaude/.openclaude-profile.json',
+  )
+
+  expect(message).toContain('Saved Local OpenAI-compatible profile.')
+  expect(message).toContain('Model: gpt-5.4')
+  expect(message).toContain('Endpoint: http://127.0.0.1:8080/v1')
+})
+
 test('buildProfileSaveMessage describes Gemini access token / ADC mode clearly', () => {
   const message = buildProfileSaveMessage(
     'gemini',
@@ -230,6 +245,36 @@ test('buildCurrentProviderSummary redacts poisoned model and endpoint values', (
   expect(summary.endpointLabel).toBe('sk-...5678')
 })
 
+test('buildCurrentProviderSummary labels generic local openai-compatible providers', () => {
+  const summary = buildCurrentProviderSummary({
+    processEnv: {
+      CLAUDE_CODE_USE_OPENAI: '1',
+      OPENAI_MODEL: 'qwen2.5-coder-7b-instruct',
+      OPENAI_BASE_URL: 'http://127.0.0.1:8080/v1',
+    },
+    persisted: null,
+  })
+
+  expect(summary.providerLabel).toBe('Local OpenAI-compatible')
+  expect(summary.modelLabel).toBe('qwen2.5-coder-7b-instruct')
+  expect(summary.endpointLabel).toBe('http://127.0.0.1:8080/v1')
+})
+
+test('buildCurrentProviderSummary does not relabel local gpt-5.4 providers as Codex', () => {
+  const summary = buildCurrentProviderSummary({
+    processEnv: {
+      CLAUDE_CODE_USE_OPENAI: '1',
+      OPENAI_MODEL: 'gpt-5.4',
+      OPENAI_BASE_URL: 'http://127.0.0.1:8080/v1',
+    },
+    persisted: null,
+  })
+
+  expect(summary.providerLabel).toBe('Local OpenAI-compatible')
+  expect(summary.modelLabel).toBe('gpt-5.4')
+  expect(summary.endpointLabel).toBe('http://127.0.0.1:8080/v1')
+})
+
 test('getProviderWizardDefaults ignores poisoned current provider values', () => {
   const defaults = getProviderWizardDefaults({
     OPENAI_API_KEY: 'sk-secret-12345678',
@@ -15,6 +15,7 @@ import { Box, Text } from '../../ink.js'
 import {
   DEFAULT_CODEX_BASE_URL,
   DEFAULT_OPENAI_BASE_URL,
+  isLocalProviderUrl,
   resolveCodexApiCredentials,
   resolveProviderRequest,
 } from '../../services/api/providerConfig.js'
@@ -52,7 +53,11 @@ import {
   recommendOllamaModel,
   type RecommendationGoal,
 } from '../../utils/providerRecommendation.js'
-import { hasLocalOllama, listOllamaModels } from '../../utils/providerDiscovery.js'
+import {
+  getLocalOpenAICompatibleProviderLabel,
+  hasLocalOllama,
+  listOllamaModels,
+} from '../../utils/providerDiscovery.js'
 
 type ProviderChoice = 'auto' | ProviderProfile | 'clear'
 
@@ -182,10 +187,8 @@ export function buildCurrentProviderSummary(options?: {
   let providerLabel = 'OpenAI-compatible'
   if (request.transport === 'codex_responses') {
     providerLabel = 'Codex'
-  } else if (request.baseUrl.includes('localhost:11434')) {
-    providerLabel = 'Ollama'
-  } else if (request.baseUrl.includes('localhost:1234')) {
-    providerLabel = 'LM Studio'
+  } else if (isLocalProviderUrl(request.baseUrl)) {
+    providerLabel = getLocalOpenAICompatibleProviderLabel(request.baseUrl)
   }
 
   return {
@@ -272,16 +275,20 @@ function buildSavedProfileSummary(
         ),
       }
     case 'openai':
-    default:
+    default: {
+      const baseUrl = env.OPENAI_BASE_URL ?? DEFAULT_OPENAI_BASE_URL
+
       return {
-        providerLabel: 'OpenAI-compatible',
+        providerLabel: isLocalProviderUrl(baseUrl)
+          ? getLocalOpenAICompatibleProviderLabel(baseUrl)
+          : 'OpenAI-compatible',
         modelLabel: getSafeDisplayValue(
           env.OPENAI_MODEL ?? 'gpt-4o',
           process.env,
           env,
         ),
         endpointLabel: getSafeDisplayValue(
-          env.OPENAI_BASE_URL ?? DEFAULT_OPENAI_BASE_URL,
+          baseUrl,
          process.env,
          env,
        ),
@@ -292,6 +299,7 @@
       }
+    }
   }
 }
 
 export function buildProfileSaveMessage(
   profile: ProviderProfile,
@@ -5,6 +5,9 @@
  * Addresses: https://github.com/Gitlawb/openclaude/issues/55
  */
 
+import { isLocalProviderUrl } from '../services/api/providerConfig.js'
+import { getLocalOpenAICompatibleProviderLabel } from '../utils/providerDiscovery.js'
+
 declare const MACRO: { VERSION: string; DISPLAY_VERSION?: string }
 
 const ESC = '\x1b['
@@ -99,7 +102,7 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
   if (useOpenAI) {
     const rawModel = process.env.OPENAI_MODEL || 'gpt-4o'
     const baseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1'
-    const isLocal = /localhost|127\.0\.0\.1|0\.0\.0\.0/.test(baseUrl)
+    const isLocal = isLocalProviderUrl(baseUrl)
     let name = 'OpenAI'
     if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel)) name = 'DeepSeek'
     else if (/openrouter/i.test(baseUrl)) name = 'OpenRouter'
@@ -107,10 +110,8 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
     else if (/groq/i.test(baseUrl)) name = 'Groq'
     else if (/mistral/i.test(baseUrl) || /mistral/i.test(rawModel)) name = 'Mistral'
     else if (/azure/i.test(baseUrl)) name = 'Azure OpenAI'
-    else if (/localhost:11434/i.test(baseUrl)) name = 'Ollama'
-    else if (/localhost:1234/i.test(baseUrl)) name = 'LM Studio'
     else if (/llama/i.test(rawModel)) name = 'Meta Llama'
-    else if (isLocal) name = 'Local'
+    else if (isLocal) name = getLocalOpenAICompatibleProviderLabel(baseUrl)
 
     // Resolve model alias to actual model name + reasoning effort
     let displayModel = rawModel
@@ -14,7 +14,16 @@ import { lazySchema } from '../../utils/lazySchema.js'
 import { logError } from '../../utils/log.js'
 import { getAPIProvider } from '../../utils/model/providers.js'
 import { isEssentialTrafficOnly } from '../../utils/privacyLevel.js'
+import type { ModelOption } from '../../utils/model/modelOptions.js'
+import {
+  getLocalOpenAICompatibleProviderLabel,
+  listOpenAICompatibleModels,
+} from '../../utils/providerDiscovery.js'
 import { getClaudeCodeUserAgent } from '../../utils/userAgent.js'
+import {
+  getAdditionalModelOptionsCacheScope,
+  resolveProviderRequest,
+} from './providerConfig.js'
 
 const bootstrapResponseSchema = lazySchema(() =>
   z.object({
@@ -39,6 +48,12 @@
 
 type BootstrapResponse = z.infer<ReturnType<typeof bootstrapResponseSchema>>
 
+type BootstrapCachePayload = {
+  clientData: Record<string, unknown> | null
+  additionalModelOptions: ModelOption[]
+  additionalModelOptionsScope: string
+}
+
 async function fetchBootstrapAPI(): Promise<BootstrapResponse | null> {
   if (isEssentialTrafficOnly()) {
     logForDebugging('[Bootstrap] Skipped: Nonessential traffic disabled')
@@ -108,22 +123,70 @@
   }
 }
 
+async function fetchLocalOpenAIModelOptions(): Promise<BootstrapCachePayload | null> {
+  const scope = getAdditionalModelOptionsCacheScope()
+  if (!scope?.startsWith('openai:')) {
+    return null
+  }
+
+  const { baseUrl } = resolveProviderRequest()
+  const models = await listOpenAICompatibleModels({
+    baseUrl,
+    apiKey: process.env.OPENAI_API_KEY,
+  })
+
+  if (models === null) {
+    logForDebugging('[Bootstrap] Local OpenAI model discovery failed')
+    return null
+  }
+
+  const providerLabel = getLocalOpenAICompatibleProviderLabel(baseUrl)
+
+  return {
+    clientData: getGlobalConfig().clientDataCache ?? null,
+    additionalModelOptionsScope: scope,
+    additionalModelOptions: models.map(model => ({
+      value: model,
+      label: model,
+      description: `Detected from ${providerLabel}`,
+    })),
+  }
+}
+
 /**
  * Fetch bootstrap data from the API and persist to disk cache.
  */
 export async function fetchBootstrapData(): Promise<void> {
   try {
+    const scope = getAdditionalModelOptionsCacheScope()
+    let payload: BootstrapCachePayload | null = null
+
+    if (scope === 'firstParty') {
       const response = await fetchBootstrapAPI()
       if (!response) return
 
-    const clientData = response.client_data ?? null
-    const additionalModelOptions = response.additional_model_options ?? []
+      payload = {
+        clientData: response.client_data ?? null,
+        additionalModelOptions: response.additional_model_options ?? [],
+        additionalModelOptionsScope: scope,
+      }
+    } else if (scope?.startsWith('openai:')) {
+      payload = await fetchLocalOpenAIModelOptions()
+      if (!payload) return
+    } else {
+      logForDebugging('[Bootstrap] Skipped: no additional model source')
+      return
+    }
+
+    const { clientData, additionalModelOptions, additionalModelOptionsScope } =
+      payload
+
     // Only persist if data actually changed — avoids a config write on every startup.
     const config = getGlobalConfig()
     if (
       isEqual(config.clientDataCache, clientData) &&
-      isEqual(config.additionalModelOptionsCache, additionalModelOptions)
+      isEqual(config.additionalModelOptionsCache, additionalModelOptions) &&
+      config.additionalModelOptionsCacheScope === additionalModelOptionsScope
     ) {
       logForDebugging('[Bootstrap] Cache unchanged, skipping write')
       return
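For orientation, the payload fetchLocalOpenAIModelOptions builds for a local endpoint would look roughly like this; the values below are illustrative, assuming an LM Studio server on its default port:

```ts
// Illustrative shape of the cache payload for a local LM Studio endpoint;
// the model ids and label are example values, not fixed outputs.
const examplePayload = {
  clientData: null,
  additionalModelOptionsScope: 'openai:http://localhost:1234/v1',
  additionalModelOptions: [
    {
      value: 'qwen2.5-coder-7b-instruct',
      label: 'qwen2.5-coder-7b-instruct',
      description: 'Detected from LM Studio',
    },
  ],
}
```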
@@ -134,6 +197,7 @@ export async function fetchBootstrapData(): Promise<void> {
       ...current,
       clientDataCache: clientData,
       additionalModelOptionsCache: additionalModelOptions,
+      additionalModelOptionsCacheScope: additionalModelOptionsScope,
     }))
   } catch (error) {
     logError(error)
@@ -14,12 +14,19 @@ import {
 } from './providerConfig.js'
 
 const tempDirs: string[] = []
+const originalEnv = {
+  OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
+  OPENAI_API_BASE: process.env.OPENAI_API_BASE,
+}
 
 afterEach(() => {
   while (tempDirs.length > 0) {
     const dir = tempDirs.pop()
     if (dir) rmSync(dir, { recursive: true, force: true })
   }
+
+  process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
+  process.env.OPENAI_API_BASE = originalEnv.OPENAI_API_BASE
 })
 
 function createTempAuthJson(payload: Record<string, unknown>): string {
@@ -62,12 +69,26 @@ describe('Codex provider config', () => {
   })
 
   test('resolves codexplan alias to Codex transport with reasoning', () => {
+    delete process.env.OPENAI_BASE_URL
+    delete process.env.OPENAI_API_BASE
+
     const resolved = resolveProviderRequest({ model: 'codexplan' })
     expect(resolved.transport).toBe('codex_responses')
     expect(resolved.resolvedModel).toBe('gpt-5.4')
     expect(resolved.reasoning).toEqual({ effort: 'high' })
   })
 
+  test('does not force Codex transport when a local non-Codex base URL is explicit', () => {
+    const resolved = resolveProviderRequest({
+      model: 'codexplan',
+      baseUrl: 'http://127.0.0.1:8080/v1',
+    })
+
+    expect(resolved.transport).toBe('chat_completions')
+    expect(resolved.baseUrl).toBe('http://127.0.0.1:8080/v1')
+    expect(resolved.resolvedModel).toBe('gpt-5.4')
+  })
+
   test('resolves codexplan to Codex transport even when OPENAI_BASE_URL is the string "undefined"', () => {
     // On Windows, env vars can leak as the literal string "undefined" instead of
     // the JS value undefined when not properly unset (issue #336).
@@ -557,8 +557,12 @@ export function getAssistantMessageFromError(
   const stripped = error.message.replace(/^429\s+/, '')
   const innerMessage = stripped.match(/"message"\s*:\s*"([^"]*)"/)?.[1]
   const detail = innerMessage || stripped
+  const retryAfter = (error as APIError).headers?.get?.('retry-after')
+  const retryHint = retryAfter && !isNaN(Number(retryAfter))
+    ? `Try again in ${retryAfter} seconds.`
+    : 'Try again in a few seconds.'
   return createAssistantAPIErrorMessage({
-    content: `${API_ERROR_MESSAGE_PREFIX}: Request rejected (429) · ${detail || `this may be a temporary capacity issue${getAPIProvider() === 'firstParty' ? ' — check status.anthropic.com' : ''}`}`,
+    content: `${API_ERROR_MESSAGE_PREFIX}: Request rejected (429) · ${detail || 'this may be a temporary capacity issue'} — ${retryHint}`,
     error: 'rate_limit',
   })
 }
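One subtlety behind the `isNaN` guard above: RFC 9110 allows Retry-After to carry either a delay in seconds or an HTTP-date, so only the numeric form is echoed into the hint and date values fall back to the generic wording. A standalone sketch of that parse:

```ts
// Sketch of the Retry-After handling: numeric values become a concrete hint,
// HTTP-date values (also valid per RFC 9110) fall back to generic wording.
function buildRetryHint(retryAfter: string | null | undefined): string {
  return retryAfter && !isNaN(Number(retryAfter))
    ? `Try again in ${retryAfter} seconds.`
    : 'Try again in a few seconds.'
}

console.log(buildRetryHint('30')) // "Try again in 30 seconds."
console.log(buildRetryHint('Wed, 21 Oct 2026 07:28:00 GMT')) // generic fallback
console.log(buildRetryHint(null)) // generic fallback
```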
@@ -573,3 +573,80 @@ test('sanitizes malformed MCP tool schemas before sending them to OpenAI', async
   expect(properties?.priority?.enum).toEqual([0, 1, 2, 3])
   expect(properties?.priority).not.toHaveProperty('default')
 })
+
+// ---------------------------------------------------------------------------
+// Issue #202 — consecutive role coalescing (Devstral, Mistral strict templates)
+// ---------------------------------------------------------------------------
+
+function makeNonStreamResponse(content = 'ok'): Response {
+  return new Response(
+    JSON.stringify({
+      id: 'chatcmpl-test',
+      model: 'test-model',
+      choices: [{ message: { role: 'assistant', content }, finish_reason: 'stop' }],
+      usage: { prompt_tokens: 5, completion_tokens: 1, total_tokens: 6 },
+    }),
+    { headers: { 'Content-Type': 'application/json' } },
+  )
+}
+
+test('coalesces consecutive user messages to avoid alternation errors (issue #202)', async () => {
+  let sentMessages: Array<{ role: string; content: unknown }> | undefined
+
+  globalThis.fetch = (async (_input: unknown, init: RequestInit | undefined) => {
+    sentMessages = JSON.parse(String(init?.body)).messages
+    return makeNonStreamResponse()
+  }) as FetchType
+
+  const client = createOpenAIShimClient({}) as OpenAIShimClient
+
+  await client.beta.messages.create({
+    model: 'test-model',
+    system: 'sys',
+    messages: [
+      { role: 'user', content: 'first message' },
+      { role: 'user', content: 'second message' },
+    ],
+    max_tokens: 64,
+    stream: false,
+  })
+
+  expect(sentMessages?.length).toBe(2) // system + 1 merged user
+  expect(sentMessages?.[0]?.role).toBe('system')
+  expect(sentMessages?.[1]?.role).toBe('user')
+  const userContent = sentMessages?.[1]?.content as string
+  expect(userContent).toContain('first message')
+  expect(userContent).toContain('second message')
+})
+
+test('coalesces consecutive assistant messages preserving tool_calls (issue #202)', async () => {
+  let sentMessages: Array<{ role: string; content: unknown; tool_calls?: unknown[] }> | undefined
+
+  globalThis.fetch = (async (_input: unknown, init: RequestInit | undefined) => {
+    sentMessages = JSON.parse(String(init?.body)).messages
+    return makeNonStreamResponse()
+  }) as FetchType
+
+  const client = createOpenAIShimClient({}) as OpenAIShimClient
+
+  await client.beta.messages.create({
+    model: 'test-model',
+    system: 'sys',
+    messages: [
+      { role: 'user', content: 'go' },
+      { role: 'assistant', content: 'thinking...' },
+      {
+        role: 'assistant',
+        content: [{ type: 'tool_use', id: 'call_1', name: 'Bash', input: { command: 'ls' } }],
+      },
+      { role: 'user', content: [{ type: 'tool_result', tool_use_id: 'call_1', content: 'file.txt' }] },
+    ],
+    max_tokens: 64,
+    stream: false,
+  })
+
+  // system + user + merged assistant + tool
+  const assistantMsgs = sentMessages?.filter(m => m.role === 'assistant')
+  expect(assistantMsgs?.length).toBe(1) // two assistant turns merged into one
+  expect(assistantMsgs?.[0]?.tool_calls?.length).toBeGreaterThan(0)
+})
@@ -295,7 +295,41 @@ function convertMessages(
     }
   }
 
-  return result
+  // Coalescing pass: merge consecutive messages of the same role.
+  // OpenAI/vLLM/Ollama require strict user↔assistant alternation.
+  // Multiple consecutive tool messages are allowed (assistant → tool* → user).
+  // Consecutive user or assistant messages must be merged to avoid Jinja
+  // template errors like "roles must alternate" (Devstral, Mistral models).
+  const coalesced: OpenAIMessage[] = []
+  for (const msg of result) {
+    const prev = coalesced[coalesced.length - 1]
+
+    if (prev && prev.role === msg.role && msg.role !== 'tool' && msg.role !== 'system') {
+      const prevContent = prev.content
+      const curContent = msg.content
+
+      if (typeof prevContent === 'string' && typeof curContent === 'string') {
+        prev.content = prevContent + (prevContent && curContent ? '\n' : '') + curContent
+      } else {
+        const toArray = (
+          c: string | Array<{ type: string; text?: string; image_url?: { url: string } }> | undefined,
+        ): Array<{ type: string; text?: string; image_url?: { url: string } }> => {
+          if (!c) return []
+          if (typeof c === 'string') return c ? [{ type: 'text', text: c }] : []
+          return c
+        }
+        prev.content = [...toArray(prevContent), ...toArray(curContent)]
+      }
+
+      if (msg.tool_calls?.length) {
+        prev.tool_calls = [...(prev.tool_calls ?? []), ...msg.tool_calls]
+      }
+    } else {
+      coalesced.push(msg)
+    }
+  }
+
+  return coalesced
 }
 
 /**
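The merge rule above is easiest to see on a concrete transcript. Below is a self-contained, simplified sketch of the same pass (string contents only; the real code above also merges content-part arrays and carries tool_calls across):

```ts
// Simplified sketch of the coalescing rule from the hunk above:
// consecutive user/assistant messages merge; tool and system never do.
type Msg = { role: 'system' | 'user' | 'assistant' | 'tool'; content: string }

function coalesce(messages: Msg[]): Msg[] {
  const out: Msg[] = []
  for (const msg of messages) {
    const prev = out[out.length - 1]
    if (prev && prev.role === msg.role && msg.role !== 'tool' && msg.role !== 'system') {
      prev.content = prev.content + '\n' + msg.content
    } else {
      out.push({ ...msg })
    }
  }
  return out
}

// Two user turns collapse into one, so strict templates see clean alternation.
console.log(coalesce([
  { role: 'user', content: 'first' },
  { role: 'user', content: 'second' },
  { role: 'assistant', content: 'reply' },
]))
// -> [{ role: 'user', content: 'first\nsecond' }, { role: 'assistant', content: 'reply' }]
```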
@@ -1,6 +1,22 @@
-import { expect, test } from 'bun:test'
+import { afterEach, expect, test } from 'bun:test'
 
-import { isLocalProviderUrl } from './providerConfig.js'
+import {
+  getAdditionalModelOptionsCacheScope,
+  isLocalProviderUrl,
+  resolveProviderRequest,
+} from './providerConfig.js'
+
+const originalEnv = {
+  CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
+  OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
+  OPENAI_MODEL: process.env.OPENAI_MODEL,
+}
+
+afterEach(() => {
+  process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
+  process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
+  process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
+})
 
 test('treats localhost endpoints as local', () => {
   expect(isLocalProviderUrl('http://localhost:11434/v1')).toBe(true)
@@ -33,3 +49,37 @@ test('treats public hosts as remote', () => {
   expect(isLocalProviderUrl('https://example.com/v1')).toBe(false)
   expect(isLocalProviderUrl('http://[2001:4860:4860::8888]:11434/v1')).toBe(false)
 })
+
+test('creates a cache scope for local openai-compatible providers', () => {
+  process.env.CLAUDE_CODE_USE_OPENAI = '1'
+  process.env.OPENAI_BASE_URL = 'http://localhost:1234/v1'
+  process.env.OPENAI_MODEL = 'llama-3.2-3b-instruct'
+
+  expect(getAdditionalModelOptionsCacheScope()).toBe(
+    'openai:http://localhost:1234/v1',
+  )
+})
+
+test('keeps codex alias models on chat completions for local openai-compatible providers', () => {
+  process.env.CLAUDE_CODE_USE_OPENAI = '1'
+  process.env.OPENAI_BASE_URL = 'http://127.0.0.1:8080/v1'
+  process.env.OPENAI_MODEL = 'gpt-5.4'
+
+  expect(resolveProviderRequest()).toMatchObject({
+    transport: 'chat_completions',
+    requestedModel: 'gpt-5.4',
+    resolvedModel: 'gpt-5.4',
+    baseUrl: 'http://127.0.0.1:8080/v1',
+  })
+  expect(getAdditionalModelOptionsCacheScope()).toBe(
+    'openai:http://127.0.0.1:8080/v1',
+  )
+})
+
+test('skips local model cache scope for remote openai-compatible providers', () => {
+  process.env.CLAUDE_CODE_USE_OPENAI = '1'
+  process.env.OPENAI_BASE_URL = 'https://api.openai.com/v1'
+  process.env.OPENAI_MODEL = 'gpt-4o'
+
+  expect(getAdditionalModelOptionsCacheScope()).toBeNull()
+})
@@ -219,6 +219,14 @@ export function isCodexAlias(model: string): boolean {
   return base in CODEX_ALIAS_MODELS
 }
 
+export function shouldUseCodexTransport(
+  model: string,
+  baseUrl: string | undefined,
+): boolean {
+  const explicitBaseUrl = asEnvUrl(baseUrl)
+  return isCodexBaseUrl(explicitBaseUrl) || (!explicitBaseUrl && isCodexAlias(model))
+}
+
 export function isLocalProviderUrl(baseUrl: string | undefined): boolean {
   if (!baseUrl) return false
   try {
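The body of isLocalProviderUrl is cut off in this hunk. Going only by the tests elsewhere in this diff (loopback and localhost hosts are local; public hostnames and global IPv6 addresses are not), a minimal sketch might look like the following. This is an assumption, not the project's actual implementation, which may recognize more private ranges:

```ts
// Hedged sketch of what isLocalProviderUrl appears to guarantee, based on the
// tests in this diff; the real implementation may cover additional cases.
function isLocalProviderUrlSketch(baseUrl: string | undefined): boolean {
  if (!baseUrl) return false
  try {
    // WHATWG URL keeps brackets on IPv6 hostnames, so '[::1]' compares directly.
    const { hostname } = new URL(baseUrl)
    return (
      hostname === 'localhost' ||
      hostname === '127.0.0.1' ||
      hostname === '0.0.0.0' ||
      hostname === '[::1]'
    )
  } catch {
    return false
  }
}

console.log(isLocalProviderUrlSketch('http://localhost:11434/v1')) // true
console.log(isLocalProviderUrlSketch('https://example.com/v1')) // false
console.log(isLocalProviderUrlSketch('http://[2001:4860:4860::8888]:11434/v1')) // false
```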
@@ -302,13 +310,8 @@ export function resolveProviderRequest(options?: {
     asEnvUrl(options?.baseUrl) ??
     asEnvUrl(process.env.OPENAI_BASE_URL) ??
     asEnvUrl(process.env.OPENAI_API_BASE)
-  // Use Codex transport only when:
-  // - the base URL is explicitly the Codex endpoint, OR
-  // - the model is a Codex alias AND no custom base URL has been set
-  // A custom OPENAI_BASE_URL (e.g. Azure, OpenRouter) always wins over
-  // model-name-based Codex detection to prevent auth failures (#200, #203).
   const transport: ProviderTransport =
-    isCodexBaseUrl(rawBaseUrl) || (!rawBaseUrl && isCodexAlias(requestedModel))
+    shouldUseCodexTransport(requestedModel, rawBaseUrl)
       ? 'codex_responses'
       : 'chat_completions'
 
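The comment block removed in this hunk survives as the logic of shouldUseCodexTransport. A sketch of the decision it encodes; the helpers and the Codex base URL constant below are illustrative stand-ins for the real ones in providerConfig.ts:

```ts
// Sketch of the rule shouldUseCodexTransport encodes; isCodexBaseUrl,
// isCodexAlias, and CODEX_BASE stand in for the real helpers and constant.
const CODEX_BASE = 'https://example-codex-endpoint/v1' // placeholder value
const isCodexBaseUrl = (url?: string) => !!url && url.startsWith(CODEX_BASE)
const isCodexAlias = (model: string) => ['codex', 'codexplan'].includes(model)

function shouldUseCodexTransportSketch(model: string, baseUrl?: string): boolean {
  // An explicit custom base URL (Azure, OpenRouter, a local server) always
  // wins over model-name detection, preventing auth failures (#200, #203).
  return isCodexBaseUrl(baseUrl) || (!baseUrl && isCodexAlias(model))
}

console.log(shouldUseCodexTransportSketch('codexplan')) // true: codex_responses
console.log(shouldUseCodexTransportSketch('codexplan', 'http://127.0.0.1:8080/v1')) // false
console.log(shouldUseCodexTransportSketch('gpt-4o')) // false
```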
@@ -337,6 +340,30 @@
   }
 }
 
+export function getAdditionalModelOptionsCacheScope(): string | null {
+  if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)) {
+    if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) &&
+        !isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) &&
+        !isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK) &&
+        !isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX) &&
+        !isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY)) {
+      return 'firstParty'
+    }
+    return null
+  }
+
+  const request = resolveProviderRequest()
+  if (request.transport !== 'chat_completions') {
+    return null
+  }
+
+  if (!isLocalProviderUrl(request.baseUrl)) {
+    return null
+  }
+
+  return `openai:${request.baseUrl.toLowerCase()}`
+}
+
 export function resolveCodexAuthPath(
   env: NodeJS.ProcessEnv = process.env,
 ): string {
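The scope string doubles as the cache key, so switching endpoints naturally invalidates a stale model list. Runnable inside the repo, the function resolves to one of three shapes, as the providerConfig tests elsewhere in this diff pin down:

```ts
// Expected scope values, mirroring the providerConfig tests in this diff
// (env values illustrative; run from src/services/api inside the repo).
import { getAdditionalModelOptionsCacheScope } from './providerConfig.js'

process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'http://localhost:1234/v1'
console.log(getAdditionalModelOptionsCacheScope()) // 'openai:http://localhost:1234/v1'

process.env.OPENAI_BASE_URL = 'https://api.openai.com/v1'
console.log(getAdditionalModelOptionsCacheScope()) // null (remote endpoints skip local discovery)
```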
@@ -576,6 +576,7 @@ export type GlobalConfig = {
 
   // Additional model options for the model picker (fetched during bootstrap).
   additionalModelOptionsCache?: ModelOption[]
+  additionalModelOptionsCacheScope?: string
 
   // Additional model options discovered from OpenAI-compatible endpoints.
   openaiAdditionalModelOptionsCache?: ModelOption[]
@@ -1,5 +1,6 @@
 // biome-ignore-all assist/source/organizeImports: internal-only import markers must not be reordered
 import { getInitialMainLoopModel } from '../../bootstrap/state.js'
+import { getAdditionalModelOptionsCacheScope } from '../../services/api/providerConfig.js'
 import {
   isClaudeAISubscriber,
   isMaxSubscriber,
@@ -44,6 +45,25 @@ export type ModelOption = {
   descriptionForModel?: string
 }
 
+function getScopedAdditionalModelOptions(): ModelOption[] {
+  const config = getGlobalConfig()
+  const activeScope = getAdditionalModelOptionsCacheScope()
+
+  if (!activeScope) {
+    return []
+  }
+
+  if (config.additionalModelOptionsCacheScope !== undefined) {
+    return config.additionalModelOptionsCacheScope === activeScope
+      ? (config.additionalModelOptionsCache ?? [])
+      : []
+  }
+
+  return activeScope === 'firstParty'
+    ? (config.additionalModelOptionsCache ?? [])
+    : []
+}
+
 export function getDefaultOptionForUser(fastMode = false): ModelOption {
   if (process.env.USER_TYPE === 'ant') {
     const currentModel = renderDefaultModelSetting(
@@ -408,6 +428,16 @@ function getModelOptionsBase(fastMode = false): ModelOption[] {
     return standardOptions
   }
 
+  if (getAdditionalModelOptionsCacheScope()?.startsWith('openai:')) {
+    const activeOpenAIOptions = getActiveOpenAIModelOptionsCache()
+    return [
+      getDefaultOptionForUser(fastMode),
+      ...(activeOpenAIOptions.length > 0
+        ? activeOpenAIOptions
+        : getScopedAdditionalModelOptions()),
+    ]
+  }
+
   // PAYG 1P API: Default (Sonnet) + Sonnet 1M + Opus 4.6 + Opus 1M + Haiku
   if (getAPIProvider() === 'firstParty') {
     const payg1POptions = [getDefaultOptionForUser(fastMode)]
@@ -566,13 +596,8 @@ export function getModelOptions(fastMode = false): ModelOption[] {
     })
   }
 
-  const additionalOptions =
-    getAPIProvider() === 'openai'
-      ? getActiveOpenAIModelOptionsCache()
-      : getGlobalConfig().additionalModelOptionsCache ?? []
-
-  // Append additional model options fetched during bootstrap/endpoints.
-  for (const opt of additionalOptions) {
+  // Append additional model options fetched during bootstrap
+  for (const opt of getScopedAdditionalModelOptions()) {
     if (!options.some(existing => existing.value === opt.value)) {
       options.push(opt)
     }
@@ -23,9 +23,13 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
   'gpt-4.1-nano': 1_047_576,
   'gpt-4-turbo': 128_000,
   'gpt-4': 8_192,
+  'o1': 200_000,
+  'o1-mini': 128_000,
+  'o1-preview': 128_000,
+  'o1-pro': 200_000,
+  'o3': 200_000,
   'o3-mini': 200_000,
   'o4-mini': 200_000,
-  'o3': 200_000,
 
   // DeepSeek (V3: 128k context per official docs)
   'deepseek-chat': 128_000,
@@ -63,6 +67,9 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
   'phi4:14b': 16_384,
   'gemma2:27b': 8_192,
   'codellama:13b': 16_384,
+  'llama3.2:1b': 128_000,
+  'qwen3:8b': 128_000,
+  'codestral': 32_768,
 }
 
 /**
@@ -82,9 +89,13 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
   'gpt-4.1-nano': 32_768,
   'gpt-4-turbo': 4_096,
   'gpt-4': 4_096,
+  'o1': 100_000,
+  'o1-mini': 65_536,
+  'o1-preview': 32_768,
+  'o1-pro': 100_000,
+  'o3': 100_000,
   'o3-mini': 100_000,
   'o4-mini': 100_000,
-  'o3': 100_000,
 
   // DeepSeek
   'deepseek-chat': 8_192,
@@ -120,6 +131,9 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
   'phi4:14b': 4_096,
   'gemma2:27b': 4_096,
   'codellama:13b': 4_096,
+  'llama3.2:1b': 4_096,
+  'qwen3:8b': 8_192,
+  'codestral': 8_192,
 }
 
 function lookupByModel<T>(table: Record<string, T>, model: string): T | undefined {
@@ -7,6 +7,9 @@ const originalEnv = {
   CLAUDE_CODE_USE_BEDROCK: process.env.CLAUDE_CODE_USE_BEDROCK,
   CLAUDE_CODE_USE_VERTEX: process.env.CLAUDE_CODE_USE_VERTEX,
   CLAUDE_CODE_USE_FOUNDRY: process.env.CLAUDE_CODE_USE_FOUNDRY,
+  OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
+  OPENAI_API_BASE: process.env.OPENAI_API_BASE,
+  OPENAI_MODEL: process.env.OPENAI_MODEL,
 }
 
 afterEach(() => {
@@ -16,6 +19,9 @@ afterEach(() => {
   process.env.CLAUDE_CODE_USE_BEDROCK = originalEnv.CLAUDE_CODE_USE_BEDROCK
   process.env.CLAUDE_CODE_USE_VERTEX = originalEnv.CLAUDE_CODE_USE_VERTEX
   process.env.CLAUDE_CODE_USE_FOUNDRY = originalEnv.CLAUDE_CODE_USE_FOUNDRY
+  process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
+  process.env.OPENAI_API_BASE = originalEnv.OPENAI_API_BASE
+  process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
 })
 
 async function importFreshProvidersModule() {
@@ -29,6 +35,9 @@ function clearProviderEnv(): void {
   delete process.env.CLAUDE_CODE_USE_BEDROCK
   delete process.env.CLAUDE_CODE_USE_VERTEX
   delete process.env.CLAUDE_CODE_USE_FOUNDRY
+  delete process.env.OPENAI_BASE_URL
+  delete process.env.OPENAI_API_BASE
+  delete process.env.OPENAI_MODEL
 }
 
 test('first-party provider keeps Anthropic account setup flow enabled', () => {
@@ -69,3 +78,32 @@ test('GEMINI takes precedence over GitHub when both are set', async () => {
 
   expect(getAPIProvider()).toBe('gemini')
 })
+
+test('explicit local openai-compatible base URLs stay on the openai provider', async () => {
+  clearProviderEnv()
+  process.env.CLAUDE_CODE_USE_OPENAI = '1'
+  process.env.OPENAI_BASE_URL = 'http://127.0.0.1:8080/v1'
+  process.env.OPENAI_MODEL = 'gpt-5.4'
+
+  const { getAPIProvider } = await importFreshProvidersModule()
+  expect(getAPIProvider()).toBe('openai')
+})
+
+test('codex aliases still resolve to the codex provider without a non-codex base URL', async () => {
+  clearProviderEnv()
+  process.env.CLAUDE_CODE_USE_OPENAI = '1'
+  process.env.OPENAI_MODEL = 'codexplan'
+
+  const { getAPIProvider } = await importFreshProvidersModule()
+  expect(getAPIProvider()).toBe('codex')
+})
+
+test('official OpenAI base URLs now keep provider detection on openai for aliases', async () => {
+  clearProviderEnv()
+  process.env.CLAUDE_CODE_USE_OPENAI = '1'
+  process.env.OPENAI_BASE_URL = 'https://api.openai.com/v1'
+  process.env.OPENAI_MODEL = 'gpt-5.4'
+
+  const { getAPIProvider } = await importFreshProvidersModule()
+  expect(getAPIProvider()).toBe('openai')
+})
@@ -1,5 +1,5 @@
 import type { AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS } from '../../services/analytics/index.js'
-import { isCodexAlias } from '../../services/api/providerConfig.js'
+import { shouldUseCodexTransport } from '../../services/api/providerConfig.js'
 import { isEnvTruthy } from '../envUtils.js'
 
 export type APIProvider =
@@ -34,11 +34,10 @@ export function usesAnthropicAccountFlow(): boolean {
   return getAPIProvider() === 'firstParty'
 }
 function isCodexModel(): boolean {
-  const model = (process.env.OPENAI_MODEL || '').trim()
-  if (!model) return false
-  // Delegate to the canonical alias table in providerConfig to keep
-  // the two Codex detection systems (provider type + transport) in sync.
-  return isCodexAlias(model)
+  return shouldUseCodexTransport(
+    process.env.OPENAI_MODEL || '',
+    process.env.OPENAI_BASE_URL ?? process.env.OPENAI_API_BASE,
+  )
 }
 
 export function getAPIProviderForStatsig(): AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS {
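With isCodexModel delegating to shouldUseCodexTransport, provider detection and transport selection now share one rule: an explicit non-Codex base URL keeps both on the plain OpenAI path. A sketch of the observable behavior; note the tests above re-import the module fresh (detection may be cached at module scope), which this snippet imitates with a cache-busting query:

```ts
// Assumes fresh module evaluation per import, as importFreshProvidersModule()
// does in the tests above; results may otherwise be cached at module scope.
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_MODEL = 'codexplan'
delete process.env.OPENAI_BASE_URL

const { getAPIProvider } = await import('./providers.js')
console.log(getAPIProvider()) // 'codex': alias with no custom base URL

process.env.OPENAI_BASE_URL = 'http://127.0.0.1:8080/v1'
const fresh = await import(`./providers.js?bust=${Date.now()}`)
console.log(fresh.getAPIProvider()) // 'openai': explicit local base URL wins
```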
src/utils/plugins/pluginLoader.test.ts (new file, 71 lines)
@@ -0,0 +1,71 @@
+import { describe, expect, test } from 'bun:test'
+
+import type { LoadedPlugin } from '../../types/plugin.js'
+import { mergePluginSources } from './pluginLoader.js'
+
+function marketplacePlugin(
+  name: string,
+  marketplace: string,
+  enabled: boolean,
+): LoadedPlugin {
+  const pluginId = `${name}@${marketplace}`
+  return {
+    name,
+    manifest: { name } as LoadedPlugin['manifest'],
+    path: `/tmp/${pluginId}`,
+    source: pluginId,
+    repository: pluginId,
+    enabled,
+  }
+}
+
+describe('mergePluginSources', () => {
+  test('keeps the enabled copy when duplicate marketplace plugins disagree on enabled state', () => {
+    const enabledOfficial = marketplacePlugin(
+      'frontend-design',
+      'claude-plugins-official',
+      true,
+    )
+    const disabledLegacy = marketplacePlugin(
+      'frontend-design',
+      'claude-code-plugins',
+      false,
+    )
+
+    const result = mergePluginSources({
+      session: [],
+      marketplace: [disabledLegacy, enabledOfficial],
+      builtin: [],
+    })
+
+    expect(result.plugins).toEqual([enabledOfficial])
+    expect(result.errors).toEqual([])
+  })
+
+  test('keeps the later copy when duplicate marketplace plugins are both enabled', () => {
+    const legacy = marketplacePlugin(
+      'frontend-design',
+      'claude-code-plugins',
+      true,
+    )
+    const official = marketplacePlugin(
+      'frontend-design',
+      'claude-plugins-official',
+      true,
+    )
+
+    const result = mergePluginSources({
+      session: [],
+      marketplace: [legacy, official],
+      builtin: [],
+    })
+
+    expect(result.plugins).toEqual([official])
+    expect(result.errors).toHaveLength(1)
+    expect(result.errors[0]).toMatchObject({
+      type: 'generic-error',
+      source: legacy.source,
+      plugin: legacy.name,
+    })
+  })
+})
@@ -3045,24 +3045,63 @@ export function mergePluginSources(sources: {
   })
 
   const sessionNames = new Set(sessionPlugins.map(p => p.name))
-  const marketplacePlugins = sources.marketplace.filter(p => {
-    if (sessionNames.has(p.name)) {
+  // Different marketplaces can enable the same short plugin name, but
+  // downstream command/skill loading scopes by plugin.name.
+  const marketplacePluginsByName = new Map<string, LoadedPlugin>()
+  for (const plugin of sources.marketplace) {
+    if (sessionNames.has(plugin.name)) {
       logForDebugging(
-        `Plugin "${p.name}" from --plugin-dir overrides installed version`,
+        `Plugin "${plugin.name}" from --plugin-dir overrides installed version`,
       )
-      return false
+      continue
     }
-    return true
-  })
+    const existing = marketplacePluginsByName.get(plugin.name)
+    if (!existing) {
+      marketplacePluginsByName.set(plugin.name, plugin)
+      continue
+    }
+
+    const winner = selectMarketplacePlugin(existing, plugin)
+    const dropped = winner === existing ? plugin : existing
+    marketplacePluginsByName.set(plugin.name, winner)
+
+    logForDebugging(
+      `Ignoring duplicate marketplace plugin "${plugin.name}" from ${dropped.source}; using ${winner.source}`,
+      { level: 'warn' },
+    )
+    if (existing.enabled && plugin.enabled) {
+      errors.push({
+        type: 'generic-error',
+        source: dropped.source,
+        plugin: plugin.name,
+        error: `Duplicate marketplace plugin "${plugin.name}" ignored: using "${winner.source}" and skipping "${dropped.source}" to avoid short-name collisions`,
+      })
+    }
+  }
   // Session first, then non-overridden marketplace, then builtin.
   // Downstream first-match consumers see session plugins before
   // installed ones for any that slipped past the name filter.
   return {
-    plugins: [...sessionPlugins, ...marketplacePlugins, ...sources.builtin],
+    plugins: [
+      ...sessionPlugins,
+      ...marketplacePluginsByName.values(),
+      ...sources.builtin,
+    ],
     errors,
   }
 }
 
+function selectMarketplacePlugin(
+  current: LoadedPlugin,
+  candidate: LoadedPlugin,
+): LoadedPlugin {
+  if (current.enabled !== candidate.enabled) {
+    return candidate.enabled ? candidate : current
+  }
+
+  return candidate
+}
+
 /**
  * Main plugin loading function that discovers and loads all plugins.
 *
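In practice the dedup means a plugin installed from two marketplaces surfaces once, and an error is reported only when both copies were enabled, so a real conflict is never dropped silently. A compact usage sketch (plugin records abbreviated with a cast; see the new test file above for fully-shaped ones):

```ts
import { mergePluginSources } from './pluginLoader.js'
import type { LoadedPlugin } from '../../types/plugin.js'

// Minimal stand-in plugin records; pluginLoader.test.ts builds complete ones.
const plugin = (name: string, marketplace: string, enabled: boolean) =>
  ({ name, source: `${name}@${marketplace}`, enabled } as unknown as LoadedPlugin)

const { plugins, errors } = mergePluginSources({
  session: [],
  marketplace: [
    plugin('frontend-design', 'claude-code-plugins', true),
    plugin('frontend-design', 'claude-plugins-official', true),
  ],
  builtin: [],
})

console.log(plugins.map(p => p.source)) // ['frontend-design@claude-plugins-official']
console.log(errors.length) // 1: both copies were enabled, so the drop is surfaced
```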
src/utils/providerDiscovery.test.ts (new file, 78 lines)
@@ -0,0 +1,78 @@
+import { afterEach, expect, mock, test } from 'bun:test'
+
+import {
+  getLocalOpenAICompatibleProviderLabel,
+  listOpenAICompatibleModels,
+} from './providerDiscovery.js'
+
+const originalFetch = globalThis.fetch
+const originalEnv = {
+  OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
+}
+
+afterEach(() => {
+  globalThis.fetch = originalFetch
+  process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
+})
+
+test('lists models from a local openai-compatible /models endpoint', async () => {
+  globalThis.fetch = mock((input, init) => {
+    const url = typeof input === 'string' ? input : input.url
+    expect(url).toBe('http://localhost:1234/v1/models')
+    expect(init?.headers).toEqual({ Authorization: 'Bearer local-key' })
+
+    return Promise.resolve(
+      new Response(
+        JSON.stringify({
+          data: [
+            { id: 'qwen2.5-coder-7b-instruct' },
+            { id: 'llama-3.2-3b-instruct' },
+            { id: 'qwen2.5-coder-7b-instruct' },
+          ],
+        }),
+        { status: 200 },
+      ),
+    )
+  }) as typeof globalThis.fetch
+
+  await expect(
+    listOpenAICompatibleModels({
+      baseUrl: 'http://localhost:1234/v1',
+      apiKey: 'local-key',
+    }),
+  ).resolves.toEqual([
+    'qwen2.5-coder-7b-instruct',
+    'llama-3.2-3b-instruct',
+  ])
+})
+
+test('returns null when a local openai-compatible /models request fails', async () => {
+  globalThis.fetch = mock(() =>
+    Promise.resolve(new Response('not available', { status: 503 })),
+  ) as typeof globalThis.fetch
+
+  await expect(
+    listOpenAICompatibleModels({ baseUrl: 'http://localhost:1234/v1' }),
+  ).resolves.toBeNull()
+})
+
+test('detects LM Studio from the default localhost port', () => {
+  expect(getLocalOpenAICompatibleProviderLabel('http://localhost:1234/v1')).toBe(
+    'LM Studio',
+  )
+})
+
+test('detects common local openai-compatible providers by hostname', () => {
+  expect(
+    getLocalOpenAICompatibleProviderLabel('http://localai.local:8080/v1'),
+  ).toBe('LocalAI')
+  expect(
+    getLocalOpenAICompatibleProviderLabel('http://vllm.local:8000/v1'),
+  ).toBe('vLLM')
+})
+
+test('falls back to a generic local openai-compatible label', () => {
+  expect(
+    getLocalOpenAICompatibleProviderLabel('http://127.0.0.1:8080/v1'),
+  ).toBe('Local OpenAI-compatible')
+})
@@ -1,4 +1,5 @@
 import type { OllamaModelDescriptor } from './providerRecommendation.ts'
+import { DEFAULT_OPENAI_BASE_URL } from '../services/api/providerConfig.js'
 
 export const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434'
 export const DEFAULT_ATOMIC_CHAT_BASE_URL = 'http://127.0.0.1:1337'
@@ -53,6 +54,64 @@ export function getAtomicChatChatBaseUrl(baseUrl?: string): string {
   return `${getAtomicChatApiBaseUrl(baseUrl)}/v1`
 }
 
+export function getOpenAICompatibleModelsBaseUrl(baseUrl?: string): string {
+  return (
+    baseUrl || process.env.OPENAI_BASE_URL || DEFAULT_OPENAI_BASE_URL
+  ).replace(/\/+$/, '')
+}
+
+export function getLocalOpenAICompatibleProviderLabel(baseUrl?: string): string {
+  try {
+    const parsed = new URL(getOpenAICompatibleModelsBaseUrl(baseUrl))
+    const host = parsed.host.toLowerCase()
+    const hostname = parsed.hostname.toLowerCase()
+    const path = parsed.pathname.toLowerCase()
+    const haystack = `${hostname} ${path}`
+
+    if (
+      host.endsWith(':1234') ||
+      haystack.includes('lmstudio') ||
+      haystack.includes('lm-studio')
+    ) {
+      return 'LM Studio'
+    }
+    if (host.endsWith(':11434') || haystack.includes('ollama')) {
+      return 'Ollama'
+    }
+    if (haystack.includes('localai')) {
+      return 'LocalAI'
+    }
+    if (haystack.includes('jan')) {
+      return 'Jan'
+    }
+    if (haystack.includes('kobold')) {
+      return 'KoboldCpp'
+    }
+    if (haystack.includes('llama.cpp') || haystack.includes('llamacpp')) {
+      return 'llama.cpp'
+    }
+    if (haystack.includes('vllm')) {
+      return 'vLLM'
+    }
+    if (
+      haystack.includes('open-webui') ||
+      haystack.includes('openwebui')
+    ) {
+      return 'Open WebUI'
+    }
+    if (
+      haystack.includes('text-generation-webui') ||
+      haystack.includes('oobabooga')
+    ) {
+      return 'text-generation-webui'
+    }
+  } catch {
+    // Fall back to the generic label when the base URL is malformed.
+  }
+
+  return 'Local OpenAI-compatible'
+}
+
 export async function hasLocalOllama(baseUrl?: string): Promise<boolean> {
   const { signal, clear } = withTimeoutSignal(1200)
   try {
@@ -111,6 +170,46 @@ export async function listOllamaModels(
   }
 }
 
+export async function listOpenAICompatibleModels(options?: {
+  baseUrl?: string
+  apiKey?: string
+}): Promise<string[] | null> {
+  const { signal, clear } = withTimeoutSignal(5000)
+  try {
+    const response = await fetch(
+      `${getOpenAICompatibleModelsBaseUrl(options?.baseUrl)}/models`,
+      {
+        method: 'GET',
+        headers: options?.apiKey
+          ? {
+              Authorization: `Bearer ${options.apiKey}`,
+            }
+          : undefined,
+        signal,
+      },
+    )
+    if (!response.ok) {
+      return null
+    }
+
+    const data = (await response.json()) as {
+      data?: Array<{ id?: string }>
+    }
+
+    return Array.from(
+      new Set(
+        (data.data ?? [])
+          .filter(model => Boolean(model.id))
+          .map(model => model.id!),
+      ),
+    )
+  } catch {
+    return null
+  } finally {
+    clear()
+  }
+}
+
 export async function hasLocalAtomicChat(baseUrl?: string): Promise<boolean> {
   const { signal, clear } = withTimeoutSignal(1200)
   try {
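Taken together, discovery is a single GET to the provider's /models route with an optional bearer token, deduplicated ids, and null on any failure so callers can fall back quietly. An illustrative call against a local server (endpoint and printed output are assumptions):

```ts
import {
  getLocalOpenAICompatibleProviderLabel,
  listOpenAICompatibleModels,
} from './providerDiscovery.js'

const baseUrl = 'http://localhost:1234/v1' // e.g. LM Studio's default port

const models = await listOpenAICompatibleModels({ baseUrl })
if (models === null) {
  console.log('discovery failed; keep whatever is cached')
} else {
  console.log(`${getLocalOpenAICompatibleProviderLabel(baseUrl)} serves:`, models)
}
```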