Compare commits


8 Commits

Author SHA1 Message Date
Kevin Codex
94de37d44f chore: release 0.1.8 2026-04-06 13:45:02 +08:00
Kevin Codex
3b3aca716d test: fix post-merge suite regressions (#419) 2026-04-06 13:32:05 +08:00
Juan Camilo Auriti
d5852ca73d fix: coalesce consecutive same-role messages for strict template models (#241)
Models served through Ollama/vLLM with strict Jinja chat templates
(Devstral, Mistral, etc.) enforce user↔assistant role alternation and
reject requests that contain consecutive messages of the same role.

convertMessages() could produce consecutive user or assistant messages in
three scenarios: batched user input, text-only + tool_use assistant turns,
and tool result remainders followed by another user message.

Added a coalescing pass at the end of convertMessages() that merges
consecutive same-role messages (string concat or array concat), preserving
tool_calls on assistant messages. Tool and system messages are excluded
from coalescing as they have their own alternation rules.

Includes regression tests for both user and assistant coalescing.

Fixes #202
2026-04-06 06:47:11 +08:00
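
A minimal before/after sketch of the coalescing this commit describes (values are illustrative; the real pass sits at the end of convertMessages(), shown in the diff further down):

// Two consecutive user turns (e.g. batched input) break strict templates.
const before = [
  { role: 'user', content: 'first message' },
  { role: 'user', content: 'second message' },
]
// After coalescing, string contents are joined with a newline, so the
// model sees a single user turn and role alternation is preserved.
const after = [
  { role: 'user', content: 'first message\nsecond message' },
]
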
Technomancer702
c534aa5771 Feature: Add local OpenAI-compatible model discovery to /model (#201)
* Add local OpenAI-compatible model discovery to /model

* Guard local OpenAI model discovery from Codex routing

* Preserve remote OpenAI Codex alias behavior
2026-04-06 06:46:06 +08:00
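
A usage sketch of the discovery helper this feature adds (the helper and its options come from the providerDiscovery diff below; the import path, endpoint, and key are illustrative assumptions):

import { listOpenAICompatibleModels } from './utils/providerDiscovery.js' // path assumed

// GETs <baseUrl>/models with an optional Bearer token; resolves to the
// deduplicated model ids, or null when the endpoint is unreachable.
const models = await listOpenAICompatibleModels({
  baseUrl: 'http://localhost:1234/v1', // e.g. LM Studio's default port
  apiKey: process.env.OPENAI_API_KEY,
})
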
Juan Camilo Auriti
60d3d8961a fix: add missing o1-series and Ollama models to context window table (#250)
Models not in the lookup table fall through to a 200k default, causing
auto-compact to never trigger for models with smaller actual context
windows. Users hit hard context_window_exceeded errors instead.

Added to both context window and max output token tables:
- o1, o1-mini, o1-preview, o1-pro (OpenAI reasoning models)
- llama3.2:1b, qwen3:8b, codestral (common Ollama models)

Relates to #248
2026-04-06 06:39:24 +08:00
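
A condensed sketch of the failure mode (the table name and 200k fallback mirror the diff below; the lookup helper here is hypothetical and simplified):

const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
  'llama3.2:1b': 128_000, // added by this commit
  'codestral': 32_768,    // added by this commit
}

// Before this commit, models missing from the table fell through to the
// 200k default, so auto-compact never fired for smaller-context models
// and the server rejected the request first.
function contextWindowFor(model: string): number {
  return OPENAI_CONTEXT_WINDOWS[model] ?? 200_000
}
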
Juan Camilo Auriti
3b9893b586 security: force lodash-es 4.18.0 for transitive dependencies (#242)
* security: force lodash-es 4.18.0 for transitive dependencies

PR #225 bumped the direct lodash-es dependency to 4.18.0, but
@anthropic-ai/sandbox-runtime still pulled lodash-es@4.17.23 via its
own ^4.17.23 range. The transitive copy was vulnerable to:

- HIGH: Code Injection via _.template (GHSA-r5fr-rjxr-66jc)
- MODERATE: Prototype Pollution via _.unset/_.omit (GHSA-f23m-r3pf-42rh)

Added an `overrides` field in package.json to force all copies to 4.18.0.
`bun audit` now reports zero vulnerabilities.

* fix: use lodash-es 4.18.1 instead of deprecated 4.18.0

lodash-es 4.18.0 is explicitly deprecated by the maintainer with
the message "Bad release. Please use lodash-es@4.17.23 instead."
Updated both the direct dependency and the override to 4.18.1, which
is the latest non-deprecated release that patches the CVEs.
2026-04-06 06:37:40 +08:00
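
The shape of the resulting fix in package.json (mirrored from the diff below):

"overrides": {
  "lodash-es": "4.18.1"
}
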
Joe Tam
daf2c90b6d Fix duplicate marketplace plugin loading (#364)
Reproduction:
- Enable `frontend-design@claude-code-plugins`
- Enable `frontend-design@claude-plugins-official`
- Start OpenClaude with both marketplace plugins active
- Both plugins load, but downstream command and skill scopes key off the short plugin name, so both collapse to `frontend-design` and can interfere with interactive startup

Fix:
- Collapse duplicate marketplace plugins by short name during merge
- Keep the enabled copy when enabled state differs; otherwise keep the later config entry
- Add regression coverage for both cases
2026-04-06 06:36:45 +08:00
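
A worked example of the selection rule, using the two plugins from the reproduction (shapes follow the regression test in the diff below, trimmed to the relevant fields):

// Both copies are enabled, so the later config entry wins and the dropped
// one is surfaced as a generic-error. If only one were enabled, the
// enabled copy would win regardless of order.
const legacy = { name: 'frontend-design', source: 'frontend-design@claude-code-plugins', enabled: true }
const official = { name: 'frontend-design', source: 'frontend-design@claude-plugins-official', enabled: true }
// mergePluginSources({ session: [], marketplace: [legacy, official], builtin: [] })
//   → plugins: [official], errors: [{ type: 'generic-error', source: legacy.source, ... }]
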
CRABHIVE
4ac7367733 fix: include retry timing in 429 error messages (#366)
## Summary

- Extract retry-after header from 429 API errors and include timing
  guidance in the user-facing error message
- Previously, non-quota 429 errors showed a generic message with no
  guidance on when to retry, only a link to status.anthropic.com

## Impact

- user-facing impact: 429 error messages now tell users when to retry
  instead of just linking to a status page
- developer/maintainer impact: none

## Testing

- [x] `bun run build`
- [ ] `bun run smoke`
- [ ] focused tests: error formatting is pure string construction,
  verified via build + manual inspection

## Notes

- provider/model path tested: applies to all providers returning 429
- screenshots attached (if UI changed): n/a
- follow-up work or known limitations: 529 errors could get similar
  treatment in a follow-up

https://claude.ai/code/session_01D7kprMn4c66a5WrZscF7rv

Co-authored-by: Claude <noreply@anthropic.com>
2026-04-06 06:36:14 +08:00
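
A sketch of the new behavior (the helper name is hypothetical; the branch logic mirrors the error-formatting diff below, and the rendered messages are approximate):

// 429 with a numeric retry-after header, e.g. "retry-after: 12":
//   "… Request rejected (429) · <detail> · Try again in 12 seconds."
// 429 without the header (or a non-numeric value):
//   "… Request rejected (429) · <detail> · Try again in a few seconds."
function retryHintFor(retryAfter: string | null): string {
  return retryAfter && !isNaN(Number(retryAfter))
    ? `Try again in ${retryAfter} seconds.`
    : 'Try again in a few seconds.'
}
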
25 changed files with 805 additions and 219 deletions

View File

@@ -51,7 +51,7 @@
"ignore": "7.0.5",
"indent-string": "5.0.0",
"jsonc-parser": "3.3.1",
"lodash-es": "4.18.0",
"lodash-es": "4.18.1",
"lru-cache": "11.2.7",
"marked": "15.0.12",
"p-map": "7.0.4",
@@ -88,6 +88,9 @@
},
},
},
"overrides": {
"lodash-es": "4.18.1",
},
"packages": {
"@alcalzone/ansi-tokenize": ["@alcalzone/ansi-tokenize@0.3.0", "", { "dependencies": { "ansi-styles": "^6.2.1", "is-fullwidth-code-point": "^5.0.0" } }, "sha512-p+CMKJ93HFmLkjXKlXiVGlMQEuRb6H0MokBSwUsX+S6BRX8eV5naFZpQJFfJHjRZY0Hmnqy1/r6UWl3x+19zYA=="],
@@ -657,7 +660,7 @@
"locate-path": ["locate-path@5.0.0", "", { "dependencies": { "p-locate": "^4.1.0" } }, "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g=="],
"lodash-es": ["lodash-es@4.18.0", "", {}, "sha512-koAgswPPA+UTaPN64Etp+PGP+WT6oqOS2NMi5yDkMaiGw9qY4VxQbQF0mtKMyr4BlTznWyzePV5UpECTJQmSUA=="],
"lodash-es": ["lodash-es@4.18.1", "", {}, "sha512-J8xewKD/Gk22OZbhpOVSwcs60zhd95ESDwezOFuA3/099925PdHJ7OFHNTGtajL3AlZkykD32HykiMo+BIBI8A=="],
"lodash.camelcase": ["lodash.camelcase@4.3.0", "", {}, "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA=="],
@@ -891,8 +894,6 @@
"zod-to-json-schema": ["zod-to-json-schema@3.25.2", "", { "peerDependencies": { "zod": "^3.25.28 || ^4" } }, "sha512-O/PgfnpT1xKSDeQYSCfRI5Gy3hPf91mKVDuYLUHZJMiDFptvP41MSnWofm8dnCm0256ZNfZIM7DSzuSMAFnjHA=="],
"@anthropic-ai/sandbox-runtime/lodash-es": ["lodash-es@4.17.23", "", {}, "sha512-kVI48u3PZr38HdYz98UmfPnXl2DXrpdctLrFLCd3kOx1xUkOmpFPx7gCWWM5MPkL/fD8zb+Ph0QzjGFs4+hHWg=="],
"@aws-crypto/crc32/@aws-crypto/util": ["@aws-crypto/util@5.2.0", "", { "dependencies": { "@aws-sdk/types": "^3.222.0", "@smithy/util-utf8": "^2.0.0", "tslib": "^2.6.2" } }, "sha512-4RkU9EsI6ZpBve5fseQlGNUWKMa1RLPQ1dnjnQoe07ldfIzcsGb5hC5W0Dm7u423KWzawlrpbjXBrXCEv9zazQ=="],
"@aws-crypto/crc32/tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],

View File

@@ -1,6 +1,6 @@
{
"name": "@gitlawb/openclaude",
"version": "0.1.7",
"version": "0.1.8",
"description": "Claude Code opened to any LLM — OpenAI, Gemini, DeepSeek, Ollama, and 200+ models",
"type": "module",
"bin": {
@@ -95,7 +95,7 @@
"ignore": "7.0.5",
"indent-string": "5.0.0",
"jsonc-parser": "3.3.1",
"lodash-es": "4.18.0",
"lodash-es": "4.18.1",
"lru-cache": "11.2.7",
"marked": "15.0.12",
"p-map": "7.0.4",
@@ -150,5 +150,8 @@
"license": "SEE LICENSE FILE",
"publishConfig": {
"access": "public"
},
"overrides": {
"lodash-es": "4.18.1"
}
}

View File

@@ -0,0 +1,42 @@
import { afterEach, expect, mock, test } from 'bun:test'
const originalEnv = {
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
OPENAI_MODEL: process.env.OPENAI_MODEL,
}
afterEach(() => {
mock.restore()
process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
})
test('opens the model picker without awaiting local model discovery refresh', async () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'http://127.0.0.1:8080/v1'
process.env.OPENAI_MODEL = 'qwen2.5-coder-7b-instruct'
let resolveDiscovery: (() => void) | undefined
const discoverOpenAICompatibleModelOptions = mock(
() =>
new Promise<void>(resolve => {
resolveDiscovery = resolve
}),
)
mock.module('../../utils/model/openaiModelDiscovery.js', () => ({
discoverOpenAICompatibleModelOptions,
}))
const { call } = await import(`./model.js?ts=${Date.now()}-${Math.random()}`)
const result = await Promise.race([
call(() => {}, {} as never, ''),
new Promise(resolve => setTimeout(() => resolve('timeout'), 50)),
])
resolveDiscovery?.()
expect(result).not.toBe('timeout')
})

View File

@@ -4,6 +4,7 @@ import * as React from 'react';
import type { CommandResultDisplay } from '../../commands.js';
import { ModelPicker } from '../../components/ModelPicker.js';
import { COMMON_HELP_ARGS, COMMON_INFO_ARGS } from '../../constants/xml.js';
import { fetchBootstrapData } from '../../services/api/bootstrap.js';
import { type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS, logEvent } from '../../services/analytics/index.js';
import { useAppState, useSetAppState } from '../../state/AppState.js';
import type { LocalJSXCommandCall } from '../../types/command.js';
@@ -19,6 +20,7 @@ import { getActiveOpenAIModelOptionsCache, setActiveOpenAIModelOptionsCache } fr
import { getDefaultMainLoopModelSetting, isOpus1mMergeEnabled, renderDefaultModelSetting } from '../../utils/model/model.js';
import { isModelAllowed } from '../../utils/model/modelAllowlist.js';
import { validateModel } from '../../utils/model/validateModel.js';
import { getAdditionalModelOptionsCacheScope } from '../../services/api/providerConfig.js';
function ModelPickerWrapper(t0) {
const $ = _c(17);
const {
@@ -319,7 +321,9 @@ export const call: LocalJSXCommandCall = async (onDone, _context, args) => {
});
return <SetModelAndClose args={args} onDone={onDone} />;
}
await refreshOpenAIModelOptionsCache();
if (getAdditionalModelOptionsCacheScope()?.startsWith('openai:')) {
void refreshOpenAIModelOptionsCache();
}
return <ModelPickerWrapper onDone={onDone} />;
};
function renderModelLabel(model: string | null): string {

View File

@@ -197,6 +197,21 @@ test('buildProfileSaveMessage maps provider fields without echoing secrets', ()
expect(message).not.toContain('sk-secret-12345678')
})
test('buildProfileSaveMessage labels local openai-compatible profiles consistently', () => {
const message = buildProfileSaveMessage(
'openai',
{
OPENAI_MODEL: 'gpt-5.4',
OPENAI_BASE_URL: 'http://127.0.0.1:8080/v1',
},
'D:/codings/Opensource/openclaude/.openclaude-profile.json',
)
expect(message).toContain('Saved Local OpenAI-compatible profile.')
expect(message).toContain('Model: gpt-5.4')
expect(message).toContain('Endpoint: http://127.0.0.1:8080/v1')
})
test('buildProfileSaveMessage describes Gemini access token / ADC mode clearly', () => {
const message = buildProfileSaveMessage(
'gemini',
@@ -230,6 +245,36 @@ test('buildCurrentProviderSummary redacts poisoned model and endpoint values', (
expect(summary.endpointLabel).toBe('sk-...5678')
})
test('buildCurrentProviderSummary labels generic local openai-compatible providers', () => {
const summary = buildCurrentProviderSummary({
processEnv: {
CLAUDE_CODE_USE_OPENAI: '1',
OPENAI_MODEL: 'qwen2.5-coder-7b-instruct',
OPENAI_BASE_URL: 'http://127.0.0.1:8080/v1',
},
persisted: null,
})
expect(summary.providerLabel).toBe('Local OpenAI-compatible')
expect(summary.modelLabel).toBe('qwen2.5-coder-7b-instruct')
expect(summary.endpointLabel).toBe('http://127.0.0.1:8080/v1')
})
test('buildCurrentProviderSummary does not relabel local gpt-5.4 providers as Codex', () => {
const summary = buildCurrentProviderSummary({
processEnv: {
CLAUDE_CODE_USE_OPENAI: '1',
OPENAI_MODEL: 'gpt-5.4',
OPENAI_BASE_URL: 'http://127.0.0.1:8080/v1',
},
persisted: null,
})
expect(summary.providerLabel).toBe('Local OpenAI-compatible')
expect(summary.modelLabel).toBe('gpt-5.4')
expect(summary.endpointLabel).toBe('http://127.0.0.1:8080/v1')
})
test('getProviderWizardDefaults ignores poisoned current provider values', () => {
const defaults = getProviderWizardDefaults({
OPENAI_API_KEY: 'sk-secret-12345678',

View File

@@ -15,6 +15,7 @@ import { Box, Text } from '../../ink.js'
import {
DEFAULT_CODEX_BASE_URL,
DEFAULT_OPENAI_BASE_URL,
isLocalProviderUrl,
resolveCodexApiCredentials,
resolveProviderRequest,
} from '../../services/api/providerConfig.js'
@@ -52,7 +53,11 @@ import {
recommendOllamaModel,
type RecommendationGoal,
} from '../../utils/providerRecommendation.js'
import { hasLocalOllama, listOllamaModels } from '../../utils/providerDiscovery.js'
import {
getLocalOpenAICompatibleProviderLabel,
hasLocalOllama,
listOllamaModels,
} from '../../utils/providerDiscovery.js'
type ProviderChoice = 'auto' | ProviderProfile | 'clear'
@@ -182,10 +187,8 @@ export function buildCurrentProviderSummary(options?: {
let providerLabel = 'OpenAI-compatible'
if (request.transport === 'codex_responses') {
providerLabel = 'Codex'
} else if (request.baseUrl.includes('localhost:11434')) {
providerLabel = 'Ollama'
} else if (request.baseUrl.includes('localhost:1234')) {
providerLabel = 'LM Studio'
} else if (isLocalProviderUrl(request.baseUrl)) {
providerLabel = getLocalOpenAICompatibleProviderLabel(request.baseUrl)
}
return {
@@ -272,16 +275,20 @@ function buildSavedProfileSummary(
),
}
case 'openai':
default:
default: {
const baseUrl = env.OPENAI_BASE_URL ?? DEFAULT_OPENAI_BASE_URL
return {
providerLabel: 'OpenAI-compatible',
providerLabel: isLocalProviderUrl(baseUrl)
? getLocalOpenAICompatibleProviderLabel(baseUrl)
: 'OpenAI-compatible',
modelLabel: getSafeDisplayValue(
env.OPENAI_MODEL ?? 'gpt-4o',
process.env,
env,
),
endpointLabel: getSafeDisplayValue(
env.OPENAI_BASE_URL ?? DEFAULT_OPENAI_BASE_URL,
baseUrl,
process.env,
env,
),
@@ -292,6 +299,7 @@ function buildSavedProfileSummary(
}
}
}
}
export function buildProfileSaveMessage(
profile: ProviderProfile,

View File

@@ -5,6 +5,9 @@
* Addresses: https://github.com/Gitlawb/openclaude/issues/55
*/
import { isLocalProviderUrl } from '../services/api/providerConfig.js'
import { getLocalOpenAICompatibleProviderLabel } from '../utils/providerDiscovery.js'
declare const MACRO: { VERSION: string; DISPLAY_VERSION?: string }
const ESC = '\x1b['
@@ -99,7 +102,7 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
if (useOpenAI) {
const rawModel = process.env.OPENAI_MODEL || 'gpt-4o'
const baseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1'
const isLocal = /localhost|127\.0\.0\.1|0\.0\.0\.0/.test(baseUrl)
const isLocal = isLocalProviderUrl(baseUrl)
let name = 'OpenAI'
if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel)) name = 'DeepSeek'
else if (/openrouter/i.test(baseUrl)) name = 'OpenRouter'
@@ -107,10 +110,8 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
else if (/groq/i.test(baseUrl)) name = 'Groq'
else if (/mistral/i.test(baseUrl) || /mistral/i.test(rawModel)) name = 'Mistral'
else if (/azure/i.test(baseUrl)) name = 'Azure OpenAI'
else if (/localhost:11434/i.test(baseUrl)) name = 'Ollama'
else if (/localhost:1234/i.test(baseUrl)) name = 'LM Studio'
else if (/llama/i.test(rawModel)) name = 'Meta Llama'
else if (isLocal) name = 'Local'
else if (isLocal) name = getLocalOpenAICompatibleProviderLabel(baseUrl)
// Resolve model alias to actual model name + reasoning effort
let displayModel = rawModel

View File

@@ -1,134 +0,0 @@
import { afterEach, beforeEach, describe, expect, mock, test } from 'bun:test'
import { join } from 'node:path'
const originalEnv = { ...process.env }
const originalPlatform = process.platform
const mockedClipboardPath = join(process.cwd(), 'openclaude-clipboard.txt')
const generateTempFilePathMock = mock(() => mockedClipboardPath)
const execFileNoThrowMock = mock(
async () => ({ code: 0, stdout: '', stderr: '' }),
)
mock.module('../../utils/execFileNoThrow.js', () => ({
execFileNoThrow: execFileNoThrowMock,
}))
mock.module('../../utils/tempfile.js', () => ({
generateTempFilePath: generateTempFilePathMock,
}))
async function importFreshOscModule() {
return import(`./osc.ts?ts=${Date.now()}-${Math.random()}`)
}
async function flushClipboardCopy(): Promise<void> {
await new Promise(resolve => setTimeout(resolve, 0))
}
describe('Windows clipboard fallback', () => {
beforeEach(() => {
execFileNoThrowMock.mockClear()
generateTempFilePathMock.mockClear()
process.env = { ...originalEnv }
delete process.env['SSH_CONNECTION']
delete process.env['TMUX']
Object.defineProperty(process, 'platform', { value: 'win32' })
})
afterEach(() => {
process.env = { ...originalEnv }
Object.defineProperty(process, 'platform', { value: originalPlatform })
})
test('uses PowerShell instead of clip.exe for local Windows copy', async () => {
const { setClipboard } = await importFreshOscModule()
await setClipboard('Привет мир')
await flushClipboardCopy()
expect(execFileNoThrowMock.mock.calls.some(([cmd]) => cmd === 'clip')).toBe(
false,
)
expect(
execFileNoThrowMock.mock.calls.some(([cmd]) => cmd === 'powershell'),
).toBe(true)
})
test('passes Windows clipboard text through a UTF-8 temp file instead of stdin', async () => {
const { setClipboard } = await importFreshOscModule()
await setClipboard('Привет мир')
await flushClipboardCopy()
const windowsCall = execFileNoThrowMock.mock.calls.find(
([cmd]) => cmd === 'powershell',
)
expect(windowsCall?.[2]).toMatchObject({
stdin: 'ignore',
})
expect(windowsCall?.[2]).not.toMatchObject({ input: 'Привет мир' })
expect(windowsCall?.[2]).not.toMatchObject({
env: expect.objectContaining({
OPENCLAUDE_CLIPBOARD_TEXT_B64: expect.any(String),
}),
})
expect(windowsCall?.[1]).toContain(
`$text = [System.IO.File]::ReadAllText('${mockedClipboardPath.replace(/'/g, "''")}', [System.Text.Encoding]::UTF8); Set-Clipboard -Value $text`,
)
})
})
describe('clipboard path behavior remains stable', () => {
beforeEach(() => {
execFileNoThrowMock.mockClear()
process.env = { ...originalEnv }
delete process.env['SSH_CONNECTION']
delete process.env['TMUX']
})
afterEach(() => {
process.env = { ...originalEnv }
Object.defineProperty(process, 'platform', { value: originalPlatform })
})
test('getClipboardPath stays native on local macOS', async () => {
Object.defineProperty(process, 'platform', { value: 'darwin' })
const { getClipboardPath } = await importFreshOscModule()
expect(getClipboardPath()).toBe('native')
})
test('getClipboardPath stays tmux-buffer when TMUX is set', async () => {
Object.defineProperty(process, 'platform', { value: 'linux' })
process.env['TMUX'] = '/tmp/tmux-1000/default,123,0'
const { getClipboardPath } = await importFreshOscModule()
expect(getClipboardPath()).toBe('tmux-buffer')
})
test('Windows clipboard fallback is skipped over SSH', async () => {
Object.defineProperty(process, 'platform', { value: 'win32' })
process.env['SSH_CONNECTION'] = '1 2 3 4'
const { setClipboard } = await importFreshOscModule()
await setClipboard('Привет мир')
expect(execFileNoThrowMock.mock.calls.some(([cmd]) => cmd === 'powershell')).toBe(
false,
)
})
test('local macOS clipboard fallback still uses pbcopy', async () => {
Object.defineProperty(process, 'platform', { value: 'darwin' })
const { setClipboard } = await importFreshOscModule()
await setClipboard('hello')
expect(execFileNoThrowMock.mock.calls.some(([cmd]) => cmd === 'pbcopy')).toBe(
true,
)
})
})

View File

@@ -3,10 +3,8 @@
*/
import { Buffer } from 'buffer'
import { unlink, writeFile } from 'node:fs/promises'
import { env } from '../../utils/env.js'
import { execFileNoThrow } from '../../utils/execFileNoThrow.js'
import { generateTempFilePath } from '../../utils/tempfile.js'
import { BEL, ESC, ESC_TYPE, SEP } from './ansi.js'
import type { Action, Color, TabStatusAction } from './types.js'
@@ -131,7 +129,7 @@ export async function tmuxLoadBuffer(text: string): Promise<boolean> {
* Local (no SSH_CONNECTION): also shell out to a native clipboard utility.
* OSC 52 and tmux -w both depend on terminal settings — iTerm2 disables
* OSC 52 by default, VS Code shows a permission prompt on first use. Native
* utilities (pbcopy/wl-copy/xclip/xsel/PowerShell Set-Clipboard) always work locally. Over
* utilities (pbcopy/wl-copy/xclip/xsel/clip.exe) always work locally. Over
* SSH these would write to the remote clipboard — OSC 52 is the right path there.
*
* Returns the sequence for the caller to write to stdout (raw OSC 52
@@ -213,32 +211,9 @@ function copyNative(text: string): void {
return
}
case 'win32':
// Avoid piping non-ASCII text through the Windows stdin/codepage
// boundary. Write UTF-8 text to a temp file and let PowerShell read it
// directly as UTF-8 before calling Set-Clipboard.
void (async () => {
const tempPath = generateTempFilePath('openclaude-clipboard', '.txt')
const escapedTempPath = tempPath.replace(/'/g, "''")
try {
await writeFile(tempPath, text, { encoding: 'utf8' })
await execFileNoThrow(
'powershell',
[
'-NoProfile',
'-NonInteractive',
'-Command',
`$text = [System.IO.File]::ReadAllText('${escapedTempPath}', [System.Text.Encoding]::UTF8); Set-Clipboard -Value $text`,
],
{
useCwd: false,
timeout: opts.timeout,
stdin: 'ignore',
},
)
} finally {
await unlink(tempPath).catch(() => {})
}
})().catch(() => {})
// clip.exe is always available on Windows. Unicode handling is
// imperfect (system locale encoding) but good enough for a fallback.
void execFileNoThrow('clip', [], opts)
return
}
}

View File

@@ -14,7 +14,16 @@ import { lazySchema } from '../../utils/lazySchema.js'
import { logError } from '../../utils/log.js'
import { getAPIProvider } from '../../utils/model/providers.js'
import { isEssentialTrafficOnly } from '../../utils/privacyLevel.js'
import type { ModelOption } from '../../utils/model/modelOptions.js'
import {
getLocalOpenAICompatibleProviderLabel,
listOpenAICompatibleModels,
} from '../../utils/providerDiscovery.js'
import { getClaudeCodeUserAgent } from '../../utils/userAgent.js'
import {
getAdditionalModelOptionsCacheScope,
resolveProviderRequest,
} from './providerConfig.js'
const bootstrapResponseSchema = lazySchema(() =>
z.object({
@@ -39,6 +48,12 @@ const bootstrapResponseSchema = lazySchema(() =>
type BootstrapResponse = z.infer<ReturnType<typeof bootstrapResponseSchema>>
type BootstrapCachePayload = {
clientData: Record<string, unknown> | null
additionalModelOptions: ModelOption[]
additionalModelOptionsScope: string
}
async function fetchBootstrapAPI(): Promise<BootstrapResponse | null> {
if (isEssentialTrafficOnly()) {
logForDebugging('[Bootstrap] Skipped: Nonessential traffic disabled')
@@ -108,22 +123,70 @@ async function fetchBootstrapAPI(): Promise<BootstrapResponse | null> {
}
}
async function fetchLocalOpenAIModelOptions(): Promise<BootstrapCachePayload | null> {
const scope = getAdditionalModelOptionsCacheScope()
if (!scope?.startsWith('openai:')) {
return null
}
const { baseUrl } = resolveProviderRequest()
const models = await listOpenAICompatibleModels({
baseUrl,
apiKey: process.env.OPENAI_API_KEY,
})
if (models === null) {
logForDebugging('[Bootstrap] Local OpenAI model discovery failed')
return null
}
const providerLabel = getLocalOpenAICompatibleProviderLabel(baseUrl)
return {
clientData: getGlobalConfig().clientDataCache ?? null,
additionalModelOptionsScope: scope,
additionalModelOptions: models.map(model => ({
value: model,
label: model,
description: `Detected from ${providerLabel}`,
})),
}
}
/**
* Fetch bootstrap data from the API and persist to disk cache.
*/
export async function fetchBootstrapData(): Promise<void> {
try {
const scope = getAdditionalModelOptionsCacheScope()
let payload: BootstrapCachePayload | null = null
if (scope === 'firstParty') {
const response = await fetchBootstrapAPI()
if (!response) return
const clientData = response.client_data ?? null
const additionalModelOptions = response.additional_model_options ?? []
payload = {
clientData: response.client_data ?? null,
additionalModelOptions: response.additional_model_options ?? [],
additionalModelOptionsScope: scope,
}
} else if (scope?.startsWith('openai:')) {
payload = await fetchLocalOpenAIModelOptions()
if (!payload) return
} else {
logForDebugging('[Bootstrap] Skipped: no additional model source')
return
}
const { clientData, additionalModelOptions, additionalModelOptionsScope } =
payload
// Only persist if data actually changed — avoids a config write on every startup.
const config = getGlobalConfig()
if (
isEqual(config.clientDataCache, clientData) &&
isEqual(config.additionalModelOptionsCache, additionalModelOptions)
isEqual(config.additionalModelOptionsCache, additionalModelOptions) &&
config.additionalModelOptionsCacheScope === additionalModelOptionsScope
) {
logForDebugging('[Bootstrap] Cache unchanged, skipping write')
return
@@ -134,6 +197,7 @@ export async function fetchBootstrapData(): Promise<void> {
...current,
clientDataCache: clientData,
additionalModelOptionsCache: additionalModelOptions,
additionalModelOptionsCacheScope: additionalModelOptionsScope,
}))
} catch (error) {
logError(error)

View File

@@ -14,12 +14,19 @@ import {
} from './providerConfig.js'
const tempDirs: string[] = []
const originalEnv = {
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
OPENAI_API_BASE: process.env.OPENAI_API_BASE,
}
afterEach(() => {
while (tempDirs.length > 0) {
const dir = tempDirs.pop()
if (dir) rmSync(dir, { recursive: true, force: true })
}
process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
process.env.OPENAI_API_BASE = originalEnv.OPENAI_API_BASE
})
function createTempAuthJson(payload: Record<string, unknown>): string {
@@ -62,12 +69,26 @@ describe('Codex provider config', () => {
})
test('resolves codexplan alias to Codex transport with reasoning', () => {
delete process.env.OPENAI_BASE_URL
delete process.env.OPENAI_API_BASE
const resolved = resolveProviderRequest({ model: 'codexplan' })
expect(resolved.transport).toBe('codex_responses')
expect(resolved.resolvedModel).toBe('gpt-5.4')
expect(resolved.reasoning).toEqual({ effort: 'high' })
})
test('does not force Codex transport when a local non-Codex base URL is explicit', () => {
const resolved = resolveProviderRequest({
model: 'codexplan',
baseUrl: 'http://127.0.0.1:8080/v1',
})
expect(resolved.transport).toBe('chat_completions')
expect(resolved.baseUrl).toBe('http://127.0.0.1:8080/v1')
expect(resolved.resolvedModel).toBe('gpt-5.4')
})
test('resolves codexplan to Codex transport even when OPENAI_BASE_URL is the string "undefined"', () => {
// On Windows, env vars can leak as the literal string "undefined" instead of
// the JS value undefined when not properly unset (issue #336).

View File

@@ -557,8 +557,12 @@ export function getAssistantMessageFromError(
const stripped = error.message.replace(/^429\s+/, '')
const innerMessage = stripped.match(/"message"\s*:\s*"([^"]*)"/)?.[1]
const detail = innerMessage || stripped
const retryAfter = (error as APIError).headers?.get?.('retry-after')
const retryHint = retryAfter && !isNaN(Number(retryAfter))
? `Try again in ${retryAfter} seconds.`
: 'Try again in a few seconds.'
return createAssistantAPIErrorMessage({
content: `${API_ERROR_MESSAGE_PREFIX}: Request rejected (429) · ${detail || `this may be a temporary capacity issue${getAPIProvider() === 'firstParty' ? ' — check status.anthropic.com' : ''}`}`,
content: `${API_ERROR_MESSAGE_PREFIX}: Request rejected (429) · ${detail || 'this may be a temporary capacity issue'} · ${retryHint}`,
error: 'rate_limit',
})
}

View File

@@ -573,3 +573,80 @@ test('sanitizes malformed MCP tool schemas before sending them to OpenAI', async
expect(properties?.priority?.enum).toEqual([0, 1, 2, 3])
expect(properties?.priority).not.toHaveProperty('default')
})
// ---------------------------------------------------------------------------
// Issue #202 — consecutive role coalescing (Devstral, Mistral strict templates)
// ---------------------------------------------------------------------------
function makeNonStreamResponse(content = 'ok'): Response {
return new Response(
JSON.stringify({
id: 'chatcmpl-test',
model: 'test-model',
choices: [{ message: { role: 'assistant', content }, finish_reason: 'stop' }],
usage: { prompt_tokens: 5, completion_tokens: 1, total_tokens: 6 },
}),
{ headers: { 'Content-Type': 'application/json' } },
)
}
test('coalesces consecutive user messages to avoid alternation errors (issue #202)', async () => {
let sentMessages: Array<{ role: string; content: unknown }> | undefined
globalThis.fetch = (async (_input: unknown, init: RequestInit | undefined) => {
sentMessages = JSON.parse(String(init?.body)).messages
return makeNonStreamResponse()
}) as FetchType
const client = createOpenAIShimClient({}) as OpenAIShimClient
await client.beta.messages.create({
model: 'test-model',
system: 'sys',
messages: [
{ role: 'user', content: 'first message' },
{ role: 'user', content: 'second message' },
],
max_tokens: 64,
stream: false,
})
expect(sentMessages?.length).toBe(2) // system + 1 merged user
expect(sentMessages?.[0]?.role).toBe('system')
expect(sentMessages?.[1]?.role).toBe('user')
const userContent = sentMessages?.[1]?.content as string
expect(userContent).toContain('first message')
expect(userContent).toContain('second message')
})
test('coalesces consecutive assistant messages preserving tool_calls (issue #202)', async () => {
let sentMessages: Array<{ role: string; content: unknown; tool_calls?: unknown[] }> | undefined
globalThis.fetch = (async (_input: unknown, init: RequestInit | undefined) => {
sentMessages = JSON.parse(String(init?.body)).messages
return makeNonStreamResponse()
}) as FetchType
const client = createOpenAIShimClient({}) as OpenAIShimClient
await client.beta.messages.create({
model: 'test-model',
system: 'sys',
messages: [
{ role: 'user', content: 'go' },
{ role: 'assistant', content: 'thinking...' },
{
role: 'assistant',
content: [{ type: 'tool_use', id: 'call_1', name: 'Bash', input: { command: 'ls' } }],
},
{ role: 'user', content: [{ type: 'tool_result', tool_use_id: 'call_1', content: 'file.txt' }] },
],
max_tokens: 64,
stream: false,
})
// system + user + merged assistant + tool
const assistantMsgs = sentMessages?.filter(m => m.role === 'assistant')
expect(assistantMsgs?.length).toBe(1) // two assistant turns merged into one
expect(assistantMsgs?.[0]?.tool_calls?.length).toBeGreaterThan(0)
})

View File

@@ -295,7 +295,41 @@ function convertMessages(
}
}
return result
// Coalescing pass: merge consecutive messages of the same role.
// OpenAI/vLLM/Ollama require strict user↔assistant alternation.
// Multiple consecutive tool messages are allowed (assistant → tool* → user).
// Consecutive user or assistant messages must be merged to avoid Jinja
// template errors like "roles must alternate" (Devstral, Mistral models).
const coalesced: OpenAIMessage[] = []
for (const msg of result) {
const prev = coalesced[coalesced.length - 1]
if (prev && prev.role === msg.role && msg.role !== 'tool' && msg.role !== 'system') {
const prevContent = prev.content
const curContent = msg.content
if (typeof prevContent === 'string' && typeof curContent === 'string') {
prev.content = prevContent + (prevContent && curContent ? '\n' : '') + curContent
} else {
const toArray = (
c: string | Array<{ type: string; text?: string; image_url?: { url: string } }> | undefined,
): Array<{ type: string; text?: string; image_url?: { url: string } }> => {
if (!c) return []
if (typeof c === 'string') return c ? [{ type: 'text', text: c }] : []
return c
}
prev.content = [...toArray(prevContent), ...toArray(curContent)]
}
if (msg.tool_calls?.length) {
prev.tool_calls = [...(prev.tool_calls ?? []), ...msg.tool_calls]
}
} else {
coalesced.push(msg)
}
}
return coalesced
}
/**

View File

@@ -1,6 +1,22 @@
import { expect, test } from 'bun:test'
import { afterEach, expect, test } from 'bun:test'
import { isLocalProviderUrl } from './providerConfig.js'
import {
getAdditionalModelOptionsCacheScope,
isLocalProviderUrl,
resolveProviderRequest,
} from './providerConfig.js'
const originalEnv = {
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
OPENAI_MODEL: process.env.OPENAI_MODEL,
}
afterEach(() => {
process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
})
test('treats localhost endpoints as local', () => {
expect(isLocalProviderUrl('http://localhost:11434/v1')).toBe(true)
@@ -33,3 +49,37 @@ test('treats public hosts as remote', () => {
expect(isLocalProviderUrl('https://example.com/v1')).toBe(false)
expect(isLocalProviderUrl('http://[2001:4860:4860::8888]:11434/v1')).toBe(false)
})
test('creates a cache scope for local openai-compatible providers', () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'http://localhost:1234/v1'
process.env.OPENAI_MODEL = 'llama-3.2-3b-instruct'
expect(getAdditionalModelOptionsCacheScope()).toBe(
'openai:http://localhost:1234/v1',
)
})
test('keeps codex alias models on chat completions for local openai-compatible providers', () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'http://127.0.0.1:8080/v1'
process.env.OPENAI_MODEL = 'gpt-5.4'
expect(resolveProviderRequest()).toMatchObject({
transport: 'chat_completions',
requestedModel: 'gpt-5.4',
resolvedModel: 'gpt-5.4',
baseUrl: 'http://127.0.0.1:8080/v1',
})
expect(getAdditionalModelOptionsCacheScope()).toBe(
'openai:http://127.0.0.1:8080/v1',
)
})
test('skips local model cache scope for remote openai-compatible providers', () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'https://api.openai.com/v1'
process.env.OPENAI_MODEL = 'gpt-4o'
expect(getAdditionalModelOptionsCacheScope()).toBeNull()
})

View File

@@ -219,6 +219,14 @@ export function isCodexAlias(model: string): boolean {
return base in CODEX_ALIAS_MODELS
}
export function shouldUseCodexTransport(
model: string,
baseUrl: string | undefined,
): boolean {
const explicitBaseUrl = asEnvUrl(baseUrl)
return isCodexBaseUrl(explicitBaseUrl) || (!explicitBaseUrl && isCodexAlias(model))
}
export function isLocalProviderUrl(baseUrl: string | undefined): boolean {
if (!baseUrl) return false
try {
@@ -302,13 +310,8 @@ export function resolveProviderRequest(options?: {
asEnvUrl(options?.baseUrl) ??
asEnvUrl(process.env.OPENAI_BASE_URL) ??
asEnvUrl(process.env.OPENAI_API_BASE)
// Use Codex transport only when:
// - the base URL is explicitly the Codex endpoint, OR
// - the model is a Codex alias AND no custom base URL has been set
// A custom OPENAI_BASE_URL (e.g. Azure, OpenRouter) always wins over
// model-name-based Codex detection to prevent auth failures (#200, #203).
const transport: ProviderTransport =
isCodexBaseUrl(rawBaseUrl) || (!rawBaseUrl && isCodexAlias(requestedModel))
shouldUseCodexTransport(requestedModel, rawBaseUrl)
? 'codex_responses'
: 'chat_completions'
@@ -337,6 +340,30 @@ export function resolveProviderRequest(options?: {
}
}
export function getAdditionalModelOptionsCacheScope(): string | null {
if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)) {
if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) &&
!isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB) &&
!isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK) &&
!isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX) &&
!isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY)) {
return 'firstParty'
}
return null
}
const request = resolveProviderRequest()
if (request.transport !== 'chat_completions') {
return null
}
if (!isLocalProviderUrl(request.baseUrl)) {
return null
}
return `openai:${request.baseUrl.toLowerCase()}`
}
export function resolveCodexAuthPath(
env: NodeJS.ProcessEnv = process.env,
): string {

View File

@@ -576,6 +576,7 @@ export type GlobalConfig = {
// Additional model options for the model picker (fetched during bootstrap).
additionalModelOptionsCache?: ModelOption[]
additionalModelOptionsCacheScope?: string
// Additional model options discovered from OpenAI-compatible endpoints.
openaiAdditionalModelOptionsCache?: ModelOption[]

View File

@@ -1,5 +1,6 @@
// biome-ignore-all assist/source/organizeImports: internal-only import markers must not be reordered
import { getInitialMainLoopModel } from '../../bootstrap/state.js'
import { getAdditionalModelOptionsCacheScope } from '../../services/api/providerConfig.js'
import {
isClaudeAISubscriber,
isMaxSubscriber,
@@ -44,6 +45,25 @@ export type ModelOption = {
descriptionForModel?: string
}
function getScopedAdditionalModelOptions(): ModelOption[] {
const config = getGlobalConfig()
const activeScope = getAdditionalModelOptionsCacheScope()
if (!activeScope) {
return []
}
if (config.additionalModelOptionsCacheScope !== undefined) {
return config.additionalModelOptionsCacheScope === activeScope
? (config.additionalModelOptionsCache ?? [])
: []
}
return activeScope === 'firstParty'
? (config.additionalModelOptionsCache ?? [])
: []
}
export function getDefaultOptionForUser(fastMode = false): ModelOption {
if (process.env.USER_TYPE === 'ant') {
const currentModel = renderDefaultModelSetting(
@@ -408,6 +428,16 @@ function getModelOptionsBase(fastMode = false): ModelOption[] {
return standardOptions
}
if (getAdditionalModelOptionsCacheScope()?.startsWith('openai:')) {
const activeOpenAIOptions = getActiveOpenAIModelOptionsCache()
return [
getDefaultOptionForUser(fastMode),
...(activeOpenAIOptions.length > 0
? activeOpenAIOptions
: getScopedAdditionalModelOptions()),
]
}
// PAYG 1P API: Default (Sonnet) + Sonnet 1M + Opus 4.6 + Opus 1M + Haiku
if (getAPIProvider() === 'firstParty') {
const payg1POptions = [getDefaultOptionForUser(fastMode)]
@@ -566,13 +596,8 @@ export function getModelOptions(fastMode = false): ModelOption[] {
})
}
const additionalOptions =
getAPIProvider() === 'openai'
? getActiveOpenAIModelOptionsCache()
: getGlobalConfig().additionalModelOptionsCache ?? []
// Append additional model options fetched during bootstrap/endpoints.
for (const opt of additionalOptions) {
// Append additional model options fetched during bootstrap
for (const opt of getScopedAdditionalModelOptions()) {
if (!options.some(existing => existing.value === opt.value)) {
options.push(opt)
}

View File

@@ -23,9 +23,13 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
'gpt-4.1-nano': 1_047_576,
'gpt-4-turbo': 128_000,
'gpt-4': 8_192,
'o1': 200_000,
'o1-mini': 128_000,
'o1-preview': 128_000,
'o1-pro': 200_000,
'o3': 200_000,
'o3-mini': 200_000,
'o4-mini': 200_000,
// DeepSeek (V3: 128k context per official docs)
'deepseek-chat': 128_000,
@@ -63,6 +67,9 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
'phi4:14b': 16_384,
'gemma2:27b': 8_192,
'codellama:13b': 16_384,
'llama3.2:1b': 128_000,
'qwen3:8b': 128_000,
'codestral': 32_768,
}
/**
@@ -82,9 +89,13 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
'gpt-4.1-nano': 32_768,
'gpt-4-turbo': 4_096,
'gpt-4': 4_096,
'o1': 100_000,
'o1-mini': 65_536,
'o1-preview': 32_768,
'o1-pro': 100_000,
'o3': 100_000,
'o3-mini': 100_000,
'o4-mini': 100_000,
// DeepSeek
'deepseek-chat': 8_192,
@@ -120,6 +131,9 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
'phi4:14b': 4_096,
'gemma2:27b': 4_096,
'codellama:13b': 4_096,
'llama3.2:1b': 4_096,
'qwen3:8b': 8_192,
'codestral': 8_192,
}
function lookupByModel<T>(table: Record<string, T>, model: string): T | undefined {

View File

@@ -7,6 +7,9 @@ const originalEnv = {
CLAUDE_CODE_USE_BEDROCK: process.env.CLAUDE_CODE_USE_BEDROCK,
CLAUDE_CODE_USE_VERTEX: process.env.CLAUDE_CODE_USE_VERTEX,
CLAUDE_CODE_USE_FOUNDRY: process.env.CLAUDE_CODE_USE_FOUNDRY,
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
OPENAI_API_BASE: process.env.OPENAI_API_BASE,
OPENAI_MODEL: process.env.OPENAI_MODEL,
}
afterEach(() => {
@@ -16,6 +19,9 @@ afterEach(() => {
process.env.CLAUDE_CODE_USE_BEDROCK = originalEnv.CLAUDE_CODE_USE_BEDROCK
process.env.CLAUDE_CODE_USE_VERTEX = originalEnv.CLAUDE_CODE_USE_VERTEX
process.env.CLAUDE_CODE_USE_FOUNDRY = originalEnv.CLAUDE_CODE_USE_FOUNDRY
process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
process.env.OPENAI_API_BASE = originalEnv.OPENAI_API_BASE
process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
})
async function importFreshProvidersModule() {
@@ -29,6 +35,9 @@ function clearProviderEnv(): void {
delete process.env.CLAUDE_CODE_USE_BEDROCK
delete process.env.CLAUDE_CODE_USE_VERTEX
delete process.env.CLAUDE_CODE_USE_FOUNDRY
delete process.env.OPENAI_BASE_URL
delete process.env.OPENAI_API_BASE
delete process.env.OPENAI_MODEL
}
test('first-party provider keeps Anthropic account setup flow enabled', () => {
@@ -69,3 +78,32 @@ test('GEMINI takes precedence over GitHub when both are set', async () => {
expect(getAPIProvider()).toBe('gemini')
})
test('explicit local openai-compatible base URLs stay on the openai provider', async () => {
clearProviderEnv()
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'http://127.0.0.1:8080/v1'
process.env.OPENAI_MODEL = 'gpt-5.4'
const { getAPIProvider } = await importFreshProvidersModule()
expect(getAPIProvider()).toBe('openai')
})
test('codex aliases still resolve to the codex provider without a non-codex base URL', async () => {
clearProviderEnv()
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_MODEL = 'codexplan'
const { getAPIProvider } = await importFreshProvidersModule()
expect(getAPIProvider()).toBe('codex')
})
test('official OpenAI base URLs now keep provider detection on openai for aliases', async () => {
clearProviderEnv()
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'https://api.openai.com/v1'
process.env.OPENAI_MODEL = 'gpt-5.4'
const { getAPIProvider } = await importFreshProvidersModule()
expect(getAPIProvider()).toBe('openai')
})

View File

@@ -1,5 +1,5 @@
import type { AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS } from '../../services/analytics/index.js'
import { isCodexAlias } from '../../services/api/providerConfig.js'
import { shouldUseCodexTransport } from '../../services/api/providerConfig.js'
import { isEnvTruthy } from '../envUtils.js'
export type APIProvider =
@@ -34,11 +34,10 @@ export function usesAnthropicAccountFlow(): boolean {
return getAPIProvider() === 'firstParty'
}
function isCodexModel(): boolean {
const model = (process.env.OPENAI_MODEL || '').trim()
if (!model) return false
// Delegate to the canonical alias table in providerConfig to keep
// the two Codex detection systems (provider type + transport) in sync.
return isCodexAlias(model)
return shouldUseCodexTransport(
process.env.OPENAI_MODEL || '',
process.env.OPENAI_BASE_URL ?? process.env.OPENAI_API_BASE,
)
}
export function getAPIProviderForStatsig(): AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS {

View File

@@ -0,0 +1,71 @@
import { describe, expect, test } from 'bun:test'
import type { LoadedPlugin } from '../../types/plugin.js'
import { mergePluginSources } from './pluginLoader.js'
function marketplacePlugin(
name: string,
marketplace: string,
enabled: boolean,
): LoadedPlugin {
const pluginId = `${name}@${marketplace}`
return {
name,
manifest: { name } as LoadedPlugin['manifest'],
path: `/tmp/${pluginId}`,
source: pluginId,
repository: pluginId,
enabled,
}
}
describe('mergePluginSources', () => {
test('keeps the enabled copy when duplicate marketplace plugins disagree on enabled state', () => {
const enabledOfficial = marketplacePlugin(
'frontend-design',
'claude-plugins-official',
true,
)
const disabledLegacy = marketplacePlugin(
'frontend-design',
'claude-code-plugins',
false,
)
const result = mergePluginSources({
session: [],
marketplace: [disabledLegacy, enabledOfficial],
builtin: [],
})
expect(result.plugins).toEqual([enabledOfficial])
expect(result.errors).toEqual([])
})
test('keeps the later copy when duplicate marketplace plugins are both enabled', () => {
const legacy = marketplacePlugin(
'frontend-design',
'claude-code-plugins',
true,
)
const official = marketplacePlugin(
'frontend-design',
'claude-plugins-official',
true,
)
const result = mergePluginSources({
session: [],
marketplace: [legacy, official],
builtin: [],
})
expect(result.plugins).toEqual([official])
expect(result.errors).toHaveLength(1)
expect(result.errors[0]).toMatchObject({
type: 'generic-error',
source: legacy.source,
plugin: legacy.name,
})
})
})

View File

@@ -3045,24 +3045,63 @@ export function mergePluginSources(sources: {
})
const sessionNames = new Set(sessionPlugins.map(p => p.name))
const marketplacePlugins = sources.marketplace.filter(p => {
if (sessionNames.has(p.name)) {
// Different marketplaces can enable the same short plugin name, but
// downstream command/skill loading scopes by plugin.name.
const marketplacePluginsByName = new Map<string, LoadedPlugin>()
for (const plugin of sources.marketplace) {
if (sessionNames.has(plugin.name)) {
logForDebugging(
`Plugin "${p.name}" from --plugin-dir overrides installed version`,
`Plugin "${plugin.name}" from --plugin-dir overrides installed version`,
)
return false
continue
}
return true
const existing = marketplacePluginsByName.get(plugin.name)
if (!existing) {
marketplacePluginsByName.set(plugin.name, plugin)
continue
}
const winner = selectMarketplacePlugin(existing, plugin)
const dropped = winner === existing ? plugin : existing
marketplacePluginsByName.set(plugin.name, winner)
logForDebugging(
`Ignoring duplicate marketplace plugin "${plugin.name}" from ${dropped.source}; using ${winner.source}`,
{ level: 'warn' },
)
if (existing.enabled && plugin.enabled) {
errors.push({
type: 'generic-error',
source: dropped.source,
plugin: plugin.name,
error: `Duplicate marketplace plugin "${plugin.name}" ignored: using "${winner.source}" and skipping "${dropped.source}" to avoid short-name collisions`,
})
}
}
// Session first, then non-overridden marketplace, then builtin.
// Downstream first-match consumers see session plugins before
// installed ones for any that slipped past the name filter.
return {
plugins: [...sessionPlugins, ...marketplacePlugins, ...sources.builtin],
plugins: [
...sessionPlugins,
...marketplacePluginsByName.values(),
...sources.builtin,
],
errors,
}
}
function selectMarketplacePlugin(
current: LoadedPlugin,
candidate: LoadedPlugin,
): LoadedPlugin {
if (current.enabled !== candidate.enabled) {
return candidate.enabled ? candidate : current
}
return candidate
}
/**
* Main plugin loading function that discovers and loads all plugins.
*

View File

@@ -0,0 +1,78 @@
import { afterEach, expect, mock, test } from 'bun:test'
import {
getLocalOpenAICompatibleProviderLabel,
listOpenAICompatibleModels,
} from './providerDiscovery.js'
const originalFetch = globalThis.fetch
const originalEnv = {
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
}
afterEach(() => {
globalThis.fetch = originalFetch
process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
})
test('lists models from a local openai-compatible /models endpoint', async () => {
globalThis.fetch = mock((input, init) => {
const url = typeof input === 'string' ? input : input.url
expect(url).toBe('http://localhost:1234/v1/models')
expect(init?.headers).toEqual({ Authorization: 'Bearer local-key' })
return Promise.resolve(
new Response(
JSON.stringify({
data: [
{ id: 'qwen2.5-coder-7b-instruct' },
{ id: 'llama-3.2-3b-instruct' },
{ id: 'qwen2.5-coder-7b-instruct' },
],
}),
{ status: 200 },
),
)
}) as typeof globalThis.fetch
await expect(
listOpenAICompatibleModels({
baseUrl: 'http://localhost:1234/v1',
apiKey: 'local-key',
}),
).resolves.toEqual([
'qwen2.5-coder-7b-instruct',
'llama-3.2-3b-instruct',
])
})
test('returns null when a local openai-compatible /models request fails', async () => {
globalThis.fetch = mock(() =>
Promise.resolve(new Response('not available', { status: 503 })),
) as typeof globalThis.fetch
await expect(
listOpenAICompatibleModels({ baseUrl: 'http://localhost:1234/v1' }),
).resolves.toBeNull()
})
test('detects LM Studio from the default localhost port', () => {
expect(getLocalOpenAICompatibleProviderLabel('http://localhost:1234/v1')).toBe(
'LM Studio',
)
})
test('detects common local openai-compatible providers by hostname', () => {
expect(
getLocalOpenAICompatibleProviderLabel('http://localai.local:8080/v1'),
).toBe('LocalAI')
expect(
getLocalOpenAICompatibleProviderLabel('http://vllm.local:8000/v1'),
).toBe('vLLM')
})
test('falls back to a generic local openai-compatible label', () => {
expect(
getLocalOpenAICompatibleProviderLabel('http://127.0.0.1:8080/v1'),
).toBe('Local OpenAI-compatible')
})

View File

@@ -1,4 +1,5 @@
import type { OllamaModelDescriptor } from './providerRecommendation.ts'
import { DEFAULT_OPENAI_BASE_URL } from '../services/api/providerConfig.js'
export const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434'
export const DEFAULT_ATOMIC_CHAT_BASE_URL = 'http://127.0.0.1:1337'
@@ -53,6 +54,64 @@ export function getAtomicChatChatBaseUrl(baseUrl?: string): string {
return `${getAtomicChatApiBaseUrl(baseUrl)}/v1`
}
export function getOpenAICompatibleModelsBaseUrl(baseUrl?: string): string {
return (
baseUrl || process.env.OPENAI_BASE_URL || DEFAULT_OPENAI_BASE_URL
).replace(/\/+$/, '')
}
export function getLocalOpenAICompatibleProviderLabel(baseUrl?: string): string {
try {
const parsed = new URL(getOpenAICompatibleModelsBaseUrl(baseUrl))
const host = parsed.host.toLowerCase()
const hostname = parsed.hostname.toLowerCase()
const path = parsed.pathname.toLowerCase()
const haystack = `${hostname} ${path}`
if (
host.endsWith(':1234') ||
haystack.includes('lmstudio') ||
haystack.includes('lm-studio')
) {
return 'LM Studio'
}
if (host.endsWith(':11434') || haystack.includes('ollama')) {
return 'Ollama'
}
if (haystack.includes('localai')) {
return 'LocalAI'
}
if (haystack.includes('jan')) {
return 'Jan'
}
if (haystack.includes('kobold')) {
return 'KoboldCpp'
}
if (haystack.includes('llama.cpp') || haystack.includes('llamacpp')) {
return 'llama.cpp'
}
if (haystack.includes('vllm')) {
return 'vLLM'
}
if (
haystack.includes('open-webui') ||
haystack.includes('openwebui')
) {
return 'Open WebUI'
}
if (
haystack.includes('text-generation-webui') ||
haystack.includes('oobabooga')
) {
return 'text-generation-webui'
}
} catch {
// Fall back to the generic label when the base URL is malformed.
}
return 'Local OpenAI-compatible'
}
export async function hasLocalOllama(baseUrl?: string): Promise<boolean> {
const { signal, clear } = withTimeoutSignal(1200)
try {
@@ -111,6 +170,46 @@ export async function listOllamaModels(
}
}
export async function listOpenAICompatibleModels(options?: {
baseUrl?: string
apiKey?: string
}): Promise<string[] | null> {
const { signal, clear } = withTimeoutSignal(5000)
try {
const response = await fetch(
`${getOpenAICompatibleModelsBaseUrl(options?.baseUrl)}/models`,
{
method: 'GET',
headers: options?.apiKey
? {
Authorization: `Bearer ${options.apiKey}`,
}
: undefined,
signal,
},
)
if (!response.ok) {
return null
}
const data = (await response.json()) as {
data?: Array<{ id?: string }>
}
return Array.from(
new Set(
(data.data ?? [])
.filter(model => Boolean(model.id))
.map(model => model.id!),
),
)
} catch {
return null
} finally {
clear()
}
}
export async function hasLocalAtomicChat(baseUrl?: string): Promise<boolean> {
const { signal, clear } = withTimeoutSignal(1200)
try {