Feature: Add local OpenAI-compatible model discovery to /model (#201)

* Add local OpenAI-compatible model discovery to /model

* Guard local OpenAI model discovery from Codex routing

* Preserve remote OpenAI Codex alias behavior
Technomancer702
2026-04-05 15:46:06 -07:00
committed by GitHub
parent 60d3d8961a
commit c534aa5771
15 changed files with 539 additions and 39 deletions
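
The headline change: when /model runs against a local OpenAI-compatible endpoint, it now discovers the endpoint's model list instead of relying on hard-coded options. As orientation before the diffs, here is a minimal sketch of such a discovery helper, assuming the server implements the standard GET {baseUrl}/models listing. The name matches the helper mocked in the tests below, but the body and signature are illustrative only, not the committed implementation.

// Illustrative sketch only. OpenAI-compatible servers (llama.cpp server,
// LM Studio, vLLM, Ollama's compat layer) expose GET {baseUrl}/models,
// so discovery reduces to one fetch plus a mapping. The real helper in
// utils/model/openaiModelDiscovery.ts may differ: the test mock suggests
// it takes no arguments and fills a cache rather than returning ids.
async function discoverOpenAICompatibleModelOptions(
  baseUrl: string,
): Promise<string[]> {
  const response = await fetch(`${baseUrl.replace(/\/+$/, '')}/models`, {
    // Local servers typically accept any bearer token; send a placeholder.
    headers: { Authorization: `Bearer ${process.env.OPENAI_API_KEY ?? 'sk-local'}` },
  })
  if (!response.ok) return []
  const body = (await response.json()) as { data?: Array<{ id: string }> }
  return (body.data ?? []).map(entry => entry.id)
}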

View File

@@ -0,0 +1,43 @@
import { afterEach, expect, mock, test } from 'bun:test'

const originalEnv = {
  CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
  OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
  OPENAI_MODEL: process.env.OPENAI_MODEL,
}

afterEach(() => {
  mock.restore()
  process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
  process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
  process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
})

test('opens the model picker without awaiting local model discovery refresh', async () => {
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  process.env.OPENAI_BASE_URL = 'http://127.0.0.1:8080/v1'
  process.env.OPENAI_MODEL = 'qwen2.5-coder-7b-instruct'

  let resolveDiscovery: (() => void) | undefined
  const discoverOpenAICompatibleModelOptions = mock(
    () =>
      new Promise<void>(resolve => {
        resolveDiscovery = resolve
      }),
  )
  mock.module('../../utils/model/openaiModelDiscovery.js', () => ({
    discoverOpenAICompatibleModelOptions,
  }))

  const { call } = await import('./model.js')
  const result = await Promise.race([
    call(() => {}, {} as never, ''),
    new Promise(resolve => setTimeout(() => resolve('timeout'), 50)),
  ])
  resolveDiscovery?.()

  expect(result).not.toBe('timeout')
  expect(discoverOpenAICompatibleModelOptions).toHaveBeenCalledTimes(1)
})
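
The test proves the command keeps the picker responsive by racing the call against a 50 ms timer. A generic version of that guard, under a hypothetical name, looks like this:

// Generic form of the race used above: resolve to 'timeout' if the operation
// has not settled within the budget, so a hung await fails the assertion
// instead of hanging the test runner. settlesWithin is a hypothetical name.
function settlesWithin<T>(
  operation: Promise<T>,
  ms: number,
): Promise<T | 'timeout'> {
  return Promise.race([
    operation,
    new Promise<'timeout'>(resolve => setTimeout(() => resolve('timeout'), ms)),
  ])
}

With it, the assertion reads expect(await settlesWithin(call(noop, ctx, ''), 50)).not.toBe('timeout').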

View File

@@ -4,6 +4,7 @@ import * as React from 'react';
import type { CommandResultDisplay } from '../../commands.js';
import { ModelPicker } from '../../components/ModelPicker.js';
import { COMMON_HELP_ARGS, COMMON_INFO_ARGS } from '../../constants/xml.js';
import { fetchBootstrapData } from '../../services/api/bootstrap.js';
import { type AnalyticsMetadata, logEvent } from '../../services/analytics/index.js';
import { useAppState, useSetAppState } from '../../state/AppState.js';
import type { LocalJSXCommandCall } from '../../types/command.js';
@@ -19,6 +20,7 @@ import { getActiveOpenAIModelOptionsCache, setActiveOpenAIModelOptionsCache } fr
import { getDefaultMainLoopModelSetting, isOpus1mMergeEnabled, renderDefaultModelSetting } from '../../utils/model/model.js';
import { isModelAllowed } from '../../utils/model/modelAllowlist.js';
import { validateModel } from '../../utils/model/validateModel.js';
import { getAdditionalModelOptionsCacheScope } from '../../services/api/providerConfig.js';
function ModelPickerWrapper(t0) {
  const $ = _c(17);
  const {
@@ -319,7 +321,9 @@ export const call: LocalJSXCommandCall = async (onDone, _context, args) => {
    });
    return <SetModelAndClose args={args} onDone={onDone} />;
  }
  await refreshOpenAIModelOptionsCache();
  if (getAdditionalModelOptionsCacheScope()?.startsWith('openai:')) {
    void refreshOpenAIModelOptionsCache();
  }
  return <ModelPickerWrapper onDone={onDone} />;
};
function renderModelLabel(model: string | null): string {
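
A note on the new guard: refreshOpenAIModelOptionsCache() is now fire-and-forget (void) and only runs when the additional-model-options cache is scoped to an OpenAI-compatible endpoint, which is what keeps Codex-routed sessions from triggering local discovery. The startsWith('openai:') check suggests a scope string of the form openai:<endpoint>; a hypothetical reconstruction, since the helper's body is not part of this view:

// Hypothetical reconstruction inferred from the startsWith('openai:') check;
// the actual getAdditionalModelOptionsCacheScope lives in
// services/api/providerConfig.ts and is not shown in this commit view.
function getAdditionalModelOptionsCacheScope(): string | undefined {
  if (process.env.CLAUDE_CODE_USE_OPENAI !== '1') return undefined
  const baseUrl = process.env.OPENAI_BASE_URL
  return baseUrl ? `openai:${baseUrl}` : undefined
}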

View File

@@ -197,6 +197,21 @@ test('buildProfileSaveMessage maps provider fields without echoing secrets', ()
  expect(message).not.toContain('sk-secret-12345678')
})

test('buildProfileSaveMessage labels local openai-compatible profiles consistently', () => {
  const message = buildProfileSaveMessage(
    'openai',
    {
      OPENAI_MODEL: 'gpt-5.4',
      OPENAI_BASE_URL: 'http://127.0.0.1:8080/v1',
    },
    'D:/codings/Opensource/openclaude/.openclaude-profile.json',
  )

  expect(message).toContain('Saved Local OpenAI-compatible profile.')
  expect(message).toContain('Model: gpt-5.4')
  expect(message).toContain('Endpoint: http://127.0.0.1:8080/v1')
})

test('buildProfileSaveMessage describes Gemini access token / ADC mode clearly', () => {
  const message = buildProfileSaveMessage(
    'gemini',
@@ -230,6 +245,36 @@ test('buildCurrentProviderSummary redacts poisoned model and endpoint values', (
  expect(summary.endpointLabel).toBe('sk-...5678')
})

test('buildCurrentProviderSummary labels generic local openai-compatible providers', () => {
  const summary = buildCurrentProviderSummary({
    processEnv: {
      CLAUDE_CODE_USE_OPENAI: '1',
      OPENAI_MODEL: 'qwen2.5-coder-7b-instruct',
      OPENAI_BASE_URL: 'http://127.0.0.1:8080/v1',
    },
    persisted: null,
  })

  expect(summary.providerLabel).toBe('Local OpenAI-compatible')
  expect(summary.modelLabel).toBe('qwen2.5-coder-7b-instruct')
  expect(summary.endpointLabel).toBe('http://127.0.0.1:8080/v1')
})

test('buildCurrentProviderSummary does not relabel local gpt-5.4 providers as Codex', () => {
  const summary = buildCurrentProviderSummary({
    processEnv: {
      CLAUDE_CODE_USE_OPENAI: '1',
      OPENAI_MODEL: 'gpt-5.4',
      OPENAI_BASE_URL: 'http://127.0.0.1:8080/v1',
    },
    persisted: null,
  })

  expect(summary.providerLabel).toBe('Local OpenAI-compatible')
  expect(summary.modelLabel).toBe('gpt-5.4')
  expect(summary.endpointLabel).toBe('http://127.0.0.1:8080/v1')
})

test('getProviderWizardDefaults ignores poisoned current provider values', () => {
  const defaults = getProviderWizardDefaults({
    OPENAI_API_KEY: 'sk-secret-12345678',
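
The expected strings in the local-profile test imply a simple line-oriented save message. A hedged reconstruction of just that branch (function name and parameters hypothetical):

// Hedged reconstruction of only the branch the new test exercises; the real
// buildProfileSaveMessage covers every provider and takes the profile path too.
function buildLocalOpenAIProfileSaveMessage(
  model: string,
  endpoint: string,
): string {
  return [
    'Saved Local OpenAI-compatible profile.',
    `Model: ${model}`,
    `Endpoint: ${endpoint}`,
  ].join('\n')
}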

View File

@@ -15,6 +15,7 @@ import { Box, Text } from '../../ink.js'
import {
  DEFAULT_CODEX_BASE_URL,
  DEFAULT_OPENAI_BASE_URL,
  isLocalProviderUrl,
  resolveCodexApiCredentials,
  resolveProviderRequest,
} from '../../services/api/providerConfig.js'
@@ -52,7 +53,11 @@ import {
  recommendOllamaModel,
  type RecommendationGoal,
} from '../../utils/providerRecommendation.js'
import { hasLocalOllama, listOllamaModels } from '../../utils/providerDiscovery.js'
import {
  getLocalOpenAICompatibleProviderLabel,
  hasLocalOllama,
  listOllamaModels,
} from '../../utils/providerDiscovery.js'

type ProviderChoice = 'auto' | ProviderProfile | 'clear'
@@ -182,10 +187,8 @@ export function buildCurrentProviderSummary(options?: {
  let providerLabel = 'OpenAI-compatible'
  if (request.transport === 'codex_responses') {
    providerLabel = 'Codex'
  } else if (request.baseUrl.includes('localhost:11434')) {
    providerLabel = 'Ollama'
  } else if (request.baseUrl.includes('localhost:1234')) {
    providerLabel = 'LM Studio'
  } else if (isLocalProviderUrl(request.baseUrl)) {
    providerLabel = getLocalOpenAICompatibleProviderLabel(request.baseUrl)
  }

  return {
@@ -272,16 +275,20 @@ function buildSavedProfileSummary(
      ),
    }
    case 'openai':
    default:
    default: {
      const baseUrl = env.OPENAI_BASE_URL ?? DEFAULT_OPENAI_BASE_URL
      return {
        providerLabel: 'OpenAI-compatible',
        providerLabel: isLocalProviderUrl(baseUrl)
          ? getLocalOpenAICompatibleProviderLabel(baseUrl)
          : 'OpenAI-compatible',
        modelLabel: getSafeDisplayValue(
          env.OPENAI_MODEL ?? 'gpt-4o',
          process.env,
          env,
        ),
        endpointLabel: getSafeDisplayValue(
          env.OPENAI_BASE_URL ?? DEFAULT_OPENAI_BASE_URL,
          baseUrl,
          process.env,
          env,
        ),
@@ -290,6 +297,7 @@ function buildSavedProfileSummary(
          ? 'configured'
          : undefined,
      }
    }
  }
}
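
This commit view does not show the bodies of the two new providerDiscovery helpers, but the removed substring checks and the tests constrain them: port 11434 is Ollama's default, 1234 is LM Studio's, and any other loopback endpoint should get the generic 'Local OpenAI-compatible' label. A hedged sketch consistent with those constraints:

// Hedged sketch inferred from the removed checks and the tests above;
// the real helpers are exported from utils/providerDiscovery.ts.
export function isLocalProviderUrl(baseUrl: string): boolean {
  try {
    const { hostname } = new URL(baseUrl)
    // WHATWG URL keeps the brackets on IPv6 hostnames.
    return hostname === 'localhost' || hostname === '127.0.0.1' || hostname === '[::1]'
  } catch {
    return false // unparsable URLs are treated as remote
  }
}

export function getLocalOpenAICompatibleProviderLabel(baseUrl: string): string {
  try {
    const { port } = new URL(baseUrl)
    if (port === '11434') return 'Ollama' // Ollama's default port
    if (port === '1234') return 'LM Studio' // LM Studio's default port
  } catch {
    // fall through to the generic label
  }
  return 'Local OpenAI-compatible'
}

Parsing with new URL rather than substring matching also covers 127.0.0.1, which the old includes('localhost:...') checks missed; that is exactly what the 127.0.0.1:8080 tests above assert.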