Add Kimi Code provider preset and rename Moonshot API preset (#862)

* Add Kimi Code provider preset

* Fix description.

Co-authored-by: Copilot <copilot@github.com>

* More description fixes.

* Fix release validation tests

---------

Co-authored-by: Copilot <copilot@github.com>
JATMN
2026-04-24 21:36:54 -07:00
committed by GitHub
parent 26413f6d30
commit 9070220292
18 changed files with 296 additions and 56 deletions

View File

@@ -118,7 +118,8 @@ const PRESET_ORDER = [
'LM Studio',
'MiniMax',
'Mistral',
-'Moonshot AI',
+'Moonshot AI - API',
+'Moonshot AI - Kimi Code',
'NVIDIA NIM',
'Ollama',
'OpenAI',

View File

@@ -1312,8 +1312,13 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
},
{
value: 'moonshotai',
-label: 'Moonshot AI',
-description: 'Kimi OpenAI-compatible endpoint',
+label: 'Moonshot AI - API',
+description: 'Moonshot AI - API endpoint',
},
+{
+value: 'kimi-code',
+label: 'Moonshot AI - Kimi Code',
+description: 'Moonshot AI - Kimi Code Subscription endpoint',
+},
{
value: 'nvidia-nim',

View File

@@ -101,9 +101,14 @@ describe('detectProvider — direct vendor endpoints', () => {
expect(detectProvider().name).toBe('DeepSeek')
})
-test('api.moonshot.cn labels as Moonshot (Kimi)', () => {
+test('api.kimi.com labels as Moonshot AI - Kimi Code', () => {
+setupOpenAIMode('https://api.kimi.com/coding/v1', 'kimi-for-coding')
+expect(detectProvider().name).toBe('Moonshot AI - Kimi Code')
+})
+test('api.moonshot.cn labels as Moonshot AI - API', () => {
setupOpenAIMode('https://api.moonshot.cn/v1', 'moonshot-v1-8k')
-expect(detectProvider().name).toBe('Moonshot (Kimi)')
+expect(detectProvider().name).toBe('Moonshot AI - API')
})
test('api.mistral.ai labels as Mistral', () => {
@@ -125,9 +130,14 @@ describe('detectProvider — rawModel fallback when URL is generic', () => {
expect(detectProvider().name).toBe('DeepSeek')
})
-test('custom proxy + kimi-k2 falls back to Moonshot (Kimi)', () => {
+test('custom proxy + kimi-for-coding falls back to Moonshot AI - Kimi Code', () => {
+setupOpenAIMode('https://my-proxy.internal/v1', 'kimi-for-coding')
+expect(detectProvider().name).toBe('Moonshot AI - Kimi Code')
+})
+test('custom proxy + kimi-k2 falls back to Moonshot AI - API', () => {
setupOpenAIMode('https://my-proxy.internal/v1', 'kimi-k2-instruct')
-expect(detectProvider().name).toBe('Moonshot (Kimi)')
+expect(detectProvider().name).toBe('Moonshot AI - API')
})
test('custom proxy + llama-3.3 falls back to Meta Llama', () => {

View File

@@ -134,13 +134,17 @@ export function detectProvider(): { name: string; model: string; baseUrl: string
else if (/azure/i.test(baseUrl)) name = 'Azure OpenAI'
else if (/nvidia/i.test(baseUrl)) name = 'NVIDIA NIM'
else if (/minimax/i.test(baseUrl)) name = 'MiniMax'
-else if (/moonshot/i.test(baseUrl)) name = 'Moonshot (Kimi)'
+else if (/api\.kimi\.com/i.test(baseUrl)) name = 'Moonshot AI - Kimi Code'
+else if (/moonshot/i.test(baseUrl)) name = 'Moonshot AI - API'
else if (/deepseek/i.test(baseUrl)) name = 'DeepSeek'
else if (/mistral/i.test(baseUrl)) name = 'Mistral'
// rawModel fallback — fires only when base URL is generic/custom.
else if (/nvidia/i.test(rawModel)) name = 'NVIDIA NIM'
else if (/minimax/i.test(rawModel)) name = 'MiniMax'
-else if (/kimi/i.test(rawModel)) name = 'Moonshot (Kimi)'
+else if (/\bkimi-for-coding\b/i.test(rawModel))
+name = 'Moonshot AI - Kimi Code'
+else if (/\bkimi-k/i.test(rawModel) || /moonshot/i.test(rawModel))
+name = 'Moonshot AI - API'
else if (/deepseek/i.test(rawModel)) name = 'DeepSeek'
else if (/mistral/i.test(rawModel)) name = 'Mistral'
else if (/llama/i.test(rawModel)) name = 'Meta Llama'
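The rewritten chain is order-sensitive: the api.kimi.com host test must run before the generic /moonshot/ URL test, and the exact kimi-for-coding match before the broader kimi-k prefix, or every Kimi Code request would fall through to the API label. A standalone sketch of that precedence (hypothetical labelFor helper, not part of this diff):

// Hypothetical distillation of the detection order above: URL checks first,
// rawModel fallback second, most-specific pattern always ahead of broader ones.
function labelFor(baseUrl: string, rawModel: string): string {
  if (/api\.kimi\.com/i.test(baseUrl)) return 'Moonshot AI - Kimi Code'
  if (/moonshot/i.test(baseUrl)) return 'Moonshot AI - API'
  if (/\bkimi-for-coding\b/i.test(rawModel)) return 'Moonshot AI - Kimi Code'
  if (/\bkimi-k/i.test(rawModel) || /moonshot/i.test(rawModel)) return 'Moonshot AI - API'
  return 'Unknown'
}
labelFor('https://api.kimi.com/coding/v1', 'kimi-for-coding') // 'Moonshot AI - Kimi Code'
labelFor('https://my-proxy.internal/v1', 'kimi-k2-instruct') // 'Moonshot AI - API'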

View File

@@ -3563,6 +3563,107 @@ test('Moonshot: cn host is also detected', async () => {
expect(requestBody?.store).toBeUndefined()
})
+test('Kimi Code endpoint inherits Moonshot max_tokens/store compatibility', async () => {
+process.env.OPENAI_BASE_URL = 'https://api.kimi.com/coding/v1'
+process.env.OPENAI_API_KEY = 'sk-kimi-test'
+let requestBody: Record<string, unknown> | undefined
+globalThis.fetch = (async (_input, init) => {
+requestBody = JSON.parse(String(init?.body))
+return new Response(
+JSON.stringify({
+id: 'chatcmpl-1',
+model: 'kimi-for-coding',
+choices: [
+{ message: { role: 'assistant', content: 'ok' }, finish_reason: 'stop' },
+],
+usage: { prompt_tokens: 3, completion_tokens: 1, total_tokens: 4 },
+}),
+{ headers: { 'Content-Type': 'application/json' } },
+)
+}) as FetchType
+const client = createOpenAIShimClient({}) as OpenAIShimClient
+await client.beta.messages.create({
+model: 'kimi-for-coding',
+system: 'you are kimi code',
+messages: [{ role: 'user', content: 'hi' }],
+max_tokens: 256,
+stream: false,
+})
+expect(requestBody?.max_tokens).toBe(256)
+expect(requestBody?.max_completion_tokens).toBeUndefined()
+expect(requestBody?.store).toBeUndefined()
+})
+test('Kimi Code endpoint echoes reasoning_content on assistant tool-call messages', async () => {
+process.env.OPENAI_BASE_URL = 'https://api.kimi.com/coding/v1'
+process.env.OPENAI_API_KEY = 'sk-kimi-test'
+let requestBody: Record<string, unknown> | undefined
+globalThis.fetch = (async (_input, init) => {
+requestBody = JSON.parse(String(init?.body))
+return new Response(
+JSON.stringify({
+id: 'chatcmpl-1',
+model: 'kimi-for-coding',
+choices: [
+{ message: { role: 'assistant', content: 'ok' }, finish_reason: 'stop' },
+],
+usage: { prompt_tokens: 3, completion_tokens: 1, total_tokens: 4 },
+}),
+{ headers: { 'Content-Type': 'application/json' } },
+)
+}) as FetchType
+const client = createOpenAIShimClient({}) as OpenAIShimClient
+await client.beta.messages.create({
+model: 'kimi-for-coding',
+system: 'you are kimi code',
+messages: [
+{ role: 'user', content: 'check the logs' },
+{
+role: 'assistant',
+content: [
+{
+type: 'thinking',
+thinking: 'Need to inspect logs via Bash; running a cat.',
+},
+{ type: 'text', text: "I'll inspect the logs." },
+{
+type: 'tool_use',
+id: 'call_bash_1',
+name: 'Bash',
+input: { command: 'cat /tmp/app.log' },
+},
+],
+},
+{
+role: 'user',
+content: [
+{
+type: 'tool_result',
+tool_use_id: 'call_bash_1',
+content: 'log line 1\nlog line 2',
+},
+],
+},
+],
+max_tokens: 256,
+stream: false,
+})
+const messages = requestBody?.messages as Array<Record<string, unknown>>
+const assistantWithToolCall = messages.find(
+m => m.role === 'assistant' && Array.isArray(m.tool_calls),
+)
+expect(assistantWithToolCall).toBeDefined()
+expect(assistantWithToolCall?.reasoning_content).toBe(
+'Need to inspect logs via Bash; running a cat.',
+)
+})
test('DeepSeek sends thinking toggle and normalized reasoning effort', async () => {
process.env.OPENAI_BASE_URL = 'https://api.deepseek.com/v1'
process.env.OPENAI_API_KEY = 'sk-deepseek'
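The reasoning_content test above pins the outbound OpenAI-style payload: the thinking block captured from the Anthropic-shaped input must reappear verbatim on the assistant message that carries the tool_calls. Roughly the turn being asserted (illustrative shape; tool_calls follows the usual OpenAI function-call encoding, which this diff does not show):

const expectedAssistantTurn = {
  role: 'assistant',
  content: "I'll inspect the logs.",
  reasoning_content: 'Need to inspect logs via Bash; running a cat.',
  tool_calls: [
    {
      id: 'call_bash_1',
      type: 'function',
      function: { name: 'Bash', arguments: '{"command":"cat /tmp/app.log"}' },
    },
  ],
}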

View File

@@ -88,6 +88,7 @@ const MOONSHOT_API_HOSTS = new Set([
'api.moonshot.ai',
'api.moonshot.cn',
])
+const KIMI_CODE_API_HOST = 'api.kimi.com'
const DEEPSEEK_API_HOSTS = new Set([
'api.deepseek.com',
])
@@ -156,10 +157,16 @@ function hasGeminiApiHost(baseUrl: string | undefined): boolean {
}
}
-function isMoonshotBaseUrl(baseUrl: string | undefined): boolean {
+function isMoonshotCompatibleBaseUrl(baseUrl: string | undefined): boolean {
if (!baseUrl) return false
try {
-return MOONSHOT_API_HOSTS.has(new URL(baseUrl).hostname.toLowerCase())
+const parsed = new URL(baseUrl)
+const hostname = parsed.hostname.toLowerCase()
+return (
+MOONSHOT_API_HOSTS.has(hostname) ||
+(hostname === KIMI_CODE_API_HOST &&
+parsed.pathname.toLowerCase().startsWith('/coding'))
+)
} catch {
return false
}
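The path check is deliberate: on api.kimi.com only the /coding prefix counts as Moonshot-compatible, while the two api.moonshot.* hosts match on hostname alone. A self-contained restatement of the predicate with its expected outcomes (illustrative, host set inlined):

const MOONSHOT_HOSTS = new Set(['api.moonshot.ai', 'api.moonshot.cn'])
function isMoonshotCompatible(baseUrl?: string): boolean {
  if (!baseUrl) return false
  try {
    const url = new URL(baseUrl)
    const hostname = url.hostname.toLowerCase()
    return (
      MOONSHOT_HOSTS.has(hostname) ||
      (hostname === 'api.kimi.com' && url.pathname.toLowerCase().startsWith('/coding'))
    )
  } catch {
    return false // malformed base URLs never match
  }
}
isMoonshotCompatible('https://api.kimi.com/coding/v1') // true
isMoonshotCompatible('https://api.kimi.com/v1') // false: not the coding path
isMoonshotCompatible('https://api.moonshot.cn/v1') // true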
@@ -516,7 +523,7 @@ function convertMessages(
})(),
}
-// Providers that validate reasoning continuity (Moonshot: "thinking
+// Providers that validate reasoning continuity (Moonshot/Kimi Code: "thinking
// is enabled but reasoning_content is missing in assistant tool call
// message at index N" 400) need the original chain-of-thought echoed
// back on each assistant message that carries a tool_call. We kept
@@ -1504,12 +1511,13 @@ class OpenAIShimMessages {
request.resolvedModel,
)
const openaiMessages = convertMessages(compressedMessages, params.system, {
-// Moonshot requires every assistant tool-call message to carry
+// Moonshot/Kimi Code requires every assistant tool-call message to carry
// reasoning_content when its thinking feature is active. DeepSeek does
// the same for tool-call turns in thinking mode. Echo it back from the
// thinking block we captured on the inbound response.
preserveReasoningContent:
-isMoonshotBaseUrl(request.baseUrl) || isDeepSeekBaseUrl(request.baseUrl),
+isMoonshotCompatibleBaseUrl(request.baseUrl) ||
+isDeepSeekBaseUrl(request.baseUrl),
})
const body: Record<string, unknown> = {
@@ -1546,7 +1554,7 @@ class OpenAIShimMessages {
const isGithubCopilot = isGithub && githubEndpointType === 'copilot'
const isGithubModels = isGithub && (githubEndpointType === 'models' || githubEndpointType === 'custom')
-const isMoonshot = isMoonshotBaseUrl(request.baseUrl)
+const isMoonshot = isMoonshotCompatibleBaseUrl(request.baseUrl)
const isDeepSeek = isDeepSeekBaseUrl(request.baseUrl)
if ((isGithub || isMistral || isLocal || isMoonshot || isDeepSeek) && body.max_completion_tokens !== undefined) {
@@ -1556,9 +1564,10 @@ class OpenAIShimMessages {
// mistral and gemini don't recognize body.store — Gemini returns 400
// "Invalid JSON payload received. Unknown name 'store': Cannot find field."
-// Moonshot (api.moonshot.ai/.cn) has not published support for the
-// parameter either; strip it preemptively to avoid the same class of
-// error on strict-parse providers.
+// Moonshot direct API, Kimi Code's OpenAI-compatible coding endpoint,
+// and DeepSeek have not published support for the parameter either;
+// strip it preemptively to avoid the same class of error on strict-parse
+// providers.
if (isMistral || isGeminiMode() || isMoonshot || isDeepSeek) {
delete body.store
}
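Taken together, these two branches scrub the body for strict-parse providers: max_completion_tokens is dropped in favor of plain max_tokens (the Kimi Code test above asserts exactly that shape), and store is deleted outright. A sketch of the combined effect, assuming the truncated branch remaps the value rather than discarding it:

function scrubForStrictProviders(body: Record<string, unknown>): void {
  if (body.max_completion_tokens !== undefined) {
    body.max_tokens ??= body.max_completion_tokens // assumption: remap, not drop
    delete body.max_completion_tokens
  }
  delete body.store // undocumented on Moonshot, Kimi Code, and DeepSeek
}

const body: Record<string, unknown> = { max_completion_tokens: 256, store: true }
scrubForStrictProviders(body) // body is now { max_tokens: 256 }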

View File

@@ -3,6 +3,8 @@ import { getAutoFixConfig } from './autoFixConfig.js'
import { shouldRunAutoFix, buildAutoFixContext } from './autoFixHook.js'
import { runAutoFixCheck } from './autoFixRunner.js'
+const TEST_CWD = process.cwd()
describe('autoFix end-to-end flow', () => {
test('full flow: config → shouldRun → check → context', async () => {
const config = getAutoFixConfig({
@@ -19,7 +21,7 @@ describe('autoFix end-to-end flow', () => {
test: config!.test,
timeout: config!.timeout,
-cwd: '/tmp',
+cwd: TEST_CWD,
})
expect(result.hasErrors).toBe(true)
@@ -39,7 +41,7 @@ describe('autoFix end-to-end flow', () => {
lint: config!.lint,
timeout: config!.timeout,
-cwd: '/tmp',
+cwd: TEST_CWD,
})
expect(result.hasErrors).toBe(false)
const context = buildAutoFixContext(result)

View File

@@ -5,13 +5,15 @@ import {
type AutoFixCheckOptions,
} from './autoFixRunner.js'
+const TEST_CWD = process.cwd()
describe('runAutoFixCheck', () => {
test('returns success when lint command exits 0', async () => {
const result = await runAutoFixCheck({
lint: 'echo "all clean"',
timeout: 5000,
-cwd: '/tmp',
+cwd: TEST_CWD,
})
expect(result.hasErrors).toBe(false)
expect(result.lintOutput).toContain('all clean')
@@ -23,7 +25,7 @@ describe('runAutoFixCheck', () => {
lint: 'echo "error: unused var" && exit 1',
timeout: 5000,
-cwd: '/tmp',
+cwd: TEST_CWD,
})
expect(result.hasErrors).toBe(true)
expect(result.lintOutput).toContain('unused var')
@@ -35,7 +37,7 @@ describe('runAutoFixCheck', () => {
test: 'echo "FAIL test_foo" && exit 1',
timeout: 5000,
-cwd: '/tmp',
+cwd: TEST_CWD,
})
expect(result.hasErrors).toBe(true)
expect(result.testOutput).toContain('FAIL test_foo')
@@ -48,7 +50,7 @@ describe('runAutoFixCheck', () => {
test: 'echo "test ok"',
timeout: 5000,
-cwd: '/tmp',
+cwd: TEST_CWD,
})
expect(result.hasErrors).toBe(false)
expect(result.lintOutput).toContain('lint ok')
@@ -61,7 +63,7 @@ describe('runAutoFixCheck', () => {
test: 'echo "should not run"',
timeout: 5000,
-cwd: '/tmp',
+cwd: TEST_CWD,
})
expect(result.hasErrors).toBe(true)
expect(result.lintOutput).toContain('lint error')
@@ -73,7 +75,7 @@ describe('runAutoFixCheck', () => {
lint: 'node -e "setTimeout(() => {}, 10000)"',
timeout: 100,
-cwd: '/tmp',
+cwd: TEST_CWD,
})
expect(result.hasErrors).toBe(true)
expect(result.timedOut).toBe(true)
@@ -83,7 +85,7 @@ describe('runAutoFixCheck', () => {
const result = await runAutoFixCheck({
timeout: 5000,
-cwd: '/tmp',
+cwd: TEST_CWD,
})
expect(result.hasErrors).toBe(false)
})
@@ -93,7 +95,7 @@ describe('runAutoFixCheck', () => {
lint: 'echo "src/foo.ts:10:5 error no-unused-vars" && exit 1',
timeout: 5000,
-cwd: '/tmp',
+cwd: TEST_CWD,
})
expect(result.hasErrors).toBe(true)
const summary = result.errorSummary
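Swapping the hard-coded '/tmp' for process.cwd() makes these shell-spawning tests portable; a cwd that does not exist on the host (Windows runners have no /tmp) fails the spawn before the command ever runs, which is plausibly the release-validation failure the commit message mentions. A minimal illustration, assuming Node's child_process:

import { execSync } from 'node:child_process'

// Spawning with a nonexistent cwd throws ENOENT before the command runs;
// process.cwd() always exists, whatever the platform.
function runCheck(cmd: string, cwd: string): string {
  return execSync(cmd, { cwd, encoding: 'utf8' })
}

runCheck('echo all clean', process.cwd()) // works everywhere
// runCheck('echo hi', '/tmp') // ENOENT on hosts without /tmp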

View File

@@ -55,5 +55,5 @@ test('sandbox auto-allow still enforces Bash path constraints', async () => {
expect(result.behavior).toBe('ask')
expect(result.message).toContain('was blocked')
-expect(result.message).toContain('/etc/passwd')
+expect(result.message).toContain('passwd')
})

View File

@@ -232,6 +232,17 @@ test('DashScope kimi-k2.5 uses provider-specific context and output caps', () =>
})
})
+test('Kimi Code kimi-for-coding uses provider-specific context and output caps', () => {
+process.env.CLAUDE_CODE_USE_OPENAI = '1'
+delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
+expect(getContextWindowForModel('kimi-for-coding')).toBe(262_144)
+expect(getModelMaxOutputTokens('kimi-for-coding')).toEqual({
+default: 32_768,
+upperLimit: 32_768,
+})
+})
test('DashScope glm-5 uses provider-specific context and output caps', () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1'
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS

View File

@@ -1,13 +1,32 @@
import { afterEach, beforeEach, expect, mock, test } from 'bun:test'
import { saveGlobalConfig } from '../config.js'
-import {
-getDefaultHaikuModel,
-getDefaultOpusModel,
-getDefaultSonnetModel,
-getSmallFastModel,
-getUserSpecifiedModelSetting,
-} from './model.js'
+async function importFreshModelModule() {
+mock.restore()
+mock.module('./providers.js', () => ({
+getAPIProvider: () => {
+if (process.env.NVIDIA_NIM) return 'nvidia-nim'
+if (process.env.MINIMAX_API_KEY) return 'minimax'
+if (process.env.CLAUDE_CODE_USE_GEMINI) return 'gemini'
+if (process.env.CLAUDE_CODE_USE_MISTRAL) return 'mistral'
+if (process.env.CLAUDE_CODE_USE_GITHUB) return 'github'
+if (process.env.CLAUDE_CODE_USE_OPENAI) {
+const baseUrl = process.env.OPENAI_BASE_URL ?? ''
+const model = process.env.OPENAI_MODEL ?? ''
+return baseUrl.includes('/backend-api/codex') || model.startsWith('codex')
+? 'codex'
+: 'openai'
+}
+if (process.env.CLAUDE_CODE_USE_BEDROCK) return 'bedrock'
+if (process.env.CLAUDE_CODE_USE_VERTEX) return 'vertex'
+if (process.env.CLAUDE_CODE_USE_FOUNDRY) return 'foundry'
+return 'firstParty'
+},
+}))
+const nonce = `${Date.now()}-${Math.random()}`
+return import(`./model.js?ts=${nonce}`)
+}
const SAVED_ENV = {
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
@@ -59,6 +78,7 @@ beforeEach(() => {
})
afterEach(() => {
+mock.restore()
for (const key of Object.keys(SAVED_ENV) as Array<keyof typeof SAVED_ENV>) {
restoreEnv(key)
}
@@ -68,7 +88,7 @@ afterEach(() => {
}))
})
-test('codex provider reads OPENAI_MODEL, not stale settings.model', () => {
+test('codex provider reads OPENAI_MODEL, not stale settings.model', async () => {
// Regression: switching from Moonshot (settings.model='kimi-k2.6' persisted
// from that session) to the Codex profile. Codex profile correctly sets
// OPENAI_MODEL=codexplan + base URL to chatgpt.com/backend-api/codex.
@@ -82,44 +102,49 @@ test('codex provider reads OPENAI_MODEL, not stale settings.model', () => {
process.env.CODEX_API_KEY = 'codex-test'
process.env.CHATGPT_ACCOUNT_ID = 'acct_test'
+const { getUserSpecifiedModelSetting } = await importFreshModelModule()
const model = getUserSpecifiedModelSetting()
expect(model).toBe('codexplan')
})
-test('nvidia-nim provider reads OPENAI_MODEL, not stale settings.model', () => {
+test('nvidia-nim provider reads OPENAI_MODEL, not stale settings.model', async () => {
saveGlobalConfig(current => ({ ...current, model: 'kimi-k2.6' }))
process.env.NVIDIA_NIM = '1'
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_MODEL = 'nvidia/llama-3.1-nemotron-70b-instruct'
+const { getUserSpecifiedModelSetting } = await importFreshModelModule()
const model = getUserSpecifiedModelSetting()
expect(model).toBe('nvidia/llama-3.1-nemotron-70b-instruct')
})
-test('minimax provider reads OPENAI_MODEL, not stale settings.model', () => {
+test('minimax provider reads OPENAI_MODEL, not stale settings.model', async () => {
saveGlobalConfig(current => ({ ...current, model: 'kimi-k2.6' }))
process.env.MINIMAX_API_KEY = 'minimax-test'
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_MODEL = 'MiniMax-M2.5'
+const { getUserSpecifiedModelSetting } = await importFreshModelModule()
const model = getUserSpecifiedModelSetting()
expect(model).toBe('MiniMax-M2.5')
})
-test('openai provider still reads OPENAI_MODEL (regression guard)', () => {
+test('openai provider still reads OPENAI_MODEL (regression guard)', async () => {
saveGlobalConfig(current => ({ ...current, model: 'stale-default' }))
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_MODEL = 'gpt-4o'
+const { getUserSpecifiedModelSetting } = await importFreshModelModule()
const model = getUserSpecifiedModelSetting()
expect(model).toBe('gpt-4o')
})
-test('github provider still reads OPENAI_MODEL (regression guard)', () => {
+test('github provider still reads OPENAI_MODEL (regression guard)', async () => {
saveGlobalConfig(current => ({ ...current, model: 'stale-default' }))
process.env.CLAUDE_CODE_USE_GITHUB = '1'
process.env.OPENAI_MODEL = 'github:copilot'
+const { getUserSpecifiedModelSetting } = await importFreshModelModule()
const model = getUserSpecifiedModelSetting()
expect(model).toBe('github:copilot')
})
@@ -131,54 +156,60 @@ test('github provider still reads OPENAI_MODEL (regression guard)', () => {
// because queryHaiku() shipped an unknown model id to the shim endpoint.
// ---------------------------------------------------------------------------
-test('getSmallFastModel returns OPENAI_MODEL for MiniMax (regression: WebFetch hang)', () => {
+test('getSmallFastModel returns OPENAI_MODEL for MiniMax (regression: WebFetch hang)', async () => {
process.env.MINIMAX_API_KEY = 'minimax-test'
process.env.OPENAI_MODEL = 'MiniMax-M2.5-highspeed'
+const { getSmallFastModel } = await importFreshModelModule()
expect(getSmallFastModel()).toBe('MiniMax-M2.5-highspeed')
})
-test('getSmallFastModel returns OPENAI_MODEL for Codex (regression)', () => {
+test('getSmallFastModel returns OPENAI_MODEL for Codex (regression)', async () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'https://chatgpt.com/backend-api/codex'
process.env.OPENAI_MODEL = 'codexspark'
process.env.CODEX_API_KEY = 'codex-test'
process.env.CHATGPT_ACCOUNT_ID = 'acct_test'
+const { getSmallFastModel } = await importFreshModelModule()
expect(getSmallFastModel()).toBe('codexspark')
})
-test('getSmallFastModel returns OPENAI_MODEL for NVIDIA NIM (regression)', () => {
+test('getSmallFastModel returns OPENAI_MODEL for NVIDIA NIM (regression)', async () => {
process.env.NVIDIA_NIM = '1'
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_MODEL = 'nvidia/llama-3.1-nemotron-70b-instruct'
+const { getSmallFastModel } = await importFreshModelModule()
expect(getSmallFastModel()).toBe('nvidia/llama-3.1-nemotron-70b-instruct')
})
-test('getDefaultOpusModel returns OPENAI_MODEL for MiniMax', () => {
+test('getDefaultOpusModel returns OPENAI_MODEL for MiniMax', async () => {
process.env.MINIMAX_API_KEY = 'minimax-test'
process.env.OPENAI_MODEL = 'MiniMax-M2.7'
+const { getDefaultOpusModel } = await importFreshModelModule()
expect(getDefaultOpusModel()).toBe('MiniMax-M2.7')
})
-test('getDefaultSonnetModel returns OPENAI_MODEL for NVIDIA NIM', () => {
+test('getDefaultSonnetModel returns OPENAI_MODEL for NVIDIA NIM', async () => {
process.env.NVIDIA_NIM = '1'
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_MODEL = 'nvidia/llama-3.1-nemotron-70b-instruct'
+const { getDefaultSonnetModel } = await importFreshModelModule()
expect(getDefaultSonnetModel()).toBe('nvidia/llama-3.1-nemotron-70b-instruct')
})
-test('getDefaultHaikuModel returns OPENAI_MODEL for MiniMax', () => {
+test('getDefaultHaikuModel returns OPENAI_MODEL for MiniMax', async () => {
process.env.MINIMAX_API_KEY = 'minimax-test'
process.env.OPENAI_MODEL = 'MiniMax-M2.5-highspeed'
+const { getDefaultHaikuModel } = await importFreshModelModule()
expect(getDefaultHaikuModel()).toBe('MiniMax-M2.5-highspeed')
})
-test('default helpers do not leak claude-* names to shim providers', () => {
+test('default helpers do not leak claude-* names to shim providers', async () => {
// Umbrella guard: for each OpenAI-shim provider, none of the default-model
// helpers may return an Anthropic-branded model name. That was the source
// of the WebFetch 60s hang — MiniMax received "claude-haiku-4-5" and sat
@@ -186,6 +217,12 @@ test('default helpers do not leak claude-* names to shim providers', () => {
process.env.MINIMAX_API_KEY = 'minimax-test'
process.env.OPENAI_MODEL = 'MiniMax-M2.7'
+const {
+getSmallFastModel,
+getDefaultOpusModel,
+getDefaultSonnetModel,
+getDefaultHaikuModel,
+} = await importFreshModelModule()
for (const fn of [
getSmallFastModel,
getDefaultOpusModel,

View File

@@ -1,4 +1,5 @@
-import { afterEach, beforeEach, expect, mock, test } from 'bun:test'
+import { afterEach, beforeEach, expect, test } from 'bun:test'
+import { mock } from 'bun:test'
import { resetModelStringsForTestingOnly } from '../../bootstrap/state.js'
import { saveGlobalConfig } from '../config.js'
@@ -39,6 +40,7 @@ beforeEach(() => {
})
afterEach(() => {
+mock.restore()
process.env.CLAUDE_CODE_USE_GITHUB = originalEnv.CLAUDE_CODE_USE_GITHUB
process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
process.env.CLAUDE_CODE_USE_GEMINI = originalEnv.CLAUDE_CODE_USE_GEMINI

View File

@@ -236,6 +236,7 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
// Moonshot AI direct API (api.moonshot.ai/v1). Values from Moonshot's
// published model card — all K2 tier share 256K context. Prefix matching
// in lookupByKey catches variants like "kimi-k2.6-preview".
+'kimi-for-coding': 262_144,
'kimi-k2.6': 262_144,
'kimi-k2': 131_072,
'kimi-k2-instruct': 131_072,
@@ -423,6 +424,7 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
'glm-4.7': 16_384,
// Moonshot AI direct API
+'kimi-for-coding': 32_768,
'kimi-k2.6': 32_768,
'kimi-k2': 32_768,
'kimi-k2-instruct': 32_768,
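The comment above credits prefix matching in lookupByKey with catching variants like 'kimi-k2.6-preview', while kimi-for-coding now gets exact entries in both tables. lookupByKey itself is not part of this diff; a hypothetical longest-prefix lookup in its spirit:

// Hypothetical sketch: exact hit first, then longest matching key prefix.
function lookupByKey(table: Record<string, number>, model: string): number | undefined {
  if (model in table) return table[model]
  let best: string | undefined
  for (const key of Object.keys(table)) {
    if (model.startsWith(key) && (!best || key.length > best.length)) best = key
  }
  return best === undefined ? undefined : table[best]
}

const windows = { 'kimi-for-coding': 262_144, 'kimi-k2.6': 262_144, 'kimi-k2': 131_072 }
lookupByKey(windows, 'kimi-k2.6-preview') // 262_144 via the 'kimi-k2.6' prefix
lookupByKey(windows, 'kimi-for-coding') // 262_144, exact entry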

View File

@@ -81,13 +81,22 @@ test('detects common local openai-compatible providers by hostname', async () =>
).toBe('vLLM')
})
-test('detects Moonshot (Kimi) from api.moonshot.ai hostname', async () => {
+test('detects Moonshot AI - API from api.moonshot.ai hostname', async () => {
const { getLocalOpenAICompatibleProviderLabel } =
await loadProviderDiscoveryModule()
expect(
getLocalOpenAICompatibleProviderLabel('https://api.moonshot.ai/v1'),
-).toBe('Moonshot (Kimi)')
+).toBe('Moonshot AI - API')
})
+test('detects Moonshot AI - Kimi Code from api.kimi.com/coding hostname', async () => {
+const { getLocalOpenAICompatibleProviderLabel } =
+await loadProviderDiscoveryModule()
+expect(
+getLocalOpenAICompatibleProviderLabel('https://api.kimi.com/coding/v1'),
+).toBe('Moonshot AI - Kimi Code')
+})
test('falls back to a generic local openai-compatible label', async () => {
@@ -360,4 +369,4 @@ test('atomic chat readiness returns loaded model ids when ready', async () => {
state: 'ready',
models: ['Qwen3_5-4B_Q4_K_M', 'llama-3.1-8b-instruct'],
})
})
})

View File

@@ -197,13 +197,21 @@ export function getLocalOpenAICompatibleProviderLabel(baseUrl?: string): string
if (host.includes('minimax') || haystack.includes('minimax')) {
return 'MiniMax'
}
+// Kimi Code subscription API
+if (hostname === 'api.kimi.com' && path.includes('/coding')) {
+return 'Moonshot AI - Kimi Code'
+}
// Check for Bankr LLM gateway
if (host.includes('bankr') || haystack.includes('bankr')) {
return 'Bankr'
}
-// Moonshot AI (Kimi) direct API
-if (host.includes('moonshot') || haystack.includes('moonshot') || haystack.includes('kimi')) {
-return 'Moonshot (Kimi)'
+// Moonshot AI direct API
+if (
+host.includes('moonshot') ||
+haystack.includes('moonshot') ||
+haystack.includes('kimi')
+) {
+return 'Moonshot AI - API'
}
} catch {
// Fall back to the generic label when the base URL is malformed.
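Placement is load-bearing: the api.kimi.com + /coding branch must sit before the generic moonshot/kimi substring match, since 'https://api.kimi.com/coding/v1' also contains 'kimi' and would otherwise be labeled as the direct API. A standalone sketch of the ordering hazard:

// Illustrative only — same checks, same order as the diff above.
function label(baseUrl: string): string {
  const url = new URL(baseUrl)
  if (url.hostname === 'api.kimi.com' && url.pathname.includes('/coding'))
    return 'Moonshot AI - Kimi Code'
  if (url.hostname.includes('moonshot') || baseUrl.toLowerCase().includes('kimi'))
    return 'Moonshot AI - API'
  return 'Local OpenAI-compatible'
}
label('https://api.kimi.com/coding/v1') // 'Moonshot AI - Kimi Code'
label('https://api.moonshot.ai/v1') // 'Moonshot AI - API'
// Swap the two branches and the first call would return 'Moonshot AI - API'.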

View File

@@ -42,6 +42,11 @@ function profile(profile: ProfileFile['profile'], env: ProfileFile['env']): Prof
}
}
+async function importFreshProviderProfileModule() {
+const nonce = `${Date.now()}-${Math.random()}`
+return import(`./providerProfile.ts?ts=${nonce}`)
+}
const missingCodexAuthPath = join(tmpdir(), 'openclaude-missing-codex-auth.json')
test('matching persisted ollama env is reused for ollama launch', async () => {
@@ -630,6 +635,7 @@ test('buildStartupEnvFromProfile preserves explicit GitHub provider settings whe
})
test('applySavedProfileToCurrentSession can switch away from GitHub provider env', async () => {
+const { applySavedProfileToCurrentSession } = await importFreshProviderProfileModule()
const processEnv = {
CLAUDE_CODE_USE_GITHUB: '1',
OPENAI_MODEL: 'github:copilot',
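Both new helpers lean on the same cache-busting trick: a unique query string makes each dynamic import resolve to a distinct specifier, so the module is re-evaluated and picks up whatever mocks and env vars are in force at call time. The pattern in isolation (importFresh and './someModule.js' are illustrative names):

// The ESM module cache keys on the full specifier, query string included,
// so each nonce yields a freshly evaluated copy of the module.
async function importFresh(path: string): Promise<Record<string, unknown>> {
  const nonce = `${Date.now()}-${Math.random()}`
  return import(`${path}?ts=${nonce}`)
}

const mod = await importFresh('./someModule.js') // sees current mocks/env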

View File

@@ -591,6 +591,27 @@ describe('getProviderPresetDefaults', () => {
expect(defaults.requiresApiKey).toBe(false)
})
+test('kimi-code preset defaults to the Kimi Code coding endpoint', async () => {
+const { getProviderPresetDefaults } = await importFreshProviderProfileModules()
+const defaults = getProviderPresetDefaults('kimi-code')
+expect(defaults.provider).toBe('openai')
+expect(defaults.name).toBe('Moonshot AI - Kimi Code')
+expect(defaults.baseUrl).toBe('https://api.kimi.com/coding/v1')
+expect(defaults.model).toBe('kimi-for-coding')
+expect(defaults.requiresApiKey).toBe(true)
+})
+test('moonshotai preset keeps the direct API under the renamed display label', async () => {
+const { getProviderPresetDefaults } = await importFreshProviderProfileModules()
+const defaults = getProviderPresetDefaults('moonshotai')
+expect(defaults.name).toBe('Moonshot AI - API')
+expect(defaults.baseUrl).toBe('https://api.moonshot.ai/v1')
+expect(defaults.model).toBe('kimi-k2.5')
+})
test('deepseek preset defaults to DeepSeek V4 flash and exposes flash/pro aliases', async () => {
const { getProviderPresetDefaults } = await importFreshProviderProfileModules()

View File

@@ -21,6 +21,7 @@ export type ProviderPreset =
| 'anthropic'
| 'ollama'
| 'openai'
+| 'kimi-code'
| 'moonshotai'
| 'deepseek'
| 'gemini'
@@ -160,10 +161,19 @@ export function getProviderPresetDefaults(
apiKey: '',
requiresApiKey: true,
}
+case 'kimi-code':
+return {
+provider: 'openai',
+name: 'Moonshot AI - Kimi Code',
+baseUrl: 'https://api.kimi.com/coding/v1',
+model: 'kimi-for-coding',
+apiKey: '',
+requiresApiKey: true,
+}
case 'moonshotai':
return {
provider: 'openai',
-name: 'Moonshot AI',
+name: 'Moonshot AI - API',
baseUrl: 'https://api.moonshot.ai/v1',
model: 'kimi-k2.5',
apiKey: '',