Add Kimi Code provider preset and rename Moonshot API preset (#862)

* Add Kimi Code provider preset

* Fix description.

Co-authored-by: Copilot <copilot@github.com>

* More description fixes.

* Fix release validation tests

---------

Co-authored-by: Copilot <copilot@github.com>
This commit is contained in:
JATMN
2026-04-24 21:36:54 -07:00
committed by GitHub
parent 26413f6d30
commit 9070220292
18 changed files with 296 additions and 56 deletions

View File

@@ -232,6 +232,17 @@ test('DashScope kimi-k2.5 uses provider-specific context and output caps', () =>
})
})
test('Kimi Code kimi-for-coding uses provider-specific context and output caps', () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1'
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
expect(getContextWindowForModel('kimi-for-coding')).toBe(262_144)
expect(getModelMaxOutputTokens('kimi-for-coding')).toEqual({
default: 32_768,
upperLimit: 32_768,
})
})
test('DashScope glm-5 uses provider-specific context and output caps', () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1'
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS

View File

@@ -1,13 +1,32 @@
import { afterEach, beforeEach, expect, mock, test } from 'bun:test'
import { saveGlobalConfig } from '../config.js'
import {
getDefaultHaikuModel,
getDefaultOpusModel,
getDefaultSonnetModel,
getSmallFastModel,
getUserSpecifiedModelSetting,
} from './model.js'
async function importFreshModelModule() {
mock.restore()
mock.module('./providers.js', () => ({
getAPIProvider: () => {
if (process.env.NVIDIA_NIM) return 'nvidia-nim'
if (process.env.MINIMAX_API_KEY) return 'minimax'
if (process.env.CLAUDE_CODE_USE_GEMINI) return 'gemini'
if (process.env.CLAUDE_CODE_USE_MISTRAL) return 'mistral'
if (process.env.CLAUDE_CODE_USE_GITHUB) return 'github'
if (process.env.CLAUDE_CODE_USE_OPENAI) {
const baseUrl = process.env.OPENAI_BASE_URL ?? ''
const model = process.env.OPENAI_MODEL ?? ''
return baseUrl.includes('/backend-api/codex') || model.startsWith('codex')
? 'codex'
: 'openai'
}
if (process.env.CLAUDE_CODE_USE_BEDROCK) return 'bedrock'
if (process.env.CLAUDE_CODE_USE_VERTEX) return 'vertex'
if (process.env.CLAUDE_CODE_USE_FOUNDRY) return 'foundry'
return 'firstParty'
},
}))
const nonce = `${Date.now()}-${Math.random()}`
return import(`./model.js?ts=${nonce}`)
}
const SAVED_ENV = {
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
@@ -59,6 +78,7 @@ beforeEach(() => {
})
afterEach(() => {
mock.restore()
for (const key of Object.keys(SAVED_ENV) as Array<keyof typeof SAVED_ENV>) {
restoreEnv(key)
}
@@ -68,7 +88,7 @@ afterEach(() => {
}))
})
test('codex provider reads OPENAI_MODEL, not stale settings.model', () => {
test('codex provider reads OPENAI_MODEL, not stale settings.model', async () => {
// Regression: switching from Moonshot (settings.model='kimi-k2.6' persisted
// from that session) to the Codex profile. Codex profile correctly sets
// OPENAI_MODEL=codexplan + base URL to chatgpt.com/backend-api/codex.
@@ -82,44 +102,49 @@ test('codex provider reads OPENAI_MODEL, not stale settings.model', () => {
process.env.CODEX_API_KEY = 'codex-test'
process.env.CHATGPT_ACCOUNT_ID = 'acct_test'
const { getUserSpecifiedModelSetting } = await importFreshModelModule()
const model = getUserSpecifiedModelSetting()
expect(model).toBe('codexplan')
})
test('nvidia-nim provider reads OPENAI_MODEL, not stale settings.model', () => {
test('nvidia-nim provider reads OPENAI_MODEL, not stale settings.model', async () => {
saveGlobalConfig(current => ({ ...current, model: 'kimi-k2.6' }))
process.env.NVIDIA_NIM = '1'
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_MODEL = 'nvidia/llama-3.1-nemotron-70b-instruct'
const { getUserSpecifiedModelSetting } = await importFreshModelModule()
const model = getUserSpecifiedModelSetting()
expect(model).toBe('nvidia/llama-3.1-nemotron-70b-instruct')
})
test('minimax provider reads OPENAI_MODEL, not stale settings.model', () => {
test('minimax provider reads OPENAI_MODEL, not stale settings.model', async () => {
saveGlobalConfig(current => ({ ...current, model: 'kimi-k2.6' }))
process.env.MINIMAX_API_KEY = 'minimax-test'
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_MODEL = 'MiniMax-M2.5'
const { getUserSpecifiedModelSetting } = await importFreshModelModule()
const model = getUserSpecifiedModelSetting()
expect(model).toBe('MiniMax-M2.5')
})
test('openai provider still reads OPENAI_MODEL (regression guard)', () => {
test('openai provider still reads OPENAI_MODEL (regression guard)', async () => {
saveGlobalConfig(current => ({ ...current, model: 'stale-default' }))
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_MODEL = 'gpt-4o'
const { getUserSpecifiedModelSetting } = await importFreshModelModule()
const model = getUserSpecifiedModelSetting()
expect(model).toBe('gpt-4o')
})
test('github provider still reads OPENAI_MODEL (regression guard)', () => {
test('github provider still reads OPENAI_MODEL (regression guard)', async () => {
saveGlobalConfig(current => ({ ...current, model: 'stale-default' }))
process.env.CLAUDE_CODE_USE_GITHUB = '1'
process.env.OPENAI_MODEL = 'github:copilot'
const { getUserSpecifiedModelSetting } = await importFreshModelModule()
const model = getUserSpecifiedModelSetting()
expect(model).toBe('github:copilot')
})
@@ -131,54 +156,60 @@ test('github provider still reads OPENAI_MODEL (regression guard)', () => {
// because queryHaiku() shipped an unknown model id to the shim endpoint.
// ---------------------------------------------------------------------------
test('getSmallFastModel returns OPENAI_MODEL for MiniMax (regression: WebFetch hang)', () => {
test('getSmallFastModel returns OPENAI_MODEL for MiniMax (regression: WebFetch hang)', async () => {
process.env.MINIMAX_API_KEY = 'minimax-test'
process.env.OPENAI_MODEL = 'MiniMax-M2.5-highspeed'
const { getSmallFastModel } = await importFreshModelModule()
expect(getSmallFastModel()).toBe('MiniMax-M2.5-highspeed')
})
test('getSmallFastModel returns OPENAI_MODEL for Codex (regression)', () => {
test('getSmallFastModel returns OPENAI_MODEL for Codex (regression)', async () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'https://chatgpt.com/backend-api/codex'
process.env.OPENAI_MODEL = 'codexspark'
process.env.CODEX_API_KEY = 'codex-test'
process.env.CHATGPT_ACCOUNT_ID = 'acct_test'
const { getSmallFastModel } = await importFreshModelModule()
expect(getSmallFastModel()).toBe('codexspark')
})
test('getSmallFastModel returns OPENAI_MODEL for NVIDIA NIM (regression)', () => {
test('getSmallFastModel returns OPENAI_MODEL for NVIDIA NIM (regression)', async () => {
process.env.NVIDIA_NIM = '1'
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_MODEL = 'nvidia/llama-3.1-nemotron-70b-instruct'
const { getSmallFastModel } = await importFreshModelModule()
expect(getSmallFastModel()).toBe('nvidia/llama-3.1-nemotron-70b-instruct')
})
test('getDefaultOpusModel returns OPENAI_MODEL for MiniMax', () => {
test('getDefaultOpusModel returns OPENAI_MODEL for MiniMax', async () => {
process.env.MINIMAX_API_KEY = 'minimax-test'
process.env.OPENAI_MODEL = 'MiniMax-M2.7'
const { getDefaultOpusModel } = await importFreshModelModule()
expect(getDefaultOpusModel()).toBe('MiniMax-M2.7')
})
test('getDefaultSonnetModel returns OPENAI_MODEL for NVIDIA NIM', () => {
test('getDefaultSonnetModel returns OPENAI_MODEL for NVIDIA NIM', async () => {
process.env.NVIDIA_NIM = '1'
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_MODEL = 'nvidia/llama-3.1-nemotron-70b-instruct'
const { getDefaultSonnetModel } = await importFreshModelModule()
expect(getDefaultSonnetModel()).toBe('nvidia/llama-3.1-nemotron-70b-instruct')
})
test('getDefaultHaikuModel returns OPENAI_MODEL for MiniMax', () => {
test('getDefaultHaikuModel returns OPENAI_MODEL for MiniMax', async () => {
process.env.MINIMAX_API_KEY = 'minimax-test'
process.env.OPENAI_MODEL = 'MiniMax-M2.5-highspeed'
const { getDefaultHaikuModel } = await importFreshModelModule()
expect(getDefaultHaikuModel()).toBe('MiniMax-M2.5-highspeed')
})
test('default helpers do not leak claude-* names to shim providers', () => {
test('default helpers do not leak claude-* names to shim providers', async () => {
// Umbrella guard: for each OpenAI-shim provider, none of the default-model
// helpers may return an Anthropic-branded model name. That was the source
// of the WebFetch 60s hang — MiniMax received "claude-haiku-4-5" and sat
@@ -186,6 +217,12 @@ test('default helpers do not leak claude-* names to shim providers', () => {
process.env.MINIMAX_API_KEY = 'minimax-test'
process.env.OPENAI_MODEL = 'MiniMax-M2.7'
const {
getSmallFastModel,
getDefaultOpusModel,
getDefaultSonnetModel,
getDefaultHaikuModel,
} = await importFreshModelModule()
for (const fn of [
getSmallFastModel,
getDefaultOpusModel,

View File

@@ -1,4 +1,5 @@
import { afterEach, beforeEach, expect, mock, test } from 'bun:test'
import { afterEach, beforeEach, expect, test } from 'bun:test'
import { mock } from 'bun:test'
import { resetModelStringsForTestingOnly } from '../../bootstrap/state.js'
import { saveGlobalConfig } from '../config.js'
@@ -39,6 +40,7 @@ beforeEach(() => {
})
afterEach(() => {
mock.restore()
process.env.CLAUDE_CODE_USE_GITHUB = originalEnv.CLAUDE_CODE_USE_GITHUB
process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
process.env.CLAUDE_CODE_USE_GEMINI = originalEnv.CLAUDE_CODE_USE_GEMINI

View File

@@ -236,6 +236,7 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
// Moonshot AI direct API (api.moonshot.ai/v1). Values from Moonshot's
// published model card — all K2 tier share 256K context. Prefix matching
// in lookupByKey catches variants like "kimi-k2.6-preview".
'kimi-for-coding': 262_144,
'kimi-k2.6': 262_144,
'kimi-k2': 131_072,
'kimi-k2-instruct': 131_072,
@@ -423,6 +424,7 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
'glm-4.7': 16_384,
// Moonshot AI direct API
'kimi-for-coding': 32_768,
'kimi-k2.6': 32_768,
'kimi-k2': 32_768,
'kimi-k2-instruct': 32_768,

View File

@@ -81,13 +81,22 @@ test('detects common local openai-compatible providers by hostname', async () =>
).toBe('vLLM')
})
test('detects Moonshot (Kimi) from api.moonshot.ai hostname', async () => {
test('detects Moonshot AI - API from api.moonshot.ai hostname', async () => {
const { getLocalOpenAICompatibleProviderLabel } =
await loadProviderDiscoveryModule()
expect(
getLocalOpenAICompatibleProviderLabel('https://api.moonshot.ai/v1'),
).toBe('Moonshot (Kimi)')
).toBe('Moonshot AI - API')
})
test('detects Moonshot AI - Kimi Code from api.kimi.com/coding hostname', async () => {
const { getLocalOpenAICompatibleProviderLabel } =
await loadProviderDiscoveryModule()
expect(
getLocalOpenAICompatibleProviderLabel('https://api.kimi.com/coding/v1'),
).toBe('Moonshot AI - Kimi Code')
})
test('falls back to a generic local openai-compatible label', async () => {
@@ -360,4 +369,4 @@ test('atomic chat readiness returns loaded model ids when ready', async () => {
state: 'ready',
models: ['Qwen3_5-4B_Q4_K_M', 'llama-3.1-8b-instruct'],
})
})
})

View File

@@ -197,13 +197,21 @@ export function getLocalOpenAICompatibleProviderLabel(baseUrl?: string): string
if (host.includes('minimax') || haystack.includes('minimax')) {
return 'MiniMax'
}
// Kimi Code subscription API
if (hostname === 'api.kimi.com' && path.includes('/coding')) {
return 'Moonshot AI - Kimi Code'
}
// Check for Bankr LLM gateway
if (host.includes('bankr') || haystack.includes('bankr')) {
return 'Bankr'
}
// Moonshot AI (Kimi) direct API
if (host.includes('moonshot') || haystack.includes('moonshot') || haystack.includes('kimi')) {
return 'Moonshot (Kimi)'
// Moonshot AI direct API
if (
host.includes('moonshot') ||
haystack.includes('moonshot') ||
haystack.includes('kimi')
) {
return 'Moonshot AI - API'
}
} catch {
// Fall back to the generic label when the base URL is malformed.

View File

@@ -42,6 +42,11 @@ function profile(profile: ProfileFile['profile'], env: ProfileFile['env']): Prof
}
}
async function importFreshProviderProfileModule() {
const nonce = `${Date.now()}-${Math.random()}`
return import(`./providerProfile.ts?ts=${nonce}`)
}
const missingCodexAuthPath = join(tmpdir(), 'openclaude-missing-codex-auth.json')
test('matching persisted ollama env is reused for ollama launch', async () => {
@@ -630,6 +635,7 @@ test('buildStartupEnvFromProfile preserves explicit GitHub provider settings whe
})
test('applySavedProfileToCurrentSession can switch away from GitHub provider env', async () => {
const { applySavedProfileToCurrentSession } = await importFreshProviderProfileModule()
const processEnv = {
CLAUDE_CODE_USE_GITHUB: '1',
OPENAI_MODEL: 'github:copilot',

View File

@@ -591,6 +591,27 @@ describe('getProviderPresetDefaults', () => {
expect(defaults.requiresApiKey).toBe(false)
})
test('kimi-code preset defaults to the Kimi Code coding endpoint', async () => {
const { getProviderPresetDefaults } = await importFreshProviderProfileModules()
const defaults = getProviderPresetDefaults('kimi-code')
expect(defaults.provider).toBe('openai')
expect(defaults.name).toBe('Moonshot AI - Kimi Code')
expect(defaults.baseUrl).toBe('https://api.kimi.com/coding/v1')
expect(defaults.model).toBe('kimi-for-coding')
expect(defaults.requiresApiKey).toBe(true)
})
test('moonshotai preset keeps the direct API under the renamed display label', async () => {
const { getProviderPresetDefaults } = await importFreshProviderProfileModules()
const defaults = getProviderPresetDefaults('moonshotai')
expect(defaults.name).toBe('Moonshot AI - API')
expect(defaults.baseUrl).toBe('https://api.moonshot.ai/v1')
expect(defaults.model).toBe('kimi-k2.5')
})
test('deepseek preset defaults to DeepSeek V4 flash and exposes flash/pro aliases', async () => {
const { getProviderPresetDefaults } = await importFreshProviderProfileModules()

View File

@@ -21,6 +21,7 @@ export type ProviderPreset =
| 'anthropic'
| 'ollama'
| 'openai'
| 'kimi-code'
| 'moonshotai'
| 'deepseek'
| 'gemini'
@@ -160,10 +161,19 @@ export function getProviderPresetDefaults(
apiKey: '',
requiresApiKey: true,
}
case 'kimi-code':
return {
provider: 'openai',
name: 'Moonshot AI - Kimi Code',
baseUrl: 'https://api.kimi.com/coding/v1',
model: 'kimi-for-coding',
apiKey: '',
requiresApiKey: true,
}
case 'moonshotai':
return {
provider: 'openai',
name: 'Moonshot AI',
name: 'Moonshot AI - API',
baseUrl: 'https://api.moonshot.ai/v1',
model: 'kimi-k2.5',
apiKey: '',