feat: add Alibaba Coding Plan (DashScope) provider support (#509)

* feat: add Alibaba Coding Plan provider presets

* fix: add DashScope presets to ProviderManager UI selection list

* feat: read DASHSCOPE_API_KEY env var for DashScope provider presets

* test: add regression tests for Alibaba models

* docs: add comment noting when DashScope model limits were sourced (April 2026)

* feat(dashscope): add qwen3.6-plus model support

* fix(dashscope): remove MiniMax-M2.5 entries to prevent future key conflicts
This commit is contained in:
regisksc
2026-04-17 08:06:21 -03:00
committed by GitHub
parent 80a00acc2c
commit 43ac6dba75
4 changed files with 174 additions and 0 deletions

View File

@@ -1032,6 +1032,16 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
label: 'LM Studio',
description: 'Local LM Studio endpoint',
},
{
value: 'dashscope-cn',
label: 'Alibaba Coding Plan (China)',
description: 'Alibaba DashScope China endpoint',
},
{
value: 'dashscope-intl',
label: 'Alibaba Coding Plan',
description: 'Alibaba DashScope International endpoint',
},
{
value: 'custom',
label: 'Custom',

View File

@@ -141,3 +141,116 @@ test('MiniMax-M2.5 and M2.1 use explicit provider-specific context and output ca
upperLimit: 131_072,
})
})
// Expected DashScope (Coding Plan) model limits. Each case pins the context
// window and the max-output cap for one model; driving every test from this
// table guarantees identical assertion coverage for all models (the original
// tests only checked getMaxOutputTokensForModel for two of the nine models,
// and the clamp test skipped qwen3-coder-plus, qwen3-max-2026-01-23 and
// glm-4.7).
const DASHSCOPE_CASES: ReadonlyArray<{
  name: string
  model: string
  context: number
  maxOutput: number
}> = [
  {
    name: 'DashScope qwen3.6-plus uses provider-specific context and output caps',
    model: 'qwen3.6-plus',
    context: 1_000_000,
    maxOutput: 65_536,
  },
  {
    name: 'DashScope qwen3.5-plus uses provider-specific context and output caps',
    model: 'qwen3.5-plus',
    context: 1_000_000,
    maxOutput: 65_536,
  },
  {
    name: 'DashScope qwen3-coder-plus uses provider-specific context and output caps',
    model: 'qwen3-coder-plus',
    context: 1_000_000,
    maxOutput: 65_536,
  },
  {
    name: 'DashScope qwen3-coder-next uses provider-specific context and output caps',
    model: 'qwen3-coder-next',
    context: 262_144,
    maxOutput: 65_536,
  },
  {
    name: 'DashScope qwen3-max uses provider-specific context and output caps',
    model: 'qwen3-max',
    context: 262_144,
    maxOutput: 32_768,
  },
  {
    name: 'DashScope qwen3-max dated variant resolves to base entry via prefix match',
    model: 'qwen3-max-2026-01-23',
    context: 262_144,
    maxOutput: 32_768,
  },
  {
    name: 'DashScope kimi-k2.5 uses provider-specific context and output caps',
    model: 'kimi-k2.5',
    context: 262_144,
    maxOutput: 32_768,
  },
  {
    name: 'DashScope glm-5 uses provider-specific context and output caps',
    model: 'glm-5',
    context: 202_752,
    maxOutput: 16_384,
  },
  {
    name: 'DashScope glm-4.7 uses provider-specific context and output caps',
    model: 'glm-4.7',
    context: 202_752,
    maxOutput: 16_384,
  },
]
for (const { name, model, context, maxOutput } of DASHSCOPE_CASES) {
  test(name, () => {
    // Force the OpenAI-compatible path and clear any user override so the
    // provider defaults are exercised.
    process.env.CLAUDE_CODE_USE_OPENAI = '1'
    delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
    expect(getContextWindowForModel(model)).toBe(context)
    expect(getModelMaxOutputTokens(model)).toEqual({
      default: maxOutput,
      upperLimit: maxOutput,
    })
    // Resolved cap is now asserted for every model, not just the first two.
    expect(getMaxOutputTokensForModel(model)).toBe(maxOutput)
  })
}
test('DashScope models clamp oversized max output overrides to the provider limit', () => {
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  // 100000 exceeds every DashScope cap, so each model must clamp to its own
  // provider-specific limit rather than honor the override.
  process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS = '100000'
  for (const { model, maxOutput } of DASHSCOPE_CASES) {
    expect(getMaxOutputTokensForModel(model)).toBe(maxOutput)
  }
})

View File

@@ -202,6 +202,21 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
'llama3.2:1b': 128_000,
'qwen3:8b': 128_000,
'codestral': 32_768,
// Alibaba DashScope (Coding Plan)
// Model context windows from DashScope API /models endpoint (April 2026).
// Values sourced from: qwen3.6-plus/qwen3.5-plus/qwen3-coder-plus (1M),
// qwen3-coder-next/qwen3-max/kimi-k2.5 (256K), glm-5/glm-4.7 (198K).
// Max output tokens: Qwen variants (64K/32K), kimi-k2.5 (32K), GLM (16K).
'qwen3.6-plus': 1_000_000,
'qwen3.5-plus': 1_000_000,
'qwen3-coder-plus': 1_000_000,
'qwen3-coder-next': 262_144,
'qwen3-max': 262_144,
'qwen3-max-2026-01-23': 262_144,
'kimi-k2.5': 262_144,
'glm-5': 202_752,
'glm-4.7': 202_752,
}
/**
@@ -330,6 +345,11 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
'deepseek-r1:14b': 8_192,
'mistral:7b': 4_096,
'phi4:14b': 4_096,
'gemma2:27b': 4_096,
'codellama:13b': 4_096,
'llama3.2:1b': 4_096,
'qwen3:8b': 8_192,
'codestral': 8_192,
// NVIDIA NIM models
'nvidia/llama-3.1-nemotron-70b-instruct': 32_768,
@@ -356,6 +376,17 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
'databricks/dbrx-instruct': 32_768,
'ai21labs/jamba-1.5-large-instruct': 32_768,
'01-ai/yi-large': 8_192,
// Alibaba DashScope (Coding Plan)
'qwen3.6-plus': 65_536,
'qwen3.5-plus': 65_536,
'qwen3-coder-plus': 65_536,
'qwen3-coder-next': 65_536,
'qwen3-max': 32_768,
'qwen3-max-2026-01-23': 32_768,
'kimi-k2.5': 32_768,
'glm-5': 16_384,
'glm-4.7': 16_384,
}
function lookupByModel<T>(table: Record<string, T>, model: string): T | undefined {

View File

@@ -20,6 +20,8 @@ export type ProviderPreset =
| 'azure-openai'
| 'openrouter'
| 'lmstudio'
| 'dashscope-cn'
| 'dashscope-intl'
| 'custom'
| 'nvidia-nim'
| 'minimax'
@@ -220,6 +222,24 @@ export function getProviderPresetDefaults(
apiKey: '',
requiresApiKey: false,
}
case 'dashscope-cn':
return {
provider: 'openai',
name: 'Alibaba Coding Plan (China)',
baseUrl: 'https://coding.dashscope.aliyuncs.com/v1',
model: 'qwen3.6-plus',
apiKey: process.env.DASHSCOPE_API_KEY ?? '',
requiresApiKey: true,
}
case 'dashscope-intl':
return {
provider: 'openai',
name: 'Alibaba Coding Plan',
baseUrl: 'https://coding-intl.dashscope.aliyuncs.com/v1',
model: 'qwen3.6-plus',
apiKey: process.env.DASHSCOPE_API_KEY ?? '',
requiresApiKey: true,
}
case 'custom':
return {
provider: 'openai',