feat: add Alibaba Coding Plan (DashScope) provider support (#509)

* feat: add Alibaba Coding Plan provider presets

* fix: add DashScope presets to ProviderManager UI selection list

* feat: read DASHSCOPE_API_KEY env var for DashScope provider presets

* test: add regression tests for Alibaba models

* docs: add comment noting when context-window values were sourced

* feat(dashscope): add qwen3.6-plus model support

* fix(dashscope): remove MiniMax-M2.5 entries to prevent future key conflicts
This commit is contained in:
regisksc
2026-04-17 08:06:21 -03:00
committed by GitHub
parent 80a00acc2c
commit 43ac6dba75
4 changed files with 174 additions and 0 deletions

View File

@@ -202,6 +202,21 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
'llama3.2:1b': 128_000,
'qwen3:8b': 128_000,
'codestral': 32_768,
// Alibaba DashScope (Coding Plan)
// Model context windows from DashScope API /models endpoint (April 2026).
// Values sourced from: qwen3.6-plus/qwen3.5-plus/qwen3-coder-plus (1M),
// qwen3-coder-next/max (256K), kimi-k2.5 (256K), glm-5/glm-4.7 (198K).
// Max output tokens: Qwen variants (64K/32K), GLM (16K).
'qwen3.6-plus': 1_000_000,
'qwen3.5-plus': 1_000_000,
'qwen3-coder-plus': 1_000_000,
'qwen3-coder-next': 262_144,
'qwen3-max': 262_144,
'qwen3-max-2026-01-23': 262_144,
'kimi-k2.5': 262_144,
'glm-5': 202_752,
'glm-4.7': 202_752,
}
/**
@@ -330,6 +345,11 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
'deepseek-r1:14b': 8_192,
'mistral:7b': 4_096,
'phi4:14b': 4_096,
'gemma2:27b': 4_096,
'codellama:13b': 4_096,
'llama3.2:1b': 4_096,
'qwen3:8b': 8_192,
'codestral': 8_192,
// NVIDIA NIM models
'nvidia/llama-3.1-nemotron-70b-instruct': 32_768,
@@ -356,6 +376,17 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
'databricks/dbrx-instruct': 32_768,
'ai21labs/jamba-1.5-large-instruct': 32_768,
'01-ai/yi-large': 8_192,
// Alibaba DashScope (Coding Plan)
'qwen3.6-plus': 65_536,
'qwen3.5-plus': 65_536,
'qwen3-coder-plus': 65_536,
'qwen3-coder-next': 65_536,
'qwen3-max': 32_768,
'qwen3-max-2026-01-23': 32_768,
'kimi-k2.5': 32_768,
'glm-5': 16_384,
'glm-4.7': 16_384,
}
function lookupByModel<T>(table: Record<string, T>, model: string): T | undefined {