feat: add NVIDIA NIM and MiniMax provider support (#552)

* feat: add NVIDIA NIM and MiniMax provider support

- Add nvidia-nim and minimax to --provider CLI flag
- Add model discovery for NVIDIA NIM (160+ models) and MiniMax
- Update /model picker to show provider-specific models
- Fix provider detection in startup banner
- Update .env.example with new provider options

Supported providers:
- NVIDIA NIM: https://integrate.api.nvidia.com/v1
- MiniMax: https://api.minimax.io/v1
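
For quick reference, the endpoint/default-model pairs wired in below, collected into one place (an illustrative sketch; the `ProviderDefaults` shape is mine, not a type from this PR):

```ts
// Illustrative summary of the two new providers (values from the diffs below).
type ProviderDefaults = { baseUrl: string; defaultModel: string; apiKeyEnv: string }

const NEW_PROVIDERS: Record<'nvidia-nim' | 'minimax', ProviderDefaults> = {
  'nvidia-nim': {
    baseUrl: 'https://integrate.api.nvidia.com/v1',
    defaultModel: 'nvidia/llama-3.1-nemotron-70b-instruct',
    apiKeyEnv: 'NVIDIA_API_KEY',
  },
  minimax: {
    baseUrl: 'https://api.minimax.io/v1',
    defaultModel: 'MiniMax-M2.5',
    apiKeyEnv: 'MINIMAX_API_KEY',
  },
}
```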

* fix: resolve conflict in StartupScreen (keep NVIDIA/MiniMax + add Codex detection)

* fix: resolve providerProfile conflict (add imports from main, keep NVIDIA/MiniMax)

* fix: revert providerSecrets to match main (NVIDIA/MiniMax handled elsewhere)

* fix: add context window entries for NVIDIA NIM and new MiniMax models

* fix: use GLM-5 as NVIDIA NIM default and MiniMax-M2.5 for consistency

* fix: address remaining review items - add GLM/Kimi context entries, max output tokens, fix .env.example, revert to Nemotron default

* fix: filter NVIDIA NIM picker to chat/instruct models only, set provider-specific API keys from saved profiles

* chore: add more NVIDIA NIM context window entries for popular models

* fix: address remaining non-blocking items - fix base model, clear provider API keys on profile switch
ArkhAngelLifeJiggy
2026-04-15 13:26:13 +01:00
committed by GitHub
parent 6b2121da12
commit 51191d6132
15 changed files with 628 additions and 70 deletions

View File

@@ -225,6 +225,30 @@ ANTHROPIC_API_KEY=sk-ant-your-key-here
 # GOOGLE_CLOUD_PROJECT=your-gcp-project-id
+# -----------------------------------------------------------------------------
+# Option 9: NVIDIA NIM
+# -----------------------------------------------------------------------------
+# NVIDIA NIM provides hosted inference endpoints for NVIDIA models.
+# Get your API key from https://build.nvidia.com/
+#
+# CLAUDE_CODE_USE_OPENAI=1
+# NVIDIA_API_KEY=nvapi-your-key-here
+# OPENAI_BASE_URL=https://integrate.api.nvidia.com/v1
+# OPENAI_MODEL=nvidia/llama-3.1-nemotron-70b-instruct
+# -----------------------------------------------------------------------------
+# Option 10: MiniMax
+# -----------------------------------------------------------------------------
+# MiniMax API provides text generation models.
+# Get your API key from https://platform.minimax.io/
+#
+# CLAUDE_CODE_USE_OPENAI=1
+# MINIMAX_API_KEY=your-minimax-key-here
+# OPENAI_BASE_URL=https://api.minimax.io/v1
+# OPENAI_MODEL=MiniMax-M2.5
 # =============================================================================
 # OPTIONAL TUNING
 # =============================================================================
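
To sanity-check the variables above, a hedged sketch of a one-shot request against either endpoint; the `/chat/completions` route is the standard OpenAI-compatible path, assumed here rather than taken from this repo:

```ts
// Sketch only: exercise the .env.example settings with a single chat request.
async function chatOnce(prompt: string): Promise<string> {
  const baseUrl = process.env.OPENAI_BASE_URL ?? 'https://integrate.api.nvidia.com/v1'
  const apiKey = process.env.NVIDIA_API_KEY ?? process.env.MINIMAX_API_KEY ?? ''
  const model = process.env.OPENAI_MODEL ?? 'nvidia/llama-3.1-nemotron-70b-instruct'
  const res = await fetch(`${baseUrl}/chat/completions`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${apiKey}` },
    body: JSON.stringify({ model, messages: [{ role: 'user', content: prompt }] }),
  })
  if (!res.ok) throw new Error(`HTTP ${res.status}: ${await res.text()}`)
  const data = (await res.json()) as { choices: { message: { content: string } }[] }
  return data.choices[0]?.message.content ?? ''
}
```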

View File

@@ -1009,6 +1009,16 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
       label: 'Custom',
       description: 'Any OpenAI-compatible provider',
     },
+    {
+      value: 'nvidia-nim',
+      label: 'NVIDIA NIM',
+      description: 'NVIDIA NIM endpoint',
+    },
+    {
+      value: 'minimax',
+      label: 'MiniMax',
+      description: 'MiniMax API endpoint',
+    },
     ...(mode === 'first-run'
       ? [
           {

View File

@@ -117,17 +117,28 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
   const baseUrl = resolvedRequest.baseUrl
   const isLocal = isLocalProviderUrl(baseUrl)
   let name = 'OpenAI'
-  // Override to Codex when resolved endpoint is Codex
-  if (resolvedRequest.transport === 'codex_responses' || baseUrl.includes('chatgpt.com/backend-api/codex')) {
-    name = 'Codex'
-  } else if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel)) name = 'DeepSeek'
-  else if (/openrouter/i.test(baseUrl)) name = 'OpenRouter'
-  else if (/together/i.test(baseUrl)) name = 'Together AI'
-  else if (/groq/i.test(baseUrl)) name = 'Groq'
-  else if (/mistral/i.test(baseUrl) || /mistral/i.test(rawModel)) name = 'Mistral'
-  else if (/azure/i.test(baseUrl)) name = 'Azure OpenAI'
-  else if (/llama/i.test(rawModel)) name = 'Meta Llama'
-  else if (isLocal) name = getLocalOpenAICompatibleProviderLabel(baseUrl)
+  if (/nvidia/i.test(baseUrl) || /nvidia/i.test(rawModel) || process.env.NVIDIA_NIM)
+    name = 'NVIDIA NIM'
+  else if (/minimax/i.test(baseUrl) || /minimax/i.test(rawModel) || process.env.MINIMAX_API_KEY)
+    name = 'MiniMax'
+  else if (resolvedRequest.transport === 'codex_responses' || baseUrl.includes('chatgpt.com/backend-api/codex'))
+    name = 'Codex'
+  else if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel))
+    name = 'DeepSeek'
+  else if (/openrouter/i.test(baseUrl))
+    name = 'OpenRouter'
+  else if (/together/i.test(baseUrl))
+    name = 'Together AI'
+  else if (/groq/i.test(baseUrl))
+    name = 'Groq'
+  else if (/mistral/i.test(baseUrl) || /mistral/i.test(rawModel))
+    name = 'Mistral'
+  else if (/azure/i.test(baseUrl))
+    name = 'Azure OpenAI'
+  else if (/llama/i.test(rawModel))
+    name = 'Meta Llama'
+  else if (isLocal)
+    name = getLocalOpenAICompatibleProviderLabel(baseUrl)

   // Resolve model alias to actual model name + reasoning effort
   let displayModel = resolvedRequest.resolvedModel
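
Ordering is the point of this hunk: the NVIDIA NIM and MiniMax tests run before the Codex and generic regex branches, so e.g. `integrate.api.nvidia.com` cannot fall through to a later match. A standalone restatement of that precedence (simplified; the `rawModel` checks are elided):

```ts
// Simplified restatement of the detection order above.
function providerNameFor(baseUrl: string, env: NodeJS.ProcessEnv = process.env): string {
  if (/nvidia/i.test(baseUrl) || env.NVIDIA_NIM) return 'NVIDIA NIM'
  if (/minimax/i.test(baseUrl) || env.MINIMAX_API_KEY) return 'MiniMax'
  if (baseUrl.includes('chatgpt.com/backend-api/codex')) return 'Codex'
  if (/deepseek/i.test(baseUrl)) return 'DeepSeek'
  if (/openrouter/i.test(baseUrl)) return 'OpenRouter'
  return 'OpenAI'
}

providerNameFor('https://integrate.api.nvidia.com/v1') // 'NVIDIA NIM'
providerNameFor('https://api.minimax.io/v1')           // 'MiniMax'
```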

View File

@@ -1360,8 +1360,12 @@ class OpenAIShimMessages {
     ...filterAnthropicHeaders(options?.headers),
   }

-  const isGemini = isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
-  const apiKey = this.providerOverride?.apiKey ?? process.env.OPENAI_API_KEY ?? ''
+  const isGemini = isGeminiMode()
+  const isMiniMax = !!process.env.MINIMAX_API_KEY
+  const apiKey =
+    this.providerOverride?.apiKey ??
+    process.env.OPENAI_API_KEY ??
+    (isMiniMax ? process.env.MINIMAX_API_KEY : '')
   // Detect Azure endpoints by hostname (not raw URL) to prevent bypass via
   // path segments like https://evil.com/cognitiveservices.azure.com/
   let isAzure = false
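
The key resolution is a plain nullish-coalescing chain: an explicit provider override wins, then `OPENAI_API_KEY`, and the MiniMax key only as a last resort. An equivalent standalone sketch:

```ts
// Same precedence as above: override > OPENAI_API_KEY > MINIMAX_API_KEY > ''.
function resolveApiKey(override?: string, env: NodeJS.ProcessEnv = process.env): string {
  const isMiniMax = !!env.MINIMAX_API_KEY
  return override ?? env.OPENAI_API_KEY ?? (isMiniMax ? env.MINIMAX_API_KEY! : '')
}
```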

View File

@@ -37,6 +37,8 @@ export const CLAUDE_3_7_SONNET_CONFIG = {
   gemini: 'gemini-2.0-flash',
   github: 'github:copilot',
   codex: 'gpt-5.4',
+  'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
+  minimax: 'MiniMax-M2.5',
 } as const satisfies ModelConfig

 export const CLAUDE_3_5_V2_SONNET_CONFIG = {
@@ -48,6 +50,8 @@ export const CLAUDE_3_5_V2_SONNET_CONFIG = {
   gemini: 'gemini-2.0-flash',
   github: 'github:copilot',
   codex: 'gpt-5.4',
+  'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
+  minimax: 'MiniMax-M2.5',
 } as const satisfies ModelConfig

 export const CLAUDE_3_5_HAIKU_CONFIG = {
@@ -59,6 +63,8 @@ export const CLAUDE_3_5_HAIKU_CONFIG = {
   gemini: 'gemini-2.0-flash-lite',
   github: 'github:copilot',
   codex: 'gpt-5.4',
+  'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
+  minimax: 'MiniMax-M2.5',
 } as const satisfies ModelConfig

 export const CLAUDE_HAIKU_4_5_CONFIG = {
@@ -70,6 +76,8 @@ export const CLAUDE_HAIKU_4_5_CONFIG = {
   gemini: 'gemini-2.0-flash-lite',
   github: 'github:copilot',
   codex: 'gpt-5.4',
+  'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
+  minimax: 'MiniMax-M2.5',
 } as const satisfies ModelConfig

 export const CLAUDE_SONNET_4_CONFIG = {
@@ -81,6 +89,8 @@ export const CLAUDE_SONNET_4_CONFIG = {
   gemini: 'gemini-2.0-flash',
   github: 'github:copilot',
   codex: 'gpt-5.4',
+  'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
+  minimax: 'MiniMax-M2.5',
 } as const satisfies ModelConfig

 export const CLAUDE_SONNET_4_5_CONFIG = {
@@ -92,6 +102,8 @@ export const CLAUDE_SONNET_4_5_CONFIG = {
   gemini: 'gemini-2.0-flash',
   github: 'github:copilot',
   codex: 'gpt-5.4',
+  'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
+  minimax: 'MiniMax-M2.5',
 } as const satisfies ModelConfig

 export const CLAUDE_OPUS_4_CONFIG = {
@@ -103,6 +115,8 @@ export const CLAUDE_OPUS_4_CONFIG = {
   gemini: 'gemini-2.5-pro-preview-03-25',
   github: 'github:copilot',
   codex: 'gpt-5.4',
+  'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
+  minimax: 'MiniMax-M2.5',
 } as const satisfies ModelConfig

 export const CLAUDE_OPUS_4_1_CONFIG = {
@@ -114,6 +128,8 @@ export const CLAUDE_OPUS_4_1_CONFIG = {
   gemini: 'gemini-2.5-pro-preview-03-25',
   github: 'github:copilot',
   codex: 'gpt-5.4',
+  'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
+  minimax: 'MiniMax-M2.5',
 } as const satisfies ModelConfig

 export const CLAUDE_OPUS_4_5_CONFIG = {
@@ -125,6 +141,8 @@ export const CLAUDE_OPUS_4_5_CONFIG = {
   gemini: 'gemini-2.5-pro-preview-03-25',
   github: 'github:copilot',
   codex: 'gpt-5.4',
+  'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
+  minimax: 'MiniMax-M2.5',
 } as const satisfies ModelConfig

 export const CLAUDE_OPUS_4_6_CONFIG = {
@@ -136,6 +154,8 @@ export const CLAUDE_OPUS_4_6_CONFIG = {
   gemini: 'gemini-2.5-pro-preview-03-25',
   github: 'github:copilot',
   codex: 'gpt-5.4',
+  'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
+  minimax: 'MiniMax-M2.5',
 } as const satisfies ModelConfig

 export const CLAUDE_SONNET_4_6_CONFIG = {
@@ -147,6 +167,8 @@ export const CLAUDE_SONNET_4_6_CONFIG = {
   gemini: 'gemini-2.0-flash',
   github: 'github:copilot',
   codex: 'gpt-5.4',
+  'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
+  minimax: 'MiniMax-M2.5',
 } as const satisfies ModelConfig

 // @[MODEL LAUNCH]: Register the new config here.
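
Every tier config now carries the same two new keys, so resolving a provider's stand-in model is a plain indexed access. A reduced sketch (the full `ModelConfig` type has more keys than shown here):

```ts
// Reduced sketch of the per-provider default-model lookup.
type ProviderKey = 'gemini' | 'github' | 'codex' | 'nvidia-nim' | 'minimax'
type TierConfig = Partial<Record<ProviderKey, string>>

const SONNET_TIER: TierConfig = {
  gemini: 'gemini-2.0-flash',
  codex: 'gpt-5.4',
  'nvidia-nim': 'nvidia/llama-3.1-nemotron-70b-instruct',
  minimax: 'MiniMax-M2.5',
}

function modelFor(config: TierConfig, provider: ProviderKey): string | undefined {
  return config[provider] // modelFor(SONNET_TIER, 'minimax') === 'MiniMax-M2.5'
}
```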

View File

@@ -0,0 +1,46 @@
/**
* MiniMax model list for the /model picker.
* Full model catalog from MiniMax API.
*/
import type { ModelOption } from './modelOptions.js'
import { getAPIProvider } from './providers.js'
import { isEnvTruthy } from '../envUtils.js'
export function isMiniMaxProvider(): boolean {
if (isEnvTruthy(process.env.MINIMAX_API_KEY)) {
return true
}
const baseUrl = process.env.OPENAI_BASE_URL ?? ''
if (baseUrl.includes('minimax')) {
return true
}
return getAPIProvider() === 'minimax'
}
function getMiniMaxModels(): ModelOption[] {
return [
// Latest Generation Models - use correct MiniMax naming with M prefix
{ value: 'MiniMax-M2', label: 'MiniMax M2', description: 'MoE model - 131K context - Chat/Code/Reasoning' },
{ value: 'MiniMax-M2.1', label: 'MiniMax M2.1', description: 'Enhanced - 200K context - Vision' },
{ value: 'MiniMax-M2.5', label: 'MiniMax M2.5', description: 'Flagship - 256K context - Vision/Function-calling' },
{ value: 'MiniMax-Text-01', label: 'MiniMax Text 01', description: 'Text-focused - 512K context - FREE' },
{ value: 'MiniMax-Text-01-Preview', label: 'MiniMax Text 01 Preview', description: 'Preview - 256K context - FREE' },
{ value: 'MiniMax-Vision-01', label: 'MiniMax Vision 01', description: 'Vision model - 32K context' },
{ value: 'MiniMax-Vision-01-Fast', label: 'MiniMax Vision 01 Fast', description: 'Fast vision - 16K context - FREE' },
// Legacy free tier models
{ value: 'abab6.5s-chat', label: 'ABAB 6.5S Chat', description: 'Legacy free - 16K context' },
{ value: 'abab6.5-chat', label: 'ABAB 6.5 Chat', description: 'Legacy free - 32K context' },
{ value: 'abab6.5g-chat', label: 'ABAB 6.5G Chat', description: 'Generation 6.5 - 32K context' },
{ value: 'abab6-chat', label: 'ABAB 6 Chat', description: 'Legacy - 8K context' },
]
}
let cachedMiniMaxOptions: ModelOption[] | null = null
export function getCachedMiniMaxModelOptions(): ModelOption[] {
if (!cachedMiniMaxOptions) {
cachedMiniMaxOptions = getMiniMaxModels()
}
return cachedMiniMaxOptions
}
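
The file follows the same lazy-memoization pattern as the Ollama catalog: the list is built once, then served from a module-level cache. Expected usage (sketch):

```ts
// Hypothetical caller of the helpers defined above.
import { isMiniMaxProvider, getCachedMiniMaxModelOptions } from './minimaxModels.js'

if (isMiniMaxProvider()) {
  for (const m of getCachedMiniMaxModelOptions()) {
    console.log(`${m.value} - ${m.description}`)
  }
}
```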

View File

@@ -35,6 +35,8 @@ import { has1mContext } from '../context.js'
 import { getGlobalConfig } from '../config.js'
 import { getActiveOpenAIModelOptionsCache } from '../providerProfiles.js'
 import { getCachedOllamaModelOptions, isOllamaProvider } from './ollamaModels.js'
+import { getCachedNvidiaNimModelOptions, isNvidiaNimProvider } from './nvidiaNimModels.js'
+import { getCachedMiniMaxModelOptions, isMiniMaxProvider } from './minimaxModels.js'
 import { getAntModels } from './antModels.js'

 // @[MODEL LAUNCH]: Update all the available and default model option strings below.
@@ -390,6 +392,26 @@ function getModelOptionsBase(fastMode = false): ModelOption[] {
     return [defaultOption]
   }

+  // When using NVIDIA NIM, show models from the NVIDIA catalog
+  if (isNvidiaNimProvider()) {
+    const defaultOption = getDefaultOptionForUser(fastMode)
+    const nvidiaModels = getCachedNvidiaNimModelOptions()
+    if (nvidiaModels.length > 0) {
+      return [defaultOption, ...nvidiaModels]
+    }
+    return [defaultOption]
+  }
+
+  // When using MiniMax, show models from the MiniMax catalog
+  if (isMiniMaxProvider()) {
+    const defaultOption = getDefaultOptionForUser(fastMode)
+    const minimaxModels = getCachedMiniMaxModelOptions()
+    if (minimaxModels.length > 0) {
+      return [defaultOption, ...minimaxModels]
+    }
+    return [defaultOption]
+  }
+
   if (process.env.USER_TYPE === 'ant') {
     // Build options from antModels config
     const antModelOptions: ModelOption[] = getAntModels().map(m => ({

View File

@@ -0,0 +1,161 @@
/**
* NVIDIA NIM model list for the /model picker.
* Filtered to chat/instruct-capable models - embedding, reward, and safety models are excluded.
*/
import type { ModelOption } from './modelOptions.js'
import { getAPIProvider } from './providers.js'
import { isEnvTruthy } from '../envUtils.js'
export function isNvidiaNimProvider(): boolean {
// Check if explicitly set via NVIDIA_NIM or via provider flag
if (isEnvTruthy(process.env.NVIDIA_NIM)) {
return true
}
// Also check if using NVIDIA NIM endpoint
const baseUrl = process.env.OPENAI_BASE_URL ?? ''
if (baseUrl.includes('nvidia') || baseUrl.includes('integrate.api.nvidia')) {
return true
}
return getAPIProvider() === 'nvidia-nim'
}
function getNvidiaNimModels(): ModelOption[] {
return [
// AGENTIC REASONING MODELS
{ value: 'nvidia/cosmos-reason2-8b', label: 'Cosmos Reason 2 8B', description: 'Reasoning' },
{ value: 'microsoft/phi-4-mini-flash-reasoning', label: 'Phi 4 Mini Flash Reasoning', description: 'Reasoning' },
{ value: 'qwen/qwen3-next-80b-a3b-thinking', label: 'Qwen 3 Next 80B Thinking', description: 'Reasoning' },
{ value: 'deepseek-ai/deepseek-r1-distill-qwen-32b', label: 'DeepSeek R1 Qwen 32B', description: 'Reasoning' },
{ value: 'deepseek-ai/deepseek-r1-distill-qwen-14b', label: 'DeepSeek R1 Qwen 14B', description: 'Reasoning' },
{ value: 'deepseek-ai/deepseek-r1-distill-qwen-7b', label: 'DeepSeek R1 Qwen 7B', description: 'Reasoning' },
{ value: 'deepseek-ai/deepseek-r1-distill-llama-8b', label: 'DeepSeek R1 Llama 8B', description: 'Reasoning' },
{ value: 'qwen/qwq-32b', label: 'QwQ 32B Reasoning', description: 'Reasoning' },
// CODE MODELS
{ value: 'meta/codellama-70b', label: 'CodeLlama 70B', description: 'Code' },
{ value: 'bigcode/starcoder2-15b', label: 'StarCoder2 15B', description: 'Code' },
{ value: 'bigcode/starcoder2-7b', label: 'StarCoder2 7B', description: 'Code' },
{ value: 'mistralai/codestral-22b-instruct-v0.1', label: 'Codestral 22B', description: 'Code' },
{ value: 'mistralai/mamba-codestral-7b-v0.1', label: 'Mamba Codestral 7B', description: 'Code' },
{ value: 'deepseek-ai/deepseek-coder-6.7b-instruct', label: 'DeepSeek Coder 6.7B', description: 'Code' },
{ value: 'google/codegemma-7b', label: 'CodeGemma 7B', description: 'Code' },
{ value: 'google/codegemma-1.1-7b', label: 'CodeGemma 1.1 7B', description: 'Code' },
{ value: 'qwen/qwen2.5-coder-32b-instruct', label: 'Qwen 2.5 Coder 32B', description: 'Code' },
{ value: 'qwen/qwen2.5-coder-7b-instruct', label: 'Qwen 2.5 Coder 7B', description: 'Code' },
{ value: 'qwen/qwen3-coder-480b-a35b-instruct', label: 'Qwen 3 Coder 480B', description: 'Code' },
{ value: 'ibm/granite-34b-code-instruct', label: 'Granite 34B Code', description: 'Code' },
{ value: 'ibm/granite-8b-code-instruct', label: 'Granite 8B Code', description: 'Code' },
// NEMOTRON MODELS - NVIDIA Flagship
{ value: 'nvidia/llama-3.1-nemotron-70b-instruct', label: 'Nemotron 70B Instruct', description: 'NVIDIA Flagship' },
{ value: 'nvidia/llama-3.1-nemotron-51b-instruct', label: 'Nemotron 51B Instruct', description: 'NVIDIA Flagship' },
{ value: 'nvidia/llama-3.1-nemotron-ultra-253b-v1', label: 'Nemotron Ultra 253B', description: 'NVIDIA Flagship' },
{ value: 'nvidia/llama-3.3-nemotron-super-49b-v1', label: 'Nemotron Super 49B v1', description: 'NVIDIA Flagship' },
{ value: 'nvidia/llama-3.3-nemotron-super-49b-v1.5', label: 'Nemotron Super 49B v1.5', description: 'NVIDIA Flagship' },
{ value: 'nvidia/nemotron-4-340b-instruct', label: 'Nemotron 4 340B', description: 'NVIDIA Flagship' },
{ value: 'nvidia/nemotron-3-super-120b-a12b', label: 'Nemotron 3 Super 120B', description: 'NVIDIA Flagship' },
{ value: 'nvidia/nemotron-3-nano-30b-a3b', label: 'Nemotron 3 Nano 30B', description: 'NVIDIA Flagship' },
{ value: 'nvidia/nemotron-mini-4b-instruct', label: 'Nemotron Mini 4B', description: 'NVIDIA Flagship' },
{ value: 'nvidia/llama-3.1-nemotron-nano-8b-v1', label: 'Nemotron Nano 8B', description: 'NVIDIA Flagship' },
{ value: 'nvidia/llama-3.1-nemotron-nano-4b-v1.1', label: 'Nemotron Nano 4B v1.1', description: 'NVIDIA Flagship' },
// CHATQA MODELS
{ value: 'nvidia/llama3-chatqa-1.5-70b', label: 'Llama3 ChatQA 1.5 70B', description: 'Chat' },
{ value: 'nvidia/llama3-chatqa-1.5-8b', label: 'Llama3 ChatQA 1.5 8B', description: 'Chat' },
// META LLAMA MODELS
{ value: 'meta/llama-3.1-405b-instruct', label: 'Llama 3.1 405B', description: 'Meta Llama' },
{ value: 'meta/llama-3.1-70b-instruct', label: 'Llama 3.1 70B', description: 'Meta Llama' },
{ value: 'meta/llama-3.1-8b-instruct', label: 'Llama 3.1 8B', description: 'Meta Llama' },
{ value: 'meta/llama-3.2-90b-vision-instruct', label: 'Llama 3.2 90B Vision', description: 'Meta Llama' },
{ value: 'meta/llama-3.2-11b-vision-instruct', label: 'Llama 3.2 11B Vision', description: 'Meta Llama' },
{ value: 'meta/llama-3.2-3b-instruct', label: 'Llama 3.2 3B', description: 'Meta Llama' },
{ value: 'meta/llama-3.2-1b-instruct', label: 'Llama 3.2 1B', description: 'Meta Llama' },
{ value: 'meta/llama-3.3-70b-instruct', label: 'Llama 3.3 70B', description: 'Meta Llama' },
{ value: 'meta/llama-4-maverick-17b-128e-instruct', label: 'Llama 4 Maverick 17B', description: 'Meta Llama' },
{ value: 'meta/llama-4-scout-17b-16e-instruct', label: 'Llama 4 Scout 17B', description: 'Meta Llama' },
// GOOGLE GEMMA MODELS (text only - no vision)
{ value: 'google/gemma-4-31b-it', label: 'Gemma 4 31B', description: 'Google Gemma' },
{ value: 'google/gemma-3-27b-it', label: 'Gemma 3 27B', description: 'Google Gemma' },
{ value: 'google/gemma-3-12b-it', label: 'Gemma 3 12B', description: 'Google Gemma' },
{ value: 'google/gemma-3-4b-it', label: 'Gemma 3 4B', description: 'Google Gemma' },
{ value: 'google/gemma-3-1b-it', label: 'Gemma 3 1B', description: 'Google Gemma' },
{ value: 'google/gemma-3n-e4b-it', label: 'Gemma 3N E4B', description: 'Google Gemma' },
{ value: 'google/gemma-3n-e2b-it', label: 'Gemma 3N E2B', description: 'Google Gemma' },
{ value: 'google/gemma-2-27b-it', label: 'Gemma 2 27B', description: 'Google Gemma' },
{ value: 'google/gemma-2-9b-it', label: 'Gemma 2 9B', description: 'Google Gemma' },
{ value: 'google/gemma-2-2b-it', label: 'Gemma 2 2B', description: 'Google Gemma' },
// MISTRAL MODELS
{ value: 'mistralai/mistral-large-3-675b-instruct-2512', label: 'Mistral Large 3 675B', description: 'Mistral' },
{ value: 'mistralai/mistral-large-2-instruct', label: 'Mistral Large 2', description: 'Mistral' },
{ value: 'mistralai/mistral-large', label: 'Mistral Large', description: 'Mistral' },
{ value: 'mistralai/mistral-medium-3-instruct', label: 'Mistral Medium 3', description: 'Mistral' },
{ value: 'mistralai/mistral-small-4-119b-2603', label: 'Mistral Small 4 119B', description: 'Mistral' },
{ value: 'mistralai/mistral-small-3.1-24b-instruct-2503', label: 'Mistral Small 3.1 24B', description: 'Mistral' },
{ value: 'mistralai/mistral-small-24b-instruct', label: 'Mistral Small 24B', description: 'Mistral' },
{ value: 'mistralai/mistral-7b-instruct-v0.3', label: 'Mistral 7B v0.3', description: 'Mistral' },
{ value: 'mistralai/mistral-7b-instruct-v0.2', label: 'Mistral 7B v0.2', description: 'Mistral' },
{ value: 'mistralai/mixtral-8x22b-instruct-v0.1', label: 'Mixtral 8x22B Instruct', description: 'Mistral' },
{ value: 'mistralai/mixtral-8x7b-instruct-v0.1', label: 'Mixtral 8x7B', description: 'Mistral' },
{ value: 'mistralai/mistral-nemotron', label: 'Mistral Nemotron', description: 'Mistral' },
{ value: 'mistralai/mathstral-7b-v0.1', label: 'Mathstral 7B', description: 'Math' },
{ value: 'mistralai/ministral-14b-instruct-2512', label: 'Ministral 14B', description: 'Mistral' },
{ value: 'mistralai/devstral-2-123b-instruct-2512', label: 'Devstral 2 123B', description: 'Code' },
{ value: 'mistralai/magistral-small-2506', label: 'Magistral Small', description: 'Mistral' },
// MICROSOFT PHI MODELS
{ value: 'microsoft/phi-4-multimodal-instruct', label: 'Phi 4 Multimodal', description: 'Multimodal' },
{ value: 'microsoft/phi-4-mini-instruct', label: 'Phi 4 Mini', description: 'Phi' },
{ value: 'microsoft/phi-3.5-mini-instruct', label: 'Phi 3.5 Mini', description: 'Phi' },
{ value: 'microsoft/phi-3-small-128k-instruct', label: 'Phi 3 Small 128K', description: 'Phi' },
{ value: 'microsoft/phi-3-small-8k-instruct', label: 'Phi 3 Small 8K', description: 'Phi' },
{ value: 'microsoft/phi-3-medium-128k-instruct', label: 'Phi 3 Medium 128K', description: 'Phi' },
{ value: 'microsoft/phi-3-medium-4k-instruct', label: 'Phi 3 Medium 4K', description: 'Phi' },
{ value: 'microsoft/phi-3-mini-128k-instruct', label: 'Phi 3 Mini 128K', description: 'Phi' },
{ value: 'microsoft/phi-3-mini-4k-instruct', label: 'Phi 3 Mini 4K', description: 'Phi' },
// QWEN MODELS
{ value: 'qwen/qwen3.5-397b-a17b', label: 'Qwen 3.5 397B', description: 'Qwen' },
{ value: 'qwen/qwen3.5-122b-a10b', label: 'Qwen 3.5 122B', description: 'Qwen' },
{ value: 'qwen/qwen3-next-80b-a3b-instruct', label: 'Qwen 3 Next 80B', description: 'Qwen' },
{ value: 'qwen/qwen2.5-7b-instruct', label: 'Qwen 2.5 7B', description: 'Qwen' },
{ value: 'qwen/qwen2-7b-instruct', label: 'Qwen 2 7B', description: 'Qwen' },
{ value: 'qwen/qwen3-32b', label: 'Qwen 3 32B', description: 'Qwen' },
{ value: 'qwen/qwen3-8b', label: 'Qwen 3 8B', description: 'Qwen' },
// DEEPSEEK MODELS
{ value: 'deepseek-ai/deepseek-r1', label: 'DeepSeek R1', description: 'DeepSeek' },
{ value: 'deepseek-ai/deepseek-v3', label: 'DeepSeek V3', description: 'DeepSeek' },
{ value: 'deepseek-ai/deepseek-v3.2', label: 'DeepSeek V3.2', description: 'DeepSeek' },
{ value: 'deepseek-ai/deepseek-v3.1-terminus', label: 'DeepSeek V3.1 Terminus', description: 'DeepSeek' },
{ value: 'deepseek-ai/deepseek-v3.1', label: 'DeepSeek V3.1', description: 'DeepSeek' },
// IBM GRANITE MODELS
{ value: 'ibm/granite-3.3-8b-instruct', label: 'Granite 3.3 8B', description: 'IBM Granite' },
{ value: 'ibm/granite-3.0-8b-instruct', label: 'Granite 3.0 8B', description: 'IBM Granite' },
{ value: 'ibm/granite-3.0-3b-a800m-instruct', label: 'Granite 3.0 3B', description: 'IBM Granite' },
// OTHER MODELS
{ value: 'databricks/dbrx-instruct', label: 'DBRX Instruct', description: 'Other' },
{ value: '01-ai/yi-large', label: 'Yi Large', description: 'Other' },
{ value: 'ai21labs/jamba-1.5-large-instruct', label: 'Jamba 1.5 Large', description: 'Other' },
{ value: 'ai21labs/jamba-1.5-mini-instruct', label: 'Jamba 1.5 Mini', description: 'Other' },
{ value: 'writer/palmyra-creative-122b', label: 'Palmyra Creative 122B', description: 'Other' },
{ value: 'writer/palmyra-fin-70b-32k', label: 'Palmyra Fin 70B 32K', description: 'Other' },
{ value: 'writer/palmyra-med-70b', label: 'Palmyra Med 70B', description: 'Other' },
{ value: 'writer/palmyra-med-70b-32k', label: 'Palmyra Med 70B 32K', description: 'Other' },
// Z-AI GLM MODELS
{ value: 'z-ai/glm5', label: 'GLM-5', description: 'Z-AI' },
{ value: 'z-ai/glm4.7', label: 'GLM-4.7', description: 'Z-AI' },
// MINIMAX MODELS
{ value: 'minimaxai/minimax-m2.5', label: 'MiniMax M2.5', description: 'MiniMax' },
// MOONSHOT KIMI MODELS
{ value: 'moonshotai/kimi-k2.5', label: 'Kimi K2.5', description: 'Moonshot' },
{ value: 'moonshotai/kimi-k2-instruct', label: 'Kimi K2 Instruct', description: 'Moonshot' },
{ value: 'moonshotai/kimi-k2-thinking', label: 'Kimi K2 Thinking', description: 'Moonshot' },
{ value: 'moonshotai/kimi-k2.5-thinking', label: 'Kimi K2.5 Thinking', description: 'Moonshot' },
{ value: 'moonshotai/kimi-k2-instruct-0905', label: 'Kimi K2 Instruct 0905', description: 'Moonshot' },
]
}
let cachedNvidiaNimOptions: ModelOption[] | null = null
export function getCachedNvidiaNimModelOptions(): ModelOption[] {
if (!cachedNvidiaNimOptions) {
cachedNvidiaNimOptions = getNvidiaNimModels()
}
return cachedNvidiaNimOptions
}
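
Because every entry carries a coarse `description` tag, callers can slice the catalog without extra metadata; for instance, a hedged filter down to the code models:

```ts
// Sketch: narrow the NVIDIA NIM catalog by its description tag.
import { getCachedNvidiaNimModelOptions } from './nvidiaNimModels.js'

const codeModels = getCachedNvidiaNimModelOptions().filter(m => m.description === 'Code')
console.log(codeModels.map(m => m.value)) // 'meta/codellama-70b', 'bigcode/starcoder2-15b', ...
```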

View File

@@ -104,6 +104,57 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
   'devstral-latest': 256_000,
   'ministral-3b-latest': 256_000,
+  // NVIDIA NIM - popular models
+  'nvidia/llama-3.1-nemotron-70b-instruct': 128_000,
+  'nvidia/llama-3.1-nemotron-ultra-253b-v1': 128_000,
+  'nvidia/nemotron-mini-4b-instruct': 32_768,
+  'meta/llama-3.1-405b-instruct': 128_000,
+  'meta/llama-3.1-70b-instruct': 128_000,
+  'meta/llama-3.1-8b-instruct': 128_000,
+  'meta/llama-3.2-90b-instruct': 128_000,
+  'meta/llama-3.2-1b-instruct': 128_000,
+  'meta/llama-3.2-3b-instruct': 128_000,
+  'meta/llama-3.3-70b-instruct': 128_000,
+  // Google Gemma via NVIDIA NIM
+  'google/gemma-2-27b-it': 8_192,
+  'google/gemma-2-9b-it': 8_192,
+  'google/gemma-3-27b-it': 131_072,
+  'google/gemma-3-12b-it': 131_072,
+  'google/gemma-3-4b-it': 131_072,
+  // DeepSeek via NVIDIA NIM
+  'deepseek-ai/deepseek-r1': 128_000,
+  'deepseek-ai/deepseek-v3': 128_000,
+  'deepseek-ai/deepseek-v3.2': 128_000,
+  // Qwen via NVIDIA NIM
+  'qwen/qwen3-32b': 128_000,
+  'qwen/qwen3-8b': 128_000,
+  'qwen/qwen2.5-7b-instruct': 32_768,
+  // Mistral via NVIDIA NIM
+  'mistralai/mistral-large-3-675b-instruct-2512': 256_000,
+  'mistralai/mistral-large-2-instruct': 256_000,
+  'mistralai/mistral-small-3.1-24b-instruct-2503': 32_768,
+  'mistralai/mixtral-8x7b-instruct-v0.1': 32_768,
+  // Microsoft Phi via NVIDIA NIM
+  'microsoft/phi-4-mini-instruct': 16_384,
+  'microsoft/phi-3.5-mini-instruct': 16_384,
+  'microsoft/phi-3-mini-128k-instruct': 128_000,
+  // IBM Granite via NVIDIA NIM
+  'ibm/granite-3.3-8b-instruct': 8_192,
+  'ibm/granite-8b-code-instruct': 8_192,
+  // GLM models via NVIDIA NIM
+  'z-ai/glm5': 200_000,
+  'z-ai/glm4.7': 128_000,
+  // Kimi models via NVIDIA NIM
+  'moonshotai/kimi-k2.5': 200_000,
+  'moonshotai/kimi-k2-instruct': 128_000,
+  // DBRX via NVIDIA NIM
+  'databricks/dbrx-instruct': 131_072,
+  // Jamba via NVIDIA NIM
+  'ai21labs/jamba-1.5-large-instruct': 256_000,
+  'ai21labs/jamba-1.5-mini-instruct': 256_000,
+  // Yi via NVIDIA NIM
+  '01-ai/yi-large': 32_768,
   // MiniMax (all M2.x variants share 204,800 context, 131,072 max output)
   'MiniMax-M2.7': 204_800,
   'MiniMax-M2.7-highspeed': 204_800,
@@ -118,6 +169,13 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
   'minimax-m2.1': 204_800,
   'minimax-m2.1-highspeed': 204_800,
+  // MiniMax new models
+  'MiniMax-Text-01': 524_288,
+  'MiniMax-Text-01-Preview': 262_144,
+  'MiniMax-Vision-01': 32_768,
+  'MiniMax-Vision-01-Fast': 16_384,
+  'MiniMax-M2': 204_800,
   // Google (via OpenRouter)
   'google/gemini-2.0-flash':1_048_576,
   'google/gemini-2.5-pro': 1_048_576,
@@ -246,6 +304,12 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
   'minimax-m2.5-highspeed': 131_072,
   'minimax-m2.1': 131_072,
   'minimax-m2.1-highspeed': 131_072,
+  // New MiniMax models
+  'MiniMax-M2': 131_072,
+  'MiniMax-Text-01': 65_536,
+  'MiniMax-Text-01-Preview': 65_536,
+  'MiniMax-Vision-01': 16_384,
+  'MiniMax-Vision-01-Fast': 16_384,
   // Google (via OpenRouter)
   'google/gemini-2.0-flash': 8_192,
@@ -266,11 +330,32 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
   'deepseek-r1:14b': 8_192,
   'mistral:7b': 4_096,
   'phi4:14b': 4_096,
-  'gemma2:27b': 4_096,
-  'codellama:13b': 4_096,
-  'llama3.2:1b': 4_096,
-  'qwen3:8b': 8_192,
-  'codestral': 8_192,
+  // NVIDIA NIM models
+  'nvidia/llama-3.1-nemotron-70b-instruct': 32_768,
+  'nvidia/nemotron-mini-4b-instruct': 8_192,
+  'meta/llama-3.1-405b-instruct': 32_768,
+  'meta/llama-3.1-70b-instruct': 32_768,
+  'meta/llama-3.2-90b-instruct': 32_768,
+  'meta/llama-3.3-70b-instruct': 32_768,
+  'google/gemma-2-27b-it': 4_096,
+  'google/gemma-3-27b-it': 16_384,
+  'google/gemma-3-12b-it': 16_384,
+  'deepseek-ai/deepseek-r1': 32_768,
+  'deepseek-ai/deepseek-v3': 32_768,
+  'deepseek-ai/deepseek-v3.2': 32_768,
+  'qwen/qwen3-32b': 32_768,
+  'qwen/qwen2.5-7b-instruct': 8_192,
+  'mistralai/mistral-large-3-675b-instruct-2512': 32_768,
+  'mistralai/mixtral-8x7b-instruct-v0.1': 8_192,
+  'microsoft/phi-4-mini-instruct': 4_096,
+  'microsoft/phi-3.5-mini-instruct': 4_096,
+  'ibm/granite-3.3-8b-instruct': 4_096,
+  'z-ai/glm5': 32_768,
+  'moonshotai/kimi-k2.5': 32_768,
+  'databricks/dbrx-instruct': 32_768,
+  'ai21labs/jamba-1.5-large-instruct': 32_768,
+  '01-ai/yi-large': 8_192,
 }

 function lookupByModel<T>(table: Record<string, T>, model: string): T | undefined {
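
Only `lookupByModel`'s signature is visible here; a plausible implementation, under the assumption that it tries an exact key first and then a case-insensitive pass, would be:

```ts
// Assumed behavior, not the repo's actual implementation.
function lookupByModelSketch<T>(table: Record<string, T>, model: string): T | undefined {
  if (model in table) return table[model]
  const lower = model.toLowerCase()
  for (const [key, value] of Object.entries(table)) {
    if (key.toLowerCase() === lower) return value
  }
  return undefined
}
```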

View File

@@ -11,9 +11,17 @@ export type APIProvider =
   | 'gemini'
   | 'github'
   | 'codex'
+  | 'nvidia-nim'
+  | 'minimax'
   | 'mistral'

 export function getAPIProvider(): APIProvider {
+  if (isEnvTruthy(process.env.NVIDIA_NIM)) {
+    return 'nvidia-nim'
+  }
+  if (isEnvTruthy(process.env.MINIMAX_API_KEY)) {
+    return 'minimax'
+  }
   return isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
     ? 'gemini'
     :
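
Since the new checks sit in front of the Gemini branch, `NVIDIA_NIM=1` (or a set MiniMax key) wins even when `CLAUDE_CODE_USE_GEMINI` is also truthy:

```ts
import { getAPIProvider } from './providers.js'

process.env.NVIDIA_NIM = '1'
process.env.CLAUDE_CODE_USE_GEMINI = '1'
console.log(getAPIProvider()) // 'nvidia-nim' (the NVIDIA check short-circuits first)
```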

View File

@@ -11,6 +11,8 @@ import {
 } from '@anthropic-ai/sdk'
 import { getModelStrings } from './modelStrings.js'
 import { getCachedOllamaModelOptions, isOllamaProvider } from './ollamaModels.js'
+import { getCachedNvidiaNimModelOptions, isNvidiaNimProvider } from './nvidiaNimModels.js'
+import { getCachedMiniMaxModelOptions, isMiniMaxProvider } from './minimaxModels.js'

 // Cache valid models to avoid repeated API calls
 const validModelCache = new Map<string, boolean>()
@@ -47,6 +49,40 @@ export async function validateModel(
     // If cache is empty, fall through to API validation
   }

+  // For NVIDIA NIM provider, validate against cached model list
+  if (isNvidiaNimProvider()) {
+    const nvidiaModels = getCachedNvidiaNimModelOptions()
+    const found = nvidiaModels.some(m => m.value === normalizedModel)
+    if (found) {
+      validModelCache.set(normalizedModel, true)
+      return { valid: true }
+    }
+    if (nvidiaModels.length > 0) {
+      const MAX_SHOWN = 5
+      const names = nvidiaModels.map(m => m.value)
+      const shown = names.slice(0, MAX_SHOWN).join(', ')
+      const suffix = names.length > MAX_SHOWN ? ` and ${names.length - MAX_SHOWN} more` : ''
+      return { valid: false, error: `Model '${normalizedModel}' not found in NVIDIA NIM catalog. Available: ${shown}${suffix}` }
+    }
+  }
+
+  // For MiniMax provider, validate against cached model list
+  if (isMiniMaxProvider()) {
+    const minimaxModels = getCachedMiniMaxModelOptions()
+    const found = minimaxModels.some(m => m.value === normalizedModel)
+    if (found) {
+      validModelCache.set(normalizedModel, true)
+      return { valid: true }
+    }
+    if (minimaxModels.length > 0) {
+      const MAX_SHOWN = 5
+      const names = minimaxModels.map(m => m.value)
+      const shown = names.slice(0, MAX_SHOWN).join(', ')
+      const suffix = names.length > MAX_SHOWN ? ` and ${names.length - MAX_SHOWN} more` : ''
+      return { valid: false, error: `Model '${normalizedModel}' not found in MiniMax catalog. Available: ${shown}${suffix}` }
+    }
+  }
+
   // Check against availableModels allowlist before any API call
   if (!isModelAllowed(normalizedModel)) {
     return {
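
The "first five plus a count" message is built identically in both branches and could be shared; a sketch of that helper (the PR inlines it twice rather than factoring it out):

```ts
// Factored sketch of the repeated error formatting above.
function formatCatalogError(model: string, catalog: string, names: string[]): string {
  const MAX_SHOWN = 5
  const shown = names.slice(0, MAX_SHOWN).join(', ')
  const suffix = names.length > MAX_SHOWN ? ` and ${names.length - MAX_SHOWN} more` : ''
  return `Model '${model}' not found in ${catalog} catalog. Available: ${shown}${suffix}`
}
```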

View File

@@ -105,6 +105,14 @@ export function getLocalOpenAICompatibleProviderLabel(baseUrl?: string): string
   ) {
     return 'text-generation-webui'
   }
+  // Check for NVIDIA NIM
+  if (host.includes('nvidia') || haystack.includes('nvidia') || host.includes('integrate.api.nvidia')) {
+    return 'NVIDIA NIM'
+  }
+  // Check for MiniMax (both api.minimax.io and api.minimax.chat)
+  if (host.includes('minimax') || haystack.includes('minimax')) {
+    return 'MiniMax'
+  }
 } catch {
   // Fall back to the generic label when the base URL is malformed.
 }

View File

@@ -21,6 +21,8 @@ export const VALID_PROVIDERS = [
   'bedrock',
   'vertex',
   'ollama',
+  'nvidia-nim',
+  'minimax',
 ] as const

 export type ProviderFlagName = (typeof VALID_PROVIDERS)[number]
@@ -131,6 +133,21 @@ export function applyProviderFlag(
       }
       if (model) process.env.OPENAI_MODEL = model
       break
+    case 'nvidia-nim':
+      process.env.CLAUDE_CODE_USE_OPENAI = '1'
+      process.env.OPENAI_BASE_URL ??= 'https://integrate.api.nvidia.com/v1'
+      process.env.NVIDIA_NIM = '1'
+      process.env.OPENAI_MODEL ??= 'nvidia/llama-3.1-nemotron-70b-instruct'
+      if (model) process.env.OPENAI_MODEL = model
+      break
+    case 'minimax':
+      process.env.CLAUDE_CODE_USE_OPENAI = '1'
+      process.env.OPENAI_BASE_URL ??= 'https://api.minimax.io/v1'
+      process.env.OPENAI_MODEL ??= 'MiniMax-M2.5'
+      if (model) process.env.OPENAI_MODEL = model
+      break
   }

   return {}
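
Note the `??=` assignments: they fill a default only when the variable is unset, so a base URL exported by the user survives the flag. A short illustration (the proxy URL is hypothetical):

```ts
process.env.OPENAI_BASE_URL = 'https://my-proxy.example.com/v1' // user override
process.env.OPENAI_BASE_URL ??= 'https://integrate.api.nvidia.com/v1'
console.log(process.env.OPENAI_BASE_URL) // still 'https://my-proxy.example.com/v1'
```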

View File

@@ -6,29 +6,29 @@ import {
   isCodexBaseUrl,
   resolveCodexApiCredentials,
   resolveProviderRequest,
-} from '../services/api/providerConfig.ts'
+} from '../services/api/providerConfig.js'
 import { parseChatgptAccountId } from '../services/api/codexOAuthShared.js'
 import {
   getGoalDefaultOpenAIModel,
   normalizeRecommendationGoal,
   type RecommendationGoal,
-} from './providerRecommendation.ts'
-import { readGeminiAccessToken } from './geminiCredentials.ts'
-import { getOllamaChatBaseUrl } from './providerDiscovery.ts'
-import { getProviderValidationError } from './providerValidation.ts'
+} from './providerRecommendation.js'
+import { readGeminiAccessToken } from './geminiCredentials.js'
+import { getOllamaChatBaseUrl } from './providerDiscovery.js'
+import { getProviderValidationError } from './providerValidation.js'
 import {
   maskSecretForDisplay,
   redactSecretValueForDisplay,
   sanitizeApiKey,
   sanitizeProviderConfigValue,
-} from './providerSecrets.ts'
+} from './providerSecrets.js'

 export {
   maskSecretForDisplay,
   redactSecretValueForDisplay,
   sanitizeApiKey,
   sanitizeProviderConfigValue,
-} from './providerSecrets.ts'
+} from './providerSecrets.js'

 export const PROFILE_FILE_NAME = '.openclaude-profile.json'

 export const DEFAULT_GEMINI_BASE_URL =
@@ -57,18 +57,28 @@ const PROFILE_ENV_KEYS = [
   'GEMINI_MODEL',
   'GEMINI_BASE_URL',
   'GOOGLE_API_KEY',
+  'NVIDIA_NIM',
+  'NVIDIA_API_KEY',
+  'NVIDIA_MODEL',
+  'MINIMAX_API_KEY',
+  'MINIMAX_BASE_URL',
+  'MINIMAX_MODEL',
   'MISTRAL_BASE_URL',
   'MISTRAL_API_KEY',
   'MISTRAL_MODEL',
 ] as const

-export type ProviderProfile =
-  | 'openai'
-  | 'ollama'
-  | 'codex'
-  | 'gemini'
-  | 'atomic-chat'
-  | 'mistral'
+const SECRET_ENV_KEYS = [
+  'OPENAI_API_KEY',
+  'CODEX_API_KEY',
+  'GEMINI_API_KEY',
+  'GOOGLE_API_KEY',
+  'NVIDIA_API_KEY',
+  'MINIMAX_API_KEY',
+  'MISTRAL_API_KEY',
+] as const
+
+export type ProviderProfile = 'openai' | 'ollama' | 'codex' | 'gemini' | 'atomic-chat' | 'nvidia-nim' | 'minimax' | 'mistral'

 export type ProfileEnv = {
   OPENAI_BASE_URL?: string
@@ -82,6 +92,12 @@ export type ProfileEnv = {
   GEMINI_AUTH_MODE?: 'api-key' | 'access-token' | 'adc'
   GEMINI_MODEL?: string
   GEMINI_BASE_URL?: string
+  GOOGLE_API_KEY?: string
+  NVIDIA_NIM?: string
+  NVIDIA_API_KEY?: string
+  MINIMAX_API_KEY?: string
+  MINIMAX_BASE_URL?: string
+  MINIMAX_MODEL?: string
   MISTRAL_BASE_URL?: string
   MISTRAL_API_KEY?: string
   MISTRAL_MODEL?: string
@@ -93,6 +109,19 @@ export type ProfileFile = {
   createdAt: string
 }

+type SecretValueSource = Partial<
+  Record<
+    | 'OPENAI_API_KEY'
+    | 'CODEX_API_KEY'
+    | 'GEMINI_API_KEY'
+    | 'GOOGLE_API_KEY'
+    | 'NVIDIA_API_KEY'
+    | 'MINIMAX_API_KEY'
+    | 'MISTRAL_API_KEY',
+    string | undefined
+  >
+>
+
 type ProfileFileLocation = {
   cwd?: string
   filePath?: string
@@ -113,6 +142,8 @@ export function isProviderProfile(value: unknown): value is ProviderProfile {
     value === 'codex' ||
     value === 'gemini' ||
     value === 'atomic-chat' ||
+    value === 'nvidia-nim' ||
+    value === 'minimax' ||
     value === 'mistral'
   )
 }
@@ -143,6 +174,67 @@ export function buildAtomicChatProfileEnv(
   }
 }

+export function buildNvidiaNimProfileEnv(options: {
+  model?: string | null
+  baseUrl?: string | null
+  apiKey?: string | null
+  processEnv?: NodeJS.ProcessEnv
+}): ProfileEnv | null {
+  const processEnv = options.processEnv ?? process.env
+  const key = sanitizeApiKey(options.apiKey ?? processEnv.NVIDIA_API_KEY)
+  if (!key) {
+    return null
+  }
+
+  const defaultBaseUrl = 'https://integrate.api.nvidia.com/v1'
+  const secretSource: SecretValueSource = { OPENAI_API_KEY: key }
+  return {
+    OPENAI_BASE_URL:
+      sanitizeProviderConfigValue(options.baseUrl, secretSource) ||
+      sanitizeProviderConfigValue(processEnv.OPENAI_BASE_URL, secretSource) ||
+      defaultBaseUrl,
+    OPENAI_MODEL:
+      sanitizeProviderConfigValue(options.model, secretSource) ||
+      sanitizeProviderConfigValue(processEnv.OPENAI_MODEL, secretSource) ||
+      'nvidia/llama-3.1-nemotron-70b-instruct',
+    OPENAI_API_KEY: key,
+    NVIDIA_NIM: '1',
+  }
+}
+
+export function buildMiniMaxProfileEnv(options: {
+  model?: string | null
+  baseUrl?: string | null
+  apiKey?: string | null
+  processEnv?: NodeJS.ProcessEnv
+}): ProfileEnv | null {
+  const processEnv = options.processEnv ?? process.env
+  const key = sanitizeApiKey(options.apiKey ?? processEnv.MINIMAX_API_KEY)
+  if (!key) {
+    return null
+  }
+
+  const defaultBaseUrl = 'https://api.minimax.io/v1'
+  const defaultModel = 'MiniMax-M2.5'
+  const secretSource: SecretValueSource = { OPENAI_API_KEY: key }
+  return {
+    OPENAI_BASE_URL:
+      sanitizeProviderConfigValue(options.baseUrl, secretSource) ||
+      sanitizeProviderConfigValue(processEnv.OPENAI_BASE_URL, secretSource) ||
+      defaultBaseUrl,
+    OPENAI_MODEL:
+      sanitizeProviderConfigValue(options.model, secretSource) ||
+      sanitizeProviderConfigValue(processEnv.OPENAI_MODEL, secretSource) ||
+      defaultModel,
+    OPENAI_API_KEY: key,
+    MINIMAX_API_KEY: key,
+    MINIMAX_BASE_URL: defaultBaseUrl,
+    MINIMAX_MODEL: defaultModel,
+  }
+}
+
 export function buildGeminiProfileEnv(options: {
   model?: string | null
   baseUrl?: string | null
@@ -161,15 +253,13 @@
     return null
   }

+  const secretSource: SecretValueSource = key ? { GEMINI_API_KEY: key } : {}
   const env: ProfileEnv = {
     GEMINI_AUTH_MODE: authMode,
     GEMINI_MODEL:
-      sanitizeProviderConfigValue(options.model, { GEMINI_API_KEY: key }, processEnv) ||
-      sanitizeProviderConfigValue(
-        processEnv.GEMINI_MODEL,
-        { GEMINI_API_KEY: key },
-        processEnv,
-      ) ||
+      sanitizeProviderConfigValue(options.model, secretSource) ||
+      sanitizeProviderConfigValue(processEnv.GEMINI_MODEL, secretSource) ||
       DEFAULT_GEMINI_MODEL,
   }
@@ -178,12 +268,8 @@
   }

   const baseUrl =
-    sanitizeProviderConfigValue(options.baseUrl, { GEMINI_API_KEY: key }, processEnv) ||
-    sanitizeProviderConfigValue(
-      processEnv.GEMINI_BASE_URL,
-      { GEMINI_API_KEY: key },
-      processEnv,
-    )
+    sanitizeProviderConfigValue(options.baseUrl, secretSource) ||
+    sanitizeProviderConfigValue(processEnv.GEMINI_BASE_URL, secretSource)
   if (baseUrl) {
     env.GEMINI_BASE_URL = baseUrl
   }
@@ -205,15 +291,14 @@
   }

   const defaultModel = getGoalDefaultOpenAIModel(options.goal)
+  const secretSource: SecretValueSource = { OPENAI_API_KEY: key }
   const shellOpenAIModel = sanitizeProviderConfigValue(
     processEnv.OPENAI_MODEL,
-    { OPENAI_API_KEY: key },
-    processEnv,
+    secretSource,
   )
   const shellOpenAIBaseUrl = sanitizeProviderConfigValue(
     processEnv.OPENAI_BASE_URL,
-    { OPENAI_API_KEY: key },
-    processEnv,
+    secretSource,
   )
   const shellOpenAIRequest = resolveProviderRequest({
     model: shellOpenAIModel,
@@ -224,19 +309,11 @@
   return {
     OPENAI_BASE_URL:
-      sanitizeProviderConfigValue(
-        options.baseUrl,
-        { OPENAI_API_KEY: key },
-        processEnv,
-      ) ||
+      sanitizeProviderConfigValue(options.baseUrl, secretSource) ||
       (useShellOpenAIConfig ? shellOpenAIBaseUrl : undefined) ||
       DEFAULT_OPENAI_BASE_URL,
     OPENAI_MODEL:
-      sanitizeProviderConfigValue(
-        options.model,
-        { OPENAI_API_KEY: key },
-        processEnv,
-      ) ||
+      sanitizeProviderConfigValue(options.model, secretSource) ||
       (useShellOpenAIConfig ? shellOpenAIModel : undefined) ||
       defaultModel,
     OPENAI_API_KEY: key,
@@ -293,21 +370,19 @@
   const env: ProfileEnv = {
     MISTRAL_API_KEY: key,
     MISTRAL_MODEL:
-      sanitizeProviderConfigValue(options.model, { MISTRAL_API_KEY: key }, processEnv) ||
+      sanitizeProviderConfigValue(options.model, { MISTRAL_API_KEY: key }) ||
       sanitizeProviderConfigValue(
         processEnv.MISTRAL_MODEL,
         { MISTRAL_API_KEY: key },
-        processEnv,
       ) ||
       DEFAULT_MISTRAL_MODEL,
   }
   const baseUrl =
-    sanitizeProviderConfigValue(options.baseUrl, { MISTRAL_API_KEY: key }, processEnv) ||
+    sanitizeProviderConfigValue(options.baseUrl, { MISTRAL_API_KEY: key }) ||
     sanitizeProviderConfigValue(
       processEnv.MISTRAL_BASE_URL,
       { MISTRAL_API_KEY: key },
-      processEnv,
     )
   if (baseUrl) {
     env.MISTRAL_BASE_URL = baseUrl
@@ -465,11 +540,11 @@ export async function buildLaunchEnv(options: {
   )
   const shellOpenAIModel = sanitizeProviderConfigValue(
     processEnv.OPENAI_MODEL,
-    processEnv,
+    processEnv as SecretValueSource,
   )
   const shellOpenAIBaseUrl = sanitizeProviderConfigValue(
     processEnv.OPENAI_BASE_URL,
-    processEnv,
+    processEnv as SecretValueSource,
   )
   const persistedGeminiModel = sanitizeProviderConfigValue(
     persistedEnv.GEMINI_MODEL,
@@ -481,11 +556,11 @@
   )
   const shellGeminiModel = sanitizeProviderConfigValue(
     processEnv.GEMINI_MODEL,
-    processEnv,
+    processEnv as SecretValueSource,
   )
   const shellGeminiBaseUrl = sanitizeProviderConfigValue(
     processEnv.GEMINI_BASE_URL,
-    processEnv,
+    processEnv as SecretValueSource,
   )
   const shellGeminiAccessToken =
     processEnv.GEMINI_ACCESS_TOKEN?.trim() || undefined
@@ -567,19 +642,15 @@
   const shellMistralModel = sanitizeProviderConfigValue(
     processEnv.MISTRAL_MODEL,
-    processEnv,
   )
   const persistedMistralModel = sanitizeProviderConfigValue(
     persistedEnv.MISTRAL_MODEL,
-    persistedEnv,
   )
   const shellMistralBaseUrl = sanitizeProviderConfigValue(
     processEnv.MISTRAL_BASE_URL,
-    processEnv,
   )
   const persistedMistralBaseUrl = sanitizeProviderConfigValue(
     persistedEnv.MISTRAL_BASE_URL,
-    persistedEnv,
   )

   env.MISTRAL_MODEL =
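
Expected usage of the new MiniMax builder, assuming the caller supplies a key that `sanitizeApiKey` accepts (the key below is a placeholder):

```ts
import { buildMiniMaxProfileEnv } from './providerProfiles.js'

const env = buildMiniMaxProfileEnv({ apiKey: 'your-minimax-key', model: 'MiniMax-M2' })
// env?.OPENAI_BASE_URL === 'https://api.minimax.io/v1'
// env?.OPENAI_MODEL    === 'MiniMax-M2'
// env?.MINIMAX_API_KEY === 'your-minimax-key'
```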

View File

@@ -20,6 +20,8 @@ export type ProviderPreset =
   | 'openrouter'
   | 'lmstudio'
   | 'custom'
+  | 'nvidia-nim'
+  | 'minimax'

 export type ProviderProfileInput = {
   provider?: ProviderProfile['provider']
@@ -229,6 +231,24 @@ export function getProviderPresetDefaults(
         apiKey: process.env.OPENAI_API_KEY ?? '',
         requiresApiKey: false,
       }
+    case 'nvidia-nim':
+      return {
+        provider: 'openai',
+        name: 'NVIDIA NIM',
+        baseUrl: 'https://integrate.api.nvidia.com/v1',
+        model: 'nvidia/llama-3.1-nemotron-70b-instruct',
+        apiKey: process.env.NVIDIA_API_KEY ?? '',
+        requiresApiKey: true,
+      }
+    case 'minimax':
+      return {
+        provider: 'openai',
+        name: 'MiniMax',
+        baseUrl: 'https://api.minimax.io/v1',
+        model: 'MiniMax-M2.5',
+        apiKey: process.env.MINIMAX_API_KEY ?? '',
+        requiresApiKey: true,
+      }
     case 'ollama':
     default:
       return {
@@ -365,6 +385,11 @@ export function clearProviderProfileEnvFromProcessEnv(
   delete processEnv.ANTHROPIC_API_KEY
   delete processEnv[PROFILE_ENV_APPLIED_FLAG]
   delete processEnv[PROFILE_ENV_APPLIED_ID]
+  // Clear provider-specific API keys
+  delete processEnv.MINIMAX_API_KEY
+  delete processEnv.NVIDIA_API_KEY
+  delete processEnv.NVIDIA_NIM
 }

 export function applyProviderProfileToProcessEnv(profile: ProviderProfile): void {
@@ -395,6 +420,14 @@ export function applyProviderProfileToProcessEnv(profile: ProviderProfile): void
   if (profile.apiKey) {
     process.env.OPENAI_API_KEY = profile.apiKey
+    // Also set provider-specific API keys for detection
+    const baseUrl = profile.baseUrl.toLowerCase()
+    if (baseUrl.includes('minimax')) {
+      process.env.MINIMAX_API_KEY = profile.apiKey
+    }
+    if (baseUrl.includes('nvidia') || baseUrl.includes('integrate.api.nvidia')) {
+      process.env.NVIDIA_API_KEY = profile.apiKey
+    }
   } else {
     delete process.env.OPENAI_API_KEY
   }
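
Taken together, the clear and apply halves keep provider-specific keys consistent across profile switches: clearing removes stale `MINIMAX_API_KEY`/`NVIDIA_API_KEY` values, and applying re-derives them from the new profile's base URL. A condensed restatement:

```ts
// Condensed sketch of the switch sequence in the two hunks above.
function switchProfileKeys(env: NodeJS.ProcessEnv, apiKey: string, baseUrl: string): void {
  // 1. Clear keys left over from the previous profile.
  delete env.MINIMAX_API_KEY
  delete env.NVIDIA_API_KEY
  delete env.NVIDIA_NIM
  // 2. Re-derive provider-specific keys from the new profile's base URL.
  env.OPENAI_API_KEY = apiKey
  const host = baseUrl.toLowerCase()
  if (host.includes('minimax')) env.MINIMAX_API_KEY = apiKey
  if (host.includes('nvidia')) env.NVIDIA_API_KEY = apiKey
}
```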