feat: add NVIDIA NIM and MiniMax provider support (#552)
* feat: add NVIDIA NIM and MiniMax provider support
  - Add nvidia-nim and minimax to --provider CLI flag
  - Add model discovery for NVIDIA NIM (160+ models) and MiniMax
  - Update /model picker to show provider-specific models
  - Fix provider detection in startup banner
  - Update .env.example with new provider options

  Supported providers:
  - NVIDIA NIM: https://integrate.api.nvidia.com/v1
  - MiniMax: https://api.minimax.io/v1

* fix: resolve conflict in StartupScreen (keep NVIDIA/MiniMax + add Codex detection)
* fix: resolve providerProfile conflict (add imports from main, keep NVIDIA/MiniMax)
* fix: revert providerSecrets to match main (NVIDIA/MiniMax handled elsewhere)
* fix: add context window entries for NVIDIA NIM and new MiniMax models
* fix: use GLM-5 as NVIDIA NIM default and MiniMax-M2.5 for consistency
* fix: address remaining review items - add GLM/Kimi context entries, max output tokens, fix .env.example, revert to Nemotron default
* fix: filter NVIDIA NIM picker to chat/instruct models only, set provider-specific API keys from saved profiles
* chore: add more NVIDIA NIM context window entries for popular models
* fix: address remaining non-blocking items - fix base model, clear provider API keys on profile switch
This commit is contained in:
committed by
GitHub
parent
6b2121da12
commit
51191d6132
@@ -35,6 +35,8 @@ import { has1mContext } from '../context.js'
|
||||
import { getGlobalConfig } from '../config.js'
|
||||
import { getActiveOpenAIModelOptionsCache } from '../providerProfiles.js'
|
||||
import { getCachedOllamaModelOptions, isOllamaProvider } from './ollamaModels.js'
|
||||
import { getCachedNvidiaNimModelOptions, isNvidiaNimProvider } from './nvidiaNimModels.js'
|
||||
import { getCachedMiniMaxModelOptions, isMiniMaxProvider } from './minimaxModels.js'
|
||||
import { getAntModels } from './antModels.js'
|
||||
|
||||
// @[MODEL LAUNCH]: Update all the available and default model option strings below.
|
||||
@@ -390,6 +392,26 @@ function getModelOptionsBase(fastMode = false): ModelOption[] {
|
||||
return [defaultOption]
|
||||
}
|
||||
|
||||
// When using NVIDIA NIM, show models from the NVIDIA catalog
|
||||
if (isNvidiaNimProvider()) {
|
||||
const defaultOption = getDefaultOptionForUser(fastMode)
|
||||
const nvidiaModels = getCachedNvidiaNimModelOptions()
|
||||
if (nvidiaModels.length > 0) {
|
||||
return [defaultOption, ...nvidiaModels]
|
||||
}
|
||||
return [defaultOption]
|
||||
}
|
||||
|
||||
// When using MiniMax, show models from the MiniMax catalog
|
||||
if (isMiniMaxProvider()) {
|
||||
const defaultOption = getDefaultOptionForUser(fastMode)
|
||||
const minimaxModels = getCachedMiniMaxModelOptions()
|
||||
if (minimaxModels.length > 0) {
|
||||
return [defaultOption, ...minimaxModels]
|
||||
}
|
||||
return [defaultOption]
|
||||
}
|
||||
|
||||
if (process.env.USER_TYPE === 'ant') {
|
||||
// Build options from antModels config
|
||||
const antModelOptions: ModelOption[] = getAntModels().map(m => ({
|
||||
|
||||
Reference in New Issue
Block a user