feat: add xAI as official provider (#865)

* feat: add xAI as official provider

- Add xAI preset to ProviderManager (alphabetical order)
- Add xAI provider detection via XAI_API_KEY
- Add xAI startup screen heuristic (x.ai base URL or grok model)
- Add xAI status display properties
- Add grok-4 and grok-3 context windows
- Add xAI model fallbacks across all tiers
- Fix JSDoc priority order in providerAutoDetect

Co-Authored-By: Claude Opus 4.6 <noreply@openclaude.dev>

* fix(xai): persist relaunch classification for xAI profiles

Addresses reviewer feedback on feat/xai-official-provider:
- isProcessEnvAlignedWithProfile now validates XAI_API_KEY for x.ai
  base URLs, mirroring the Bankr pattern. Without this, relaunch
  skips re-applying the profile, XAI_API_KEY stays unset, and
  getAPIProvider() falls back to 'openai'.
- buildOpenAICompatibleStartupEnv now sets XAI_API_KEY when syncing
  active xAI profile to the legacy fallback file.
- Adds 'xai' to VALID_PROVIDERS and --provider xai CLI flag support.
- Adds xAI detection to providerDiscovery label heuristics.
- Adds 'xai' to legacy ProviderProfile type/isProviderProfile guard.
- Adds targeted tests for relaunch alignment, flag application, and
  discovery labeling.

Co-Authored-By: OpenClaude <openclaude@gitlawb.com>

---------

Co-authored-by: Claude Opus 4.6 <noreply@openclaude.dev>
Co-authored-by: OpenClaude <openclaude@gitlawb.com>
This commit is contained in:
Authored by Kevin Codex on 2026-04-26 21:26:44 +08:00; committed by GitHub.
parent d45628c413
commit 2586a9cddb
18 changed files with 248 additions and 7 deletions

View File

@@ -125,6 +125,7 @@ const PRESET_ORDER = [
'OpenAI',
'OpenRouter',
'Together AI',
'xAI',
'Z.AI - GLM Coding Plan',
'Custom',
] as const

View File

@@ -1427,6 +1427,11 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
label: 'Together AI',
description: 'Together chat/completions endpoint',
},
{
value: 'xai',
label: 'xAI',
description: 'xAI Grok OpenAI-compatible endpoint',
},
{
value: 'zai',
label: 'Z.AI - GLM Coding Plan',

View File

@@ -138,6 +138,7 @@ export function detectProvider(modelOverride?: string): { name: string; model: s
else if (/api\.kimi\.com/i.test(baseUrl)) name = 'Moonshot AI - Kimi Code'
else if (/moonshot/i.test(baseUrl)) name = 'Moonshot AI - API'
else if (/deepseek/i.test(baseUrl)) name = 'DeepSeek'
else if (/x\.ai/i.test(baseUrl)) name = 'xAI'
else if (isZaiBaseUrl(baseUrl)) name = 'Z.AI - GLM'
else if (/mistral/i.test(baseUrl)) name = 'Mistral'
// rawModel fallback — fires only when base URL is generic/custom.
@@ -148,6 +149,7 @@ export function detectProvider(modelOverride?: string): { name: string; model: s
else if (/\bkimi-k/i.test(rawModel) || /moonshot/i.test(rawModel))
name = 'Moonshot AI - API'
else if (/deepseek/i.test(rawModel)) name = 'DeepSeek'
else if (/grok/i.test(rawModel)) name = 'xAI'
else if (containsExactZaiGlmModelId(rawModel)) name = 'Z.AI - GLM'
else if (/mistral/i.test(rawModel)) name = 'Mistral'
else if (/llama/i.test(rawModel)) name = 'Meta Llama'

View File

@@ -71,6 +71,10 @@ export function getSmallFastModel(): ModelName {
if (getAPIProvider() === 'minimax') {
return process.env.OPENAI_MODEL || 'MiniMax-M2.5-highspeed'
}
// xAI — OPENAI_MODEL carries the active Grok model; fall back to grok-3.
if (getAPIProvider() === 'xai') {
return process.env.OPENAI_MODEL || 'grok-3'
}
return getDefaultHaikuModel()
}
@@ -119,7 +123,8 @@ export function getUserSpecifiedModelSetting(): ModelSetting | undefined {
provider === 'codex' ||
provider === 'github' ||
provider === 'nvidia-nim' ||
provider === 'minimax'
provider === 'minimax' ||
provider === 'xai'
specifiedModel =
(provider === 'gemini' ? process.env.GEMINI_MODEL : undefined) ||
(provider === 'mistral' ? process.env.MISTRAL_MODEL : undefined) ||
@@ -194,6 +199,10 @@ export function getDefaultOpusModel(): ModelName {
if (getAPIProvider() === 'minimax') {
return process.env.OPENAI_MODEL || 'MiniMax-M2.7'
}
// xAI — flagship Grok model for "opus"-equivalent.
if (getAPIProvider() === 'xai') {
return process.env.OPENAI_MODEL || 'grok-4'
}
// 3P providers (Bedrock, Vertex, Foundry) — kept as a separate branch
// even when values match, since 3P availability lags firstParty and
// these will diverge again at the next model launch.
@@ -236,6 +245,10 @@ export function getDefaultSonnetModel(): ModelName {
if (getAPIProvider() === 'minimax') {
return process.env.OPENAI_MODEL || 'MiniMax-M2.5'
}
// xAI — flagship Grok model for "sonnet"-equivalent.
if (getAPIProvider() === 'xai') {
return process.env.OPENAI_MODEL || 'grok-4'
}
// Default to Sonnet 4.5 for 3P since they may not have 4.6 yet
if (getAPIProvider() !== 'firstParty') {
return getModelStrings().sonnet45
@@ -276,6 +289,10 @@ export function getDefaultHaikuModel(): ModelName {
if (getAPIProvider() === 'minimax') {
return process.env.OPENAI_MODEL || 'MiniMax-M2.5-highspeed'
}
// xAI — faster Grok model for "haiku"-equivalent.
if (getAPIProvider() === 'xai') {
return process.env.OPENAI_MODEL || 'grok-3'
}
// Haiku 4.5 is available on all platforms (first-party, Foundry, Bedrock, Vertex)
return getModelStrings().haiku45
@@ -344,6 +361,10 @@ export function getDefaultMainLoopModelSetting(): ModelName | ModelAlias {
if (getAPIProvider() === 'codex') {
return process.env.OPENAI_MODEL || 'gpt-5.5'
}
// xAI provider: always use the configured Grok model (default grok-4)
if (getAPIProvider() === 'xai') {
return process.env.OPENAI_MODEL || 'grok-4'
}
// Ants default to defaultModel from flag config, or Opus 1M if not configured
if (process.env.USER_TYPE === 'ant') {
@@ -524,7 +545,7 @@ export function renderModelSetting(setting: ModelName | ModelAlias): string {
*/
export function getPublicModelDisplayName(model: ModelName): string | null {
// For OpenAI/Gemini/Codex/GitHub providers, show the actual model name not a Claude alias
if (getAPIProvider() === 'openai' || getAPIProvider() === 'gemini' || getAPIProvider() === 'codex' || getAPIProvider() === 'github') {
if (getAPIProvider() === 'openai' || getAPIProvider() === 'gemini' || getAPIProvider() === 'codex' || getAPIProvider() === 'github' || getAPIProvider() === 'xai') {
// Return display names for known GitHub Copilot models
const copilotModelNames: Record<string, string> = {
'gpt-5.5': 'GPT-5.5',

View File

@@ -242,6 +242,10 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
'GLM-4.7': 202_752,
'GLM-4.5-Air': 128_000,
// xAI Grok
'grok-4': 2_000_000,
'grok-3': 131_072,
// Moonshot AI direct API (api.moonshot.ai/v1). Values from Moonshot's
// published model card — all K2 tier share 256K context. Prefix matching
// in lookupByKey catches variants like "kimi-k2.6-preview".
@@ -442,6 +446,10 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
'GLM-4.7': 131_072,
'GLM-4.5-Air': 65_536,
// xAI Grok
'grok-4': 32_768,
'grok-3': 32_768,
// Moonshot AI direct API
'kimi-for-coding': 32_768,
'kimi-k2.6': 32_768,

View File

@@ -10,6 +10,7 @@ const originalEnv = {
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
OPENAI_API_BASE: process.env.OPENAI_API_BASE,
OPENAI_MODEL: process.env.OPENAI_MODEL,
XAI_API_KEY: process.env.XAI_API_KEY,
}
afterEach(() => {
@@ -22,6 +23,7 @@ afterEach(() => {
process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
process.env.OPENAI_API_BASE = originalEnv.OPENAI_API_BASE
process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
process.env.XAI_API_KEY = originalEnv.XAI_API_KEY
})
async function importFreshProvidersModule() {
@@ -38,6 +40,7 @@ function clearProviderEnv(): void {
delete process.env.OPENAI_BASE_URL
delete process.env.OPENAI_API_BASE
delete process.env.OPENAI_MODEL
delete process.env.XAI_API_KEY
}
test('first-party provider keeps Anthropic account setup flow enabled', () => {
@@ -98,6 +101,17 @@ test('codex aliases still resolve to the codex provider without a non-codex base
expect(getAPIProvider()).toBe('codex')
})
test('XAI_API_KEY resolves to the xai provider', async () => {
clearProviderEnv()
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.XAI_API_KEY = 'xai-test-key'
process.env.OPENAI_BASE_URL = 'https://api.x.ai/v1'
process.env.OPENAI_MODEL = 'grok-4'
const { getAPIProvider } = await importFreshProvidersModule()
expect(getAPIProvider()).toBe('xai')
})
test('official OpenAI base URLs now keep provider detection on openai for aliases', async () => {
clearProviderEnv()
process.env.CLAUDE_CODE_USE_OPENAI = '1'

View File

@@ -14,6 +14,7 @@ export type APIProvider =
| 'nvidia-nim'
| 'minimax'
| 'mistral'
| 'xai'
export function getAPIProvider(): APIProvider {
if (isEnvTruthy(process.env.NVIDIA_NIM)) {
@@ -27,6 +28,10 @@ export function getAPIProvider(): APIProvider {
if (typeof process.env.MINIMAX_API_KEY === 'string' && process.env.MINIMAX_API_KEY.trim() !== '') {
return 'minimax'
}
// xAI is signalled by a real API key (same pattern as MiniMax)
if (typeof process.env.XAI_API_KEY === 'string' && process.env.XAI_API_KEY.trim() !== '') {
return 'xai'
}
return isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
? 'gemini'
:

View File

@@ -112,6 +112,13 @@ describe('detectProviderFromEnv — priority order', () => {
})
})
test('XAI_API_KEY detected', () => {
expect(scan({ XAI_API_KEY: 'xai-x' })).toEqual({
kind: 'xai',
source: 'XAI_API_KEY set',
})
})
test('empty-string values are ignored', () => {
expect(
scan({

View File

@@ -16,8 +16,9 @@
* 5. GEMINI_API_KEY or GOOGLE_API_KEY
* 6. MISTRAL_API_KEY
* 7. MINIMAX_API_KEY
* 8. Local Ollama reachable (default localhost:11434)
* 9. Local LM Studio reachable (default localhost:1234)
* 8. XAI_API_KEY
* 9. Local Ollama reachable (default localhost:11434)
* 10. Local LM Studio reachable (default localhost:1234)
*
* Local-service probes are parallelized and cheap (short timeout, no
* request body). Env scans are synchronous and run first so we don't make
@@ -40,6 +41,7 @@ export type DetectedProviderKind =
| 'gemini'
| 'mistral'
| 'minimax'
| 'xai'
| 'ollama'
| 'lm-studio'
@@ -159,6 +161,10 @@ export function detectProviderFromEnv(
return { kind: 'minimax', source: 'MINIMAX_API_KEY set' }
}
if (envHasNonEmpty(env, 'XAI_API_KEY')) {
return { kind: 'xai', source: 'XAI_API_KEY set' }
}
return null
}

View File

@@ -99,6 +99,15 @@ test('detects Moonshot AI - Kimi Code from api.kimi.com/coding hostname', async
).toBe('Moonshot AI - Kimi Code')
})
test('detects xAI from api.x.ai hostname', async () => {
const { getLocalOpenAICompatibleProviderLabel } =
await loadProviderDiscoveryModule()
expect(
getLocalOpenAICompatibleProviderLabel('https://api.x.ai/v1'),
).toBe('xAI')
})
test('falls back to a generic local openai-compatible label', async () => {
const { getLocalOpenAICompatibleProviderLabel } =
await loadProviderDiscoveryModule()

View File

@@ -206,6 +206,10 @@ export function getLocalOpenAICompatibleProviderLabel(baseUrl?: string): string
if (host.includes('bankr') || haystack.includes('bankr')) {
return 'Bankr'
}
// xAI Grok endpoint
if (host.includes('x.ai') || haystack.includes('x.ai')) {
return 'xAI'
}
// Z.AI GLM Coding Plan
if (isZaiBaseUrl(parsed.href)) {
return 'Z.AI - GLM'

View File

@@ -178,6 +178,42 @@ describe('applyProviderFlag - ollama', () => {
})
})
describe('applyProviderFlag - xai', () => {
test('sets CLAUDE_CODE_USE_OPENAI=1 with xAI defaults when unset', () => {
delete process.env.OPENAI_BASE_URL
delete process.env.OPENAI_API_KEY
const result = applyProviderFlag('xai', [])
expect(result.error).toBeUndefined()
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
expect(process.env.OPENAI_BASE_URL).toBe('https://api.x.ai/v1')
expect(process.env.OPENAI_MODEL).toBe('grok-4')
})
test('sets OPENAI_MODEL when --model is provided', () => {
applyProviderFlag('xai', ['--model', 'grok-3'])
expect(process.env.OPENAI_MODEL).toBe('grok-3')
})
test('propagates XAI_API_KEY to OPENAI_API_KEY when only XAI_API_KEY is set', () => {
delete process.env.OPENAI_API_KEY
process.env.XAI_API_KEY = 'xai-secret-key'
applyProviderFlag('xai', [])
expect(process.env.OPENAI_API_KEY).toBe('xai-secret-key')
})
test('does not override existing OPENAI_API_KEY when both keys are set', () => {
process.env.OPENAI_API_KEY = 'existing-openai-key'
process.env.XAI_API_KEY = 'xai-secret-key'
applyProviderFlag('xai', [])
expect(process.env.OPENAI_API_KEY).toBe('existing-openai-key')
})
})
describe('applyProviderFlag - invalid provider', () => {
test('returns error for unknown provider', () => {
const result = applyProviderFlag('unknown-provider', [])

View File

@@ -16,6 +16,7 @@ export const VALID_PROVIDERS = [
'anthropic',
'bankr',
'zai',
'xai',
'openai',
'gemini',
'mistral',
@@ -167,6 +168,16 @@ export function applyProviderFlag(
process.env.OPENAI_MODEL ??= 'GLM-5.1'
if (model) process.env.OPENAI_MODEL = model
break
case 'xai':
process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL ??= 'https://api.x.ai/v1'
process.env.OPENAI_MODEL ??= 'grok-4'
if (model) process.env.OPENAI_MODEL = model
if (process.env.XAI_API_KEY && !process.env.OPENAI_API_KEY) {
process.env.OPENAI_API_KEY = process.env.XAI_API_KEY
}
break
}
return {}

View File

@@ -23,7 +23,6 @@ import {
sanitizeApiKey,
sanitizeProviderConfigValue,
} from './providerSecrets.js'
import { getPrimaryModel } from './providerModels.js'
export {
maskSecretForDisplay,
@@ -79,6 +78,7 @@ const PROFILE_ENV_KEYS = [
'BANKR_BASE_URL',
'BNKR_API_KEY',
'BANKR_MODEL',
'XAI_API_KEY',
] as const
const SECRET_ENV_KEYS = [
@@ -91,9 +91,10 @@ const SECRET_ENV_KEYS = [
'MINIMAX_API_KEY',
'MISTRAL_API_KEY',
'BNKR_API_KEY',
'XAI_API_KEY',
] as const
export type ProviderProfile = 'openai' | 'ollama' | 'codex' | 'gemini' | 'atomic-chat' | 'nvidia-nim' | 'minimax' | 'mistral'
export type ProviderProfile = 'openai' | 'ollama' | 'codex' | 'gemini' | 'atomic-chat' | 'nvidia-nim' | 'minimax' | 'mistral' | 'xai'
export type ProfileEnv = {
OPENAI_BASE_URL?: string
@@ -123,6 +124,7 @@ export type ProfileEnv = {
BANKR_BASE_URL?: string
BNKR_API_KEY?: string
BANKR_MODEL?: string
XAI_API_KEY?: string
}
export type ProfileFile = {
@@ -180,7 +182,8 @@ export function isProviderProfile(value: unknown): value is ProviderProfile {
value === 'atomic-chat' ||
value === 'nvidia-nim' ||
value === 'minimax' ||
value === 'mistral'
value === 'mistral' ||
value === 'xai'
)
}

View File

@@ -43,6 +43,7 @@ const RESTORED_KEYS = [
'MISTRAL_BASE_URL',
'MISTRAL_MODEL',
'MISTRAL_API_KEY',
'XAI_API_KEY',
] as const
type MockConfigState = {
@@ -136,6 +137,16 @@ function buildGeminiProfile(overrides: Partial<ProviderProfile> = {}): ProviderP
})
}
function buildXaiProfile(overrides: Partial<ProviderProfile> = {}): ProviderProfile {
return buildProfile({
provider: 'openai',
baseUrl: 'https://api.x.ai/v1',
model: 'grok-4',
apiKey: 'xai-test-key',
...overrides,
})
}
describe('applyProviderProfileToProcessEnv', () => {
test('openai profile clears competing gemini/github flags', async () => {
const { applyProviderProfileToProcessEnv } =
@@ -324,6 +335,18 @@ describe('applyProviderProfileToProcessEnv', () => {
expect(process.env.MISTRAL_MODEL).toBe('devstral-latest')
expect(process.env.CLAUDE_CODE_USE_MISTRAL).toBe('1')
})
test('xai profile sets XAI_API_KEY and getAPIProvider returns xai', async () => {
const { applyProviderProfileToProcessEnv } =
await importFreshProviderProfileModules()
applyProviderProfileToProcessEnv(buildXaiProfile())
const { getAPIProvider: getFreshAPIProvider } =
await importFreshProvidersModule()
expect(process.env.XAI_API_KEY).toBe('xai-test-key')
expect(getFreshAPIProvider()).toBe('xai')
})
})
describe('applyActiveProviderProfileFromConfig', () => {
@@ -512,6 +535,44 @@ describe('applyActiveProviderProfileFromConfig', () => {
expect(process.env.OPENAI_MODEL).toBe('github:copilot')
})
test('re-applies xai active profile when XAI_API_KEY is missing (env drift)', async () => {
const { applyActiveProviderProfileFromConfig, applyProviderProfileToProcessEnv } =
await importFreshProviderProfileModules()
const xaiProfile = buildXaiProfile({ id: 'saved_xai' })
applyProviderProfileToProcessEnv(xaiProfile)
// Simulate relaunch where the shell exported OPENAI vars but not XAI_API_KEY
delete process.env.XAI_API_KEY
const applied = applyActiveProviderProfileFromConfig({
providerProfiles: [xaiProfile],
activeProviderProfileId: 'saved_xai',
} as any)
expect(applied?.id).toBe('saved_xai')
expect(process.env.XAI_API_KEY).toBe('xai-test-key')
})
test('does not re-apply xai active profile when XAI_API_KEY is aligned', async () => {
const { applyActiveProviderProfileFromConfig, applyProviderProfileToProcessEnv } =
await importFreshProviderProfileModules()
const xaiProfile = buildXaiProfile({ id: 'saved_xai' })
applyProviderProfileToProcessEnv(xaiProfile)
// XAI_API_KEY is already set and aligned
expect(process.env.XAI_API_KEY).toBe('xai-test-key')
expect(process.env.OPENAI_API_KEY).toBe('xai-test-key')
const applied = applyActiveProviderProfileFromConfig({
providerProfiles: [xaiProfile],
activeProviderProfileId: 'saved_xai',
} as any)
// Returns profile without re-applying since env is aligned
expect(applied?.id).toBe('saved_xai')
expect(process.env.XAI_API_KEY).toBe('xai-test-key')
})
test('applies active profile when no explicit provider is selected', async () => {
const { applyActiveProviderProfileFromConfig } =
await importFreshProviderProfileModules()

View File

@@ -39,6 +39,7 @@ export type ProviderPreset =
| 'custom'
| 'nvidia-nim'
| 'minimax'
| 'xai'
| 'zai'
| 'bankr'
| 'atomic-chat'
@@ -331,6 +332,15 @@ export function getProviderPresetDefaults(
apiKey: process.env.NVIDIA_API_KEY ?? '',
requiresApiKey: true,
}
case 'xai':
return {
provider: 'openai',
name: 'xAI',
baseUrl: 'https://api.x.ai/v1',
model: 'grok-4',
apiKey: process.env.XAI_API_KEY ?? '',
requiresApiKey: true,
}
case 'minimax':
return {
provider: 'openai',
@@ -559,6 +569,10 @@ function isProcessEnvAlignedWithProfile(
(profile.baseUrl?.toLowerCase().includes('bankr')
? !includeApiKey ||
sameOptionalEnvValue(processEnv.BNKR_API_KEY, profile.apiKey)
: true) &&
(profile.baseUrl?.toLowerCase().includes('x.ai')
? !includeApiKey ||
sameOptionalEnvValue(processEnv.XAI_API_KEY, profile.apiKey)
: true)
)
}
@@ -619,6 +633,7 @@ export function clearProviderProfileEnvFromProcessEnv(
delete processEnv.BANKR_BASE_URL
delete processEnv.BNKR_API_KEY
delete processEnv.BANKR_MODEL
delete processEnv.XAI_API_KEY
}
export function applyProviderProfileToProcessEnv(profile: ProviderProfile): void {
@@ -726,6 +741,9 @@ export function applyProviderProfileToProcessEnv(profile: ProviderProfile): void
if (baseUrl.includes('bankr')) {
process.env.BNKR_API_KEY = profile.apiKey
}
if (baseUrl.includes('x.ai')) {
process.env.XAI_API_KEY = profile.apiKey
}
} else {
delete process.env.OPENAI_API_KEY
}
@@ -999,6 +1017,9 @@ function buildOpenAICompatibleStartupEnv(
if (activeProfile.baseUrl?.toLowerCase().includes('bankr')) {
env.BNKR_API_KEY = activeProfile.apiKey
}
if (activeProfile.baseUrl?.toLowerCase().includes('x.ai')) {
env.XAI_API_KEY = activeProfile.apiKey
}
} else {
delete env.OPENAI_API_KEY
}

View File

@@ -6,6 +6,7 @@ const SECRET_ENV_KEYS = [
'GOOGLE_API_KEY',
'MISTRAL_API_KEY',
'BNKR_API_KEY',
'XAI_API_KEY',
] as const
export type SecretValueSource = Partial<

View File

@@ -252,6 +252,7 @@ export function buildAPIProviderProperties(): Property[] {
gemini: 'Google Gemini',
github: 'GitHub Models',
mistral: 'Mistral',
xai: 'xAI',
}[apiProvider];
properties.push({
label: 'API provider',
@@ -411,6 +412,31 @@ export function buildAPIProviderProperties(): Property[] {
value: redactSecretValueForDisplay(mistralModel, process.env) ?? mistralModel
})
}
} else if (apiProvider === 'xai') {
const xaiBaseUrl = process.env.OPENAI_BASE_URL;
if (xaiBaseUrl) {
properties.push({
label: 'xAI base URL',
value: redactSecretValueForDisplay(xaiBaseUrl, process.env) ?? xaiBaseUrl
})
}
const openaiModel = process.env.OPENAI_MODEL;
if (openaiModel) {
let modelDisplay = openaiModel;
const resolved = resolveProviderRequest({ model: openaiModel });
const resolvedModel = resolved.resolvedModel;
const reasoningEffort = resolved.reasoning?.effort;
if (resolvedModel && resolvedModel !== openaiModel.toLowerCase()) {
modelDisplay = resolvedModel;
}
if (reasoningEffort) {
modelDisplay = `${modelDisplay} (${reasoningEffort})`;
}
properties.push({
label: 'Model',
value: redactSecretValueForDisplay(modelDisplay, process.env) ?? modelDisplay
});
}
}
const proxyUrl = getProxyUrl();
if (proxyUrl) {