Compare commits
29 Commits
fix/repl-s
...
feat/auto-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ff7eccc36c | ||
|
|
fbc838ce55 | ||
|
|
8c2d56844b | ||
|
|
6041b7f016 | ||
|
|
122f7b83f3 | ||
|
|
68230f3ffb | ||
|
|
832e80e535 | ||
|
|
93dc5a1554 | ||
|
|
537c469c3a | ||
|
|
ccaa193eec | ||
|
|
2caf2fd982 | ||
|
|
ad724dc3a4 | ||
|
|
648ae8053b | ||
|
|
3188f6ac66 | ||
|
|
69ea1f1e4a | ||
|
|
f9ce81bfb3 | ||
|
|
4975cfc2e0 | ||
|
|
600c01faf7 | ||
|
|
b07bafa5bd | ||
|
|
85aa8b0985 | ||
|
|
e365cb4010 | ||
|
|
52d33a87a0 | ||
|
|
b4bd95b477 | ||
|
|
1e057025d6 | ||
|
|
aff2bd87e4 | ||
|
|
72e6a945fe | ||
|
|
39f3b2babd | ||
|
|
ff7d49990d | ||
|
|
8ece290087 |
13
.github/workflows/pr-checks.yml
vendored
13
.github/workflows/pr-checks.yml
vendored
@@ -29,6 +29,13 @@ jobs:
|
||||
with:
|
||||
bun-version: 1.3.11
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
|
||||
with:
|
||||
python-version: "3.12"
|
||||
cache: "pip"
|
||||
cache-dependency-path: python/requirements.txt
|
||||
|
||||
- name: Install dependencies
|
||||
run: bun install --frozen-lockfile
|
||||
|
||||
@@ -38,6 +45,12 @@ jobs:
|
||||
- name: Full unit test suite
|
||||
run: bun test --max-concurrency=1
|
||||
|
||||
- name: Install Python test dependencies
|
||||
run: python -m pip install -r python/requirements.txt
|
||||
|
||||
- name: Python unit tests
|
||||
run: python -m pytest -q python/tests
|
||||
|
||||
- name: Suspicious PR intent scan
|
||||
run: bun run security:pr-scan -- --base ${{ github.event.pull_request.base.sha || 'origin/main' }}
|
||||
- name: Provider tests
|
||||
|
||||
3
python/requirements.txt
Normal file
3
python/requirements.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
pytest==7.4.4
|
||||
pytest-asyncio==0.23.3
|
||||
httpx==0.25.2
|
||||
@@ -118,14 +118,14 @@ function isLocalBaseUrl(baseUrl: string): boolean {
|
||||
}
|
||||
|
||||
const GEMINI_DEFAULT_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/openai'
|
||||
const GITHUB_MODELS_DEFAULT_BASE = 'https://models.github.ai/inference'
|
||||
const GITHUB_COPILOT_BASE = 'https://api.githubcopilot.com'
|
||||
|
||||
function currentBaseUrl(): string {
|
||||
if (isTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) {
|
||||
return process.env.GEMINI_BASE_URL ?? GEMINI_DEFAULT_BASE_URL
|
||||
}
|
||||
if (isTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) {
|
||||
return process.env.OPENAI_BASE_URL ?? GITHUB_MODELS_DEFAULT_BASE
|
||||
return process.env.OPENAI_BASE_URL ?? GITHUB_COPILOT_BASE
|
||||
}
|
||||
return process.env.OPENAI_BASE_URL ?? 'https://api.openai.com/v1'
|
||||
}
|
||||
@@ -157,7 +157,7 @@ function checkGeminiEnv(): CheckResult[] {
|
||||
|
||||
function checkGithubEnv(): CheckResult[] {
|
||||
const results: CheckResult[] = []
|
||||
const baseUrl = process.env.OPENAI_BASE_URL ?? GITHUB_MODELS_DEFAULT_BASE
|
||||
const baseUrl = process.env.OPENAI_BASE_URL ?? GITHUB_COPILOT_BASE
|
||||
results.push(pass('Provider mode', 'GitHub Models provider enabled.'))
|
||||
|
||||
const token = process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN
|
||||
@@ -435,7 +435,7 @@ function serializeSafeEnvSummary(): Record<string, string | boolean> {
|
||||
process.env.OPENAI_MODEL ??
|
||||
'(unset, default: github:copilot → openai/gpt-4.1)',
|
||||
OPENAI_BASE_URL:
|
||||
process.env.OPENAI_BASE_URL ?? GITHUB_MODELS_DEFAULT_BASE,
|
||||
process.env.OPENAI_BASE_URL ?? GITHUB_COPILOT_BASE,
|
||||
GITHUB_TOKEN_SET: Boolean(
|
||||
process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN,
|
||||
),
|
||||
|
||||
@@ -136,6 +136,7 @@ import hooks from './commands/hooks/index.js'
|
||||
import files from './commands/files/index.js'
|
||||
import branch from './commands/branch/index.js'
|
||||
import agents from './commands/agents/index.js'
|
||||
import autoFix from './commands/auto-fix.js'
|
||||
import plugin from './commands/plugin/index.js'
|
||||
import reloadPlugins from './commands/reload-plugins/index.js'
|
||||
import rewind from './commands/rewind/index.js'
|
||||
@@ -263,6 +264,7 @@ const COMMANDS = memoize((): Command[] => [
|
||||
addDir,
|
||||
advisor,
|
||||
agents,
|
||||
autoFix,
|
||||
branch,
|
||||
btw,
|
||||
chrome,
|
||||
|
||||
25
src/commands/auto-fix.ts
Normal file
25
src/commands/auto-fix.ts
Normal file
@@ -0,0 +1,25 @@
|
||||
import type { Command } from '../types/command.js'
|
||||
|
||||
const command: Command = {
|
||||
name: 'auto-fix',
|
||||
description: 'Configure auto-fix: run lint/test after AI edits',
|
||||
isEnabled: () => true,
|
||||
type: 'prompt',
|
||||
progressMessage: 'Configuring auto-fix...',
|
||||
contentLength: 0,
|
||||
source: 'builtin',
|
||||
async getPromptForCommand() {
|
||||
return [
|
||||
{
|
||||
type: 'text',
|
||||
text:
|
||||
'The user wants to configure auto-fix settings. Auto-fix automatically runs lint and test commands after AI file edits, feeding errors back for self-repair.\n\n' +
|
||||
'Current settings location: `.claude/settings.json` or `.claude/settings.local.json`\n\n' +
|
||||
'Example configuration:\n```json\n{\n "autoFix": {\n "enabled": true,\n "lint": "eslint . --fix",\n "test": "bun test",\n "maxRetries": 3,\n "timeout": 30000\n }\n}\n```\n\n' +
|
||||
'Ask the user what lint and test commands they use, then help them set up the configuration.',
|
||||
},
|
||||
]
|
||||
},
|
||||
}
|
||||
|
||||
export default command
|
||||
@@ -2,8 +2,9 @@ import type { Command } from '../../commands.js'
|
||||
|
||||
const onboardGithub: Command = {
|
||||
name: 'onboard-github',
|
||||
aliases: ['onboarding-github', 'onboardgithub', 'onboardinggithub'],
|
||||
description:
|
||||
'Interactive setup for GitHub Models: device login or PAT, saved to secure storage',
|
||||
'Interactive setup for GitHub Copilot: OAuth device login stored in secure storage',
|
||||
type: 'local-jsx',
|
||||
load: () => import('./onboard-github.js'),
|
||||
}
|
||||
|
||||
148
src/commands/onboard-github/onboard-github.test.ts
Normal file
148
src/commands/onboard-github/onboard-github.test.ts
Normal file
@@ -0,0 +1,148 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
|
||||
import {
|
||||
activateGithubOnboardingMode,
|
||||
applyGithubOnboardingProcessEnv,
|
||||
buildGithubOnboardingSettingsEnv,
|
||||
hasExistingGithubModelsLoginToken,
|
||||
shouldForceGithubRelogin,
|
||||
} from './onboard-github.js'
|
||||
|
||||
describe('shouldForceGithubRelogin', () => {
|
||||
test.each(['force', '--force', 'relogin', '--relogin', 'reauth', '--reauth'])(
|
||||
'treats %s as force re-login',
|
||||
arg => {
|
||||
expect(shouldForceGithubRelogin(arg)).toBe(true)
|
||||
},
|
||||
)
|
||||
|
||||
test('returns false for empty or unknown args', () => {
|
||||
expect(shouldForceGithubRelogin('')).toBe(false)
|
||||
expect(shouldForceGithubRelogin(undefined)).toBe(false)
|
||||
expect(shouldForceGithubRelogin('something-else')).toBe(false)
|
||||
})
|
||||
|
||||
test('treats force flags as present in multi-word args', () => {
|
||||
expect(shouldForceGithubRelogin('--force extra')).toBe(true)
|
||||
expect(shouldForceGithubRelogin('foo --relogin bar')).toBe(true)
|
||||
expect(shouldForceGithubRelogin('abc reauth xyz')).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
describe('hasExistingGithubModelsLoginToken', () => {
|
||||
test('returns true when GITHUB_TOKEN is present', () => {
|
||||
expect(
|
||||
hasExistingGithubModelsLoginToken({ GITHUB_TOKEN: 'token' }, ''),
|
||||
).toBe(true)
|
||||
})
|
||||
|
||||
test('returns true when GH_TOKEN is present', () => {
|
||||
expect(
|
||||
hasExistingGithubModelsLoginToken({ GH_TOKEN: 'token' }, ''),
|
||||
).toBe(true)
|
||||
})
|
||||
|
||||
test('returns true when stored token exists', () => {
|
||||
expect(hasExistingGithubModelsLoginToken({}, 'stored-token')).toBe(true)
|
||||
})
|
||||
|
||||
test('returns false when both env and stored token are missing', () => {
|
||||
expect(hasExistingGithubModelsLoginToken({}, '')).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe('onboarding auth precedence cleanup', () => {
|
||||
test('clears preexisting OpenAI auth when switching to GitHub', () => {
|
||||
const env: NodeJS.ProcessEnv = {
|
||||
CLAUDE_CODE_USE_OPENAI: '1',
|
||||
OPENAI_MODEL: 'gpt-4o',
|
||||
OPENAI_API_KEY: 'sk-stale-openai-key',
|
||||
OPENAI_ORG: 'org-old',
|
||||
OPENAI_PROJECT: 'project-old',
|
||||
OPENAI_ORGANIZATION: 'org-legacy',
|
||||
OPENAI_BASE_URL: 'https://api.openai.com/v1',
|
||||
OPENAI_API_BASE: 'https://api.openai.com/v1',
|
||||
CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED: '1',
|
||||
CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID: 'profile_old',
|
||||
}
|
||||
|
||||
applyGithubOnboardingProcessEnv('github:copilot', env)
|
||||
|
||||
expect(env.CLAUDE_CODE_USE_GITHUB).toBe('1')
|
||||
expect(env.OPENAI_MODEL).toBe('github:copilot')
|
||||
|
||||
expect(env.OPENAI_API_KEY).toBeUndefined()
|
||||
expect(env.OPENAI_ORG).toBeUndefined()
|
||||
expect(env.OPENAI_PROJECT).toBeUndefined()
|
||||
expect(env.OPENAI_ORGANIZATION).toBeUndefined()
|
||||
expect(env.OPENAI_BASE_URL).toBeUndefined()
|
||||
expect(env.OPENAI_API_BASE).toBeUndefined()
|
||||
|
||||
expect(env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
|
||||
expect(env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED).toBeUndefined()
|
||||
expect(env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID).toBeUndefined()
|
||||
|
||||
const settingsEnv = buildGithubOnboardingSettingsEnv('github:copilot')
|
||||
expect(settingsEnv.CLAUDE_CODE_USE_GITHUB).toBe('1')
|
||||
expect(settingsEnv.OPENAI_MODEL).toBe('github:copilot')
|
||||
expect(settingsEnv.OPENAI_API_KEY).toBeUndefined()
|
||||
expect(settingsEnv.OPENAI_ORG).toBeUndefined()
|
||||
expect(settingsEnv.OPENAI_PROJECT).toBeUndefined()
|
||||
expect(settingsEnv.OPENAI_ORGANIZATION).toBeUndefined()
|
||||
})
|
||||
})
|
||||
|
||||
describe('activateGithubOnboardingMode', () => {
|
||||
test('activates settings/env/hydration in order when merge succeeds', () => {
|
||||
const calls: string[] = []
|
||||
|
||||
const result = activateGithubOnboardingMode(' github:copilot ', {
|
||||
mergeSettingsEnv: model => {
|
||||
calls.push(`merge:${model}`)
|
||||
return { ok: true }
|
||||
},
|
||||
applyProcessEnv: model => {
|
||||
calls.push(`apply:${model}`)
|
||||
},
|
||||
hydrateToken: () => {
|
||||
calls.push('hydrate')
|
||||
},
|
||||
onChangeAPIKey: () => {
|
||||
calls.push('onChangeAPIKey')
|
||||
},
|
||||
})
|
||||
|
||||
expect(result).toEqual({ ok: true })
|
||||
expect(calls).toEqual([
|
||||
'merge:github:copilot',
|
||||
'apply:github:copilot',
|
||||
'hydrate',
|
||||
'onChangeAPIKey',
|
||||
])
|
||||
})
|
||||
|
||||
test('stops activation when settings merge fails', () => {
|
||||
const calls: string[] = []
|
||||
|
||||
const result = activateGithubOnboardingMode(DEFAULT_MODEL_FOR_TESTS, {
|
||||
mergeSettingsEnv: () => {
|
||||
calls.push('merge')
|
||||
return { ok: false, detail: 'settings write failed' }
|
||||
},
|
||||
applyProcessEnv: () => {
|
||||
calls.push('apply')
|
||||
},
|
||||
hydrateToken: () => {
|
||||
calls.push('hydrate')
|
||||
},
|
||||
onChangeAPIKey: () => {
|
||||
calls.push('onChangeAPIKey')
|
||||
},
|
||||
})
|
||||
|
||||
expect(result).toEqual({ ok: false, detail: 'settings write failed' })
|
||||
expect(calls).toEqual(['merge'])
|
||||
})
|
||||
})
|
||||
|
||||
const DEFAULT_MODEL_FOR_TESTS = 'github:copilot'
|
||||
@@ -2,9 +2,9 @@ import * as React from 'react'
|
||||
import { useCallback, useState } from 'react'
|
||||
import { Select } from '../../components/CustomSelect/select.js'
|
||||
import { Spinner } from '../../components/Spinner.js'
|
||||
import TextInput from '../../components/TextInput.js'
|
||||
import { Box, Text } from '../../ink.js'
|
||||
import {
|
||||
exchangeForCopilotToken,
|
||||
openVerificationUri,
|
||||
pollAccessToken,
|
||||
requestDeviceCode,
|
||||
@@ -12,29 +12,134 @@ import {
|
||||
import type { LocalJSXCommandCall } from '../../types/command.js'
|
||||
import {
|
||||
hydrateGithubModelsTokenFromSecureStorage,
|
||||
readGithubModelsToken,
|
||||
saveGithubModelsToken,
|
||||
} from '../../utils/githubModelsCredentials.js'
|
||||
import { updateSettingsForSource } from '../../utils/settings/settings.js'
|
||||
import { getSettingsForSource, updateSettingsForSource } from '../../utils/settings/settings.js'
|
||||
|
||||
const DEFAULT_MODEL = 'github:copilot'
|
||||
const FORCE_RELOGIN_ARGS = new Set([
|
||||
'force',
|
||||
'--force',
|
||||
'relogin',
|
||||
'--relogin',
|
||||
'reauth',
|
||||
'--reauth',
|
||||
])
|
||||
|
||||
type Step =
|
||||
| 'menu'
|
||||
| 'device-busy'
|
||||
| 'pat'
|
||||
| 'error'
|
||||
type Step = 'menu' | 'device-busy' | 'error'
|
||||
|
||||
const PROVIDER_SPECIFIC_KEYS = new Set([
|
||||
'CLAUDE_CODE_USE_OPENAI',
|
||||
'CLAUDE_CODE_USE_GEMINI',
|
||||
'CLAUDE_CODE_USE_BEDROCK',
|
||||
'CLAUDE_CODE_USE_VERTEX',
|
||||
'CLAUDE_CODE_USE_FOUNDRY',
|
||||
'OPENAI_BASE_URL',
|
||||
'OPENAI_API_BASE',
|
||||
'OPENAI_API_KEY',
|
||||
'OPENAI_MODEL',
|
||||
'GEMINI_API_KEY',
|
||||
'GOOGLE_API_KEY',
|
||||
'GEMINI_BASE_URL',
|
||||
'GEMINI_MODEL',
|
||||
'GEMINI_ACCESS_TOKEN',
|
||||
'GEMINI_AUTH_MODE',
|
||||
])
|
||||
|
||||
export function shouldForceGithubRelogin(args?: string): boolean {
|
||||
const normalized = (args ?? '').trim().toLowerCase()
|
||||
if (!normalized) {
|
||||
return false
|
||||
}
|
||||
return normalized.split(/\s+/).some(arg => FORCE_RELOGIN_ARGS.has(arg))
|
||||
}
|
||||
|
||||
const GITHUB_PAT_PREFIXES = ['ghp_', 'gho_','ghs_', 'ghr_', 'github_pat_']
|
||||
|
||||
function isGithubPat(token: string): boolean {
|
||||
return GITHUB_PAT_PREFIXES.some(prefix => token.startsWith(prefix))
|
||||
}
|
||||
|
||||
export function hasExistingGithubModelsLoginToken(
|
||||
env: NodeJS.ProcessEnv = process.env,
|
||||
storedToken?: string,
|
||||
): boolean {
|
||||
const envToken = env.GITHUB_TOKEN?.trim() || env.GH_TOKEN?.trim()
|
||||
if (envToken) {
|
||||
// PATs are no longer supported - require OAuth re-auth
|
||||
if (isGithubPat(envToken)) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
const persisted = (storedToken ?? readGithubModelsToken())?.trim()
|
||||
// PATs are no longer supported - require OAuth re-auth
|
||||
if (persisted && isGithubPat(persisted)) {
|
||||
return false
|
||||
}
|
||||
return Boolean(persisted)
|
||||
}
|
||||
|
||||
export function buildGithubOnboardingSettingsEnv(
|
||||
model: string,
|
||||
): Record<string, string | undefined> {
|
||||
return {
|
||||
CLAUDE_CODE_USE_GITHUB: '1',
|
||||
OPENAI_MODEL: model,
|
||||
OPENAI_API_KEY: undefined,
|
||||
OPENAI_ORG: undefined,
|
||||
OPENAI_PROJECT: undefined,
|
||||
OPENAI_ORGANIZATION: undefined,
|
||||
OPENAI_BASE_URL: undefined,
|
||||
OPENAI_API_BASE: undefined,
|
||||
CLAUDE_CODE_USE_OPENAI: undefined,
|
||||
CLAUDE_CODE_USE_GEMINI: undefined,
|
||||
CLAUDE_CODE_USE_BEDROCK: undefined,
|
||||
CLAUDE_CODE_USE_VERTEX: undefined,
|
||||
CLAUDE_CODE_USE_FOUNDRY: undefined,
|
||||
}
|
||||
}
|
||||
|
||||
export function applyGithubOnboardingProcessEnv(
|
||||
model: string,
|
||||
env: NodeJS.ProcessEnv = process.env,
|
||||
): void {
|
||||
env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
env.OPENAI_MODEL = model
|
||||
|
||||
delete env.OPENAI_API_KEY
|
||||
delete env.OPENAI_ORG
|
||||
delete env.OPENAI_PROJECT
|
||||
delete env.OPENAI_ORGANIZATION
|
||||
delete env.OPENAI_BASE_URL
|
||||
delete env.OPENAI_API_BASE
|
||||
|
||||
delete env.CLAUDE_CODE_USE_OPENAI
|
||||
delete env.CLAUDE_CODE_USE_GEMINI
|
||||
delete env.CLAUDE_CODE_USE_BEDROCK
|
||||
delete env.CLAUDE_CODE_USE_VERTEX
|
||||
delete env.CLAUDE_CODE_USE_FOUNDRY
|
||||
delete env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED
|
||||
delete env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID
|
||||
}
|
||||
|
||||
function mergeUserSettingsEnv(model: string): { ok: boolean; detail?: string } {
|
||||
const currentSettings = getSettingsForSource('userSettings')
|
||||
const currentEnv = currentSettings?.env ?? {}
|
||||
|
||||
const newEnv: Record<string, string> = {}
|
||||
for (const [key, value] of Object.entries(currentEnv)) {
|
||||
if (!PROVIDER_SPECIFIC_KEYS.has(key)) {
|
||||
newEnv[key] = value
|
||||
}
|
||||
}
|
||||
|
||||
newEnv.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
newEnv.OPENAI_MODEL = model
|
||||
|
||||
const { error } = updateSettingsForSource('userSettings', {
|
||||
env: {
|
||||
CLAUDE_CODE_USE_GITHUB: '1',
|
||||
OPENAI_MODEL: model,
|
||||
CLAUDE_CODE_USE_OPENAI: undefined as any,
|
||||
CLAUDE_CODE_USE_GEMINI: undefined as any,
|
||||
CLAUDE_CODE_USE_BEDROCK: undefined as any,
|
||||
CLAUDE_CODE_USE_VERTEX: undefined as any,
|
||||
CLAUDE_CODE_USE_FOUNDRY: undefined as any,
|
||||
},
|
||||
env: newEnv,
|
||||
})
|
||||
if (error) {
|
||||
return { ok: false, detail: error.message }
|
||||
@@ -42,6 +147,32 @@ function mergeUserSettingsEnv(model: string): { ok: boolean; detail?: string } {
|
||||
return { ok: true }
|
||||
}
|
||||
|
||||
export function activateGithubOnboardingMode(
|
||||
model: string = DEFAULT_MODEL,
|
||||
options?: {
|
||||
mergeSettingsEnv?: (model: string) => { ok: boolean; detail?: string }
|
||||
applyProcessEnv?: (model: string) => void
|
||||
hydrateToken?: () => void
|
||||
onChangeAPIKey?: () => void
|
||||
},
|
||||
): { ok: boolean; detail?: string } {
|
||||
const normalizedModel = model.trim() || DEFAULT_MODEL
|
||||
const mergeSettingsEnv = options?.mergeSettingsEnv ?? mergeUserSettingsEnv
|
||||
const applyProcessEnv = options?.applyProcessEnv ?? applyGithubOnboardingProcessEnv
|
||||
const hydrateToken =
|
||||
options?.hydrateToken ?? hydrateGithubModelsTokenFromSecureStorage
|
||||
|
||||
const merged = mergeSettingsEnv(normalizedModel)
|
||||
if (!merged.ok) {
|
||||
return merged
|
||||
}
|
||||
|
||||
applyProcessEnv(normalizedModel)
|
||||
hydrateToken()
|
||||
options?.onChangeAPIKey?.()
|
||||
return { ok: true }
|
||||
}
|
||||
|
||||
function OnboardGithub(props: {
|
||||
onDone: Parameters<LocalJSXCommandCall>[0]
|
||||
onChangeAPIKey: () => void
|
||||
@@ -53,32 +184,42 @@ function OnboardGithub(props: {
|
||||
user_code: string
|
||||
verification_uri: string
|
||||
} | null>(null)
|
||||
const [patDraft, setPatDraft] = useState('')
|
||||
const [cursorOffset, setCursorOffset] = useState(0)
|
||||
|
||||
const finalize = useCallback(
|
||||
async (token: string, model: string = DEFAULT_MODEL) => {
|
||||
const saved = saveGithubModelsToken(token)
|
||||
async (
|
||||
token: string,
|
||||
model: string = DEFAULT_MODEL,
|
||||
oauthToken?: string,
|
||||
) => {
|
||||
const saved = saveGithubModelsToken(token, oauthToken)
|
||||
if (!saved.success) {
|
||||
setErrorMsg(saved.warning ?? 'Could not save token to secure storage.')
|
||||
setStep('error')
|
||||
return
|
||||
}
|
||||
const merged = mergeUserSettingsEnv(model.trim() || DEFAULT_MODEL)
|
||||
if (!merged.ok) {
|
||||
const activated = activateGithubOnboardingMode(model, {
|
||||
onChangeAPIKey,
|
||||
})
|
||||
if (!activated.ok) {
|
||||
setErrorMsg(
|
||||
`Token saved, but settings were not updated: ${merged.detail ?? 'unknown error'}. ` +
|
||||
`Token saved, but settings were not updated: ${activated.detail ?? 'unknown error'}. ` +
|
||||
`Add env CLAUDE_CODE_USE_GITHUB=1 and OPENAI_MODEL to ~/.claude/settings.json manually.`,
|
||||
)
|
||||
setStep('error')
|
||||
return
|
||||
}
|
||||
// Clear stale provider-specific env vars from the current session
|
||||
// so resolveProviderRequest() doesn't pick up a previous provider's
|
||||
// base URL or key after onboarding completes.
|
||||
for (const key of PROVIDER_SPECIFIC_KEYS) {
|
||||
delete process.env[key]
|
||||
}
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
process.env.OPENAI_MODEL = model.trim() || DEFAULT_MODEL
|
||||
hydrateGithubModelsTokenFromSecureStorage()
|
||||
onChangeAPIKey()
|
||||
onDone(
|
||||
'GitHub Models onboard complete. Token stored in secure storage; user settings updated. Restart if the model does not switch.',
|
||||
'GitHub Copilot onboard complete. Copilot token and OAuth token stored in secure storage (Windows/Linux: ~/.claude/.credentials.json, macOS: Keychain fallback to ~/.claude/.credentials.json); user settings updated. Restart if the model does not switch.',
|
||||
{ display: 'user' },
|
||||
)
|
||||
},
|
||||
@@ -96,11 +237,12 @@ function OnboardGithub(props: {
|
||||
verification_uri: device.verification_uri,
|
||||
})
|
||||
await openVerificationUri(device.verification_uri)
|
||||
const token = await pollAccessToken(device.device_code, {
|
||||
const oauthToken = await pollAccessToken(device.device_code, {
|
||||
initialInterval: device.interval,
|
||||
timeoutSeconds: device.expires_in,
|
||||
})
|
||||
await finalize(token, DEFAULT_MODEL)
|
||||
const copilotToken = await exchangeForCopilotToken(oauthToken)
|
||||
await finalize(copilotToken.token, DEFAULT_MODEL, oauthToken)
|
||||
} catch (e) {
|
||||
setErrorMsg(e instanceof Error ? e.message : String(e))
|
||||
setStep('error')
|
||||
@@ -139,7 +281,7 @@ function OnboardGithub(props: {
|
||||
if (step === 'device-busy') {
|
||||
return (
|
||||
<Box flexDirection="column" gap={1}>
|
||||
<Text>GitHub device login</Text>
|
||||
<Text>GitHub Copilot sign-in</Text>
|
||||
{deviceHint ? (
|
||||
<>
|
||||
<Text>
|
||||
@@ -147,54 +289,22 @@ function OnboardGithub(props: {
|
||||
{deviceHint.verification_uri}
|
||||
</Text>
|
||||
<Text dimColor>
|
||||
A browser window may have opened. Waiting for authorization…
|
||||
A browser window may have opened. Waiting for authorization...
|
||||
</Text>
|
||||
</>
|
||||
) : (
|
||||
<Text dimColor>Requesting device code from GitHub…</Text>
|
||||
<Text dimColor>Requesting device code from GitHub...</Text>
|
||||
)}
|
||||
<Spinner />
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
|
||||
if (step === 'pat') {
|
||||
return (
|
||||
<Box flexDirection="column" gap={1}>
|
||||
<Text>Paste a GitHub personal access token with access to GitHub Models.</Text>
|
||||
<Text dimColor>Input is masked. Enter to submit; Esc to go back.</Text>
|
||||
<TextInput
|
||||
value={patDraft}
|
||||
mask="*"
|
||||
onChange={setPatDraft}
|
||||
onSubmit={async (value: string) => {
|
||||
const t = value.trim()
|
||||
if (!t) {
|
||||
return
|
||||
}
|
||||
await finalize(t, DEFAULT_MODEL)
|
||||
}}
|
||||
onExit={() => {
|
||||
setStep('menu')
|
||||
setPatDraft('')
|
||||
}}
|
||||
columns={80}
|
||||
cursorOffset={cursorOffset}
|
||||
onChangeCursorOffset={setCursorOffset}
|
||||
/>
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
|
||||
const menuOptions = [
|
||||
{
|
||||
label: 'Sign in with browser (device code)',
|
||||
label: 'Sign in with browser',
|
||||
value: 'device' as const,
|
||||
},
|
||||
{
|
||||
label: 'Paste personal access token',
|
||||
value: 'pat' as const,
|
||||
},
|
||||
{
|
||||
label: 'Cancel',
|
||||
value: 'cancel' as const,
|
||||
@@ -203,10 +313,10 @@ function OnboardGithub(props: {
|
||||
|
||||
return (
|
||||
<Box flexDirection="column" gap={1}>
|
||||
<Text bold>GitHub Models setup</Text>
|
||||
<Text bold>GitHub Copilot setup</Text>
|
||||
<Text dimColor>
|
||||
Stores your token in the OS credential store (macOS Keychain when available)
|
||||
and enables CLAUDE_CODE_USE_GITHUB in your user settings — no export
|
||||
and enables CLAUDE_CODE_USE_GITHUB in your user settings - no export
|
||||
GITHUB_TOKEN needed for future runs.
|
||||
</Text>
|
||||
<Select
|
||||
@@ -216,10 +326,6 @@ function OnboardGithub(props: {
|
||||
onDone('GitHub onboard cancelled', { display: 'system' })
|
||||
return
|
||||
}
|
||||
if (v === 'pat') {
|
||||
setStep('pat')
|
||||
return
|
||||
}
|
||||
void runDeviceFlow()
|
||||
}}
|
||||
/>
|
||||
@@ -227,7 +333,28 @@ function OnboardGithub(props: {
|
||||
)
|
||||
}
|
||||
|
||||
export const call: LocalJSXCommandCall = async (onDone, context) => {
|
||||
export const call: LocalJSXCommandCall = async (onDone, context, args) => {
|
||||
const forceRelogin = shouldForceGithubRelogin(args)
|
||||
if (hasExistingGithubModelsLoginToken() && !forceRelogin) {
|
||||
const activated = activateGithubOnboardingMode(DEFAULT_MODEL, {
|
||||
onChangeAPIKey: context.onChangeAPIKey,
|
||||
})
|
||||
if (!activated.ok) {
|
||||
onDone(
|
||||
`GitHub token detected, but settings activation failed: ${activated.detail ?? 'unknown error'}. ` +
|
||||
'Set CLAUDE_CODE_USE_GITHUB=1 and OPENAI_MODEL=github:copilot in user settings manually.',
|
||||
{ display: 'system' },
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
onDone(
|
||||
'GitHub Models already authorized. Activated GitHub Models mode using your existing token. Use /onboard-github --force to re-authenticate.',
|
||||
{ display: 'user' },
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
return (
|
||||
<OnboardGithub
|
||||
onDone={onDone}
|
||||
|
||||
@@ -52,7 +52,11 @@ async function renderFinalFrame(node: React.ReactNode): Promise<string> {
|
||||
patchConsole: false,
|
||||
})
|
||||
|
||||
await instance.waitUntilExit()
|
||||
// Timeout guard: if render throws before exit effect fires, don't hang
|
||||
await Promise.race([
|
||||
instance.waitUntilExit(),
|
||||
new Promise<void>(resolve => setTimeout(resolve, 3000)),
|
||||
])
|
||||
return stripAnsi(extractLastFrame(getOutput()))
|
||||
}
|
||||
|
||||
@@ -275,6 +279,21 @@ test('buildCurrentProviderSummary does not relabel local gpt-5.4 providers as Co
|
||||
expect(summary.endpointLabel).toBe('http://127.0.0.1:8080/v1')
|
||||
})
|
||||
|
||||
test('buildCurrentProviderSummary recognizes GitHub Models mode', () => {
|
||||
const summary = buildCurrentProviderSummary({
|
||||
processEnv: {
|
||||
CLAUDE_CODE_USE_GITHUB: '1',
|
||||
OPENAI_MODEL: 'github:copilot',
|
||||
OPENAI_BASE_URL: 'https://models.github.ai/inference',
|
||||
},
|
||||
persisted: null,
|
||||
})
|
||||
|
||||
expect(summary.providerLabel).toBe('GitHub Models')
|
||||
expect(summary.modelLabel).toBe('github:copilot')
|
||||
expect(summary.endpointLabel).toBe('https://models.github.ai/inference')
|
||||
})
|
||||
|
||||
test('getProviderWizardDefaults ignores poisoned current provider values', () => {
|
||||
const defaults = getProviderWizardDefaults({
|
||||
OPENAI_API_KEY: 'sk-secret-12345678',
|
||||
|
||||
@@ -178,6 +178,23 @@ export function buildCurrentProviderSummary(options?: {
|
||||
}
|
||||
}
|
||||
|
||||
if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_GITHUB)) {
|
||||
return {
|
||||
providerLabel: 'GitHub Models',
|
||||
modelLabel: getSafeDisplayValue(
|
||||
processEnv.OPENAI_MODEL ?? 'github:copilot',
|
||||
processEnv,
|
||||
),
|
||||
endpointLabel: getSafeDisplayValue(
|
||||
processEnv.OPENAI_BASE_URL ??
|
||||
processEnv.OPENAI_API_BASE ??
|
||||
'https://models.github.ai/inference',
|
||||
processEnv,
|
||||
),
|
||||
savedProfileLabel,
|
||||
}
|
||||
}
|
||||
|
||||
if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_OPENAI)) {
|
||||
const request = resolveProviderRequest({
|
||||
model: processEnv.OPENAI_MODEL,
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
import { useCallback, useState } from 'react'
|
||||
import { isDeepStrictEqual } from 'util'
|
||||
import { useRegisterOverlay } from '../../context/overlayContext.js'
|
||||
import type { InputEvent } from '../../ink/events/input-event.js'
|
||||
// eslint-disable-next-line custom-rules/prefer-use-keybindings -- raw space/arrow multiselect input
|
||||
@@ -9,6 +8,7 @@ import {
|
||||
normalizeFullWidthSpace,
|
||||
} from '../../utils/stringUtils.js'
|
||||
import type { OptionWithDescription } from './select.js'
|
||||
import { optionsNavigateEqual } from './use-select-navigation.js'
|
||||
import { useSelectNavigation } from './use-select-navigation.js'
|
||||
|
||||
export type UseMultiSelectStateProps<T> = {
|
||||
@@ -174,7 +174,7 @@ export function useMultiSelectState<T>({
|
||||
// and the deleted ui/useMultiSelectState.ts — without this, MCPServerDesktopImportDialog
|
||||
// keeps colliding servers checked after getAllMcpConfigs() resolves.
|
||||
const [lastOptions, setLastOptions] = useState(options)
|
||||
if (options !== lastOptions && !isDeepStrictEqual(options, lastOptions)) {
|
||||
if (options !== lastOptions && !optionsNavigateEqual(options, lastOptions)) {
|
||||
setSelectedValues(defaultValue)
|
||||
setLastOptions(options)
|
||||
}
|
||||
|
||||
@@ -6,10 +6,34 @@ import {
|
||||
useRef,
|
||||
useState,
|
||||
} from 'react'
|
||||
import { isDeepStrictEqual } from 'util'
|
||||
import OptionMap from './option-map.js'
|
||||
import type { OptionWithDescription } from './select.js'
|
||||
|
||||
/**
|
||||
* Compare two option arrays for structural equality on properties that
|
||||
* affect navigation behavior. ReactNode `label` and function `onChange`
|
||||
* are intentionally excluded — they are identity-unstable (new reference
|
||||
* each render) but don't change navigation semantics.
|
||||
*/
|
||||
export function optionsNavigateEqual<T>(
|
||||
a: OptionWithDescription<T>[],
|
||||
b: OptionWithDescription<T>[],
|
||||
): boolean {
|
||||
if (a.length !== b.length) return false
|
||||
for (let i = 0; i < a.length; i++) {
|
||||
const ao = a[i]!
|
||||
const bo = b[i]!
|
||||
if (
|
||||
ao.value !== bo.value ||
|
||||
ao.disabled !== bo.disabled ||
|
||||
ao.type !== bo.type
|
||||
) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
type State<T> = {
|
||||
/**
|
||||
* Map where key is option's value and value is option's index.
|
||||
@@ -524,7 +548,7 @@ export function useSelectNavigation<T>({
|
||||
|
||||
const [lastOptions, setLastOptions] = useState(options)
|
||||
|
||||
if (options !== lastOptions && !isDeepStrictEqual(options, lastOptions)) {
|
||||
if (options !== lastOptions && !optionsNavigateEqual(options, lastOptions)) {
|
||||
dispatch({
|
||||
type: 'reset',
|
||||
state: createDefaultState({
|
||||
|
||||
@@ -112,7 +112,7 @@ export function HelpV2(t0) {
|
||||
}
|
||||
tabs.push(t6);
|
||||
if (false && antOnlyCommands.length > 0) {
|
||||
let t7;
|
||||
let t7;
|
||||
if ($[26] !== antOnlyCommands || $[27] !== close || $[28] !== columns || $[29] !== maxHeight) {
|
||||
t7 = <Tab key="internal-only" title="[internal-only]"><Commands commands={antOnlyCommands} maxHeight={maxHeight} columns={columns} title="Browse internal-only commands:" onCancel={close} /></Tab>;
|
||||
$[26] = antOnlyCommands;
|
||||
|
||||
305
src/components/ProviderManager.test.tsx
Normal file
305
src/components/ProviderManager.test.tsx
Normal file
@@ -0,0 +1,305 @@
|
||||
import { PassThrough } from 'node:stream'
|
||||
|
||||
import { afterEach, expect, mock, test } from 'bun:test'
|
||||
import React from 'react'
|
||||
import stripAnsi from 'strip-ansi'
|
||||
|
||||
import { createRoot } from '../ink.js'
|
||||
import { AppStateProvider } from '../state/AppState.js'
|
||||
|
||||
const SYNC_START = '\x1B[?2026h'
|
||||
const SYNC_END = '\x1B[?2026l'
|
||||
|
||||
const ORIGINAL_ENV = {
|
||||
CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
|
||||
GITHUB_TOKEN: process.env.GITHUB_TOKEN,
|
||||
GH_TOKEN: process.env.GH_TOKEN,
|
||||
}
|
||||
|
||||
function extractLastFrame(output: string): string {
|
||||
let lastFrame: string | null = null
|
||||
let cursor = 0
|
||||
|
||||
while (cursor < output.length) {
|
||||
const start = output.indexOf(SYNC_START, cursor)
|
||||
if (start === -1) {
|
||||
break
|
||||
}
|
||||
|
||||
const contentStart = start + SYNC_START.length
|
||||
const end = output.indexOf(SYNC_END, contentStart)
|
||||
if (end === -1) {
|
||||
break
|
||||
}
|
||||
|
||||
const frame = output.slice(contentStart, end)
|
||||
if (frame.trim().length > 0) {
|
||||
lastFrame = frame
|
||||
}
|
||||
cursor = end + SYNC_END.length
|
||||
}
|
||||
|
||||
return lastFrame ?? output
|
||||
}
|
||||
|
||||
function createTestStreams(): {
|
||||
stdout: PassThrough
|
||||
stdin: PassThrough & {
|
||||
isTTY: boolean
|
||||
setRawMode: (mode: boolean) => void
|
||||
ref: () => void
|
||||
unref: () => void
|
||||
}
|
||||
getOutput: () => string
|
||||
} {
|
||||
let output = ''
|
||||
const stdout = new PassThrough()
|
||||
const stdin = new PassThrough() as PassThrough & {
|
||||
isTTY: boolean
|
||||
setRawMode: (mode: boolean) => void
|
||||
ref: () => void
|
||||
unref: () => void
|
||||
}
|
||||
|
||||
stdin.isTTY = true
|
||||
stdin.setRawMode = () => {}
|
||||
stdin.ref = () => {}
|
||||
stdin.unref = () => {}
|
||||
;(stdout as unknown as { columns: number }).columns = 120
|
||||
stdout.on('data', chunk => {
|
||||
output += chunk.toString()
|
||||
})
|
||||
|
||||
return {
|
||||
stdout,
|
||||
stdin,
|
||||
getOutput: () => output,
|
||||
}
|
||||
}
|
||||
|
||||
async function waitForCondition(
|
||||
predicate: () => boolean,
|
||||
options?: { timeoutMs?: number; intervalMs?: number },
|
||||
): Promise<void> {
|
||||
const timeoutMs = options?.timeoutMs ?? 2000
|
||||
const intervalMs = options?.intervalMs ?? 10
|
||||
const startedAt = Date.now()
|
||||
|
||||
while (Date.now() - startedAt < timeoutMs) {
|
||||
if (predicate()) {
|
||||
return
|
||||
}
|
||||
await Bun.sleep(intervalMs)
|
||||
}
|
||||
|
||||
throw new Error('Timed out waiting for ProviderManager test condition')
|
||||
}
|
||||
|
||||
function createDeferred<T>(): {
|
||||
promise: Promise<T>
|
||||
resolve: (value: T) => void
|
||||
} {
|
||||
let resolve!: (value: T) => void
|
||||
const promise = new Promise<T>(r => {
|
||||
resolve = r
|
||||
})
|
||||
return { promise, resolve }
|
||||
}
|
||||
|
||||
function mockProviderProfilesModule(): void {
|
||||
mock.module('../utils/providerProfiles.js', () => ({
|
||||
addProviderProfile: () => null,
|
||||
applyActiveProviderProfileFromConfig: () => {},
|
||||
deleteProviderProfile: () => ({ removed: false, activeProfileId: null }),
|
||||
getActiveProviderProfile: () => null,
|
||||
getProviderPresetDefaults: () => ({
|
||||
provider: 'openai',
|
||||
name: 'Mock provider',
|
||||
baseUrl: 'http://localhost:11434/v1',
|
||||
model: 'mock-model',
|
||||
apiKey: '',
|
||||
}),
|
||||
getProviderProfiles: () => [],
|
||||
setActiveProviderProfile: () => null,
|
||||
updateProviderProfile: () => null,
|
||||
}))
|
||||
}
|
||||
|
||||
function mockProviderManagerDependencies(
|
||||
syncRead: () => string | undefined,
|
||||
asyncRead: () => Promise<string | undefined>,
|
||||
): void {
|
||||
mockProviderProfilesModule()
|
||||
|
||||
mock.module('../utils/githubModelsCredentials.js', () => ({
|
||||
clearGithubModelsToken: () => ({ success: true }),
|
||||
GITHUB_MODELS_HYDRATED_ENV_MARKER: 'CLAUDE_CODE_GITHUB_TOKEN_HYDRATED',
|
||||
hydrateGithubModelsTokenFromSecureStorage: () => {},
|
||||
readGithubModelsToken: syncRead,
|
||||
readGithubModelsTokenAsync: asyncRead,
|
||||
}))
|
||||
|
||||
mock.module('../utils/settings/settings.js', () => ({
|
||||
updateSettingsForSource: () => ({ error: null }),
|
||||
}))
|
||||
}
|
||||
|
||||
async function waitForFrameOutput(
|
||||
getOutput: () => string,
|
||||
predicate: (output: string) => boolean,
|
||||
timeoutMs = 2500,
|
||||
): Promise<string> {
|
||||
let output = ''
|
||||
|
||||
await waitForCondition(() => {
|
||||
output = stripAnsi(extractLastFrame(getOutput()))
|
||||
return predicate(output)
|
||||
}, { timeoutMs })
|
||||
|
||||
return output
|
||||
}
|
||||
|
||||
async function mountProviderManager(
|
||||
ProviderManager: React.ComponentType<{
|
||||
mode: 'first-run' | 'manage'
|
||||
onDone: () => void
|
||||
}>,
|
||||
): Promise<{
|
||||
getOutput: () => string
|
||||
dispose: () => Promise<void>
|
||||
}> {
|
||||
const { stdout, stdin, getOutput } = createTestStreams()
|
||||
const root = await createRoot({
|
||||
stdout: stdout as unknown as NodeJS.WriteStream,
|
||||
stdin: stdin as unknown as NodeJS.ReadStream,
|
||||
patchConsole: false,
|
||||
})
|
||||
|
||||
root.render(
|
||||
<AppStateProvider>
|
||||
<ProviderManager
|
||||
mode="manage"
|
||||
onDone={() => {}}
|
||||
/>
|
||||
</AppStateProvider>,
|
||||
)
|
||||
|
||||
return {
|
||||
getOutput,
|
||||
dispose: async () => {
|
||||
root.unmount()
|
||||
stdin.end()
|
||||
stdout.end()
|
||||
await Bun.sleep(0)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
async function renderProviderManagerFrame(
|
||||
ProviderManager: React.ComponentType<{
|
||||
mode: 'first-run' | 'manage'
|
||||
onDone: () => void
|
||||
}>,
|
||||
options?: {
|
||||
waitForOutput?: (output: string) => boolean
|
||||
timeoutMs?: number
|
||||
},
|
||||
): Promise<string> {
|
||||
const mounted = await mountProviderManager(ProviderManager)
|
||||
const output = await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame => {
|
||||
if (!options?.waitForOutput) {
|
||||
return frame.includes('Provider manager')
|
||||
}
|
||||
return options.waitForOutput(frame)
|
||||
},
|
||||
options?.timeoutMs ?? 2500,
|
||||
)
|
||||
|
||||
await mounted.dispose()
|
||||
return output
|
||||
}
|
||||
|
||||
afterEach(() => {
|
||||
mock.restore()
|
||||
|
||||
for (const [key, value] of Object.entries(ORIGINAL_ENV)) {
|
||||
if (value === undefined) {
|
||||
delete process.env[key as keyof typeof ORIGINAL_ENV]
|
||||
} else {
|
||||
process.env[key as keyof typeof ORIGINAL_ENV] = value
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
test('ProviderManager resolves GitHub virtual provider from async storage without sync reads in render flow', async () => {
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.GITHUB_TOKEN
|
||||
delete process.env.GH_TOKEN
|
||||
|
||||
const syncRead = mock(() => {
|
||||
throw new Error('sync credential read should not run in ProviderManager render flow')
|
||||
})
|
||||
const asyncRead = mock(async () => 'stored-token')
|
||||
|
||||
mockProviderManagerDependencies(syncRead, asyncRead)
|
||||
|
||||
const nonce = `${Date.now()}-${Math.random()}`
|
||||
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
|
||||
const output = await renderProviderManagerFrame(ProviderManager, {
|
||||
waitForOutput: frame =>
|
||||
frame.includes('Provider manager') &&
|
||||
frame.includes('GitHub Models') &&
|
||||
frame.includes('token stored'),
|
||||
})
|
||||
|
||||
expect(output).toContain('Provider manager')
|
||||
expect(output).toContain('GitHub Models')
|
||||
expect(output).toContain('token stored')
|
||||
expect(output).not.toContain('No provider profiles configured yet.')
|
||||
|
||||
expect(syncRead).not.toHaveBeenCalled()
|
||||
expect(asyncRead).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
test('ProviderManager avoids first-frame false negative while stored-token lookup is pending', async () => {
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.GITHUB_TOKEN
|
||||
delete process.env.GH_TOKEN
|
||||
|
||||
const syncRead = mock(() => {
|
||||
throw new Error('sync credential read should not run in ProviderManager render flow')
|
||||
})
|
||||
const deferredStoredToken = createDeferred<string | undefined>()
|
||||
const asyncRead = mock(async () => deferredStoredToken.promise)
|
||||
|
||||
mockProviderManagerDependencies(syncRead, asyncRead)
|
||||
|
||||
const nonce = `${Date.now()}-${Math.random()}`
|
||||
const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`)
|
||||
const mounted = await mountProviderManager(ProviderManager)
|
||||
|
||||
const firstFrame = await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame => frame.includes('Provider manager'),
|
||||
)
|
||||
|
||||
expect(firstFrame).toContain('Checking GitHub Models credentials...')
|
||||
expect(firstFrame).not.toContain('No provider profiles configured yet.')
|
||||
|
||||
deferredStoredToken.resolve('stored-token')
|
||||
|
||||
const resolvedFrame = await waitForFrameOutput(
|
||||
mounted.getOutput,
|
||||
frame => frame.includes('GitHub Models') && frame.includes('token stored'),
|
||||
)
|
||||
|
||||
expect(resolvedFrame).toContain('GitHub Models')
|
||||
expect(resolvedFrame).toContain('token stored')
|
||||
|
||||
await mounted.dispose()
|
||||
|
||||
expect(syncRead).not.toHaveBeenCalled()
|
||||
expect(asyncRead).toHaveBeenCalled()
|
||||
})
|
||||
@@ -5,6 +5,7 @@ import { useKeybinding } from '../keybindings/useKeybinding.js'
|
||||
import type { ProviderProfile } from '../utils/config.js'
|
||||
import {
|
||||
addProviderProfile,
|
||||
applyActiveProviderProfileFromConfig,
|
||||
deleteProviderProfile,
|
||||
getActiveProviderProfile,
|
||||
getProviderPresetDefaults,
|
||||
@@ -14,6 +15,15 @@ import {
|
||||
type ProviderProfileInput,
|
||||
updateProviderProfile,
|
||||
} from '../utils/providerProfiles.js'
|
||||
import {
|
||||
clearGithubModelsToken,
|
||||
GITHUB_MODELS_HYDRATED_ENV_MARKER,
|
||||
hydrateGithubModelsTokenFromSecureStorage,
|
||||
readGithubModelsToken,
|
||||
readGithubModelsTokenAsync,
|
||||
} from '../utils/githubModelsCredentials.js'
|
||||
import { isEnvTruthy } from '../utils/envUtils.js'
|
||||
import { updateSettingsForSource } from '../utils/settings/settings.js'
|
||||
import { Select } from './CustomSelect/index.js'
|
||||
import { Pane } from './design-system/Pane.js'
|
||||
import TextInput from './TextInput.js'
|
||||
@@ -75,6 +85,13 @@ const FORM_STEPS: Array<{
|
||||
},
|
||||
]
|
||||
|
||||
const GITHUB_PROVIDER_ID = '__github_models__'
|
||||
const GITHUB_PROVIDER_LABEL = 'GitHub Models'
|
||||
const GITHUB_PROVIDER_DEFAULT_MODEL = 'github:copilot'
|
||||
const GITHUB_PROVIDER_DEFAULT_BASE_URL = 'https://models.github.ai/inference'
|
||||
|
||||
type GithubCredentialSource = 'stored' | 'env' | 'none'
|
||||
|
||||
function toDraft(profile: ProviderProfile): ProviderDraft {
|
||||
return {
|
||||
name: profile.name,
|
||||
@@ -102,11 +119,83 @@ function profileSummary(profile: ProviderProfile, isActive: boolean): string {
|
||||
return `${providerKind} · ${profile.baseUrl} · ${profile.model} · ${keyInfo}${activeSuffix}`
|
||||
}
|
||||
|
||||
function getGithubCredentialSourceFromEnv(
|
||||
processEnv: NodeJS.ProcessEnv = process.env,
|
||||
): GithubCredentialSource {
|
||||
if (processEnv.GITHUB_TOKEN?.trim() || processEnv.GH_TOKEN?.trim()) {
|
||||
return 'env'
|
||||
}
|
||||
return 'none'
|
||||
}
|
||||
|
||||
async function resolveGithubCredentialSource(
|
||||
processEnv: NodeJS.ProcessEnv = process.env,
|
||||
): Promise<GithubCredentialSource> {
|
||||
const envSource = getGithubCredentialSourceFromEnv(processEnv)
|
||||
if (envSource !== 'none') {
|
||||
return envSource
|
||||
}
|
||||
|
||||
if (await readGithubModelsTokenAsync()) {
|
||||
return 'stored'
|
||||
}
|
||||
|
||||
return 'none'
|
||||
}
|
||||
|
||||
function isGithubProviderAvailable(
|
||||
credentialSource: GithubCredentialSource,
|
||||
processEnv: NodeJS.ProcessEnv = process.env,
|
||||
): boolean {
|
||||
if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_GITHUB)) {
|
||||
return true
|
||||
}
|
||||
return credentialSource !== 'none'
|
||||
}
|
||||
|
||||
function getGithubProviderModel(
|
||||
processEnv: NodeJS.ProcessEnv = process.env,
|
||||
): string {
|
||||
if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_GITHUB)) {
|
||||
return processEnv.OPENAI_MODEL?.trim() || GITHUB_PROVIDER_DEFAULT_MODEL
|
||||
}
|
||||
return GITHUB_PROVIDER_DEFAULT_MODEL
|
||||
}
|
||||
|
||||
function getGithubProviderSummary(
|
||||
isActive: boolean,
|
||||
credentialSource: GithubCredentialSource,
|
||||
processEnv: NodeJS.ProcessEnv = process.env,
|
||||
): string {
|
||||
const credentialSummary =
|
||||
credentialSource === 'stored'
|
||||
? 'token stored'
|
||||
: credentialSource === 'env'
|
||||
? 'token via env'
|
||||
: 'no token found'
|
||||
const activeSuffix = isActive ? ' (active)' : ''
|
||||
return `github-models · ${GITHUB_PROVIDER_DEFAULT_BASE_URL} · ${getGithubProviderModel(processEnv)} · ${credentialSummary}${activeSuffix}`
|
||||
}
|
||||
|
||||
export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
const initialGithubCredentialSource = getGithubCredentialSourceFromEnv()
|
||||
const initialIsGithubActive = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
const initialHasGithubCredential = initialGithubCredentialSource !== 'none'
|
||||
|
||||
const [profiles, setProfiles] = React.useState(() => getProviderProfiles())
|
||||
const [activeProfileId, setActiveProfileId] = React.useState(
|
||||
() => getActiveProviderProfile()?.id,
|
||||
)
|
||||
const [githubProviderAvailable, setGithubProviderAvailable] = React.useState(
|
||||
() => isGithubProviderAvailable(initialGithubCredentialSource),
|
||||
)
|
||||
const [githubCredentialSource, setGithubCredentialSource] = React.useState<GithubCredentialSource>(
|
||||
() => initialGithubCredentialSource,
|
||||
)
|
||||
const [isGithubActive, setIsGithubActive] = React.useState(() => initialIsGithubActive)
|
||||
const [isGithubCredentialSourceResolved, setIsGithubCredentialSourceResolved] =
|
||||
React.useState(() => initialHasGithubCredential || initialIsGithubActive)
|
||||
const githubRefreshEpochRef = React.useRef(0)
|
||||
const [screen, setScreen] = React.useState<Screen>(
|
||||
mode === 'first-run' ? 'select-preset' : 'menu',
|
||||
)
|
||||
@@ -126,16 +215,155 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
const currentStepKey = currentStep.key
|
||||
const currentValue = draft[currentStepKey]
|
||||
|
||||
const refreshGithubProviderState = React.useCallback((): void => {
|
||||
const envCredentialSource = getGithubCredentialSourceFromEnv()
|
||||
const githubActive = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
const canResolveFromEnv = githubActive || envCredentialSource !== 'none'
|
||||
|
||||
if (canResolveFromEnv) {
|
||||
githubRefreshEpochRef.current += 1
|
||||
setGithubCredentialSource(envCredentialSource)
|
||||
setGithubProviderAvailable(isGithubProviderAvailable(envCredentialSource))
|
||||
setIsGithubActive(githubActive)
|
||||
setIsGithubCredentialSourceResolved(true)
|
||||
return
|
||||
}
|
||||
|
||||
setIsGithubCredentialSourceResolved(false)
|
||||
const refreshEpoch = ++githubRefreshEpochRef.current
|
||||
void (async () => {
|
||||
const credentialSource = await resolveGithubCredentialSource()
|
||||
if (refreshEpoch !== githubRefreshEpochRef.current) {
|
||||
return
|
||||
}
|
||||
|
||||
setGithubCredentialSource(credentialSource)
|
||||
setGithubProviderAvailable(isGithubProviderAvailable(credentialSource))
|
||||
setIsGithubActive(isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB))
|
||||
setIsGithubCredentialSourceResolved(true)
|
||||
})()
|
||||
}, [])
|
||||
|
||||
React.useEffect(() => {
|
||||
refreshGithubProviderState()
|
||||
|
||||
return () => {
|
||||
githubRefreshEpochRef.current += 1
|
||||
}
|
||||
}, [refreshGithubProviderState])
|
||||
|
||||
function refreshProfiles(): void {
|
||||
const nextProfiles = getProviderProfiles()
|
||||
setProfiles(nextProfiles)
|
||||
setActiveProfileId(getActiveProviderProfile()?.id)
|
||||
refreshGithubProviderState()
|
||||
}
|
||||
|
||||
function clearStartupProviderOverrideFromUserSettings(): string | null {
|
||||
const { error } = updateSettingsForSource('userSettings', {
|
||||
env: {
|
||||
CLAUDE_CODE_USE_OPENAI: undefined as any,
|
||||
CLAUDE_CODE_USE_GEMINI: undefined as any,
|
||||
CLAUDE_CODE_USE_GITHUB: undefined as any,
|
||||
CLAUDE_CODE_USE_BEDROCK: undefined as any,
|
||||
CLAUDE_CODE_USE_VERTEX: undefined as any,
|
||||
CLAUDE_CODE_USE_FOUNDRY: undefined as any,
|
||||
},
|
||||
})
|
||||
return error ? error.message : null
|
||||
}
|
||||
|
||||
function closeWithCancelled(message: string): void {
|
||||
onDone({ action: 'cancelled', message })
|
||||
}
|
||||
|
||||
function activateGithubProvider(): string | null {
|
||||
const { error } = updateSettingsForSource('userSettings', {
|
||||
env: {
|
||||
CLAUDE_CODE_USE_GITHUB: '1',
|
||||
OPENAI_MODEL: GITHUB_PROVIDER_DEFAULT_MODEL,
|
||||
OPENAI_API_KEY: undefined as any,
|
||||
OPENAI_ORG: undefined as any,
|
||||
OPENAI_PROJECT: undefined as any,
|
||||
OPENAI_ORGANIZATION: undefined as any,
|
||||
OPENAI_BASE_URL: undefined as any,
|
||||
OPENAI_API_BASE: undefined as any,
|
||||
CLAUDE_CODE_USE_OPENAI: undefined as any,
|
||||
CLAUDE_CODE_USE_GEMINI: undefined as any,
|
||||
CLAUDE_CODE_USE_BEDROCK: undefined as any,
|
||||
CLAUDE_CODE_USE_VERTEX: undefined as any,
|
||||
CLAUDE_CODE_USE_FOUNDRY: undefined as any,
|
||||
},
|
||||
})
|
||||
if (error) {
|
||||
return error.message
|
||||
}
|
||||
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
process.env.OPENAI_MODEL = GITHUB_PROVIDER_DEFAULT_MODEL
|
||||
delete process.env.OPENAI_API_KEY
|
||||
delete process.env.OPENAI_ORG
|
||||
delete process.env.OPENAI_PROJECT
|
||||
delete process.env.OPENAI_ORGANIZATION
|
||||
delete process.env.OPENAI_BASE_URL
|
||||
delete process.env.OPENAI_API_BASE
|
||||
delete process.env.CLAUDE_CODE_USE_OPENAI
|
||||
delete process.env.CLAUDE_CODE_USE_GEMINI
|
||||
delete process.env.CLAUDE_CODE_USE_BEDROCK
|
||||
delete process.env.CLAUDE_CODE_USE_VERTEX
|
||||
delete process.env.CLAUDE_CODE_USE_FOUNDRY
|
||||
delete process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED
|
||||
delete process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID
|
||||
delete process.env[GITHUB_MODELS_HYDRATED_ENV_MARKER]
|
||||
|
||||
hydrateGithubModelsTokenFromSecureStorage()
|
||||
return null
|
||||
}
|
||||
|
||||
function deleteGithubProvider(): string | null {
|
||||
const storedTokenBeforeClear = readGithubModelsToken()?.trim()
|
||||
const cleared = clearGithubModelsToken()
|
||||
if (!cleared.success) {
|
||||
return cleared.warning ?? 'Could not clear GitHub credentials.'
|
||||
}
|
||||
|
||||
const { error } = updateSettingsForSource('userSettings', {
|
||||
env: {
|
||||
CLAUDE_CODE_USE_GITHUB: undefined as any,
|
||||
OPENAI_MODEL: undefined as any,
|
||||
OPENAI_BASE_URL: undefined as any,
|
||||
OPENAI_API_BASE: undefined as any,
|
||||
},
|
||||
})
|
||||
if (error) {
|
||||
return error.message
|
||||
}
|
||||
|
||||
const hydratedTokenInSession = process.env.GITHUB_TOKEN?.trim()
|
||||
if (
|
||||
process.env[GITHUB_MODELS_HYDRATED_ENV_MARKER] === '1' &&
|
||||
hydratedTokenInSession &&
|
||||
(!storedTokenBeforeClear || hydratedTokenInSession === storedTokenBeforeClear)
|
||||
) {
|
||||
delete process.env.GITHUB_TOKEN
|
||||
}
|
||||
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env[GITHUB_MODELS_HYDRATED_ENV_MARKER]
|
||||
delete process.env.OPENAI_MODEL
|
||||
delete process.env.OPENAI_API_KEY
|
||||
delete process.env.OPENAI_ORG
|
||||
delete process.env.OPENAI_PROJECT
|
||||
delete process.env.OPENAI_ORGANIZATION
|
||||
delete process.env.OPENAI_BASE_URL
|
||||
delete process.env.OPENAI_API_BASE
|
||||
|
||||
// Restore active provider profile immediately when one exists.
|
||||
applyActiveProviderProfileFromConfig()
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
function startCreateFromPreset(preset: ProviderPreset): void {
|
||||
const defaults = getProviderPresetDefaults(preset)
|
||||
const nextDraft = {
|
||||
@@ -187,11 +415,20 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
return
|
||||
}
|
||||
|
||||
const isActiveSavedProfile = getActiveProviderProfile()?.id === saved.id
|
||||
const settingsOverrideError = isActiveSavedProfile
|
||||
? clearStartupProviderOverrideFromUserSettings()
|
||||
: null
|
||||
|
||||
refreshProfiles()
|
||||
setStatusMessage(
|
||||
const successMessage =
|
||||
editingProfileId
|
||||
? `Updated provider: ${saved.name}`
|
||||
: `Added provider: ${saved.name} (now active)`,
|
||||
: `Added provider: ${saved.name} (now active)`
|
||||
setStatusMessage(
|
||||
settingsOverrideError
|
||||
? `${successMessage}. Warning: could not clear startup provider override (${settingsOverrideError}).`
|
||||
: successMessage,
|
||||
)
|
||||
|
||||
if (mode === 'first-run') {
|
||||
@@ -413,6 +650,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
|
||||
function renderMenu(): React.ReactNode {
|
||||
const hasProfiles = profiles.length > 0
|
||||
const hasSelectableProviders = hasProfiles || githubProviderAvailable
|
||||
|
||||
const options = [
|
||||
{
|
||||
@@ -424,7 +662,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
value: 'activate',
|
||||
label: 'Set active provider',
|
||||
description: 'Switch the active provider profile',
|
||||
disabled: !hasProfiles,
|
||||
disabled: !hasSelectableProviders,
|
||||
},
|
||||
{
|
||||
value: 'edit',
|
||||
@@ -436,7 +674,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
value: 'delete',
|
||||
label: 'Delete provider',
|
||||
description: 'Remove a provider profile',
|
||||
disabled: !hasProfiles,
|
||||
disabled: !hasSelectableProviders,
|
||||
},
|
||||
{
|
||||
value: 'done',
|
||||
@@ -455,14 +693,29 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
</Text>
|
||||
{statusMessage && <Text>{statusMessage}</Text>}
|
||||
<Box flexDirection="column">
|
||||
{profiles.length === 0 ? (
|
||||
<Text dimColor>No provider profiles configured yet.</Text>
|
||||
{profiles.length === 0 && !githubProviderAvailable ? (
|
||||
isGithubCredentialSourceResolved ? (
|
||||
<Text dimColor>No provider profiles configured yet.</Text>
|
||||
) : (
|
||||
<Text dimColor>Checking GitHub Models credentials...</Text>
|
||||
)
|
||||
) : (
|
||||
profiles.map(profile => (
|
||||
<Text key={profile.id} dimColor>
|
||||
- {profile.name}: {profileSummary(profile, profile.id === activeProfileId)}
|
||||
</Text>
|
||||
))
|
||||
<>
|
||||
{profiles.map(profile => (
|
||||
<Text key={profile.id} dimColor>
|
||||
- {profile.name}: {profileSummary(profile, profile.id === activeProfileId)}
|
||||
</Text>
|
||||
))}
|
||||
{githubProviderAvailable ? (
|
||||
<Text dimColor>
|
||||
- {GITHUB_PROVIDER_LABEL}:{' '}
|
||||
{getGithubProviderSummary(
|
||||
isGithubActive,
|
||||
githubCredentialSource,
|
||||
)}
|
||||
</Text>
|
||||
) : null}
|
||||
</>
|
||||
)}
|
||||
</Box>
|
||||
<Select
|
||||
@@ -474,7 +727,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
setScreen('select-preset')
|
||||
break
|
||||
case 'activate':
|
||||
if (profiles.length > 0) {
|
||||
if (hasSelectableProviders) {
|
||||
setScreen('select-active')
|
||||
}
|
||||
break
|
||||
@@ -484,7 +737,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
}
|
||||
break
|
||||
case 'delete':
|
||||
if (profiles.length > 0) {
|
||||
if (hasSelectableProviders) {
|
||||
setScreen('select-delete')
|
||||
}
|
||||
break
|
||||
@@ -504,8 +757,29 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
title: string,
|
||||
emptyMessage: string,
|
||||
onSelect: (profileId: string) => void,
|
||||
options?: { includeGithub?: boolean },
|
||||
): React.ReactNode {
|
||||
if (profiles.length === 0) {
|
||||
const includeGithub = options?.includeGithub ?? false
|
||||
const selectOptions = profiles.map(profile => ({
|
||||
value: profile.id,
|
||||
label:
|
||||
profile.id === activeProfileId
|
||||
? `${profile.name} (active)`
|
||||
: profile.name,
|
||||
description: `${profile.provider === 'anthropic' ? 'anthropic' : 'openai-compatible'} · ${profile.baseUrl} · ${profile.model}`,
|
||||
}))
|
||||
|
||||
if (includeGithub && githubProviderAvailable) {
|
||||
selectOptions.push({
|
||||
value: GITHUB_PROVIDER_ID,
|
||||
label: isGithubActive
|
||||
? `${GITHUB_PROVIDER_LABEL} (active)`
|
||||
: GITHUB_PROVIDER_LABEL,
|
||||
description: `github-models · ${GITHUB_PROVIDER_DEFAULT_BASE_URL} · ${getGithubProviderModel()}`,
|
||||
})
|
||||
}
|
||||
|
||||
if (selectOptions.length === 0) {
|
||||
return (
|
||||
<Box flexDirection="column" gap={1}>
|
||||
<Text color="remember" bold>
|
||||
@@ -528,25 +802,16 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
)
|
||||
}
|
||||
|
||||
const options = profiles.map(profile => ({
|
||||
value: profile.id,
|
||||
label:
|
||||
profile.id === activeProfileId
|
||||
? `${profile.name} (active)`
|
||||
: profile.name,
|
||||
description: `${profile.provider === 'anthropic' ? 'anthropic' : 'openai-compatible'} · ${profile.baseUrl} · ${profile.model}`,
|
||||
}))
|
||||
|
||||
return (
|
||||
<Box flexDirection="column" gap={1}>
|
||||
<Text color="remember" bold>
|
||||
{title}
|
||||
</Text>
|
||||
<Select
|
||||
options={options}
|
||||
options={selectOptions}
|
||||
onChange={onSelect}
|
||||
onCancel={() => setScreen('menu')}
|
||||
visibleOptionCount={Math.min(10, Math.max(2, options.length))}
|
||||
visibleOptionCount={Math.min(10, Math.max(2, selectOptions.length))}
|
||||
/>
|
||||
</Box>
|
||||
)
|
||||
@@ -566,16 +831,36 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
'Set active provider',
|
||||
'No providers available. Add one first.',
|
||||
profileId => {
|
||||
if (profileId === GITHUB_PROVIDER_ID) {
|
||||
const githubError = activateGithubProvider()
|
||||
if (githubError) {
|
||||
setErrorMessage(`Could not activate GitHub provider: ${githubError}`)
|
||||
setScreen('menu')
|
||||
return
|
||||
}
|
||||
refreshProfiles()
|
||||
setStatusMessage(`Active provider: ${GITHUB_PROVIDER_LABEL}`)
|
||||
setScreen('menu')
|
||||
return
|
||||
}
|
||||
|
||||
const active = setActiveProviderProfile(profileId)
|
||||
if (!active) {
|
||||
setErrorMessage('Could not change active provider.')
|
||||
setScreen('menu')
|
||||
return
|
||||
}
|
||||
const settingsOverrideError =
|
||||
clearStartupProviderOverrideFromUserSettings()
|
||||
refreshProfiles()
|
||||
setStatusMessage(`Active provider: ${active.name}`)
|
||||
setStatusMessage(
|
||||
settingsOverrideError
|
||||
? `Active provider: ${active.name}. Warning: could not clear startup provider override (${settingsOverrideError}).`
|
||||
: `Active provider: ${active.name}`,
|
||||
)
|
||||
setScreen('menu')
|
||||
},
|
||||
{ includeGithub: true },
|
||||
)
|
||||
break
|
||||
case 'select-edit':
|
||||
@@ -592,15 +877,35 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
|
||||
'Delete provider',
|
||||
'No providers available. Add one first.',
|
||||
profileId => {
|
||||
if (profileId === GITHUB_PROVIDER_ID) {
|
||||
const githubDeleteError = deleteGithubProvider()
|
||||
if (githubDeleteError) {
|
||||
setErrorMessage(`Could not delete GitHub provider: ${githubDeleteError}`)
|
||||
} else {
|
||||
refreshProfiles()
|
||||
setStatusMessage('GitHub provider deleted')
|
||||
}
|
||||
setScreen('menu')
|
||||
return
|
||||
}
|
||||
|
||||
const result = deleteProviderProfile(profileId)
|
||||
if (!result.removed) {
|
||||
setErrorMessage('Could not delete provider.')
|
||||
} else {
|
||||
const settingsOverrideError = result.activeProfileId
|
||||
? clearStartupProviderOverrideFromUserSettings()
|
||||
: null
|
||||
refreshProfiles()
|
||||
setStatusMessage('Provider deleted')
|
||||
setStatusMessage(
|
||||
settingsOverrideError
|
||||
? `Provider deleted. Warning: could not clear startup provider override (${settingsOverrideError}).`
|
||||
: 'Provider deleted',
|
||||
)
|
||||
}
|
||||
setScreen('menu')
|
||||
},
|
||||
{ includeGithub: true },
|
||||
)
|
||||
break
|
||||
case 'menu':
|
||||
|
||||
@@ -95,8 +95,8 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
|
||||
if (useGithub) {
|
||||
const model = process.env.OPENAI_MODEL || 'github:copilot'
|
||||
const baseUrl =
|
||||
process.env.OPENAI_BASE_URL || 'https://models.github.ai/inference'
|
||||
return { name: 'GitHub Models', model, baseUrl, isLocal: false }
|
||||
process.env.OPENAI_BASE_URL || 'https://api.githubcopilot.com'
|
||||
return { name: 'GitHub Copilot', model, baseUrl, isLocal: false }
|
||||
}
|
||||
|
||||
if (useOpenAI) {
|
||||
|
||||
@@ -68,11 +68,11 @@ When a user describes what they want an agent to do, you will:
|
||||
assistant: "Now let me use the test-runner agent to run the tests"
|
||||
</example>
|
||||
- <example>
|
||||
Context: User is creating an agent to respond to the word "hello" with a friendly jok.
|
||||
user: "Hello"
|
||||
assistant: "I'm going to use the ${AGENT_TOOL_NAME} tool to launch the greeting-responder agent to respond with a friendly joke"
|
||||
Context: User is creating an agent for Claude Code product questions.
|
||||
user: "How do I configure Claude Code hooks?"
|
||||
assistant: "I'm going to use the ${AGENT_TOOL_NAME} tool to launch the claude-code-guide agent to answer the question"
|
||||
<commentary>
|
||||
Since the user is greeting, use the greeting-responder agent to respond with a friendly joke.
|
||||
Since the user is asking how to use Claude Code, use the claude-code-guide agent.
|
||||
</commentary>
|
||||
</example>
|
||||
- If the user mentioned or implied that the agent should be used proactively, you should include examples of this.
|
||||
|
||||
@@ -8,6 +8,34 @@ import {
|
||||
validateProviderEnvOrExit,
|
||||
} from '../utils/providerValidation.js'
|
||||
|
||||
// OpenClaude: polyfill globalThis.File for Node < 20.
|
||||
// undici v7 references `File` at module evaluation time (webidl type
|
||||
// assertions). Node 18 lacks the global, causing a ReferenceError inside
|
||||
// the bundled __commonJS require chain which deadlocks the process when a
|
||||
// proxy is configured (configureGlobalAgents → require_undici).
|
||||
// eslint-disable-next-line custom-rules/no-top-level-side-effects
|
||||
if (typeof globalThis.File === 'undefined') {
|
||||
try {
|
||||
// Node 18.13+ exposes File in node:buffer but not as a global.
|
||||
// eslint-disable-next-line @typescript-eslint/no-require-imports
|
||||
const { File: NodeFile } = require('node:buffer')
|
||||
// @ts-expect-error -- polyfilling missing global
|
||||
globalThis.File = NodeFile
|
||||
} catch {
|
||||
// Absolute fallback: stub so `MakeTypeAssertion(File)` doesn't throw.
|
||||
// @ts-expect-error -- minimal polyfill
|
||||
globalThis.File = class File extends Blob {
|
||||
name: string
|
||||
lastModified: number
|
||||
constructor(parts: BlobPart[], name: string, opts?: FilePropertyBag) {
|
||||
super(parts, opts)
|
||||
this.name = name
|
||||
this.lastModified = opts?.lastModified ?? Date.now()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// OpenClaude: disable experimental API betas by default.
|
||||
// Tool search (defer_loading), global cache scope, and context management
|
||||
// require internal API support not available to external accounts → 500.
|
||||
@@ -68,15 +96,16 @@ async function main(): Promise<void> {
|
||||
}
|
||||
}
|
||||
|
||||
// Enable configs first so we can read settings
|
||||
{
|
||||
const { enableConfigs } = await import('../utils/config.js')
|
||||
enableConfigs()
|
||||
}
|
||||
|
||||
// Apply settings.env from user settings (includes GitHub provider settings from /onboard-github)
|
||||
{
|
||||
const { applySafeConfigEnvironmentVariables } = await import('../utils/managedEnv.js')
|
||||
applySafeConfigEnvironmentVariables()
|
||||
const { hydrateGeminiAccessTokenFromSecureStorage } = await import('../utils/geminiCredentials.js')
|
||||
hydrateGeminiAccessTokenFromSecureStorage()
|
||||
const { hydrateGithubModelsTokenFromSecureStorage } = await import('../utils/githubModelsCredentials.js')
|
||||
hydrateGithubModelsTokenFromSecureStorage()
|
||||
}
|
||||
|
||||
const startupEnv = await buildStartupEnvFromProfile({
|
||||
@@ -93,6 +122,16 @@ async function main(): Promise<void> {
|
||||
}
|
||||
}
|
||||
|
||||
// Hydrate GitHub credentials after profile is applied so CLAUDE_CODE_USE_GITHUB from profile is available
|
||||
{
|
||||
const {
|
||||
hydrateGithubModelsTokenFromSecureStorage,
|
||||
refreshGithubModelsTokenIfNeeded,
|
||||
} = await import('../utils/githubModelsCredentials.js')
|
||||
await refreshGithubModelsTokenIfNeeded()
|
||||
hydrateGithubModelsTokenFromSecureStorage()
|
||||
}
|
||||
|
||||
await validateProviderEnvOrExit()
|
||||
|
||||
// Print the gradient startup screen before the Ink UI loads
|
||||
|
||||
@@ -40,7 +40,7 @@ export class GrpcServer {
|
||||
grpc.ServerCredentials.createInsecure(),
|
||||
(error, boundPort) => {
|
||||
if (error) {
|
||||
console.error('Failed to start gRPC server', error)
|
||||
console.error('Failed to start gRPC server')
|
||||
return
|
||||
}
|
||||
console.log(`gRPC Server running at ${host}:${boundPort}`)
|
||||
@@ -225,7 +225,7 @@ export class GrpcServer {
|
||||
call.end()
|
||||
}
|
||||
} catch (err: any) {
|
||||
console.error("Error processing stream:", err)
|
||||
console.error('Error processing stream')
|
||||
call.write({
|
||||
error: {
|
||||
message: err.message || "Internal server error",
|
||||
|
||||
@@ -366,14 +366,12 @@ const reconciler = createReconciler<
|
||||
createTextInstance(
|
||||
text: string,
|
||||
_root: DOMElement,
|
||||
hostContext: HostContext,
|
||||
_hostContext: HostContext,
|
||||
): TextNode {
|
||||
if (!hostContext.isInsideText) {
|
||||
throw new Error(
|
||||
`Text string "${text}" must be rendered inside <Text> component`,
|
||||
)
|
||||
}
|
||||
|
||||
// react-compiler memoization can reuse cached <Text> elements without
|
||||
// re-traversing getChildHostContext, so hostContext.isInsideText may be
|
||||
// stale. Always create the text node — Ink will render it correctly
|
||||
// regardless of the context tracking state.
|
||||
return createTextNode(text)
|
||||
},
|
||||
resetTextContent() {},
|
||||
|
||||
@@ -27,6 +27,21 @@ async function flushClipboardCopy(): Promise<void> {
|
||||
await new Promise(resolve => setTimeout(resolve, 0))
|
||||
}
|
||||
|
||||
async function waitForExecCall(
|
||||
command: string,
|
||||
attempts = 20,
|
||||
): Promise<(typeof execFileNoThrowMock.mock.calls)[number] | undefined> {
|
||||
for (let attempt = 0; attempt < attempts; attempt++) {
|
||||
const call = execFileNoThrowMock.mock.calls.find(([cmd]) => cmd === command)
|
||||
if (call) {
|
||||
return call
|
||||
}
|
||||
await flushClipboardCopy()
|
||||
}
|
||||
|
||||
return undefined
|
||||
}
|
||||
|
||||
describe('Windows clipboard fallback', () => {
|
||||
beforeEach(() => {
|
||||
execFileNoThrowMock.mockClear()
|
||||
@@ -62,9 +77,7 @@ describe('Windows clipboard fallback', () => {
|
||||
await setClipboard('Привет мир')
|
||||
await flushClipboardCopy()
|
||||
|
||||
const windowsCall = execFileNoThrowMock.mock.calls.find(
|
||||
([cmd]) => cmd === 'powershell',
|
||||
)
|
||||
const windowsCall = await waitForExecCall('powershell')
|
||||
|
||||
expect(windowsCall?.[2]).toMatchObject({
|
||||
stdin: 'ignore',
|
||||
|
||||
@@ -237,6 +237,8 @@ import { useOfficialMarketplaceNotification } from 'src/hooks/useOfficialMarketp
|
||||
import { usePromptsFromClaudeInChrome } from 'src/hooks/usePromptsFromClaudeInChrome.js';
|
||||
import { getTipToShowOnSpinner, recordShownTip } from 'src/services/tips/tipScheduler.js';
|
||||
import type { Theme } from 'src/utils/theme.js';
|
||||
import { isPromptTypingSuppressionActive } from './replInputSuppression.js';
|
||||
import { shouldRunStartupChecks } from './replStartupGates.js';
|
||||
import { checkAndDisableBypassPermissionsIfNeeded, checkAndDisableAutoModeIfNeeded, useKickOffCheckAndDisableBypassPermissionsIfNeeded, useKickOffCheckAndDisableAutoModeIfNeeded } from 'src/utils/permissions/bypassPermissionsKillswitch.js';
|
||||
import { SandboxManager } from 'src/utils/sandbox/sandbox-adapter.js';
|
||||
import { SANDBOX_NETWORK_ACCESS_TOOL_NAME } from 'src/cli/structuredIO.js';
|
||||
@@ -791,10 +793,8 @@ export function REPL({
|
||||
// accepts, and only then is the REPL component mounted and this effect runs.
|
||||
// This ensures that plugin installations from repository and user settings only
|
||||
// happen after explicit user consent to trust the current working directory.
|
||||
useEffect(() => {
|
||||
if (isRemoteSession) return;
|
||||
void performStartupChecks(setAppState);
|
||||
}, [setAppState, isRemoteSession]);
|
||||
// Deferring startup checks is handled below (after promptTypingSuppressionActive
|
||||
// is declared) to avoid temporal dead zone issues.
|
||||
|
||||
// Allow Claude in Chrome MCP to send prompts through MCP notifications
|
||||
// and sync permission mode changes to the Chrome extension
|
||||
@@ -1336,6 +1336,7 @@ export function REPL({
|
||||
const [inputValue, setInputValueRaw] = useState(() => consumeEarlyInput());
|
||||
const inputValueRef = useRef(inputValue);
|
||||
inputValueRef.current = inputValue;
|
||||
const promptTypingSuppressionActive = isPromptTypingSuppressionActive(isPromptInputActive, inputValue);
|
||||
const insertTextRef = useRef<{
|
||||
insert: (text: string) => void;
|
||||
setInputWithCursor: (value: string, cursor: number) => void;
|
||||
@@ -1427,6 +1428,25 @@ export function REPL({
|
||||
const activeRemote = sshRemote.isRemoteMode ? sshRemote : directConnect.isRemoteMode ? directConnect : remoteSession;
|
||||
const [pastedContents, setPastedContents] = useState<Record<number, PastedContent>>({});
|
||||
const [submitCount, setSubmitCount] = useState(0);
|
||||
|
||||
// Defer startup checks until the user has submitted their first message.
|
||||
// A timeout or grace period is insufficient (issue #363): if the user pauses
|
||||
// before typing, startup checks can still fire and recommendation dialogs
|
||||
// steal focus. Only the user's first submission guarantees the prompt was
|
||||
// the first thing they interacted with.
|
||||
const startupChecksStartedRef = React.useRef(false);
|
||||
const hasHadFirstSubmission = (submitCount ?? 0) > 0;
|
||||
useEffect(() => {
|
||||
if (isRemoteSession) return;
|
||||
if (startupChecksStartedRef.current) return;
|
||||
if (!shouldRunStartupChecks({
|
||||
isRemoteSession,
|
||||
hasStarted: startupChecksStartedRef.current,
|
||||
hasHadFirstSubmission,
|
||||
})) return;
|
||||
startupChecksStartedRef.current = true;
|
||||
void performStartupChecks(setAppState);
|
||||
}, [setAppState, isRemoteSession, hasHadFirstSubmission]);
|
||||
// Ref instead of state to avoid triggering React re-renders on every
|
||||
// streaming text_delta. The spinner reads this via its animation timer.
|
||||
const responseLengthRef = useRef(0);
|
||||
@@ -2028,7 +2048,7 @@ export function REPL({
|
||||
if (isMessageSelectorVisible) return 'message-selector';
|
||||
|
||||
// Suppress interrupt dialogs while user is actively typing
|
||||
if (isPromptInputActive) return undefined;
|
||||
if (promptTypingSuppressionActive) return undefined;
|
||||
if (sandboxPermissionRequestQueue[0]) return 'sandbox-permission';
|
||||
|
||||
// Permission/interactive dialogs (show unless blocked by toolJSX)
|
||||
@@ -2059,19 +2079,20 @@ export function REPL({
|
||||
if (allowDialogsWithAnimation && showRemoteCallout) return 'remote-callout';
|
||||
|
||||
// LSP plugin recommendation (lowest priority - non-blocking suggestion)
|
||||
if (allowDialogsWithAnimation && lspRecommendation) return 'lsp-recommendation';
|
||||
// Suppress during startup window to prevent stealing focus from the prompt (issue #363)
|
||||
if (allowDialogsWithAnimation && lspRecommendation && startupChecksStartedRef.current) return 'lsp-recommendation';
|
||||
|
||||
// Plugin hint from CLI/SDK stderr (same priority band as LSP rec)
|
||||
if (allowDialogsWithAnimation && hintRecommendation) return 'plugin-hint';
|
||||
if (allowDialogsWithAnimation && hintRecommendation && startupChecksStartedRef.current) return 'plugin-hint';
|
||||
|
||||
// Desktop app upsell (max 3 launches, lowest priority)
|
||||
if (allowDialogsWithAnimation && showDesktopUpsellStartup) return 'desktop-upsell';
|
||||
if (allowDialogsWithAnimation && showDesktopUpsellStartup && startupChecksStartedRef.current) return 'desktop-upsell';
|
||||
return undefined;
|
||||
}
|
||||
const focusedInputDialog = getFocusedInputDialog();
|
||||
|
||||
// True when permission prompts exist but are hidden because the user is typing
|
||||
const hasSuppressedDialogs = isPromptInputActive && (sandboxPermissionRequestQueue[0] || toolUseConfirmQueue[0] || promptQueue[0] || workerSandboxPermissions.queue[0] || elicitation.queue[0] || showingCostDialog);
|
||||
const hasSuppressedDialogs = promptTypingSuppressionActive && (sandboxPermissionRequestQueue[0] || toolUseConfirmQueue[0] || promptQueue[0] || workerSandboxPermissions.queue[0] || elicitation.queue[0] || showingCostDialog);
|
||||
|
||||
// Keep ref in sync so timer callbacks can read the current value
|
||||
focusedInputDialogRef.current = focusedInputDialog;
|
||||
|
||||
18
src/screens/replInputSuppression.test.ts
Normal file
18
src/screens/replInputSuppression.test.ts
Normal file
@@ -0,0 +1,18 @@
|
||||
import { describe, expect, it } from 'bun:test'
|
||||
|
||||
import { isPromptTypingSuppressionActive } from './replInputSuppression.js'
|
||||
|
||||
describe('isPromptTypingSuppressionActive', () => {
|
||||
it('suppresses dialogs when early input already exists', () => {
|
||||
expect(isPromptTypingSuppressionActive(false, 'hello')).toBe(true)
|
||||
})
|
||||
|
||||
it('does not suppress dialogs for empty or whitespace-only input', () => {
|
||||
expect(isPromptTypingSuppressionActive(false, '')).toBe(false)
|
||||
expect(isPromptTypingSuppressionActive(false, ' ')).toBe(false)
|
||||
})
|
||||
|
||||
it('keeps suppression active while the typing flag is set', () => {
|
||||
expect(isPromptTypingSuppressionActive(true, '')).toBe(true)
|
||||
})
|
||||
})
|
||||
6
src/screens/replInputSuppression.ts
Normal file
6
src/screens/replInputSuppression.ts
Normal file
@@ -0,0 +1,6 @@
|
||||
export function isPromptTypingSuppressionActive(
|
||||
isPromptInputActive: boolean,
|
||||
inputValue: string,
|
||||
): boolean {
|
||||
return isPromptInputActive || inputValue.trim().length > 0
|
||||
}
|
||||
53
src/screens/replStartupGates.test.ts
Normal file
53
src/screens/replStartupGates.test.ts
Normal file
@@ -0,0 +1,53 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
|
||||
import { shouldRunStartupChecks } from './replStartupGates.js'
|
||||
|
||||
describe('shouldRunStartupChecks', () => {
|
||||
test('runs checks after first message submission', () => {
|
||||
expect(shouldRunStartupChecks({
|
||||
isRemoteSession: false,
|
||||
hasStarted: false,
|
||||
hasHadFirstSubmission: true,
|
||||
})).toBe(true)
|
||||
})
|
||||
|
||||
test('skips checks in remote sessions even after submission', () => {
|
||||
expect(shouldRunStartupChecks({
|
||||
isRemoteSession: true,
|
||||
hasStarted: false,
|
||||
hasHadFirstSubmission: true,
|
||||
})).toBe(false)
|
||||
})
|
||||
|
||||
test('skips checks if already started', () => {
|
||||
expect(shouldRunStartupChecks({
|
||||
isRemoteSession: false,
|
||||
hasStarted: true,
|
||||
hasHadFirstSubmission: true,
|
||||
})).toBe(false)
|
||||
})
|
||||
|
||||
test('does not run checks before first submission', () => {
|
||||
expect(shouldRunStartupChecks({
|
||||
isRemoteSession: false,
|
||||
hasStarted: false,
|
||||
hasHadFirstSubmission: false,
|
||||
})).toBe(false)
|
||||
})
|
||||
|
||||
test('does not run checks when idle before first submission', () => {
|
||||
expect(shouldRunStartupChecks({
|
||||
isRemoteSession: false,
|
||||
hasStarted: false,
|
||||
hasHadFirstSubmission: false,
|
||||
})).toBe(false)
|
||||
})
|
||||
|
||||
test('skips checks in remote session regardless of other conditions', () => {
|
||||
expect(shouldRunStartupChecks({
|
||||
isRemoteSession: true,
|
||||
hasStarted: false,
|
||||
hasHadFirstSubmission: false,
|
||||
})).toBe(false)
|
||||
})
|
||||
})
|
||||
35
src/screens/replStartupGates.ts
Normal file
35
src/screens/replStartupGates.ts
Normal file
@@ -0,0 +1,35 @@
|
||||
/**
|
||||
* Startup gates for the REPL.
|
||||
*
|
||||
* Prevents startup plugin checks and recommendation dialogs from stealing
|
||||
* focus before the user has interacted with the prompt.
|
||||
*
|
||||
* This addresses the root cause of issue #363: on mount, performStartupChecks
|
||||
* triggers plugin loading, which populates trackedFiles, which triggers
|
||||
* useLspPluginRecommendation to surface an LSP recommendation dialog. Since
|
||||
* promptTypingSuppressionActive is false before the user has typed anything,
|
||||
* getFocusedInputDialog() returns the dialog, unmounting PromptInput entirely.
|
||||
*
|
||||
* The fix gates startup checks on actual prompt interaction. A pure timeout
|
||||
* or grace period is insufficient because pausing before typing would still
|
||||
* allow dialogs to steal focus. Only the user's first submission guarantees
|
||||
* the prompt is no longer in the vulnerable pre-interaction window.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Determines whether startup checks should run.
|
||||
*
|
||||
* Startup checks are deferred until the user has submitted their first
|
||||
* message. This guarantees the prompt was the first thing the user interacted
|
||||
* with, so no recommendation dialog can steal focus before the first keystroke.
|
||||
*/
|
||||
export function shouldRunStartupChecks(options: {
|
||||
isRemoteSession: boolean;
|
||||
hasStarted: boolean;
|
||||
hasHadFirstSubmission: boolean;
|
||||
}): boolean {
|
||||
if (options.isRemoteSession) return false;
|
||||
if (options.hasStarted) return false;
|
||||
if (!options.hasHadFirstSubmission) return false;
|
||||
return true;
|
||||
}
|
||||
@@ -18,6 +18,7 @@ const originalEnv = {
|
||||
GEMINI_API_KEY: process.env.GEMINI_API_KEY,
|
||||
GEMINI_MODEL: process.env.GEMINI_MODEL,
|
||||
GEMINI_BASE_URL: process.env.GEMINI_BASE_URL,
|
||||
GEMINI_AUTH_MODE: process.env.GEMINI_AUTH_MODE,
|
||||
GOOGLE_API_KEY: process.env.GOOGLE_API_KEY,
|
||||
OPENAI_API_KEY: process.env.OPENAI_API_KEY,
|
||||
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
|
||||
@@ -32,6 +33,7 @@ beforeEach(() => {
|
||||
process.env.GEMINI_API_KEY = 'gemini-test-key'
|
||||
process.env.GEMINI_MODEL = 'gemini-2.0-flash'
|
||||
process.env.GEMINI_BASE_URL = 'https://gemini.example/v1beta/openai'
|
||||
process.env.GEMINI_AUTH_MODE = 'api-key'
|
||||
|
||||
delete process.env.GOOGLE_API_KEY
|
||||
delete process.env.OPENAI_API_KEY
|
||||
@@ -47,6 +49,7 @@ afterEach(() => {
|
||||
process.env.GEMINI_API_KEY = originalEnv.GEMINI_API_KEY
|
||||
process.env.GEMINI_MODEL = originalEnv.GEMINI_MODEL
|
||||
process.env.GEMINI_BASE_URL = originalEnv.GEMINI_BASE_URL
|
||||
process.env.GEMINI_AUTH_MODE = originalEnv.GEMINI_AUTH_MODE
|
||||
process.env.GOOGLE_API_KEY = originalEnv.GOOGLE_API_KEY
|
||||
process.env.OPENAI_API_KEY = originalEnv.OPENAI_API_KEY
|
||||
process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
|
||||
|
||||
@@ -17,16 +17,23 @@ const tempDirs: string[] = []
|
||||
const originalEnv = {
|
||||
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
|
||||
OPENAI_API_BASE: process.env.OPENAI_API_BASE,
|
||||
CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
|
||||
}
|
||||
|
||||
afterEach(() => {
|
||||
if (originalEnv.OPENAI_BASE_URL === undefined) delete process.env.OPENAI_BASE_URL
|
||||
else process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
|
||||
|
||||
if (originalEnv.OPENAI_API_BASE === undefined) delete process.env.OPENAI_API_BASE
|
||||
else process.env.OPENAI_API_BASE = originalEnv.OPENAI_API_BASE
|
||||
|
||||
if (originalEnv.CLAUDE_CODE_USE_GITHUB === undefined) delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
else process.env.CLAUDE_CODE_USE_GITHUB = originalEnv.CLAUDE_CODE_USE_GITHUB
|
||||
|
||||
while (tempDirs.length > 0) {
|
||||
const dir = tempDirs.pop()
|
||||
if (dir) rmSync(dir, { recursive: true, force: true })
|
||||
}
|
||||
|
||||
process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
|
||||
process.env.OPENAI_API_BASE = originalEnv.OPENAI_API_BASE
|
||||
})
|
||||
|
||||
function createTempAuthJson(payload: Record<string, unknown>): string {
|
||||
@@ -71,6 +78,7 @@ describe('Codex provider config', () => {
|
||||
test('resolves codexplan alias to Codex transport with reasoning', () => {
|
||||
delete process.env.OPENAI_BASE_URL
|
||||
delete process.env.OPENAI_API_BASE
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
|
||||
const resolved = resolveProviderRequest({ model: 'codexplan' })
|
||||
expect(resolved.transport).toBe('codex_responses')
|
||||
@@ -201,6 +209,117 @@ describe('Codex request translation', () => {
|
||||
])
|
||||
})
|
||||
|
||||
test('preserves Grep tool pattern field in Codex strict schemas', () => {
|
||||
const tools = convertToolsToResponsesTools([
|
||||
{
|
||||
name: 'Grep',
|
||||
description: 'Search file contents',
|
||||
input_schema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
pattern: { type: 'string', description: 'Search pattern' },
|
||||
path: { type: 'string' },
|
||||
},
|
||||
required: ['pattern'],
|
||||
additionalProperties: false,
|
||||
},
|
||||
},
|
||||
])
|
||||
|
||||
expect(tools).toEqual([
|
||||
{
|
||||
type: 'function',
|
||||
name: 'Grep',
|
||||
description: 'Search file contents',
|
||||
parameters: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
pattern: { type: 'string', description: 'Search pattern' },
|
||||
path: { type: 'string' },
|
||||
},
|
||||
required: ['pattern', 'path'],
|
||||
additionalProperties: false,
|
||||
},
|
||||
strict: true,
|
||||
},
|
||||
])
|
||||
})
|
||||
|
||||
test('preserves Glob tool pattern field in Codex strict schemas', () => {
|
||||
const tools = convertToolsToResponsesTools([
|
||||
{
|
||||
name: 'Glob',
|
||||
description: 'Find files by pattern',
|
||||
input_schema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
pattern: { type: 'string', description: 'Glob pattern' },
|
||||
path: { type: 'string' },
|
||||
},
|
||||
required: ['pattern'],
|
||||
additionalProperties: false,
|
||||
},
|
||||
},
|
||||
])
|
||||
|
||||
expect(tools).toEqual([
|
||||
{
|
||||
type: 'function',
|
||||
name: 'Glob',
|
||||
description: 'Find files by pattern',
|
||||
parameters: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
pattern: { type: 'string', description: 'Glob pattern' },
|
||||
path: { type: 'string' },
|
||||
},
|
||||
required: ['pattern', 'path'],
|
||||
additionalProperties: false,
|
||||
},
|
||||
strict: true,
|
||||
},
|
||||
])
|
||||
})
|
||||
|
||||
test('strips validator pattern keyword but keeps string field named pattern in Codex schemas', () => {
|
||||
const tools = convertToolsToResponsesTools([
|
||||
{
|
||||
name: 'RegexProbe',
|
||||
description: 'Probe regex schema handling',
|
||||
input_schema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
pattern: {
|
||||
type: 'string',
|
||||
pattern: '^[a-z]+$',
|
||||
},
|
||||
},
|
||||
required: ['pattern'],
|
||||
additionalProperties: false,
|
||||
},
|
||||
},
|
||||
])
|
||||
|
||||
expect(tools).toEqual([
|
||||
{
|
||||
type: 'function',
|
||||
name: 'RegexProbe',
|
||||
description: 'Probe regex schema handling',
|
||||
parameters: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
pattern: {
|
||||
type: 'string',
|
||||
},
|
||||
},
|
||||
required: ['pattern'],
|
||||
additionalProperties: false,
|
||||
},
|
||||
strict: true,
|
||||
},
|
||||
])
|
||||
})
|
||||
|
||||
test('removes unsupported uri format from strict Responses schemas', () => {
|
||||
const tools = convertToolsToResponsesTools([
|
||||
{
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -15,9 +15,9 @@
|
||||
* OPENAI_MODEL=gpt-4o — default model override
|
||||
* CODEX_API_KEY / ~/.codex/auth.json — Codex auth for codexplan/codexspark
|
||||
*
|
||||
* GitHub Models (models.github.ai), OpenAI-compatible:
|
||||
* GitHub Copilot API (api.githubcopilot.com), OpenAI-compatible:
|
||||
* CLAUDE_CODE_USE_GITHUB=1 — enable GitHub inference (no need for USE_OPENAI)
|
||||
* GITHUB_TOKEN or GH_TOKEN — PAT with models access (mapped to Bearer auth)
|
||||
* GITHUB_TOKEN or GH_TOKEN — Copilot API token (mapped to Bearer auth)
|
||||
* OPENAI_MODEL — optional; use github:copilot or openai/gpt-4.1 style IDs
|
||||
*/
|
||||
|
||||
@@ -29,7 +29,9 @@ import { hydrateGithubModelsTokenFromSecureStorage } from '../../utils/githubMod
|
||||
import {
|
||||
codexStreamToAnthropic,
|
||||
collectCodexCompletedResponse,
|
||||
convertAnthropicMessagesToResponsesInput,
|
||||
convertCodexResponseToAnthropicMessage,
|
||||
convertToolsToResponsesTools,
|
||||
performCodexRequest,
|
||||
type AnthropicStreamEvent,
|
||||
type AnthropicUsage,
|
||||
@@ -39,9 +41,14 @@ import {
|
||||
isLocalProviderUrl,
|
||||
resolveCodexApiCredentials,
|
||||
resolveProviderRequest,
|
||||
getGithubEndpointType,
|
||||
} from './providerConfig.js'
|
||||
import { sanitizeSchemaForOpenAICompat } from '../../utils/schemaSanitizer.js'
|
||||
import { redactSecretValueForDisplay } from '../../utils/providerProfile.js'
|
||||
import {
|
||||
normalizeToolArguments,
|
||||
hasToolFieldMapping,
|
||||
} from './toolArgumentNormalization.js'
|
||||
|
||||
type SecretValueSource = Partial<{
|
||||
OPENAI_API_KEY: string
|
||||
@@ -51,16 +58,33 @@ type SecretValueSource = Partial<{
|
||||
GEMINI_ACCESS_TOKEN: string
|
||||
}>
|
||||
|
||||
const GITHUB_MODELS_DEFAULT_BASE = 'https://models.github.ai/inference'
|
||||
const GITHUB_API_VERSION = '2022-11-28'
|
||||
const GITHUB_COPILOT_BASE = 'https://api.githubcopilot.com'
|
||||
const GITHUB_429_MAX_RETRIES = 3
|
||||
const GITHUB_429_BASE_DELAY_SEC = 1
|
||||
const GITHUB_429_MAX_DELAY_SEC = 32
|
||||
const GEMINI_API_HOST = 'generativelanguage.googleapis.com'
|
||||
|
||||
const COPILOT_HEADERS: Record<string, string> = {
|
||||
'User-Agent': 'GitHubCopilotChat/0.26.7',
|
||||
'Editor-Version': 'vscode/1.99.3',
|
||||
'Editor-Plugin-Version': 'copilot-chat/0.26.7',
|
||||
'Copilot-Integration-Id': 'vscode-chat',
|
||||
}
|
||||
|
||||
function isGithubModelsMode(): boolean {
|
||||
return isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
}
|
||||
|
||||
function hasGeminiApiHost(baseUrl: string | undefined): boolean {
|
||||
if (!baseUrl) return false
|
||||
|
||||
try {
|
||||
return new URL(baseUrl).hostname.toLowerCase() === GEMINI_API_HOST
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
function formatRetryAfterHint(response: Response): string {
|
||||
const ra = response.headers.get('retry-after')
|
||||
return ra ? ` (Retry-After: ${ra})` : ''
|
||||
@@ -180,10 +204,12 @@ function convertContentBlocks(
|
||||
// handled separately
|
||||
break
|
||||
case 'thinking':
|
||||
// Append thinking as text with a marker for models that support reasoning
|
||||
if (block.thinking) {
|
||||
parts.push({ type: 'text', text: `<thinking>${block.thinking}</thinking>` })
|
||||
}
|
||||
case 'redacted_thinking':
|
||||
// Strip thinking blocks for OpenAI-compatible providers.
|
||||
// These are Anthropic-specific content types that 3P providers
|
||||
// don't understand. Serializing them as <thinking> text corrupts
|
||||
// multi-turn context: the model sees the tags as part of its
|
||||
// previous reply and may mimic or misattribute them.
|
||||
break
|
||||
default:
|
||||
if (block.text) {
|
||||
@@ -197,6 +223,13 @@ function convertContentBlocks(
|
||||
return parts
|
||||
}
|
||||
|
||||
function isGeminiMode(): boolean {
|
||||
return (
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
|
||||
hasGeminiApiHost(process.env.OPENAI_BASE_URL)
|
||||
)
|
||||
}
|
||||
|
||||
function convertMessages(
|
||||
messages: Array<{ role: string; message?: { role?: string; content?: unknown }; content?: unknown }>,
|
||||
system: unknown,
|
||||
@@ -248,6 +281,7 @@ function convertMessages(
|
||||
// Check for tool_use blocks
|
||||
if (Array.isArray(content)) {
|
||||
const toolUses = content.filter((b: { type?: string }) => b.type === 'tool_use')
|
||||
const thinkingBlock = content.find((b: { type?: string }) => b.type === 'thinking')
|
||||
const textContent = content.filter(
|
||||
(b: { type?: string }) => b.type !== 'tool_use' && b.type !== 'thinking',
|
||||
)
|
||||
@@ -267,18 +301,46 @@ function convertMessages(
|
||||
name?: string
|
||||
input?: unknown
|
||||
extra_content?: Record<string, unknown>
|
||||
}) => ({
|
||||
id: tu.id ?? `call_${crypto.randomUUID().replace(/-/g, '')}`,
|
||||
type: 'function' as const,
|
||||
function: {
|
||||
name: tu.name ?? 'unknown',
|
||||
arguments:
|
||||
typeof tu.input === 'string'
|
||||
? tu.input
|
||||
: JSON.stringify(tu.input ?? {}),
|
||||
},
|
||||
...(tu.extra_content ? { extra_content: tu.extra_content } : {}),
|
||||
}),
|
||||
signature?: string
|
||||
}, index) => {
|
||||
const toolCall: NonNullable<OpenAIMessage['tool_calls']>[number] = {
|
||||
id: tu.id ?? `call_${crypto.randomUUID().replace(/-/g, '')}`,
|
||||
type: 'function' as const,
|
||||
function: {
|
||||
name: tu.name ?? 'unknown',
|
||||
arguments:
|
||||
typeof tu.input === 'string'
|
||||
? tu.input
|
||||
: JSON.stringify(tu.input ?? {}),
|
||||
},
|
||||
}
|
||||
|
||||
// Preserve existing extra_content if present
|
||||
if (tu.extra_content) {
|
||||
toolCall.extra_content = { ...tu.extra_content }
|
||||
}
|
||||
|
||||
// Handle Gemini thought_signature
|
||||
if (isGeminiMode()) {
|
||||
// If the model provided a signature in the tool_use block itself (e.g. from a previous Turn/Step)
|
||||
// Use thinkingBlock.signature for ALL tool calls in the same assistant turn if available.
|
||||
// The API requires the same signature on every replayed function call part in a parallel set.
|
||||
const signature = tu.signature ?? (thinkingBlock as any)?.signature
|
||||
|
||||
// Merge into existing google-specific metadata if present
|
||||
const existingGoogle = (toolCall.extra_content?.google as Record<string, unknown>) ?? {}
|
||||
|
||||
toolCall.extra_content = {
|
||||
...toolCall.extra_content,
|
||||
google: {
|
||||
...existingGoogle,
|
||||
thought_signature: signature ?? "skip_thought_signature_validator"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return toolCall
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
@@ -359,11 +421,13 @@ function normalizeSchemaForOpenAI(
|
||||
record.properties = normalizedProps
|
||||
|
||||
if (strict) {
|
||||
// OpenAI strict mode requires every property to be listed in required[]
|
||||
const allKeys = Object.keys(normalizedProps)
|
||||
record.required = Array.from(new Set([...existingRequired, ...allKeys]))
|
||||
// OpenAI strict mode requires additionalProperties: false on all object
|
||||
// schemas — override unconditionally to ensure nested objects comply.
|
||||
// Keep only the properties that were originally marked required in the schema.
|
||||
// Adding every property to required[] (the previous behaviour) caused strict
|
||||
// OpenAI-compatible providers (Groq, Azure, etc.) to reject tool calls because
|
||||
// the model correctly omits optional arguments — but the provider treats them
|
||||
// as missing required fields and returns a 400 / tool_use_failed error.
|
||||
record.required = existingRequired.filter(k => k in normalizedProps)
|
||||
// additionalProperties: false is still required by strict-mode providers.
|
||||
record.additionalProperties = false
|
||||
} else {
|
||||
// For Gemini: keep only existing required keys that are present in properties
|
||||
@@ -397,7 +461,7 @@ function normalizeSchemaForOpenAI(
|
||||
function convertTools(
|
||||
tools: Array<{ name: string; description?: string; input_schema?: Record<string, unknown> }>,
|
||||
): OpenAITool[] {
|
||||
const isGemini = isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||
const isGemini = isGeminiMode()
|
||||
|
||||
return tools
|
||||
.filter(t => t.name !== 'ToolSearchTool') // Not relevant for OpenAI
|
||||
@@ -439,6 +503,7 @@ interface OpenAIStreamChunk {
|
||||
delta: {
|
||||
role?: string
|
||||
content?: string | null
|
||||
reasoning_content?: string | null
|
||||
tool_calls?: Array<{
|
||||
index: number
|
||||
id?: string
|
||||
@@ -476,6 +541,30 @@ function convertChunkUsage(
|
||||
}
|
||||
}
|
||||
|
||||
const JSON_REPAIR_SUFFIXES = [
|
||||
'}', '"}', ']}', '"]}', '}}', '"}}', ']}}', '"]}}', '"]}]}', '}]}'
|
||||
]
|
||||
|
||||
function repairPossiblyTruncatedObjectJson(raw: string): string | null {
|
||||
try {
|
||||
const parsed = JSON.parse(raw)
|
||||
return parsed && typeof parsed === 'object' && !Array.isArray(parsed)
|
||||
? raw
|
||||
: null
|
||||
} catch {
|
||||
for (const combo of JSON_REPAIR_SUFFIXES) {
|
||||
try {
|
||||
const repaired = raw + combo
|
||||
const parsed = JSON.parse(repaired)
|
||||
if (parsed && typeof parsed === 'object' && !Array.isArray(parsed)) {
|
||||
return repaired
|
||||
}
|
||||
} catch {}
|
||||
}
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Async generator that transforms an OpenAI SSE stream into
|
||||
* Anthropic-format BetaRawMessageStreamEvent objects.
|
||||
@@ -486,8 +575,19 @@ async function* openaiStreamToAnthropic(
|
||||
): AsyncGenerator<AnthropicStreamEvent> {
|
||||
const messageId = makeMessageId()
|
||||
let contentBlockIndex = 0
|
||||
const activeToolCalls = new Map<number, { id: string; name: string; index: number; jsonBuffer: string }>()
|
||||
const activeToolCalls = new Map<
|
||||
number,
|
||||
{
|
||||
id: string
|
||||
name: string
|
||||
index: number
|
||||
jsonBuffer: string
|
||||
normalizeAtStop: boolean
|
||||
}
|
||||
>()
|
||||
let hasEmittedContentStart = false
|
||||
let hasEmittedThinkingStart = false
|
||||
let hasClosedThinking = false
|
||||
let lastStopReason: 'tool_use' | 'max_tokens' | 'end_turn' | null = null
|
||||
let hasEmittedFinalUsage = false
|
||||
let hasProcessedFinishReason = false
|
||||
@@ -544,9 +644,34 @@ async function* openaiStreamToAnthropic(
|
||||
for (const choice of chunk.choices ?? []) {
|
||||
const delta = choice.delta
|
||||
|
||||
// Reasoning models (e.g. GLM-5, DeepSeek) may stream chain-of-thought
|
||||
// in `reasoning_content` before the actual reply appears in `content`.
|
||||
// Emit reasoning as a thinking block and content as a text block.
|
||||
if (delta.reasoning_content != null && delta.reasoning_content !== '') {
|
||||
if (!hasEmittedThinkingStart) {
|
||||
yield {
|
||||
type: 'content_block_start',
|
||||
index: contentBlockIndex,
|
||||
content_block: { type: 'thinking', thinking: '' },
|
||||
}
|
||||
hasEmittedThinkingStart = true
|
||||
}
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: contentBlockIndex,
|
||||
delta: { type: 'thinking_delta', thinking: delta.reasoning_content },
|
||||
}
|
||||
}
|
||||
|
||||
// Text content — use != null to distinguish absent field from empty string,
|
||||
// some providers send "" as first delta to signal streaming start
|
||||
if (delta.content != null) {
|
||||
if (delta.content != null && delta.content !== '') {
|
||||
// Close thinking block if transitioning from reasoning to content
|
||||
if (hasEmittedThinkingStart && !hasClosedThinking) {
|
||||
yield { type: 'content_block_stop', index: contentBlockIndex }
|
||||
contentBlockIndex++
|
||||
hasClosedThinking = true
|
||||
}
|
||||
if (!hasEmittedContentStart) {
|
||||
yield {
|
||||
type: 'content_block_start',
|
||||
@@ -566,7 +691,12 @@ async function* openaiStreamToAnthropic(
|
||||
if (delta.tool_calls) {
|
||||
for (const tc of delta.tool_calls) {
|
||||
if (tc.id && tc.function?.name) {
|
||||
// New tool call starting
|
||||
// New tool call starting — close any open thinking block first
|
||||
if (hasEmittedThinkingStart && !hasClosedThinking) {
|
||||
yield { type: 'content_block_stop', index: contentBlockIndex }
|
||||
contentBlockIndex++
|
||||
hasClosedThinking = true
|
||||
}
|
||||
if (hasEmittedContentStart) {
|
||||
yield {
|
||||
type: 'content_block_stop',
|
||||
@@ -577,11 +707,14 @@ async function* openaiStreamToAnthropic(
|
||||
}
|
||||
|
||||
const toolBlockIndex = contentBlockIndex
|
||||
const initialArguments = tc.function.arguments ?? ''
|
||||
const normalizeAtStop = hasToolFieldMapping(tc.function.name)
|
||||
activeToolCalls.set(tc.index, {
|
||||
id: tc.id,
|
||||
name: tc.function.name,
|
||||
index: toolBlockIndex,
|
||||
jsonBuffer: tc.function.arguments ?? '',
|
||||
jsonBuffer: initialArguments,
|
||||
normalizeAtStop,
|
||||
})
|
||||
|
||||
yield {
|
||||
@@ -593,12 +726,19 @@ async function* openaiStreamToAnthropic(
|
||||
name: tc.function.name,
|
||||
input: {},
|
||||
...(tc.extra_content ? { extra_content: tc.extra_content } : {}),
|
||||
// Extract Gemini signature from extra_content
|
||||
...((tc.extra_content?.google as any)?.thought_signature
|
||||
? {
|
||||
signature: (tc.extra_content.google as any)
|
||||
.thought_signature,
|
||||
}
|
||||
: {}),
|
||||
},
|
||||
}
|
||||
contentBlockIndex++
|
||||
|
||||
// Emit any initial arguments
|
||||
if (tc.function.arguments) {
|
||||
if (tc.function.arguments && !normalizeAtStop) {
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: toolBlockIndex,
|
||||
@@ -615,6 +755,11 @@ async function* openaiStreamToAnthropic(
|
||||
if (tc.function.arguments) {
|
||||
active.jsonBuffer += tc.function.arguments
|
||||
}
|
||||
|
||||
if (active.normalizeAtStop) {
|
||||
continue
|
||||
}
|
||||
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: active.index,
|
||||
@@ -633,6 +778,12 @@ async function* openaiStreamToAnthropic(
|
||||
if (choice.finish_reason && !hasProcessedFinishReason) {
|
||||
hasProcessedFinishReason = true
|
||||
|
||||
// Close any open thinking block that wasn't closed by content transition
|
||||
if (hasEmittedThinkingStart && !hasClosedThinking) {
|
||||
yield { type: 'content_block_stop', index: contentBlockIndex }
|
||||
contentBlockIndex++
|
||||
hasClosedThinking = true
|
||||
}
|
||||
// Close any open content blocks
|
||||
if (hasEmittedContentStart) {
|
||||
yield {
|
||||
@@ -642,16 +793,44 @@ async function* openaiStreamToAnthropic(
|
||||
}
|
||||
// Close active tool calls
|
||||
for (const [, tc] of activeToolCalls) {
|
||||
if (tc.normalizeAtStop) {
|
||||
let partialJson: string
|
||||
if (choice.finish_reason === 'length') {
|
||||
// Truncated by max tokens — preserve raw buffer to avoid
|
||||
// turning an incomplete tool call into an executable command
|
||||
partialJson = tc.jsonBuffer
|
||||
} else {
|
||||
const repairedStructuredJson = repairPossiblyTruncatedObjectJson(
|
||||
tc.jsonBuffer,
|
||||
)
|
||||
if (repairedStructuredJson) {
|
||||
partialJson = repairedStructuredJson
|
||||
} else {
|
||||
partialJson = JSON.stringify(
|
||||
normalizeToolArguments(tc.name, tc.jsonBuffer),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
yield {
|
||||
type: 'content_block_delta',
|
||||
index: tc.index,
|
||||
delta: {
|
||||
type: 'input_json_delta',
|
||||
partial_json: partialJson,
|
||||
},
|
||||
}
|
||||
yield { type: 'content_block_stop', index: tc.index }
|
||||
continue
|
||||
}
|
||||
|
||||
let suffixToAdd = ''
|
||||
if (tc.jsonBuffer) {
|
||||
try {
|
||||
JSON.parse(tc.jsonBuffer)
|
||||
} catch {
|
||||
const str = tc.jsonBuffer.trimEnd()
|
||||
const combinations = [
|
||||
'}', '"}', ']}', '"]}', '}}', '"}}', ']}}', '"]}}', '"]}]}', '}]}'
|
||||
]
|
||||
for (const combo of combinations) {
|
||||
for (const combo of JSON_REPAIR_SUFFIXES) {
|
||||
try {
|
||||
JSON.parse(str + combo)
|
||||
suffixToAdd = combo
|
||||
@@ -776,8 +955,9 @@ class OpenAIShimMessages {
|
||||
httpResponse = response
|
||||
|
||||
if (params.stream) {
|
||||
const isResponsesStream = response.url?.includes('/responses')
|
||||
return new OpenAIShimStream(
|
||||
request.transport === 'codex_responses'
|
||||
(request.transport === 'codex_responses' || isResponsesStream)
|
||||
? codexStreamToAnthropic(response, request.resolvedModel)
|
||||
: openaiStreamToAnthropic(response, request.resolvedModel),
|
||||
)
|
||||
@@ -791,8 +971,38 @@ class OpenAIShimMessages {
|
||||
)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
return self._convertNonStreamingResponse(data, request.resolvedModel)
|
||||
const isResponsesNonStream = response.url?.includes('/responses')
|
||||
if (isResponsesNonStream || (request.transport === 'chat_completions' && isGithubModelsMode())) {
|
||||
const contentType = response.headers.get('content-type') ?? ''
|
||||
if (contentType.includes('application/json')) {
|
||||
const parsed = await response.json() as Record<string, unknown>
|
||||
if (
|
||||
parsed &&
|
||||
typeof parsed === 'object' &&
|
||||
('output' in parsed || 'incomplete_details' in parsed)
|
||||
) {
|
||||
return convertCodexResponseToAnthropicMessage(
|
||||
parsed,
|
||||
request.resolvedModel,
|
||||
)
|
||||
}
|
||||
return self._convertNonStreamingResponse(parsed, request.resolvedModel)
|
||||
}
|
||||
}
|
||||
|
||||
const contentType = response.headers.get('content-type') ?? ''
|
||||
if (contentType.includes('application/json')) {
|
||||
const data = await response.json()
|
||||
return self._convertNonStreamingResponse(data, request.resolvedModel)
|
||||
}
|
||||
|
||||
const textBody = await response.text().catch(() => '')
|
||||
throw APIError.generate(
|
||||
response.status,
|
||||
undefined,
|
||||
`OpenAI API error ${response.status}: unexpected response: ${textBody.slice(0, 500)}`,
|
||||
response.headers as unknown as Headers,
|
||||
)
|
||||
})()
|
||||
|
||||
; (promise as unknown as Record<string, unknown>).withResponse =
|
||||
@@ -814,7 +1024,36 @@ class OpenAIShimMessages {
|
||||
params: ShimCreateParams,
|
||||
options?: { signal?: AbortSignal; headers?: Record<string, string> },
|
||||
): Promise<Response> {
|
||||
if (request.transport === 'codex_responses') {
|
||||
const githubEndpointType = getGithubEndpointType(request.baseUrl)
|
||||
const isGithubMode = isGithubModelsMode()
|
||||
const isGithubWithCodexTransport = isGithubMode && request.transport === 'codex_responses'
|
||||
const isGithubCopilotEndpoint = isGithubMode && githubEndpointType === 'copilot'
|
||||
|
||||
if (isGithubWithCodexTransport) {
|
||||
const apiKey = this.providerOverride?.apiKey ?? process.env.OPENAI_API_KEY ?? ''
|
||||
if (!apiKey) {
|
||||
throw new Error(
|
||||
'GitHub Copilot auth is required. Run /onboard-github to sign in.',
|
||||
)
|
||||
}
|
||||
|
||||
return performCodexRequest({
|
||||
request,
|
||||
credentials: {
|
||||
apiKey,
|
||||
source: 'env',
|
||||
},
|
||||
params,
|
||||
defaultHeaders: {
|
||||
...this.defaultHeaders,
|
||||
...(options?.headers ?? {}),
|
||||
...COPILOT_HEADERS,
|
||||
},
|
||||
signal: options?.signal,
|
||||
})
|
||||
}
|
||||
|
||||
if (request.transport === 'codex_responses' && !isGithubMode) {
|
||||
const credentials = resolveCodexApiCredentials()
|
||||
if (!credentials.apiKey) {
|
||||
const authHint = credentials.authPath
|
||||
@@ -888,6 +1127,10 @@ class OpenAIShimMessages {
|
||||
}
|
||||
|
||||
const isGithub = isGithubModelsMode()
|
||||
const githubEndpointType = getGithubEndpointType(request.baseUrl)
|
||||
const isGithubCopilot = isGithub && githubEndpointType === 'copilot'
|
||||
const isGithubModels = isGithub && (githubEndpointType === 'models' || githubEndpointType === 'custom')
|
||||
|
||||
if (isGithub && body.max_completion_tokens !== undefined) {
|
||||
body.max_tokens = body.max_completion_tokens
|
||||
delete body.max_completion_tokens
|
||||
@@ -930,7 +1173,7 @@ class OpenAIShimMessages {
|
||||
...(options?.headers ?? {}),
|
||||
}
|
||||
|
||||
const isGemini = isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
|
||||
const isGemini = isGeminiMode()
|
||||
const apiKey =
|
||||
this.providerOverride?.apiKey ?? process.env.OPENAI_API_KEY ?? ''
|
||||
// Detect Azure endpoints by hostname (not raw URL) to prevent bypass via
|
||||
@@ -953,15 +1196,17 @@ class OpenAIShimMessages {
|
||||
const geminiCredential = await resolveGeminiCredential(process.env)
|
||||
if (geminiCredential.kind !== 'none') {
|
||||
headers.Authorization = `Bearer ${geminiCredential.credential}`
|
||||
if (geminiCredential.projectId) {
|
||||
if (geminiCredential.kind !== 'api-key' && 'projectId' in geminiCredential && geminiCredential.projectId) {
|
||||
headers['x-goog-user-project'] = geminiCredential.projectId
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (isGithub) {
|
||||
headers.Accept = 'application/vnd.github.v3+json'
|
||||
headers['X-GitHub-Api-Version'] = GITHUB_API_VERSION
|
||||
if (isGithubCopilot) {
|
||||
Object.assign(headers, COPILOT_HEADERS)
|
||||
} else if (isGithubModels) {
|
||||
headers['Accept'] = 'application/vnd.github+json'
|
||||
headers['X-GitHub-Api-Version'] = '2022-11-28'
|
||||
}
|
||||
|
||||
// Build the chat completions URL
|
||||
@@ -1013,9 +1258,82 @@ class OpenAIShimMessages {
|
||||
await sleepMs(delaySec * 1000)
|
||||
continue
|
||||
}
|
||||
// Read body exactly once here — Response body is a stream that can only
|
||||
// be consumed a single time.
|
||||
const errorBody = await response.text().catch(() => 'unknown error')
|
||||
const rateHint =
|
||||
isGithub && response.status === 429 ? formatRetryAfterHint(response) : ''
|
||||
|
||||
// If GitHub Copilot returns error about /chat/completions,
|
||||
// try the /responses endpoint (needed for GPT-5+ models)
|
||||
if (isGithub && response.status === 400) {
|
||||
if (errorBody.includes('/chat/completions') || errorBody.includes('not accessible')) {
|
||||
const responsesUrl = `${request.baseUrl}/responses`
|
||||
const responsesBody: Record<string, unknown> = {
|
||||
model: request.resolvedModel,
|
||||
input: convertAnthropicMessagesToResponsesInput(
|
||||
params.messages as Array<{
|
||||
role?: string
|
||||
message?: { role?: string; content?: unknown }
|
||||
content?: unknown
|
||||
}>,
|
||||
),
|
||||
stream: params.stream ?? false,
|
||||
}
|
||||
|
||||
if (!Array.isArray(responsesBody.input) || responsesBody.input.length === 0) {
|
||||
responsesBody.input = [
|
||||
{
|
||||
type: 'message',
|
||||
role: 'user',
|
||||
content: [{ type: 'input_text', text: '' }],
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
const systemText = convertSystemPrompt(params.system)
|
||||
if (systemText) {
|
||||
responsesBody.instructions = systemText
|
||||
}
|
||||
|
||||
if (body.max_tokens !== undefined) {
|
||||
responsesBody.max_output_tokens = body.max_tokens
|
||||
}
|
||||
|
||||
if (params.tools && params.tools.length > 0) {
|
||||
const convertedTools = convertToolsToResponsesTools(
|
||||
params.tools as Array<{
|
||||
name?: string
|
||||
description?: string
|
||||
input_schema?: Record<string, unknown>
|
||||
}>,
|
||||
)
|
||||
if (convertedTools.length > 0) {
|
||||
responsesBody.tools = convertedTools
|
||||
}
|
||||
}
|
||||
|
||||
const responsesResponse = await fetch(responsesUrl, {
|
||||
method: 'POST',
|
||||
headers,
|
||||
body: JSON.stringify(responsesBody),
|
||||
signal: options?.signal,
|
||||
})
|
||||
if (responsesResponse.ok) {
|
||||
return responsesResponse
|
||||
}
|
||||
const responsesErrorBody = await responsesResponse.text().catch(() => 'unknown error')
|
||||
let responsesErrorResponse: object | undefined
|
||||
try { responsesErrorResponse = JSON.parse(responsesErrorBody) } catch { /* raw text */ }
|
||||
throw APIError.generate(
|
||||
responsesResponse.status,
|
||||
responsesErrorResponse,
|
||||
`OpenAI API error ${responsesResponse.status}: ${responsesErrorBody}`,
|
||||
responsesResponse.headers,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
let errorResponse: object | undefined
|
||||
try { errorResponse = JSON.parse(errorBody) } catch { /* raw text */ }
|
||||
throw APIError.generate(
|
||||
@@ -1043,6 +1361,7 @@ class OpenAIShimMessages {
|
||||
| string
|
||||
| null
|
||||
| Array<{ type?: string; text?: string }>
|
||||
reasoning_content?: string | null
|
||||
tool_calls?: Array<{
|
||||
id: string
|
||||
function: { name: string; arguments: string }
|
||||
@@ -1064,7 +1383,17 @@ class OpenAIShimMessages {
|
||||
const choice = data.choices?.[0]
|
||||
const content: Array<Record<string, unknown>> = []
|
||||
|
||||
const rawContent = choice?.message?.content
|
||||
// Some reasoning models (e.g. GLM-5) put their reply in reasoning_content
|
||||
// while content stays null — emit reasoning as a thinking block, then
|
||||
// fall back to it for visible text if content is empty.
|
||||
const reasoningText = choice?.message?.reasoning_content
|
||||
if (typeof reasoningText === 'string' && reasoningText) {
|
||||
content.push({ type: 'thinking', thinking: reasoningText })
|
||||
}
|
||||
const rawContent =
|
||||
choice?.message?.content !== '' && choice?.message?.content != null
|
||||
? choice?.message?.content
|
||||
: choice?.message?.reasoning_content
|
||||
if (typeof rawContent === 'string' && rawContent) {
|
||||
content.push({ type: 'text', text: rawContent })
|
||||
} else if (Array.isArray(rawContent) && rawContent.length > 0) {
|
||||
@@ -1087,18 +1416,20 @@ class OpenAIShimMessages {
|
||||
|
||||
if (choice?.message?.tool_calls) {
|
||||
for (const tc of choice.message.tool_calls) {
|
||||
let input: unknown
|
||||
try {
|
||||
input = JSON.parse(tc.function.arguments)
|
||||
} catch {
|
||||
input = { raw: tc.function.arguments }
|
||||
}
|
||||
const input = normalizeToolArguments(
|
||||
tc.function.name,
|
||||
tc.function.arguments,
|
||||
)
|
||||
content.push({
|
||||
type: 'tool_use',
|
||||
id: tc.id,
|
||||
name: tc.function.name,
|
||||
input,
|
||||
...(tc.extra_content ? { extra_content: tc.extra_content } : {}),
|
||||
// Extract Gemini signature from extra_content
|
||||
...((tc.extra_content?.google as any)?.thought_signature
|
||||
? { signature: (tc.extra_content.google as any).thought_signature }
|
||||
: {}),
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1170,7 +1501,7 @@ export function createOpenAIShimClient(options: {
|
||||
process.env.OPENAI_MODEL = process.env.GEMINI_MODEL
|
||||
}
|
||||
} else if (isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) {
|
||||
process.env.OPENAI_BASE_URL ??= GITHUB_MODELS_DEFAULT_BASE
|
||||
process.env.OPENAI_BASE_URL ??= GITHUB_COPILOT_BASE
|
||||
process.env.OPENAI_API_KEY ??=
|
||||
process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN ?? ''
|
||||
}
|
||||
|
||||
@@ -23,6 +23,9 @@ test.each([
|
||||
['github:gpt-4o', 'gpt-4o'],
|
||||
['gpt-4o', 'gpt-4o'],
|
||||
['github:copilot?reasoning=high', DEFAULT_GITHUB_MODELS_API_MODEL],
|
||||
// normalizeGithubModelsApiModel preserves provider prefix for models.github.ai compatibility
|
||||
['github:openai/gpt-4.1', 'openai/gpt-4.1'],
|
||||
['openai/gpt-4.1', 'openai/gpt-4.1'],
|
||||
] as const)('normalizeGithubModelsApiModel(%s) -> %s', (input, expected) => {
|
||||
expect(normalizeGithubModelsApiModel(input)).toBe(expected)
|
||||
})
|
||||
@@ -34,6 +37,20 @@ test('resolveProviderRequest applies GitHub normalization when CLAUDE_CODE_USE_G
|
||||
expect(r.transport).toBe('chat_completions')
|
||||
})
|
||||
|
||||
test('resolveProviderRequest routes GitHub GPT-5 codex models to responses transport', () => {
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
const r = resolveProviderRequest({ model: 'gpt-5.3-codex' })
|
||||
expect(r.resolvedModel).toBe('gpt-5.3-codex')
|
||||
expect(r.transport).toBe('codex_responses')
|
||||
})
|
||||
|
||||
test('resolveProviderRequest keeps gpt-5-mini on chat_completions for GitHub', () => {
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
const r = resolveProviderRequest({ model: 'gpt-5-mini' })
|
||||
expect(r.resolvedModel).toBe('gpt-5-mini')
|
||||
expect(r.transport).toBe('chat_completions')
|
||||
})
|
||||
|
||||
test('resolveProviderRequest leaves model unchanged without GitHub flag', () => {
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
const r = resolveProviderRequest({ model: 'github:gpt-4o' })
|
||||
|
||||
@@ -7,8 +7,8 @@ import { isEnvTruthy } from '../../utils/envUtils.js'
|
||||
|
||||
export const DEFAULT_OPENAI_BASE_URL = 'https://api.openai.com/v1'
|
||||
export const DEFAULT_CODEX_BASE_URL = 'https://chatgpt.com/backend-api/codex'
|
||||
/** Default GitHub Models API model when user selects copilot / github:copilot */
|
||||
export const DEFAULT_GITHUB_MODELS_API_MODEL = 'openai/gpt-4.1'
|
||||
/** Default GitHub Copilot API model when user selects copilot / github:copilot */
|
||||
export const DEFAULT_GITHUB_MODELS_API_MODEL = 'gpt-4o'
|
||||
|
||||
const CODEX_ALIAS_MODELS: Record<
|
||||
string,
|
||||
@@ -227,6 +227,21 @@ export function shouldUseCodexTransport(
|
||||
return isCodexBaseUrl(explicitBaseUrl) || (!explicitBaseUrl && isCodexAlias(model))
|
||||
}
|
||||
|
||||
function shouldUseGithubResponsesApi(model: string): boolean {
|
||||
const normalized = model.trim().toLowerCase()
|
||||
|
||||
// Codex-branded models require /responses.
|
||||
if (normalized.includes('codex')) return true
|
||||
|
||||
// GPT-5+ models use /responses, except gpt-5-mini.
|
||||
const match = /^gpt-(\d+)/.exec(normalized)
|
||||
if (!match) return false
|
||||
const major = Number(match[1])
|
||||
if (major < 5) return false
|
||||
if (normalized.startsWith('gpt-5-mini')) return false
|
||||
return true
|
||||
}
|
||||
|
||||
export function isLocalProviderUrl(baseUrl: string | undefined): boolean {
|
||||
if (!baseUrl) return false
|
||||
try {
|
||||
@@ -280,19 +295,61 @@ export function isCodexBaseUrl(baseUrl: string | undefined): boolean {
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalize user model string for GitHub Models inference (models.github.ai).
|
||||
* Mirrors runtime devsper `github._normalize_model_id`.
|
||||
* Normalize user model string for GitHub Copilot API inference.
|
||||
* Mirrors how Copilot resolves model IDs internally.
|
||||
*/
|
||||
export function normalizeGithubModelsApiModel(requestedModel: string): string {
|
||||
export function normalizeGithubCopilotModel(requestedModel: string): string {
|
||||
const noQuery = requestedModel.split('?', 1)[0] ?? requestedModel
|
||||
const segment =
|
||||
noQuery.includes(':') ? noQuery.split(':', 2)[1]!.trim() : noQuery.trim()
|
||||
if (!segment || segment.toLowerCase() === 'copilot') {
|
||||
return DEFAULT_GITHUB_MODELS_API_MODEL
|
||||
}
|
||||
// Strip provider prefix if present (e.g., "openai/gpt-4o" -> "gpt-4o")
|
||||
const slashIndex = segment.indexOf('/')
|
||||
if (slashIndex !== -1) {
|
||||
return segment.slice(slashIndex + 1)
|
||||
}
|
||||
return segment
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalize user model string for GitHub Models API inference.
|
||||
* Only normalizes the default alias, preserves provider-qualified models.
|
||||
*/
|
||||
export function normalizeGithubModelsApiModel(requestedModel: string): string {
|
||||
const noQuery = requestedModel.split('?', 1)[0] ?? requestedModel
|
||||
const segment =
|
||||
noQuery.includes(':') ? noQuery.split(':', 2)[1]!.trim() : noQuery.trim()
|
||||
// Only normalize the default alias for GitHub Models
|
||||
if (!segment || segment.toLowerCase() === 'copilot') {
|
||||
return DEFAULT_GITHUB_MODELS_API_MODEL
|
||||
}
|
||||
// Preserve provider prefix for GitHub Models (e.g., "openai/gpt-4.1" stays as-is)
|
||||
return segment
|
||||
}
|
||||
|
||||
export const GITHUB_COPILOT_BASE_URL = 'https://api.githubcopilot.com'
|
||||
export const GITHUB_MODELS_BASE_URL = 'https://models.github.ai/inference'
|
||||
|
||||
export function getGithubEndpointType(
|
||||
baseUrl: string | undefined,
|
||||
): 'copilot' | 'models' | 'custom' {
|
||||
if (!baseUrl) return 'copilot'
|
||||
try {
|
||||
const hostname = new URL(baseUrl).hostname.toLowerCase()
|
||||
if (hostname === 'api.githubcopilot.com') {
|
||||
return 'copilot'
|
||||
}
|
||||
if (hostname === 'models.github.ai' || hostname.endsWith('.github.ai')) {
|
||||
return 'models'
|
||||
}
|
||||
return 'custom'
|
||||
} catch {
|
||||
return 'copilot'
|
||||
}
|
||||
}
|
||||
|
||||
export function resolveProviderRequest(options?: {
|
||||
model?: string
|
||||
baseUrl?: string
|
||||
@@ -310,31 +367,49 @@ export function resolveProviderRequest(options?: {
|
||||
asEnvUrl(options?.baseUrl) ??
|
||||
asEnvUrl(process.env.OPENAI_BASE_URL) ??
|
||||
asEnvUrl(process.env.OPENAI_API_BASE)
|
||||
|
||||
const githubEndpointType = isGithubMode
|
||||
? getGithubEndpointType(rawBaseUrl)
|
||||
: 'custom'
|
||||
const isGithubCopilot = isGithubMode && githubEndpointType === 'copilot'
|
||||
const isGithubModels = isGithubMode && githubEndpointType === 'models'
|
||||
const isGithubCustom = isGithubMode && githubEndpointType === 'custom'
|
||||
|
||||
const githubResolvedModel = isGithubMode
|
||||
? normalizeGithubModelsApiModel(requestedModel)
|
||||
: requestedModel
|
||||
|
||||
const transport: ProviderTransport =
|
||||
shouldUseCodexTransport(requestedModel, rawBaseUrl)
|
||||
shouldUseCodexTransport(requestedModel, rawBaseUrl) ||
|
||||
(isGithubCopilot && shouldUseGithubResponsesApi(githubResolvedModel))
|
||||
? 'codex_responses'
|
||||
: 'chat_completions'
|
||||
|
||||
const resolvedModel =
|
||||
transport === 'chat_completions' &&
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
? normalizeGithubModelsApiModel(requestedModel)
|
||||
: descriptor.baseModel
|
||||
// For GitHub Copilot API, normalize to real model ID (e.g., "github:copilot" -> "gpt-4o")
|
||||
// For GitHub Models/custom endpoints:
|
||||
// - Normalize default alias (github:copilot -> gpt-4o)
|
||||
// - Preserve provider-qualified models (openai/gpt-4.1 stays as-is)
|
||||
const resolvedModel = isGithubCopilot
|
||||
? normalizeGithubCopilotModel(descriptor.baseModel)
|
||||
: (isGithubModels || isGithubCustom
|
||||
? normalizeGithubModelsApiModel(descriptor.baseModel)
|
||||
: descriptor.baseModel)
|
||||
|
||||
const reasoning = options?.reasoningEffortOverride
|
||||
? { effort: options.reasoningEffortOverride }
|
||||
: descriptor.reasoning
|
||||
|
||||
|
||||
return {
|
||||
transport,
|
||||
requestedModel,
|
||||
resolvedModel,
|
||||
baseUrl:
|
||||
(rawBaseUrl ??
|
||||
(transport === 'codex_responses'
|
||||
? DEFAULT_CODEX_BASE_URL
|
||||
: DEFAULT_OPENAI_BASE_URL)
|
||||
(isGithubCopilot && transport === 'codex_responses'
|
||||
? GITHUB_COPILOT_BASE_URL
|
||||
: (isGithubMode
|
||||
? GITHUB_COPILOT_BASE_URL
|
||||
: DEFAULT_OPENAI_BASE_URL))
|
||||
).replace(/\/+$/, ''),
|
||||
reasoning,
|
||||
}
|
||||
|
||||
180
src/services/api/toolArgumentNormalization.test.ts
Normal file
180
src/services/api/toolArgumentNormalization.test.ts
Normal file
@@ -0,0 +1,180 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
import { normalizeToolArguments } from './toolArgumentNormalization'
|
||||
|
||||
describe('normalizeToolArguments', () => {
|
||||
describe('Bash tool', () => {
|
||||
test('wraps plain string into { command }', () => {
|
||||
expect(normalizeToolArguments('Bash', 'pwd')).toEqual({ command: 'pwd' })
|
||||
})
|
||||
|
||||
test('wraps multi-word command', () => {
|
||||
expect(normalizeToolArguments('Bash', 'ls -la /tmp')).toEqual({
|
||||
command: 'ls -la /tmp',
|
||||
})
|
||||
})
|
||||
|
||||
test('passes through structured JSON object', () => {
|
||||
expect(
|
||||
normalizeToolArguments('Bash', '{"command":"echo hi"}'),
|
||||
).toEqual({ command: 'echo hi' })
|
||||
})
|
||||
|
||||
test('returns empty object for blank string', () => {
|
||||
expect(normalizeToolArguments('Bash', '')).toEqual({})
|
||||
expect(normalizeToolArguments('Bash', ' ')).toEqual({})
|
||||
})
|
||||
|
||||
test('returns parsed blank for JSON-encoded blank string', () => {
|
||||
expect(normalizeToolArguments('Bash', '""')).toEqual('')
|
||||
expect(normalizeToolArguments('Bash', '" "')).toEqual(' ')
|
||||
})
|
||||
|
||||
test('returns empty object for malformed structured object literal', () => {
|
||||
expect(normalizeToolArguments('Bash', '{ "command": "pwd"')).toEqual({})
|
||||
})
|
||||
|
||||
test.each([
|
||||
['{command:"pwd"}'],
|
||||
["{'command':'pwd'}"],
|
||||
['{command: pwd}'],
|
||||
])(
|
||||
'returns empty object for malformed object-shaped string %s (does not wrap into command)',
|
||||
(input) => {
|
||||
expect(normalizeToolArguments('Bash', input)).toEqual({})
|
||||
},
|
||||
)
|
||||
|
||||
test.each([
|
||||
['false', false],
|
||||
['null', null],
|
||||
['[]', [] as unknown[]],
|
||||
['0', 0],
|
||||
['true', true],
|
||||
['123', 123],
|
||||
])(
|
||||
'preserves JSON literal %s as-is (does not wrap into command)',
|
||||
(input, expected) => {
|
||||
expect(normalizeToolArguments('Bash', input)).toEqual(expected)
|
||||
},
|
||||
)
|
||||
|
||||
test('wraps JSON-encoded string into { command }', () => {
|
||||
expect(normalizeToolArguments('Bash', '"pwd"')).toEqual({
|
||||
command: 'pwd',
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('undefined arguments', () => {
|
||||
test('returns empty object for undefined', () => {
|
||||
expect(normalizeToolArguments('Bash', undefined)).toEqual({})
|
||||
expect(normalizeToolArguments('UnknownTool', undefined)).toEqual({})
|
||||
})
|
||||
})
|
||||
|
||||
describe('Read tool', () => {
|
||||
test('wraps plain string into { file_path }', () => {
|
||||
expect(normalizeToolArguments('Read', '/home/user/file.txt')).toEqual({
|
||||
file_path: '/home/user/file.txt',
|
||||
})
|
||||
})
|
||||
|
||||
test('wraps JSON-encoded string into { file_path }', () => {
|
||||
expect(normalizeToolArguments('Read', '"/home/user/file.txt"')).toEqual({
|
||||
file_path: '/home/user/file.txt',
|
||||
})
|
||||
})
|
||||
|
||||
test('passes through structured JSON object', () => {
|
||||
expect(
|
||||
normalizeToolArguments('Read', '{"file_path":"/tmp/f.txt","limit":10}'),
|
||||
).toEqual({ file_path: '/tmp/f.txt', limit: 10 })
|
||||
})
|
||||
})
|
||||
|
||||
describe('Write tool', () => {
|
||||
test('wraps plain string into { file_path }', () => {
|
||||
expect(normalizeToolArguments('Write', '/tmp/out.txt')).toEqual({
|
||||
file_path: '/tmp/out.txt',
|
||||
})
|
||||
})
|
||||
|
||||
test('passes through structured JSON object', () => {
|
||||
expect(
|
||||
normalizeToolArguments(
|
||||
'Write',
|
||||
'{"file_path":"/tmp/out.txt","content":"hello"}',
|
||||
),
|
||||
).toEqual({ file_path: '/tmp/out.txt', content: 'hello' })
|
||||
})
|
||||
})
|
||||
|
||||
describe('Edit tool', () => {
|
||||
test('wraps plain string into { file_path }', () => {
|
||||
expect(normalizeToolArguments('Edit', '/tmp/edit.ts')).toEqual({
|
||||
file_path: '/tmp/edit.ts',
|
||||
})
|
||||
})
|
||||
|
||||
test('passes through structured JSON object', () => {
|
||||
expect(
|
||||
normalizeToolArguments(
|
||||
'Edit',
|
||||
'{"file_path":"/tmp/f.ts","old_string":"a","new_string":"b"}',
|
||||
),
|
||||
).toEqual({ file_path: '/tmp/f.ts', old_string: 'a', new_string: 'b' })
|
||||
})
|
||||
})
|
||||
|
||||
describe('Glob tool', () => {
|
||||
test('wraps plain string into { pattern }', () => {
|
||||
expect(normalizeToolArguments('Glob', '**/*.ts')).toEqual({
|
||||
pattern: '**/*.ts',
|
||||
})
|
||||
})
|
||||
|
||||
test('passes through structured JSON object', () => {
|
||||
expect(
|
||||
normalizeToolArguments('Glob', '{"pattern":"*.js","path":"/src"}'),
|
||||
).toEqual({ pattern: '*.js', path: '/src' })
|
||||
})
|
||||
})
|
||||
|
||||
describe('Grep tool', () => {
|
||||
test('wraps plain string into { pattern }', () => {
|
||||
expect(normalizeToolArguments('Grep', 'TODO')).toEqual({
|
||||
pattern: 'TODO',
|
||||
})
|
||||
})
|
||||
|
||||
test('passes through structured JSON object', () => {
|
||||
expect(
|
||||
normalizeToolArguments('Grep', '{"pattern":"fixme","path":"/src"}'),
|
||||
).toEqual({ pattern: 'fixme', path: '/src' })
|
||||
})
|
||||
})
|
||||
|
||||
describe('unknown tools', () => {
|
||||
test('returns empty object for plain string (no known field mapping)', () => {
|
||||
expect(normalizeToolArguments('UnknownTool', 'some value')).toEqual({})
|
||||
})
|
||||
|
||||
test('passes through structured JSON object', () => {
|
||||
expect(
|
||||
normalizeToolArguments('UnknownTool', '{"key":"val"}'),
|
||||
).toEqual({ key: 'val' })
|
||||
})
|
||||
|
||||
test('preserves JSON literals as-is', () => {
|
||||
expect(normalizeToolArguments('UnknownTool', 'false')).toEqual(false)
|
||||
expect(normalizeToolArguments('UnknownTool', 'null')).toEqual(null)
|
||||
expect(normalizeToolArguments('UnknownTool', '[]')).toEqual([])
|
||||
})
|
||||
|
||||
test('returns parsed string for JSON-encoded string on unknown tools', () => {
|
||||
expect(normalizeToolArguments('UnknownTool', '"hello"')).toEqual(
|
||||
'hello',
|
||||
)
|
||||
})
|
||||
})
|
||||
})
|
||||
69
src/services/api/toolArgumentNormalization.ts
Normal file
69
src/services/api/toolArgumentNormalization.ts
Normal file
@@ -0,0 +1,69 @@
|
||||
const STRING_ARGUMENT_TOOL_FIELDS: Record<string, string> = {
|
||||
Bash: 'command',
|
||||
Read: 'file_path',
|
||||
Write: 'file_path',
|
||||
Edit: 'file_path',
|
||||
Glob: 'pattern',
|
||||
Grep: 'pattern',
|
||||
}
|
||||
|
||||
function isBlankString(value: string): boolean {
|
||||
return value.trim().length === 0
|
||||
}
|
||||
|
||||
function isLikelyStructuredObjectLiteral(value: string): boolean {
|
||||
// Match object-like patterns with key-value syntax:
|
||||
// {"key":, {key:, {'key':, { "key" :, etc.
|
||||
// But NOT bash compound commands like { pwd; } or { echo hi; }
|
||||
return /^\s*\{\s*['"]?\w+['"]?\s*:/.test(value)
|
||||
}
|
||||
|
||||
function isRecord(value: unknown): value is Record<string, unknown> {
|
||||
return typeof value === 'object' && value !== null && !Array.isArray(value)
|
||||
}
|
||||
|
||||
function getPlainStringToolArgumentField(toolName: string): string | null {
|
||||
return STRING_ARGUMENT_TOOL_FIELDS[toolName] ?? null
|
||||
}
|
||||
|
||||
export function hasToolFieldMapping(toolName: string): boolean {
|
||||
return toolName in STRING_ARGUMENT_TOOL_FIELDS
|
||||
}
|
||||
|
||||
function wrapPlainStringToolArguments(
|
||||
toolName: string,
|
||||
value: string,
|
||||
): Record<string, string> | null {
|
||||
const field = getPlainStringToolArgumentField(toolName)
|
||||
if (!field) return null
|
||||
return { [field]: value }
|
||||
}
|
||||
|
||||
export function normalizeToolArguments(
|
||||
toolName: string,
|
||||
rawArguments: string | undefined,
|
||||
): unknown {
|
||||
if (rawArguments === undefined) return {}
|
||||
|
||||
try {
|
||||
const parsed = JSON.parse(rawArguments)
|
||||
if (isRecord(parsed)) {
|
||||
return parsed
|
||||
}
|
||||
// Parsed as a non-object JSON value (string, number, boolean, null, array)
|
||||
if (typeof parsed === 'string' && !isBlankString(parsed)) {
|
||||
return wrapPlainStringToolArguments(toolName, parsed) ?? parsed
|
||||
}
|
||||
// For blank strings, booleans, null, arrays — pass through as-is
|
||||
// and let Zod schema validation produce a meaningful error
|
||||
return parsed
|
||||
} catch {
|
||||
// rawArguments is not valid JSON — treat as a plain string
|
||||
if (isBlankString(rawArguments) || isLikelyStructuredObjectLiteral(rawArguments)) {
|
||||
// Blank or looks like a malformed object literal — don't wrap into
|
||||
// a tool field to avoid turning garbage into executable input
|
||||
return {}
|
||||
}
|
||||
return wrapPlainStringToolArguments(toolName, rawArguments) ?? {}
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
import { afterEach, describe, expect, mock, test } from 'bun:test'
|
||||
import { afterEach, beforeEach, describe, expect, mock, test } from 'bun:test'
|
||||
import { APIError } from '@anthropic-ai/sdk'
|
||||
|
||||
// Helper to build a mock APIError with specific headers
|
||||
@@ -15,15 +15,27 @@ function makeError(headers: Record<string, string>): APIError {
|
||||
|
||||
// Save/restore env vars between tests
|
||||
const originalEnv = { ...process.env }
|
||||
|
||||
const envKeys = [
|
||||
'CLAUDE_CODE_USE_OPENAI',
|
||||
'CLAUDE_CODE_USE_GEMINI',
|
||||
'CLAUDE_CODE_USE_GITHUB',
|
||||
'CLAUDE_CODE_USE_BEDROCK',
|
||||
'CLAUDE_CODE_USE_VERTEX',
|
||||
'CLAUDE_CODE_USE_FOUNDRY',
|
||||
'OPENAI_MODEL',
|
||||
'OPENAI_BASE_URL',
|
||||
'OPENAI_API_BASE',
|
||||
] as const
|
||||
|
||||
beforeEach(() => {
|
||||
for (const key of envKeys) {
|
||||
delete process.env[key]
|
||||
}
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
for (const key of [
|
||||
'CLAUDE_CODE_USE_OPENAI',
|
||||
'CLAUDE_CODE_USE_GEMINI',
|
||||
'CLAUDE_CODE_USE_GITHUB',
|
||||
'CLAUDE_CODE_USE_BEDROCK',
|
||||
'CLAUDE_CODE_USE_VERTEX',
|
||||
'CLAUDE_CODE_USE_FOUNDRY',
|
||||
]) {
|
||||
for (const key of envKeys) {
|
||||
if (originalEnv[key] === undefined) delete process.env[key]
|
||||
else process.env[key] = originalEnv[key]
|
||||
}
|
||||
|
||||
106
src/services/autoFix/autoFixConfig.test.ts
Normal file
106
src/services/autoFix/autoFixConfig.test.ts
Normal file
@@ -0,0 +1,106 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
import { AutoFixConfigSchema, getAutoFixConfig, type AutoFixConfig } from './autoFixConfig.js'
|
||||
|
||||
// Schema-level validation: shape, defaults, refinement, and numeric bounds.
describe('AutoFixConfigSchema', () => {
  test('parses valid full config', () => {
    const input = {
      enabled: true,
      lint: 'eslint . --fix',
      test: 'bun test',
      maxRetries: 3,
      timeout: 30000,
    }
    const result = AutoFixConfigSchema.safeParse(input)
    expect(result.success).toBe(true)
    if (result.success) {
      expect(result.data.enabled).toBe(true)
      expect(result.data.lint).toBe('eslint . --fix')
      expect(result.data.test).toBe('bun test')
      expect(result.data.maxRetries).toBe(3)
      expect(result.data.timeout).toBe(30000)
    }
  })

  test('parses minimal config with defaults', () => {
    const input = { enabled: true, lint: 'eslint .' }
    const result = AutoFixConfigSchema.safeParse(input)
    expect(result.success).toBe(true)
    if (result.success) {
      // maxRetries/timeout fall back to schema defaults; test stays unset.
      expect(result.data.maxRetries).toBe(3)
      expect(result.data.timeout).toBe(30000)
      expect(result.data.test).toBeUndefined()
    }
  })

  // The refine() requires lint or test whenever enabled is true.
  test('rejects config with enabled but no lint or test', () => {
    const input = { enabled: true }
    const result = AutoFixConfigSchema.safeParse(input)
    expect(result.success).toBe(false)
  })

  test('accepts disabled config without commands', () => {
    const input = { enabled: false }
    const result = AutoFixConfigSchema.safeParse(input)
    expect(result.success).toBe(true)
  })

  test('rejects negative maxRetries', () => {
    const input = { enabled: true, lint: 'eslint .', maxRetries: -1 }
    const result = AutoFixConfigSchema.safeParse(input)
    expect(result.success).toBe(false)
  })

  test('rejects maxRetries above 10', () => {
    const input = { enabled: true, lint: 'eslint .', maxRetries: 11 }
    const result = AutoFixConfigSchema.safeParse(input)
    expect(result.success).toBe(false)
  })
})
|
||||
|
||||
// getAutoFixConfig returns null for anything that should not trigger
// auto-fix: missing settings, invalid settings, or a disabled feature.
describe('getAutoFixConfig', () => {
  test('returns null when settings have no autoFix', () => {
    const result = getAutoFixConfig(undefined)
    expect(result).toBeNull()
  })

  test('returns null when autoFix is disabled', () => {
    const result = getAutoFixConfig({ enabled: false })
    expect(result).toBeNull()
  })

  test('returns parsed config when valid and enabled', () => {
    const result = getAutoFixConfig({ enabled: true, lint: 'eslint .' })
    expect(result).not.toBeNull()
    expect(result!.enabled).toBe(true)
    expect(result!.lint).toBe('eslint .')
  })
})
|
||||
|
||||
// The autoFix schema must be wired into the top-level SettingsSchema so both
// valid and invalid blocks are handled at the settings-file level.
describe('SettingsSchema autoFix integration', () => {
  test('SettingsSchema accepts autoFix field', async () => {
    const { SettingsSchema } = await import('../../utils/settings/types.js')
    const settings = {
      autoFix: {
        enabled: true,
        lint: 'eslint .',
        test: 'bun test',
        maxRetries: 3,
        timeout: 30000,
      },
    }
    const result = SettingsSchema().safeParse(settings)
    expect(result.success).toBe(true)
  })

  test('SettingsSchema rejects invalid autoFix', async () => {
    const { SettingsSchema } = await import('../../utils/settings/types.js')
    const settings = {
      autoFix: {
        enabled: true,
        // missing lint and test - should fail refine
      },
    }
    const result = SettingsSchema().safeParse(settings)
    expect(result.success).toBe(false)
  })
})
|
||||
52
src/services/autoFix/autoFixConfig.ts
Normal file
52
src/services/autoFix/autoFixConfig.ts
Normal file
@@ -0,0 +1,52 @@
|
||||
import { z } from 'zod/v4'
|
||||
|
||||
/**
 * Settings schema for the auto-fix feature: which commands to run after file
 * edits, the retry budget, and the per-command timeout. Field-level docs live
 * in the `.describe()` calls so they surface wherever the schema is rendered.
 */
export const AutoFixConfigSchema = z
  .object({
    enabled: z.boolean().describe('Whether auto-fix is enabled'),
    lint: z
      .string()
      .optional()
      .describe('Lint command to run after file edits (e.g. "eslint . --fix")'),
    test: z
      .string()
      .optional()
      .describe('Test command to run after file edits (e.g. "bun test")'),
    maxRetries: z
      .number()
      .int()
      .min(0)
      .max(10)
      .default(3)
      .describe('Maximum number of auto-fix retry attempts (default: 3)'),
    timeout: z
      .number()
      .int()
      .min(1000)
      .max(300000)
      .default(30000)
      .describe('Timeout in ms for each lint/test command (default: 30000)'),
  })
  .refine(
    // An enabled config with neither command would be a silent no-op; reject it.
    data => !data.enabled || data.lint !== undefined || data.test !== undefined,
    {
      message: 'At least one of "lint" or "test" must be set when enabled',
    },
  )

// Parsed + defaulted shape of the autoFix settings block.
export type AutoFixConfig = z.infer<typeof AutoFixConfigSchema>
|
||||
|
||||
export function getAutoFixConfig(
|
||||
rawConfig: unknown,
|
||||
): AutoFixConfig | null {
|
||||
if (!rawConfig || typeof rawConfig !== 'object') {
|
||||
return null
|
||||
}
|
||||
const parsed = AutoFixConfigSchema.safeParse(rawConfig)
|
||||
if (!parsed.success) {
|
||||
return null
|
||||
}
|
||||
if (!parsed.data.enabled) {
|
||||
return null
|
||||
}
|
||||
return parsed.data
|
||||
}
|
||||
63
src/services/autoFix/autoFixHook.test.ts
Normal file
63
src/services/autoFix/autoFixHook.test.ts
Normal file
@@ -0,0 +1,63 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
import {
|
||||
shouldRunAutoFix,
|
||||
buildAutoFixContext,
|
||||
} from './autoFixHook.js'
|
||||
|
||||
// Only file-mutating tools should trigger the auto-fix hook, and only when a
// config object is present.
describe('shouldRunAutoFix', () => {
  test('returns true for file_edit tool when autoFix enabled', () => {
    const config = { enabled: true, lint: 'eslint .', maxRetries: 3, timeout: 30000 }
    expect(shouldRunAutoFix('file_edit', config)).toBe(true)
  })

  test('returns true for file_write tool when autoFix enabled', () => {
    const config = { enabled: true, lint: 'eslint .', maxRetries: 3, timeout: 30000 }
    expect(shouldRunAutoFix('file_write', config)).toBe(true)
  })

  // Read-only / non-file tools must never trigger auto-fix.
  test('returns false for bash tool', () => {
    const config = { enabled: true, lint: 'eslint .', maxRetries: 3, timeout: 30000 }
    expect(shouldRunAutoFix('bash', config)).toBe(false)
  })

  test('returns false for file_read tool', () => {
    const config = { enabled: true, lint: 'eslint .', maxRetries: 3, timeout: 30000 }
    expect(shouldRunAutoFix('file_read', config)).toBe(false)
  })

  test('returns false when config is null', () => {
    expect(shouldRunAutoFix('file_edit', null)).toBe(false)
  })
})
|
||||
|
||||
// buildAutoFixContext turns an AutoFixResult into a model-facing feedback
// message, or null when there is nothing to report.
describe('buildAutoFixContext', () => {
  test('formats lint errors as AI-readable context', () => {
    const context = buildAutoFixContext({
      hasErrors: true,
      lintOutput: 'src/foo.ts:10:5 error no-unused-vars',
      lintExitCode: 1,
      errorSummary: 'Lint errors (exit code 1):\nsrc/foo.ts:10:5 error no-unused-vars',
    })
    expect(context).toContain('AUTO-FIX')
    expect(context).toContain('no-unused-vars')
    expect(context).toContain('Please fix')
  })

  test('returns null when no errors', () => {
    const context = buildAutoFixContext({
      hasErrors: false,
    })
    expect(context).toBeNull()
  })

  test('formats test failures as AI-readable context', () => {
    const context = buildAutoFixContext({
      hasErrors: true,
      testOutput: 'FAIL src/foo.test.ts\n  expected true, got false',
      testExitCode: 1,
      errorSummary: 'Test failures (exit code 1):\nFAIL src/foo.test.ts',
    })
    expect(context).toContain('AUTO-FIX')
    expect(context).toContain('FAIL')
  })
})
|
||||
25
src/services/autoFix/autoFixHook.ts
Normal file
25
src/services/autoFix/autoFixHook.ts
Normal file
@@ -0,0 +1,25 @@
|
||||
import type { AutoFixConfig } from './autoFixConfig.js'
|
||||
import type { AutoFixResult } from './autoFixRunner.js'
|
||||
|
||||
const AUTO_FIX_TOOLS = new Set(['file_edit', 'file_write'])
|
||||
|
||||
export function shouldRunAutoFix(
|
||||
toolName: string,
|
||||
config: AutoFixConfig | null,
|
||||
): boolean {
|
||||
if (!config) return false
|
||||
return AUTO_FIX_TOOLS.has(toolName)
|
||||
}
|
||||
|
||||
export function buildAutoFixContext(result: AutoFixResult): string | null {
|
||||
if (!result.hasErrors || !result.errorSummary) return null
|
||||
|
||||
return (
|
||||
`<auto_fix_feedback>\n` +
|
||||
`AUTO-FIX: The file you just edited has errors. Please fix them:\n\n` +
|
||||
`${result.errorSummary}\n\n` +
|
||||
`Please fix these errors in the files you just edited. ` +
|
||||
`Do not ask the user — just apply the fix.\n` +
|
||||
`</auto_fix_feedback>`
|
||||
)
|
||||
}
|
||||
48
src/services/autoFix/autoFixIntegration.test.ts
Normal file
48
src/services/autoFix/autoFixIntegration.test.ts
Normal file
@@ -0,0 +1,48 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
import { getAutoFixConfig } from './autoFixConfig.js'
|
||||
import { shouldRunAutoFix, buildAutoFixContext } from './autoFixHook.js'
|
||||
import { runAutoFixCheck } from './autoFixRunner.js'
|
||||
|
||||
// End-to-end wiring test: settings parse → hook gate → command run → model
// feedback. Uses shell `echo`/`exit` so no real lint or test tooling is needed.
describe('autoFix end-to-end flow', () => {
  test('full flow: config → shouldRun → check → context', async () => {
    const config = getAutoFixConfig({
      enabled: true,
      lint: 'echo "error: unused" && exit 1',
      maxRetries: 2,
      timeout: 5000,
    })
    expect(config).not.toBeNull()
    expect(shouldRunAutoFix('file_edit', config)).toBe(true)

    const result = await runAutoFixCheck({
      lint: config!.lint,
      test: config!.test,
      timeout: config!.timeout,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(true)

    const context = buildAutoFixContext(result)
    expect(context).not.toBeNull()
    expect(context).toContain('AUTO-FIX')
    expect(context).toContain('unused')
  })

  test('full flow: no errors = no context', async () => {
    const config = getAutoFixConfig({
      enabled: true,
      lint: 'echo "all clean"',
      timeout: 5000,
    })
    const result = await runAutoFixCheck({
      lint: config!.lint,
      timeout: config!.timeout,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(false)
    const context = buildAutoFixContext(result)
    expect(context).toBeNull()
  })
})
|
||||
103
src/services/autoFix/autoFixRunner.test.ts
Normal file
103
src/services/autoFix/autoFixRunner.test.ts
Normal file
@@ -0,0 +1,103 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
import {
|
||||
runAutoFixCheck,
|
||||
type AutoFixResult,
|
||||
type AutoFixCheckOptions,
|
||||
} from './autoFixRunner.js'
|
||||
|
||||
// Runner behavior: exit-code interpretation, lint-before-test ordering,
// timeout handling, and the AI-facing error summary. All commands are shell
// one-liners so the tests stay fast and dependency-free.
describe('runAutoFixCheck', () => {
  test('returns success when lint command exits 0', async () => {
    const result = await runAutoFixCheck({
      lint: 'echo "all clean"',
      timeout: 5000,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(false)
    expect(result.lintOutput).toContain('all clean')
    expect(result.testOutput).toBeUndefined()
  })

  test('returns errors when lint command exits non-zero', async () => {
    const result = await runAutoFixCheck({
      lint: 'echo "error: unused var" && exit 1',
      timeout: 5000,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(true)
    expect(result.lintOutput).toContain('unused var')
    expect(result.lintExitCode).toBe(1)
  })

  test('returns errors when test command exits non-zero', async () => {
    const result = await runAutoFixCheck({
      test: 'echo "FAIL test_foo" && exit 1',
      timeout: 5000,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(true)
    expect(result.testOutput).toContain('FAIL test_foo')
    expect(result.testExitCode).toBe(1)
  })

  test('runs both lint and test commands', async () => {
    const result = await runAutoFixCheck({
      lint: 'echo "lint ok"',
      test: 'echo "test ok"',
      timeout: 5000,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(false)
    expect(result.lintOutput).toContain('lint ok')
    expect(result.testOutput).toContain('test ok')
  })

  // Lint failures short-circuit: tests must not run on a broken tree.
  test('skips test if lint fails', async () => {
    const result = await runAutoFixCheck({
      lint: 'echo "lint error" && exit 1',
      test: 'echo "should not run"',
      timeout: 5000,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(true)
    expect(result.lintOutput).toContain('lint error')
    expect(result.testOutput).toBeUndefined()
  })

  test('handles timeout gracefully', async () => {
    const result = await runAutoFixCheck({
      lint: 'sleep 10',
      timeout: 100,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(true)
    expect(result.timedOut).toBe(true)
  })

  test('returns success with no commands configured', async () => {
    const result = await runAutoFixCheck({
      timeout: 5000,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(false)
  })

  test('formats error summary for AI consumption', async () => {
    const result = await runAutoFixCheck({
      lint: 'echo "src/foo.ts:10:5 error no-unused-vars" && exit 1',
      timeout: 5000,
      cwd: '/tmp',
    })
    expect(result.hasErrors).toBe(true)
    const summary = result.errorSummary
    expect(summary).toContain('Lint errors')
    expect(summary).toContain('no-unused-vars')
  })
})
|
||||
169
src/services/autoFix/autoFixRunner.ts
Normal file
169
src/services/autoFix/autoFixRunner.ts
Normal file
@@ -0,0 +1,169 @@
|
||||
import { spawn } from 'child_process'
|
||||
|
||||
/** Options for a single auto-fix verification pass (see runAutoFixCheck). */
export interface AutoFixCheckOptions {
  // Shell command for linting (e.g. "eslint . --fix"); skipped when absent.
  lint?: string
  // Shell command for tests (e.g. "bun test"); skipped when absent.
  test?: string
  // Per-command timeout in milliseconds.
  timeout: number
  // Working directory in which the commands are spawned.
  cwd: string
  // Optional cancellation signal; aborting kills any in-flight command.
  signal?: AbortSignal
}

/** Outcome of an auto-fix verification pass. */
export interface AutoFixResult {
  // True when lint or tests failed or a command timed out.
  hasErrors: boolean
  // Combined, trimmed stdout+stderr of the lint command, if lint ran.
  lintOutput?: string
  // Exit code of the lint command, if lint ran.
  lintExitCode?: number
  // Combined, trimmed stdout+stderr of the test command, if tests ran.
  testOutput?: string
  // Exit code of the test command, if tests ran.
  testExitCode?: number
  // Set when a command was killed for exceeding the timeout.
  timedOut?: boolean
  // Readable summary of failures (fed back to the model); unset on success.
  errorSummary?: string
}
|
||||
|
||||
async function runCommand(
|
||||
command: string,
|
||||
cwd: string,
|
||||
timeout: number,
|
||||
signal?: AbortSignal,
|
||||
): Promise<{ stdout: string; stderr: string; exitCode: number; timedOut: boolean }> {
|
||||
return new Promise((resolve) => {
|
||||
if (signal?.aborted) {
|
||||
resolve({ stdout: '', stderr: 'Aborted', exitCode: 1, timedOut: false })
|
||||
return
|
||||
}
|
||||
|
||||
let timedOut = false
|
||||
let stdout = ''
|
||||
let stderr = ''
|
||||
|
||||
const isWindows = process.platform === 'win32'
|
||||
const proc = spawn(command, [], {
|
||||
cwd,
|
||||
env: { ...process.env },
|
||||
shell: true,
|
||||
windowsHide: true,
|
||||
// On Unix, create a process group so we can kill child processes on timeout/abort
|
||||
detached: !isWindows,
|
||||
})
|
||||
|
||||
const killTree = () => {
|
||||
try {
|
||||
if (!isWindows && proc.pid) {
|
||||
// Kill the entire process group
|
||||
process.kill(-proc.pid, 'SIGTERM')
|
||||
} else {
|
||||
proc.kill('SIGTERM')
|
||||
}
|
||||
} catch {
|
||||
// Process may have already exited
|
||||
}
|
||||
}
|
||||
|
||||
const onAbort = () => {
|
||||
killTree()
|
||||
}
|
||||
signal?.addEventListener('abort', onAbort, { once: true })
|
||||
|
||||
proc.stdout?.on('data', (data: Buffer) => {
|
||||
stdout += data.toString()
|
||||
})
|
||||
proc.stderr?.on('data', (data: Buffer) => {
|
||||
stderr += data.toString()
|
||||
})
|
||||
|
||||
const timer = setTimeout(() => {
|
||||
timedOut = true
|
||||
killTree()
|
||||
}, timeout)
|
||||
|
||||
proc.on('close', (code) => {
|
||||
clearTimeout(timer)
|
||||
signal?.removeEventListener('abort', onAbort)
|
||||
resolve({
|
||||
stdout: stdout.slice(0, 10000),
|
||||
stderr: stderr.slice(0, 10000),
|
||||
exitCode: code ?? 1,
|
||||
timedOut,
|
||||
})
|
||||
})
|
||||
|
||||
proc.on('error', () => {
|
||||
clearTimeout(timer)
|
||||
signal?.removeEventListener('abort', onAbort)
|
||||
resolve({
|
||||
stdout,
|
||||
stderr: stderr || 'Command failed to start',
|
||||
exitCode: 1,
|
||||
timedOut: false,
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
function buildErrorSummary(result: AutoFixResult): string | undefined {
|
||||
if (!result.hasErrors) return undefined
|
||||
const parts: string[] = []
|
||||
|
||||
if (result.timedOut) {
|
||||
parts.push('Command timed out.')
|
||||
}
|
||||
if (result.lintExitCode !== undefined && result.lintExitCode !== 0) {
|
||||
parts.push(`Lint errors (exit code ${result.lintExitCode}):\n${result.lintOutput ?? ''}`)
|
||||
}
|
||||
if (result.testExitCode !== undefined && result.testExitCode !== 0) {
|
||||
parts.push(`Test failures (exit code ${result.testExitCode}):\n${result.testOutput ?? ''}`)
|
||||
}
|
||||
|
||||
return parts.join('\n\n')
|
||||
}
|
||||
|
||||
export async function runAutoFixCheck(
|
||||
options: AutoFixCheckOptions,
|
||||
): Promise<AutoFixResult> {
|
||||
const { lint, test, timeout, cwd, signal } = options
|
||||
|
||||
if (!lint && !test) {
|
||||
return { hasErrors: false }
|
||||
}
|
||||
|
||||
if (signal?.aborted) {
|
||||
return { hasErrors: false }
|
||||
}
|
||||
|
||||
const result: AutoFixResult = { hasErrors: false }
|
||||
|
||||
// Run lint first
|
||||
if (lint) {
|
||||
const lintResult = await runCommand(lint, cwd, timeout, signal)
|
||||
result.lintOutput = (lintResult.stdout + '\n' + lintResult.stderr).trim()
|
||||
result.lintExitCode = lintResult.exitCode
|
||||
|
||||
if (lintResult.timedOut) {
|
||||
result.hasErrors = true
|
||||
result.timedOut = true
|
||||
result.errorSummary = buildErrorSummary(result)
|
||||
return result
|
||||
}
|
||||
|
||||
if (lintResult.exitCode !== 0) {
|
||||
result.hasErrors = true
|
||||
result.errorSummary = buildErrorSummary(result)
|
||||
return result
|
||||
}
|
||||
}
|
||||
|
||||
// Run tests only if lint passed (or no lint configured)
|
||||
if (test) {
|
||||
const testResult = await runCommand(test, cwd, timeout, signal)
|
||||
result.testOutput = (testResult.stdout + '\n' + testResult.stderr).trim()
|
||||
result.testExitCode = testResult.exitCode
|
||||
|
||||
if (testResult.timedOut) {
|
||||
result.hasErrors = true
|
||||
result.timedOut = true
|
||||
} else if (testResult.exitCode !== 0) {
|
||||
result.hasErrors = true
|
||||
}
|
||||
}
|
||||
|
||||
result.errorSummary = buildErrorSummary(result)
|
||||
return result
|
||||
}
|
||||
127
src/services/compact/microCompact.test.ts
Normal file
127
src/services/compact/microCompact.test.ts
Normal file
@@ -0,0 +1,127 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
|
||||
import type { Message } from '../../types/message.js'
|
||||
import { createAssistantMessage, createUserMessage } from '../../utils/messages.js'
|
||||
|
||||
// We test the exported collectCompactableToolIds behavior indirectly via
|
||||
// the public microcompactMessages + time-based path. But first we need to
|
||||
// verify the core predicate: MCP tools (prefixed 'mcp__') should be
|
||||
// compactable alongside the built-in tool set.
|
||||
|
||||
// Import internals we can test
|
||||
import { evaluateTimeBasedTrigger } from './microCompact.js'
|
||||
|
||||
/**
|
||||
* Helper: build a minimal assistant message with a tool_use block.
|
||||
*/
|
||||
function assistantWithToolUse(toolName: string, toolId: string): Message {
|
||||
return createAssistantMessage({
|
||||
content: [
|
||||
{
|
||||
type: 'tool_use' as const,
|
||||
id: toolId,
|
||||
name: toolName,
|
||||
input: {},
|
||||
},
|
||||
],
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper: build a user message with a tool_result block.
|
||||
*/
|
||||
function userWithToolResult(toolId: string, output: string): Message {
|
||||
return createUserMessage({
|
||||
content: [
|
||||
{
|
||||
type: 'tool_result' as const,
|
||||
tool_use_id: toolId,
|
||||
content: output,
|
||||
},
|
||||
],
|
||||
})
|
||||
}
|
||||
|
||||
describe('microCompact MCP tool compaction', () => {
  // isCompactableTool and collectCompactableToolIds are module-private, so
  // these tests exercise them indirectly through the public surface
  // (microcompactMessages / estimateMessageTokens). evaluateTimeBasedTrigger
  // depends on config (GrowthBook), so the observable checks below stick to
  // behavior that needs no config: module load, token estimation, and that
  // MCP-prefixed tools flow through microcompaction without error.

  test('module exports load correctly', async () => {
    const mod = await import('./microCompact.js')
    expect(mod.microcompactMessages).toBeFunction()
    expect(mod.estimateMessageTokens).toBeFunction()
    expect(mod.evaluateTimeBasedTrigger).toBeFunction()
  })

  test('estimateMessageTokens counts MCP tool_use blocks', async () => {
    const { estimateMessageTokens } = await import('./microCompact.js')

    const builtinMessages: Message[] = [
      assistantWithToolUse('Read', 'tool-builtin-1'),
      userWithToolResult('tool-builtin-1', 'file contents here'),
    ]

    const mcpMessages: Message[] = [
      assistantWithToolUse('mcp__github__get_file_contents', 'tool-mcp-1'),
      userWithToolResult('tool-mcp-1', 'file contents here'),
    ]

    const builtinTokens = estimateMessageTokens(builtinMessages)
    const mcpTokens = estimateMessageTokens(mcpMessages)

    // Both should produce non-zero estimates
    expect(builtinTokens).toBeGreaterThan(0)
    expect(mcpTokens).toBeGreaterThan(0)

    // The tool_result content is identical, so token estimates should be
    // similar (tool_use name differs slightly, so not exactly equal)
    expect(Math.abs(builtinTokens - mcpTokens)).toBeLessThan(50)
  })

  test('microcompactMessages processes MCP tools without error', async () => {
    const { microcompactMessages } = await import('./microCompact.js')

    const messages: Message[] = [
      assistantWithToolUse('mcp__slack__send_message', 'tool-mcp-2'),
      userWithToolResult('tool-mcp-2', 'Message sent successfully'),
      assistantWithToolUse('mcp__github__create_pull_request', 'tool-mcp-3'),
      userWithToolResult('tool-mcp-3', JSON.stringify({ number: 42, url: 'https://github.com/org/repo/pull/42' })),
    ]

    // Should not throw — MCP tools should be handled gracefully
    const result = await microcompactMessages(messages)
    expect(result).toBeDefined()
    expect(result.messages).toBeDefined()
    expect(result.messages.length).toBe(messages.length)
  })

  test('microcompactMessages processes mixed built-in and MCP tools', async () => {
    const { microcompactMessages } = await import('./microCompact.js')

    const messages: Message[] = [
      assistantWithToolUse('Read', 'tool-read-1'),
      userWithToolResult('tool-read-1', 'some file content'),
      assistantWithToolUse('mcp__playwright__screenshot', 'tool-mcp-4'),
      userWithToolResult('tool-mcp-4', 'base64-encoded-screenshot-data'.repeat(100)),
      assistantWithToolUse('Bash', 'tool-bash-1'),
      userWithToolResult('tool-bash-1', 'command output'),
    ]

    const result = await microcompactMessages(messages)
    expect(result).toBeDefined()
    expect(result.messages.length).toBe(messages.length)
  })
})
|
||||
@@ -37,7 +37,7 @@ export const TIME_BASED_MC_CLEARED_MESSAGE = '[Old tool result content cleared]'
|
||||
|
||||
const IMAGE_MAX_TOKEN_SIZE = 2000
|
||||
|
||||
// Only compact these tools
|
||||
// Only compact these built-in tools (MCP tools are also compactable via prefix match)
|
||||
const COMPACTABLE_TOOLS = new Set<string>([
|
||||
FILE_READ_TOOL_NAME,
|
||||
...SHELL_TOOL_NAMES,
|
||||
@@ -49,7 +49,13 @@ const COMPACTABLE_TOOLS = new Set<string>([
|
||||
FILE_WRITE_TOOL_NAME,
|
||||
])
|
||||
|
||||
// --- Cached microcompact state (internal-only, gated by feature('CACHED_MICROCOMPACT')) ---
|
||||
const MCP_TOOL_PREFIX = 'mcp__'
|
||||
|
||||
function isCompactableTool(name: string): boolean {
|
||||
return COMPACTABLE_TOOLS.has(name) || name.startsWith(MCP_TOOL_PREFIX)
|
||||
}
|
||||
|
||||
// --- Cached microcompact state (gated by feature('CACHED_MICROCOMPACT')) ---
|
||||
|
||||
// Lazy-initialized cached MC module and state to avoid importing in external builds.
|
||||
// The imports and state live inside feature() checks for dead code elimination.
|
||||
@@ -231,7 +237,7 @@ function collectCompactableToolIds(messages: Message[]): string[] {
|
||||
Array.isArray(message.message.content)
|
||||
) {
|
||||
for (const block of message.message.content) {
|
||||
if (block.type === 'tool_use' && COMPACTABLE_TOOLS.has(block.name)) {
|
||||
if (block.type === 'tool_use' && isCompactableTool(block.name)) {
|
||||
ids.push(block.id)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,19 +1,32 @@
|
||||
import { afterEach, describe, expect, mock, test } from 'bun:test'
|
||||
import { afterEach, beforeEach, describe, expect, mock, test } from 'bun:test'
|
||||
|
||||
import {
|
||||
DEFAULT_GITHUB_DEVICE_SCOPE,
|
||||
GitHubDeviceFlowError,
|
||||
pollAccessToken,
|
||||
requestDeviceCode,
|
||||
} from './deviceFlow.js'
|
||||
|
||||
async function importFreshModule() {
|
||||
mock.restore()
|
||||
return import(`./deviceFlow.ts?ts=${Date.now()}-${Math.random()}`)
|
||||
}
|
||||
|
||||
describe('requestDeviceCode', () => {
|
||||
const originalFetch = globalThis.fetch
|
||||
|
||||
beforeEach(() => {
|
||||
mock.restore()
|
||||
globalThis.fetch = originalFetch
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
globalThis.fetch = originalFetch
|
||||
})
|
||||
|
||||
test('parses successful device code response', async () => {
|
||||
const { requestDeviceCode } = await importFreshModule()
|
||||
|
||||
globalThis.fetch = mock(() =>
|
||||
Promise.resolve(
|
||||
new Response(
|
||||
@@ -41,6 +54,9 @@ describe('requestDeviceCode', () => {
|
||||
})
|
||||
|
||||
test('throws on HTTP error', async () => {
|
||||
const { requestDeviceCode, GitHubDeviceFlowError } =
|
||||
await importFreshModule()
|
||||
|
||||
globalThis.fetch = mock(() =>
|
||||
Promise.resolve(new Response('bad', { status: 500 })),
|
||||
)
|
||||
@@ -48,6 +64,81 @@ describe('requestDeviceCode', () => {
|
||||
requestDeviceCode({ clientId: 'x', fetchImpl: globalThis.fetch }),
|
||||
).rejects.toThrow(GitHubDeviceFlowError)
|
||||
})
|
||||
|
||||
test('uses OAuth-safe default scope', async () => {
|
||||
let capturedScope = ''
|
||||
globalThis.fetch = mock((_url: RequestInfo | URL, init?: RequestInit) => {
|
||||
const body = init?.body
|
||||
if (body instanceof URLSearchParams) {
|
||||
capturedScope = body.get('scope') ?? ''
|
||||
} else {
|
||||
capturedScope = new URLSearchParams(String(body ?? '')).get('scope') ?? ''
|
||||
}
|
||||
|
||||
return Promise.resolve(
|
||||
new Response(
|
||||
JSON.stringify({
|
||||
device_code: 'abc',
|
||||
user_code: 'ABCD-1234',
|
||||
verification_uri: 'https://github.com/login/device',
|
||||
}),
|
||||
{ status: 200 },
|
||||
),
|
||||
)
|
||||
})
|
||||
|
||||
await requestDeviceCode({ clientId: 'test-client', fetchImpl: globalThis.fetch })
|
||||
expect(capturedScope).toBe(DEFAULT_GITHUB_DEVICE_SCOPE)
|
||||
expect(capturedScope).toBe('read:user')
|
||||
})
|
||||
|
||||
test('retries with OAuth-safe scope on invalid_scope', async () => {
|
||||
const scopesSeen: string[] = []
|
||||
let callCount = 0
|
||||
|
||||
globalThis.fetch = mock((_url: RequestInfo | URL, init?: RequestInit) => {
|
||||
const body = init?.body
|
||||
const scope =
|
||||
body instanceof URLSearchParams
|
||||
? body.get('scope') ?? ''
|
||||
: new URLSearchParams(String(body ?? '')).get('scope') ?? ''
|
||||
scopesSeen.push(scope)
|
||||
callCount++
|
||||
|
||||
if (callCount === 1) {
|
||||
return Promise.resolve(
|
||||
new Response(
|
||||
JSON.stringify({
|
||||
error: 'invalid_scope',
|
||||
error_description: 'invalid models scope',
|
||||
}),
|
||||
{ status: 400 },
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
return Promise.resolve(
|
||||
new Response(
|
||||
JSON.stringify({
|
||||
device_code: 'abc',
|
||||
user_code: 'ABCD-1234',
|
||||
verification_uri: 'https://github.com/login/device',
|
||||
}),
|
||||
{ status: 200 },
|
||||
),
|
||||
)
|
||||
})
|
||||
|
||||
const result = await requestDeviceCode({
|
||||
clientId: 'test-client',
|
||||
scope: 'read:user,models:read',
|
||||
fetchImpl: globalThis.fetch,
|
||||
})
|
||||
|
||||
expect(result.device_code).toBe('abc')
|
||||
expect(callCount).toBe(2)
|
||||
expect(scopesSeen).toEqual(['read:user,models:read', 'read:user'])
|
||||
})
|
||||
})
|
||||
|
||||
describe('pollAccessToken', () => {
|
||||
@@ -58,6 +149,8 @@ describe('pollAccessToken', () => {
|
||||
})
|
||||
|
||||
test('returns token when GitHub responds with access_token immediately', async () => {
|
||||
const { pollAccessToken } = await importFreshModule()
|
||||
|
||||
let calls = 0
|
||||
globalThis.fetch = mock(() => {
|
||||
calls++
|
||||
@@ -77,6 +170,8 @@ describe('pollAccessToken', () => {
|
||||
})
|
||||
|
||||
test('throws on access_denied', async () => {
|
||||
const { pollAccessToken } = await importFreshModule()
|
||||
|
||||
globalThis.fetch = mock(() =>
|
||||
Promise.resolve(
|
||||
new Response(JSON.stringify({ error: 'access_denied' }), {
|
||||
@@ -92,3 +187,62 @@ describe('pollAccessToken', () => {
|
||||
).rejects.toThrow(/denied/)
|
||||
})
|
||||
})
|
||||
|
||||
// exchangeForCopilotToken trades a GitHub OAuth token for a short-lived
// Copilot API token; fetch is mocked so no network is touched.
describe('exchangeForCopilotToken', () => {
  const originalFetch = globalThis.fetch

  // Restore the real fetch so mocks never leak into other suites.
  afterEach(() => {
    globalThis.fetch = originalFetch
  })

  test('parses successful Copilot token response', async () => {
    const { exchangeForCopilotToken } = await importFreshModule()

    globalThis.fetch = mock(() =>
      Promise.resolve(
        new Response(
          JSON.stringify({
            token: 'copilot-token-xyz',
            expires_at: 1700000000,
            refresh_in: 3600,
            endpoints: {
              api: 'https://api.githubcopilot.com',
            },
          }),
          { status: 200 },
        ),
      ),
    )

    const result = await exchangeForCopilotToken('oauth-token', globalThis.fetch)
    expect(result.token).toBe('copilot-token-xyz')
    expect(result.expires_at).toBe(1700000000)
    expect(result.refresh_in).toBe(3600)
    expect(result.endpoints.api).toBe('https://api.githubcopilot.com')
  })

  test('throws on HTTP error', async () => {
    const { exchangeForCopilotToken, GitHubDeviceFlowError } =
      await importFreshModule()

    globalThis.fetch = mock(() =>
      Promise.resolve(new Response('unauthorized', { status: 401 })),
    )
    await expect(
      exchangeForCopilotToken('bad-token', globalThis.fetch),
    ).rejects.toThrow(GitHubDeviceFlowError)
  })

  test('throws on malformed response', async () => {
    const { exchangeForCopilotToken } = await importFreshModule()

    globalThis.fetch = mock(() =>
      Promise.resolve(
        new Response(JSON.stringify({ invalid: 'data' }), { status: 200 }),
      ),
    )
    await expect(
      exchangeForCopilotToken('oauth-token', globalThis.fetch),
    ).rejects.toThrow(/Malformed/)
  })
})
|
||||
|
||||
@@ -1,17 +1,35 @@
|
||||
/**
|
||||
* GitHub OAuth device flow for CLI login (https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/authorizing-oauth-apps#device-flow).
|
||||
* Uses GitHub Copilot's official OAuth app for device authentication.
|
||||
*/
|
||||
|
||||
import { execFileNoThrow } from '../../utils/execFileNoThrow.js'
|
||||
|
||||
export const DEFAULT_GITHUB_DEVICE_FLOW_CLIENT_ID = 'Ov23liXjWSSui6QIahPl'
|
||||
export const DEFAULT_GITHUB_DEVICE_FLOW_CLIENT_ID = 'Iv1.b507a08c87ecfe98'
|
||||
|
||||
export const GITHUB_DEVICE_CODE_URL = 'https://github.com/login/device/code'
|
||||
export const GITHUB_DEVICE_ACCESS_TOKEN_URL =
|
||||
'https://github.com/login/oauth/access_token'
|
||||
export const COPILOT_TOKEN_URL = 'https://api.github.com/copilot_internal/v2/token'
|
||||
|
||||
/** Match runtime devsper github_oauth DEFAULT_SCOPE */
|
||||
export const DEFAULT_GITHUB_DEVICE_SCOPE = 'read:user,models:read'
|
||||
/** Only read:user scope — required for Copilot OAuth */
|
||||
export const DEFAULT_GITHUB_DEVICE_SCOPE = 'read:user'
|
||||
|
||||
export const COPILOT_HEADERS: Record<string, string> = {
|
||||
'User-Agent': 'GitHubCopilotChat/0.26.7',
|
||||
'Editor-Version': 'vscode/1.99.3',
|
||||
'Editor-Plugin-Version': 'copilot-chat/0.26.7',
|
||||
'Copilot-Integration-Id': 'vscode-chat',
|
||||
}
|
||||
|
||||
export type CopilotTokenResponse = {
|
||||
token: string
|
||||
expires_at: number
|
||||
refresh_in: number
|
||||
endpoints: {
|
||||
api: string
|
||||
}
|
||||
}
|
||||
|
||||
export class GitHubDeviceFlowError extends Error {
|
||||
constructor(message: string) {
|
||||
@@ -28,6 +46,8 @@ export type DeviceCodeResult = {
|
||||
interval: number
|
||||
}
|
||||
|
||||
type FetchLike = (input: RequestInfo | URL, init?: RequestInit) => Promise<Response>
|
||||
|
||||
export function getGithubDeviceFlowClientId(): string {
|
||||
return (
|
||||
process.env.GITHUB_DEVICE_FLOW_CLIENT_ID?.trim() ||
|
||||
@@ -42,54 +62,77 @@ function sleep(ms: number): Promise<void> {
|
||||
export async function requestDeviceCode(options?: {
|
||||
clientId?: string
|
||||
scope?: string
|
||||
fetchImpl?: typeof fetch
|
||||
fetchImpl?: FetchLike
|
||||
}): Promise<DeviceCodeResult> {
|
||||
const clientId = options?.clientId ?? getGithubDeviceFlowClientId()
|
||||
if (!clientId) {
|
||||
throw new GitHubDeviceFlowError(
|
||||
'No OAuth client ID: set GITHUB_DEVICE_FLOW_CLIENT_ID or paste a PAT instead.',
|
||||
'No OAuth client ID: set GITHUB_DEVICE_FLOW_CLIENT_ID.',
|
||||
)
|
||||
}
|
||||
const fetchFn = options?.fetchImpl ?? fetch
|
||||
const res = await fetchFn(GITHUB_DEVICE_CODE_URL, {
|
||||
method: 'POST',
|
||||
headers: { Accept: 'application/json' },
|
||||
body: new URLSearchParams({
|
||||
client_id: clientId,
|
||||
scope: options?.scope ?? DEFAULT_GITHUB_DEVICE_SCOPE,
|
||||
}),
|
||||
})
|
||||
if (!res.ok) {
|
||||
const text = await res.text().catch(() => '')
|
||||
throw new GitHubDeviceFlowError(
|
||||
`Device code request failed: ${res.status} ${text}`,
|
||||
)
|
||||
}
|
||||
const data = (await res.json()) as Record<string, unknown>
|
||||
const device_code = data.device_code
|
||||
const user_code = data.user_code
|
||||
const verification_uri = data.verification_uri
|
||||
if (
|
||||
typeof device_code !== 'string' ||
|
||||
typeof user_code !== 'string' ||
|
||||
typeof verification_uri !== 'string'
|
||||
) {
|
||||
throw new GitHubDeviceFlowError('Malformed device code response from GitHub')
|
||||
}
|
||||
return {
|
||||
device_code,
|
||||
user_code,
|
||||
verification_uri,
|
||||
expires_in: typeof data.expires_in === 'number' ? data.expires_in : 900,
|
||||
interval: typeof data.interval === 'number' ? data.interval : 5,
|
||||
const requestedScope =
|
||||
options?.scope?.trim() || DEFAULT_GITHUB_DEVICE_SCOPE
|
||||
const scopesToTry =
|
||||
requestedScope === DEFAULT_GITHUB_DEVICE_SCOPE
|
||||
? [requestedScope]
|
||||
: [requestedScope, DEFAULT_GITHUB_DEVICE_SCOPE]
|
||||
|
||||
let lastError = 'Device code request failed.'
|
||||
|
||||
for (const scope of scopesToTry) {
|
||||
const res = await fetchFn(GITHUB_DEVICE_CODE_URL, {
|
||||
method: 'POST',
|
||||
headers: { Accept: 'application/json' },
|
||||
body: new URLSearchParams({
|
||||
client_id: clientId,
|
||||
scope,
|
||||
}),
|
||||
})
|
||||
|
||||
if (!res.ok) {
|
||||
const text = await res.text().catch(() => '')
|
||||
lastError = `Device code request failed: ${res.status} ${text}`
|
||||
const isInvalidScope = /invalid_scope/i.test(text)
|
||||
const canRetryWithFallback =
|
||||
scope !== DEFAULT_GITHUB_DEVICE_SCOPE && isInvalidScope
|
||||
if (canRetryWithFallback) {
|
||||
continue
|
||||
}
|
||||
throw new GitHubDeviceFlowError(lastError)
|
||||
}
|
||||
|
||||
const data = (await res.json()) as Record<string, unknown>
|
||||
const device_code = data.device_code
|
||||
const user_code = data.user_code
|
||||
const verification_uri = data.verification_uri
|
||||
if (
|
||||
typeof device_code !== 'string' ||
|
||||
typeof user_code !== 'string' ||
|
||||
typeof verification_uri !== 'string'
|
||||
) {
|
||||
throw new GitHubDeviceFlowError(
|
||||
'Malformed device code response from GitHub',
|
||||
)
|
||||
}
|
||||
|
||||
return {
|
||||
device_code,
|
||||
user_code,
|
||||
verification_uri,
|
||||
expires_in: typeof data.expires_in === 'number' ? data.expires_in : 900,
|
||||
interval: typeof data.interval === 'number' ? data.interval : 5,
|
||||
}
|
||||
}
|
||||
|
||||
throw new GitHubDeviceFlowError(lastError)
|
||||
}
|
||||
|
||||
export type PollOptions = {
|
||||
clientId?: string
|
||||
initialInterval?: number
|
||||
timeoutSeconds?: number
|
||||
fetchImpl?: typeof fetch
|
||||
fetchImpl?: FetchLike
|
||||
}
|
||||
|
||||
export async function pollAccessToken(
|
||||
@@ -172,3 +215,49 @@ export async function openVerificationUri(uri: string): Promise<void> {
|
||||
// User can open the URL manually
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Exchange an OAuth access token for a Copilot API token.
|
||||
* The OAuth token alone cannot be used with the Copilot API endpoint.
|
||||
*/
|
||||
export async function exchangeForCopilotToken(
|
||||
oauthToken: string,
|
||||
fetchImpl?: FetchLike,
|
||||
): Promise<CopilotTokenResponse> {
|
||||
const fetchFn = fetchImpl ?? fetch
|
||||
const res = await fetchFn(COPILOT_TOKEN_URL, {
|
||||
method: 'GET',
|
||||
headers: {
|
||||
Accept: 'application/json',
|
||||
Authorization: `Bearer ${oauthToken}`,
|
||||
...COPILOT_HEADERS,
|
||||
},
|
||||
})
|
||||
if (!res.ok) {
|
||||
const text = await res.text().catch(() => '')
|
||||
throw new GitHubDeviceFlowError(
|
||||
`Copilot token exchange failed: ${res.status} ${text}`,
|
||||
)
|
||||
}
|
||||
const data = (await res.json()) as Record<string, unknown>
|
||||
const token = data.token
|
||||
const expires_at = data.expires_at
|
||||
const refresh_in = data.refresh_in
|
||||
const endpoints = data.endpoints
|
||||
if (
|
||||
typeof token !== 'string' ||
|
||||
typeof expires_at !== 'number' ||
|
||||
typeof refresh_in !== 'number' ||
|
||||
!endpoints ||
|
||||
typeof endpoints !== 'object' ||
|
||||
typeof (endpoints as Record<string, unknown>).api !== 'string'
|
||||
) {
|
||||
throw new GitHubDeviceFlowError('Malformed Copilot token response')
|
||||
}
|
||||
return {
|
||||
token,
|
||||
expires_at,
|
||||
refresh_in,
|
||||
endpoints: endpoints as { api: string },
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
// Mock rate limits for testing [internal-only]
|
||||
// The external build keeps this module as a stable no-op surface so imports
|
||||
// remain valid without exposing internal-only rate-limit simulation behavior.
|
||||
// This allows testing various rate limit scenarios without hitting actual limits
|
||||
//
|
||||
// WARNING: This is for internal testing/demo purposes only!
|
||||
// The mock headers may not exactly match the API specification or real-world behavior.
|
||||
// Always validate against actual API responses before relying on this for production features.
|
||||
|
||||
import { setMockBillingAccessOverride } from '../utils/billing.js'
|
||||
import type { OverageDisabledReason } from './claudeAiLimits.js'
|
||||
|
||||
@@ -645,7 +645,7 @@ const internalOnlyTips: Tip[] =
|
||||
{
|
||||
id: 'skillify',
|
||||
content: async () =>
|
||||
'[internal] Turn repeatable workflows into reusable project skills when they keep recurring',
|
||||
'[internal] Use /skillify to turn repeatable recurring workflows into reusable project skills',
|
||||
cooldownSessions: 15,
|
||||
isRelevant: async () => true,
|
||||
},
|
||||
|
||||
33
src/services/tools/toolExecution.test.ts
Normal file
33
src/services/tools/toolExecution.test.ts
Normal file
@@ -0,0 +1,33 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
|
||||
import { SkillTool } from '../../tools/SkillTool/SkillTool.js'
|
||||
import {
|
||||
getSchemaValidationErrorOverride,
|
||||
getSchemaValidationToolUseResult,
|
||||
} from './toolExecution.js'
|
||||
|
||||
describe('getSchemaValidationErrorOverride', () => {
|
||||
test('returns actionable missing-skill error for SkillTool', () => {
|
||||
expect(getSchemaValidationErrorOverride(SkillTool, {})).toBe(
|
||||
'Missing skill name. Pass the slash command name as the skill parameter (e.g., skill: "commit" for /commit, skill: "review-pr" for /review-pr).',
|
||||
)
|
||||
})
|
||||
|
||||
test('does not override unrelated tool schema failures', () => {
|
||||
expect(getSchemaValidationErrorOverride({ name: 'Read' } as never, {})).toBe(
|
||||
null,
|
||||
)
|
||||
})
|
||||
|
||||
test('does not override SkillTool when skill is present', () => {
|
||||
expect(
|
||||
getSchemaValidationErrorOverride(SkillTool, { skill: 'commit' }),
|
||||
).toBe(null)
|
||||
})
|
||||
|
||||
test('uses the actionable override for structured toolUseResult too', () => {
|
||||
expect(getSchemaValidationToolUseResult(SkillTool, {} as never)).toBe(
|
||||
'InputValidationError: Missing skill name. Pass the slash command name as the skill parameter (e.g., skill: "commit" for /commit, skill: "review-pr" for /review-pr).',
|
||||
)
|
||||
})
|
||||
})
|
||||
@@ -43,6 +43,7 @@ import { FILE_READ_TOOL_NAME } from '../../tools/FileReadTool/prompt.js'
|
||||
import { FILE_WRITE_TOOL_NAME } from '../../tools/FileWriteTool/prompt.js'
|
||||
import { NOTEBOOK_EDIT_TOOL_NAME } from '../../tools/NotebookEditTool/constants.js'
|
||||
import { POWERSHELL_TOOL_NAME } from '../../tools/PowerShellTool/toolName.js'
|
||||
import { SKILL_TOOL_NAME } from '../../tools/SkillTool/constants.js'
|
||||
import { parseGitCommitId } from '../../tools/shared/gitOperationTracking.js'
|
||||
import {
|
||||
isDeferredTool,
|
||||
@@ -596,6 +597,31 @@ export function buildSchemaNotSentHint(
|
||||
)
|
||||
}
|
||||
|
||||
export function getSchemaValidationErrorOverride(
|
||||
tool: Tool,
|
||||
input: unknown,
|
||||
): string | null {
|
||||
if (tool.name !== SKILL_TOOL_NAME || !input || typeof input !== 'object') {
|
||||
return null
|
||||
}
|
||||
|
||||
const skill = (input as { skill?: unknown }).skill
|
||||
if (skill === undefined || skill === null) {
|
||||
return 'Missing skill name. Pass the slash command name as the skill parameter (e.g., skill: "commit" for /commit, skill: "review-pr" for /review-pr).'
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
export function getSchemaValidationToolUseResult(
|
||||
tool: Tool,
|
||||
input: unknown,
|
||||
fallbackMessage?: string,
|
||||
): string {
|
||||
const override = getSchemaValidationErrorOverride(tool, input)
|
||||
return `InputValidationError: ${override ?? fallbackMessage ?? ''}`
|
||||
}
|
||||
|
||||
async function checkPermissionsAndCallTool(
|
||||
tool: Tool,
|
||||
toolUseID: string,
|
||||
@@ -614,7 +640,9 @@ async function checkPermissionsAndCallTool(
|
||||
// Validate input types with zod (surprisingly, the model is not great at generating valid input)
|
||||
const parsedInput = tool.inputSchema.safeParse(input)
|
||||
if (!parsedInput.success) {
|
||||
let errorContent = formatZodValidationError(tool.name, parsedInput.error)
|
||||
const fallbackErrorContent = formatZodValidationError(tool.name, parsedInput.error)
|
||||
let errorContent =
|
||||
getSchemaValidationErrorOverride(tool, input) ?? fallbackErrorContent
|
||||
|
||||
const schemaHint = buildSchemaNotSentHint(
|
||||
tool,
|
||||
@@ -672,7 +700,11 @@ async function checkPermissionsAndCallTool(
|
||||
tool_use_id: toolUseID,
|
||||
},
|
||||
],
|
||||
toolUseResult: `InputValidationError: ${parsedInput.error.message}`,
|
||||
toolUseResult: getSchemaValidationToolUseResult(
|
||||
tool,
|
||||
input,
|
||||
parsedInput.error.message,
|
||||
),
|
||||
sourceToolAssistantUUID: assistantMessage.uuid,
|
||||
}),
|
||||
},
|
||||
|
||||
@@ -29,6 +29,13 @@ import {
|
||||
} from '../../utils/permissions/PermissionResult.js'
|
||||
import { checkRuleBasedPermissions } from '../../utils/permissions/permissions.js'
|
||||
import { formatError } from '../../utils/toolErrors.js'
|
||||
import { getAutoFixConfig } from '../autoFix/autoFixConfig.js'
|
||||
import { shouldRunAutoFix, buildAutoFixContext } from '../autoFix/autoFixHook.js'
|
||||
import { runAutoFixCheck } from '../autoFix/autoFixRunner.js'
|
||||
|
||||
// Track auto-fix retry count per query chain to enforce maxRetries cap.
|
||||
// Key: queryChainId (or 'default'), Value: number of auto-fix attempts used.
|
||||
const autoFixRetryCount = new Map<string, number>()
|
||||
import { isMcpTool } from '../mcp/utils.js'
|
||||
import type { McpServerType, MessageUpdateLazy } from './toolExecution.js'
|
||||
|
||||
@@ -185,6 +192,65 @@ export async function* runPostToolUseHooks<Input extends AnyObject, Output>(
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Auto-fix: run lint/test if configured for this tool
|
||||
const autoFixSettings = toolUseContext.getAppState().settings
|
||||
const autoFixConfig = getAutoFixConfig(
|
||||
autoFixSettings && typeof autoFixSettings === 'object' && 'autoFix' in autoFixSettings
|
||||
? (autoFixSettings as Record<string, unknown>).autoFix
|
||||
: undefined,
|
||||
)
|
||||
if (shouldRunAutoFix(tool.name, autoFixConfig) && autoFixConfig) {
|
||||
// Enforce maxRetries cap to prevent unbounded auto-fix loops.
|
||||
// Uses queryChainId to scope the counter to the current conversation turn.
|
||||
const chainKey = (toolUseContext.queryTracking?.chainId as string) ?? 'default'
|
||||
const currentRetries = autoFixRetryCount.get(chainKey) ?? 0
|
||||
|
||||
if (currentRetries >= autoFixConfig.maxRetries) {
|
||||
// Max retries reached — skip auto-fix and let the user know
|
||||
yield {
|
||||
message: createAttachmentMessage({
|
||||
type: 'hook_additional_context',
|
||||
content: [
|
||||
`<auto_fix_feedback>\nAUTO-FIX: Maximum retry limit (${autoFixConfig.maxRetries}) reached. ` +
|
||||
`Skipping further auto-fix attempts. Please review the errors manually.\n</auto_fix_feedback>`,
|
||||
],
|
||||
hookName: `AutoFix:${tool.name}`,
|
||||
toolUseID,
|
||||
hookEvent: 'PostToolUse',
|
||||
}),
|
||||
}
|
||||
} else {
|
||||
try {
|
||||
const cwd = toolUseContext.options?.cwd ?? process.cwd()
|
||||
const autoFixResult = await runAutoFixCheck({
|
||||
lint: autoFixConfig.lint,
|
||||
test: autoFixConfig.test,
|
||||
timeout: autoFixConfig.timeout,
|
||||
cwd,
|
||||
signal: toolUseContext.abortController.signal,
|
||||
})
|
||||
const autoFixContext = buildAutoFixContext(autoFixResult)
|
||||
if (autoFixContext) {
|
||||
autoFixRetryCount.set(chainKey, currentRetries + 1)
|
||||
yield {
|
||||
message: createAttachmentMessage({
|
||||
type: 'hook_additional_context',
|
||||
content: [autoFixContext],
|
||||
hookName: `AutoFix:${tool.name}`,
|
||||
toolUseID,
|
||||
hookEvent: 'PostToolUse',
|
||||
}),
|
||||
}
|
||||
} else {
|
||||
// Lint/test passed — reset the retry counter for this chain
|
||||
autoFixRetryCount.delete(chainKey)
|
||||
}
|
||||
} catch (autoFixError) {
|
||||
logError(autoFixError)
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
logError(error)
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import { getGlobalConfig, saveGlobalConfig } from '../utils/config.js'
|
||||
import { toError } from '../utils/errors.js'
|
||||
import { logError } from '../utils/log.js'
|
||||
import { applyConfigEnvironmentVariables } from '../utils/managedEnv.js'
|
||||
import { persistActiveProviderProfileModel } from '../utils/providerProfiles.js'
|
||||
import {
|
||||
permissionModeFromString,
|
||||
toExternalPermissionMode,
|
||||
@@ -110,6 +111,12 @@ export function onChangeAppState({
|
||||
// Save to settings
|
||||
updateSettingsForSource('userSettings', { model: newState.mainLoopModel })
|
||||
setMainLoopModelOverride(newState.mainLoopModel)
|
||||
|
||||
// Keep active provider profiles in sync with /model choices so restarts
|
||||
// keep using the last selected model instead of the profile's old default.
|
||||
if (process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED === '1') {
|
||||
persistActiveProviderProfileModel(newState.mainLoopModel)
|
||||
}
|
||||
}
|
||||
|
||||
// expandedView → persist as showExpandedTodos + showSpinnerTree for backwards compat
|
||||
|
||||
@@ -156,34 +156,24 @@ ${AGENT_TOOL_NAME}({
|
||||
const currentExamples = `Example usage:
|
||||
|
||||
<example_agent_descriptions>
|
||||
"test-runner": use this agent after you are done writing code to run tests
|
||||
"greeting-responder": use this agent to respond to user greetings with a friendly joke
|
||||
"claude-code-guide": use this agent when the user asks how Claude Code works or how to use its features
|
||||
"statusline-setup": use this agent to configure the user's Claude Code status line setting
|
||||
</example_agent_descriptions>
|
||||
|
||||
<example>
|
||||
user: "Please write a function that checks if a number is prime"
|
||||
assistant: I'm going to use the ${FILE_WRITE_TOOL_NAME} tool to write the following code:
|
||||
<code>
|
||||
function isPrime(n) {
|
||||
if (n <= 1) return false
|
||||
for (let i = 2; i * i <= n; i++) {
|
||||
if (n % i === 0) return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
</code>
|
||||
user: "How do I configure Claude Code hooks?"
|
||||
<commentary>
|
||||
Since a significant piece of code was written and the task was completed, now use the test-runner agent to run the tests
|
||||
This is a Claude Code usage question, so use the claude-code-guide agent
|
||||
</commentary>
|
||||
assistant: Uses the ${AGENT_TOOL_NAME} tool to launch the test-runner agent
|
||||
assistant: Uses the ${AGENT_TOOL_NAME} tool to launch the claude-code-guide agent
|
||||
</example>
|
||||
|
||||
<example>
|
||||
user: "Hello"
|
||||
user: "Set up my Claude Code status line"
|
||||
<commentary>
|
||||
Since the user is greeting, use the greeting-responder agent to respond with a friendly joke
|
||||
This matches the statusline-setup agent, so use it to configure the setting
|
||||
</commentary>
|
||||
assistant: "I'm going to use the ${AGENT_TOOL_NAME} tool to launch the greeting-responder agent"
|
||||
assistant: "I'm going to use the ${AGENT_TOOL_NAME} tool to launch the statusline-setup agent"
|
||||
</example>
|
||||
`
|
||||
|
||||
|
||||
31
src/tools/SkillTool/SkillTool.test.ts
Normal file
31
src/tools/SkillTool/SkillTool.test.ts
Normal file
@@ -0,0 +1,31 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
|
||||
import { SkillTool } from './SkillTool.js'
|
||||
|
||||
describe('SkillTool missing parameter handling', () => {
|
||||
test('missing skill stays required at the schema level', async () => {
|
||||
const parsed = SkillTool.inputSchema.safeParse({})
|
||||
|
||||
expect(parsed.success).toBe(false)
|
||||
})
|
||||
|
||||
test('validateInput still returns an actionable error when called with missing skill', async () => {
|
||||
const result = await SkillTool.validateInput?.({} as never, {
|
||||
options: { tools: [] },
|
||||
messages: [],
|
||||
} as never)
|
||||
|
||||
expect(result).toEqual({
|
||||
result: false,
|
||||
message:
|
||||
'Missing skill name. Pass the slash command name as the skill parameter (e.g., skill: "commit" for /commit, skill: "review-pr" for /review-pr).',
|
||||
errorCode: 1,
|
||||
})
|
||||
})
|
||||
|
||||
test('valid skill input still parses and validates', async () => {
|
||||
const parsed = SkillTool.inputSchema.safeParse({ skill: 'commit' })
|
||||
|
||||
expect(parsed.success).toBe(true)
|
||||
})
|
||||
})
|
||||
@@ -352,6 +352,16 @@ export const SkillTool: Tool<InputSchema, Output, Progress> = buildTool({
|
||||
toAutoClassifierInput: ({ skill }) => skill ?? '',
|
||||
|
||||
async validateInput({ skill }, context): Promise<ValidationResult> {
|
||||
if (!skill || typeof skill !== 'string') {
|
||||
return {
|
||||
result: false,
|
||||
message:
|
||||
'Missing skill name. Pass the slash command name as the skill parameter ' +
|
||||
'(e.g., skill: "commit" for /commit, skill: "review-pr" for /review-pr).',
|
||||
errorCode: 1,
|
||||
}
|
||||
}
|
||||
|
||||
// Skills are just skill names, no arguments
|
||||
const trimmed = skill.trim()
|
||||
if (!trimmed) {
|
||||
@@ -434,7 +444,7 @@ export const SkillTool: Tool<InputSchema, Output, Progress> = buildTool({
|
||||
context,
|
||||
): Promise<PermissionDecision> {
|
||||
// Skills are just skill names, no arguments
|
||||
const trimmed = skill.trim()
|
||||
const trimmed = skill ?? ''
|
||||
|
||||
// Remove leading slash if present (for compatibility)
|
||||
const commandName = trimmed.startsWith('/') ? trimmed.substring(1) : trimmed
|
||||
@@ -592,7 +602,7 @@ export const SkillTool: Tool<InputSchema, Output, Progress> = buildTool({
|
||||
// - Skill is a prompt-based skill
|
||||
|
||||
// Skills are just names, with optional arguments
|
||||
const trimmed = skill.trim()
|
||||
const trimmed = skill ?? ''
|
||||
|
||||
// Remove leading slash if present (for compatibility)
|
||||
const commandName = trimmed.startsWith('/') ? trimmed.substring(1) : trimmed
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { expect, test } from 'bun:test'
|
||||
import { z } from 'zod/v4'
|
||||
import { getEmptyToolPermissionContext, type Tool, type Tools } from '../Tool.js'
|
||||
import { SkillTool } from '../tools/SkillTool/SkillTool.js'
|
||||
import { toolToAPISchema } from './api.js'
|
||||
|
||||
test('toolToAPISchema preserves provider-specific schema keywords in input_schema', async () => {
|
||||
@@ -64,3 +65,16 @@ test('toolToAPISchema preserves provider-specific schema keywords in input_schem
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test('toolToAPISchema keeps skill required for SkillTool', async () => {
|
||||
const schema = await toolToAPISchema(SkillTool, {
|
||||
getToolPermissionContext: async () => getEmptyToolPermissionContext(),
|
||||
tools: [] as unknown as Tools,
|
||||
agents: [],
|
||||
})
|
||||
|
||||
expect((schema as { input_schema: unknown }).input_schema).toMatchObject({
|
||||
type: 'object',
|
||||
required: ['skill'],
|
||||
})
|
||||
})
|
||||
|
||||
@@ -94,3 +94,22 @@ test('gpt-5.4 family keeps large max output overrides within provider limits', (
|
||||
expect(getMaxOutputTokensForModel('gpt-5.4-mini')).toBe(128_000)
|
||||
expect(getMaxOutputTokensForModel('gpt-5.4-nano')).toBe(128_000)
|
||||
})
|
||||
|
||||
test('MiniMax-M2.7 uses explicit provider-specific context and output caps', () => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
||||
|
||||
expect(getContextWindowForModel('MiniMax-M2.7')).toBe(204_800)
|
||||
expect(getModelMaxOutputTokens('MiniMax-M2.7')).toEqual({
|
||||
default: 131_072,
|
||||
upperLimit: 131_072,
|
||||
})
|
||||
expect(getMaxOutputTokensForModel('MiniMax-M2.7')).toBe(131_072)
|
||||
})
|
||||
|
||||
test('unknown openai-compatible models still use the conservative fallback window', () => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
||||
|
||||
expect(getContextWindowForModel('some-unknown-3p-model')).toBe(8_000)
|
||||
})
|
||||
|
||||
@@ -72,16 +72,23 @@ export function getContextWindowForModel(
|
||||
return 1_000_000
|
||||
}
|
||||
|
||||
// OpenAI-compatible provider — use known context windows for the model
|
||||
if (
|
||||
// OpenAI-compatible provider — use known context windows for the model.
|
||||
// Unknown models get a conservative 8k default so auto-compact triggers
|
||||
// before hitting a hard context_window_exceeded error.
|
||||
const isOpenAIProvider =
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI) ||
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
|
||||
) {
|
||||
if (isOpenAIProvider) {
|
||||
const openaiWindow = getOpenAIContextWindow(model)
|
||||
if (openaiWindow !== undefined) {
|
||||
return openaiWindow
|
||||
}
|
||||
console.error(
|
||||
`[context] Warning: model "${model}" not in context window table — using conservative 8k default. ` +
|
||||
'Add it to src/utils/model/openaiContextWindows.ts for accurate compaction.',
|
||||
)
|
||||
return 8_000
|
||||
}
|
||||
|
||||
const cap = getModelCapability(model)
|
||||
|
||||
@@ -69,3 +69,93 @@ test('loadConversationForResume rejects oversized transcripts before resume hook
|
||||
)
|
||||
expect(hookSpy).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
test('deserializeMessagesWithInterruptDetection strips thinking blocks only for OpenAI-compatible providers', async () => {
|
||||
const serializedMessages = [
|
||||
user(id(10), 'hello'),
|
||||
{
|
||||
type: 'assistant',
|
||||
uuid: id(11),
|
||||
parentUuid: id(10),
|
||||
timestamp: ts,
|
||||
cwd: '/tmp',
|
||||
sessionId,
|
||||
version: 'test',
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content: [
|
||||
{ type: 'thinking', thinking: 'secret reasoning' },
|
||||
{ type: 'text', text: 'visible reply' },
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
type: 'assistant',
|
||||
uuid: id(12),
|
||||
parentUuid: id(11),
|
||||
timestamp: ts,
|
||||
cwd: '/tmp',
|
||||
sessionId,
|
||||
version: 'test',
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content: [{ type: 'thinking', thinking: 'only hidden reasoning' }],
|
||||
},
|
||||
},
|
||||
user(id(13), 'follow up'),
|
||||
]
|
||||
|
||||
mock.module('./model/providers.js', () => ({
|
||||
getAPIProvider: () => 'openai',
|
||||
isOpenAICompatibleProvider: (provider: string) =>
|
||||
provider === 'openai' ||
|
||||
provider === 'gemini' ||
|
||||
provider === 'github' ||
|
||||
provider === 'codex',
|
||||
}))
|
||||
|
||||
const openaiModule = await import(`./conversationRecovery.ts?provider=openai-${Date.now()}`)
|
||||
const thirdParty = openaiModule.deserializeMessagesWithInterruptDetection(serializedMessages as never[])
|
||||
const thirdPartyAssistantMessages = thirdParty.messages.filter(
|
||||
message => message.type === 'assistant',
|
||||
)
|
||||
|
||||
expect(thirdPartyAssistantMessages).toHaveLength(2)
|
||||
expect(thirdPartyAssistantMessages[0]?.message?.content).toEqual([
|
||||
{ type: 'text', text: 'visible reply' },
|
||||
])
|
||||
expect(
|
||||
JSON.stringify(thirdPartyAssistantMessages.map(message => message.message?.content)),
|
||||
).not.toContain('secret reasoning')
|
||||
expect(
|
||||
JSON.stringify(thirdPartyAssistantMessages.map(message => message.message?.content)),
|
||||
).not.toContain('only hidden reasoning')
|
||||
|
||||
mock.restore()
|
||||
mock.module('./model/providers.js', () => ({
|
||||
getAPIProvider: () => 'bedrock',
|
||||
isOpenAICompatibleProvider: (provider: string) =>
|
||||
provider === 'openai' ||
|
||||
provider === 'gemini' ||
|
||||
provider === 'github' ||
|
||||
provider === 'codex',
|
||||
}))
|
||||
|
||||
const bedrockModule = await import(`./conversationRecovery.ts?provider=bedrock-${Date.now()}`)
|
||||
const anthropicCompatible = bedrockModule.deserializeMessagesWithInterruptDetection(serializedMessages as never[])
|
||||
const anthropicAssistantMessages = anthropicCompatible.messages.filter(
|
||||
message => message.type === 'assistant',
|
||||
)
|
||||
|
||||
expect(anthropicAssistantMessages).toHaveLength(2)
|
||||
expect(anthropicAssistantMessages[0]?.message?.content).toEqual([
|
||||
{ type: 'thinking', thinking: 'secret reasoning' },
|
||||
{ type: 'text', text: 'visible reply' },
|
||||
])
|
||||
expect(
|
||||
JSON.stringify(anthropicAssistantMessages.map(message => message.message?.content)),
|
||||
).toContain('secret reasoning')
|
||||
expect(
|
||||
JSON.stringify(anthropicAssistantMessages.map(message => message.message?.content)),
|
||||
).not.toContain('only hidden reasoning')
|
||||
})
|
||||
|
||||
@@ -13,6 +13,7 @@ const originalSimple = process.env.CLAUDE_CODE_SIMPLE
|
||||
const sessionId = '00000000-0000-4000-8000-000000001999'
|
||||
const ts = '2026-04-02T00:00:00.000Z'
|
||||
|
||||
|
||||
function id(n: number): string {
|
||||
return `00000000-0000-4000-8000-${String(n).padStart(12, '0')}`
|
||||
}
|
||||
@@ -76,4 +77,3 @@ test('loadConversationForResume rejects oversized reconstructed transcripts', as
|
||||
'Reconstructed transcript is too large to resume safely',
|
||||
)
|
||||
})
|
||||
|
||||
|
||||
@@ -24,6 +24,7 @@ import {
|
||||
type FileHistorySnapshot,
|
||||
} from './fileHistory.js'
|
||||
import { logError } from './log.js'
|
||||
import { getAPIProvider } from './model/providers.js'
|
||||
import {
|
||||
createAssistantMessage,
|
||||
createUserMessage,
|
||||
@@ -177,6 +178,25 @@ export type DeserializeResult = {
|
||||
turnInterruptionState: TurnInterruptionState
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove thinking/redacted_thinking content blocks from assistant messages.
|
||||
* Messages that become empty after stripping are removed entirely.
|
||||
*/
|
||||
function stripThinkingBlocks(messages: NormalizedMessage[]): NormalizedMessage[] {
|
||||
return messages.reduce<NormalizedMessage[]>((acc, msg) => {
|
||||
if (msg.type !== 'assistant' || !Array.isArray(msg.message?.content)) {
|
||||
acc.push(msg)
|
||||
return acc
|
||||
}
|
||||
const filtered = msg.message.content.filter(
|
||||
(block: { type?: string }) => block.type !== 'thinking' && block.type !== 'redacted_thinking',
|
||||
)
|
||||
if (filtered.length === 0) return acc
|
||||
acc.push({ ...msg, message: { ...msg.message, content: filtered } })
|
||||
return acc
|
||||
}, [])
|
||||
}
|
||||
|
||||
/**
|
||||
* Deserializes messages from a log file into the format expected by the REPL.
|
||||
* Filters unresolved tool uses, orphaned thinking messages, and appends a
|
||||
@@ -227,10 +247,19 @@ export function deserializeMessagesWithInterruptDetection(
|
||||
filteredToolUses,
|
||||
) as NormalizedMessage[]
|
||||
|
||||
// Strip thinking/redacted_thinking content blocks from assistant messages
|
||||
// when resuming against a 3P provider. These Anthropic-specific blocks cause
|
||||
// 400 errors or context corruption on OpenAI-compatible providers (issue #248 finding 5).
|
||||
const provider = getAPIProvider()
|
||||
const isThirdPartyProvider = provider !== 'firstParty' && provider !== 'bedrock' && provider !== 'vertex' && provider !== 'foundry'
|
||||
const thinkingStripped = isThirdPartyProvider
|
||||
? stripThinkingBlocks(filteredThinking)
|
||||
: filteredThinking
|
||||
|
||||
// Filter out assistant messages with only whitespace text content.
|
||||
// This can happen when model outputs "\n\n" before thinking, user cancels mid-stream.
|
||||
const filteredMessages = filterWhitespaceOnlyAssistantMessages(
|
||||
filteredThinking,
|
||||
thinkingStripped,
|
||||
) as NormalizedMessage[]
|
||||
|
||||
const internalState = detectTurnInterruption(filteredMessages)
|
||||
|
||||
@@ -4,6 +4,10 @@ import { tmpdir } from 'os'
|
||||
import { join } from 'path'
|
||||
import { extractDraggedFilePaths } from './dragDropPaths.js'
|
||||
|
||||
function escapeFinderDraggedPath(filePath: string): string {
|
||||
return filePath.replace(/([\\ ])/g, '\\$1')
|
||||
}
|
||||
|
||||
describe('extractDraggedFilePaths', () => {
|
||||
// Paths that exist on any system.
|
||||
const thisFile = import.meta.path
|
||||
@@ -80,6 +84,12 @@ describe('extractDraggedFilePaths', () => {
|
||||
})
|
||||
})
|
||||
|
||||
test('escapeFinderDraggedPath escapes spaces and backslashes', () => {
|
||||
expect(escapeFinderDraggedPath('/tmp/my\\notes file.txt')).toBe(
|
||||
'/tmp/my\\\\notes\\ file.txt',
|
||||
)
|
||||
})
|
||||
|
||||
// Backslash-escaped paths are a Finder/macOS + Linux convention — on
|
||||
// Windows the shell-escape step is skipped, so these cases do not apply.
|
||||
if (process.platform !== 'win32') {
|
||||
@@ -92,7 +102,7 @@ describe('extractDraggedFilePaths', () => {
|
||||
|
||||
test('resolves an escaped real file with a space in its name', () => {
|
||||
// Raw form matches what a terminal delivers on Finder drag.
|
||||
const escaped = spacedFile.replace(/ /g, '\\ ')
|
||||
const escaped = escapeFinderDraggedPath(spacedFile)
|
||||
expect(extractDraggedFilePaths(escaped)).toEqual([spacedFile])
|
||||
})
|
||||
})
|
||||
|
||||
@@ -3,6 +3,7 @@ import { afterEach, beforeEach, expect, mock, test } from 'bun:test'
|
||||
type MockStorageData = Record<string, unknown>
|
||||
|
||||
const originalEnv = { ...process.env }
|
||||
const originalArgv = [...process.argv]
|
||||
let storageState: MockStorageData = {}
|
||||
|
||||
async function importFreshModule() {
|
||||
@@ -27,11 +28,14 @@ async function importFreshModule() {
|
||||
|
||||
beforeEach(() => {
|
||||
process.env = { ...originalEnv }
|
||||
delete process.env.CLAUDE_CODE_SIMPLE
|
||||
process.argv = originalArgv.filter(arg => arg !== '--bare')
|
||||
storageState = {}
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
process.env = { ...originalEnv }
|
||||
process.argv = [...originalArgv]
|
||||
storageState = {}
|
||||
mock.restore()
|
||||
})
|
||||
|
||||
@@ -10,6 +10,8 @@ describe('hydrateGithubModelsTokenFromSecureStorage', () => {
|
||||
CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
|
||||
GITHUB_TOKEN: process.env.GITHUB_TOKEN,
|
||||
GH_TOKEN: process.env.GH_TOKEN,
|
||||
CLAUDE_CODE_GITHUB_TOKEN_HYDRATED:
|
||||
process.env.CLAUDE_CODE_GITHUB_TOKEN_HYDRATED,
|
||||
CLAUDE_CODE_SIMPLE: process.env.CLAUDE_CODE_SIMPLE,
|
||||
}
|
||||
|
||||
@@ -39,15 +41,17 @@ describe('hydrateGithubModelsTokenFromSecureStorage', () => {
|
||||
}))
|
||||
|
||||
const { hydrateGithubModelsTokenFromSecureStorage } = await import(
|
||||
'./githubModelsCredentials.js'
|
||||
'./githubModelsCredentials.js?hydrate=sets-token'
|
||||
)
|
||||
hydrateGithubModelsTokenFromSecureStorage()
|
||||
expect(process.env.GITHUB_TOKEN).toBe('stored-secret')
|
||||
expect(process.env.CLAUDE_CODE_GITHUB_TOKEN_HYDRATED).toBe('1')
|
||||
})
|
||||
|
||||
test('does not override existing GITHUB_TOKEN', async () => {
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
process.env.GITHUB_TOKEN = 'already'
|
||||
delete process.env.CLAUDE_CODE_GITHUB_TOKEN_HYDRATED
|
||||
|
||||
mock.module('./secureStorage/index.js', () => ({
|
||||
getSecureStorage: () => ({
|
||||
@@ -58,9 +62,10 @@ describe('hydrateGithubModelsTokenFromSecureStorage', () => {
|
||||
}))
|
||||
|
||||
const { hydrateGithubModelsTokenFromSecureStorage } = await import(
|
||||
'./githubModelsCredentials.js'
|
||||
'./githubModelsCredentials.js?hydrate=preserve-existing'
|
||||
)
|
||||
hydrateGithubModelsTokenFromSecureStorage()
|
||||
expect(process.env.GITHUB_TOKEN).toBe('already')
|
||||
expect(process.env.CLAUDE_CODE_GITHUB_TOKEN_HYDRATED).toBeUndefined()
|
||||
})
|
||||
})
|
||||
|
||||
118
src/utils/githubModelsCredentials.refresh.test.ts
Normal file
118
src/utils/githubModelsCredentials.refresh.test.ts
Normal file
@@ -0,0 +1,118 @@
|
||||
import { afterEach, beforeEach, describe, expect, mock, test } from 'bun:test'
|
||||
|
||||
async function importFreshModule() {
|
||||
mock.restore()
|
||||
return import(`./githubModelsCredentials.ts?ts=${Date.now()}-${Math.random()}`)
|
||||
}
|
||||
|
||||
describe('refreshGithubModelsTokenIfNeeded', () => {
|
||||
const orig = {
|
||||
CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
|
||||
CLAUDE_CODE_SIMPLE: process.env.CLAUDE_CODE_SIMPLE,
|
||||
GITHUB_TOKEN: process.env.GITHUB_TOKEN,
|
||||
GH_TOKEN: process.env.GH_TOKEN,
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
mock.restore()
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
for (const [k, v] of Object.entries(orig)) {
|
||||
if (v === undefined) {
|
||||
delete process.env[k as keyof typeof orig]
|
||||
} else {
|
||||
process.env[k as keyof typeof orig] = v
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
test('refreshes expired Copilot token using stored OAuth token', async () => {
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
delete process.env.CLAUDE_CODE_SIMPLE
|
||||
delete process.env.GITHUB_TOKEN
|
||||
delete process.env.GH_TOKEN
|
||||
|
||||
const futureExp = Math.floor(Date.now() / 1000) + 3600
|
||||
let store: Record<string, unknown> = {
|
||||
githubModels: {
|
||||
accessToken: 'tid=stale;exp=1;sku=free',
|
||||
oauthAccessToken: 'ghu_oauth_secret',
|
||||
},
|
||||
}
|
||||
|
||||
mock.module('./secureStorage/index.js', () => ({
|
||||
getSecureStorage: () => ({
|
||||
read: () => store,
|
||||
update: (next: Record<string, unknown>) => {
|
||||
store = next
|
||||
return { success: true }
|
||||
},
|
||||
}),
|
||||
}))
|
||||
|
||||
mock.module('../services/github/deviceFlow.js', () => ({
|
||||
DEFAULT_GITHUB_DEVICE_SCOPE: 'read:user',
|
||||
exchangeForCopilotToken: async () => ({
|
||||
token: `tid=fresh;exp=${futureExp};sku=free`,
|
||||
expires_at: futureExp,
|
||||
refresh_in: 1500,
|
||||
endpoints: { api: 'https://api.githubcopilot.com' },
|
||||
}),
|
||||
}))
|
||||
|
||||
const { refreshGithubModelsTokenIfNeeded } = await importFreshModule()
|
||||
|
||||
const refreshed = await refreshGithubModelsTokenIfNeeded()
|
||||
expect(refreshed).toBe(true)
|
||||
expect(process.env.GITHUB_TOKEN?.startsWith('tid=fresh;exp=')).toBe(true)
|
||||
|
||||
const githubModels = (store.githubModels ?? {}) as {
|
||||
accessToken?: string
|
||||
oauthAccessToken?: string
|
||||
}
|
||||
expect(githubModels.accessToken?.startsWith('tid=fresh;exp=')).toBe(true)
|
||||
expect(githubModels.oauthAccessToken).toBe('ghu_oauth_secret')
|
||||
})
|
||||
|
||||
test('does not refresh when current Copilot token is valid', async () => {
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
delete process.env.CLAUDE_CODE_SIMPLE
|
||||
delete process.env.GITHUB_TOKEN
|
||||
delete process.env.GH_TOKEN
|
||||
|
||||
const futureExp = Math.floor(Date.now() / 1000) + 3600
|
||||
const exchangeSpy = mock(async () => ({
|
||||
token: `tid=unexpected;exp=${futureExp};sku=free`,
|
||||
expires_at: futureExp,
|
||||
refresh_in: 1500,
|
||||
endpoints: { api: 'https://api.githubcopilot.com' },
|
||||
}))
|
||||
|
||||
mock.module('./secureStorage/index.js', () => ({
|
||||
getSecureStorage: () => ({
|
||||
read: () => ({
|
||||
githubModels: {
|
||||
accessToken: `tid=already-valid;exp=${futureExp};sku=free`,
|
||||
oauthAccessToken: 'ghu_oauth_secret',
|
||||
},
|
||||
}),
|
||||
update: () => ({ success: true }),
|
||||
}),
|
||||
}))
|
||||
|
||||
mock.module('../services/github/deviceFlow.js', () => ({
|
||||
DEFAULT_GITHUB_DEVICE_SCOPE: 'read:user',
|
||||
exchangeForCopilotToken: exchangeSpy,
|
||||
}))
|
||||
|
||||
const { refreshGithubModelsTokenIfNeeded } = await importFreshModule()
|
||||
|
||||
const refreshed = await refreshGithubModelsTokenIfNeeded()
|
||||
expect(refreshed).toBe(false)
|
||||
expect(exchangeSpy).not.toHaveBeenCalled()
|
||||
expect(process.env.GITHUB_TOKEN?.startsWith('tid=already-valid;exp=')).toBe(
|
||||
true,
|
||||
)
|
||||
})
|
||||
})
|
||||
@@ -1,13 +1,11 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
|
||||
import {
|
||||
clearGithubModelsToken,
|
||||
readGithubModelsToken,
|
||||
saveGithubModelsToken,
|
||||
} from './githubModelsCredentials.js'
|
||||
|
||||
describe('readGithubModelsToken', () => {
|
||||
test('returns undefined in bare mode', () => {
|
||||
test('returns undefined in bare mode', async () => {
|
||||
const { readGithubModelsToken } = await import(
|
||||
'./githubModelsCredentials.js?read-bare-mode'
|
||||
)
|
||||
|
||||
const prev = process.env.CLAUDE_CODE_SIMPLE
|
||||
process.env.CLAUDE_CODE_SIMPLE = '1'
|
||||
expect(readGithubModelsToken()).toBeUndefined()
|
||||
@@ -20,7 +18,11 @@ describe('readGithubModelsToken', () => {
|
||||
})
|
||||
|
||||
describe('saveGithubModelsToken / clearGithubModelsToken', () => {
|
||||
test('save returns failure in bare mode', () => {
|
||||
test('save returns failure in bare mode', async () => {
|
||||
const { saveGithubModelsToken } = await import(
|
||||
'./githubModelsCredentials.js?save-bare-mode'
|
||||
)
|
||||
|
||||
const prev = process.env.CLAUDE_CODE_SIMPLE
|
||||
process.env.CLAUDE_CODE_SIMPLE = '1'
|
||||
const r = saveGithubModelsToken('abc')
|
||||
@@ -33,7 +35,11 @@ describe('saveGithubModelsToken / clearGithubModelsToken', () => {
|
||||
}
|
||||
})
|
||||
|
||||
test('clear succeeds in bare mode', () => {
|
||||
test('clear succeeds in bare mode', async () => {
|
||||
const { clearGithubModelsToken } = await import(
|
||||
'./githubModelsCredentials.js?clear-bare-mode'
|
||||
)
|
||||
|
||||
const prev = process.env.CLAUDE_CODE_SIMPLE
|
||||
process.env.CLAUDE_CODE_SIMPLE = '1'
|
||||
expect(clearGithubModelsToken().success).toBe(true)
|
||||
|
||||
@@ -1,11 +1,46 @@
|
||||
import { isBareMode, isEnvTruthy } from './envUtils.js'
|
||||
import { getSecureStorage } from './secureStorage/index.js'
|
||||
import { exchangeForCopilotToken } from '../services/github/deviceFlow.js'
|
||||
|
||||
/** JSON key in the shared OpenClaude secure storage blob. */
|
||||
export const GITHUB_MODELS_STORAGE_KEY = 'githubModels' as const
|
||||
export const GITHUB_MODELS_HYDRATED_ENV_MARKER =
|
||||
'CLAUDE_CODE_GITHUB_TOKEN_HYDRATED' as const
|
||||
|
||||
export type GithubModelsCredentialBlob = {
|
||||
accessToken: string
|
||||
oauthAccessToken?: string
|
||||
}
|
||||
|
||||
type GithubTokenStatus = 'valid' | 'expired' | 'invalid_format'
|
||||
|
||||
function checkGithubTokenStatus(token: string): GithubTokenStatus {
|
||||
const expMatch = token.match(/exp=(\d+)/)
|
||||
if (expMatch) {
|
||||
const expSeconds = Number(expMatch[1])
|
||||
if (!Number.isNaN(expSeconds)) {
|
||||
return Date.now() >= expSeconds * 1000 ? 'expired' : 'valid'
|
||||
}
|
||||
}
|
||||
|
||||
const parts = token.split('.')
|
||||
const looksLikeJwt =
|
||||
parts.length === 3 && parts.every(part => /^[A-Za-z0-9_-]+$/.test(part))
|
||||
if (looksLikeJwt) {
|
||||
try {
|
||||
const normalized = parts[1].replace(/-/g, '+').replace(/_/g, '/')
|
||||
const padded = normalized + '='.repeat((4 - (normalized.length % 4)) % 4)
|
||||
const json = Buffer.from(padded, 'base64').toString('utf8')
|
||||
const parsed = JSON.parse(json)
|
||||
if (parsed && typeof parsed === 'object' && parsed.exp) {
|
||||
return Date.now() >= (parsed.exp as number) * 1000 ? 'expired' : 'valid'
|
||||
}
|
||||
} catch {
|
||||
return 'invalid_format'
|
||||
}
|
||||
}
|
||||
|
||||
return 'invalid_format'
|
||||
}
|
||||
|
||||
export function readGithubModelsToken(): string | undefined {
|
||||
@@ -21,27 +56,105 @@ export function readGithubModelsToken(): string | undefined {
|
||||
}
|
||||
}
|
||||
|
||||
export async function readGithubModelsTokenAsync(): Promise<string | undefined> {
|
||||
if (isBareMode()) return undefined
|
||||
try {
|
||||
const data = (await getSecureStorage().readAsync()) as
|
||||
| ({ githubModels?: GithubModelsCredentialBlob } & Record<string, unknown>)
|
||||
| null
|
||||
const t = data?.githubModels?.accessToken?.trim()
|
||||
return t || undefined
|
||||
} catch {
|
||||
return undefined
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* If GitHub Models mode is on and no token is in the environment, copy the
|
||||
* stored token into process.env so the OpenAI shim and validation see it.
|
||||
*/
|
||||
export function hydrateGithubModelsTokenFromSecureStorage(): void {
|
||||
if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) {
|
||||
delete process.env[GITHUB_MODELS_HYDRATED_ENV_MARKER]
|
||||
return
|
||||
}
|
||||
if (process.env.GITHUB_TOKEN?.trim() || process.env.GH_TOKEN?.trim()) {
|
||||
if (process.env.GH_TOKEN?.trim()) {
|
||||
delete process.env[GITHUB_MODELS_HYDRATED_ENV_MARKER]
|
||||
return
|
||||
}
|
||||
if (process.env.GITHUB_TOKEN?.trim()) {
|
||||
delete process.env[GITHUB_MODELS_HYDRATED_ENV_MARKER]
|
||||
return
|
||||
}
|
||||
if (isBareMode()) {
|
||||
delete process.env[GITHUB_MODELS_HYDRATED_ENV_MARKER]
|
||||
return
|
||||
}
|
||||
const t = readGithubModelsToken()
|
||||
if (t) {
|
||||
process.env.GITHUB_TOKEN = t
|
||||
process.env[GITHUB_MODELS_HYDRATED_ENV_MARKER] = '1'
|
||||
return
|
||||
}
|
||||
delete process.env[GITHUB_MODELS_HYDRATED_ENV_MARKER]
|
||||
}
|
||||
|
||||
/**
|
||||
* Startup auto-refresh for GitHub Models mode.
|
||||
*
|
||||
* If a stored Copilot token is expired/invalid and an OAuth token is present,
|
||||
* exchange the OAuth token for a fresh Copilot token and persist it.
|
||||
*/
|
||||
export async function refreshGithubModelsTokenIfNeeded(): Promise<boolean> {
|
||||
if (!isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) {
|
||||
return false
|
||||
}
|
||||
if (isBareMode()) {
|
||||
return false
|
||||
}
|
||||
|
||||
try {
|
||||
const secureStorage = getSecureStorage()
|
||||
const data = secureStorage.read() as
|
||||
| ({ githubModels?: GithubModelsCredentialBlob } & Record<string, unknown>)
|
||||
| null
|
||||
const blob = data?.githubModels
|
||||
const accessToken = blob?.accessToken?.trim() || ''
|
||||
const oauthToken = blob?.oauthAccessToken?.trim() || ''
|
||||
|
||||
if (!accessToken && !oauthToken) {
|
||||
return false
|
||||
}
|
||||
|
||||
const status = accessToken ? checkGithubTokenStatus(accessToken) : 'expired'
|
||||
if (status === 'valid') {
|
||||
if (!process.env.GITHUB_TOKEN?.trim() && !process.env.GH_TOKEN?.trim()) {
|
||||
process.env.GITHUB_TOKEN = accessToken
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if (!oauthToken) {
|
||||
return false
|
||||
}
|
||||
|
||||
const refreshed = await exchangeForCopilotToken(oauthToken)
|
||||
const saved = saveGithubModelsToken(refreshed.token, oauthToken)
|
||||
if (!saved.success) {
|
||||
return false
|
||||
}
|
||||
|
||||
process.env.GITHUB_TOKEN = refreshed.token
|
||||
return true
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
export function saveGithubModelsToken(token: string): {
|
||||
export function saveGithubModelsToken(
|
||||
token: string,
|
||||
oauthToken?: string,
|
||||
): {
|
||||
success: boolean
|
||||
warning?: string
|
||||
} {
|
||||
@@ -54,9 +167,21 @@ export function saveGithubModelsToken(token: string): {
|
||||
}
|
||||
const secureStorage = getSecureStorage()
|
||||
const prev = secureStorage.read() || {}
|
||||
const prevGithubModels = (prev as Record<string, unknown>)[
|
||||
GITHUB_MODELS_STORAGE_KEY
|
||||
] as GithubModelsCredentialBlob | undefined
|
||||
const oauthTrimmed = oauthToken?.trim()
|
||||
const mergedBlob: GithubModelsCredentialBlob = {
|
||||
accessToken: trimmed,
|
||||
}
|
||||
if (oauthTrimmed) {
|
||||
mergedBlob.oauthAccessToken = oauthTrimmed
|
||||
} else if (prevGithubModels?.oauthAccessToken?.trim()) {
|
||||
mergedBlob.oauthAccessToken = prevGithubModels.oauthAccessToken.trim()
|
||||
}
|
||||
const merged = {
|
||||
...(prev as Record<string, unknown>),
|
||||
[GITHUB_MODELS_STORAGE_KEY]: { accessToken: trimmed },
|
||||
[GITHUB_MODELS_STORAGE_KEY]: mergedBlob,
|
||||
}
|
||||
return secureStorage.update(merged as typeof prev)
|
||||
}
|
||||
|
||||
@@ -35,6 +35,8 @@ export const CLAUDE_3_7_SONNET_CONFIG = {
|
||||
foundry: 'claude-3-7-sonnet',
|
||||
openai: 'gpt-4o-mini',
|
||||
gemini: 'gemini-2.0-flash',
|
||||
github: 'github:copilot',
|
||||
codex: 'gpt-5.4',
|
||||
} as const satisfies ModelConfig
|
||||
|
||||
export const CLAUDE_3_5_V2_SONNET_CONFIG = {
|
||||
@@ -44,6 +46,8 @@ export const CLAUDE_3_5_V2_SONNET_CONFIG = {
|
||||
foundry: 'claude-3-5-sonnet',
|
||||
openai: 'gpt-4o-mini',
|
||||
gemini: 'gemini-2.0-flash',
|
||||
github: 'github:copilot',
|
||||
codex: 'gpt-5.4',
|
||||
} as const satisfies ModelConfig
|
||||
|
||||
export const CLAUDE_3_5_HAIKU_CONFIG = {
|
||||
@@ -53,6 +57,8 @@ export const CLAUDE_3_5_HAIKU_CONFIG = {
|
||||
foundry: 'claude-3-5-haiku',
|
||||
openai: 'gpt-4o-mini',
|
||||
gemini: 'gemini-2.0-flash-lite',
|
||||
github: 'github:copilot',
|
||||
codex: 'gpt-5.4',
|
||||
} as const satisfies ModelConfig
|
||||
|
||||
export const CLAUDE_HAIKU_4_5_CONFIG = {
|
||||
@@ -62,6 +68,8 @@ export const CLAUDE_HAIKU_4_5_CONFIG = {
|
||||
foundry: 'claude-haiku-4-5',
|
||||
openai: 'gpt-4o-mini',
|
||||
gemini: 'gemini-2.0-flash-lite',
|
||||
github: 'github:copilot',
|
||||
codex: 'gpt-5.4',
|
||||
} as const satisfies ModelConfig
|
||||
|
||||
export const CLAUDE_SONNET_4_CONFIG = {
|
||||
@@ -71,6 +79,8 @@ export const CLAUDE_SONNET_4_CONFIG = {
|
||||
foundry: 'claude-sonnet-4',
|
||||
openai: 'gpt-4o-mini',
|
||||
gemini: 'gemini-2.0-flash',
|
||||
github: 'github:copilot',
|
||||
codex: 'gpt-5.4',
|
||||
} as const satisfies ModelConfig
|
||||
|
||||
export const CLAUDE_SONNET_4_5_CONFIG = {
|
||||
@@ -80,6 +90,8 @@ export const CLAUDE_SONNET_4_5_CONFIG = {
|
||||
foundry: 'claude-sonnet-4-5',
|
||||
openai: 'gpt-4o',
|
||||
gemini: 'gemini-2.0-flash',
|
||||
github: 'github:copilot',
|
||||
codex: 'gpt-5.4',
|
||||
} as const satisfies ModelConfig
|
||||
|
||||
export const CLAUDE_OPUS_4_CONFIG = {
|
||||
@@ -89,6 +101,8 @@ export const CLAUDE_OPUS_4_CONFIG = {
|
||||
foundry: 'claude-opus-4',
|
||||
openai: 'gpt-4o',
|
||||
gemini: 'gemini-2.5-pro-preview-03-25',
|
||||
github: 'github:copilot',
|
||||
codex: 'gpt-5.4',
|
||||
} as const satisfies ModelConfig
|
||||
|
||||
export const CLAUDE_OPUS_4_1_CONFIG = {
|
||||
@@ -98,6 +112,8 @@ export const CLAUDE_OPUS_4_1_CONFIG = {
|
||||
foundry: 'claude-opus-4-1',
|
||||
openai: 'gpt-4o',
|
||||
gemini: 'gemini-2.5-pro-preview-03-25',
|
||||
github: 'github:copilot',
|
||||
codex: 'gpt-5.4',
|
||||
} as const satisfies ModelConfig
|
||||
|
||||
export const CLAUDE_OPUS_4_5_CONFIG = {
|
||||
@@ -107,6 +123,8 @@ export const CLAUDE_OPUS_4_5_CONFIG = {
|
||||
foundry: 'claude-opus-4-5',
|
||||
openai: 'gpt-4o',
|
||||
gemini: 'gemini-2.5-pro-preview-03-25',
|
||||
github: 'github:copilot',
|
||||
codex: 'gpt-5.4',
|
||||
} as const satisfies ModelConfig
|
||||
|
||||
export const CLAUDE_OPUS_4_6_CONFIG = {
|
||||
@@ -116,6 +134,8 @@ export const CLAUDE_OPUS_4_6_CONFIG = {
|
||||
foundry: 'claude-opus-4-6',
|
||||
openai: 'gpt-4o',
|
||||
gemini: 'gemini-2.5-pro-preview-03-25',
|
||||
github: 'github:copilot',
|
||||
codex: 'gpt-5.4',
|
||||
} as const satisfies ModelConfig
|
||||
|
||||
export const CLAUDE_SONNET_4_6_CONFIG = {
|
||||
@@ -125,6 +145,8 @@ export const CLAUDE_SONNET_4_6_CONFIG = {
|
||||
foundry: 'claude-sonnet-4-6',
|
||||
openai: 'gpt-4o',
|
||||
gemini: 'gemini-2.0-flash',
|
||||
github: 'github:copilot',
|
||||
codex: 'gpt-5.4',
|
||||
} as const satisfies ModelConfig
|
||||
|
||||
// @[MODEL LAUNCH]: Register the new config here.
|
||||
|
||||
351
src/utils/model/copilotModels.ts
Normal file
351
src/utils/model/copilotModels.ts
Normal file
@@ -0,0 +1,351 @@
|
||||
/**
|
||||
* Hardcoded Copilot model registry from models.dev/api.json
|
||||
* These are the 19 models available through GitHub Copilot.
|
||||
*/
|
||||
|
||||
export type CopilotModel = {
|
||||
id: string
|
||||
name: string
|
||||
family: string
|
||||
attachment: boolean
|
||||
reasoning: boolean
|
||||
tool_call: boolean
|
||||
temperature: boolean
|
||||
knowledge: string
|
||||
release_date: string
|
||||
last_updated: string
|
||||
modalities: {
|
||||
input: string[]
|
||||
output: string[]
|
||||
}
|
||||
open_weights: boolean
|
||||
cost: {
|
||||
input: number
|
||||
output: number
|
||||
cache_read?: number
|
||||
}
|
||||
limit: {
|
||||
context: number
|
||||
input?: number
|
||||
output: number
|
||||
}
|
||||
}
|
||||
|
||||
export const COPILOT_MODELS: Record<string, CopilotModel> = {
|
||||
'gpt-5.4': {
|
||||
id: 'gpt-5.4',
|
||||
name: 'GPT-5.4',
|
||||
family: 'gpt',
|
||||
attachment: false,
|
||||
reasoning: true,
|
||||
tool_call: true,
|
||||
temperature: true,
|
||||
knowledge: '2025-05',
|
||||
release_date: '2025-05-01',
|
||||
last_updated: '2025-05-01',
|
||||
modalities: { input: ['text'], output: ['text'] },
|
||||
open_weights: false,
|
||||
cost: { input: 0, output: 0 },
|
||||
limit: { context: 400000, output: 32768 },
|
||||
},
|
||||
'gpt-5.4-mini': {
|
||||
id: 'gpt-5.4-mini',
|
||||
name: 'GPT-5.4 mini',
|
||||
family: 'gpt-mini',
|
||||
attachment: false,
|
||||
reasoning: true,
|
||||
tool_call: true,
|
||||
temperature: true,
|
||||
knowledge: '2025-05',
|
||||
release_date: '2025-05-01',
|
||||
last_updated: '2025-05-01',
|
||||
modalities: { input: ['text'], output: ['text'] },
|
||||
open_weights: false,
|
||||
cost: { input: 0, output: 0 },
|
||||
limit: { context: 400000, output: 32768 },
|
||||
},
|
||||
'gpt-5.3-codex': {
|
||||
id: 'gpt-5.3-codex',
|
||||
name: 'GPT-5.3-Codex',
|
||||
family: 'gpt-codex',
|
||||
attachment: false,
|
||||
reasoning: true,
|
||||
tool_call: true,
|
||||
temperature: true,
|
||||
knowledge: '2025-05',
|
||||
release_date: '2025-05-01',
|
||||
last_updated: '2025-05-01',
|
||||
modalities: { input: ['text'], output: ['text'] },
|
||||
open_weights: false,
|
||||
cost: { input: 0, output: 0 },
|
||||
limit: { context: 400000, output: 32768 },
|
||||
},
|
||||
'gpt-5.2-codex': {
|
||||
id: 'gpt-5.2-codex',
|
||||
name: 'GPT-5.2-Codex',
|
||||
family: 'gpt-codex',
|
||||
attachment: false,
|
||||
reasoning: true,
|
||||
tool_call: true,
|
||||
temperature: true,
|
||||
knowledge: '2025-05',
|
||||
release_date: '2025-05-01',
|
||||
last_updated: '2025-05-01',
|
||||
modalities: { input: ['text'], output: ['text'] },
|
||||
open_weights: false,
|
||||
cost: { input: 0, output: 0 },
|
||||
limit: { context: 400000, output: 32768 },
|
||||
},
|
||||
'gpt-5.2': {
|
||||
id: 'gpt-5.2',
|
||||
name: 'GPT-5.2',
|
||||
family: 'gpt',
|
||||
attachment: false,
|
||||
reasoning: true,
|
||||
tool_call: true,
|
||||
temperature: true,
|
||||
knowledge: '2025-05',
|
||||
release_date: '2025-05-01',
|
||||
last_updated: '2025-05-01',
|
||||
modalities: { input: ['text'], output: ['text'] },
|
||||
open_weights: false,
|
||||
cost: { input: 0, output: 0 },
|
||||
limit: { context: 264000, output: 32768 },
|
||||
},
|
||||
'gpt-5.1-codex': {
|
||||
id: 'gpt-5.1-codex',
|
||||
name: 'GPT-5.1-Codex',
|
||||
family: 'gpt-codex',
|
||||
attachment: false,
|
||||
reasoning: true,
|
||||
tool_call: true,
|
||||
temperature: true,
|
||||
knowledge: '2025-05',
|
||||
release_date: '2025-05-01',
|
||||
last_updated: '2025-05-01',
|
||||
modalities: { input: ['text'], output: ['text'] },
|
||||
open_weights: false,
|
||||
cost: { input: 0, output: 0 },
|
||||
limit: { context: 400000, output: 32768 },
|
||||
},
|
||||
'gpt-5.1-codex-max': {
|
||||
id: 'gpt-5.1-codex-max',
|
||||
name: 'GPT-5.1-Codex-max',
|
||||
family: 'gpt-codex',
|
||||
attachment: false,
|
||||
reasoning: true,
|
||||
tool_call: true,
|
||||
temperature: true,
|
||||
knowledge: '2025-05',
|
||||
release_date: '2025-05-01',
|
||||
last_updated: '2025-05-01',
|
||||
modalities: { input: ['text'], output: ['text'] },
|
||||
open_weights: false,
|
||||
cost: { input: 0, output: 0 },
|
||||
limit: { context: 400000, output: 32768 },
|
||||
},
|
||||
'gpt-5.1-codex-mini': {
|
||||
id: 'gpt-5.1-codex-mini',
|
||||
name: 'GPT-5.1-Codex-mini',
|
||||
family: 'gpt-codex',
|
||||
attachment: false,
|
||||
reasoning: true,
|
||||
tool_call: true,
|
||||
temperature: true,
|
||||
knowledge: '2025-05',
|
||||
release_date: '2025-05-01',
|
||||
last_updated: '2025-05-01',
|
||||
modalities: { input: ['text'], output: ['text'] },
|
||||
open_weights: false,
|
||||
cost: { input: 0, output: 0 },
|
||||
limit: { context: 400000, output: 32768 },
|
||||
},
|
||||
'gpt-4o': {
|
||||
id: 'gpt-4o',
|
||||
name: 'GPT-4o',
|
||||
family: 'gpt',
|
||||
attachment: true,
|
||||
reasoning: false,
|
||||
tool_call: true,
|
||||
temperature: true,
|
||||
knowledge: '2023-10',
|
||||
release_date: '2024-05-01',
|
||||
last_updated: '2024-05-01',
|
||||
modalities: { input: ['text', 'image'], output: ['text'] },
|
||||
open_weights: false,
|
||||
cost: { input: 0, output: 0 },
|
||||
limit: { context: 128000, output: 16384 },
|
||||
},
|
||||
'gpt-4.1': {
|
||||
id: 'gpt-4.1',
|
||||
name: 'GPT-4.1',
|
||||
family: 'gpt',
|
||||
attachment: false,
|
||||
reasoning: false,
|
||||
tool_call: true,
|
||||
temperature: true,
|
||||
knowledge: '2024-06',
|
||||
release_date: '2024-06-01',
|
||||
last_updated: '2024-06-01',
|
||||
modalities: { input: ['text'], output: ['text'] },
|
||||
open_weights: false,
|
||||
cost: { input: 0, output: 0 },
|
||||
limit: { context: 128000, output: 32768 },
|
||||
},
|
||||
'claude-opus-4.6': {
|
||||
id: 'claude-opus-4.6',
|
||||
name: 'Claude Opus 4.6',
|
||||
family: 'claude-opus',
|
||||
attachment: true,
|
||||
reasoning: true,
|
||||
tool_call: true,
|
||||
temperature: true,
|
||||
knowledge: '2025-05',
|
||||
release_date: '2025-05-01',
|
||||
last_updated: '2025-05-01',
|
||||
modalities: { input: ['text', 'image'], output: ['text'] },
|
||||
open_weights: false,
|
||||
cost: { input: 0, output: 0 },
|
||||
limit: { context: 144000, output: 32768 },
|
||||
},
|
||||
'claude-opus-4.5': {
|
||||
id: 'claude-opus-4.5',
|
||||
name: 'Claude Opus 4.5',
|
||||
family: 'claude-opus',
|
||||
attachment: true,
|
||||
reasoning: true,
|
||||
tool_call: true,
|
||||
temperature: true,
|
||||
knowledge: '2025-05',
|
||||
release_date: '2025-05-01',
|
||||
last_updated: '2025-05-01',
|
||||
modalities: { input: ['text', 'image'], output: ['text'] },
|
||||
open_weights: false,
|
||||
cost: { input: 0, output: 0 },
|
||||
limit: { context: 160000, output: 32768 },
|
||||
},
|
||||
'claude-sonnet-4.6': {
|
||||
id: 'claude-sonnet-4.6',
|
||||
name: 'Claude Sonnet 4.6',
|
||||
family: 'claude-sonnet',
|
||||
attachment: true,
|
||||
reasoning: true,
|
||||
tool_call: true,
|
||||
temperature: true,
|
||||
knowledge: '2025-05',
|
||||
release_date: '2025-05-01',
|
||||
last_updated: '2025-05-01',
|
||||
modalities: { input: ['text', 'image'], output: ['text'] },
|
||||
open_weights: false,
|
||||
cost: { input: 0, output: 0 },
|
||||
limit: { context: 200000, output: 32768 },
|
||||
},
|
||||
'claude-sonnet-4.5': {
|
||||
id: 'claude-sonnet-4.5',
|
||||
name: 'Claude Sonnet 4.5',
|
||||
family: 'claude-sonnet',
|
||||
attachment: true,
|
||||
reasoning: true,
|
||||
tool_call: true,
|
||||
temperature: true,
|
||||
knowledge: '2025-05',
|
||||
release_date: '2025-05-01',
|
||||
last_updated: '2025-05-01',
|
||||
modalities: { input: ['text', 'image'], output: ['text'] },
|
||||
open_weights: false,
|
||||
cost: { input: 0, output: 0 },
|
||||
limit: { context: 144000, output: 32768 },
|
||||
},
|
||||
'claude-haiku-4.5': {
|
||||
id: 'claude-haiku-4.5',
|
||||
name: 'Claude Haiku 4.5',
|
||||
family: 'claude-haiku',
|
||||
attachment: true,
|
||||
reasoning: true,
|
||||
tool_call: true,
|
||||
temperature: true,
|
||||
knowledge: '2025-05',
|
||||
release_date: '2025-05-01',
|
||||
last_updated: '2025-05-01',
|
||||
modalities: { input: ['text', 'image'], output: ['text'] },
|
||||
open_weights: false,
|
||||
cost: { input: 0, output: 0 },
|
||||
limit: { context: 144000, output: 32768 },
|
||||
},
|
||||
'gemini-3.1-pro-preview': {
|
||||
id: 'gemini-3.1-pro-preview',
|
||||
name: 'Gemini 3.1 Pro Preview',
|
||||
family: 'gemini-pro',
|
||||
attachment: true,
|
||||
reasoning: true,
|
||||
tool_call: true,
|
||||
temperature: true,
|
||||
knowledge: '2025-05',
|
||||
release_date: '2025-05-01',
|
||||
last_updated: '2025-05-01',
|
||||
modalities: { input: ['text', 'image', 'audio'], output: ['text'] },
|
||||
open_weights: false,
|
||||
cost: { input: 0, output: 0 },
|
||||
limit: { context: 128000, output: 32768 },
|
||||
},
|
||||
'gemini-3-flash-preview': {
|
||||
id: 'gemini-3-flash-preview',
|
||||
name: 'Gemini 3 Flash',
|
||||
family: 'gemini-flash',
|
||||
attachment: true,
|
||||
reasoning: true,
|
||||
tool_call: true,
|
||||
temperature: true,
|
||||
knowledge: '2025-05',
|
||||
release_date: '2025-05-01',
|
||||
last_updated: '2025-05-01',
|
||||
modalities: { input: ['text', 'image'], output: ['text'] },
|
||||
open_weights: false,
|
||||
cost: { input: 0, output: 0 },
|
||||
limit: { context: 128000, output: 32768 },
|
||||
},
|
||||
'gemini-2.5-pro': {
|
||||
id: 'gemini-2.5-pro',
|
||||
name: 'Gemini 2.5 Pro',
|
||||
family: 'gemini-pro',
|
||||
attachment: true,
|
||||
reasoning: false,
|
||||
tool_call: true,
|
||||
temperature: true,
|
||||
knowledge: '2025-05',
|
||||
release_date: '2025-05-01',
|
||||
last_updated: '2025-05-01',
|
||||
modalities: { input: ['text', 'image'], output: ['text'] },
|
||||
open_weights: false,
|
||||
cost: { input: 0, output: 0 },
|
||||
limit: { context: 128000, output: 32768 },
|
||||
},
|
||||
'grok-code-fast-1': {
|
||||
id: 'grok-code-fast-1',
|
||||
name: 'Grok Code Fast 1',
|
||||
family: 'grok',
|
||||
attachment: false,
|
||||
reasoning: true,
|
||||
tool_call: true,
|
||||
temperature: true,
|
||||
knowledge: '2025-05',
|
||||
release_date: '2025-05-01',
|
||||
last_updated: '2025-05-01',
|
||||
modalities: { input: ['text'], output: ['text'] },
|
||||
open_weights: false,
|
||||
cost: { input: 0, output: 0 },
|
||||
limit: { context: 128000, output: 32768 },
|
||||
},
|
||||
}
|
||||
|
||||
export function getCopilotModelIds(): string[] {
|
||||
return Object.keys(COPILOT_MODELS)
|
||||
}
|
||||
|
||||
export function getCopilotModel(id: string): CopilotModel | undefined {
|
||||
return COPILOT_MODELS[id]
|
||||
}
|
||||
|
||||
export function getAllCopilotModels(): CopilotModel[] {
|
||||
return Object.values(COPILOT_MODELS)
|
||||
}
|
||||
@@ -43,6 +43,10 @@ export function getSmallFastModel(): ModelName {
|
||||
if (getAPIProvider() === 'openai') {
|
||||
return process.env.OPENAI_MODEL || 'gpt-4o-mini'
|
||||
}
|
||||
// For GitHub Copilot provider
|
||||
if (getAPIProvider() === 'github') {
|
||||
return process.env.OPENAI_MODEL || 'github:copilot'
|
||||
}
|
||||
return getDefaultHaikuModel()
|
||||
}
|
||||
|
||||
@@ -80,7 +84,9 @@ export function getUserSpecifiedModelSetting(): ModelSetting | undefined {
|
||||
const provider = getAPIProvider()
|
||||
specifiedModel =
|
||||
(provider === 'gemini' ? process.env.GEMINI_MODEL : undefined) ||
|
||||
(provider === 'openai' || provider === 'gemini' ? process.env.OPENAI_MODEL : undefined) ||
|
||||
(provider === 'openai' || provider === 'gemini' || provider === 'github'
|
||||
? process.env.OPENAI_MODEL
|
||||
: undefined) ||
|
||||
(provider === 'firstParty' ? process.env.ANTHROPIC_MODEL : undefined) ||
|
||||
settings.model ||
|
||||
undefined
|
||||
@@ -135,6 +141,10 @@ export function getDefaultOpusModel(): ModelName {
|
||||
if (getAPIProvider() === 'codex') {
|
||||
return process.env.OPENAI_MODEL || 'gpt-5.4'
|
||||
}
|
||||
// GitHub Copilot provider
|
||||
if (getAPIProvider() === 'github') {
|
||||
return process.env.OPENAI_MODEL || 'github:copilot'
|
||||
}
|
||||
// 3P providers (Bedrock, Vertex, Foundry) — kept as a separate branch
|
||||
// even when values match, since 3P availability lags firstParty and
|
||||
// these will diverge again at the next model launch.
|
||||
@@ -161,6 +171,10 @@ export function getDefaultSonnetModel(): ModelName {
|
||||
if (getAPIProvider() === 'codex') {
|
||||
return process.env.OPENAI_MODEL || 'gpt-5.4'
|
||||
}
|
||||
// GitHub Copilot provider
|
||||
if (getAPIProvider() === 'github') {
|
||||
return process.env.OPENAI_MODEL || 'github:copilot'
|
||||
}
|
||||
// Default to Sonnet 4.5 for 3P since they may not have 4.6 yet
|
||||
if (getAPIProvider() !== 'firstParty') {
|
||||
return getModelStrings().sonnet45
|
||||
@@ -173,10 +187,6 @@ export function getDefaultHaikuModel(): ModelName {
|
||||
if (process.env.ANTHROPIC_DEFAULT_HAIKU_MODEL) {
|
||||
return process.env.ANTHROPIC_DEFAULT_HAIKU_MODEL
|
||||
}
|
||||
// Gemini provider
|
||||
if (getAPIProvider() === 'gemini') {
|
||||
return process.env.GEMINI_MODEL || 'gemini-2.0-flash-lite'
|
||||
}
|
||||
// OpenAI provider
|
||||
if (getAPIProvider() === 'openai') {
|
||||
return process.env.OPENAI_MODEL || 'gpt-4o-mini'
|
||||
@@ -185,6 +195,14 @@ export function getDefaultHaikuModel(): ModelName {
|
||||
if (getAPIProvider() === 'codex') {
|
||||
return process.env.OPENAI_MODEL || 'gpt-5.4'
|
||||
}
|
||||
// GitHub Copilot provider
|
||||
if (getAPIProvider() === 'github') {
|
||||
return process.env.OPENAI_MODEL || 'github:copilot'
|
||||
}
|
||||
// Gemini provider
|
||||
if (getAPIProvider() === 'gemini') {
|
||||
return process.env.GEMINI_MODEL || 'gemini-2.0-flash-lite'
|
||||
}
|
||||
|
||||
// Haiku 4.5 is available on all platforms (first-party, Foundry, Bedrock, Vertex)
|
||||
return getModelStrings().haiku45
|
||||
@@ -229,6 +247,11 @@ export function getRuntimeMainLoopModel(params: {
|
||||
* @returns The default model setting to use
|
||||
*/
|
||||
export function getDefaultMainLoopModelSetting(): ModelName | ModelAlias {
|
||||
// GitHub Copilot provider: check settings.model first, then env, then default
|
||||
if (getAPIProvider() === 'github') {
|
||||
const settings = getSettings_DEPRECATED() || {}
|
||||
return settings.model || process.env.OPENAI_MODEL || 'github:copilot'
|
||||
}
|
||||
// Gemini provider: always use the configured Gemini model
|
||||
if (getAPIProvider() === 'gemini') {
|
||||
return process.env.GEMINI_MODEL || 'gemini-2.0-flash'
|
||||
@@ -420,8 +443,33 @@ export function renderModelSetting(setting: ModelName | ModelAlias): string {
|
||||
* if the model is not recognized as a public model.
|
||||
*/
|
||||
export function getPublicModelDisplayName(model: ModelName): string | null {
|
||||
// For OpenAI/Gemini/Codex providers, show the actual model name not a Claude alias
|
||||
if (getAPIProvider() === 'openai' || getAPIProvider() === 'gemini' || getAPIProvider() === 'codex') {
|
||||
// For OpenAI/Gemini/Codex/GitHub providers, show the actual model name not a Claude alias
|
||||
if (getAPIProvider() === 'openai' || getAPIProvider() === 'gemini' || getAPIProvider() === 'codex' || getAPIProvider() === 'github') {
|
||||
// Return display names for known GitHub Copilot models
|
||||
const copilotModelNames: Record<string, string> = {
|
||||
'gpt-5.4': 'GPT-5.4',
|
||||
'gpt-5.4-mini': 'GPT-5.4 mini',
|
||||
'gpt-5.3-codex': 'GPT-5.3 Codex',
|
||||
'gpt-5.2-codex': 'GPT-5.2 Codex',
|
||||
'gpt-5.2': 'GPT-5.2',
|
||||
'gpt-5.1-codex': 'GPT-5.1 Codex',
|
||||
'gpt-5.1-codex-max': 'GPT-5.1 Codex max',
|
||||
'gpt-5.1-codex-mini': 'GPT-5.1 Codex mini',
|
||||
'gpt-4o': 'GPT-4o',
|
||||
'gpt-4.1': 'GPT-4.1',
|
||||
'claude-opus-4.6': 'Claude Opus 4.6',
|
||||
'claude-opus-4.5': 'Claude Opus 4.5',
|
||||
'claude-sonnet-4.6': 'Claude Sonnet 4.6',
|
||||
'claude-sonnet-4.5': 'Claude Sonnet 4.5',
|
||||
'claude-haiku-4.5': 'Claude Haiku 4.5',
|
||||
'gemini-3.1-pro-preview': 'Gemini 3.1 Pro Preview',
|
||||
'gemini-3-flash-preview': 'Gemini 3 Flash',
|
||||
'gemini-2.5-pro': 'Gemini 2.5 Pro',
|
||||
'grok-code-fast-1': 'Grok Code Fast 1',
|
||||
}
|
||||
if (copilotModelNames[model]) {
|
||||
return copilotModelNames[model]
|
||||
}
|
||||
return null
|
||||
}
|
||||
switch (model) {
|
||||
@@ -478,6 +526,10 @@ export function renderModelName(model: ModelName): string {
|
||||
if (publicName) {
|
||||
return publicName
|
||||
}
|
||||
// Handle GitHub Copilot special model aliases
|
||||
if (model === 'github:copilot') {
|
||||
return 'GPT-4o'
|
||||
}
|
||||
if (process.env.USER_TYPE === 'ant') {
|
||||
const resolved = parseUserSpecifiedModel(model)
|
||||
const antModel = resolveAntModel(model)
|
||||
|
||||
84
src/utils/model/modelOptions.github.test.ts
Normal file
84
src/utils/model/modelOptions.github.test.ts
Normal file
@@ -0,0 +1,84 @@
|
||||
import { afterEach, beforeEach, expect, mock, test } from 'bun:test'
|
||||
|
||||
import { resetModelStringsForTestingOnly } from '../../bootstrap/state.js'
|
||||
import { saveGlobalConfig } from '../config.js'
|
||||
|
||||
async function importFreshModelOptionsModule() {
|
||||
mock.restore()
|
||||
mock.module('./providers.js', () => ({
|
||||
getAPIProvider: () => 'github',
|
||||
}))
|
||||
const nonce = `${Date.now()}-${Math.random()}`
|
||||
return import(`./modelOptions.js?ts=${nonce}`)
|
||||
}
|
||||
|
||||
const originalEnv = {
|
||||
CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
|
||||
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
|
||||
CLAUDE_CODE_USE_GEMINI: process.env.CLAUDE_CODE_USE_GEMINI,
|
||||
CLAUDE_CODE_USE_BEDROCK: process.env.CLAUDE_CODE_USE_BEDROCK,
|
||||
CLAUDE_CODE_USE_VERTEX: process.env.CLAUDE_CODE_USE_VERTEX,
|
||||
CLAUDE_CODE_USE_FOUNDRY: process.env.CLAUDE_CODE_USE_FOUNDRY,
|
||||
OPENAI_MODEL: process.env.OPENAI_MODEL,
|
||||
OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
|
||||
ANTHROPIC_CUSTOM_MODEL_OPTION: process.env.ANTHROPIC_CUSTOM_MODEL_OPTION,
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
mock.restore()
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.CLAUDE_CODE_USE_OPENAI
|
||||
delete process.env.CLAUDE_CODE_USE_GEMINI
|
||||
delete process.env.CLAUDE_CODE_USE_BEDROCK
|
||||
delete process.env.CLAUDE_CODE_USE_VERTEX
|
||||
delete process.env.CLAUDE_CODE_USE_FOUNDRY
|
||||
delete process.env.OPENAI_MODEL
|
||||
delete process.env.OPENAI_BASE_URL
|
||||
delete process.env.ANTHROPIC_CUSTOM_MODEL_OPTION
|
||||
resetModelStringsForTestingOnly()
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = originalEnv.CLAUDE_CODE_USE_GITHUB
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
|
||||
process.env.CLAUDE_CODE_USE_GEMINI = originalEnv.CLAUDE_CODE_USE_GEMINI
|
||||
process.env.CLAUDE_CODE_USE_BEDROCK = originalEnv.CLAUDE_CODE_USE_BEDROCK
|
||||
process.env.CLAUDE_CODE_USE_VERTEX = originalEnv.CLAUDE_CODE_USE_VERTEX
|
||||
process.env.CLAUDE_CODE_USE_FOUNDRY = originalEnv.CLAUDE_CODE_USE_FOUNDRY
|
||||
process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
|
||||
process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
|
||||
process.env.ANTHROPIC_CUSTOM_MODEL_OPTION =
|
||||
originalEnv.ANTHROPIC_CUSTOM_MODEL_OPTION
|
||||
saveGlobalConfig(current => ({
|
||||
...current,
|
||||
additionalModelOptionsCache: [],
|
||||
additionalModelOptionsCacheScope: undefined,
|
||||
openaiAdditionalModelOptionsCache: [],
|
||||
openaiAdditionalModelOptionsCacheByProfile: {},
|
||||
providerProfiles: [],
|
||||
activeProviderProfileId: undefined,
|
||||
}))
|
||||
resetModelStringsForTestingOnly()
|
||||
})
|
||||
|
||||
test('GitHub provider exposes default + all Copilot models in /model options', async () => {
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
delete process.env.CLAUDE_CODE_USE_OPENAI
|
||||
delete process.env.CLAUDE_CODE_USE_GEMINI
|
||||
delete process.env.CLAUDE_CODE_USE_BEDROCK
|
||||
delete process.env.CLAUDE_CODE_USE_VERTEX
|
||||
delete process.env.CLAUDE_CODE_USE_FOUNDRY
|
||||
|
||||
process.env.OPENAI_MODEL = 'gpt-4o'
|
||||
delete process.env.ANTHROPIC_CUSTOM_MODEL_OPTION
|
||||
|
||||
const { getModelOptions } = await importFreshModelOptionsModule()
|
||||
const options = getModelOptions(false)
|
||||
const nonDefault = options.filter(
|
||||
(option: { value: unknown }) => option.value !== null,
|
||||
)
|
||||
|
||||
expect(nonDefault.length).toBeGreaterThan(1)
|
||||
expect(nonDefault.some((o: { value: unknown }) => o.value === 'gpt-4o')).toBe(true)
|
||||
expect(nonDefault.some((o: { value: unknown }) => o.value === 'gpt-5.3-codex')).toBe(true)
|
||||
})
|
||||
@@ -35,6 +35,7 @@ import { has1mContext } from '../context.js'
|
||||
import { getGlobalConfig } from '../config.js'
|
||||
import { getActiveOpenAIModelOptionsCache } from '../providerProfiles.js'
|
||||
import { getCachedOllamaModelOptions, isOllamaProvider } from './ollamaModels.js'
|
||||
import { getAntModels } from './antModels.js'
|
||||
|
||||
// @[MODEL LAUNCH]: Update all the available and default model option strings below.
|
||||
|
||||
@@ -351,7 +352,22 @@ function getCodexModelOptions(): ModelOption[] {
|
||||
|
||||
// @[MODEL LAUNCH]: Update the model picker lists below to include/reorder options for the new model.
|
||||
// Each user tier (ant, Max/Team Premium, Pro/Team Standard/Enterprise, PAYG 1P, PAYG 3P) has its own list.
|
||||
|
||||
import { getAllCopilotModels } from './copilotModels.js'
|
||||
|
||||
function getCopilotModelOptions(): ModelOption[] {
|
||||
return getAllCopilotModels().map(m => ({
|
||||
value: m.id,
|
||||
label: m.name,
|
||||
description: `${m.family}${m.reasoning ? ' · Reasoning' : ''}${m.tool_call ? ' · Tool call' : ''} · ${Math.round(m.limit.context / 1000)}K context`,
|
||||
}))
|
||||
}
|
||||
|
||||
function getModelOptionsBase(fastMode = false): ModelOption[] {
|
||||
if (getAPIProvider() === 'github') {
|
||||
return [getDefaultOptionForUser(fastMode), ...getCopilotModelOptions()]
|
||||
}
|
||||
|
||||
// When using Ollama, show models from the Ollama server instead of Claude models
|
||||
if (getAPIProvider() === 'openai' && isOllamaProvider()) {
|
||||
const defaultOption = getDefaultOptionForUser(fastMode)
|
||||
@@ -579,6 +595,10 @@ function getKnownModelOption(model: string): ModelOption | null {
|
||||
}
|
||||
|
||||
export function getModelOptions(fastMode = false): ModelOption[] {
|
||||
if (getAPIProvider() === 'github') {
|
||||
return filterModelOptionsByAllowlist(getModelOptionsBase(fastMode))
|
||||
}
|
||||
|
||||
const options = getModelOptionsBase(fastMode)
|
||||
|
||||
// Add the custom model from the ANTHROPIC_CUSTOM_MODEL_OPTION env var
|
||||
|
||||
54
src/utils/model/modelStrings.github.test.ts
Normal file
54
src/utils/model/modelStrings.github.test.ts
Normal file
@@ -0,0 +1,54 @@
|
||||
import { afterEach, expect, test } from 'bun:test'
|
||||
|
||||
import { resetModelStringsForTestingOnly } from '../../bootstrap/state.js'
|
||||
import { parseUserSpecifiedModel } from './model.js'
|
||||
import { getModelStrings } from './modelStrings.js'
|
||||
|
||||
const originalEnv = {
|
||||
CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
|
||||
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
|
||||
CLAUDE_CODE_USE_GEMINI: process.env.CLAUDE_CODE_USE_GEMINI,
|
||||
CLAUDE_CODE_USE_BEDROCK: process.env.CLAUDE_CODE_USE_BEDROCK,
|
||||
CLAUDE_CODE_USE_VERTEX: process.env.CLAUDE_CODE_USE_VERTEX,
|
||||
CLAUDE_CODE_USE_FOUNDRY: process.env.CLAUDE_CODE_USE_FOUNDRY,
|
||||
}
|
||||
|
||||
function clearProviderFlags(): void {
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
delete process.env.CLAUDE_CODE_USE_OPENAI
|
||||
delete process.env.CLAUDE_CODE_USE_GEMINI
|
||||
delete process.env.CLAUDE_CODE_USE_BEDROCK
|
||||
delete process.env.CLAUDE_CODE_USE_VERTEX
|
||||
delete process.env.CLAUDE_CODE_USE_FOUNDRY
|
||||
}
|
||||
|
||||
afterEach(() => {
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = originalEnv.CLAUDE_CODE_USE_GITHUB
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
|
||||
process.env.CLAUDE_CODE_USE_GEMINI = originalEnv.CLAUDE_CODE_USE_GEMINI
|
||||
process.env.CLAUDE_CODE_USE_BEDROCK = originalEnv.CLAUDE_CODE_USE_BEDROCK
|
||||
process.env.CLAUDE_CODE_USE_VERTEX = originalEnv.CLAUDE_CODE_USE_VERTEX
|
||||
process.env.CLAUDE_CODE_USE_FOUNDRY = originalEnv.CLAUDE_CODE_USE_FOUNDRY
|
||||
resetModelStringsForTestingOnly()
|
||||
})
|
||||
|
||||
test('GitHub provider model strings are concrete IDs', () => {
|
||||
clearProviderFlags()
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
|
||||
const modelStrings = getModelStrings()
|
||||
|
||||
for (const value of Object.values(modelStrings)) {
|
||||
expect(typeof value).toBe('string')
|
||||
expect(value.trim().length).toBeGreaterThan(0)
|
||||
}
|
||||
})
|
||||
|
||||
test('GitHub provider model strings are safe to parse', () => {
|
||||
clearProviderFlags()
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
|
||||
const modelStrings = getModelStrings()
|
||||
|
||||
expect(() => parseUserSpecifiedModel(modelStrings.sonnet46 as any)).not.toThrow()
|
||||
})
|
||||
@@ -25,7 +25,7 @@ const MODEL_KEYS = Object.keys(ALL_MODEL_CONFIGS) as ModelKey[]
|
||||
function getBuiltinModelStrings(provider: APIProvider): ModelStrings {
|
||||
// Codex piggybacks on the OpenAI provider transport for Anthropic tier aliases.
|
||||
// Reuse OpenAI mappings so model string lookups never return undefined.
|
||||
const providerKey = provider === 'codex' ? 'openai' : provider
|
||||
const providerKey = provider === 'codex' || provider === 'github' ? 'openai' : provider
|
||||
const out = {} as ModelStrings
|
||||
for (const key of MODEL_KEYS) {
|
||||
out[key] = ALL_MODEL_CONFIGS[key][providerKey]
|
||||
|
||||
@@ -44,6 +44,10 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
|
||||
'mistral-large-latest': 131_072,
|
||||
'mistral-small-latest': 131_072,
|
||||
|
||||
// MiniMax
|
||||
'MiniMax-M2.7': 204_800,
|
||||
'minimax-m2.7': 204_800,
|
||||
|
||||
// Google (via OpenRouter)
|
||||
'google/gemini-2.0-flash':1_048_576,
|
||||
'google/gemini-2.5-pro': 1_048_576,
|
||||
@@ -110,6 +114,10 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
|
||||
'mistral-large-latest': 32_768,
|
||||
'mistral-small-latest': 32_768,
|
||||
|
||||
// MiniMax
|
||||
'MiniMax-M2.7': 131_072,
|
||||
'minimax-m2.7': 131_072,
|
||||
|
||||
// Google (via OpenRouter)
|
||||
'google/gemini-2.0-flash': 8_192,
|
||||
'google/gemini-2.5-pro': 65_536,
|
||||
|
||||
@@ -51,6 +51,7 @@ export const DANGEROUS_BASH_PATTERNS: readonly string[] = [
|
||||
'xargs',
|
||||
'sudo',
|
||||
// Internal-only: internal-only tools plus general tools that ant sandbox
|
||||
// data shows are frequently over-allowlisted as broad prefixes.
|
||||
// dotfile data shows are commonly over-allowlisted as broad prefixes.
|
||||
// These stay internal-only — external users don't have coo, and the rest are
|
||||
// an empirical-risk call grounded in ant sandbox data, not a universal
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { describe, expect, test, afterEach } from 'bun:test'
|
||||
import { afterEach, beforeEach, describe, expect, test } from 'bun:test'
|
||||
import {
|
||||
parseProviderFlag,
|
||||
applyProviderFlag,
|
||||
@@ -6,22 +6,52 @@ import {
|
||||
VALID_PROVIDERS,
|
||||
} from './providerFlag.js'
|
||||
|
||||
const originalEnv = { ...process.env }
|
||||
const ENV_KEYS = [
|
||||
'CLAUDE_CODE_USE_OPENAI',
|
||||
'CLAUDE_CODE_USE_GEMINI',
|
||||
'CLAUDE_CODE_USE_GITHUB',
|
||||
'CLAUDE_CODE_USE_BEDROCK',
|
||||
'CLAUDE_CODE_USE_VERTEX',
|
||||
'OPENAI_BASE_URL',
|
||||
'OPENAI_API_KEY',
|
||||
'OPENAI_MODEL',
|
||||
'GEMINI_MODEL',
|
||||
]
|
||||
|
||||
const originalEnv: Record<string, string | undefined> = {}
|
||||
|
||||
beforeEach(() => {
|
||||
for (const key of ENV_KEYS) {
|
||||
originalEnv[key] = process.env[key]
|
||||
delete process.env[key]
|
||||
}
|
||||
})
|
||||
|
||||
const RESET_KEYS = [
|
||||
'CLAUDE_CODE_USE_OPENAI',
|
||||
'CLAUDE_CODE_USE_GEMINI',
|
||||
'CLAUDE_CODE_USE_GITHUB',
|
||||
'CLAUDE_CODE_USE_BEDROCK',
|
||||
'CLAUDE_CODE_USE_VERTEX',
|
||||
'OPENAI_BASE_URL',
|
||||
'OPENAI_API_KEY',
|
||||
'OPENAI_MODEL',
|
||||
'GEMINI_MODEL',
|
||||
] as const
|
||||
|
||||
beforeEach(() => {
|
||||
for (const key of RESET_KEYS) {
|
||||
delete process.env[key]
|
||||
}
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
for (const key of [
|
||||
'CLAUDE_CODE_USE_OPENAI',
|
||||
'CLAUDE_CODE_USE_GEMINI',
|
||||
'CLAUDE_CODE_USE_GITHUB',
|
||||
'CLAUDE_CODE_USE_BEDROCK',
|
||||
'CLAUDE_CODE_USE_VERTEX',
|
||||
'OPENAI_BASE_URL',
|
||||
'OPENAI_API_KEY',
|
||||
'OPENAI_MODEL',
|
||||
'GEMINI_MODEL',
|
||||
]) {
|
||||
if (originalEnv[key] === undefined) delete process.env[key]
|
||||
else process.env[key] = originalEnv[key]
|
||||
for (const key of ENV_KEYS) {
|
||||
if (originalEnv[key] === undefined) {
|
||||
delete process.env[key]
|
||||
} else {
|
||||
process.env[key] = originalEnv[key]
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
|
||||
@@ -485,6 +485,26 @@ test('buildStartupEnvFromProfile leaves explicit provider selections untouched',
|
||||
assert.equal(env.OPENAI_API_KEY, undefined)
|
||||
})
|
||||
|
||||
test('buildStartupEnvFromProfile leaves profile-managed env untouched', async () => {
|
||||
const processEnv = {
|
||||
CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED: '1',
|
||||
ANTHROPIC_BASE_URL: 'https://api.anthropic.com',
|
||||
ANTHROPIC_MODEL: 'claude-sonnet-4-6',
|
||||
}
|
||||
|
||||
const env = await buildStartupEnvFromProfile({
|
||||
persisted: profile('openai', {
|
||||
OPENAI_API_KEY: 'sk-persisted',
|
||||
OPENAI_MODEL: 'gpt-4o',
|
||||
}),
|
||||
processEnv,
|
||||
})
|
||||
|
||||
assert.equal(env, processEnv)
|
||||
assert.equal(env.ANTHROPIC_MODEL, 'claude-sonnet-4-6')
|
||||
assert.equal(env.OPENAI_MODEL, undefined)
|
||||
})
|
||||
|
||||
test('buildStartupEnvFromProfile treats explicit falsey provider flags as user intent', async () => {
|
||||
const processEnv = {
|
||||
CLAUDE_CODE_USE_OPENAI: '0',
|
||||
|
||||
@@ -407,6 +407,11 @@ export function deleteProfileFile(options?: ProfileFileLocation): string {
|
||||
export function hasExplicitProviderSelection(
|
||||
processEnv: NodeJS.ProcessEnv = process.env,
|
||||
): boolean {
|
||||
// If env was already applied from a provider profile, preserve it.
|
||||
if (processEnv.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED === '1') {
|
||||
return true
|
||||
}
|
||||
|
||||
return (
|
||||
processEnv.CLAUDE_CODE_USE_OPENAI !== undefined ||
|
||||
processEnv.CLAUDE_CODE_USE_GITHUB !== undefined ||
|
||||
|
||||
@@ -2,10 +2,15 @@ import { afterEach, describe, expect, mock, test } from 'bun:test'
|
||||
|
||||
import type { ProviderProfile } from './config.js'
|
||||
|
||||
async function importFreshProvidersModule() {
|
||||
return import(`./model/providers.ts?ts=${Date.now()}-${Math.random()}`)
|
||||
}
|
||||
|
||||
const originalEnv = { ...process.env }
|
||||
|
||||
const RESTORED_KEYS = [
|
||||
'CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED',
|
||||
'CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID',
|
||||
'CLAUDE_CODE_USE_OPENAI',
|
||||
'CLAUDE_CODE_USE_GEMINI',
|
||||
'CLAUDE_CODE_USE_GITHUB',
|
||||
@@ -21,8 +26,35 @@ const RESTORED_KEYS = [
|
||||
'ANTHROPIC_API_KEY',
|
||||
] as const
|
||||
|
||||
type MockConfigState = {
|
||||
providerProfiles: ProviderProfile[]
|
||||
activeProviderProfileId?: string
|
||||
openaiAdditionalModelOptionsCache: unknown[]
|
||||
openaiAdditionalModelOptionsCacheByProfile: Record<string, unknown[]>
|
||||
additionalModelOptionsCache?: unknown[]
|
||||
additionalModelOptionsCacheScope?: string
|
||||
}
|
||||
|
||||
function createMockConfigState(): MockConfigState {
|
||||
return {
|
||||
providerProfiles: [],
|
||||
activeProviderProfileId: undefined,
|
||||
openaiAdditionalModelOptionsCache: [],
|
||||
openaiAdditionalModelOptionsCacheByProfile: {},
|
||||
additionalModelOptionsCache: [],
|
||||
additionalModelOptionsCacheScope: undefined,
|
||||
}
|
||||
}
|
||||
|
||||
let mockConfigState: MockConfigState = createMockConfigState()
|
||||
|
||||
function saveMockGlobalConfig(
|
||||
updater: (current: MockConfigState) => MockConfigState,
|
||||
): void {
|
||||
mockConfigState = updater(mockConfigState)
|
||||
}
|
||||
|
||||
afterEach(() => {
|
||||
mock.restore()
|
||||
for (const key of RESTORED_KEYS) {
|
||||
if (originalEnv[key] === undefined) {
|
||||
delete process.env[key]
|
||||
@@ -30,8 +62,31 @@ afterEach(() => {
|
||||
process.env[key] = originalEnv[key]
|
||||
}
|
||||
}
|
||||
|
||||
mock.restore()
|
||||
mockConfigState = createMockConfigState()
|
||||
})
|
||||
|
||||
async function importFreshProviderProfileModules() {
|
||||
mock.restore()
|
||||
mock.module('./config.js', () => ({
|
||||
getGlobalConfig: () => mockConfigState,
|
||||
saveGlobalConfig: (
|
||||
updater: (current: MockConfigState) => MockConfigState,
|
||||
) => {
|
||||
mockConfigState = updater(mockConfigState)
|
||||
},
|
||||
}))
|
||||
const nonce = `${Date.now()}-${Math.random()}`
|
||||
const providers = await import(`./model/providers.js?ts=${nonce}`)
|
||||
const providerProfiles = await import(`./providerProfiles.js?ts=${nonce}`)
|
||||
|
||||
return {
|
||||
...providers,
|
||||
...providerProfiles,
|
||||
}
|
||||
}
|
||||
|
||||
function buildProfile(overrides: Partial<ProviderProfile> = {}): ProviderProfile {
|
||||
return {
|
||||
id: 'provider_test',
|
||||
@@ -43,57 +98,31 @@ function buildProfile(overrides: Partial<ProviderProfile> = {}): ProviderProfile
|
||||
}
|
||||
}
|
||||
|
||||
async function importFreshProviderModules() {
|
||||
mock.restore()
|
||||
let configState = {
|
||||
providerProfiles: [] as ProviderProfile[],
|
||||
activeProviderProfileId: undefined as string | undefined,
|
||||
openaiAdditionalModelOptionsCache: [] as any[],
|
||||
openaiAdditionalModelOptionsCacheByProfile: {} as Record<string, any[]>,
|
||||
}
|
||||
|
||||
mock.module('./config.js', () => ({
|
||||
getGlobalConfig: () => configState,
|
||||
saveGlobalConfig: (
|
||||
updater: (current: typeof configState) => typeof configState,
|
||||
) => {
|
||||
configState = updater(configState)
|
||||
},
|
||||
}))
|
||||
|
||||
const providerProfiles = await import(
|
||||
`./providerProfiles.js?ts=${Date.now()}-${Math.random()}`
|
||||
)
|
||||
const providers = await import(
|
||||
`./model/providers.js?ts=${Date.now()}-${Math.random()}`
|
||||
)
|
||||
|
||||
return {
|
||||
...providerProfiles,
|
||||
...providers,
|
||||
}
|
||||
}
|
||||
|
||||
describe('applyProviderProfileToProcessEnv', () => {
|
||||
test('openai profile clears competing gemini/github flags', async () => {
|
||||
const { applyProviderProfileToProcessEnv } =
|
||||
await importFreshProviderProfileModules()
|
||||
process.env.CLAUDE_CODE_USE_GEMINI = '1'
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
const { applyProviderProfileToProcessEnv, getAPIProvider } =
|
||||
await importFreshProviderModules()
|
||||
|
||||
applyProviderProfileToProcessEnv(buildProfile())
|
||||
const { getAPIProvider: getFreshAPIProvider } =
|
||||
await importFreshProvidersModule()
|
||||
|
||||
expect(process.env.CLAUDE_CODE_USE_GEMINI).toBeUndefined()
|
||||
expect(process.env.CLAUDE_CODE_USE_GITHUB).toBeUndefined()
|
||||
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
|
||||
expect(getAPIProvider()).toBe('openai')
|
||||
expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1')
|
||||
expect(process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID).toBe(
|
||||
'provider_test',
|
||||
)
|
||||
expect(getFreshAPIProvider()).toBe('openai')
|
||||
})
|
||||
|
||||
test('anthropic profile clears competing gemini/github flags', async () => {
|
||||
const { applyProviderProfileToProcessEnv } =
|
||||
await importFreshProviderProfileModules()
|
||||
process.env.CLAUDE_CODE_USE_GEMINI = '1'
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
const { applyProviderProfileToProcessEnv, getAPIProvider } =
|
||||
await importFreshProviderModules()
|
||||
|
||||
applyProviderProfileToProcessEnv(
|
||||
buildProfile({
|
||||
@@ -102,21 +131,23 @@ describe('applyProviderProfileToProcessEnv', () => {
|
||||
model: 'claude-sonnet-4-6',
|
||||
}),
|
||||
)
|
||||
const { getAPIProvider: getFreshAPIProvider } =
|
||||
await importFreshProvidersModule()
|
||||
|
||||
expect(process.env.CLAUDE_CODE_USE_GEMINI).toBeUndefined()
|
||||
expect(process.env.CLAUDE_CODE_USE_GITHUB).toBeUndefined()
|
||||
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
|
||||
expect(getAPIProvider()).toBe('firstParty')
|
||||
expect(getFreshAPIProvider()).toBe('firstParty')
|
||||
})
|
||||
})
|
||||
|
||||
describe('applyActiveProviderProfileFromConfig', () => {
|
||||
test('does not override explicit startup provider selection', async () => {
|
||||
const { applyActiveProviderProfileFromConfig } =
|
||||
await importFreshProviderProfileModules()
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
|
||||
process.env.OPENAI_MODEL = 'qwen2.5:3b'
|
||||
const { applyActiveProviderProfileFromConfig } =
|
||||
await importFreshProviderModules()
|
||||
|
||||
const applied = applyActiveProviderProfileFromConfig({
|
||||
providerProfiles: [
|
||||
@@ -135,12 +166,12 @@ describe('applyActiveProviderProfileFromConfig', () => {
|
||||
})
|
||||
|
||||
test('does not override explicit startup selection when profile marker is stale', async () => {
|
||||
const { applyActiveProviderProfileFromConfig } =
|
||||
await importFreshProviderProfileModules()
|
||||
process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED = '1'
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
|
||||
process.env.OPENAI_MODEL = 'qwen2.5:3b'
|
||||
const { applyActiveProviderProfileFromConfig } =
|
||||
await importFreshProviderModules()
|
||||
|
||||
const applied = applyActiveProviderProfileFromConfig({
|
||||
providerProfiles: [
|
||||
@@ -154,12 +185,74 @@ describe('applyActiveProviderProfileFromConfig', () => {
|
||||
} as any)
|
||||
|
||||
expect(applied).toBeUndefined()
|
||||
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
|
||||
expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1')
|
||||
expect(process.env.OPENAI_BASE_URL).toBe('http://localhost:11434/v1')
|
||||
expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b')
|
||||
})
|
||||
|
||||
test('re-applies active profile when profile-managed env drifts', async () => {
|
||||
const { applyActiveProviderProfileFromConfig, applyProviderProfileToProcessEnv } =
|
||||
await importFreshProviderProfileModules()
|
||||
applyProviderProfileToProcessEnv(
|
||||
buildProfile({
|
||||
id: 'saved_openai',
|
||||
baseUrl: 'http://192.168.33.108:11434/v1',
|
||||
model: 'kimi-k2.5:cloud',
|
||||
}),
|
||||
)
|
||||
|
||||
// Simulate settings/env merge clobbering the model while profile flags remain.
|
||||
process.env.OPENAI_MODEL = 'github:copilot'
|
||||
|
||||
const applied = applyActiveProviderProfileFromConfig({
|
||||
providerProfiles: [
|
||||
buildProfile({
|
||||
id: 'saved_openai',
|
||||
baseUrl: 'http://192.168.33.108:11434/v1',
|
||||
model: 'kimi-k2.5:cloud',
|
||||
}),
|
||||
],
|
||||
activeProviderProfileId: 'saved_openai',
|
||||
} as any)
|
||||
|
||||
expect(applied?.id).toBe('saved_openai')
|
||||
expect(process.env.OPENAI_MODEL).toBe('kimi-k2.5:cloud')
|
||||
expect(process.env.OPENAI_BASE_URL).toBe('http://192.168.33.108:11434/v1')
|
||||
})
|
||||
|
||||
test('does not re-apply active profile when flags conflict with current provider', async () => {
|
||||
const { applyActiveProviderProfileFromConfig, applyProviderProfileToProcessEnv } =
|
||||
await importFreshProviderProfileModules()
|
||||
applyProviderProfileToProcessEnv(
|
||||
buildProfile({
|
||||
id: 'saved_openai',
|
||||
baseUrl: 'http://192.168.33.108:11434/v1',
|
||||
model: 'kimi-k2.5:cloud',
|
||||
}),
|
||||
)
|
||||
|
||||
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
||||
process.env.OPENAI_MODEL = 'github:copilot'
|
||||
|
||||
const applied = applyActiveProviderProfileFromConfig({
|
||||
providerProfiles: [
|
||||
buildProfile({
|
||||
id: 'saved_openai',
|
||||
baseUrl: 'http://192.168.33.108:11434/v1',
|
||||
model: 'kimi-k2.5:cloud',
|
||||
}),
|
||||
],
|
||||
activeProviderProfileId: 'saved_openai',
|
||||
} as any)
|
||||
|
||||
expect(applied).toBeUndefined()
|
||||
expect(process.env.CLAUDE_CODE_USE_GITHUB).toBe('1')
|
||||
expect(process.env.OPENAI_MODEL).toBe('github:copilot')
|
||||
})
|
||||
|
||||
test('applies active profile when no explicit provider is selected', async () => {
|
||||
const { applyActiveProviderProfileFromConfig } =
|
||||
await importFreshProviderProfileModules()
|
||||
delete process.env.CLAUDE_CODE_USE_OPENAI
|
||||
delete process.env.CLAUDE_CODE_USE_GEMINI
|
||||
delete process.env.CLAUDE_CODE_USE_GITHUB
|
||||
@@ -169,8 +262,6 @@ describe('applyActiveProviderProfileFromConfig', () => {
|
||||
|
||||
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
|
||||
process.env.OPENAI_MODEL = 'qwen2.5:3b'
|
||||
const { applyActiveProviderProfileFromConfig } =
|
||||
await importFreshProviderModules()
|
||||
|
||||
const applied = applyActiveProviderProfileFromConfig({
|
||||
providerProfiles: [
|
||||
@@ -184,16 +275,82 @@ describe('applyActiveProviderProfileFromConfig', () => {
|
||||
} as any)
|
||||
|
||||
expect(applied?.id).toBe('saved_openai')
|
||||
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
|
||||
expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1')
|
||||
expect(process.env.OPENAI_BASE_URL).toBe('https://api.openai.com/v1')
|
||||
expect(process.env.OPENAI_MODEL).toBe('gpt-4o')
|
||||
})
|
||||
})
|
||||
|
||||
describe('persistActiveProviderProfileModel', () => {
|
||||
test('updates active profile model and current env for profile-managed sessions', async () => {
|
||||
const {
|
||||
applyProviderProfileToProcessEnv,
|
||||
getProviderProfiles,
|
||||
persistActiveProviderProfileModel,
|
||||
} = await importFreshProviderProfileModules()
|
||||
const activeProfile = buildProfile({
|
||||
id: 'saved_openai',
|
||||
baseUrl: 'http://192.168.33.108:11434/v1',
|
||||
model: 'kimi-k2.5:cloud',
|
||||
})
|
||||
|
||||
saveMockGlobalConfig(current => ({
|
||||
...current,
|
||||
providerProfiles: [activeProfile],
|
||||
activeProviderProfileId: activeProfile.id,
|
||||
}))
|
||||
applyProviderProfileToProcessEnv(activeProfile)
|
||||
|
||||
const updated = persistActiveProviderProfileModel('minimax-m2.5:cloud')
|
||||
|
||||
expect(updated?.id).toBe(activeProfile.id)
|
||||
expect(updated?.model).toBe('minimax-m2.5:cloud')
|
||||
expect(process.env.OPENAI_MODEL).toBe('minimax-m2.5:cloud')
|
||||
expect(process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID).toBe(
|
||||
activeProfile.id,
|
||||
)
|
||||
|
||||
const saved = getProviderProfiles().find(
|
||||
(profile: ProviderProfile) => profile.id === activeProfile.id,
|
||||
)
|
||||
expect(saved?.model).toBe('minimax-m2.5:cloud')
|
||||
})
|
||||
|
||||
test('does not mutate process env when session is not profile-managed', async () => {
|
||||
const {
|
||||
getProviderProfiles,
|
||||
persistActiveProviderProfileModel,
|
||||
} = await importFreshProviderProfileModules()
|
||||
const activeProfile = buildProfile({
|
||||
id: 'saved_openai',
|
||||
model: 'kimi-k2.5:cloud',
|
||||
})
|
||||
|
||||
saveMockGlobalConfig(current => ({
|
||||
...current,
|
||||
providerProfiles: [activeProfile],
|
||||
activeProviderProfileId: activeProfile.id,
|
||||
}))
|
||||
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
process.env.OPENAI_MODEL = 'cli-model'
|
||||
delete process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED
|
||||
delete process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID
|
||||
|
||||
persistActiveProviderProfileModel('minimax-m2.5:cloud')
|
||||
|
||||
expect(process.env.OPENAI_MODEL).toBe('cli-model')
|
||||
const saved = getProviderProfiles().find(
|
||||
(profile: ProviderProfile) => profile.id === activeProfile.id,
|
||||
)
|
||||
expect(saved?.model).toBe('minimax-m2.5:cloud')
|
||||
})
|
||||
})
|
||||
|
||||
describe('getProviderPresetDefaults', () => {
|
||||
test('ollama preset defaults to a local Ollama model', async () => {
|
||||
const { getProviderPresetDefaults } = await importFreshProviderProfileModules()
|
||||
delete process.env.OPENAI_MODEL
|
||||
const { getProviderPresetDefaults } = await importFreshProviderModules()
|
||||
|
||||
const defaults = getProviderPresetDefaults('ollama')
|
||||
|
||||
@@ -205,21 +362,25 @@ describe('getProviderPresetDefaults', () => {
|
||||
describe('deleteProviderProfile', () => {
|
||||
test('deleting final profile clears provider env when active profile applied it', async () => {
|
||||
const {
|
||||
addProviderProfile,
|
||||
applyProviderProfileToProcessEnv,
|
||||
deleteProviderProfile,
|
||||
} =
|
||||
await importFreshProviderModules()
|
||||
const profile = addProviderProfile({
|
||||
name: 'Only Profile',
|
||||
provider: 'openai',
|
||||
baseUrl: 'https://api.openai.com/v1',
|
||||
model: 'gpt-4o',
|
||||
apiKey: 'sk-test',
|
||||
})
|
||||
} = await importFreshProviderProfileModules()
|
||||
applyProviderProfileToProcessEnv(
|
||||
buildProfile({
|
||||
id: 'only_profile',
|
||||
baseUrl: 'https://api.openai.com/v1',
|
||||
model: 'gpt-4o',
|
||||
apiKey: 'sk-test',
|
||||
}),
|
||||
)
|
||||
|
||||
expect(profile).not.toBeNull()
|
||||
saveMockGlobalConfig(current => ({
|
||||
...current,
|
||||
providerProfiles: [buildProfile({ id: 'only_profile' })],
|
||||
activeProviderProfileId: 'only_profile',
|
||||
}))
|
||||
|
||||
const result = deleteProviderProfile(profile!.id)
|
||||
const result = deleteProviderProfile('only_profile')
|
||||
|
||||
expect(result.removed).toBe(true)
|
||||
expect(result.activeProfileId).toBeUndefined()
|
||||
@@ -244,30 +405,24 @@ describe('deleteProviderProfile', () => {
|
||||
})
|
||||
|
||||
test('deleting final profile preserves explicit startup provider env', async () => {
|
||||
const { addProviderProfile, deleteProviderProfile } =
|
||||
await importFreshProviderModules()
|
||||
const profile = addProviderProfile({
|
||||
name: 'Only Profile',
|
||||
provider: 'openai',
|
||||
baseUrl: 'https://api.openai.com/v1',
|
||||
model: 'gpt-4o',
|
||||
})
|
||||
|
||||
expect(profile).not.toBeNull()
|
||||
|
||||
process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED = undefined
|
||||
delete process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED
|
||||
const { deleteProviderProfile } = await importFreshProviderProfileModules()
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
|
||||
process.env.OPENAI_MODEL = 'qwen2.5:3b'
|
||||
|
||||
const result = deleteProviderProfile(profile!.id)
|
||||
saveMockGlobalConfig(current => ({
|
||||
...current,
|
||||
providerProfiles: [buildProfile({ id: 'only_profile' })],
|
||||
activeProviderProfileId: 'only_profile',
|
||||
}))
|
||||
|
||||
const result = deleteProviderProfile('only_profile')
|
||||
|
||||
expect(result.removed).toBe(true)
|
||||
expect(result.activeProfileId).toBeUndefined()
|
||||
|
||||
expect(process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED).toBeUndefined()
|
||||
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
|
||||
expect(String(process.env.CLAUDE_CODE_USE_OPENAI)).toBe('1')
|
||||
expect(process.env.OPENAI_BASE_URL).toBe('http://localhost:11434/v1')
|
||||
expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b')
|
||||
})
|
||||
|
||||
@@ -37,6 +37,7 @@ export type ProviderPresetDefaults = Omit<ProviderProfileInput, 'provider'> & {
|
||||
const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434/v1'
|
||||
const DEFAULT_OLLAMA_MODEL = 'llama3.1:8b'
|
||||
const PROFILE_ENV_APPLIED_FLAG = 'CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED'
|
||||
const PROFILE_ENV_APPLIED_ID = 'CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED_ID'
|
||||
|
||||
function trimValue(value: string | undefined): string {
|
||||
return value?.trim() ?? ''
|
||||
@@ -264,6 +265,23 @@ function hasProviderSelectionFlags(
|
||||
)
|
||||
}
|
||||
|
||||
function hasConflictingProviderFlagsForProfile(
|
||||
processEnv: NodeJS.ProcessEnv,
|
||||
profile: ProviderProfile,
|
||||
): boolean {
|
||||
if (profile.provider === 'anthropic') {
|
||||
return hasProviderSelectionFlags(processEnv)
|
||||
}
|
||||
|
||||
return (
|
||||
processEnv.CLAUDE_CODE_USE_GEMINI !== undefined ||
|
||||
processEnv.CLAUDE_CODE_USE_GITHUB !== undefined ||
|
||||
processEnv.CLAUDE_CODE_USE_BEDROCK !== undefined ||
|
||||
processEnv.CLAUDE_CODE_USE_VERTEX !== undefined ||
|
||||
processEnv.CLAUDE_CODE_USE_FOUNDRY !== undefined
|
||||
)
|
||||
}
|
||||
|
||||
function sameOptionalEnvValue(
|
||||
left: string | undefined,
|
||||
right: string | undefined,
|
||||
@@ -284,6 +302,10 @@ function isProcessEnvAlignedWithProfile(
|
||||
return false
|
||||
}
|
||||
|
||||
if (trimOrUndefined(processEnv[PROFILE_ENV_APPLIED_ID]) !== profile.id) {
|
||||
return false
|
||||
}
|
||||
|
||||
if (profile.provider === 'anthropic') {
|
||||
return (
|
||||
!hasProviderSelectionFlags(processEnv) &&
|
||||
@@ -339,11 +361,13 @@ export function clearProviderProfileEnvFromProcessEnv(
|
||||
delete processEnv.ANTHROPIC_MODEL
|
||||
delete processEnv.ANTHROPIC_API_KEY
|
||||
delete processEnv[PROFILE_ENV_APPLIED_FLAG]
|
||||
delete processEnv[PROFILE_ENV_APPLIED_ID]
|
||||
}
|
||||
|
||||
export function applyProviderProfileToProcessEnv(profile: ProviderProfile): void {
|
||||
clearProviderProfileEnvFromProcessEnv()
|
||||
process.env[PROFILE_ENV_APPLIED_FLAG] = '1'
|
||||
process.env[PROFILE_ENV_APPLIED_ID] = profile.id
|
||||
|
||||
process.env.ANTHROPIC_MODEL = profile.model
|
||||
if (profile.provider === 'anthropic') {
|
||||
@@ -386,12 +410,24 @@ export function applyActiveProviderProfileFromConfig(
|
||||
return undefined
|
||||
}
|
||||
|
||||
const isCurrentEnvProfileManaged =
|
||||
processEnv[PROFILE_ENV_APPLIED_FLAG] === '1' &&
|
||||
trimOrUndefined(processEnv[PROFILE_ENV_APPLIED_ID]) === activeProfile.id
|
||||
|
||||
if (!options?.force && hasProviderSelectionFlags(processEnv)) {
|
||||
// Respect explicit startup provider intent. Re-apply only when the
|
||||
// current process env is already profile-managed and aligned.
|
||||
if (!isProcessEnvAlignedWithProfile(processEnv, activeProfile)) {
|
||||
// Respect explicit startup provider intent. Auto-heal only when this
|
||||
// exact active profile previously applied the current env.
|
||||
if (!isCurrentEnvProfileManaged) {
|
||||
return undefined
|
||||
}
|
||||
|
||||
if (hasConflictingProviderFlagsForProfile(processEnv, activeProfile)) {
|
||||
return undefined
|
||||
}
|
||||
|
||||
if (isProcessEnvAlignedWithProfile(processEnv, activeProfile)) {
|
||||
return activeProfile
|
||||
}
|
||||
}
|
||||
|
||||
applyProviderProfileToProcessEnv(activeProfile)
|
||||
@@ -496,6 +532,61 @@ export function updateProviderProfile(
|
||||
return updatedProfile
|
||||
}
|
||||
|
||||
export function persistActiveProviderProfileModel(
|
||||
model: string,
|
||||
): ProviderProfile | null {
|
||||
const nextModel = trimOrUndefined(model)
|
||||
if (!nextModel) {
|
||||
return null
|
||||
}
|
||||
|
||||
const activeProfile = getActiveProviderProfile()
|
||||
if (!activeProfile) {
|
||||
return null
|
||||
}
|
||||
|
||||
saveGlobalConfig(current => {
|
||||
const currentProfiles = getProviderProfiles(current)
|
||||
const profileIndex = currentProfiles.findIndex(
|
||||
profile => profile.id === activeProfile.id,
|
||||
)
|
||||
|
||||
if (profileIndex < 0) {
|
||||
return current
|
||||
}
|
||||
|
||||
const currentProfile = currentProfiles[profileIndex]
|
||||
if (currentProfile.model === nextModel) {
|
||||
return current
|
||||
}
|
||||
|
||||
const nextProfiles = [...currentProfiles]
|
||||
nextProfiles[profileIndex] = {
|
||||
...currentProfile,
|
||||
model: nextModel,
|
||||
}
|
||||
|
||||
return {
|
||||
...current,
|
||||
providerProfiles: nextProfiles,
|
||||
}
|
||||
})
|
||||
|
||||
const resolvedProfile = getActiveProviderProfile()
|
||||
if (!resolvedProfile || resolvedProfile.id !== activeProfile.id) {
|
||||
return null
|
||||
}
|
||||
|
||||
if (
|
||||
process.env[PROFILE_ENV_APPLIED_FLAG] === '1' &&
|
||||
trimOrUndefined(process.env[PROFILE_ENV_APPLIED_ID]) === resolvedProfile.id
|
||||
) {
|
||||
applyProviderProfileToProcessEnv(resolvedProfile)
|
||||
}
|
||||
|
||||
return resolvedProfile
|
||||
}
|
||||
|
||||
export function setActiveProviderProfile(
|
||||
profileId: string,
|
||||
): ProviderProfile | null {
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import {
|
||||
getGithubEndpointType,
|
||||
isLocalProviderUrl,
|
||||
resolveCodexApiCredentials,
|
||||
resolveProviderRequest,
|
||||
@@ -15,6 +16,51 @@ function isEnvTruthy(value: string | undefined): boolean {
|
||||
return normalized !== '' && normalized !== '0' && normalized !== 'false' && normalized !== 'no'
|
||||
}
|
||||
|
||||
type GithubTokenStatus = 'valid' | 'expired' | 'invalid_format'
|
||||
|
||||
const GITHUB_PAT_PREFIXES = ['ghp_', 'gho_', 'ghs_', 'ghr_', 'github_pat_']
|
||||
|
||||
function checkGithubTokenStatus(
|
||||
token: string,
|
||||
endpointType: 'copilot' | 'models' | 'custom' = 'copilot',
|
||||
): GithubTokenStatus {
|
||||
// PATs work with GitHub Models but not with Copilot API
|
||||
if (GITHUB_PAT_PREFIXES.some(prefix => token.startsWith(prefix))) {
|
||||
if (endpointType === 'copilot') {
|
||||
return 'expired'
|
||||
}
|
||||
return 'valid'
|
||||
}
|
||||
|
||||
const expMatch = token.match(/exp=(\d+)/)
|
||||
if (expMatch) {
|
||||
const expSeconds = Number(expMatch[1])
|
||||
if (!Number.isNaN(expSeconds)) {
|
||||
return Date.now() >= expSeconds * 1000 ? 'expired' : 'valid'
|
||||
}
|
||||
}
|
||||
|
||||
const parts = token.split('.')
|
||||
const looksLikeJwt =
|
||||
parts.length === 3 && parts.every(part => /^[A-Za-z0-9_-]+$/.test(part))
|
||||
if (looksLikeJwt) {
|
||||
try {
|
||||
const normalized = parts[1].replace(/-/g, '+').replace(/_/g, '/')
|
||||
const padded = normalized + '='.repeat((4 - (normalized.length % 4)) % 4)
|
||||
const json = Buffer.from(padded, 'base64').toString('utf8')
|
||||
const parsed = JSON.parse(json)
|
||||
if (parsed && typeof parsed === 'object' && parsed.exp) {
|
||||
return Date.now() >= (parsed.exp as number) * 1000 ? 'expired' : 'valid'
|
||||
}
|
||||
} catch {
|
||||
return 'invalid_format'
|
||||
}
|
||||
}
|
||||
|
||||
// Keep compatibility with opaque token formats that do not expose expiry.
|
||||
return 'valid'
|
||||
}
|
||||
|
||||
export async function getProviderValidationError(
|
||||
env: NodeJS.ProcessEnv = process.env,
|
||||
options?: {
|
||||
@@ -39,7 +85,19 @@ export async function getProviderValidationError(
|
||||
if (useGithub && !useOpenAI) {
|
||||
const token = (env.GITHUB_TOKEN?.trim() || env.GH_TOKEN?.trim()) ?? ''
|
||||
if (!token) {
|
||||
return 'GITHUB_TOKEN or GH_TOKEN is required when CLAUDE_CODE_USE_GITHUB=1.'
|
||||
return 'GitHub Copilot authentication required.\n' +
|
||||
'Run /onboard-github in the CLI to sign in with your GitHub account.\n' +
|
||||
'This will store your OAuth token securely and enable Copilot models.'
|
||||
}
|
||||
const endpointType = getGithubEndpointType(env.OPENAI_BASE_URL)
|
||||
const status = checkGithubTokenStatus(token, endpointType)
|
||||
if (status === 'expired') {
|
||||
return 'GitHub Copilot token has expired.\n' +
|
||||
'Run /onboard-github to sign in again and get a fresh token.'
|
||||
}
|
||||
if (status === 'invalid_format') {
|
||||
return 'GitHub Copilot token is invalid or corrupted.\n' +
|
||||
'Run /onboard-github to sign in again with your GitHub account.'
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
@@ -1,11 +1,52 @@
|
||||
import { expect, test } from 'bun:test'
|
||||
import path from 'path'
|
||||
|
||||
import { wrapRipgrepUnavailableError } from './ripgrep.ts'
|
||||
import { resolveRipgrepConfig, wrapRipgrepUnavailableError } from './ripgrep.js'
|
||||
|
||||
const MOCK_BUILTIN_PATH = path.normalize(
|
||||
process.platform === 'win32'
|
||||
? `vendor/ripgrep/${process.arch}-win32/rg.exe`
|
||||
: `vendor/ripgrep/${process.arch}-${process.platform}/rg`,
|
||||
)
|
||||
|
||||
test('ripgrepCommand falls back to system rg when builtin binary is missing', () => {
|
||||
const config = resolveRipgrepConfig({
|
||||
userWantsSystemRipgrep: false,
|
||||
bundledMode: false,
|
||||
builtinCommand: MOCK_BUILTIN_PATH,
|
||||
builtinExists: false,
|
||||
systemExecutablePath: '/usr/bin/rg',
|
||||
processExecPath: '/fake/bun',
|
||||
})
|
||||
|
||||
expect(config).toMatchObject({
|
||||
mode: 'system',
|
||||
command: 'rg',
|
||||
args: [],
|
||||
})
|
||||
})
|
||||
|
||||
test('ripgrepCommand keeps builtin mode when bundled binary exists', () => {
|
||||
const config = resolveRipgrepConfig({
|
||||
userWantsSystemRipgrep: false,
|
||||
bundledMode: false,
|
||||
builtinCommand: MOCK_BUILTIN_PATH,
|
||||
builtinExists: true,
|
||||
systemExecutablePath: '/usr/bin/rg',
|
||||
processExecPath: '/fake/bun',
|
||||
})
|
||||
|
||||
expect(config).toMatchObject({
|
||||
mode: 'builtin',
|
||||
command: MOCK_BUILTIN_PATH,
|
||||
args: [],
|
||||
})
|
||||
})
|
||||
|
||||
test('wrapRipgrepUnavailableError explains missing packaged fallback', () => {
|
||||
const error = wrapRipgrepUnavailableError(
|
||||
{ code: 'ENOENT', message: 'spawn rg ENOENT' },
|
||||
{ mode: 'builtin', command: 'C:\\fake\\vendor\\ripgrep\\rg.exe' },
|
||||
{ mode: 'builtin', command: 'C:\\fake\\vendor\\ripgrep\\rg.exe', args: [] },
|
||||
'win32',
|
||||
)
|
||||
|
||||
@@ -18,7 +59,7 @@ test('wrapRipgrepUnavailableError explains missing packaged fallback', () => {
|
||||
test('wrapRipgrepUnavailableError explains missing system ripgrep', () => {
|
||||
const error = wrapRipgrepUnavailableError(
|
||||
{ code: 'ENOENT', message: 'spawn rg ENOENT' },
|
||||
{ mode: 'system', command: 'rg' },
|
||||
{ mode: 'system', command: 'rg', args: [] },
|
||||
'linux',
|
||||
)
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import type { ChildProcess, ExecFileException } from 'child_process'
|
||||
import { execFile, spawn } from 'child_process'
|
||||
import { existsSync } from 'fs'
|
||||
import memoize from 'lodash-es/memoize.js'
|
||||
import { homedir } from 'os'
|
||||
import * as path from 'path'
|
||||
@@ -30,40 +31,72 @@ type RipgrepConfig = {
|
||||
|
||||
type RipgrepErrorLike = Pick<NodeJS.ErrnoException, 'code' | 'message'>
|
||||
|
||||
const getRipgrepConfig = memoize((): RipgrepConfig => {
|
||||
const userWantsSystemRipgrep = isEnvDefinedFalsy(
|
||||
process.env.USE_BUILTIN_RIPGREP,
|
||||
)
|
||||
function isErrnoException(error: unknown): error is NodeJS.ErrnoException {
|
||||
return error instanceof Error
|
||||
}
|
||||
|
||||
// Try system ripgrep if user wants it
|
||||
if (userWantsSystemRipgrep) {
|
||||
const { cmd: systemPath } = findExecutable('rg', [])
|
||||
if (systemPath !== 'rg') {
|
||||
// SECURITY: Use command name 'rg' instead of systemPath to prevent PATH hijacking
|
||||
// If we used systemPath, a malicious ./rg.exe in current directory could be executed
|
||||
// Using just 'rg' lets the OS resolve it safely with NoDefaultCurrentDirectoryInExePath protection
|
||||
return { mode: 'system', command: 'rg', args: [] }
|
||||
}
|
||||
type ResolveRipgrepConfigArgs = {
|
||||
userWantsSystemRipgrep: boolean
|
||||
bundledMode: boolean
|
||||
builtinCommand: string
|
||||
builtinExists: boolean
|
||||
systemExecutablePath: string
|
||||
processExecPath?: string
|
||||
}
|
||||
|
||||
export function resolveRipgrepConfig({
|
||||
userWantsSystemRipgrep,
|
||||
bundledMode,
|
||||
builtinCommand,
|
||||
builtinExists,
|
||||
systemExecutablePath,
|
||||
processExecPath = process.execPath,
|
||||
}: ResolveRipgrepConfigArgs): RipgrepConfig {
|
||||
if (userWantsSystemRipgrep && systemExecutablePath !== 'rg') {
|
||||
// SECURITY: Use command name 'rg' instead of systemExecutablePath to prevent PATH hijacking
|
||||
return { mode: 'system', command: 'rg', args: [] }
|
||||
}
|
||||
|
||||
// In bundled (native) mode, ripgrep is statically compiled into bun-internal
|
||||
// and dispatches based on argv[0]. We spawn ourselves with argv0='rg'.
|
||||
if (isInBundledMode()) {
|
||||
if (bundledMode) {
|
||||
return {
|
||||
mode: 'embedded',
|
||||
command: process.execPath,
|
||||
command: processExecPath,
|
||||
args: ['--no-config'],
|
||||
argv0: 'rg',
|
||||
}
|
||||
}
|
||||
|
||||
if (builtinExists) {
|
||||
return { mode: 'builtin', command: builtinCommand, args: [] }
|
||||
}
|
||||
|
||||
if (systemExecutablePath !== 'rg') {
|
||||
return { mode: 'system', command: 'rg', args: [] }
|
||||
}
|
||||
|
||||
return { mode: 'builtin', command: builtinCommand, args: [] }
|
||||
}
|
||||
|
||||
const getRipgrepConfig = memoize((): RipgrepConfig => {
|
||||
const userWantsSystemRipgrep = isEnvDefinedFalsy(
|
||||
process.env.USE_BUILTIN_RIPGREP,
|
||||
)
|
||||
const bundledMode = isInBundledMode()
|
||||
const rgRoot = path.resolve(__dirname, 'vendor', 'ripgrep')
|
||||
const command =
|
||||
const builtinCommand =
|
||||
process.platform === 'win32'
|
||||
? path.resolve(rgRoot, `${process.arch}-win32`, 'rg.exe')
|
||||
: path.resolve(rgRoot, `${process.arch}-${process.platform}`, 'rg')
|
||||
const builtinExists = existsSync(builtinCommand)
|
||||
const { cmd: systemExecutablePath } = findExecutable('rg', [])
|
||||
|
||||
return { mode: 'builtin', command, args: [] }
|
||||
return resolveRipgrepConfig({
|
||||
userWantsSystemRipgrep,
|
||||
bundledMode,
|
||||
builtinCommand,
|
||||
builtinExists,
|
||||
systemExecutablePath,
|
||||
})
|
||||
})
|
||||
|
||||
export function ripgrepCommand(): {
|
||||
@@ -324,7 +357,9 @@ async function ripGrepFileCount(
|
||||
if (settled) return
|
||||
settled = true
|
||||
reject(
|
||||
err.code === 'ENOENT' ? wrapRipgrepUnavailableError(err) : err,
|
||||
isErrnoException(err) && err.code === 'ENOENT'
|
||||
? wrapRipgrepUnavailableError(err)
|
||||
: err,
|
||||
)
|
||||
})
|
||||
})
|
||||
@@ -388,7 +423,9 @@ export async function ripGrepStream(
|
||||
if (settled) return
|
||||
settled = true
|
||||
reject(
|
||||
err.code === 'ENOENT' ? wrapRipgrepUnavailableError(err) : err,
|
||||
isErrnoException(err) && err.code === 'ENOENT'
|
||||
? wrapRipgrepUnavailableError(err)
|
||||
: err,
|
||||
)
|
||||
})
|
||||
})
|
||||
@@ -436,7 +473,9 @@ export async function ripGrep(
|
||||
const CRITICAL_ERROR_CODES = ['ENOENT', 'EACCES', 'EPERM']
|
||||
if (CRITICAL_ERROR_CODES.includes(error.code as string)) {
|
||||
reject(
|
||||
error.code === 'ENOENT' ? wrapRipgrepUnavailableError(error) : error,
|
||||
isErrnoException(error) && error.code === 'ENOENT'
|
||||
? wrapRipgrepUnavailableError(error)
|
||||
: error,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
68
src/utils/schemaSanitizer.test.ts
Normal file
68
src/utils/schemaSanitizer.test.ts
Normal file
@@ -0,0 +1,68 @@
|
||||
import { describe, expect, test } from 'bun:test'
|
||||
|
||||
import { sanitizeSchemaForOpenAICompat } from './schemaSanitizer'
|
||||
|
||||
describe('sanitizeSchemaForOpenAICompat', () => {
|
||||
test('preserves Grep-like properties.pattern while keeping it required', () => {
|
||||
const schema = {
|
||||
type: 'object',
|
||||
properties: {
|
||||
pattern: {
|
||||
type: 'string',
|
||||
description: 'The regular expression pattern to search for in file contents',
|
||||
},
|
||||
path: { type: 'string' },
|
||||
glob: { type: 'string' },
|
||||
},
|
||||
required: ['pattern'],
|
||||
}
|
||||
|
||||
const sanitized = sanitizeSchemaForOpenAICompat(schema)
|
||||
const properties = sanitized.properties as Record<string, unknown> | undefined
|
||||
|
||||
expect(Object.keys(properties ?? {})).toEqual(['pattern', 'path', 'glob'])
|
||||
expect(properties?.pattern).toEqual({
|
||||
type: 'string',
|
||||
description: 'The regular expression pattern to search for in file contents',
|
||||
})
|
||||
expect(sanitized.required).toEqual(['pattern'])
|
||||
})
|
||||
|
||||
test('preserves Glob-like properties.pattern while keeping it required', () => {
|
||||
const schema = {
|
||||
type: 'object',
|
||||
properties: {
|
||||
pattern: {
|
||||
type: 'string',
|
||||
description: 'The glob pattern to match files against',
|
||||
},
|
||||
path: { type: 'string' },
|
||||
},
|
||||
required: ['pattern'],
|
||||
}
|
||||
|
||||
const sanitized = sanitizeSchemaForOpenAICompat(schema)
|
||||
const properties = sanitized.properties as Record<string, unknown> | undefined
|
||||
|
||||
expect(Object.keys(properties ?? {})).toEqual(['pattern', 'path'])
|
||||
expect(properties?.pattern).toEqual({
|
||||
type: 'string',
|
||||
description: 'The glob pattern to match files against',
|
||||
})
|
||||
expect(sanitized.required).toEqual(['pattern'])
|
||||
})
|
||||
|
||||
test('strips JSON Schema validator pattern from string schemas', () => {
|
||||
const schema = {
|
||||
type: 'string',
|
||||
pattern: '^[a-z]+$',
|
||||
minLength: 1,
|
||||
}
|
||||
|
||||
const sanitized = sanitizeSchemaForOpenAICompat(schema)
|
||||
|
||||
expect(sanitized).toEqual({
|
||||
type: 'string',
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -33,6 +33,15 @@ function stripSchemaKeywords(schema: unknown, keywords: Set<string>): unknown {
|
||||
|
||||
const result: Record<string, unknown> = {}
|
||||
for (const [key, value] of Object.entries(schema)) {
|
||||
if (key === 'properties' && isSchemaRecord(value)) {
|
||||
const sanitizedProps: Record<string, unknown> = {}
|
||||
for (const [propName, propSchema] of Object.entries(value)) {
|
||||
sanitizedProps[propName] = stripSchemaKeywords(propSchema, keywords)
|
||||
}
|
||||
result[key] = sanitizedProps
|
||||
continue
|
||||
}
|
||||
|
||||
if (keywords.has(key)) {
|
||||
continue
|
||||
}
|
||||
@@ -215,10 +224,13 @@ export function sanitizeSchemaForOpenAICompat(
|
||||
}
|
||||
}
|
||||
|
||||
if (Array.isArray(record.required) && isSchemaRecord(record.properties)) {
|
||||
const properties = isSchemaRecord(record.properties)
|
||||
? record.properties
|
||||
: undefined
|
||||
|
||||
if (Array.isArray(record.required) && properties) {
|
||||
record.required = record.required.filter(
|
||||
(value): value is string =>
|
||||
typeof value === 'string' && value in record.properties,
|
||||
(value): value is string => typeof value === 'string' && value in properties,
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -27,6 +27,7 @@ export {
|
||||
|
||||
// Also import for use within this file
|
||||
import { type HookCommand, HooksSchema } from '../../schemas/hooks.js'
|
||||
import { AutoFixConfigSchema } from '../../services/autoFix/autoFixConfig.js'
|
||||
import { count } from '../array.js'
|
||||
|
||||
/**
|
||||
@@ -435,6 +436,12 @@ export const SettingsSchema = lazySchema(() =>
|
||||
hooks: HooksSchema()
|
||||
.optional()
|
||||
.describe('Custom commands to run before/after tool executions'),
|
||||
autoFix: AutoFixConfigSchema
|
||||
.optional()
|
||||
.describe(
|
||||
'Auto-fix configuration: automatically run lint/test after AI file edits ' +
|
||||
'and feed errors back for self-repair.',
|
||||
),
|
||||
worktree: z
|
||||
.object({
|
||||
symlinkDirectories: z
|
||||
|
||||
@@ -97,8 +97,12 @@ export function renderToAnsiString(node: React.ReactNode, columns?: number): Pro
|
||||
patchConsole: false
|
||||
});
|
||||
|
||||
// Wait for the component to exit naturally
|
||||
await instance.waitUntilExit();
|
||||
// Wait for the component to exit naturally, with a timeout guard so
|
||||
// tests never hang indefinitely if a render error prevents exit().
|
||||
await Promise.race([
|
||||
instance.waitUntilExit(),
|
||||
new Promise<void>(resolve => setTimeout(resolve, 3000)),
|
||||
]);
|
||||
|
||||
// Extract only the first frame's content to avoid duplication
|
||||
// (Ink outputs multiple frames in non-TTY mode)
|
||||
|
||||
Reference in New Issue
Block a user