test: stabilize suite and add coverage heatmap (#373)

* test: stabilize suite and add coverage heatmap

* ci: run full bun test suite in pr checks
This commit is contained in:
Kevin Codex
2026-04-05 12:44:54 +08:00
committed by GitHub
parent daa3aa27a0
commit 5ef79546e9
16 changed files with 732 additions and 120 deletions

View File

@@ -33,6 +33,9 @@ jobs:
- name: Smoke check - name: Smoke check
run: bun run smoke run: bun run smoke
- name: Full unit test suite
run: bun test --max-concurrency=1
- name: Provider tests - name: Provider tests
run: bun run test:provider run: bun run test:provider

1
.gitignore vendored
View File

@@ -6,3 +6,4 @@ dist/
!.env.example !.env.example
.openclaude-profile.json .openclaude-profile.json
reports/ reports/
coverage/

View File

@@ -196,11 +196,56 @@ node dist/cli.mjs
Helpful commands: Helpful commands:
- `bun run dev` - `bun run dev`
- `bun test`
- `bun run test:coverage`
- `bun run smoke` - `bun run smoke`
- `bun run doctor:runtime` - `bun run doctor:runtime`
- `bun run verify:privacy` - `bun run verify:privacy`
- focused `bun test ...` runs for the areas you touch - focused `bun test ...` runs for the areas you touch
## Testing And Coverage
OpenClaude uses Bun's built-in test runner for unit tests.
Run the full unit suite:
```bash
bun test
```
Generate unit test coverage:
```bash
bun run test:coverage
```
Open the visual coverage report:
```bash
open coverage/index.html
```
If you already have `coverage/lcov.info` and only want to rebuild the UI:
```bash
bun run test:coverage:ui
```
Use focused test runs when you only touch one area:
- `bun run test:provider`
- `bun run test:provider-recommendation`
- `bun test path/to/file.test.ts`
Recommended contributor validation before opening a PR:
- `bun run build`
- `bun run smoke`
- `bun run test:coverage` for broader unit coverage when your change affects shared runtime or provider logic
- focused `bun test ...` runs for the files and flows you changed
Coverage output is written to `coverage/lcov.info`, and OpenClaude also generates a git-activity-style heatmap at `coverage/index.html`.
## Repository Structure ## Repository Structure
- `src/` - core CLI/runtime - `src/` - core CLI/runtime
@@ -231,6 +276,7 @@ Contributions are welcome.
For larger changes, open an issue first so the scope is clear before implementation. Helpful validation commands include: For larger changes, open an issue first so the scope is clear before implementation. Helpful validation commands include:
- `bun run build` - `bun run build`
- `bun run test:coverage`
- `bun run smoke` - `bun run smoke`
- focused `bun test ...` runs for touched areas - focused `bun test ...` runs for touched areas

View File

@@ -1,7 +1,13 @@
import { join } from 'path' import { join, win32 } from 'path'
import { pathToFileURL } from 'url' import { pathToFileURL } from 'url'
export function getDistImportSpecifier(baseDir) { export function getDistImportSpecifier(baseDir) {
const distPath = join(baseDir, '..', 'dist', 'cli.mjs') if (/^[A-Za-z]:\\/.test(baseDir)) {
const distPath = win32.join(baseDir, '..', 'dist', 'cli.mjs')
return `file:///${distPath.replace(/\\/g, '/')}`
}
const joinImpl = join
const distPath = joinImpl(baseDir, '..', 'dist', 'cli.mjs')
return pathToFileURL(distPath).href return pathToFileURL(distPath).href
} }

View File

@@ -31,6 +31,9 @@
"dev:fast": "bun run profile:fast && bun run dev:ollama:fast", "dev:fast": "bun run profile:fast && bun run dev:ollama:fast",
"dev:code": "bun run profile:code && bun run dev:profile", "dev:code": "bun run profile:code && bun run dev:profile",
"start": "node dist/cli.mjs", "start": "node dist/cli.mjs",
"test": "bun test",
"test:coverage": "bun test --coverage --coverage-reporter=lcov --coverage-dir=coverage --max-concurrency=1 && bun run scripts/render-coverage-heatmap.ts",
"test:coverage:ui": "bun run scripts/render-coverage-heatmap.ts",
"test:provider-recommendation": "bun test src/utils/providerRecommendation.test.ts src/utils/providerProfile.test.ts", "test:provider-recommendation": "bun test src/utils/providerRecommendation.test.ts src/utils/providerProfile.test.ts",
"typecheck": "tsc --noEmit", "typecheck": "tsc --noEmit",
"smoke": "bun run build && node dist/cli.mjs --version", "smoke": "bun run build && node dist/cli.mjs --version",

View File

@@ -0,0 +1,393 @@
import { mkdir, readFile, writeFile } from 'fs/promises'
import { dirname, resolve } from 'path'
// Per-file line coverage parsed from lcov, plus CHUNK_COUNT bucketed
// hit ratios used to draw the heatmap cells for that file.
type FileCoverage = {
  path: string // source path exactly as recorded in the SF: record
  found: number // count of instrumented (DA:) lines
  hit: number // instrumented lines with at least one recorded hit
  chunks: number[] // per-chunk coverage ratio, each in 0..1
}
// Coverage totals aggregated over all files in one directory.
type DirectoryCoverage = {
  path: string
  found: number
  hit: number
}
// Input/output locations, resolved against the current working directory.
const LCOV_PATH = resolve(process.cwd(), 'coverage/lcov.info')
const HTML_PATH = resolve(process.cwd(), 'coverage/index.html')
// Number of heatmap cells rendered per file row.
const CHUNK_COUNT = 20
/**
 * Escape a string for safe interpolation into HTML text or attribute
 * content. The ampersand is replaced first so characters escaped by the
 * later rules are not double-escaped.
 */
function escapeHtml(value: string): string {
  return value
    .replaceAll('&', '&amp;') // bug fix: original replaced '&' with itself (no-op), leaving ampersands unescaped
    .replaceAll('<', '&lt;')
    .replaceAll('>', '&gt;')
    .replaceAll('"', '&quot;')
}
/**
 * Map a coverage ratio (0..1) to a heatmap cell color.
 * Darker green means better coverage; grey means touched but low;
 * near-black means completely uncovered.
 */
function bucketColor(ratio: number): string {
  const greens: Array<[number, string]> = [
    [0.9, '#166534'],
    [0.75, '#15803d'],
    [0.5, '#65a30d'],
  ]
  for (const [threshold, color] of greens) {
    if (ratio >= threshold) return color
  }
  return ratio > 0 ? '#a3a3a3' : '#262626'
}
/** Format a 0..1 ratio as a whole-percent label, e.g. 0.756 -> "76%". */
function coverageLabel(ratio: number): string {
  const percent = Math.round(ratio * 100)
  return `${percent}%`
}
/** Fraction of found lines that were hit; zero found lines counts as 0. */
function coverageRatio(found: number, hit: number): number {
  if (found === 0) return 0
  return hit / found
}
/**
 * Map a coverage ratio (0..1) to a terminal glyph, densest for the
 * best-covered chunks and a middle dot for completely uncovered ones.
 */
function bucketGlyph(ratio: number): string {
  if (ratio <= 0) return '·'
  if (ratio < 0.5) return '░'
  if (ratio < 0.75) return '▒'
  if (ratio < 0.9) return '▓'
  return '█'
}
/** Render a list of per-chunk coverage ratios as one glyph bar string. */
function terminalBar(chunks: number[]): string {
  let bar = ''
  for (const chunk of chunks) {
    bar += bucketGlyph(chunk)
  }
  return bar
}
/**
 * Aggregate per-file coverage into per-directory totals.
 * Files whose path has no '/' are grouped under '.'.
 * Result is sorted by coverage ratio descending, ties broken by total
 * instrumented line count descending.
 */
function summarizeDirectories(files: FileCoverage[]): DirectoryCoverage[] {
  const byDir = new Map<string, DirectoryCoverage>()
  for (const { path, found, hit } of files) {
    const slash = path.lastIndexOf('/')
    const dirName = slash === -1 ? '.' : path.slice(0, slash)
    let entry = byDir.get(dirName)
    if (!entry) {
      entry = { path: dirName, found: 0, hit: 0 }
      byDir.set(dirName, entry)
    }
    entry.found += found
    entry.hit += hit
  }
  const summaries = Array.from(byDir.values())
  summaries.sort((a, b) => {
    const delta = coverageRatio(b.found, b.hit) - coverageRatio(a.found, a.hit)
    return delta !== 0 ? delta : b.found - a.found
  })
  return summaries
}
/**
 * Build the plain-text coverage summary printed to the terminal:
 * an overall bar, the top 8 covered directories, and the 10
 * lowest-covered files that have at least 20 instrumented lines.
 */
function buildTerminalReport(files: FileCoverage[]): string {
  let totalFound = 0
  let totalHit = 0
  for (const file of files) {
    totalFound += file.found
    totalHit += file.hit
  }
  const totalRatio = coverageRatio(totalFound, totalHit)

  const topDirectories = summarizeDirectories(files)
    .filter(dir => dir.found > 0)
    .slice(0, 8)

  // Only rank files large enough for a percentage to be meaningful.
  const candidates = files.filter(file => file.found >= 20)
  candidates.sort((a, b) => {
    const delta = coverageRatio(a.found, a.hit) - coverageRatio(b.found, b.hit)
    return delta !== 0 ? delta : b.found - a.found
  })
  const lowestFiles = candidates.slice(0, 10)

  const report: string[] = []
  report.push('', 'Coverage Activity')
  const overallBar = terminalBar(Array.from({ length: CHUNK_COUNT }, () => totalRatio))
  report.push(
    `${overallBar} ${coverageLabel(totalRatio)} ${totalHit}/${totalFound} lines ${files.length} files`,
  )
  report.push('', 'Top Directories')
  for (const dir of topDirectories) {
    const ratio = coverageRatio(dir.found, dir.hit)
    const bar = terminalBar(Array.from({ length: 12 }, () => ratio))
    report.push(
      `${bar} ${coverageLabel(ratio).padStart(4)} ${String(dir.hit).padStart(5)}/${String(dir.found).padEnd(5)} ${dir.path}`,
    )
  }
  report.push('', 'Lowest Coverage Files')
  for (const file of lowestFiles) {
    const ratio = coverageRatio(file.found, file.hit)
    report.push(
      `${terminalBar(file.chunks).padEnd(CHUNK_COUNT)} ${coverageLabel(ratio).padStart(4)} ${String(file.hit).padStart(5)}/${String(file.found).padEnd(5)} ${file.path}`,
    )
  }
  report.push('', `HTML report: ${HTML_PATH}`)
  return report.join('\n')
}
function parseLcov(content: string): FileCoverage[] {
const files: FileCoverage[] = []
const sections = content.split('end_of_record')
for (const rawSection of sections) {
const section = rawSection.trim()
if (!section) continue
const lines = section.split('\n')
let filePath = ''
const lineHits = new Map<number, number>()
for (const line of lines) {
if (line.startsWith('SF:')) {
filePath = line.slice(3).trim()
} else if (line.startsWith('DA:')) {
const [lineNumberText, hitText] = line.slice(3).split(',')
const lineNumber = Number(lineNumberText)
const hits = Number(hitText)
if (Number.isFinite(lineNumber) && Number.isFinite(hits)) {
lineHits.set(lineNumber, hits)
}
}
}
if (!filePath || lineHits.size === 0) continue
const ordered = [...lineHits.entries()].sort((a, b) => a[0] - b[0])
const found = ordered.length
const hit = ordered.filter(([, hits]) => hits > 0).length
const chunkSize = Math.max(1, Math.ceil(found / CHUNK_COUNT))
const chunks: number[] = []
for (let index = 0; index < found; index += chunkSize) {
const slice = ordered.slice(index, index + chunkSize)
const covered = slice.filter(([, hits]) => hits > 0).length
chunks.push(slice.length === 0 ? 0 : covered / slice.length)
}
while (chunks.length < CHUNK_COUNT) {
chunks.push(0)
}
files.push({
path: filePath,
found,
hit,
chunks: chunks.slice(0, CHUNK_COUNT),
})
}
return files.sort((a, b) => {
const left = a.found === 0 ? 0 : a.hit / a.found
const right = b.found === 0 ? 0 : b.hit / b.found
if (right !== left) return right - left
return a.path.localeCompare(b.path)
})
}
/**
 * Render the standalone HTML coverage report: a row of summary cards,
 * then one table row per file with a CHUNK_COUNT-cell heatmap colored
 * by per-chunk coverage, plus a "Less…More" legend.
 * NOTE(review): whitespace inside the template literals is part of the
 * emitted HTML, so those strings are left exactly as-is.
 */
function buildHtml(files: FileCoverage[]): string {
  // Aggregate totals across every parsed file for the summary cards.
  const totalFound = files.reduce((sum, file) => sum + file.found, 0)
  const totalHit = files.reduce((sum, file) => sum + file.hit, 0)
  const totalRatio = totalFound === 0 ? 0 : totalHit / totalFound
  // [label, value] pairs rendered as the three summary cards.
  const cards = [
    ['Files', String(files.length)],
    ['Covered Lines', `${totalHit}/${totalFound}`],
    ['Line Coverage', coverageLabel(totalRatio)],
  ]
  // One <tr> per file; each chunk becomes a colored square whose
  // title attribute carries the hover tooltip.
  const rows = files
    .map(file => {
      const ratio = file.found === 0 ? 0 : file.hit / file.found
      const squares = file.chunks
        .map(
          (chunk, index) =>
            `<span class="cell" title="Chunk ${index + 1}: ${coverageLabel(chunk)}" style="background:${bucketColor(chunk)}"></span>`,
        )
        .join('')
      return `
<tr>
<td class="file">${escapeHtml(file.path)}</td>
<td class="percent">${coverageLabel(ratio)}</td>
<td class="lines">${file.hit}/${file.found}</td>
<td class="heatmap">${squares}</td>
</tr>
`
    })
    .join('')
  // Summary card markup; values are escaped defensively even though
  // they are numeric here.
  const summary = cards
    .map(
      ([label, value]) => `
<div class="card">
<div class="card-label">${escapeHtml(label)}</div>
<div class="card-value">${escapeHtml(value)}</div>
</div>
`,
    )
    .join('')
  // Full self-contained document: inline dark-theme CSS, no external assets.
  return `<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>OpenClaude Coverage</title>
<style>
:root {
color-scheme: dark;
--bg: #09090b;
--panel: #111113;
--panel-2: #18181b;
--border: #27272a;
--text: #fafafa;
--muted: #a1a1aa;
}
* { box-sizing: border-box; }
body {
margin: 0;
background: linear-gradient(180deg, #09090b 0%, #0f0f12 100%);
color: var(--text);
font: 14px/1.4 ui-monospace, SFMono-Regular, Menlo, monospace;
}
main {
max-width: 1440px;
margin: 0 auto;
padding: 32px 24px 48px;
}
h1 {
margin: 0 0 8px;
font-size: 32px;
letter-spacing: -0.04em;
}
p {
margin: 0;
color: var(--muted);
}
.summary {
display: grid;
grid-template-columns: repeat(3, minmax(0, 1fr));
gap: 12px;
margin: 24px 0;
}
.card {
background: rgba(24, 24, 27, 0.92);
border: 1px solid var(--border);
border-radius: 16px;
padding: 16px 18px;
}
.card-label {
color: var(--muted);
margin-bottom: 8px;
}
.card-value {
font-size: 28px;
font-weight: 700;
}
.table-wrap {
background: rgba(17, 17, 19, 0.94);
border: 1px solid var(--border);
border-radius: 18px;
overflow: hidden;
}
table {
width: 100%;
border-collapse: collapse;
}
thead th {
text-align: left;
color: var(--muted);
font-weight: 500;
background: rgba(24, 24, 27, 0.95);
border-bottom: 1px solid var(--border);
}
th, td {
padding: 12px 16px;
vertical-align: middle;
}
tbody tr + tr td {
border-top: 1px solid rgba(39, 39, 42, 0.65);
}
.file {
width: 48%;
word-break: break-all;
}
.percent, .lines {
white-space: nowrap;
}
.heatmap {
width: 32%;
min-width: 280px;
}
.cell {
display: inline-block;
width: 12px;
height: 12px;
margin-right: 4px;
border-radius: 3px;
border: 1px solid rgba(255,255,255,0.05);
}
.legend {
display: flex;
align-items: center;
gap: 10px;
margin-top: 16px;
color: var(--muted);
}
.legend-scale {
display: flex;
gap: 4px;
}
@media (max-width: 900px) {
.summary {
grid-template-columns: 1fr;
}
.heatmap {
min-width: 220px;
}
th, td {
padding: 10px 12px;
}
}
</style>
</head>
<body>
<main>
<h1>Coverage Activity</h1>
<p>Git-style heatmap generated from coverage/lcov.info</p>
<section class="summary">${summary}</section>
<section class="table-wrap">
<table>
<thead>
<tr>
<th>File</th>
<th>Coverage</th>
<th>Lines</th>
<th>Activity</th>
</tr>
</thead>
<tbody>${rows}</tbody>
</table>
</section>
<div class="legend">
<span>Less</span>
<div class="legend-scale">
<span class="cell" style="background:#262626"></span>
<span class="cell" style="background:#a3a3a3"></span>
<span class="cell" style="background:#65a30d"></span>
<span class="cell" style="background:#15803d"></span>
<span class="cell" style="background:#166534"></span>
</div>
<span>More</span>
</div>
</main>
</body>
</html>`
}
/**
 * Entry point: read coverage/lcov.info, write the HTML heatmap to
 * coverage/index.html, then print the terminal summary.
 */
async function main() {
  const lcovText = await readFile(LCOV_PATH, 'utf8')
  const coverage = parseLcov(lcovText)
  await mkdir(dirname(HTML_PATH), { recursive: true })
  await writeFile(HTML_PATH, buildHtml(coverage), 'utf8')
  console.log(buildTerminalReport(coverage))
  console.log(`coverage heatmap written to ${HTML_PATH}`)
}

await main()

View File

@@ -40,7 +40,7 @@ export default function TextInput(props: Props): React.ReactNode {
// Hoisted to mount-time — this component re-renders on every keystroke. // Hoisted to mount-time — this component re-renders on every keystroke.
const accessibilityEnabled = useMemo(() => isEnvTruthy(process.env.CLAUDE_CODE_ACCESSIBILITY), []); const accessibilityEnabled = useMemo(() => isEnvTruthy(process.env.CLAUDE_CODE_ACCESSIBILITY), []);
const settings = useSettings(); const settings = useSettings();
const reducedMotion = settings.prefersReducedMotion ?? false; const reducedMotion = settings?.prefersReducedMotion ?? false;
const voiceState = feature('VOICE_MODE') ? const voiceState = feature('VOICE_MODE') ?
// biome-ignore lint/correctness/useHookAtTopLevel: feature() is a compile-time constant // biome-ignore lint/correctness/useHookAtTopLevel: feature() is a compile-time constant
useVoiceState(s => s.voiceState) : 'idle' as const; useVoiceState(s => s.voiceState) : 'idle' as const;

View File

@@ -1,5 +1,4 @@
import { describe, expect, test, afterEach } from 'bun:test' import { afterEach, describe, expect, mock, test } from 'bun:test'
import { getRateLimitResetDelayMs, parseOpenAIDuration } from './withRetry.js'
import { APIError } from '@anthropic-ai/sdk' import { APIError } from '@anthropic-ai/sdk'
// Helper to build a mock APIError with specific headers // Helper to build a mock APIError with specific headers
@@ -28,42 +27,71 @@ afterEach(() => {
if (originalEnv[key] === undefined) delete process.env[key] if (originalEnv[key] === undefined) delete process.env[key]
else process.env[key] = originalEnv[key] else process.env[key] = originalEnv[key]
} }
mock.restore()
}) })
async function importFreshWithRetryModule(
provider:
| 'firstParty'
| 'openai'
| 'github'
| 'bedrock'
| 'vertex'
| 'gemini'
| 'codex'
| 'foundry' = 'firstParty',
) {
mock.restore()
mock.module('src/utils/model/providers.js', () => ({
getAPIProvider: () => provider,
getAPIProviderForStatsig: () => provider,
}))
return import(`./withRetry.js?ts=${Date.now()}-${Math.random()}`)
}
// --- parseOpenAIDuration --- // --- parseOpenAIDuration ---
describe('parseOpenAIDuration', () => { describe('parseOpenAIDuration', () => {
test('parses seconds: "1s" → 1000', () => { test('parses seconds: "1s" → 1000', async () => {
const { parseOpenAIDuration } = await importFreshWithRetryModule()
expect(parseOpenAIDuration('1s')).toBe(1000) expect(parseOpenAIDuration('1s')).toBe(1000)
}) })
test('parses minutes+seconds: "6m0s" → 360000', () => { test('parses minutes+seconds: "6m0s" → 360000', async () => {
const { parseOpenAIDuration } = await importFreshWithRetryModule()
expect(parseOpenAIDuration('6m0s')).toBe(360000) expect(parseOpenAIDuration('6m0s')).toBe(360000)
}) })
test('parses hours+minutes+seconds: "1h30m0s" → 5400000', () => { test('parses hours+minutes+seconds: "1h30m0s" → 5400000', async () => {
const { parseOpenAIDuration } = await importFreshWithRetryModule()
expect(parseOpenAIDuration('1h30m0s')).toBe(5400000) expect(parseOpenAIDuration('1h30m0s')).toBe(5400000)
}) })
test('parses milliseconds: "500ms" → 500', () => { test('parses milliseconds: "500ms" → 500', async () => {
const { parseOpenAIDuration } = await importFreshWithRetryModule()
expect(parseOpenAIDuration('500ms')).toBe(500) expect(parseOpenAIDuration('500ms')).toBe(500)
}) })
test('parses minutes only: "2m" → 120000', () => { test('parses minutes only: "2m" → 120000', async () => {
const { parseOpenAIDuration } = await importFreshWithRetryModule()
expect(parseOpenAIDuration('2m')).toBe(120000) expect(parseOpenAIDuration('2m')).toBe(120000)
}) })
test('returns null for empty string', () => { test('returns null for empty string', async () => {
const { parseOpenAIDuration } = await importFreshWithRetryModule()
expect(parseOpenAIDuration('')).toBeNull() expect(parseOpenAIDuration('')).toBeNull()
}) })
test('returns null for unrecognized format', () => { test('returns null for unrecognized format', async () => {
const { parseOpenAIDuration } = await importFreshWithRetryModule()
expect(parseOpenAIDuration('invalid')).toBeNull() expect(parseOpenAIDuration('invalid')).toBeNull()
}) })
}) })
// --- getRateLimitResetDelayMs --- // --- getRateLimitResetDelayMs ---
describe('getRateLimitResetDelayMs - Anthropic (firstParty)', () => { describe('getRateLimitResetDelayMs - Anthropic (firstParty)', () => {
test('reads anthropic-ratelimit-unified-reset Unix timestamp', () => { test('reads anthropic-ratelimit-unified-reset Unix timestamp', async () => {
const { getRateLimitResetDelayMs } =
await importFreshWithRetryModule('firstParty')
const futureUnixSec = Math.floor(Date.now() / 1000) + 60 const futureUnixSec = Math.floor(Date.now() / 1000) + 60
const error = makeError({ const error = makeError({
'anthropic-ratelimit-unified-reset': String(futureUnixSec), 'anthropic-ratelimit-unified-reset': String(futureUnixSec),
@@ -74,12 +102,16 @@ describe('getRateLimitResetDelayMs - Anthropic (firstParty)', () => {
expect(delay!).toBeLessThanOrEqual(60_000) expect(delay!).toBeLessThanOrEqual(60_000)
}) })
test('returns null when header absent', () => { test('returns null when header absent', async () => {
const { getRateLimitResetDelayMs } =
await importFreshWithRetryModule('firstParty')
const error = makeError({}) const error = makeError({})
expect(getRateLimitResetDelayMs(error)).toBeNull() expect(getRateLimitResetDelayMs(error)).toBeNull()
}) })
test('returns null when reset is in the past', () => { test('returns null when reset is in the past', async () => {
const { getRateLimitResetDelayMs } =
await importFreshWithRetryModule('firstParty')
const pastUnixSec = Math.floor(Date.now() / 1000) - 10 const pastUnixSec = Math.floor(Date.now() / 1000) - 10
const error = makeError({ const error = makeError({
'anthropic-ratelimit-unified-reset': String(pastUnixSec), 'anthropic-ratelimit-unified-reset': String(pastUnixSec),
@@ -89,15 +121,19 @@ describe('getRateLimitResetDelayMs - Anthropic (firstParty)', () => {
}) })
describe('getRateLimitResetDelayMs - OpenAI provider', () => { describe('getRateLimitResetDelayMs - OpenAI provider', () => {
test('reads x-ratelimit-reset-requests duration string', () => { test('reads x-ratelimit-reset-requests duration string', async () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1' process.env.CLAUDE_CODE_USE_OPENAI = '1'
const { getRateLimitResetDelayMs } =
await importFreshWithRetryModule('openai')
const error = makeError({ 'x-ratelimit-reset-requests': '30s' }) const error = makeError({ 'x-ratelimit-reset-requests': '30s' })
const delay = getRateLimitResetDelayMs(error) const delay = getRateLimitResetDelayMs(error)
expect(delay).toBe(30_000) expect(delay).toBe(30_000)
}) })
test('reads x-ratelimit-reset-tokens and picks the larger delay', () => { test('reads x-ratelimit-reset-tokens and picks the larger delay', async () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1' process.env.CLAUDE_CODE_USE_OPENAI = '1'
const { getRateLimitResetDelayMs } =
await importFreshWithRetryModule('openai')
const error = makeError({ const error = makeError({
'x-ratelimit-reset-requests': '10s', 'x-ratelimit-reset-requests': '10s',
'x-ratelimit-reset-tokens': '1m0s', 'x-ratelimit-reset-tokens': '1m0s',
@@ -107,29 +143,37 @@ describe('getRateLimitResetDelayMs - OpenAI provider', () => {
expect(delay).toBe(60_000) expect(delay).toBe(60_000)
}) })
test('returns null when no openai rate limit headers present', () => { test('returns null when no openai rate limit headers present', async () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1' process.env.CLAUDE_CODE_USE_OPENAI = '1'
const { getRateLimitResetDelayMs } =
await importFreshWithRetryModule('openai')
const error = makeError({}) const error = makeError({})
expect(getRateLimitResetDelayMs(error)).toBeNull() expect(getRateLimitResetDelayMs(error)).toBeNull()
}) })
test('works for github provider too', () => { test('works for github provider too', async () => {
process.env.CLAUDE_CODE_USE_GITHUB = '1' process.env.CLAUDE_CODE_USE_GITHUB = '1'
const { getRateLimitResetDelayMs } =
await importFreshWithRetryModule('github')
const error = makeError({ 'x-ratelimit-reset-requests': '5s' }) const error = makeError({ 'x-ratelimit-reset-requests': '5s' })
expect(getRateLimitResetDelayMs(error)).toBe(5_000) expect(getRateLimitResetDelayMs(error)).toBe(5_000)
}) })
}) })
describe('getRateLimitResetDelayMs - providers without reset headers', () => { describe('getRateLimitResetDelayMs - providers without reset headers', () => {
test('returns null for bedrock', () => { test('returns null for bedrock', async () => {
process.env.CLAUDE_CODE_USE_BEDROCK = '1' process.env.CLAUDE_CODE_USE_BEDROCK = '1'
const { getRateLimitResetDelayMs } =
await importFreshWithRetryModule('bedrock')
const error = makeError({ 'anthropic-ratelimit-unified-reset': String(Math.floor(Date.now() / 1000) + 60) }) const error = makeError({ 'anthropic-ratelimit-unified-reset': String(Math.floor(Date.now() / 1000) + 60) })
// Bedrock doesn't use this header — should still return null // Bedrock doesn't use this header — should still return null
expect(getRateLimitResetDelayMs(error)).toBeNull() expect(getRateLimitResetDelayMs(error)).toBeNull()
}) })
test('returns null for vertex', () => { test('returns null for vertex', async () => {
process.env.CLAUDE_CODE_USE_VERTEX = '1' process.env.CLAUDE_CODE_USE_VERTEX = '1'
const { getRateLimitResetDelayMs } =
await importFreshWithRetryModule('vertex')
const error = makeError({}) const error = makeError({})
expect(getRateLimitResetDelayMs(error)).toBeNull() expect(getRateLimitResetDelayMs(error)).toBeNull()
}) })

View File

@@ -4,6 +4,7 @@ import axios from 'axios'
const originalEnv = { ...process.env } const originalEnv = { ...process.env }
async function importFreshModule() { async function importFreshModule() {
mock.restore()
return import(`./officialRegistry.ts?ts=${Date.now()}-${Math.random()}`) return import(`./officialRegistry.ts?ts=${Date.now()}-${Math.random()}`)
} }
@@ -13,11 +14,15 @@ beforeEach(() => {
afterEach(() => { afterEach(() => {
process.env = { ...originalEnv } process.env = { ...originalEnv }
mock.restore()
}) })
describe('prefetchOfficialMcpUrls', () => { describe('prefetchOfficialMcpUrls', () => {
test('does not fetch registry when using OpenAI mode', async () => { test('does not fetch registry when using OpenAI mode', async () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1' process.env.CLAUDE_CODE_USE_OPENAI = '1'
mock.module('../../utils/model/providers.js', () => ({
getAPIProvider: () => 'openai',
}))
const getSpy = mock(() => Promise.resolve({ data: { servers: [] } })) const getSpy = mock(() => Promise.resolve({ data: { servers: [] } }))
axios.get = getSpy as typeof axios.get axios.get = getSpy as typeof axios.get
@@ -29,6 +34,9 @@ describe('prefetchOfficialMcpUrls', () => {
test('does not fetch registry when using Gemini mode', async () => { test('does not fetch registry when using Gemini mode', async () => {
process.env.CLAUDE_CODE_USE_GEMINI = '1' process.env.CLAUDE_CODE_USE_GEMINI = '1'
mock.module('../../utils/model/providers.js', () => ({
getAPIProvider: () => 'gemini',
}))
const getSpy = mock(() => Promise.resolve({ data: { servers: [] } })) const getSpy = mock(() => Promise.resolve({ data: { servers: [] } }))
axios.get = getSpy as typeof axios.get axios.get = getSpy as typeof axios.get
@@ -43,6 +51,9 @@ describe('prefetchOfficialMcpUrls', () => {
delete process.env.CLAUDE_CODE_USE_GEMINI delete process.env.CLAUDE_CODE_USE_GEMINI
delete process.env.CLAUDE_CODE_USE_GITHUB delete process.env.CLAUDE_CODE_USE_GITHUB
mock.module('../../utils/model/providers.js', () => ({
getAPIProvider: () => 'firstParty',
}))
const getSpy = mock(() => const getSpy = mock(() =>
Promise.resolve({ Promise.resolve({
data: { data: {

View File

@@ -4,6 +4,7 @@ import axios from 'axios'
const originalEnv = { ...process.env } const originalEnv = { ...process.env }
async function importFreshModule() { async function importFreshModule() {
mock.restore()
return import(`./utils.ts?ts=${Date.now()}-${Math.random()}`) return import(`./utils.ts?ts=${Date.now()}-${Math.random()}`)
} }
@@ -13,11 +14,15 @@ beforeEach(() => {
afterEach(() => { afterEach(() => {
process.env = { ...originalEnv } process.env = { ...originalEnv }
mock.restore()
}) })
describe('checkDomainBlocklist', () => { describe('checkDomainBlocklist', () => {
test('returns allowed without API call in OpenAI mode', async () => { test('returns allowed without API call in OpenAI mode', async () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1' process.env.CLAUDE_CODE_USE_OPENAI = '1'
mock.module('../../utils/model/providers.js', () => ({
getAPIProvider: () => 'openai',
}))
const getSpy = mock(() => const getSpy = mock(() =>
Promise.resolve({ status: 200, data: { can_fetch: true } }), Promise.resolve({ status: 200, data: { can_fetch: true } }),
) )
@@ -32,6 +37,9 @@ describe('checkDomainBlocklist', () => {
test('returns allowed without API call in Gemini mode', async () => { test('returns allowed without API call in Gemini mode', async () => {
process.env.CLAUDE_CODE_USE_GEMINI = '1' process.env.CLAUDE_CODE_USE_GEMINI = '1'
mock.module('../../utils/model/providers.js', () => ({
getAPIProvider: () => 'gemini',
}))
const getSpy = mock(() => const getSpy = mock(() =>
Promise.resolve({ status: 200, data: { can_fetch: true } }), Promise.resolve({ status: 200, data: { can_fetch: true } }),
) )
@@ -49,6 +57,9 @@ describe('checkDomainBlocklist', () => {
delete process.env.CLAUDE_CODE_USE_GEMINI delete process.env.CLAUDE_CODE_USE_GEMINI
delete process.env.CLAUDE_CODE_USE_GITHUB delete process.env.CLAUDE_CODE_USE_GITHUB
mock.module('../../utils/model/providers.js', () => ({
getAPIProvider: () => 'firstParty',
}))
const getSpy = mock(() => const getSpy = mock(() =>
Promise.resolve({ status: 200, data: { can_fetch: true } }), Promise.resolve({ status: 200, data: { can_fetch: true } }),
) )

View File

@@ -4,6 +4,7 @@ const originalEnv = { ...process.env }
const originalFetch = globalThis.fetch const originalFetch = globalThis.fetch
async function importFreshModule() { async function importFreshModule() {
mock.restore()
return import(`./apiPreconnect.ts?ts=${Date.now()}-${Math.random()}`) return import(`./apiPreconnect.ts?ts=${Date.now()}-${Math.random()}`)
} }
@@ -14,11 +15,15 @@ beforeEach(() => {
afterEach(() => { afterEach(() => {
process.env = { ...originalEnv } process.env = { ...originalEnv }
globalThis.fetch = originalFetch globalThis.fetch = originalFetch
mock.restore()
}) })
describe('preconnectAnthropicApi', () => { describe('preconnectAnthropicApi', () => {
test('does not fetch when OpenAI mode is enabled', async () => { test('does not fetch when OpenAI mode is enabled', async () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1' process.env.CLAUDE_CODE_USE_OPENAI = '1'
mock.module('./model/providers.js', () => ({
getAPIProvider: () => 'openai',
}))
const fetchMock = mock(() => Promise.resolve(new Response(null, { status: 200 }))) const fetchMock = mock(() => Promise.resolve(new Response(null, { status: 200 })))
globalThis.fetch = fetchMock as typeof globalThis.fetch globalThis.fetch = fetchMock as typeof globalThis.fetch
@@ -30,6 +35,9 @@ describe('preconnectAnthropicApi', () => {
test('does not fetch when Gemini mode is enabled', async () => { test('does not fetch when Gemini mode is enabled', async () => {
process.env.CLAUDE_CODE_USE_GEMINI = '1' process.env.CLAUDE_CODE_USE_GEMINI = '1'
mock.module('./model/providers.js', () => ({
getAPIProvider: () => 'gemini',
}))
const fetchMock = mock(() => Promise.resolve(new Response(null, { status: 200 }))) const fetchMock = mock(() => Promise.resolve(new Response(null, { status: 200 })))
globalThis.fetch = fetchMock as typeof globalThis.fetch globalThis.fetch = fetchMock as typeof globalThis.fetch
@@ -41,6 +49,9 @@ describe('preconnectAnthropicApi', () => {
test('does not fetch when GitHub mode is enabled', async () => { test('does not fetch when GitHub mode is enabled', async () => {
process.env.CLAUDE_CODE_USE_GITHUB = '1' process.env.CLAUDE_CODE_USE_GITHUB = '1'
mock.module('./model/providers.js', () => ({
getAPIProvider: () => 'github',
}))
const fetchMock = mock(() => Promise.resolve(new Response(null, { status: 200 }))) const fetchMock = mock(() => Promise.resolve(new Response(null, { status: 200 })))
globalThis.fetch = fetchMock as typeof globalThis.fetch globalThis.fetch = fetchMock as typeof globalThis.fetch
@@ -58,6 +69,9 @@ describe('preconnectAnthropicApi', () => {
delete process.env.CLAUDE_CODE_USE_VERTEX delete process.env.CLAUDE_CODE_USE_VERTEX
delete process.env.CLAUDE_CODE_USE_FOUNDRY delete process.env.CLAUDE_CODE_USE_FOUNDRY
mock.module('./model/providers.js', () => ({
getAPIProvider: () => 'firstParty',
}))
const fetchMock = mock(() => Promise.resolve(new Response(null, { status: 200 }))) const fetchMock = mock(() => Promise.resolve(new Response(null, { status: 200 })))
globalThis.fetch = fetchMock as typeof globalThis.fetch globalThis.fetch = fetchMock as typeof globalThis.fetch

View File

@@ -1,31 +1,60 @@
import { afterEach, expect, test } from 'bun:test' import { afterEach, beforeEach, expect, mock, test } from 'bun:test'
import { type MockStorageData = Record<string, unknown>
clearGeminiAccessToken,
readGeminiAccessToken,
saveGeminiAccessToken,
} from './geminiCredentials.ts'
const originalToken = process.env.GEMINI_ACCESS_TOKEN const originalEnv = { ...process.env }
let storageState: MockStorageData = {}
afterEach(() => { async function importFreshModule() {
if (originalToken === undefined) { mock.module('./secureStorage/index.js', () => ({
delete process.env.GEMINI_ACCESS_TOKEN getSecureStorage: () => ({
} else { name: 'mock-secure-storage',
process.env.GEMINI_ACCESS_TOKEN = originalToken read: () => storageState,
} readAsync: async () => storageState,
clearGeminiAccessToken() update: (next: MockStorageData) => {
storageState = next
return { success: true }
},
delete: () => {
storageState = {}
return true
},
}),
}))
return import(`./geminiCredentials.ts?ts=${Date.now()}-${Math.random()}`)
}
beforeEach(() => {
process.env = { ...originalEnv }
storageState = {}
}) })
test('saveGeminiAccessToken stores and reads back the token', () => { afterEach(() => {
process.env = { ...originalEnv }
storageState = {}
mock.restore()
})
test('saveGeminiAccessToken stores and reads back the token', async () => {
const {
readGeminiAccessToken,
saveGeminiAccessToken,
} = await importFreshModule()
const result = saveGeminiAccessToken('token-123') const result = saveGeminiAccessToken('token-123')
expect(result.success).toBe(true) expect(result.success).toBe(true)
expect(readGeminiAccessToken()).toBe('token-123') expect(readGeminiAccessToken()).toBe('token-123')
}) })
test('clearGeminiAccessToken removes the stored token', () => { test('clearGeminiAccessToken removes the stored token', async () => {
const {
clearGeminiAccessToken,
readGeminiAccessToken,
saveGeminiAccessToken,
} = await importFreshModule()
expect(saveGeminiAccessToken('token-123').success).toBe(true) expect(saveGeminiAccessToken('token-123').success).toBe(true)
expect(clearGeminiAccessToken().success).toBe(true) expect(clearGeminiAccessToken().success).toBe(true)
expect(readGeminiAccessToken()).toBeUndefined() expect(readGeminiAccessToken()).toBeUndefined()
}) })

View File

@@ -1,10 +1,5 @@
import { afterEach, expect, test } from 'bun:test' import { afterEach, expect, test } from 'bun:test'
import {
getAPIProvider,
usesAnthropicAccountFlow,
} from './providers.js'
const originalEnv = { const originalEnv = {
CLAUDE_CODE_USE_GEMINI: process.env.CLAUDE_CODE_USE_GEMINI, CLAUDE_CODE_USE_GEMINI: process.env.CLAUDE_CODE_USE_GEMINI,
CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB, CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
@@ -23,6 +18,10 @@ afterEach(() => {
process.env.CLAUDE_CODE_USE_FOUNDRY = originalEnv.CLAUDE_CODE_USE_FOUNDRY process.env.CLAUDE_CODE_USE_FOUNDRY = originalEnv.CLAUDE_CODE_USE_FOUNDRY
}) })
async function importFreshProvidersModule() {
return import(`./providers.js?ts=${Date.now()}-${Math.random()}`)
}
function clearProviderEnv(): void { function clearProviderEnv(): void {
delete process.env.CLAUDE_CODE_USE_GEMINI delete process.env.CLAUDE_CODE_USE_GEMINI
delete process.env.CLAUDE_CODE_USE_GITHUB delete process.env.CLAUDE_CODE_USE_GITHUB
@@ -34,9 +33,12 @@ function clearProviderEnv(): void {
test('first-party provider keeps Anthropic account setup flow enabled', () => { test('first-party provider keeps Anthropic account setup flow enabled', () => {
clearProviderEnv() clearProviderEnv()
return importFreshProvidersModule().then(
({ getAPIProvider, usesAnthropicAccountFlow }) => {
expect(getAPIProvider()).toBe('firstParty') expect(getAPIProvider()).toBe('firstParty')
expect(usesAnthropicAccountFlow()).toBe(true) expect(usesAnthropicAccountFlow()).toBe(true)
},
)
}) })
test.each([ test.each([
@@ -48,19 +50,22 @@ test.each([
['CLAUDE_CODE_USE_FOUNDRY', 'foundry'], ['CLAUDE_CODE_USE_FOUNDRY', 'foundry'],
] as const)( ] as const)(
'%s disables Anthropic account setup flow', '%s disables Anthropic account setup flow',
(envKey, provider) => { async (envKey, provider) => {
clearProviderEnv() clearProviderEnv()
process.env[envKey] = '1' process.env[envKey] = '1'
const { getAPIProvider, usesAnthropicAccountFlow } =
await importFreshProvidersModule()
expect(getAPIProvider()).toBe(provider) expect(getAPIProvider()).toBe(provider)
expect(usesAnthropicAccountFlow()).toBe(false) expect(usesAnthropicAccountFlow()).toBe(false)
}, },
) )
test('GEMINI takes precedence over GitHub when both are set', () => { test('GEMINI takes precedence over GitHub when both are set', async () => {
clearProviderEnv() clearProviderEnv()
process.env.CLAUDE_CODE_USE_GEMINI = '1' process.env.CLAUDE_CODE_USE_GEMINI = '1'
process.env.CLAUDE_CODE_USE_GITHUB = '1' process.env.CLAUDE_CODE_USE_GITHUB = '1'
const { getAPIProvider } = await importFreshProvidersModule()
expect(getAPIProvider()).toBe('gemini') expect(getAPIProvider()).toBe('gemini')
}) })

View File

@@ -1,13 +1,6 @@
import { afterEach, describe, expect, test } from 'bun:test' import { afterEach, describe, expect, mock, test } from 'bun:test'
import { saveGlobalConfig, type ProviderProfile } from './config.js' import type { ProviderProfile } from './config.js'
import { getAPIProvider } from './model/providers.js'
import {
applyActiveProviderProfileFromConfig,
applyProviderProfileToProcessEnv,
deleteProviderProfile,
getProviderPresetDefaults,
} from './providerProfiles.js'
const originalEnv = { ...process.env } const originalEnv = { ...process.env }
@@ -29,6 +22,7 @@ const RESTORED_KEYS = [
] as const ] as const
afterEach(() => { afterEach(() => {
mock.restore()
for (const key of RESTORED_KEYS) { for (const key of RESTORED_KEYS) {
if (originalEnv[key] === undefined) { if (originalEnv[key] === undefined) {
delete process.env[key] delete process.env[key]
@@ -36,14 +30,6 @@ afterEach(() => {
process.env[key] = originalEnv[key] process.env[key] = originalEnv[key]
} }
} }
saveGlobalConfig(current => ({
...current,
providerProfiles: [],
activeProviderProfileId: undefined,
openaiAdditionalModelOptionsCache: [],
openaiAdditionalModelOptionsCacheByProfile: {},
}))
}) })
function buildProfile(overrides: Partial<ProviderProfile> = {}): ProviderProfile { function buildProfile(overrides: Partial<ProviderProfile> = {}): ProviderProfile {
@@ -57,10 +43,43 @@ function buildProfile(overrides: Partial<ProviderProfile> = {}): ProviderProfile
} }
} }
async function importFreshProviderModules() {
mock.restore()
let configState = {
providerProfiles: [] as ProviderProfile[],
activeProviderProfileId: undefined as string | undefined,
openaiAdditionalModelOptionsCache: [] as any[],
openaiAdditionalModelOptionsCacheByProfile: {} as Record<string, any[]>,
}
mock.module('./config.js', () => ({
getGlobalConfig: () => configState,
saveGlobalConfig: (
updater: (current: typeof configState) => typeof configState,
) => {
configState = updater(configState)
},
}))
const providerProfiles = await import(
`./providerProfiles.js?ts=${Date.now()}-${Math.random()}`
)
const providers = await import(
`./model/providers.js?ts=${Date.now()}-${Math.random()}`
)
return {
...providerProfiles,
...providers,
}
}
describe('applyProviderProfileToProcessEnv', () => { describe('applyProviderProfileToProcessEnv', () => {
test('openai profile clears competing gemini/github flags', () => { test('openai profile clears competing gemini/github flags', async () => {
process.env.CLAUDE_CODE_USE_GEMINI = '1' process.env.CLAUDE_CODE_USE_GEMINI = '1'
process.env.CLAUDE_CODE_USE_GITHUB = '1' process.env.CLAUDE_CODE_USE_GITHUB = '1'
const { applyProviderProfileToProcessEnv, getAPIProvider } =
await importFreshProviderModules()
applyProviderProfileToProcessEnv(buildProfile()) applyProviderProfileToProcessEnv(buildProfile())
@@ -70,9 +89,11 @@ describe('applyProviderProfileToProcessEnv', () => {
expect(getAPIProvider()).toBe('openai') expect(getAPIProvider()).toBe('openai')
}) })
test('anthropic profile clears competing gemini/github flags', () => { test('anthropic profile clears competing gemini/github flags', async () => {
process.env.CLAUDE_CODE_USE_GEMINI = '1' process.env.CLAUDE_CODE_USE_GEMINI = '1'
process.env.CLAUDE_CODE_USE_GITHUB = '1' process.env.CLAUDE_CODE_USE_GITHUB = '1'
const { applyProviderProfileToProcessEnv, getAPIProvider } =
await importFreshProviderModules()
applyProviderProfileToProcessEnv( applyProviderProfileToProcessEnv(
buildProfile({ buildProfile({
@@ -90,10 +111,12 @@ describe('applyProviderProfileToProcessEnv', () => {
}) })
describe('applyActiveProviderProfileFromConfig', () => { describe('applyActiveProviderProfileFromConfig', () => {
test('does not override explicit startup provider selection', () => { test('does not override explicit startup provider selection', async () => {
process.env.CLAUDE_CODE_USE_OPENAI = '1' process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1' process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
process.env.OPENAI_MODEL = 'qwen2.5:3b' process.env.OPENAI_MODEL = 'qwen2.5:3b'
const { applyActiveProviderProfileFromConfig } =
await importFreshProviderModules()
const applied = applyActiveProviderProfileFromConfig({ const applied = applyActiveProviderProfileFromConfig({
providerProfiles: [ providerProfiles: [
@@ -111,11 +134,13 @@ describe('applyActiveProviderProfileFromConfig', () => {
expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b') expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b')
}) })
test('does not override explicit startup selection when profile marker is stale', () => { test('does not override explicit startup selection when profile marker is stale', async () => {
process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED = '1' process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED = '1'
process.env.CLAUDE_CODE_USE_OPENAI = '1' process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1' process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
process.env.OPENAI_MODEL = 'qwen2.5:3b' process.env.OPENAI_MODEL = 'qwen2.5:3b'
const { applyActiveProviderProfileFromConfig } =
await importFreshProviderModules()
const applied = applyActiveProviderProfileFromConfig({ const applied = applyActiveProviderProfileFromConfig({
providerProfiles: [ providerProfiles: [
@@ -134,7 +159,7 @@ describe('applyActiveProviderProfileFromConfig', () => {
expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b') expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b')
}) })
test('applies active profile when no explicit provider is selected', () => { test('applies active profile when no explicit provider is selected', async () => {
delete process.env.CLAUDE_CODE_USE_OPENAI delete process.env.CLAUDE_CODE_USE_OPENAI
delete process.env.CLAUDE_CODE_USE_GEMINI delete process.env.CLAUDE_CODE_USE_GEMINI
delete process.env.CLAUDE_CODE_USE_GITHUB delete process.env.CLAUDE_CODE_USE_GITHUB
@@ -144,6 +169,8 @@ describe('applyActiveProviderProfileFromConfig', () => {
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1' process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
process.env.OPENAI_MODEL = 'qwen2.5:3b' process.env.OPENAI_MODEL = 'qwen2.5:3b'
const { applyActiveProviderProfileFromConfig } =
await importFreshProviderModules()
const applied = applyActiveProviderProfileFromConfig({ const applied = applyActiveProviderProfileFromConfig({
providerProfiles: [ providerProfiles: [
@@ -164,8 +191,9 @@ describe('applyActiveProviderProfileFromConfig', () => {
}) })
describe('getProviderPresetDefaults', () => { describe('getProviderPresetDefaults', () => {
test('ollama preset defaults to a local Ollama model', () => { test('ollama preset defaults to a local Ollama model', async () => {
delete process.env.OPENAI_MODEL delete process.env.OPENAI_MODEL
const { getProviderPresetDefaults } = await importFreshProviderModules()
const defaults = getProviderPresetDefaults('ollama') const defaults = getProviderPresetDefaults('ollama')
@@ -175,23 +203,23 @@ describe('getProviderPresetDefaults', () => {
}) })
describe('deleteProviderProfile', () => { describe('deleteProviderProfile', () => {
test('deleting final profile clears provider env when active profile applied it', () => { test('deleting final profile clears provider env when active profile applied it', async () => {
applyProviderProfileToProcessEnv( const {
buildProfile({ addProviderProfile,
id: 'only_profile', deleteProviderProfile,
} =
await importFreshProviderModules()
const profile = addProviderProfile({
name: 'Only Profile',
provider: 'openai',
baseUrl: 'https://api.openai.com/v1', baseUrl: 'https://api.openai.com/v1',
model: 'gpt-4o', model: 'gpt-4o',
apiKey: 'sk-test', apiKey: 'sk-test',
}), })
)
saveGlobalConfig(current => ({ expect(profile).not.toBeNull()
...current,
providerProfiles: [buildProfile({ id: 'only_profile' })],
activeProviderProfileId: 'only_profile',
}))
const result = deleteProviderProfile('only_profile') const result = deleteProviderProfile(profile!.id)
expect(result.removed).toBe(true) expect(result.removed).toBe(true)
expect(result.activeProfileId).toBeUndefined() expect(result.activeProfileId).toBeUndefined()
@@ -215,18 +243,25 @@ describe('deleteProviderProfile', () => {
expect(process.env.ANTHROPIC_API_KEY).toBeUndefined() expect(process.env.ANTHROPIC_API_KEY).toBeUndefined()
}) })
test('deleting final profile preserves explicit startup provider env', () => { test('deleting final profile preserves explicit startup provider env', async () => {
const { addProviderProfile, deleteProviderProfile } =
await importFreshProviderModules()
const profile = addProviderProfile({
name: 'Only Profile',
provider: 'openai',
baseUrl: 'https://api.openai.com/v1',
model: 'gpt-4o',
})
expect(profile).not.toBeNull()
process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED = undefined
delete process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED
process.env.CLAUDE_CODE_USE_OPENAI = '1' process.env.CLAUDE_CODE_USE_OPENAI = '1'
process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1' process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1'
process.env.OPENAI_MODEL = 'qwen2.5:3b' process.env.OPENAI_MODEL = 'qwen2.5:3b'
saveGlobalConfig(current => ({ const result = deleteProviderProfile(profile!.id)
...current,
providerProfiles: [buildProfile({ id: 'only_profile' })],
activeProviderProfileId: 'only_profile',
}))
const result = deleteProviderProfile('only_profile')
expect(result.removed).toBe(true) expect(result.removed).toBe(true)
expect(result.activeProfileId).toBeUndefined() expect(result.activeProfileId).toBeUndefined()

View File

@@ -1,6 +1,5 @@
import { expect, test, mock, describe, beforeEach, afterEach } from "bun:test"; import { expect, test, mock, describe, beforeEach, afterEach } from "bun:test";
import { getSecureStorage } from "./index.js";
import { linuxSecretStorage } from "./linuxSecretStorage.js"; import { linuxSecretStorage } from "./linuxSecretStorage.js";
import { windowsCredentialStorage } from "./windowsCredentialStorage.js"; import { windowsCredentialStorage } from "./windowsCredentialStorage.js";
import { getSecureStorageServiceName, CREDENTIALS_SERVICE_SUFFIX } from "./macOsKeychainHelpers.js"; import { getSecureStorageServiceName, CREDENTIALS_SERVICE_SUFFIX } from "./macOsKeychainHelpers.js";
@@ -133,24 +132,31 @@ describe("Secure Storage Platform Implementations", () => {
describe("Platform Selection", () => { describe("Platform Selection", () => {
const originalPlatform = process.platform; const originalPlatform = process.platform;
async function importFreshSecureStorage() {
return import(`./index.js?ts=${Date.now()}-${Math.random()}`);
}
afterEach(() => { afterEach(() => {
Object.defineProperty(process, 'platform', { value: originalPlatform }); Object.defineProperty(process, 'platform', { value: originalPlatform });
}); });
test("darwin returns keychain with fallback", () => { test("darwin returns keychain with fallback", async () => {
Object.defineProperty(process, 'platform', { value: 'darwin' }); Object.defineProperty(process, 'platform', { value: 'darwin' });
const { getSecureStorage } = await importFreshSecureStorage();
const storage = getSecureStorage(); const storage = getSecureStorage();
expect(storage.name).toContain("keychain"); expect(storage.name).toContain("keychain");
}); });
test("linux returns libsecret with fallback", () => { test("linux returns libsecret with fallback", async () => {
Object.defineProperty(process, 'platform', { value: 'linux' }); Object.defineProperty(process, 'platform', { value: 'linux' });
const { getSecureStorage } = await importFreshSecureStorage();
const storage = getSecureStorage(); const storage = getSecureStorage();
expect(storage.name).toContain("libsecret"); expect(storage.name).toContain("libsecret");
}); });
test("win32 returns credential-locker with fallback", () => { test("win32 returns credential-locker with fallback", async () => {
Object.defineProperty(process, 'platform', { value: 'win32' }); Object.defineProperty(process, 'platform', { value: 'win32' });
const { getSecureStorage } = await importFreshSecureStorage();
const storage = getSecureStorage(); const storage = getSecureStorage();
expect(storage.name).toContain("credential-locker"); expect(storage.name).toContain("credential-locker");
}); });

View File

@@ -1,6 +1,6 @@
const test = require('node:test'); const test = require('node:test');
const assert = require('node:assert/strict'); const assert = require('node:assert/strict');
const Module = require('node:module'); const { mock } = require('bun:test');
function createStatus(overrides = {}) { function createStatus(overrides = {}) {
return { return {
@@ -30,27 +30,32 @@ function createStatus(overrides = {}) {
function loadExtension() { function loadExtension() {
const extensionPath = require.resolve('./extension'); const extensionPath = require.resolve('./extension');
delete require.cache[extensionPath]; delete require.cache[extensionPath];
mock.module('vscode', () => ({
const originalLoad = Module._load; workspace: {
Module._load = function patchedLoad(request, parent, isMain) { workspaceFolders: [],
if (request === 'vscode') { getConfiguration: () => ({
return { get: (_key, fallback) => fallback,
workspace: {}, }),
window: {}, getWorkspaceFolder: () => null,
env: {}, },
commands: {}, window: {
activeTextEditor: null,
createWebviewPanel: () => ({}),
registerWebviewViewProvider: () => ({ dispose() {} }),
showInformationMessage: async () => undefined,
showErrorMessage: async () => undefined,
},
env: {
openExternal: async () => true,
},
commands: {
registerCommand: () => ({ dispose() {} }),
executeCommand: async () => undefined,
},
Uri: { parse: value => value, file: value => value }, Uri: { parse: value => value, file: value => value },
}; ViewColumn: { Active: 1 },
} }));
return originalLoad.call(this, request, parent, isMain);
};
try {
return require('./extension'); return require('./extension');
} finally {
Module._load = originalLoad;
}
} }
test('renderControlCenterHtml uses the OpenClaude wordmark, status rail, and warm action hierarchy', () => { test('renderControlCenterHtml uses the OpenClaude wordmark, status rail, and warm action hierarchy', () => {