From 5ef79546e97ed0b560fcb9965bf6c1013759ef54 Mon Sep 17 00:00:00 2001 From: Kevin Codex Date: Sun, 5 Apr 2026 12:44:54 +0800 Subject: [PATCH] test: stabilize suite and add coverage heatmap (#373) * test: stabilize suite and add coverage heatmap * ci: run full bun test suite in pr checks --- .github/workflows/pr-checks.yml | 3 + .gitignore | 1 + README.md | 46 ++ bin/import-specifier.mjs | 10 +- package.json | 3 + scripts/render-coverage-heatmap.ts | 393 ++++++++++++++++++ src/components/TextInput.tsx | 2 +- src/services/api/withRetry.test.ts | 80 +++- src/services/mcp/officialRegistry.test.ts | 11 + src/tools/WebFetchTool/domainCheck.test.ts | 11 + src/utils/apiPreconnect.test.ts | 14 + src/utils/geminiCredentials.test.ts | 63 ++- src/utils/model/providers.test.ts | 25 +- src/utils/providerProfiles.test.ts | 127 ++++-- .../secureStorage/platformStorage.test.ts | 14 +- .../openclaude-vscode/src/extension.test.js | 49 ++- 16 files changed, 732 insertions(+), 120 deletions(-) create mode 100644 scripts/render-coverage-heatmap.ts diff --git a/.github/workflows/pr-checks.yml b/.github/workflows/pr-checks.yml index e5da7627..f05311a1 100644 --- a/.github/workflows/pr-checks.yml +++ b/.github/workflows/pr-checks.yml @@ -33,6 +33,9 @@ jobs: - name: Smoke check run: bun run smoke + - name: Full unit test suite + run: bun test --max-concurrency=1 + - name: Provider tests run: bun run test:provider diff --git a/.gitignore b/.gitignore index 4462c51e..636eaf63 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,4 @@ dist/ !.env.example .openclaude-profile.json reports/ +coverage/ diff --git a/README.md b/README.md index 8f3f8052..8edea3d6 100644 --- a/README.md +++ b/README.md @@ -196,11 +196,56 @@ node dist/cli.mjs Helpful commands: - `bun run dev` +- `bun test` +- `bun run test:coverage` - `bun run smoke` - `bun run doctor:runtime` - `bun run verify:privacy` - focused `bun test ...` runs for the areas you touch +## Testing And Coverage + +OpenClaude uses Bun's built-in test 
runner for unit tests. + +Run the full unit suite: + +```bash +bun test +``` + +Generate unit test coverage: + +```bash +bun run test:coverage +``` + +Open the visual coverage report: + +```bash +open coverage/index.html +``` + +If you already have `coverage/lcov.info` and only want to rebuild the UI: + +```bash +bun run test:coverage:ui +``` + +Use focused test runs when you only touch one area: + +- `bun run test:provider` +- `bun run test:provider-recommendation` +- `bun test path/to/file.test.ts` + +Recommended contributor validation before opening a PR: + +- `bun run build` +- `bun run smoke` +- `bun run test:coverage` for broader unit coverage when your change affects shared runtime or provider logic +- focused `bun test ...` runs for the files and flows you changed + +Coverage output is written to `coverage/lcov.info`, and OpenClaude also generates a git-activity-style heatmap at `coverage/index.html`. + ## Repository Structure - `src/` - core CLI/runtime @@ -231,6 +276,7 @@ Contributions are welcome. For larger changes, open an issue first so the scope is clear before implementation. 
Helpful validation commands include: - `bun run build` +- `bun run test:coverage` - `bun run smoke` - focused `bun test ...` runs for touched areas diff --git a/bin/import-specifier.mjs b/bin/import-specifier.mjs index 4fcc5d3c..ca2ef9a3 100644 --- a/bin/import-specifier.mjs +++ b/bin/import-specifier.mjs @@ -1,7 +1,13 @@ -import { join } from 'path' +import { join, win32 } from 'path' import { pathToFileURL } from 'url' export function getDistImportSpecifier(baseDir) { - const distPath = join(baseDir, '..', 'dist', 'cli.mjs') + if (/^[A-Za-z]:\\/.test(baseDir)) { + const distPath = win32.join(baseDir, '..', 'dist', 'cli.mjs') + return `file:///${distPath.replace(/\\/g, '/')}` + } + + const joinImpl = join + const distPath = joinImpl(baseDir, '..', 'dist', 'cli.mjs') return pathToFileURL(distPath).href } diff --git a/package.json b/package.json index a45fdc05..c9370925 100644 --- a/package.json +++ b/package.json @@ -31,6 +31,9 @@ "dev:fast": "bun run profile:fast && bun run dev:ollama:fast", "dev:code": "bun run profile:code && bun run dev:profile", "start": "node dist/cli.mjs", + "test": "bun test", + "test:coverage": "bun test --coverage --coverage-reporter=lcov --coverage-dir=coverage --max-concurrency=1 && bun run scripts/render-coverage-heatmap.ts", + "test:coverage:ui": "bun run scripts/render-coverage-heatmap.ts", "test:provider-recommendation": "bun test src/utils/providerRecommendation.test.ts src/utils/providerProfile.test.ts", "typecheck": "tsc --noEmit", "smoke": "bun run build && node dist/cli.mjs --version", diff --git a/scripts/render-coverage-heatmap.ts b/scripts/render-coverage-heatmap.ts new file mode 100644 index 00000000..e556a54b --- /dev/null +++ b/scripts/render-coverage-heatmap.ts @@ -0,0 +1,393 @@ +import { mkdir, readFile, writeFile } from 'fs/promises' +import { dirname, resolve } from 'path' + +type FileCoverage = { + path: string + found: number + hit: number + chunks: number[] +} + +type DirectoryCoverage = { + path: string + found: 
number + hit: number +} + +const LCOV_PATH = resolve(process.cwd(), 'coverage/lcov.info') +const HTML_PATH = resolve(process.cwd(), 'coverage/index.html') +const CHUNK_COUNT = 20 + +function escapeHtml(value: string): string { + return value + .replaceAll('&', '&') + .replaceAll('<', '<') + .replaceAll('>', '>') + .replaceAll('"', '"') +} + +function bucketColor(ratio: number): string { + if (ratio >= 0.9) return '#166534' + if (ratio >= 0.75) return '#15803d' + if (ratio >= 0.5) return '#65a30d' + if (ratio > 0) return '#a3a3a3' + return '#262626' +} + +function coverageLabel(ratio: number): string { + return `${Math.round(ratio * 100)}%` +} + +function coverageRatio(found: number, hit: number): number { + return found === 0 ? 0 : hit / found +} + +function bucketGlyph(ratio: number): string { + if (ratio >= 0.9) return '█' + if (ratio >= 0.75) return '▓' + if (ratio >= 0.5) return '▒' + if (ratio > 0) return '░' + return '·' +} + +function terminalBar(chunks: number[]): string { + return chunks.map(bucketGlyph).join('') +} + +function summarizeDirectories(files: FileCoverage[]): DirectoryCoverage[] { + const dirs = new Map() + + for (const file of files) { + const dir = + file.path.includes('/') ? file.path.slice(0, file.path.lastIndexOf('/')) : '.' + const current = dirs.get(dir) ?? 
{ path: dir, found: 0, hit: 0 } + current.found += file.found + current.hit += file.hit + dirs.set(dir, current) + } + + return [...dirs.values()].sort((a, b) => { + const left = coverageRatio(a.found, a.hit) + const right = coverageRatio(b.found, b.hit) + if (right !== left) return right - left + return b.found - a.found + }) +} + +function buildTerminalReport(files: FileCoverage[]): string { + const totalFound = files.reduce((sum, file) => sum + file.found, 0) + const totalHit = files.reduce((sum, file) => sum + file.hit, 0) + const totalRatio = coverageRatio(totalFound, totalHit) + const overallChunks = new Array(CHUNK_COUNT).fill(totalRatio) + const topDirectories = summarizeDirectories(files) + .filter(dir => dir.found > 0) + .slice(0, 8) + const lowestFiles = [...files] + .filter(file => file.found >= 20) + .sort((a, b) => { + const left = coverageRatio(a.found, a.hit) + const right = coverageRatio(b.found, b.hit) + if (left !== right) return left - right + return b.found - a.found + }) + .slice(0, 10) + + const lines = [ + '', + 'Coverage Activity', + `${terminalBar(overallChunks)} ${coverageLabel(totalRatio)} ${totalHit}/${totalFound} lines ${files.length} files`, + '', + 'Top Directories', + ] + + for (const dir of topDirectories) { + const ratio = coverageRatio(dir.found, dir.hit) + lines.push( + `${terminalBar(new Array(12).fill(ratio))} ${coverageLabel(ratio).padStart(4)} ${String(dir.hit).padStart(5)}/${String(dir.found).padEnd(5)} ${dir.path}`, + ) + } + + lines.push('', 'Lowest Coverage Files') + + for (const file of lowestFiles) { + const ratio = coverageRatio(file.found, file.hit) + lines.push( + `${terminalBar(file.chunks).padEnd(CHUNK_COUNT)} ${coverageLabel(ratio).padStart(4)} ${String(file.hit).padStart(5)}/${String(file.found).padEnd(5)} ${file.path}`, + ) + } + + lines.push('', `HTML report: ${HTML_PATH}`) + return lines.join('\n') +} + +function parseLcov(content: string): FileCoverage[] { + const files: FileCoverage[] = [] + const sections 
= content.split('end_of_record') + + for (const rawSection of sections) { + const section = rawSection.trim() + if (!section) continue + + const lines = section.split('\n') + let filePath = '' + const lineHits = new Map() + + for (const line of lines) { + if (line.startsWith('SF:')) { + filePath = line.slice(3).trim() + } else if (line.startsWith('DA:')) { + const [lineNumberText, hitText] = line.slice(3).split(',') + const lineNumber = Number(lineNumberText) + const hits = Number(hitText) + if (Number.isFinite(lineNumber) && Number.isFinite(hits)) { + lineHits.set(lineNumber, hits) + } + } + } + + if (!filePath || lineHits.size === 0) continue + + const ordered = [...lineHits.entries()].sort((a, b) => a[0] - b[0]) + const found = ordered.length + const hit = ordered.filter(([, hits]) => hits > 0).length + const chunkSize = Math.max(1, Math.ceil(found / CHUNK_COUNT)) + const chunks: number[] = [] + + for (let index = 0; index < found; index += chunkSize) { + const slice = ordered.slice(index, index + chunkSize) + const covered = slice.filter(([, hits]) => hits > 0).length + chunks.push(slice.length === 0 ? 0 : covered / slice.length) + } + + while (chunks.length < CHUNK_COUNT) { + chunks.push(0) + } + + files.push({ + path: filePath, + found, + hit, + chunks: chunks.slice(0, CHUNK_COUNT), + }) + } + + return files.sort((a, b) => { + const left = a.found === 0 ? 0 : a.hit / a.found + const right = b.found === 0 ? 0 : b.hit / b.found + if (right !== left) return right - left + return a.path.localeCompare(b.path) + }) +} + +function buildHtml(files: FileCoverage[]): string { + const totalFound = files.reduce((sum, file) => sum + file.found, 0) + const totalHit = files.reduce((sum, file) => sum + file.hit, 0) + const totalRatio = totalFound === 0 ? 
0 : totalHit / totalFound + + const cards = [ + ['Files', String(files.length)], + ['Covered Lines', `${totalHit}/${totalFound}`], + ['Line Coverage', coverageLabel(totalRatio)], + ] + + const rows = files + .map(file => { + const ratio = file.found === 0 ? 0 : file.hit / file.found + const squares = file.chunks + .map( + (chunk, index) => + ``, + ) + .join('') + + return ` + + ${escapeHtml(file.path)} + ${coverageLabel(ratio)} + ${file.hit}/${file.found} + ${squares} + + ` + }) + .join('') + + const summary = cards + .map( + ([label, value]) => ` +
+      <div class="card">
+        <div class="card-label">${escapeHtml(label)}</div>
+        <div class="card-value">${escapeHtml(value)}</div>
+      </div>
+  <header>
+    <h1>Coverage Activity</h1>
+    <p class="subtitle">Git-style heatmap generated from coverage/lcov.info</p>
+  </header>
+  <section class="summary">
+    ${summary}
+  </section>
+ + + + + + + + + + ${rows} +
FileCoverageLinesActivity
+
+  <footer class="legend">
+    Less
+    <span class="square" style="background:#262626"></span>
+    <span class="square" style="background:#a3a3a3"></span>
+    <span class="square" style="background:#65a30d"></span>
+    <span class="square" style="background:#15803d"></span>
+    <span class="square" style="background:#166534"></span>
+    More
+  </footer>
+ +` +} + +async function main() { + const content = await readFile(LCOV_PATH, 'utf8') + const files = parseLcov(content) + const html = buildHtml(files) + await mkdir(dirname(HTML_PATH), { recursive: true }) + await writeFile(HTML_PATH, html, 'utf8') + console.log(buildTerminalReport(files)) + console.log(`coverage heatmap written to ${HTML_PATH}`) +} + +await main() diff --git a/src/components/TextInput.tsx b/src/components/TextInput.tsx index f81c053f..f7c6b2d4 100644 --- a/src/components/TextInput.tsx +++ b/src/components/TextInput.tsx @@ -40,7 +40,7 @@ export default function TextInput(props: Props): React.ReactNode { // Hoisted to mount-time — this component re-renders on every keystroke. const accessibilityEnabled = useMemo(() => isEnvTruthy(process.env.CLAUDE_CODE_ACCESSIBILITY), []); const settings = useSettings(); - const reducedMotion = settings.prefersReducedMotion ?? false; + const reducedMotion = settings?.prefersReducedMotion ?? false; const voiceState = feature('VOICE_MODE') ? 
// biome-ignore lint/correctness/useHookAtTopLevel: feature() is a compile-time constant useVoiceState(s => s.voiceState) : 'idle' as const; diff --git a/src/services/api/withRetry.test.ts b/src/services/api/withRetry.test.ts index 2bf57555..d9a26d75 100644 --- a/src/services/api/withRetry.test.ts +++ b/src/services/api/withRetry.test.ts @@ -1,5 +1,4 @@ -import { describe, expect, test, afterEach } from 'bun:test' -import { getRateLimitResetDelayMs, parseOpenAIDuration } from './withRetry.js' +import { afterEach, describe, expect, mock, test } from 'bun:test' import { APIError } from '@anthropic-ai/sdk' // Helper to build a mock APIError with specific headers @@ -28,42 +27,71 @@ afterEach(() => { if (originalEnv[key] === undefined) delete process.env[key] else process.env[key] = originalEnv[key] } + mock.restore() }) +async function importFreshWithRetryModule( + provider: + | 'firstParty' + | 'openai' + | 'github' + | 'bedrock' + | 'vertex' + | 'gemini' + | 'codex' + | 'foundry' = 'firstParty', +) { + mock.restore() + mock.module('src/utils/model/providers.js', () => ({ + getAPIProvider: () => provider, + getAPIProviderForStatsig: () => provider, + })) + return import(`./withRetry.js?ts=${Date.now()}-${Math.random()}`) +} + // --- parseOpenAIDuration --- describe('parseOpenAIDuration', () => { - test('parses seconds: "1s" → 1000', () => { + test('parses seconds: "1s" → 1000', async () => { + const { parseOpenAIDuration } = await importFreshWithRetryModule() expect(parseOpenAIDuration('1s')).toBe(1000) }) - test('parses minutes+seconds: "6m0s" → 360000', () => { + test('parses minutes+seconds: "6m0s" → 360000', async () => { + const { parseOpenAIDuration } = await importFreshWithRetryModule() expect(parseOpenAIDuration('6m0s')).toBe(360000) }) - test('parses hours+minutes+seconds: "1h30m0s" → 5400000', () => { + test('parses hours+minutes+seconds: "1h30m0s" → 5400000', async () => { + const { parseOpenAIDuration } = await importFreshWithRetryModule() 
expect(parseOpenAIDuration('1h30m0s')).toBe(5400000) }) - test('parses milliseconds: "500ms" → 500', () => { + test('parses milliseconds: "500ms" → 500', async () => { + const { parseOpenAIDuration } = await importFreshWithRetryModule() expect(parseOpenAIDuration('500ms')).toBe(500) }) - test('parses minutes only: "2m" → 120000', () => { + test('parses minutes only: "2m" → 120000', async () => { + const { parseOpenAIDuration } = await importFreshWithRetryModule() expect(parseOpenAIDuration('2m')).toBe(120000) }) - test('returns null for empty string', () => { + test('returns null for empty string', async () => { + const { parseOpenAIDuration } = await importFreshWithRetryModule() expect(parseOpenAIDuration('')).toBeNull() }) - test('returns null for unrecognized format', () => { + test('returns null for unrecognized format', async () => { + const { parseOpenAIDuration } = await importFreshWithRetryModule() expect(parseOpenAIDuration('invalid')).toBeNull() }) }) // --- getRateLimitResetDelayMs --- describe('getRateLimitResetDelayMs - Anthropic (firstParty)', () => { - test('reads anthropic-ratelimit-unified-reset Unix timestamp', () => { + test('reads anthropic-ratelimit-unified-reset Unix timestamp', async () => { + const { getRateLimitResetDelayMs } = + await importFreshWithRetryModule('firstParty') const futureUnixSec = Math.floor(Date.now() / 1000) + 60 const error = makeError({ 'anthropic-ratelimit-unified-reset': String(futureUnixSec), @@ -74,12 +102,16 @@ describe('getRateLimitResetDelayMs - Anthropic (firstParty)', () => { expect(delay!).toBeLessThanOrEqual(60_000) }) - test('returns null when header absent', () => { + test('returns null when header absent', async () => { + const { getRateLimitResetDelayMs } = + await importFreshWithRetryModule('firstParty') const error = makeError({}) expect(getRateLimitResetDelayMs(error)).toBeNull() }) - test('returns null when reset is in the past', () => { + test('returns null when reset is in the past', async () => { + 
const { getRateLimitResetDelayMs } = + await importFreshWithRetryModule('firstParty') const pastUnixSec = Math.floor(Date.now() / 1000) - 10 const error = makeError({ 'anthropic-ratelimit-unified-reset': String(pastUnixSec), @@ -89,15 +121,19 @@ describe('getRateLimitResetDelayMs - Anthropic (firstParty)', () => { }) describe('getRateLimitResetDelayMs - OpenAI provider', () => { - test('reads x-ratelimit-reset-requests duration string', () => { + test('reads x-ratelimit-reset-requests duration string', async () => { process.env.CLAUDE_CODE_USE_OPENAI = '1' + const { getRateLimitResetDelayMs } = + await importFreshWithRetryModule('openai') const error = makeError({ 'x-ratelimit-reset-requests': '30s' }) const delay = getRateLimitResetDelayMs(error) expect(delay).toBe(30_000) }) - test('reads x-ratelimit-reset-tokens and picks the larger delay', () => { + test('reads x-ratelimit-reset-tokens and picks the larger delay', async () => { process.env.CLAUDE_CODE_USE_OPENAI = '1' + const { getRateLimitResetDelayMs } = + await importFreshWithRetryModule('openai') const error = makeError({ 'x-ratelimit-reset-requests': '10s', 'x-ratelimit-reset-tokens': '1m0s', @@ -107,29 +143,37 @@ describe('getRateLimitResetDelayMs - OpenAI provider', () => { expect(delay).toBe(60_000) }) - test('returns null when no openai rate limit headers present', () => { + test('returns null when no openai rate limit headers present', async () => { process.env.CLAUDE_CODE_USE_OPENAI = '1' + const { getRateLimitResetDelayMs } = + await importFreshWithRetryModule('openai') const error = makeError({}) expect(getRateLimitResetDelayMs(error)).toBeNull() }) - test('works for github provider too', () => { + test('works for github provider too', async () => { process.env.CLAUDE_CODE_USE_GITHUB = '1' + const { getRateLimitResetDelayMs } = + await importFreshWithRetryModule('github') const error = makeError({ 'x-ratelimit-reset-requests': '5s' }) expect(getRateLimitResetDelayMs(error)).toBe(5_000) }) }) 
describe('getRateLimitResetDelayMs - providers without reset headers', () => { - test('returns null for bedrock', () => { + test('returns null for bedrock', async () => { process.env.CLAUDE_CODE_USE_BEDROCK = '1' + const { getRateLimitResetDelayMs } = + await importFreshWithRetryModule('bedrock') const error = makeError({ 'anthropic-ratelimit-unified-reset': String(Math.floor(Date.now() / 1000) + 60) }) // Bedrock doesn't use this header — should still return null expect(getRateLimitResetDelayMs(error)).toBeNull() }) - test('returns null for vertex', () => { + test('returns null for vertex', async () => { process.env.CLAUDE_CODE_USE_VERTEX = '1' + const { getRateLimitResetDelayMs } = + await importFreshWithRetryModule('vertex') const error = makeError({}) expect(getRateLimitResetDelayMs(error)).toBeNull() }) diff --git a/src/services/mcp/officialRegistry.test.ts b/src/services/mcp/officialRegistry.test.ts index f48245b8..75ab4f04 100644 --- a/src/services/mcp/officialRegistry.test.ts +++ b/src/services/mcp/officialRegistry.test.ts @@ -4,6 +4,7 @@ import axios from 'axios' const originalEnv = { ...process.env } async function importFreshModule() { + mock.restore() return import(`./officialRegistry.ts?ts=${Date.now()}-${Math.random()}`) } @@ -13,11 +14,15 @@ beforeEach(() => { afterEach(() => { process.env = { ...originalEnv } + mock.restore() }) describe('prefetchOfficialMcpUrls', () => { test('does not fetch registry when using OpenAI mode', async () => { process.env.CLAUDE_CODE_USE_OPENAI = '1' + mock.module('../../utils/model/providers.js', () => ({ + getAPIProvider: () => 'openai', + })) const getSpy = mock(() => Promise.resolve({ data: { servers: [] } })) axios.get = getSpy as typeof axios.get @@ -29,6 +34,9 @@ describe('prefetchOfficialMcpUrls', () => { test('does not fetch registry when using Gemini mode', async () => { process.env.CLAUDE_CODE_USE_GEMINI = '1' + mock.module('../../utils/model/providers.js', () => ({ + getAPIProvider: () => 'gemini', + })) 
const getSpy = mock(() => Promise.resolve({ data: { servers: [] } })) axios.get = getSpy as typeof axios.get @@ -43,6 +51,9 @@ describe('prefetchOfficialMcpUrls', () => { delete process.env.CLAUDE_CODE_USE_GEMINI delete process.env.CLAUDE_CODE_USE_GITHUB + mock.module('../../utils/model/providers.js', () => ({ + getAPIProvider: () => 'firstParty', + })) const getSpy = mock(() => Promise.resolve({ data: { diff --git a/src/tools/WebFetchTool/domainCheck.test.ts b/src/tools/WebFetchTool/domainCheck.test.ts index 42de14e5..15d3bc4c 100644 --- a/src/tools/WebFetchTool/domainCheck.test.ts +++ b/src/tools/WebFetchTool/domainCheck.test.ts @@ -4,6 +4,7 @@ import axios from 'axios' const originalEnv = { ...process.env } async function importFreshModule() { + mock.restore() return import(`./utils.ts?ts=${Date.now()}-${Math.random()}`) } @@ -13,11 +14,15 @@ beforeEach(() => { afterEach(() => { process.env = { ...originalEnv } + mock.restore() }) describe('checkDomainBlocklist', () => { test('returns allowed without API call in OpenAI mode', async () => { process.env.CLAUDE_CODE_USE_OPENAI = '1' + mock.module('../../utils/model/providers.js', () => ({ + getAPIProvider: () => 'openai', + })) const getSpy = mock(() => Promise.resolve({ status: 200, data: { can_fetch: true } }), ) @@ -32,6 +37,9 @@ describe('checkDomainBlocklist', () => { test('returns allowed without API call in Gemini mode', async () => { process.env.CLAUDE_CODE_USE_GEMINI = '1' + mock.module('../../utils/model/providers.js', () => ({ + getAPIProvider: () => 'gemini', + })) const getSpy = mock(() => Promise.resolve({ status: 200, data: { can_fetch: true } }), ) @@ -49,6 +57,9 @@ describe('checkDomainBlocklist', () => { delete process.env.CLAUDE_CODE_USE_GEMINI delete process.env.CLAUDE_CODE_USE_GITHUB + mock.module('../../utils/model/providers.js', () => ({ + getAPIProvider: () => 'firstParty', + })) const getSpy = mock(() => Promise.resolve({ status: 200, data: { can_fetch: true } }), ) diff --git 
a/src/utils/apiPreconnect.test.ts b/src/utils/apiPreconnect.test.ts index 1cce48c6..5950884a 100644 --- a/src/utils/apiPreconnect.test.ts +++ b/src/utils/apiPreconnect.test.ts @@ -4,6 +4,7 @@ const originalEnv = { ...process.env } const originalFetch = globalThis.fetch async function importFreshModule() { + mock.restore() return import(`./apiPreconnect.ts?ts=${Date.now()}-${Math.random()}`) } @@ -14,11 +15,15 @@ beforeEach(() => { afterEach(() => { process.env = { ...originalEnv } globalThis.fetch = originalFetch + mock.restore() }) describe('preconnectAnthropicApi', () => { test('does not fetch when OpenAI mode is enabled', async () => { process.env.CLAUDE_CODE_USE_OPENAI = '1' + mock.module('./model/providers.js', () => ({ + getAPIProvider: () => 'openai', + })) const fetchMock = mock(() => Promise.resolve(new Response(null, { status: 200 }))) globalThis.fetch = fetchMock as typeof globalThis.fetch @@ -30,6 +35,9 @@ describe('preconnectAnthropicApi', () => { test('does not fetch when Gemini mode is enabled', async () => { process.env.CLAUDE_CODE_USE_GEMINI = '1' + mock.module('./model/providers.js', () => ({ + getAPIProvider: () => 'gemini', + })) const fetchMock = mock(() => Promise.resolve(new Response(null, { status: 200 }))) globalThis.fetch = fetchMock as typeof globalThis.fetch @@ -41,6 +49,9 @@ describe('preconnectAnthropicApi', () => { test('does not fetch when GitHub mode is enabled', async () => { process.env.CLAUDE_CODE_USE_GITHUB = '1' + mock.module('./model/providers.js', () => ({ + getAPIProvider: () => 'github', + })) const fetchMock = mock(() => Promise.resolve(new Response(null, { status: 200 }))) globalThis.fetch = fetchMock as typeof globalThis.fetch @@ -58,6 +69,9 @@ describe('preconnectAnthropicApi', () => { delete process.env.CLAUDE_CODE_USE_VERTEX delete process.env.CLAUDE_CODE_USE_FOUNDRY + mock.module('./model/providers.js', () => ({ + getAPIProvider: () => 'firstParty', + })) const fetchMock = mock(() => Promise.resolve(new 
Response(null, { status: 200 }))) globalThis.fetch = fetchMock as typeof globalThis.fetch diff --git a/src/utils/geminiCredentials.test.ts b/src/utils/geminiCredentials.test.ts index 87c5e4ca..9837574b 100644 --- a/src/utils/geminiCredentials.test.ts +++ b/src/utils/geminiCredentials.test.ts @@ -1,31 +1,60 @@ -import { afterEach, expect, test } from 'bun:test' +import { afterEach, beforeEach, expect, mock, test } from 'bun:test' -import { - clearGeminiAccessToken, - readGeminiAccessToken, - saveGeminiAccessToken, -} from './geminiCredentials.ts' +type MockStorageData = Record -const originalToken = process.env.GEMINI_ACCESS_TOKEN +const originalEnv = { ...process.env } +let storageState: MockStorageData = {} -afterEach(() => { - if (originalToken === undefined) { - delete process.env.GEMINI_ACCESS_TOKEN - } else { - process.env.GEMINI_ACCESS_TOKEN = originalToken - } - clearGeminiAccessToken() +async function importFreshModule() { + mock.module('./secureStorage/index.js', () => ({ + getSecureStorage: () => ({ + name: 'mock-secure-storage', + read: () => storageState, + readAsync: async () => storageState, + update: (next: MockStorageData) => { + storageState = next + return { success: true } + }, + delete: () => { + storageState = {} + return true + }, + }), + })) + + return import(`./geminiCredentials.ts?ts=${Date.now()}-${Math.random()}`) +} + +beforeEach(() => { + process.env = { ...originalEnv } + storageState = {} }) -test('saveGeminiAccessToken stores and reads back the token', () => { +afterEach(() => { + process.env = { ...originalEnv } + storageState = {} + mock.restore() +}) + +test('saveGeminiAccessToken stores and reads back the token', async () => { + const { + readGeminiAccessToken, + saveGeminiAccessToken, + } = await importFreshModule() + const result = saveGeminiAccessToken('token-123') expect(result.success).toBe(true) expect(readGeminiAccessToken()).toBe('token-123') }) -test('clearGeminiAccessToken removes the stored token', () => { 
+test('clearGeminiAccessToken removes the stored token', async () => { + const { + clearGeminiAccessToken, + readGeminiAccessToken, + saveGeminiAccessToken, + } = await importFreshModule() + expect(saveGeminiAccessToken('token-123').success).toBe(true) expect(clearGeminiAccessToken().success).toBe(true) expect(readGeminiAccessToken()).toBeUndefined() }) - diff --git a/src/utils/model/providers.test.ts b/src/utils/model/providers.test.ts index ea03454f..272ed17b 100644 --- a/src/utils/model/providers.test.ts +++ b/src/utils/model/providers.test.ts @@ -1,10 +1,5 @@ import { afterEach, expect, test } from 'bun:test' -import { - getAPIProvider, - usesAnthropicAccountFlow, -} from './providers.js' - const originalEnv = { CLAUDE_CODE_USE_GEMINI: process.env.CLAUDE_CODE_USE_GEMINI, CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB, @@ -23,6 +18,10 @@ afterEach(() => { process.env.CLAUDE_CODE_USE_FOUNDRY = originalEnv.CLAUDE_CODE_USE_FOUNDRY }) +async function importFreshProvidersModule() { + return import(`./providers.js?ts=${Date.now()}-${Math.random()}`) +} + function clearProviderEnv(): void { delete process.env.CLAUDE_CODE_USE_GEMINI delete process.env.CLAUDE_CODE_USE_GITHUB @@ -34,9 +33,12 @@ function clearProviderEnv(): void { test('first-party provider keeps Anthropic account setup flow enabled', () => { clearProviderEnv() - - expect(getAPIProvider()).toBe('firstParty') - expect(usesAnthropicAccountFlow()).toBe(true) + return importFreshProvidersModule().then( + ({ getAPIProvider, usesAnthropicAccountFlow }) => { + expect(getAPIProvider()).toBe('firstParty') + expect(usesAnthropicAccountFlow()).toBe(true) + }, + ) }) test.each([ @@ -48,19 +50,22 @@ test.each([ ['CLAUDE_CODE_USE_FOUNDRY', 'foundry'], ] as const)( '%s disables Anthropic account setup flow', - (envKey, provider) => { + async (envKey, provider) => { clearProviderEnv() process.env[envKey] = '1' + const { getAPIProvider, usesAnthropicAccountFlow } = + await importFreshProvidersModule() 
expect(getAPIProvider()).toBe(provider) expect(usesAnthropicAccountFlow()).toBe(false) }, ) -test('GEMINI takes precedence over GitHub when both are set', () => { +test('GEMINI takes precedence over GitHub when both are set', async () => { clearProviderEnv() process.env.CLAUDE_CODE_USE_GEMINI = '1' process.env.CLAUDE_CODE_USE_GITHUB = '1' + const { getAPIProvider } = await importFreshProvidersModule() expect(getAPIProvider()).toBe('gemini') }) diff --git a/src/utils/providerProfiles.test.ts b/src/utils/providerProfiles.test.ts index 877f923f..23718423 100644 --- a/src/utils/providerProfiles.test.ts +++ b/src/utils/providerProfiles.test.ts @@ -1,13 +1,6 @@ -import { afterEach, describe, expect, test } from 'bun:test' +import { afterEach, describe, expect, mock, test } from 'bun:test' -import { saveGlobalConfig, type ProviderProfile } from './config.js' -import { getAPIProvider } from './model/providers.js' -import { - applyActiveProviderProfileFromConfig, - applyProviderProfileToProcessEnv, - deleteProviderProfile, - getProviderPresetDefaults, -} from './providerProfiles.js' +import type { ProviderProfile } from './config.js' const originalEnv = { ...process.env } @@ -29,6 +22,7 @@ const RESTORED_KEYS = [ ] as const afterEach(() => { + mock.restore() for (const key of RESTORED_KEYS) { if (originalEnv[key] === undefined) { delete process.env[key] @@ -36,14 +30,6 @@ afterEach(() => { process.env[key] = originalEnv[key] } } - - saveGlobalConfig(current => ({ - ...current, - providerProfiles: [], - activeProviderProfileId: undefined, - openaiAdditionalModelOptionsCache: [], - openaiAdditionalModelOptionsCacheByProfile: {}, - })) }) function buildProfile(overrides: Partial = {}): ProviderProfile { @@ -57,10 +43,43 @@ function buildProfile(overrides: Partial = {}): ProviderProfile } } +async function importFreshProviderModules() { + mock.restore() + let configState = { + providerProfiles: [] as ProviderProfile[], + activeProviderProfileId: undefined as string | undefined, 
+ openaiAdditionalModelOptionsCache: [] as any[], + openaiAdditionalModelOptionsCacheByProfile: {} as Record, + } + + mock.module('./config.js', () => ({ + getGlobalConfig: () => configState, + saveGlobalConfig: ( + updater: (current: typeof configState) => typeof configState, + ) => { + configState = updater(configState) + }, + })) + + const providerProfiles = await import( + `./providerProfiles.js?ts=${Date.now()}-${Math.random()}` + ) + const providers = await import( + `./model/providers.js?ts=${Date.now()}-${Math.random()}` + ) + + return { + ...providerProfiles, + ...providers, + } +} + describe('applyProviderProfileToProcessEnv', () => { - test('openai profile clears competing gemini/github flags', () => { + test('openai profile clears competing gemini/github flags', async () => { process.env.CLAUDE_CODE_USE_GEMINI = '1' process.env.CLAUDE_CODE_USE_GITHUB = '1' + const { applyProviderProfileToProcessEnv, getAPIProvider } = + await importFreshProviderModules() applyProviderProfileToProcessEnv(buildProfile()) @@ -70,9 +89,11 @@ describe('applyProviderProfileToProcessEnv', () => { expect(getAPIProvider()).toBe('openai') }) - test('anthropic profile clears competing gemini/github flags', () => { + test('anthropic profile clears competing gemini/github flags', async () => { process.env.CLAUDE_CODE_USE_GEMINI = '1' process.env.CLAUDE_CODE_USE_GITHUB = '1' + const { applyProviderProfileToProcessEnv, getAPIProvider } = + await importFreshProviderModules() applyProviderProfileToProcessEnv( buildProfile({ @@ -90,10 +111,12 @@ describe('applyProviderProfileToProcessEnv', () => { }) describe('applyActiveProviderProfileFromConfig', () => { - test('does not override explicit startup provider selection', () => { + test('does not override explicit startup provider selection', async () => { process.env.CLAUDE_CODE_USE_OPENAI = '1' process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1' process.env.OPENAI_MODEL = 'qwen2.5:3b' + const { applyActiveProviderProfileFromConfig } 
= + await importFreshProviderModules() const applied = applyActiveProviderProfileFromConfig({ providerProfiles: [ @@ -111,11 +134,13 @@ describe('applyActiveProviderProfileFromConfig', () => { expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b') }) - test('does not override explicit startup selection when profile marker is stale', () => { + test('does not override explicit startup selection when profile marker is stale', async () => { process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED = '1' process.env.CLAUDE_CODE_USE_OPENAI = '1' process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1' process.env.OPENAI_MODEL = 'qwen2.5:3b' + const { applyActiveProviderProfileFromConfig } = + await importFreshProviderModules() const applied = applyActiveProviderProfileFromConfig({ providerProfiles: [ @@ -134,7 +159,7 @@ describe('applyActiveProviderProfileFromConfig', () => { expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b') }) - test('applies active profile when no explicit provider is selected', () => { + test('applies active profile when no explicit provider is selected', async () => { delete process.env.CLAUDE_CODE_USE_OPENAI delete process.env.CLAUDE_CODE_USE_GEMINI delete process.env.CLAUDE_CODE_USE_GITHUB @@ -144,6 +169,8 @@ describe('applyActiveProviderProfileFromConfig', () => { process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1' process.env.OPENAI_MODEL = 'qwen2.5:3b' + const { applyActiveProviderProfileFromConfig } = + await importFreshProviderModules() const applied = applyActiveProviderProfileFromConfig({ providerProfiles: [ @@ -164,8 +191,9 @@ describe('applyActiveProviderProfileFromConfig', () => { }) describe('getProviderPresetDefaults', () => { - test('ollama preset defaults to a local Ollama model', () => { + test('ollama preset defaults to a local Ollama model', async () => { delete process.env.OPENAI_MODEL + const { getProviderPresetDefaults } = await importFreshProviderModules() const defaults = getProviderPresetDefaults('ollama') @@ -175,23 +203,23 
@@ describe('getProviderPresetDefaults', () => { }) describe('deleteProviderProfile', () => { - test('deleting final profile clears provider env when active profile applied it', () => { - applyProviderProfileToProcessEnv( - buildProfile({ - id: 'only_profile', - baseUrl: 'https://api.openai.com/v1', - model: 'gpt-4o', - apiKey: 'sk-test', - }), - ) + test('deleting final profile clears provider env when active profile applied it', async () => { + const { + addProviderProfile, + deleteProviderProfile, + } = + await importFreshProviderModules() + const profile = addProviderProfile({ + name: 'Only Profile', + provider: 'openai', + baseUrl: 'https://api.openai.com/v1', + model: 'gpt-4o', + apiKey: 'sk-test', + }) - saveGlobalConfig(current => ({ - ...current, - providerProfiles: [buildProfile({ id: 'only_profile' })], - activeProviderProfileId: 'only_profile', - })) + expect(profile).not.toBeNull() - const result = deleteProviderProfile('only_profile') + const result = deleteProviderProfile(profile!.id) expect(result.removed).toBe(true) expect(result.activeProfileId).toBeUndefined() @@ -215,18 +243,25 @@ describe('deleteProviderProfile', () => { expect(process.env.ANTHROPIC_API_KEY).toBeUndefined() }) - test('deleting final profile preserves explicit startup provider env', () => { + test('deleting final profile preserves explicit startup provider env', async () => { + const { addProviderProfile, deleteProviderProfile } = + await importFreshProviderModules() + const profile = addProviderProfile({ + name: 'Only Profile', + provider: 'openai', + baseUrl: 'https://api.openai.com/v1', + model: 'gpt-4o', + }) + + expect(profile).not.toBeNull() + + process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED = undefined + delete process.env.CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED process.env.CLAUDE_CODE_USE_OPENAI = '1' process.env.OPENAI_BASE_URL = 'http://localhost:11434/v1' process.env.OPENAI_MODEL = 'qwen2.5:3b' - saveGlobalConfig(current => ({ - ...current, - providerProfiles: 
[buildProfile({ id: 'only_profile' })], - activeProviderProfileId: 'only_profile', - })) - - const result = deleteProviderProfile('only_profile') + const result = deleteProviderProfile(profile!.id) expect(result.removed).toBe(true) expect(result.activeProfileId).toBeUndefined() diff --git a/src/utils/secureStorage/platformStorage.test.ts b/src/utils/secureStorage/platformStorage.test.ts index 059e2035..6b835243 100644 --- a/src/utils/secureStorage/platformStorage.test.ts +++ b/src/utils/secureStorage/platformStorage.test.ts @@ -1,6 +1,5 @@ import { expect, test, mock, describe, beforeEach, afterEach } from "bun:test"; -import { getSecureStorage } from "./index.js"; import { linuxSecretStorage } from "./linuxSecretStorage.js"; import { windowsCredentialStorage } from "./windowsCredentialStorage.js"; import { getSecureStorageServiceName, CREDENTIALS_SERVICE_SUFFIX } from "./macOsKeychainHelpers.js"; @@ -133,24 +132,31 @@ describe("Secure Storage Platform Implementations", () => { describe("Platform Selection", () => { const originalPlatform = process.platform; + async function importFreshSecureStorage() { + return import(`./index.js?ts=${Date.now()}-${Math.random()}`); + } + afterEach(() => { Object.defineProperty(process, 'platform', { value: originalPlatform }); }); - test("darwin returns keychain with fallback", () => { + test("darwin returns keychain with fallback", async () => { Object.defineProperty(process, 'platform', { value: 'darwin' }); + const { getSecureStorage } = await importFreshSecureStorage(); const storage = getSecureStorage(); expect(storage.name).toContain("keychain"); }); - test("linux returns libsecret with fallback", () => { + test("linux returns libsecret with fallback", async () => { Object.defineProperty(process, 'platform', { value: 'linux' }); + const { getSecureStorage } = await importFreshSecureStorage(); const storage = getSecureStorage(); expect(storage.name).toContain("libsecret"); }); - test("win32 returns credential-locker with 
fallback", () => { + test("win32 returns credential-locker with fallback", async () => { Object.defineProperty(process, 'platform', { value: 'win32' }); + const { getSecureStorage } = await importFreshSecureStorage(); const storage = getSecureStorage(); expect(storage.name).toContain("credential-locker"); }); diff --git a/vscode-extension/openclaude-vscode/src/extension.test.js b/vscode-extension/openclaude-vscode/src/extension.test.js index 474ed715..2c266280 100644 --- a/vscode-extension/openclaude-vscode/src/extension.test.js +++ b/vscode-extension/openclaude-vscode/src/extension.test.js @@ -1,6 +1,6 @@ const test = require('node:test'); const assert = require('node:assert/strict'); -const Module = require('node:module'); +const { mock } = require('bun:test'); function createStatus(overrides = {}) { return { @@ -30,27 +30,32 @@ function createStatus(overrides = {}) { function loadExtension() { const extensionPath = require.resolve('./extension'); delete require.cache[extensionPath]; - - const originalLoad = Module._load; - Module._load = function patchedLoad(request, parent, isMain) { - if (request === 'vscode') { - return { - workspace: {}, - window: {}, - env: {}, - commands: {}, - Uri: { parse: value => value, file: value => value }, - }; - } - - return originalLoad.call(this, request, parent, isMain); - }; - - try { - return require('./extension'); - } finally { - Module._load = originalLoad; - } + mock.module('vscode', () => ({ + workspace: { + workspaceFolders: [], + getConfiguration: () => ({ + get: (_key, fallback) => fallback, + }), + getWorkspaceFolder: () => null, + }, + window: { + activeTextEditor: null, + createWebviewPanel: () => ({}), + registerWebviewViewProvider: () => ({ dispose() {} }), + showInformationMessage: async () => undefined, + showErrorMessage: async () => undefined, + }, + env: { + openExternal: async () => true, + }, + commands: { + registerCommand: () => ({ dispose() {} }), + executeCommand: async () => undefined, + }, + Uri: { 
parse: value => value, file: value => value }, + ViewColumn: { Active: 1 }, + })); + return require('./extension'); } test('renderControlCenterHtml uses the OpenClaude wordmark, status rail, and warm action hierarchy', () => {