fix: strip comments before scanning for missing imports (#676)
* fix: strip comments before scanning for missing imports
The scanForMissingImports regex matched require() and import() patterns
inside JSDoc comments, causing false-positive missing module detection.
A documented path like `require('./commands/proactive.js')` in a comment
was resolved from the wrong directory, marked as missing, then the global
onResolve handler intercepted ALL imports of that specifier — including
valid ones — replacing them with truthy no-op stubs that broke execution at runtime.
Strip block (/* */) and line (//) comments from source before scanning.
* fix: repair 10 pre-existing test failures
- promptIdentity.test.ts: define MACRO global (ISSUES_EXPLAINER etc.)
for test mode where Bun.define build-time replacements aren't active
- context.test.ts: clear OPENAI_MODEL env var in each test — the user's
environment (e.g. OPENAI_MODEL=github_copilot/gpt-5.4) polluted the
provider-qualified lookup, returning wrong context windows
- openclaudePaths.test.ts: set CLAUDE_CONFIG_DIR to force the .openclaude
path when ~/.openclaude doesn't exist on the test machine
This commit is contained in:
committed by
GitHub
parent
12dd3755c6
commit
a00b7928de
@@ -9,6 +9,7 @@ import {
|
||||
const originalEnv = {
|
||||
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
|
||||
CLAUDE_CODE_MAX_OUTPUT_TOKENS: process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS,
|
||||
OPENAI_MODEL: process.env.OPENAI_MODEL,
|
||||
}
|
||||
|
||||
afterEach(() => {
|
||||
@@ -23,11 +24,17 @@ afterEach(() => {
|
||||
process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS =
|
||||
originalEnv.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
||||
}
|
||||
if (originalEnv.OPENAI_MODEL === undefined) {
|
||||
delete process.env.OPENAI_MODEL
|
||||
} else {
|
||||
process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
|
||||
}
|
||||
})
|
||||
|
||||
test('deepseek-chat uses provider-specific context and output caps', () => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
||||
delete process.env.OPENAI_MODEL
|
||||
|
||||
expect(getContextWindowForModel('deepseek-chat')).toBe(128_000)
|
||||
expect(getModelMaxOutputTokens('deepseek-chat')).toEqual({
|
||||
@@ -40,6 +47,7 @@ test('deepseek-chat uses provider-specific context and output caps', () => {
|
||||
test('deepseek-chat clamps oversized max output overrides to the provider limit', () => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS = '32000'
|
||||
delete process.env.OPENAI_MODEL
|
||||
|
||||
expect(getMaxOutputTokensForModel('deepseek-chat')).toBe(8_192)
|
||||
})
|
||||
@@ -47,6 +55,7 @@ test('deepseek-chat clamps oversized max output overrides to the provider limit'
|
||||
test('gpt-4o uses provider-specific context and output caps', () => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
||||
delete process.env.OPENAI_MODEL
|
||||
|
||||
expect(getContextWindowForModel('gpt-4o')).toBe(128_000)
|
||||
expect(getModelMaxOutputTokens('gpt-4o')).toEqual({
|
||||
@@ -59,6 +68,7 @@ test('gpt-4o uses provider-specific context and output caps', () => {
|
||||
test('gpt-4o clamps oversized max output overrides to the provider limit', () => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS = '32000'
|
||||
delete process.env.OPENAI_MODEL
|
||||
|
||||
expect(getMaxOutputTokensForModel('gpt-4o')).toBe(16_384)
|
||||
})
|
||||
@@ -66,6 +76,7 @@ test('gpt-4o clamps oversized max output overrides to the provider limit', () =>
|
||||
test('gpt-5.4 family uses provider-specific context and output caps', () => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
||||
delete process.env.OPENAI_MODEL
|
||||
|
||||
expect(getContextWindowForModel('gpt-5.4')).toBe(1_050_000)
|
||||
expect(getModelMaxOutputTokens('gpt-5.4')).toEqual({
|
||||
@@ -98,6 +109,7 @@ test('gpt-5.4 family keeps large max output overrides within provider limits', (
|
||||
test('MiniMax-M2.7 uses explicit provider-specific context and output caps', () => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
||||
delete process.env.OPENAI_MODEL
|
||||
|
||||
expect(getContextWindowForModel('MiniMax-M2.7')).toBe(204_800)
|
||||
expect(getModelMaxOutputTokens('MiniMax-M2.7')).toEqual({
|
||||
@@ -110,6 +122,7 @@ test('MiniMax-M2.7 uses explicit provider-specific context and output caps', ()
|
||||
test('unknown openai-compatible models use the 128k fallback window (not 8k, see #635)', () => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
||||
delete process.env.OPENAI_MODEL
|
||||
|
||||
expect(getContextWindowForModel('some-unknown-3p-model')).toBe(128_000)
|
||||
})
|
||||
@@ -117,6 +130,7 @@ test('unknown openai-compatible models use the 128k fallback window (not 8k, see
|
||||
test('MiniMax-M2.5 and M2.1 use explicit provider-specific context and output caps', () => {
|
||||
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
||||
delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
|
||||
delete process.env.OPENAI_MODEL
|
||||
|
||||
expect(getContextWindowForModel('MiniMax-M2.5')).toBe(204_800)
|
||||
expect(getContextWindowForModel('MiniMax-M2.5-highspeed')).toBe(204_800)
|
||||
|
||||
@@ -76,7 +76,9 @@ describe('OpenClaude paths', () => {
|
||||
})
|
||||
|
||||
test('local installer uses openclaude wrapper path', async () => {
|
||||
delete process.env.CLAUDE_CONFIG_DIR
|
||||
// Force .openclaude config home so the test doesn't fall back to
|
||||
// ~/.claude when ~/.openclaude doesn't exist on this machine.
|
||||
process.env.CLAUDE_CONFIG_DIR = join(homedir(), '.openclaude')
|
||||
const { getLocalClaudePath } = await importFreshLocalInstaller()
|
||||
|
||||
expect(getLocalClaudePath()).toBe(
|
||||
|
||||
Reference in New Issue
Block a user