Files
orcs-code/src/utils/context.test.ts
gnanam1990 2c6ec0119e fix: prevent keyboard freeze when MCP notification effects fire
React 19 requires `supportsMicrotasks: true` in the reconciler host
config so it can flush state updates from passive effects via
queueMicrotask. Without this, state updates triggered inside
useMcpConnectivityStatus were silently dropped, corrupting React's
internal executionContext and causing all keyboard input to freeze
after the "N MCP server(s) need auth" notification appeared.
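
A minimal sketch of the host-config change, assuming the custom
renderer is built on the react-reconciler package (everything except
the two microtask fields is abbreviated):

    import Reconciler from 'react-reconciler'

    const hostConfig = {
      // ...the renderer's existing host config (createInstance,
      // appendChild, commitUpdate, ...) goes here.

      // Let React 19 flush passive-effect work in a microtask; without
      // these two fields the reconciler has no microtask scheduler and
      // state updates queued from passive effects are dropped.
      supportsMicrotasks: true,
      scheduleMicrotask: (callback: () => void) => queueMicrotask(callback),
    }

    // Sketch only: the abbreviated config does not satisfy the full
    // HostConfig type, hence the cast.
    const reconciler = Reconciler(hostConfig as any)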

Root cause and fix (three parts):

1. reconciler.ts: declare supportsMicrotasks + scheduleMicrotask so
   React 19 schedules passive-effect flushes correctly.

2. useMcpConnectivityStatus.tsx: wrap the MCP auth notification effect
   in try/catch so any unexpected throw does not propagate into
   flushPassiveEffects and permanently corrupt executionContext (a
   sketch of this pattern follows the list).

3. notifications.tsx: wrap addNotification, removeNotification, and
   processQueue in try/catch for the same reason — these are called
   from 12+ notification hooks across passive effects.
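
A hedged sketch of the wrapping in (2); the type and hook names here
are hypothetical stand-ins for the real code in
useMcpConnectivityStatus.tsx and notifications.tsx:

    import { useEffect } from 'react'

    // Hypothetical shape; the real server list and notification store
    // live in useMcpConnectivityStatus.tsx and notifications.tsx.
    type McpServer = { name: string; status: 'connected' | 'needs-auth' }

    function useMcpAuthNotification(
      servers: McpServer[],
      addNotification: (text: string) => void,
    ) {
      useEffect(() => {
        try {
          const needingAuth = servers.filter(s => s.status === 'needs-auth')
          if (needingAuth.length > 0) {
            addNotification(`${needingAuth.length} MCP server(s) need auth`)
          }
        } catch (error) {
          // Contain the throw: if it escaped, flushPassiveEffects would
          // abort mid-flush and leave executionContext corrupted.
          console.error(error)
        }
      }, [servers, addNotification])
    }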

Also fixes a pre-existing test isolation bug in context.test.ts where
assigning `undefined` to process.env produced the string "undefined",
polluting the env for subsequent test files.
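
The env bug in miniature (SOME_FLAG is a placeholder name):

    // Node coerces anything assigned to process.env into a string, so
    // assigning undefined stores the literal string "undefined".
    process.env.SOME_FLAG = undefined
    console.log(process.env.SOME_FLAG)               // "undefined" (a string)
    console.log(process.env.SOME_FLAG === undefined) // false: env is polluted

    // Deleting the key, as the afterEach below now does, leaves the
    // variable truly unset for subsequent test files.
    delete process.env.SOME_FLAG
    console.log(process.env.SOME_FLAG === undefined) // true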

Resolves: #169, #205, #77
2026-04-03 07:41:53 +05:30

97 lines
3.1 KiB
TypeScript

import { afterEach, expect, test } from 'bun:test'
import { getMaxOutputTokensForModel } from '../services/api/claude.ts'
import {
  getContextWindowForModel,
  getModelMaxOutputTokens,
} from './context.ts'

const originalEnv = {
  CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
  CLAUDE_CODE_MAX_OUTPUT_TOKENS: process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS,
}

afterEach(() => {
  // Restore the saved values. Delete originally unset keys rather than
  // assigning undefined, which Node would coerce to the string
  // "undefined" and leak into subsequent test files.
  if (originalEnv.CLAUDE_CODE_USE_OPENAI === undefined) {
    delete process.env.CLAUDE_CODE_USE_OPENAI
  } else {
    process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
  }
  if (originalEnv.CLAUDE_CODE_MAX_OUTPUT_TOKENS === undefined) {
    delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
  } else {
    process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS =
      originalEnv.CLAUDE_CODE_MAX_OUTPUT_TOKENS
  }
})

test('deepseek-chat uses provider-specific context and output caps', () => {
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
  expect(getContextWindowForModel('deepseek-chat')).toBe(128_000)
  expect(getModelMaxOutputTokens('deepseek-chat')).toEqual({
    default: 8_192,
    upperLimit: 8_192,
  })
  expect(getMaxOutputTokensForModel('deepseek-chat')).toBe(8_192)
})

test('deepseek-chat clamps oversized max output overrides to the provider limit', () => {
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS = '32000'
  expect(getMaxOutputTokensForModel('deepseek-chat')).toBe(8_192)
})

test('gpt-4o uses provider-specific context and output caps', () => {
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
  expect(getContextWindowForModel('gpt-4o')).toBe(128_000)
  expect(getModelMaxOutputTokens('gpt-4o')).toEqual({
    default: 16_384,
    upperLimit: 16_384,
  })
  expect(getMaxOutputTokensForModel('gpt-4o')).toBe(16_384)
})

test('gpt-4o clamps oversized max output overrides to the provider limit', () => {
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS = '32000'
  expect(getMaxOutputTokensForModel('gpt-4o')).toBe(16_384)
})

test('gpt-5.4 family uses provider-specific context and output caps', () => {
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  delete process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS
  expect(getContextWindowForModel('gpt-5.4')).toBe(1_050_000)
  expect(getModelMaxOutputTokens('gpt-5.4')).toEqual({
    default: 128_000,
    upperLimit: 128_000,
  })
  expect(getContextWindowForModel('gpt-5.4-mini')).toBe(400_000)
  expect(getModelMaxOutputTokens('gpt-5.4-mini')).toEqual({
    default: 128_000,
    upperLimit: 128_000,
  })
  expect(getContextWindowForModel('gpt-5.4-nano')).toBe(400_000)
  expect(getModelMaxOutputTokens('gpt-5.4-nano')).toEqual({
    default: 128_000,
    upperLimit: 128_000,
  })
})

test('gpt-5.4 family keeps large max output overrides within provider limits', () => {
  process.env.CLAUDE_CODE_USE_OPENAI = '1'
  process.env.CLAUDE_CODE_MAX_OUTPUT_TOKENS = '200000'
  expect(getMaxOutputTokensForModel('gpt-5.4')).toBe(128_000)
  expect(getMaxOutputTokensForModel('gpt-5.4-mini')).toBe(128_000)
  expect(getMaxOutputTokensForModel('gpt-5.4-nano')).toBe(128_000)
})