// NOTE: extraction artifacts (file-size/line-count stats) removed. Original note:
// "decouple and fix mistral; fix wrong variable for currentBaseUrl and buildAPIProviderProperties"
import { afterEach, beforeEach, describe, expect, test } from 'bun:test'

import {
  parseProviderFlag,
  applyProviderFlag,
  applyProviderFlagFromArgs,
  VALID_PROVIDERS,
} from './providerFlag.js'
const ENV_KEYS = [
|
|
'CLAUDE_CODE_USE_OPENAI',
|
|
'CLAUDE_CODE_USE_GEMINI',
|
|
'CLAUDE_CODE_USE_GITHUB',
|
|
'CLAUDE_CODE_USE_BEDROCK',
|
|
'CLAUDE_CODE_USE_VERTEX',
|
|
'OPENAI_BASE_URL',
|
|
'OPENAI_API_KEY',
|
|
'OPENAI_MODEL',
|
|
'GEMINI_MODEL',
|
|
]
|
|
|
|
const originalEnv: Record<string, string | undefined> = {}
|
|
|
|
beforeEach(() => {
|
|
for (const key of ENV_KEYS) {
|
|
originalEnv[key] = process.env[key]
|
|
delete process.env[key]
|
|
}
|
|
})
|
|
|
|
const RESET_KEYS = [
|
|
'CLAUDE_CODE_USE_OPENAI',
|
|
'CLAUDE_CODE_USE_GEMINI',
|
|
'CLAUDE_CODE_USE_GITHUB',
|
|
'CLAUDE_CODE_USE_BEDROCK',
|
|
'CLAUDE_CODE_USE_VERTEX',
|
|
'OPENAI_BASE_URL',
|
|
'OPENAI_API_KEY',
|
|
'OPENAI_MODEL',
|
|
'GEMINI_MODEL',
|
|
] as const
|
|
|
|
beforeEach(() => {
|
|
for (const key of RESET_KEYS) {
|
|
delete process.env[key]
|
|
}
|
|
})
|
|
|
|
afterEach(() => {
|
|
for (const key of ENV_KEYS) {
|
|
if (originalEnv[key] === undefined) {
|
|
delete process.env[key]
|
|
} else {
|
|
process.env[key] = originalEnv[key]
|
|
}
|
|
}
|
|
})
|
|
|
|
// --- parseProviderFlag ---
|
|
|
|
describe('parseProviderFlag', () => {
|
|
test('returns provider name when --provider flag present', () => {
|
|
expect(parseProviderFlag(['--provider', 'openai'])).toBe('openai')
|
|
})
|
|
|
|
test('returns provider name with --model alongside', () => {
|
|
expect(parseProviderFlag(['--provider', 'gemini', '--model', 'gemini-2.0-flash'])).toBe('gemini')
|
|
})
|
|
|
|
test('returns null when --provider flag absent', () => {
|
|
expect(parseProviderFlag(['--model', 'gpt-4o'])).toBeNull()
|
|
})
|
|
|
|
test('returns null for empty args', () => {
|
|
expect(parseProviderFlag([])).toBeNull()
|
|
})
|
|
|
|
test('returns null when --provider has no value', () => {
|
|
expect(parseProviderFlag(['--provider'])).toBeNull()
|
|
})
|
|
|
|
test('returns null when --provider value starts with --', () => {
|
|
expect(parseProviderFlag(['--provider', '--model'])).toBeNull()
|
|
})
|
|
})
|
|
|
|
// --- applyProviderFlag ---
|
|
|
|
describe('applyProviderFlag - anthropic', () => {
|
|
test('sets no env vars for anthropic (default)', () => {
|
|
const result = applyProviderFlag('anthropic', [])
|
|
expect(result.error).toBeUndefined()
|
|
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBeUndefined()
|
|
expect(process.env.CLAUDE_CODE_USE_GEMINI).toBeUndefined()
|
|
})
|
|
})
|
|
|
|
describe('applyProviderFlag - openai', () => {
|
|
test('sets CLAUDE_CODE_USE_OPENAI=1', () => {
|
|
const result = applyProviderFlag('openai', [])
|
|
expect(result.error).toBeUndefined()
|
|
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
|
|
})
|
|
|
|
test('sets OPENAI_MODEL when --model is provided', () => {
|
|
applyProviderFlag('openai', ['--model', 'gpt-4o'])
|
|
expect(process.env.OPENAI_MODEL).toBe('gpt-4o')
|
|
})
|
|
})
|
|
|
|
describe('applyProviderFlag - gemini', () => {
|
|
test('sets CLAUDE_CODE_USE_GEMINI=1', () => {
|
|
const result = applyProviderFlag('gemini', [])
|
|
expect(result.error).toBeUndefined()
|
|
expect(process.env.CLAUDE_CODE_USE_GEMINI).toBe('1')
|
|
})
|
|
|
|
test('sets GEMINI_MODEL when --model is provided', () => {
|
|
applyProviderFlag('gemini', ['--model', 'gemini-2.0-flash'])
|
|
expect(process.env.GEMINI_MODEL).toBe('gemini-2.0-flash')
|
|
})
|
|
})
|
|
|
|
describe('applyProviderFlag - github', () => {
|
|
test('sets CLAUDE_CODE_USE_GITHUB=1', () => {
|
|
const result = applyProviderFlag('github', [])
|
|
expect(result.error).toBeUndefined()
|
|
expect(process.env.CLAUDE_CODE_USE_GITHUB).toBe('1')
|
|
})
|
|
})
|
|
|
|
describe('applyProviderFlag - bedrock', () => {
|
|
test('sets CLAUDE_CODE_USE_BEDROCK=1', () => {
|
|
const result = applyProviderFlag('bedrock', [])
|
|
expect(result.error).toBeUndefined()
|
|
expect(process.env.CLAUDE_CODE_USE_BEDROCK).toBe('1')
|
|
})
|
|
})
|
|
|
|
describe('applyProviderFlag - vertex', () => {
|
|
test('sets CLAUDE_CODE_USE_VERTEX=1', () => {
|
|
const result = applyProviderFlag('vertex', [])
|
|
expect(result.error).toBeUndefined()
|
|
expect(process.env.CLAUDE_CODE_USE_VERTEX).toBe('1')
|
|
})
|
|
})
|
|
|
|
describe('applyProviderFlag - ollama', () => {
|
|
test('sets CLAUDE_CODE_USE_OPENAI=1 with Ollama defaults when unset', () => {
|
|
delete process.env.OPENAI_BASE_URL
|
|
delete process.env.OPENAI_API_KEY
|
|
|
|
const result = applyProviderFlag('ollama', [])
|
|
expect(result.error).toBeUndefined()
|
|
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
|
|
expect(process.env.OPENAI_BASE_URL).toBe('http://localhost:11434/v1')
|
|
expect(process.env.OPENAI_API_KEY).toBe('ollama')
|
|
})
|
|
|
|
test('sets OPENAI_MODEL when --model is provided', () => {
|
|
applyProviderFlag('ollama', ['--model', 'llama3.2'])
|
|
expect(process.env.OPENAI_MODEL).toBe('llama3.2')
|
|
})
|
|
|
|
test('does not override existing OPENAI_BASE_URL when user set a custom one', () => {
|
|
process.env.OPENAI_BASE_URL = 'http://my-ollama:11434/v1'
|
|
applyProviderFlag('ollama', [])
|
|
expect(process.env.OPENAI_BASE_URL).toBe('http://my-ollama:11434/v1')
|
|
})
|
|
|
|
test('preserves explicit OPENAI_BASE_URL and OPENAI_API_KEY overrides', () => {
|
|
process.env.OPENAI_BASE_URL = 'http://remote-ollama.internal:11434/v1'
|
|
process.env.OPENAI_API_KEY = 'secret-token'
|
|
|
|
applyProviderFlag('ollama', [])
|
|
|
|
expect(process.env.OPENAI_BASE_URL).toBe('http://remote-ollama.internal:11434/v1')
|
|
expect(process.env.OPENAI_API_KEY).toBe('secret-token')
|
|
})
|
|
})
|
|
|
|
describe('applyProviderFlag - invalid provider', () => {
|
|
test('returns error for unknown provider', () => {
|
|
const result = applyProviderFlag('unknown-provider', [])
|
|
expect(result.error).toContain('unknown-provider')
|
|
expect(result.error).toContain(VALID_PROVIDERS.join(', '))
|
|
})
|
|
})
|
|
|
|
describe('applyProviderFlagFromArgs', () => {
|
|
test('applies ollama provider and model from argv in one step', () => {
|
|
delete process.env.OPENAI_BASE_URL
|
|
delete process.env.OPENAI_API_KEY
|
|
|
|
const result = applyProviderFlagFromArgs([
|
|
'--provider',
|
|
'ollama',
|
|
'--model',
|
|
'qwen2.5:3b',
|
|
])
|
|
|
|
expect(result?.error).toBeUndefined()
|
|
expect(process.env.CLAUDE_CODE_USE_OPENAI).toBe('1')
|
|
expect(process.env.OPENAI_BASE_URL).toBe('http://localhost:11434/v1')
|
|
expect(process.env.OPENAI_API_KEY).toBe('ollama')
|
|
expect(process.env.OPENAI_MODEL).toBe('qwen2.5:3b')
|
|
})
|
|
|
|
test('returns undefined when --provider is absent', () => {
|
|
expect(applyProviderFlagFromArgs(['--model', 'gpt-4o'])).toBeUndefined()
|
|
})
|
|
})
|