* feat: native Anthropic API mode for Claude models on GitHub Copilot When using Claude models through GitHub Copilot, automatically switch from the OpenAI-compatible shim to Anthropic's native messages API format. The Copilot proxy (api.githubcopilot.com) supports Anthropic's native API for Claude models. This enables cache_control blocks to be sent and honoured, allowing explicit prompt caching control (as opposed to relying solely on server-side auto-caching). Changes: - Add isGithubNativeAnthropicMode() in providers.ts that auto-enables when the resolved model starts with "claude-" and the GitHub provider is active - Create a native Anthropic client in client.ts using the GitHub base URL and Bearer token authentication when native mode is detected - Enable prompt caching in claude.ts for native GitHub mode so cache_control blocks are sent (previously only allowed for firstParty/bedrock/vertex) - CLAUDE_CODE_GITHUB_ANTHROPIC_API=1 env var to force native mode for any model Benefits: - Proper Anthropic message format (no lossy OpenAI translation) - Explicit cache_control blocks for fine-grained caching control - Potentially better Claude model behaviour with native format Related: #515 * fix: scope force flag to Claude models and add isGithubNativeAnthropicMode tests - CLAUDE_CODE_GITHUB_ANTHROPIC_API=1 now returns false for non-Claude models (force flag still useful for aliases like 'github:copilot' with no model resolved yet, where it returns true when model is empty) - Add 7 focused tests covering mode detection: off without GitHub provider, auto-detect via OPENAI_MODEL and resolvedModel, non-Claude model rejection, and force-flag behaviour for claude/non-claude/no-model cases * fix: detect github:copilot:claude- compound format, remove force flag OPENAI_MODEL for GitHub Copilot uses the format 'github:copilot:MODEL' (e.g. 'github:copilot:claude-sonnet-4'), which does not start with 'claude-'. 
Auto-detection now handles both bare model names and the compound format. The CLAUDE_CODE_GITHUB_ANTHROPIC_API force flag is removed: with proper compound-format detection there is no remaining gap it could fill, and keeping a broad override flag without a concrete use case invites misuse. Tests updated to cover the compound format, generic alias (false), and non-Claude compound model (github:copilot:gpt-4o → false). * fix: use includes('claude-') for model detection, remove force flag Detection was broken for the standard GitHub Copilot compound format 'github:copilot:claude-sonnet-4' which does not start with 'claude-'. Using includes('claude-') handles bare names, compound names, and any future variants without needing updates. The CLAUDE_CODE_GITHUB_ANTHROPIC_API force flag is removed as it was a workaround for the broken detection, not a genuine use case. --------- Co-authored-by: Zartris <14197299+Zartris@users.noreply.github.com>
167 lines
6.2 KiB
TypeScript
167 lines
6.2 KiB
TypeScript
import { afterEach, expect, test } from 'bun:test'
|
|
|
|
// Snapshot of every provider-selection environment variable at module load,
// so the afterEach hook can restore the host environment exactly as it was.
// NOTE(review): keys here must stay in sync with clearProviderEnv() below.
const originalEnv = {
  CLAUDE_CODE_USE_GEMINI: process.env.CLAUDE_CODE_USE_GEMINI,
  CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB,
  CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
  CLAUDE_CODE_USE_BEDROCK: process.env.CLAUDE_CODE_USE_BEDROCK,
  CLAUDE_CODE_USE_VERTEX: process.env.CLAUDE_CODE_USE_VERTEX,
  CLAUDE_CODE_USE_FOUNDRY: process.env.CLAUDE_CODE_USE_FOUNDRY,
  OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
  OPENAI_API_BASE: process.env.OPENAI_API_BASE,
  OPENAI_MODEL: process.env.OPENAI_MODEL,
}
afterEach(() => {
|
|
process.env.CLAUDE_CODE_USE_GEMINI = originalEnv.CLAUDE_CODE_USE_GEMINI
|
|
process.env.CLAUDE_CODE_USE_GITHUB = originalEnv.CLAUDE_CODE_USE_GITHUB
|
|
process.env.CLAUDE_CODE_USE_OPENAI = originalEnv.CLAUDE_CODE_USE_OPENAI
|
|
process.env.CLAUDE_CODE_USE_BEDROCK = originalEnv.CLAUDE_CODE_USE_BEDROCK
|
|
process.env.CLAUDE_CODE_USE_VERTEX = originalEnv.CLAUDE_CODE_USE_VERTEX
|
|
process.env.CLAUDE_CODE_USE_FOUNDRY = originalEnv.CLAUDE_CODE_USE_FOUNDRY
|
|
process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
|
|
process.env.OPENAI_API_BASE = originalEnv.OPENAI_API_BASE
|
|
process.env.OPENAI_MODEL = originalEnv.OPENAI_MODEL
|
|
})
|
|
|
|
async function importFreshProvidersModule() {
|
|
return import(`./providers.js?ts=${Date.now()}-${Math.random()}`)
|
|
}
|
|
|
|
function clearProviderEnv(): void {
|
|
delete process.env.CLAUDE_CODE_USE_GEMINI
|
|
delete process.env.CLAUDE_CODE_USE_GITHUB
|
|
delete process.env.CLAUDE_CODE_USE_OPENAI
|
|
delete process.env.CLAUDE_CODE_USE_BEDROCK
|
|
delete process.env.CLAUDE_CODE_USE_VERTEX
|
|
delete process.env.CLAUDE_CODE_USE_FOUNDRY
|
|
delete process.env.OPENAI_BASE_URL
|
|
delete process.env.OPENAI_API_BASE
|
|
delete process.env.OPENAI_MODEL
|
|
}
|
|
|
|
test('first-party provider keeps Anthropic account setup flow enabled', () => {
|
|
clearProviderEnv()
|
|
return importFreshProvidersModule().then(
|
|
({ getAPIProvider, usesAnthropicAccountFlow }) => {
|
|
expect(getAPIProvider()).toBe('firstParty')
|
|
expect(usesAnthropicAccountFlow()).toBe(true)
|
|
},
|
|
)
|
|
})
|
|
|
|
test.each([
|
|
['CLAUDE_CODE_USE_OPENAI', 'openai'],
|
|
['CLAUDE_CODE_USE_GITHUB', 'github'],
|
|
['CLAUDE_CODE_USE_GEMINI', 'gemini'],
|
|
['CLAUDE_CODE_USE_BEDROCK', 'bedrock'],
|
|
['CLAUDE_CODE_USE_VERTEX', 'vertex'],
|
|
['CLAUDE_CODE_USE_FOUNDRY', 'foundry'],
|
|
] as const)(
|
|
'%s disables Anthropic account setup flow',
|
|
async (envKey, provider) => {
|
|
clearProviderEnv()
|
|
process.env[envKey] = '1'
|
|
const { getAPIProvider, usesAnthropicAccountFlow } =
|
|
await importFreshProvidersModule()
|
|
|
|
expect(getAPIProvider()).toBe(provider)
|
|
expect(usesAnthropicAccountFlow()).toBe(false)
|
|
},
|
|
)
|
|
|
|
test('GEMINI takes precedence over GitHub when both are set', async () => {
|
|
clearProviderEnv()
|
|
process.env.CLAUDE_CODE_USE_GEMINI = '1'
|
|
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
|
const { getAPIProvider } = await importFreshProvidersModule()
|
|
|
|
expect(getAPIProvider()).toBe('gemini')
|
|
})
|
|
|
|
test('explicit local openai-compatible base URLs stay on the openai provider', async () => {
|
|
clearProviderEnv()
|
|
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
|
process.env.OPENAI_BASE_URL = 'http://127.0.0.1:8080/v1'
|
|
process.env.OPENAI_MODEL = 'gpt-5.4'
|
|
|
|
const { getAPIProvider } = await importFreshProvidersModule()
|
|
expect(getAPIProvider()).toBe('openai')
|
|
})
|
|
|
|
test('codex aliases still resolve to the codex provider without a non-codex base URL', async () => {
|
|
clearProviderEnv()
|
|
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
|
process.env.OPENAI_MODEL = 'codexplan'
|
|
|
|
const { getAPIProvider } = await importFreshProvidersModule()
|
|
expect(getAPIProvider()).toBe('codex')
|
|
})
|
|
|
|
test('official OpenAI base URLs now keep provider detection on openai for aliases', async () => {
|
|
clearProviderEnv()
|
|
process.env.CLAUDE_CODE_USE_OPENAI = '1'
|
|
process.env.OPENAI_BASE_URL = 'https://api.openai.com/v1'
|
|
process.env.OPENAI_MODEL = 'gpt-5.4'
|
|
|
|
const { getAPIProvider } = await importFreshProvidersModule()
|
|
expect(getAPIProvider()).toBe('openai')
|
|
})
|
|
|
|
// --- isGithubNativeAnthropicMode: detection of native Anthropic API mode
// --- for Claude models served through GitHub Copilot
test('isGithubNativeAnthropicMode: false when CLAUDE_CODE_USE_GITHUB is not set', async () => {
|
|
clearProviderEnv()
|
|
process.env.OPENAI_MODEL = 'claude-sonnet-4-5'
|
|
const { isGithubNativeAnthropicMode } = await importFreshProvidersModule()
|
|
expect(isGithubNativeAnthropicMode()).toBe(false)
|
|
})
|
|
|
|
test('isGithubNativeAnthropicMode: true for bare claude- model via OPENAI_MODEL', async () => {
|
|
clearProviderEnv()
|
|
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
|
process.env.OPENAI_MODEL = 'claude-sonnet-4-5'
|
|
const { isGithubNativeAnthropicMode } = await importFreshProvidersModule()
|
|
expect(isGithubNativeAnthropicMode()).toBe(true)
|
|
})
|
|
|
|
test('isGithubNativeAnthropicMode: true for github:copilot:claude- compound format', async () => {
|
|
clearProviderEnv()
|
|
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
|
process.env.OPENAI_MODEL = 'github:copilot:claude-sonnet-4'
|
|
const { isGithubNativeAnthropicMode } = await importFreshProvidersModule()
|
|
expect(isGithubNativeAnthropicMode()).toBe(true)
|
|
})
|
|
|
|
test('isGithubNativeAnthropicMode: true when resolvedModel is a claude- model', async () => {
|
|
clearProviderEnv()
|
|
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
|
process.env.OPENAI_MODEL = 'github:copilot'
|
|
const { isGithubNativeAnthropicMode } = await importFreshProvidersModule()
|
|
expect(isGithubNativeAnthropicMode('claude-haiku-4-5')).toBe(true)
|
|
})
|
|
|
|
test('isGithubNativeAnthropicMode: false for generic github:copilot alias', async () => {
|
|
clearProviderEnv()
|
|
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
|
process.env.OPENAI_MODEL = 'github:copilot'
|
|
const { isGithubNativeAnthropicMode } = await importFreshProvidersModule()
|
|
expect(isGithubNativeAnthropicMode()).toBe(false)
|
|
})
|
|
|
|
test('isGithubNativeAnthropicMode: false for non-Claude model', async () => {
|
|
clearProviderEnv()
|
|
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
|
process.env.OPENAI_MODEL = 'gpt-4o'
|
|
const { isGithubNativeAnthropicMode } = await importFreshProvidersModule()
|
|
expect(isGithubNativeAnthropicMode()).toBe(false)
|
|
})
|
|
|
|
test('isGithubNativeAnthropicMode: false for github:copilot:gpt- model', async () => {
|
|
clearProviderEnv()
|
|
process.env.CLAUDE_CODE_USE_GITHUB = '1'
|
|
process.env.OPENAI_MODEL = 'github:copilot:gpt-4o'
|
|
const { isGithubNativeAnthropicMode } = await importFreshProvidersModule()
|
|
expect(isGithubNativeAnthropicMode()).toBe(false)
|
|
})
|