feat: add support for GitHub Models provider

- Introduced environment variable CLAUDE_CODE_USE_GITHUB to enable GitHub Models.
- Added checks for GITHUB_TOKEN or GH_TOKEN for authentication.
- Updated base URL handling to include GitHub Models default.
- Enhanced provider detection and error handling for GitHub Models, including retries with exponential backoff on HTTP 429 rate limits.
- Updated getAnthropicClient, the OpenAI shim, and provider-request resolution to accommodate the new provider, with unit tests for GitHub model-ID normalization.
Rithul Kamesh
2026-04-02 11:25:28 +05:30
parent e54c39e3cb
commit 25c5987276
24 changed files with 1069 additions and 42 deletions
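For orientation before the diffs: opting in takes only environment variables. A minimal sketch (names are from this commit; the token value and its exact permission scope are illustrative, the doc comment below just says "PAT with models access"):

// Opt in to GitHub Models; routes requests through the OpenAI shim.
process.env.CLAUDE_CODE_USE_GITHUB = '1'
process.env.GITHUB_TOKEN ??= '<PAT with models access>'
process.env.OPENAI_MODEL ??= 'github:copilot' // normalized to openai/gpt-4.1

// getAnthropicClient() then returns the OpenAI-compatible shim pointed at
// https://models.github.ai/inference (see the hunks below).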

View File

@@ -154,7 +154,10 @@ export async function getAnthropicClient({
       fetch: resolvedFetch,
     }),
   }
-  if (isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)) {
+  if (
+    isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI) ||
+    isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
+  ) {
     const { createOpenAIShimClient } = await import('./openaiShim.js')
     return createOpenAIShimClient({
       defaultHeaders,

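isEnvTruthy itself is not part of this commit; judging by the inline checks it replaces in convertTools and createOpenAIShimClient below, it presumably behaves along these lines:

// Presumed shape of isEnvTruthy, inferred from the `=== '1' || === 'true'`
// checks this commit replaces; the real helper lives in utils/envUtils.ts
// and may accept more spellings.
function isEnvTruthyLike(value: string | undefined): boolean {
  return value === '1' || value === 'true'
}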
View File

@@ -14,8 +14,15 @@
  * OPENAI_BASE_URL=http://... — base URL (default: https://api.openai.com/v1)
  * OPENAI_MODEL=gpt-4o — default model override
  * CODEX_API_KEY / ~/.codex/auth.json — Codex auth for codexplan/codexspark
+ *
+ * GitHub Models (models.github.ai), OpenAI-compatible:
+ * CLAUDE_CODE_USE_GITHUB=1 — enable GitHub inference (no need for USE_OPENAI)
+ * GITHUB_TOKEN or GH_TOKEN — PAT with models access (mapped to Bearer auth)
+ * OPENAI_MODEL — optional; use github:copilot or openai/gpt-4.1 style IDs
  */
 
+import { isEnvTruthy } from '../../utils/envUtils.js'
+import { hydrateGithubModelsTokenFromSecureStorage } from '../../utils/githubModelsCredentials.js'
 import {
   codexStreamToAnthropic,
   collectCodexCompletedResponse,
@@ -30,6 +37,25 @@ import {
   resolveProviderRequest,
 } from './providerConfig.js'
 
+const GITHUB_MODELS_DEFAULT_BASE = 'https://models.github.ai/inference'
+const GITHUB_API_VERSION = '2022-11-28'
+const GITHUB_429_MAX_RETRIES = 3
+const GITHUB_429_BASE_DELAY_SEC = 1
+const GITHUB_429_MAX_DELAY_SEC = 32
+
+function isGithubModelsMode(): boolean {
+  return isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
+}
+
+function formatRetryAfterHint(response: Response): string {
+  const ra = response.headers.get('retry-after')
+  return ra ? ` (Retry-After: ${ra})` : ''
+}
+
+function sleepMs(ms: number): Promise<void> {
+  return new Promise(resolve => setTimeout(resolve, ms))
+}
+
 // ---------------------------------------------------------------------------
 // Types — minimal subset of Anthropic SDK types we need to produce
 // ---------------------------------------------------------------------------
@@ -254,9 +280,7 @@ function normalizeSchemaForOpenAI(
 function convertTools(
   tools: Array<{ name: string; description?: string; input_schema?: Record<string, unknown> }>,
 ): OpenAITool[] {
-  const isGemini =
-    process.env.CLAUDE_CODE_USE_GEMINI === '1' ||
-    process.env.CLAUDE_CODE_USE_GEMINI === 'true'
+  const isGemini = isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)
 
   return tools
     .filter(t => t.name !== 'ToolSearchTool') // Not relevant for OpenAI
@@ -666,6 +690,12 @@ class OpenAIShimMessages {
       body.stream_options = { include_usage: true }
     }
 
+    const isGithub = isGithubModelsMode()
+    if (isGithub && body.max_completion_tokens !== undefined) {
+      body.max_tokens = body.max_completion_tokens
+      delete body.max_completion_tokens
+    }
+
     if (params.temperature !== undefined) body.temperature = params.temperature
     if (params.top_p !== undefined) body.top_p = params.top_p
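Why remap here: the GitHub Models endpoint appears to expect the older max_tokens parameter, while the shim's default body uses max_completion_tokens. That reading of the endpoint is an inference from this hunk, not something the diff states. The effect, illustratively:

// Before/after the GitHub-specific remap (illustrative values):
const demoBody: Record<string, unknown> = { max_completion_tokens: 1024 }
demoBody.max_tokens = demoBody.max_completion_tokens
delete demoBody.max_completion_tokens
// demoBody is now { max_tokens: 1024 }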
@@ -715,6 +745,11 @@ class OpenAIShimMessages {
       }
     }
 
+    if (isGithub) {
+      headers.Accept = 'application/vnd.github.v3+json'
+      headers['X-GitHub-Api-Version'] = GITHUB_API_VERSION
+    }
+
     // Build the chat completions URL
     // Azure Cognitive Services / Azure OpenAI require a deployment-specific path
     // and an api-version query parameter.
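The resulting request headers for GitHub Models therefore look roughly like this; Content-Type and Authorization come from the shim's existing header assembly, which this hunk does not show:

// Illustrative final header set; the bearer token is GITHUB_TOKEN/GH_TOKEN,
// mapped into OPENAI_API_KEY by createOpenAIShimClient further down.
const exampleHeaders: Record<string, string> = {
  'Content-Type': 'application/json',
  Authorization: 'Bearer <token>',
  Accept: 'application/vnd.github.v3+json',
  'X-GitHub-Api-Version': '2022-11-28',
}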
@@ -737,19 +772,42 @@ class OpenAIShimMessages {
       chatCompletionsUrl = `${request.baseUrl}/chat/completions`
     }
 
-    const response = await fetch(chatCompletionsUrl, {
-      method: 'POST',
+    const fetchInit = {
+      method: 'POST' as const,
       headers,
       body: JSON.stringify(body),
       signal: options?.signal,
-    })
-    if (!response.ok) {
-      const errorBody = await response.text().catch(() => 'unknown error')
-      throw new Error(`OpenAI API error ${response.status}: ${errorBody}`)
-    }
-    return response
+    }
+
+    const maxAttempts = isGithub ? GITHUB_429_MAX_RETRIES : 1
+    let response: Response | undefined
+    for (let attempt = 0; attempt < maxAttempts; attempt++) {
+      response = await fetch(chatCompletionsUrl, fetchInit)
+      if (response.ok) {
+        return response
+      }
+      if (
+        isGithub &&
+        response.status === 429 &&
+        attempt < maxAttempts - 1
+      ) {
+        await response.text().catch(() => {})
+        const delaySec = Math.min(
+          GITHUB_429_BASE_DELAY_SEC * 2 ** attempt,
+          GITHUB_429_MAX_DELAY_SEC,
+        )
+        await sleepMs(delaySec * 1000)
+        continue
+      }
+
+      const errorBody = await response.text().catch(() => 'unknown error')
+      const rateHint =
+        isGithub && response.status === 429 ? formatRetryAfterHint(response) : ''
+      throw new Error(
+        `OpenAI API error ${response.status}: ${errorBody}${rateHint}`,
+      )
+    }
+
+    throw new Error('OpenAI shim: request loop exited unexpectedly')
   }
 
   private _convertNonStreamingResponse(
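A note on the schedule this loop produces: the sleep is min(1 * 2^attempt, 32) seconds, and only non-final attempts sleep. With GITHUB_429_MAX_RETRIES = 3:

// attempt 0: 429 -> sleep min(2 ** 0, 32) = 1s, retry
// attempt 1: 429 -> sleep min(2 ** 1, 32) = 2s, retry
// attempt 2: 429 -> out of attempts, throw (with Retry-After appended)
for (let attempt = 0; attempt < 3; attempt++) {
  console.log(`attempt ${attempt}: delay ${Math.min(2 ** attempt, 32)}s`)
}

So the 32-second cap is unreachable at the current retry count and only matters if GITHUB_429_MAX_RETRIES is raised; the server's Retry-After value is surfaced in the final error message but never used to schedule a sleep.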
@@ -759,7 +817,10 @@
     choices?: Array<{
       message?: {
         role?: string
-        content?: string | null
+        content?:
+          | string
+          | null
+          | Array<{ type?: string; text?: string }>
         tool_calls?: Array<{
           id: string
           function: { name: string; arguments: string }
@@ -778,8 +839,25 @@
     const choice = data.choices?.[0]
     const content: Array<Record<string, unknown>> = []
 
-    if (choice?.message?.content) {
-      content.push({ type: 'text', text: choice.message.content })
+    const rawContent = choice?.message?.content
+    if (typeof rawContent === 'string' && rawContent) {
+      content.push({ type: 'text', text: rawContent })
+    } else if (Array.isArray(rawContent) && rawContent.length > 0) {
+      const parts: string[] = []
+      for (const part of rawContent) {
+        if (
+          part &&
+          typeof part === 'object' &&
+          part.type === 'text' &&
+          typeof part.text === 'string'
+        ) {
+          parts.push(part.text)
+        }
+      }
+      const joined = parts.join('\n')
+      if (joined) {
+        content.push({ type: 'text', text: joined })
+      }
     }
 
     if (choice?.message?.tool_calls) {
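Context for this branch: some OpenAI-compatible backends return message.content as an array of typed parts rather than a plain string, which is what the widened type in the previous hunk allows for. The flattening above behaves like:

// Illustrative input for the array branch; non-text parts are skipped.
const raw = [
  { type: 'text', text: 'Hello' },
  { type: 'image_url' },
  { type: 'text', text: 'world' },
]
// parts = ['Hello', 'world'], joined = 'Hello\nworld'
// content gains one block: { type: 'text', text: 'Hello\nworld' }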
@@ -838,12 +916,11 @@ export function createOpenAIShimClient(options: {
   maxRetries?: number
   timeout?: number
 }): unknown {
+  hydrateGithubModelsTokenFromSecureStorage()
+
   // When Gemini provider is active, map Gemini env vars to OpenAI-compatible ones
   // so the existing providerConfig.ts infrastructure picks them up correctly.
-  if (
-    process.env.CLAUDE_CODE_USE_GEMINI === '1' ||
-    process.env.CLAUDE_CODE_USE_GEMINI === 'true'
-  ) {
+  if (isEnvTruthy(process.env.CLAUDE_CODE_USE_GEMINI)) {
     process.env.OPENAI_BASE_URL ??=
       process.env.GEMINI_BASE_URL ??
       'https://generativelanguage.googleapis.com/v1beta/openai'
@@ -852,6 +929,10 @@ export function createOpenAIShimClient(options: {
     if (process.env.GEMINI_MODEL && !process.env.OPENAI_MODEL) {
       process.env.OPENAI_MODEL = process.env.GEMINI_MODEL
     }
-  }
+  } else if (isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)) {
+    process.env.OPENAI_BASE_URL ??= GITHUB_MODELS_DEFAULT_BASE
+    process.env.OPENAI_API_KEY ??=
+      process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN ?? ''
+  }
 
   const beta = new OpenAIShimBeta({

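Note the precedence encoded by ??= in that branch: explicit OPENAI_* settings always win, and GITHUB_TOKEN is preferred over GH_TOKEN. The equivalent resolution, spelled out as a sketch:

// Effective resolution order under CLAUDE_CODE_USE_GITHUB:
const baseUrl =
  process.env.OPENAI_BASE_URL ?? 'https://models.github.ai/inference'
const apiKey =
  process.env.OPENAI_API_KEY ??
  process.env.GITHUB_TOKEN ??
  process.env.GH_TOKEN ??
  '' // falls back to an empty string rather than throwing here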
View File

@@ -0,0 +1,41 @@
+import { afterEach, expect, test } from 'bun:test'
+import {
+  DEFAULT_GITHUB_MODELS_API_MODEL,
+  normalizeGithubModelsApiModel,
+  resolveProviderRequest,
+} from './providerConfig.js'
+
+const originalUseGithub = process.env.CLAUDE_CODE_USE_GITHUB
+
+afterEach(() => {
+  if (originalUseGithub === undefined) {
+    delete process.env.CLAUDE_CODE_USE_GITHUB
+  } else {
+    process.env.CLAUDE_CODE_USE_GITHUB = originalUseGithub
+  }
+})
+
+test.each([
+  ['copilot', DEFAULT_GITHUB_MODELS_API_MODEL],
+  ['github:copilot', DEFAULT_GITHUB_MODELS_API_MODEL],
+  ['', DEFAULT_GITHUB_MODELS_API_MODEL],
+  ['github:gpt-4o', 'gpt-4o'],
+  ['gpt-4o', 'gpt-4o'],
+  ['github:copilot?reasoning=high', DEFAULT_GITHUB_MODELS_API_MODEL],
+] as const)('normalizeGithubModelsApiModel(%s) -> %s', (input, expected) => {
+  expect(normalizeGithubModelsApiModel(input)).toBe(expected)
+})
+
+test('resolveProviderRequest applies GitHub normalization when CLAUDE_CODE_USE_GITHUB=1', () => {
+  process.env.CLAUDE_CODE_USE_GITHUB = '1'
+  const r = resolveProviderRequest({ model: 'github:gpt-4o' })
+  expect(r.resolvedModel).toBe('gpt-4o')
+  expect(r.transport).toBe('chat_completions')
+})
+
+test('resolveProviderRequest leaves model unchanged without GitHub flag', () => {
+  delete process.env.CLAUDE_CODE_USE_GITHUB
+  const r = resolveProviderRequest({ model: 'github:gpt-4o' })
+  expect(r.resolvedModel).toBe('github:gpt-4o')
+})

View File

@@ -2,8 +2,12 @@ import { existsSync, readFileSync } from 'node:fs'
 import { homedir } from 'node:os'
 import { join } from 'node:path'
+import { isEnvTruthy } from '../../utils/envUtils.js'
 
 export const DEFAULT_OPENAI_BASE_URL = 'https://api.openai.com/v1'
 export const DEFAULT_CODEX_BASE_URL = 'https://chatgpt.com/backend-api/codex'
+
+/** Default GitHub Models API model when user selects copilot / github:copilot */
+export const DEFAULT_GITHUB_MODELS_API_MODEL = 'openai/gpt-4.1'
 
 const CODEX_ALIAS_MODELS: Record<
   string,
@@ -171,6 +175,20 @@ export function isCodexBaseUrl(baseUrl: string | undefined): boolean {
   }
 }
 
+/**
+ * Normalize user model string for GitHub Models inference (models.github.ai).
+ * Mirrors runtime devsper `github._normalize_model_id`.
+ */
+export function normalizeGithubModelsApiModel(requestedModel: string): string {
+  const noQuery = requestedModel.split('?', 1)[0] ?? requestedModel
+  const segment =
+    noQuery.includes(':') ? noQuery.split(':', 2)[1]!.trim() : noQuery.trim()
+  if (!segment || segment.toLowerCase() === 'copilot') {
+    return DEFAULT_GITHUB_MODELS_API_MODEL
+  }
+  return segment
+}
+
 export function resolveProviderRequest(options?: {
   model?: string
   baseUrl?: string
@@ -192,10 +210,16 @@
     ? 'codex_responses'
     : 'chat_completions'
 
+  const resolvedModel =
+    transport === 'chat_completions' &&
+    isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
+      ? normalizeGithubModelsApiModel(requestedModel)
+      : descriptor.baseModel
+
   return {
     transport,
     requestedModel,
-    resolvedModel: descriptor.baseModel,
+    resolvedModel,
     baseUrl:
       (rawBaseUrl ??
       (transport === 'codex_responses'
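Putting the pieces together, under CLAUDE_CODE_USE_GITHUB=1 with no OPENAI_* overrides (a sketch; the baseUrl plumbing is cut off above, so that value is inferred from the env mapping in createOpenAIShimClient):

import { resolveProviderRequest } from './providerConfig.js'

// Expected per the tests and defaults above:
//   transport:      'chat_completions'
//   requestedModel: 'github:copilot'
//   resolvedModel:  'openai/gpt-4.1'  (DEFAULT_GITHUB_MODELS_API_MODEL)
//   baseUrl:        'https://models.github.ai/inference'
const req = resolveProviderRequest({ model: 'github:copilot' })
console.log(req.resolvedModel) // 'openai/gpt-4.1'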