import { afterEach, expect, mock, test } from 'bun:test'

async function loadProviderDiscoveryModule() {
  // @ts-expect-error cache-busting query string for Bun module mocks
  return import(`./providerDiscovery.js?ts=${Date.now()}-${Math.random()}`)
}

const originalFetch = globalThis.fetch
const originalEnv = {
  OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
}

afterEach(() => {
  globalThis.fetch = originalFetch
  // Restore by deleting rather than assigning `undefined`, which process.env
  // would otherwise coerce to the string 'undefined'.
  if (originalEnv.OPENAI_BASE_URL === undefined) {
    delete process.env.OPENAI_BASE_URL
  } else {
    process.env.OPENAI_BASE_URL = originalEnv.OPENAI_BASE_URL
  }
})

test('lists models from a local openai-compatible /models endpoint', async () => {
  const { listOpenAICompatibleModels } = await loadProviderDiscoveryModule()

  globalThis.fetch = mock((input, init) => {
    const url = typeof input === 'string' ? input : input.url
    expect(url).toBe('http://localhost:1234/v1/models')
    expect(init?.headers).toEqual({ Authorization: 'Bearer local-key' })
    return Promise.resolve(
      new Response(
        JSON.stringify({
          data: [
            { id: 'qwen2.5-coder-7b-instruct' },
            { id: 'llama-3.2-3b-instruct' },
            { id: 'qwen2.5-coder-7b-instruct' },
          ],
        }),
        { status: 200 },
      ),
    )
  }) as typeof globalThis.fetch

  await expect(
    listOpenAICompatibleModels({
      baseUrl: 'http://localhost:1234/v1',
      apiKey: 'local-key',
    }),
  ).resolves.toEqual([
    'qwen2.5-coder-7b-instruct',
    'llama-3.2-3b-instruct',
  ])
})

test('returns null when a local openai-compatible /models request fails', async () => {
  const { listOpenAICompatibleModels } = await loadProviderDiscoveryModule()

  globalThis.fetch = mock(() =>
    Promise.resolve(new Response('not available', { status: 503 })),
  ) as typeof globalThis.fetch

  await expect(
    listOpenAICompatibleModels({ baseUrl: 'http://localhost:1234/v1' }),
  ).resolves.toBeNull()
})
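// Note the duplicate 'qwen2.5-coder-7b-instruct' entry in the mocked /models
// payload above: the expected result lists it only once, so the module is
// expected to deduplicate model ids (e.g. with something like
// `[...new Set(ids)]`; a sketch, the real implementation may differ) and to
// resolve to null instead of throwing when the endpoint is down.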
test('detects LM Studio from the default localhost port', async () => {
  const { getLocalOpenAICompatibleProviderLabel } = await loadProviderDiscoveryModule()

  expect(getLocalOpenAICompatibleProviderLabel('http://localhost:1234/v1')).toBe(
    'LM Studio',
  )
})

test('detects common local openai-compatible providers by hostname', async () => {
  const { getLocalOpenAICompatibleProviderLabel } = await loadProviderDiscoveryModule()

  expect(
    getLocalOpenAICompatibleProviderLabel('http://localai.local:8080/v1'),
  ).toBe('LocalAI')
  expect(
    getLocalOpenAICompatibleProviderLabel('http://vllm.local:8000/v1'),
  ).toBe('vLLM')
})

test('falls back to a generic local openai-compatible label', async () => {
  const { getLocalOpenAICompatibleProviderLabel } = await loadProviderDiscoveryModule()

  expect(
    getLocalOpenAICompatibleProviderLabel('http://127.0.0.1:8080/v1'),
  ).toBe('Local OpenAI-compatible')
})
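// The three label tests above pin down the detection heuristics. A minimal
// sketch consistent with them, assuming detection keys off host and port only
// (the real logic in providerDiscovery.js may look at more):
//
//   function getLocalOpenAICompatibleProviderLabel(baseUrl: string): string {
//     const { hostname, port } = new URL(baseUrl)
//     if (port === '1234') return 'LM Studio' // LM Studio's default port
//     if (hostname.includes('localai')) return 'LocalAI'
//     if (hostname.includes('vllm')) return 'vLLM'
//     return 'Local OpenAI-compatible'
//   }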
test('ollama generation readiness reports unreachable when tags endpoint is down', async () => {
  const { probeOllamaGenerationReadiness } = await loadProviderDiscoveryModule()

  const calledUrls: string[] = []
  globalThis.fetch = mock(input => {
    const url = typeof input === 'string' ? input : input.url
    calledUrls.push(url)
    return Promise.resolve(new Response('not available', { status: 503 }))
  }) as typeof globalThis.fetch

  await expect(
    probeOllamaGenerationReadiness({
      baseUrl: 'http://localhost:11434',
    }),
  ).resolves.toMatchObject({
    state: 'unreachable',
    models: [],
  })
  expect(calledUrls).toEqual([
    'http://localhost:11434/api/tags',
  ])
})

test('ollama generation readiness reports no models when server is reachable', async () => {
  const { probeOllamaGenerationReadiness } = await loadProviderDiscoveryModule()

  const calledUrls: string[] = []
  globalThis.fetch = mock(input => {
    const url = typeof input === 'string' ? input : input.url
    calledUrls.push(url)
    return Promise.resolve(
      new Response(JSON.stringify({ models: [] }), {
        status: 200,
        headers: { 'Content-Type': 'application/json' },
      }),
    )
  }) as typeof globalThis.fetch

  await expect(
    probeOllamaGenerationReadiness({
      baseUrl: 'http://localhost:11434',
    }),
  ).resolves.toMatchObject({
    state: 'no_models',
    models: [],
  })
  expect(calledUrls).toEqual([
    'http://localhost:11434/api/tags',
  ])
})

test('ollama generation readiness reports generation_failed when requested model is missing', async () => {
  const { probeOllamaGenerationReadiness } = await loadProviderDiscoveryModule()

  const calledUrls: string[] = []
  globalThis.fetch = mock(input => {
    const url = typeof input === 'string' ? input : input.url
    calledUrls.push(url)
    return Promise.resolve(
      new Response(
        JSON.stringify({
          models: [{ name: 'llama3.1:8b', size: 1024 }],
        }),
        {
          status: 200,
          headers: { 'Content-Type': 'application/json' },
        },
      ),
    )
  }) as typeof globalThis.fetch

  await expect(
    probeOllamaGenerationReadiness({
      baseUrl: 'http://localhost:11434',
      model: 'qwen2.5-coder:7b',
    }),
  ).resolves.toMatchObject({
    state: 'generation_failed',
    probeModel: 'qwen2.5-coder:7b',
    detail: 'requested model not installed: qwen2.5-coder:7b',
  })
  expect(calledUrls).toEqual(['http://localhost:11434/api/tags'])
})

test('ollama generation readiness reports generation failures when chat probe fails', async () => {
  const { probeOllamaGenerationReadiness } = await loadProviderDiscoveryModule()

  globalThis.fetch = mock(input => {
    const url = typeof input === 'string' ? input : input.url
    if (url.endsWith('/api/tags')) {
      return Promise.resolve(
        new Response(
          JSON.stringify({
            models: [{ name: 'qwen2.5-coder:7b', size: 42 }],
          }),
          {
            status: 200,
            headers: { 'Content-Type': 'application/json' },
          },
        ),
      )
    }
    return Promise.resolve(new Response('model not found', { status: 404 }))
  }) as typeof globalThis.fetch

  await expect(
    probeOllamaGenerationReadiness({
      baseUrl: 'http://localhost:11434',
      model: 'qwen2.5-coder:7b',
    }),
  ).resolves.toMatchObject({
    state: 'generation_failed',
    probeModel: 'qwen2.5-coder:7b',
  })
})

test('ollama generation readiness reports generation_failed when chat probe returns invalid JSON', async () => {
  const { probeOllamaGenerationReadiness } = await loadProviderDiscoveryModule()

  globalThis.fetch = mock(input => {
    const url = typeof input === 'string' ? input : input.url
    if (url.endsWith('/api/tags')) {
      return Promise.resolve(
        new Response(
          JSON.stringify({
            models: [{ name: 'llama3.1:8b', size: 1024 }],
          }),
          {
            status: 200,
            headers: { 'Content-Type': 'application/json' },
          },
        ),
      )
    }
    return Promise.resolve(
      new Response('proxy error', {
        status: 200,
        headers: { 'Content-Type': 'text/html' },
      }),
    )
  }) as typeof globalThis.fetch

  await expect(
    probeOllamaGenerationReadiness({
      baseUrl: 'http://localhost:11434',
    }),
  ).resolves.toMatchObject({
    state: 'generation_failed',
    probeModel: 'llama3.1:8b',
    detail: 'invalid JSON response',
  })
})

test('ollama generation readiness reports ready when chat probe succeeds', async () => {
  const { probeOllamaGenerationReadiness } = await loadProviderDiscoveryModule()

  globalThis.fetch = mock(input => {
    const url = typeof input === 'string' ? input : input.url
    if (url.endsWith('/api/tags')) {
      return Promise.resolve(
        new Response(
          JSON.stringify({
            models: [{ name: 'llama3.1:8b', size: 1024 }],
          }),
          {
            status: 200,
            headers: { 'Content-Type': 'application/json' },
          },
        ),
      )
    }
    return Promise.resolve(
      new Response(
        JSON.stringify({
          message: { role: 'assistant', content: 'OK' },
          done: true,
        }),
        {
          status: 200,
          headers: { 'Content-Type': 'application/json' },
        },
      ),
    )
  }) as typeof globalThis.fetch

  await expect(
    probeOllamaGenerationReadiness({
      baseUrl: 'http://localhost:11434',
    }),
  ).resolves.toMatchObject({
    state: 'ready',
    probeModel: 'llama3.1:8b',
  })
})
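// Taken together, the six probe tests above pin down the observable contract
// of probeOllamaGenerationReadiness (a summary of what the tests assert, not
// of the actual implementation, which may differ):
//
//   1. GET {baseUrl}/api/tags fails or errors            => 'unreachable'
//   2. the tags list is empty                            => 'no_models'
//   3. an explicitly requested model is not in the list  => 'generation_failed'
//      (detail names the missing model; no chat probe is sent)
//   4. otherwise a chat probe is sent for the requested model, or the first
//      installed one; an error status or non-JSON body   => 'generation_failed'
//   5. a well-formed chat response                       => 'ready'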