diff --git a/src/utils/model/openaiContextWindows.ts b/src/utils/model/openaiContextWindows.ts
index 66db3d35..315b514b 100644
--- a/src/utils/model/openaiContextWindows.ts
+++ b/src/utils/model/openaiContextWindows.ts
@@ -23,9 +23,13 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
   'gpt-4.1-nano': 1_047_576,
   'gpt-4-turbo': 128_000,
   'gpt-4': 8_192,
+  'o1': 200_000,
+  'o1-mini': 128_000,
+  'o1-preview': 128_000,
+  'o1-pro': 200_000,
+  'o3': 200_000,
   'o3-mini': 200_000,
   'o4-mini': 200_000,
-  'o3': 200_000,
 
   // DeepSeek (V3: 128k context per official docs)
   'deepseek-chat': 128_000,
@@ -63,6 +67,9 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
   'phi4:14b': 16_384,
   'gemma2:27b': 8_192,
   'codellama:13b': 16_384,
+  'llama3.2:1b': 128_000,
+  'qwen3:8b': 128_000,
+  'codestral': 32_768,
 }
 
 /**
@@ -82,9 +89,13 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
   'gpt-4.1-nano': 32_768,
   'gpt-4-turbo': 4_096,
   'gpt-4': 4_096,
+  'o1': 100_000,
+  'o1-mini': 65_536,
+  'o1-preview': 32_768,
+  'o1-pro': 100_000,
+  'o3': 100_000,
   'o3-mini': 100_000,
   'o4-mini': 100_000,
-  'o3': 100_000,
 
   // DeepSeek
   'deepseek-chat': 8_192,
@@ -120,6 +131,9 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
   'phi4:14b': 4_096,
   'gemma2:27b': 4_096,
   'codellama:13b': 4_096,
+  'llama3.2:1b': 4_096,
+  'qwen3:8b': 8_192,
+  'codestral': 8_192,
 }
 
 function lookupByModel<T>(table: Record<string, T>, model: string): T | undefined {