diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts
index 9e2dd0ba0b5..100465c67d7 100644
--- a/packages/opencode/src/provider/provider.ts
+++ b/packages/opencode/src/provider/provider.ts
@@ -586,6 +586,13 @@ export namespace Provider {
   })
   export type Info = z.infer<typeof Info>
 
+  export function isAzureAnthropic(model: Model): boolean {
+    return (
+      model.providerID === "azure-cognitive-services" &&
+      (model.api.id.includes("claude") || model.api.id.includes("anthropic"))
+    )
+  }
+
   function fromModelsDevModel(provider: ModelsDev.Provider, model: ModelsDev.Model): Model {
     const m: Model = {
       id: model.id,
@@ -1006,9 +1013,16 @@ export namespace Provider {
       })
     }
 
-    // Special case: google-vertex-anthropic uses a subpath import
-    const bundledKey =
-      model.providerID === "google-vertex-anthropic" ? "@ai-sdk/google-vertex/anthropic" : model.api.npm
+    // Special cases for providers that use different npm packages
+    if (isAzureAnthropic(model)) {
+      const resourceName = Env.get("AZURE_COGNITIVE_SERVICES_RESOURCE_NAME")
+      if (resourceName) options["baseURL"] = `https://${resourceName}.services.ai.azure.com/anthropic/v1/`
+    }
+    const bundledKey = iife(() => {
+      if (model.providerID === "google-vertex-anthropic") return "@ai-sdk/google-vertex/anthropic"
+      if (isAzureAnthropic(model)) return "@ai-sdk/anthropic"
+      return model.api.npm
+    })
     const bundledFn = BUNDLED_PROVIDERS[bundledKey]
     if (bundledFn) {
       log.info("using bundled provider", { providerID: model.providerID, pkg: bundledKey })
@@ -1074,8 +1088,11 @@
     const provider = s.providers[model.providerID]
     const sdk = await getSDK(model)
 
+    // Skip custom model loader for Azure Anthropic models since they use @ai-sdk/anthropic
+    const useCustomLoader = s.modelLoaders[model.providerID] && !isAzureAnthropic(model)
+
     try {
-      const language = s.modelLoaders[model.providerID]
+      const language = useCustomLoader
         ? await s.modelLoaders[model.providerID](sdk, model.api.id, provider.options)
         : sdk.languageModel(model.api.id)
       s.models.set(key, language)
diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts
index cdb65c79af3..5e88216be83 100644
--- a/packages/opencode/src/provider/transform.ts
+++ b/packages/opencode/src/provider/transform.ts
@@ -16,6 +16,17 @@ function mimeToModality(mime: string): Modality | undefined {
 }
 
 export namespace ProviderTransform {
+  function isAzureAnthropic(model: Provider.Model): boolean {
+    return (
+      model.providerID === "azure-cognitive-services" &&
+      (model.api.id.includes("claude") || model.api.id.includes("anthropic"))
+    )
+  }
+
+  function usesAnthropicSDK(model: Provider.Model): boolean {
+    return model.api.npm === "@ai-sdk/anthropic" || isAzureAnthropic(model)
+  }
+
   function normalizeMessages(
     msgs: ModelMessage[],
     model: Provider.Model,
@@ -42,7 +53,7 @@
 
     // Anthropic rejects messages with empty content - filter out empty string messages
     // and remove empty text/reasoning parts from array content
-    if (model.api.npm === "@ai-sdk/anthropic") {
+    if (usesAnthropicSDK(model)) {
       msgs = msgs
         .map((msg) => {
           if (typeof msg.content === "string") {
@@ -248,7 +259,7 @@
       model.providerID === "anthropic" ||
       model.api.id.includes("anthropic") ||
       model.api.id.includes("claude") ||
-      model.api.npm === "@ai-sdk/anthropic"
+      usesAnthropicSDK(model)
     ) {
       msgs = applyCaching(msgs, model.providerID)
     }
@@ -300,6 +311,23 @@
     const id = model.id.toLowerCase()
     if (id.includes("deepseek") || id.includes("minimax") || id.includes("glm") || id.includes("mistral")) return {}
 
+    if (isAzureAnthropic(model)) {
+      return {
+        high: {
+          thinking: {
+            type: "enabled",
+            budgetTokens: 16000,
+          },
+        },
+        max: {
+          thinking: {
+            type: "enabled",
+            budgetTokens: 31999,
+          },
+        },
+      }
+    }
+
     switch (model.api.npm) {
       case "@openrouter/ai-sdk-provider":
         if (!model.id.includes("gpt") && !model.id.includes("gemini-3") && !model.id.includes("grok-4")) return {}
@@ -570,6 +598,9 @@
   }
 
   export function providerOptions(model: Provider.Model, options: { [x: string]: any }) {
+    if (isAzureAnthropic(model)) {
+      return { ["anthropic" as string]: options }
+    }
     switch (model.api.npm) {
       case "@ai-sdk/github-copilot":
       case "@ai-sdk/openai":
@@ -605,16 +636,27 @@
     }
   }
 
+  export function maxOutputTokens(model: Provider.Model, options: Record<string, any>, globalLimit: number): number
   export function maxOutputTokens(
     npm: string,
     options: Record<string, any>,
     modelLimit: number,
    globalLimit: number,
+  ): number
+  export function maxOutputTokens(
+    arg1: Provider.Model | string,
+    options: Record<string, any>,
+    arg3: number,
+    arg4?: number,
   ): number {
+    const model = typeof arg1 === "object" ? arg1 : null
+    const npm = model ? model.api.npm : (arg1 as string)
+    const modelLimit = model ? model.limit.output : arg3
+    const globalLimit = model ? arg3 : arg4!
     const modelCap = modelLimit || globalLimit
     const standardLimit = Math.min(modelCap, globalLimit)
 
-    if (npm === "@ai-sdk/anthropic") {
+    if (model ? usesAnthropicSDK(model) : npm === "@ai-sdk/anthropic") {
       const thinking = options?.["thinking"]
       const budgetTokens = typeof thinking?.["budgetTokens"] === "number" ? thinking["budgetTokens"] : 0
       const enabled = thinking?.["type"] === "enabled"
diff --git a/packages/opencode/src/session/llm.ts b/packages/opencode/src/session/llm.ts
index 1029b45ea0d..5b6b34d3c49 100644
--- a/packages/opencode/src/session/llm.ts
+++ b/packages/opencode/src/session/llm.ts
@@ -133,12 +133,7 @@
     const maxOutputTokens = isCodex
       ? undefined
-      : ProviderTransform.maxOutputTokens(
-          input.model.api.npm,
-          params.options,
-          input.model.limit.output,
-          OUTPUT_TOKEN_MAX,
-        )
+      : ProviderTransform.maxOutputTokens(input.model, params.options, OUTPUT_TOKEN_MAX)
 
     const tools = await resolveTools(input)