Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 27 additions & 1 deletion packages/opencode/src/session/llm.ts
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,8 @@ import {
type Tool,
type ToolSet,
extractReasoningMiddleware,
tool,
jsonSchema,
} from "ai"
import { clone, mergeDeep, pipe } from "remeda"
import { ProviderTransform } from "@/provider/transform"
Expand Down Expand Up @@ -137,6 +139,18 @@ export namespace LLM {

const tools = await resolveTools(input)

// LiteLLM and some Anthropic proxies require the tools parameter to be present
// when message history contains tool calls, even if no tools are being used.
// Add a dummy tool that is never called to satisfy this validation.
if (Object.keys(tools).length === 0 && hasToolCalls(input.messages)) {
tools["_noop"] = tool({
description:
"Placeholder for LiteLLM/Anthropic proxy compatibility - required when message history contains tool calls but no active tools are needed",
inputSchema: jsonSchema({ type: "object", properties: {} }),
execute: async () => ({ output: "", title: "", metadata: {} }),
})
}

return streamText({
onError(error) {
l.error("stream error", {
Expand Down Expand Up @@ -168,7 +182,7 @@ export namespace LLM {
topP: params.topP,
topK: params.topK,
providerOptions: ProviderTransform.providerOptions(input.model, params.options),
activeTools: Object.keys(tools).filter((x) => x !== "invalid"),
activeTools: Object.keys(tools).filter((x) => x !== "invalid" && x !== "_noop"),
tools,
maxOutputTokens,
abortSignal: input.abort,
Expand Down Expand Up @@ -235,4 +249,16 @@ export namespace LLM {
}
return input.tools
}

// Check if messages contain any tool-call content.
// Used to determine if a dummy tool should be added for LiteLLM proxy compatibility.
// String-content messages are skipped: only array content can carry tool parts.
export function hasToolCalls(messages: ModelMessage[]): boolean {
  return messages.some(
    (message) =>
      Array.isArray(message.content) &&
      message.content.some((part) => part.type === "tool-call" || part.type === "tool-result"),
  )
}
}
11 changes: 11 additions & 0 deletions packages/opencode/src/session/message-v2.ts
Original file line number Diff line number Diff line change
Expand Up @@ -533,6 +533,17 @@ export namespace MessageV2 {
errorText: part.state.error,
callProviderMetadata: part.metadata,
})
// Handle pending/running tool calls to prevent dangling tool_use blocks
// Anthropic/Claude APIs require every tool_use to have a corresponding tool_result
if (part.state.status === "pending" || part.state.status === "running")
assistantMessage.parts.push({
type: ("tool-" + part.tool) as `tool-${string}`,
state: "output-error",
toolCallId: part.callID,
input: part.state.input,
errorText: "[Tool execution was interrupted]",
callProviderMetadata: part.metadata,
})
}
if (part.type === "reasoning") {
assistantMessage.parts.push({
Expand Down
90 changes: 90 additions & 0 deletions packages/opencode/test/session/llm.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,90 @@
import { describe, expect, test } from "bun:test"
import { LLM } from "../../src/session/llm"
import type { ModelMessage } from "ai"

describe("session.llm.hasToolCalls", () => {
  test("returns false for empty messages array", () => {
    expect(LLM.hasToolCalls([])).toBe(false)
  })

  test("returns false for messages with only text content", () => {
    const history: ModelMessage[] = [
      { role: "user", content: [{ type: "text", text: "Hello" }] },
      { role: "assistant", content: [{ type: "text", text: "Hi there" }] },
    ]
    expect(LLM.hasToolCalls(history)).toBe(false)
  })

  test("returns true when messages contain tool-call", () => {
    const history = [
      { role: "user", content: [{ type: "text", text: "Run a command" }] },
      {
        role: "assistant",
        content: [{ type: "tool-call", toolCallId: "call-123", toolName: "bash" }],
      },
    ] as ModelMessage[]
    expect(LLM.hasToolCalls(history)).toBe(true)
  })

  test("returns true when messages contain tool-result", () => {
    const history = [
      {
        role: "tool",
        content: [{ type: "tool-result", toolCallId: "call-123", toolName: "bash" }],
      },
    ] as ModelMessage[]
    expect(LLM.hasToolCalls(history)).toBe(true)
  })

  test("returns false for messages with string content", () => {
    // Plain string content has no structured parts, so it can never carry a tool call.
    const history: ModelMessage[] = [
      { role: "user", content: "Hello world" },
      { role: "assistant", content: "Hi there" },
    ]
    expect(LLM.hasToolCalls(history)).toBe(false)
  })

  test("returns true when tool-call is mixed with text content", () => {
    const history = [
      {
        role: "assistant",
        content: [
          { type: "text", text: "Let me run that command" },
          { type: "tool-call", toolCallId: "call-456", toolName: "read" },
        ],
      },
    ] as ModelMessage[]
    expect(LLM.hasToolCalls(history)).toBe(true)
  })
})
90 changes: 90 additions & 0 deletions packages/opencode/test/session/message-v2.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -569,4 +569,94 @@ describe("session.message-v2.toModelMessage", () => {

expect(MessageV2.toModelMessage(input)).toStrictEqual([])
})

test("converts pending/running tool calls to error results to prevent dangling tool_use", () => {
  const userID = "m-user"
  const assistantID = "m-assistant"

  // One pending and one running tool part, neither of which ever produced a result.
  const history: MessageV2.WithParts[] = [
    {
      info: userInfo(userID),
      parts: [{ ...basePart(userID, "u1"), type: "text", text: "run tool" }] as MessageV2.Part[],
    },
    {
      info: assistantInfo(assistantID, userID),
      parts: [
        {
          ...basePart(assistantID, "a1"),
          type: "tool",
          callID: "call-pending",
          tool: "bash",
          state: { status: "pending", input: { cmd: "ls" }, raw: "" },
        },
        {
          ...basePart(assistantID, "a2"),
          type: "tool",
          callID: "call-running",
          tool: "read",
          state: { status: "running", input: { path: "/tmp" }, time: { start: 0 } },
        },
      ] as MessageV2.Part[],
    },
  ]

  // Both unfinished calls must be closed out with the same synthetic error output.
  const interrupted = { type: "error-text", value: "[Tool execution was interrupted]" }

  expect(MessageV2.toModelMessage(history)).toStrictEqual([
    {
      role: "user",
      content: [{ type: "text", text: "run tool" }],
    },
    {
      role: "assistant",
      content: [
        {
          type: "tool-call",
          toolCallId: "call-pending",
          toolName: "bash",
          input: { cmd: "ls" },
          providerExecuted: undefined,
        },
        {
          type: "tool-call",
          toolCallId: "call-running",
          toolName: "read",
          input: { path: "/tmp" },
          providerExecuted: undefined,
        },
      ],
    },
    {
      role: "tool",
      content: [
        { type: "tool-result", toolCallId: "call-pending", toolName: "bash", output: interrupted },
        { type: "tool-result", toolCallId: "call-running", toolName: "read", output: interrupted },
      ],
    },
  ])
})
})
Loading