Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
44 changes: 44 additions & 0 deletions ccproxy/llms/formatters/anthropic_to_openai/_helpers.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
"""Shared helpers for Anthropic to OpenAI formatting."""

from __future__ import annotations

import json
from typing import Any

from ccproxy.llms.models import openai as openai_models


def serialize_tool_arguments(tool_input: Any) -> str:
    """Serialize a tool-use ``input`` payload into a JSON string.

    Strings are passed through unchanged (they are assumed to already be
    serialized arguments). Anything else is JSON-encoded; values that
    ``json.dumps`` cannot handle are wrapped as ``{"arguments": str(value)}``
    so the caller always receives valid JSON text.
    """
    if isinstance(tool_input, str):
        return tool_input
    try:
        return json.dumps(tool_input, ensure_ascii=False)
    except (TypeError, ValueError):
        # json.dumps raises TypeError for unserializable objects and
        # ValueError for e.g. circular references / out-of-range floats.
        # Fall back to a stringified wrapper rather than propagating.
        return json.dumps({"arguments": str(tool_input)})


def build_openai_tool_call(
    *,
    tool_id: str | None,
    tool_name: str | None,
    tool_input: Any,
    arguments: Any = None,
    fallback_index: int = 0,
) -> openai_models.ToolCall:
    """Build an OpenAI ``ToolCall`` from Anthropic tool-use fields.

    Pre-serialized ``arguments`` take precedence when they are a non-empty
    string; otherwise ``tool_input`` is serialized. Missing/empty id and
    name fall back to ``call_{fallback_index}`` and ``"function"``.
    """
    if isinstance(arguments, str) and arguments:
        serialized_args = arguments
    else:
        serialized_args = serialize_tool_arguments(tool_input)

    if not (isinstance(tool_id, str) and tool_id):
        tool_id = f"call_{fallback_index}"
    if not (isinstance(tool_name, str) and tool_name):
        tool_name = "function"

    function_call = openai_models.FunctionCall(
        name=str(tool_name),
        arguments=str(serialized_args),
    )
    return openai_models.ToolCall(id=str(tool_id), function=function_call)
21 changes: 19 additions & 2 deletions ccproxy/llms/formatters/anthropic_to_openai/responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@
from ccproxy.llms.models import anthropic as anthropic_models
from ccproxy.llms.models import openai as openai_models

from ._helpers import build_openai_tool_call


logger = ccproxy.core.logging.get_logger(__name__)

Expand Down Expand Up @@ -101,6 +103,8 @@ def convert__anthropic_message_to_openai_chat__response(
"""Convert Anthropic MessageResponse to an OpenAI ChatCompletionResponse."""
content_blocks = response.content
parts: list[str] = []
tool_calls: list[openai_models.ToolCall] = []

for block in content_blocks:
btype = getattr(block, "type", None)
if btype == "text":
Expand All @@ -117,8 +121,17 @@ def convert__anthropic_message_to_openai_chat__response(
else ""
)
parts.append(f"<thinking{sig_attr}>{thinking}</thinking>")
elif btype == "tool_use":
tool_calls.append(
build_openai_tool_call(
tool_id=getattr(block, "id", None),
tool_name=getattr(block, "name", None),
tool_input=getattr(block, "input", {}) or {},
fallback_index=len(tool_calls),
)
)

content_text = "".join(parts)
content_text = "".join(parts) if parts else None

stop_reason = response.stop_reason
finish_reason = ANTHROPIC_TO_OPENAI_FINISH_REASON.get(
Expand All @@ -127,12 +140,16 @@ def convert__anthropic_message_to_openai_chat__response(

usage_model = convert__anthropic_usage_to_openai_completion__usage(response.usage)

message_dict: dict[str, Any] = {"role": "assistant", "content": content_text}
if tool_calls:
message_dict["tool_calls"] = [call.model_dump() for call in tool_calls]

payload = {
"id": response.id,
"choices": [
{
"index": 0,
"message": {"role": "assistant", "content": content_text},
"message": message_dict,
"finish_reason": finish_reason,
}
],
Expand Down
26 changes: 9 additions & 17 deletions ccproxy/llms/formatters/anthropic_to_openai/streams.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,10 +27,9 @@
from ccproxy.llms.models import openai as openai_models
from ccproxy.llms.streaming.accumulators import ClaudeAccumulator

from ._helpers import build_openai_tool_call
from .requests import _build_responses_payload_from_anthropic_request
from .responses import (
convert__anthropic_usage_to_openai_responses__usage,
)
from .responses import convert__anthropic_usage_to_openai_responses__usage


logger = ccproxy.core.logging.get_logger(__name__)
Expand Down Expand Up @@ -100,22 +99,15 @@ def _build_openai_tool_call(
function_payload = (
tool_call.get("function", {}) if isinstance(tool_call, dict) else {}
)
name = function_payload.get("name") or tool_call.get("name") or "function"
tool_name = function_payload.get("name") or tool_call.get("name")
arguments = function_payload.get("arguments")
if not isinstance(arguments, str) or not arguments:
try:
arguments = json.dumps(tool_call.get("input", {}), ensure_ascii=False)
except Exception:
arguments = json.dumps(tool_call.get("input", {}))

tool_id = tool_call.get("id") or f"call_{block_index}"

return openai_models.ToolCall(
id=str(tool_id),
function=openai_models.FunctionCall(
name=str(name),
arguments=str(arguments),
),
return build_openai_tool_call(
tool_id=tool_call.get("id"),
tool_name=tool_name,
tool_input=tool_call.get("input", {}),
arguments=arguments,
fallback_index=block_index,
)

return None
Expand Down
36 changes: 36 additions & 0 deletions tests/unit/llms/formatters/test_anthropic_to_openai_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,42 @@ async def test_convert__anthropic_message_to_openai_chat__response_basic() -> No
assert out.usage.total_tokens == 3


@pytest.mark.asyncio
async def test_convert__anthropic_message_to_openai_chat__response_tool_use() -> None:
    """A tool_use content block maps to an OpenAI tool call with JSON args."""
    tool_block = anthropic_models.ToolUseBlock(
        type="tool_use",
        id="tool_123",
        name="get_weather",
        input={"location": "Boston", "units": "metric"},
    )
    response = anthropic_models.MessageResponse(
        id="msg_tool_1",
        type="message",
        role="assistant",
        model="claude-3",
        content=[tool_block],
        stop_reason="tool_use",
        stop_sequence=None,
        usage=anthropic_models.Usage(input_tokens=3, output_tokens=4),
    )

    result = convert__anthropic_message_to_openai_chat__response(response)

    assert isinstance(result, openai_models.ChatCompletionResponse)
    choice = result.choices[0]
    # With only tool calls and no text blocks, content should be None.
    assert choice.finish_reason == "tool_calls"
    assert choice.message.content is None

    calls = choice.message.tool_calls
    assert calls is not None and len(calls) == 1
    call = calls[0]
    assert call.id == "tool_123"
    assert call.function.name == "get_weather"
    assert json.loads(call.function.arguments) == {
        "location": "Boston",
        "units": "metric",
    }


@pytest.mark.asyncio
async def test_convert__anthropic_message_to_openai_responses__stream_minimal() -> None:
register_request(
Expand Down
9 changes: 9 additions & 0 deletions tests/unit/llms/formatters/test_formatter_endpoint_samples.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from __future__ import annotations

import json
from collections.abc import AsyncIterator
from typing import Any

Expand Down Expand Up @@ -429,6 +430,7 @@ async def test_openai_responses_stream_to_anthropic(
{
"expected_finish": "tool_calls",
"text_snippet": "weather information",
"tool_names": {"get_weather", "calculate_distance"},
},
),
]
Expand All @@ -452,6 +454,13 @@ def test_anthropic_message_to_openai_chat(
if isinstance(content, str) and expect.get("text_snippet"):
assert expect["text_snippet"] in content

if expect.get("tool_names"):
tool_calls = choice.message.tool_calls or []
tool_names = {tool_call.function.name for tool_call in tool_calls}
assert tool_names == expect["tool_names"]
for tool_call in tool_calls:
assert json.loads(tool_call.function.arguments)


@pytest.mark.unit
def test_anthropic_message_request_to_openai_chat_handles_tools() -> None:
Expand Down
Loading