Skip to content

Commit b15c7fd

Browse files
authored
anthropic[patch]: fix response metadata type (langchain-ai#19683)
1 parent 9c4b6dc commit b15c7fd

File tree

2 files changed: +39 −9 lines changed

libs/partners/anthropic/langchain_anthropic/chat_models.py

+8-9
Original file line number · Diff line number · Diff line change
@@ -268,16 +268,15 @@ async def _astream(
268268
await run_manager.on_llm_new_token(text, chunk=chunk)
269269
yield chunk
270270

271-
def _format_output(
272-
self,
273-
data: Any,
274-
**kwargs: Any,
275-
) -> ChatResult:
271+
def _format_output(self, data: Any, **kwargs: Any) -> ChatResult:
272+
data_dict = data.model_dump()
273+
content = data_dict["content"]
274+
llm_output = {
275+
k: v for k, v in data_dict.items() if k not in ("content", "role", "type")
276+
}
276277
return ChatResult(
277-
generations=[
278-
ChatGeneration(message=AIMessage(content=data.content[0].text))
279-
],
280-
llm_output=data,
278+
generations=[ChatGeneration(message=AIMessage(content=content[0]["text"]))],
279+
llm_output=llm_output,
281280
)
282281

283282
def _generate(

libs/partners/anthropic/tests/unit_tests/test_chat_models.py

+31
Original file line number · Diff line number · Diff line change
@@ -3,6 +3,9 @@
33
import os
44

55
import pytest
6+
from anthropic.types import ContentBlock, Message, Usage
7+
from langchain_core.messages import AIMessage
8+
from langchain_core.outputs import ChatGeneration, ChatResult
69

710
from langchain_anthropic import ChatAnthropic, ChatAnthropicMessages
811

@@ -52,3 +55,31 @@ def test_anthropic_initialization() -> None:
5255
# Verify that chat anthropic can be initialized using a secret key provided
5356
# as a parameter rather than an environment variable.
5457
ChatAnthropic(model="test", anthropic_api_key="test")
58+
59+
60+
def test__format_output() -> None:
61+
anthropic_msg = Message(
62+
id="foo",
63+
content=[ContentBlock(type="text", text="bar")],
64+
model="baz",
65+
role="assistant",
66+
stop_reason=None,
67+
stop_sequence=None,
68+
usage=Usage(input_tokens=2, output_tokens=1),
69+
type="message",
70+
)
71+
expected = ChatResult(
72+
generations=[
73+
ChatGeneration(message=AIMessage("bar")),
74+
],
75+
llm_output={
76+
"id": "foo",
77+
"model": "baz",
78+
"stop_reason": None,
79+
"stop_sequence": None,
80+
"usage": {"input_tokens": 2, "output_tokens": 1},
81+
},
82+
)
83+
llm = ChatAnthropic(model="test", anthropic_api_key="test")
84+
actual = llm._format_output(anthropic_msg)
85+
assert expected == actual

0 commit comments

Comments (0)