Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from .callbacks import BraintrustCallbackHandler
from .context import set_global_handler
from .context import clear_global_handler, set_global_handler

__all__ = ["BraintrustCallbackHandler", "set_global_handler"]
__all__ = ["BraintrustCallbackHandler", "set_global_handler", "clear_global_handler"]
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

not sure we needed this change. should we just kill the source in the repo? the published pypi may be enough. perhaps we can save a tag or branch if we need to provide patch fixes.

3 changes: 2 additions & 1 deletion py/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,8 @@ install-dev: install-build-deps
python -m uv pip install -r requirements-dev.txt

install-optional:
python -m uv pip install anthropic openai pydantic_ai litellm agno google-genai dspy langsmith
python -m uv pip install anthropic openai pydantic_ai litellm agno google-genai dspy langsmith \
langchain langchain-openai tenacity
python -m uv pip install -e .[temporal,otel]

.DEFAULT_GOAL := help
Expand Down
1 change: 1 addition & 0 deletions py/examples/auto_instrument.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
- Agno
- Claude Agent SDK
- DSPy
- LangChain
"""

import braintrust
Expand Down
35 changes: 35 additions & 0 deletions py/examples/langchain/auto.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
"""
Example: LangChain with auto_instrument()

This example demonstrates automatic tracing of LangChain operations
using braintrust.auto_instrument().

Run with: python examples/langchain/auto.py
"""

import braintrust

# One-line instrumentation - call this BEFORE importing LangChain
results = braintrust.auto_instrument()
print(f"LangChain instrumented: {results.get('langchain', False)}")

# Initialize logging
logger = braintrust.init_logger(project="langchain-auto-example")

# Now import LangChain - all operations are automatically traced
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

# Create a simple chain
prompt = ChatPromptTemplate.from_template("What is {number} + {number}?")
model = ChatOpenAI(model="gpt-4o-mini")
chain = prompt | model

# Wrap in a span to get a link
with braintrust.start_span(name="langchain_auto_example") as span:
print("Running LangChain chain...")
result = chain.invoke({"number": "5"})
print(f"Result: {result.content}")
span.log(output=result.content)

print(f"\nView trace: {span.link()}")
62 changes: 62 additions & 0 deletions py/examples/langchain/manual.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
"""
Example: LangChain with manual setup

This example demonstrates using setup_langchain() for global handler registration
and BraintrustCallbackHandler for per-call tracing.

Run with: python examples/langchain/manual.py
"""

import braintrust
from braintrust.wrappers.langchain import (
BraintrustCallbackHandler,
set_global_handler,
setup_langchain,
)

# Initialize logging
logger = braintrust.init_logger(project="langchain-manual-example")

# Method 1: Global handler via setup_langchain()
# This registers a handler that traces ALL LangChain operations automatically
print("Method 1: Global handler")
setup_langchain()

from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_template("What is the capital of {country}?")
model = ChatOpenAI(model="gpt-4o-mini")
chain = prompt | model

# All operations are traced automatically
result = chain.invoke({"country": "France"})
print(f" Capital: {result.content}\n")


# Method 2: Per-call handler
# This is useful when you want more control over which calls are traced
print("Method 2: Per-call handler")

# Create a handler with a specific logger
handler = BraintrustCallbackHandler(logger=logger)

# Pass the handler explicitly to chain.invoke()
result = chain.invoke(
{"country": "Japan"},
config={"callbacks": [handler]}
)
print(f" Capital: {result.content}\n")


# Method 3: Global handler with custom handler instance
print("Method 3: Custom global handler")

# Create a custom handler and set it globally
custom_handler = BraintrustCallbackHandler(logger=logger)
set_global_handler(custom_handler)

result = chain.invoke({"country": "Brazil"})
print(f" Capital: {result.content}\n")

print("Check Braintrust dashboard for traces!")
19 changes: 19 additions & 0 deletions py/noxfile.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@
"openai",
"openai-agents",
# pydantic_ai is NOT included here - it has dedicated test sessions with version-specific handling
# langchain is NOT included here - it has dedicated test sessions with version-specific handling
"autoevals",
"braintrust_core",
"litellm",
Expand Down Expand Up @@ -73,6 +74,8 @@
DSPY_VERSIONS = (LATEST,)
# temporalio 1.19.0+ requires Python >= 3.10; skip Python 3.9 entirely
TEMPORAL_VERSIONS = (LATEST, "1.20.0", "1.19.0")
# langchain requires Python >= 3.10
LANGCHAIN_VERSIONS = (LATEST, "0.3.27")


@nox.session()
Expand Down Expand Up @@ -193,6 +196,20 @@ def test_dspy(session, version):
_run_tests(session, f"{WRAPPER_DIR}/test_dspy.py")


@nox.session()
@nox.parametrize("version", LANGCHAIN_VERSIONS, ids=LANGCHAIN_VERSIONS)
def test_langchain(session, version):
    """Test LangChain integration.

    Installs the parametrized langchain version plus its companion
    packages, then runs the wrapper tests and the core test suite.
    """
    # langchain requires Python >= 3.10; skip older interpreters.
    # NOTE(review): if 3.9 is no longer supported at all, this guard can be
    # dropped along with the 3.9 session matrix -- confirm before removing.
    if sys.version_info < (3, 10):
        session.skip("langchain tests require Python >= 3.10")
    _install_test_deps(session)
    _install(session, "langchain", version)
    # Companion packages are always installed at latest; only the core
    # langchain package is version-pinned by the parametrization.
    session.install("langchain-openai", "langchain-anthropic", "langgraph")
    _run_tests(session, f"{WRAPPER_DIR}/test_langchain.py")
    _run_core_tests(session)

@nox.session()
@nox.parametrize("version", AUTOEVALS_VERSIONS, ids=AUTOEVALS_VERSIONS)
def test_autoevals(session, version):
Expand Down Expand Up @@ -267,6 +284,8 @@ def pylint(session):
session.install("opentelemetry.instrumentation.openai")
# langsmith is needed for the wrapper module but not in VENDOR_PACKAGES
session.install("langsmith")
# langchain dependencies for the langchain wrapper
session.install("langchain", "langchain-openai", "langchain-anthropic", "langgraph")

result = session.run("git", "ls-files", "**/*.py", silent=True, log=False)
files = result.strip().splitlines()
Expand Down
17 changes: 17 additions & 0 deletions py/src/braintrust/auto.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ def auto_instrument(
agno: bool = True,
claude_agent_sdk: bool = True,
dspy: bool = True,
langchain: bool = True,
) -> dict[str, bool]:
"""
Auto-instrument supported AI/ML libraries for Braintrust tracing.
Expand All @@ -54,6 +55,7 @@ def auto_instrument(
agno: Enable Agno instrumentation (default: True)
claude_agent_sdk: Enable Claude Agent SDK instrumentation (default: True)
dspy: Enable DSPy instrumentation (default: True)
langchain: Enable LangChain instrumentation (default: True)

Returns:
Dict mapping integration name to whether it was successfully instrumented.
Expand Down Expand Up @@ -91,6 +93,11 @@ def auto_instrument(
from google.genai import Client
client = Client()
client.models.generate_content(model="gemini-2.0-flash", contents="Hello!")

# LangChain
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-4o-mini")
model.invoke("Hello!")
```
"""
results = {}
Expand All @@ -111,6 +118,8 @@ def auto_instrument(
results["claude_agent_sdk"] = _instrument_claude_agent_sdk()
if dspy:
results["dspy"] = _instrument_dspy()
if langchain:
results["langchain"] = _instrument_langchain()

return results

Expand Down Expand Up @@ -177,3 +186,11 @@ def _instrument_dspy() -> bool:

return patch_dspy()
return False


def _instrument_langchain() -> bool:
    """Attempt to enable LangChain instrumentation.

    Returns the result of setup_langchain() on success, or False when the
    import or setup fails inside the _try_patch guard.
    """
    # _try_patch presumably suppresses ImportError/patch failures so that a
    # missing optional dependency doesn't break auto_instrument() -- confirm
    # against its definition earlier in this module.
    with _try_patch():
        from braintrust.wrappers.langchain import setup_langchain

        return setup_langchain()
    # Reached only if the with-body raised and _try_patch swallowed it.
    return False
Loading
Loading