114 changes: 102 additions & 12 deletions libs/langchain/langchain/chat_models/base.py
@@ -76,17 +76,20 @@
Union[Literal["any"], list[str], tuple[str, ...]]
] = None,
config_prefix: Optional[str] = None,
context_schema: Optional[type] = None,
**kwargs: Any,
) -> Union[BaseChatModel, _ConfigurableModel]:
"""Initialize a ChatModel in a single line using the model's name and provider.

.. note::
Must have the integration package corresponding to the model provider installed.
You should look at the `provider integration's API reference <https://python.langchain.com/api_reference/reference.html#integrations>`__
You should look at the `provider integration's API reference
<https://python.langchain.com/api_reference/reference.html#integrations>`__
to see what parameters are supported by the model.

Args:
model: The name of the model, e.g. ``'o3-mini'``, ``'claude-3-5-sonnet-latest'``. You can
model: The name of the model, e.g. ``'o3-mini'``,
``'claude-3-5-sonnet-latest'``. You can
also specify model and model provider in a single argument using
``'{model_provider}:{model}'`` format, e.g. ``'openai:o1'``.
model_provider: The model provider if not specified as part of model arg (see
@@ -149,6 +152,10 @@
``config["configurable"]["{config_prefix}_{param}"]`` keys. If
``'config_prefix'`` is an empty string then model will be configurable via
``config["configurable"]["{param}"]``.
context_schema: An optional schema type for runtime context. When provided,
model parameters can be passed via the runtime ``context`` argument instead
of through ``config["configurable"]``. This aligns with LangGraph v0.6+
recommendations for static runtime context.
temperature: Model temperature.
max_tokens: Max output tokens.
timeout: The maximum time (in seconds) to wait for a response from the model
@@ -221,7 +228,7 @@

configurable_model_with_default = init_chat_model(
"openai:gpt-4o",
configurable_fields="any", # this allows us to configure other params like temperature, max_tokens, etc at runtime.
configurable_fields="any", # configure params at runtime
config_prefix="foo",
temperature=0,
)
@@ -289,6 +296,40 @@
)
# Claude-3.5 sonnet response with tools

.. dropdown:: Use context_schema for model configuration

You can use ``context_schema`` to configure models via runtime context instead
of the configurable pattern, which aligns with LangGraph v0.6+ recommendations.

.. code-block:: python

# pip install langchain langchain-openai
from langchain.chat_models import init_chat_model
from typing import NamedTuple

class ModelContext(NamedTuple):
model: str
temperature: float

# Create model with context_schema
configurable_model = init_chat_model(
context_schema=ModelContext,
model_provider="openai"
)

# Configure model via context instead of configurable
configurable_model.invoke(
"What's the weather like?",
context={"model": "gpt-4o", "temperature": 0.7}
)
# GPT-4o response with temperature 0.7

configurable_model.invoke(
"What's the weather like?",
context=ModelContext(model="gpt-3.5-turbo", temperature=0.2)
)
# GPT-3.5-turbo response with temperature 0.2

.. versionadded:: 0.2.7

.. versionchanged:: 0.2.8
@@ -313,6 +354,12 @@

Support for Deepseek, IBM, Nvidia, and xAI models added.

.. versionchanged:: 0.3.20

Support for ``context_schema`` added. Model parameters can now be passed
via runtime context instead of ``config["configurable"]``, aligning with
LangGraph v0.6+ recommendations for static runtime context.

""" # noqa: E501
if not model and not configurable_fields:
configurable_fields = ("model", "model_provider")
@@ -325,7 +372,7 @@
stacklevel=2,
)

if not configurable_fields:
if not configurable_fields and not context_schema:
return _init_chat_model_helper(
cast("str", model),
model_provider=model_provider,
@@ -339,6 +386,7 @@
default_config=kwargs,
config_prefix=config_prefix,
configurable_fields=configurable_fields,
context_schema=context_schema,
)


@@ -558,6 +606,7 @@
default_config: Optional[dict] = None,
configurable_fields: Union[Literal["any"], list[str], tuple[str, ...]] = "any",
config_prefix: str = "",
context_schema: Optional[type] = None,
queued_declarative_operations: Sequence[tuple[str, tuple, dict]] = (),
) -> None:
self._default_config: dict = default_config or {}
@@ -571,6 +620,7 @@
if config_prefix and not config_prefix.endswith("_")
else config_prefix
)
self._context_schema = context_schema
self._queued_declarative_operations: list[tuple[str, tuple, dict]] = list(
queued_declarative_operations,
)
@@ -593,6 +643,7 @@
if isinstance(self._configurable_fields, list)
else self._configurable_fields,
config_prefix=self._config_prefix,
context_schema=self._context_schema,
queued_declarative_operations=queued_declarative_operations,
)

@@ -614,15 +665,53 @@

def _model_params(self, config: Optional[RunnableConfig]) -> dict:
config = ensure_config(config)
model_params = {
k.removeprefix(self._config_prefix): v
for k, v in config.get("configurable", {}).items()
if k.startswith(self._config_prefix)
}
if self._configurable_fields != "any":
model_params = {
k: v for k, v in model_params.items() if k in self._configurable_fields
model_params = {}

# Extract parameters from configurable (legacy approach)
if self._configurable_fields:
configurable_params = {
k.removeprefix(self._config_prefix): v
for k, v in config.get("configurable", {}).items()
if k.startswith(self._config_prefix)
}
if self._configurable_fields != "any":
configurable_params = {
k: v
for k, v in configurable_params.items()
if k in self._configurable_fields
}
model_params.update(configurable_params)

# Extract parameters from context (new approach)
if self._context_schema and "context" in config:
context = config["context"]
if context:
# Extract recognized model parameters from the context, which may be
# a dict or an object with attributes (e.g. a NamedTuple or dataclass)
recognized_params = (
"model", "model_provider", "temperature", "max_tokens",
"timeout", "max_retries", "base_url",
)
if isinstance(context, dict):
context_params = {
k: context[k] for k in recognized_params if k in context
}
else:
context_params = {
k: getattr(context, k)
for k in recognized_params
if hasattr(context, k)
}

model_params.update(context_params)

return model_params

def with_config(
@@ -654,6 +743,7 @@
if isinstance(self._configurable_fields, list)
else self._configurable_fields,
config_prefix=self._config_prefix,
context_schema=self._context_schema,
queued_declarative_operations=queued_declarative_operations,
)

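The parameter-resolution logic above can be illustrated with a short, self-contained sketch (not part of this diff). It assumes only the behavior shown in ``_model_params``: because context values are read via attribute or key access, a plain dataclass should work as well as the NamedTuple used in the examples, and ``configurable`` and ``context`` parameters are merged into a single dict.

# Illustration only — assumes the behavior introduced by this PR.
# pip install langchain langchain-openai
from dataclasses import dataclass

from langchain.chat_models import init_chat_model


@dataclass
class ModelContext:
    model: str
    temperature: float


model = init_chat_model(
    configurable_fields=["max_tokens"],
    context_schema=ModelContext,
    model_provider="openai",
)

# "configurable" entries are filtered by configurable_fields; recognized
# attributes or keys of "context" are merged on top.
params = model._model_params(
    {
        "configurable": {"max_tokens": 100},
        "context": ModelContext(model="gpt-4o", temperature=0.5),
    }
)
assert params == {"max_tokens": 100, "model": "gpt-4o", "temperature": 0.5}
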
113 changes: 113 additions & 0 deletions libs/langchain/tests/unit_tests/chat_models/test_base.py
@@ -288,3 +288,116 @@ def test_configurable_with_default() -> None:
prompt = ChatPromptTemplate.from_messages([("system", "foo")])
chain = prompt | model_with_config
assert isinstance(chain, RunnableSequence)


@pytest.mark.requires("langchain_openai")
@mock.patch.dict(
os.environ,
{"OPENAI_API_KEY": "foo"},
clear=True,
)
def test_context_schema() -> None:
"""Test context_schema functionality with init_chat_model.

Verifies that:
- Context schema parameter is accepted
- Model parameters can be passed via context instead of configurable
- Context parameters override default parameters
- Both dict and object context formats work
"""
from typing import NamedTuple

# Define a context schema
class ModelContext(NamedTuple):
model: str
temperature: float

# Create configurable model with context_schema
model = init_chat_model(
context_schema=ModelContext,
model_provider="openai"
)

# Test that model is configurable
assert hasattr(model, '_context_schema')
assert model._context_schema == ModelContext

# Test that context parameters work with dict format
model_params = model._model_params({
"context": {"model": "gpt-4o", "temperature": 0.5}
})
assert model_params["model"] == "gpt-4o"
assert model_params["temperature"] == 0.5

# Test that context parameters work with object format
context_obj = ModelContext(model="gpt-3.5-turbo", temperature=0.7)
model_params = model._model_params({
"context": context_obj
})
assert model_params["model"] == "gpt-3.5-turbo"
assert model_params["temperature"] == 0.7


@pytest.mark.requires("langchain_openai")
@mock.patch.dict(
os.environ,
{"OPENAI_API_KEY": "foo"},
clear=True,
)
def test_context_schema_with_defaults() -> None:
"""Test context_schema with default parameters."""
from typing import NamedTuple

class ModelContext(NamedTuple):
model: str
temperature: float

# Create model with defaults and context_schema
model = init_chat_model(
model="gpt-4o",
temperature=0.2,
context_schema=ModelContext,
model_provider="openai"
)

# Test that defaults are preserved when no context
model_params = model._model_params({})
assert "model" not in model_params # No context provided

# Test that context overrides defaults
model_params = model._model_params({
"context": {"model": "gpt-3.5-turbo", "temperature": 0.8}
})
assert model_params["model"] == "gpt-3.5-turbo"
assert model_params["temperature"] == 0.8


@pytest.mark.requires("langchain_openai")
@mock.patch.dict(
os.environ,
{"OPENAI_API_KEY": "foo"},
clear=True,
)
def test_context_schema_with_configurable() -> None:
"""Test that context_schema works alongside configurable fields."""
from typing import NamedTuple

class ModelContext(NamedTuple):
model: str
temperature: float

# Create model with both configurable and context_schema
model = init_chat_model(
configurable_fields=["max_tokens"],
context_schema=ModelContext,
model_provider="openai"
)

# Test that both configurable and context work together
model_params = model._model_params({
"configurable": {"max_tokens": 100},
"context": {"model": "gpt-4o", "temperature": 0.5}
})
assert model_params["max_tokens"] == 100 # From configurable
assert model_params["model"] == "gpt-4o" # From context
assert model_params["temperature"] == 0.5 # From context