From 9d15bfd95741b9de230847d5d225d389f13b8755 Mon Sep 17 00:00:00 2001
From: saheer
Date: Sun, 21 Sep 2025 11:21:23 +0530
Subject: [PATCH 1/2] feat(chat_models): add context_schema support to init_chat_model

Add support for configuring init_chat_model through context_schema,
aligning with LangGraph v0.6+ recommendations for static runtime context.

- Add context_schema parameter to init_chat_model function
- Modify _ConfigurableModel to extract parameters from runtime context
- Support both dict and object context formats
- Maintain backward compatibility with existing configurable approach
- Add comprehensive tests for new functionality
- Update documentation with usage examples

Fixes #32954
---
 libs/langchain/langchain/chat_models/base.py  | 104 ++++++++++++++--
 .../tests/unit_tests/chat_models/test_base.py | 113 ++++++++++++++++++
 2 files changed, 208 insertions(+), 9 deletions(-)

diff --git a/libs/langchain/langchain/chat_models/base.py b/libs/langchain/langchain/chat_models/base.py
index d0f54b6a01368..940ab98988200 100644
--- a/libs/langchain/langchain/chat_models/base.py
+++ b/libs/langchain/langchain/chat_models/base.py
@@ -76,6 +76,7 @@ def init_chat_model(
         Union[Literal["any"], list[str], tuple[str, ...]]
     ] = None,
     config_prefix: Optional[str] = None,
+    context_schema: Optional[type] = None,
     **kwargs: Any,
 ) -> Union[BaseChatModel, _ConfigurableModel]:
     """Initialize a ChatModel in a single line using the model's name and provider.
@@ -149,6 +150,10 @@ def init_chat_model(
             ``config["configurable"]["{config_prefix}_{param}"]`` keys. If
             ``'config_prefix'`` is an empty string then model will be configurable
             via ``config["configurable"]["{param}"]``.
+        context_schema: An optional schema type for runtime context. When provided,
+            model parameters can be passed via the context argument at runtime instead
+            of through config["configurable"]. This aligns with LangGraph v0.6+
+            recommendations for static runtime context.
         temperature: Model temperature.
         max_tokens: Max output tokens.
         timeout: The maximum time (in seconds) to wait for a response from the model
@@ -286,6 +291,40 @@ class GetPopulation(BaseModel):
             )
             # Claude-3.5 sonnet response with tools
 
+    .. dropdown:: Use context_schema for model configuration
+
+        You can use ``context_schema`` to configure models via runtime context instead
+        of the configurable pattern, which aligns with LangGraph v0.6+ recommendations.
+
+        .. code-block:: python
+
+            # pip install langchain langchain-openai
+            from langchain.chat_models import init_chat_model
+            from typing import NamedTuple
+
+            class ModelContext(NamedTuple):
+                model: str
+                temperature: float
+
+            # Create model with context_schema
+            configurable_model = init_chat_model(
+                context_schema=ModelContext,
+                model_provider="openai"
+            )
+
+            # Configure model via context instead of configurable
+            configurable_model.invoke(
+                "What's the weather like?",
+                context={"model": "gpt-4o", "temperature": 0.7}
+            )
+            # GPT-4o response with temperature 0.7
+
+            configurable_model.invoke(
+                "What's the weather like?",
+                context=ModelContext(model="gpt-3.5-turbo", temperature=0.2)
+            )
+            # GPT-3.5-turbo response with temperature 0.2
+
     .. versionadded:: 0.2.7
 
     .. versionchanged:: 0.2.8
@@ -310,6 +349,12 @@ class GetPopulation(BaseModel):
 
         Support for Deepseek, IBM, Nvidia, and xAI models added.
 
+    .. versionchanged:: 0.3.20
+
+        Support for ``context_schema`` added. Model parameters can now be passed
+        via runtime context instead of config["configurable"], aligning with
+        LangGraph v0.6+ recommendations for static runtime context.
+
     """  # noqa: E501
     if not model and not configurable_fields:
         configurable_fields = ("model", "model_provider")
@@ -322,7 +367,7 @@ class GetPopulation(BaseModel):
             stacklevel=2,
         )
 
-    if not configurable_fields:
+    if not configurable_fields and not context_schema:
         return _init_chat_model_helper(
             cast("str", model),
             model_provider=model_provider,
@@ -336,6 +381,7 @@ class GetPopulation(BaseModel):
         default_config=kwargs,
         config_prefix=config_prefix,
         configurable_fields=configurable_fields,
+        context_schema=context_schema,
     )
 
 
@@ -555,6 +601,7 @@ def __init__(
         default_config: Optional[dict] = None,
         configurable_fields: Union[Literal["any"], list[str], tuple[str, ...]] = "any",
         config_prefix: str = "",
+        context_schema: Optional[type] = None,
         queued_declarative_operations: Sequence[tuple[str, tuple, dict]] = (),
     ) -> None:
         self._default_config: dict = default_config or {}
@@ -568,6 +615,7 @@ def __init__(
             if config_prefix and not config_prefix.endswith("_")
             else config_prefix
         )
+        self._context_schema = context_schema
         self._queued_declarative_operations: list[tuple[str, tuple, dict]] = list(
             queued_declarative_operations,
         )
@@ -590,6 +638,7 @@ def queue(*args: Any, **kwargs: Any) -> _ConfigurableModel:
             if isinstance(self._configurable_fields, list)
             else self._configurable_fields,
             config_prefix=self._config_prefix,
+            context_schema=self._context_schema,
             queued_declarative_operations=queued_declarative_operations,
         )
 
@@ -611,15 +660,51 @@ def _model(self, config: Optional[RunnableConfig] = None) -> Runnable:
 
     def _model_params(self, config: Optional[RunnableConfig]) -> dict:
         config = ensure_config(config)
-        model_params = {
-            k.removeprefix(self._config_prefix): v
-            for k, v in config.get("configurable", {}).items()
-            if k.startswith(self._config_prefix)
-        }
-        if self._configurable_fields != "any":
-            model_params = {
-                k: v for k, v in model_params.items() if k in self._configurable_fields
+        model_params = {}
+
+        # Extract parameters from configurable (legacy approach)
+        if self._configurable_fields:
+            configurable_params = {
+                k.removeprefix(self._config_prefix): v
+                for k, v in config.get("configurable", {}).items()
+                if k.startswith(self._config_prefix)
             }
+            if self._configurable_fields != "any":
+                configurable_params = {
+                    k: v for k, v in configurable_params.items() if k in self._configurable_fields
+                }
+            model_params.update(configurable_params)
+
+        # Extract parameters from context (new approach)
+        if self._context_schema and "context" in config:
+            context = config["context"]
+            if context:
+                # Extract model-related parameters from context
+                context_params = {}
+                if hasattr(context, "model"):
+                    context_params["model"] = context.model
+                if hasattr(context, "model_provider"):
+                    context_params["model_provider"] = context.model_provider
+                if hasattr(context, "temperature"):
+                    context_params["temperature"] = context.temperature
+                if hasattr(context, "max_tokens"):
+                    context_params["max_tokens"] = context.max_tokens
+                if hasattr(context, "timeout"):
+                    context_params["timeout"] = context.timeout
+                if hasattr(context, "max_retries"):
+                    context_params["max_retries"] = context.max_retries
+                if hasattr(context, "base_url"):
+                    context_params["base_url"] = context.base_url
+
+                # If context is a dict, try to access as dict
+                if isinstance(context, dict):
+                    for param in ["model", "model_provider", "temperature", "max_tokens",
+                                  "timeout", "max_retries", "base_url"]:
+                        if param in context:
+                            context_params[param] = context[param]
+
+                model_params.update(context_params)
+
         return model_params
 
     def with_config(
@@ -651,6 +736,7 @@ def with_config(
             if isinstance(self._configurable_fields, list)
             else self._configurable_fields,
             config_prefix=self._config_prefix,
+            context_schema=self._context_schema,
             queued_declarative_operations=queued_declarative_operations,
         )
 
diff --git a/libs/langchain/tests/unit_tests/chat_models/test_base.py b/libs/langchain/tests/unit_tests/chat_models/test_base.py
index 611f251b8162c..4180e0cfd01d6 100644
--- a/libs/langchain/tests/unit_tests/chat_models/test_base.py
+++ b/libs/langchain/tests/unit_tests/chat_models/test_base.py
@@ -289,3 +289,116 @@ def test_configurable_with_default() -> None:
     prompt = ChatPromptTemplate.from_messages([("system", "foo")])
     chain = prompt | model_with_config
     assert isinstance(chain, RunnableSequence)
+
+
+@pytest.mark.requires("langchain_openai")
+@mock.patch.dict(
+    os.environ,
+    {"OPENAI_API_KEY": "foo"},
+    clear=True,
+)
+def test_context_schema() -> None:
+    """Test context_schema functionality with init_chat_model.
+
+    Verifies that:
+    - Context schema parameter is accepted
+    - Model parameters can be passed via context instead of configurable
+    - Context parameters override default parameters
+    - Both dict and object context formats work
+    """
+    from typing import NamedTuple
+
+    # Define a context schema
+    class ModelContext(NamedTuple):
+        model: str
+        temperature: float
+
+    # Create configurable model with context_schema
+    model = init_chat_model(
+        context_schema=ModelContext,
+        model_provider="openai"
+    )
+
+    # Test that model is configurable
+    assert hasattr(model, '_context_schema')
+    assert model._context_schema == ModelContext
+
+    # Test that context parameters work with dict format
+    model_params = model._model_params({
+        "context": {"model": "gpt-4o", "temperature": 0.5}
+    })
+    assert model_params["model"] == "gpt-4o"
+    assert model_params["temperature"] == 0.5
+
+    # Test that context parameters work with object format
+    context_obj = ModelContext(model="gpt-3.5-turbo", temperature=0.7)
+    model_params = model._model_params({
+        "context": context_obj
+    })
+    assert model_params["model"] == "gpt-3.5-turbo"
+    assert model_params["temperature"] == 0.7
+
+
+@pytest.mark.requires("langchain_openai")
+@mock.patch.dict(
+    os.environ,
+    {"OPENAI_API_KEY": "foo"},
+    clear=True,
+)
+def test_context_schema_with_defaults() -> None:
+    """Test context_schema with default parameters."""
+    from typing import NamedTuple
+
+    class ModelContext(NamedTuple):
+        model: str
+        temperature: float
+
+    # Create model with defaults and context_schema
+    model = init_chat_model(
+        model="gpt-4o",
+        temperature=0.2,
+        context_schema=ModelContext,
+        model_provider="openai"
+    )
+
+    # Test that defaults are preserved when no context
+    model_params = model._model_params({})
+    assert "model" not in model_params  # No context provided
+
+    # Test that context overrides defaults
+    model_params = model._model_params({
+        "context": {"model": "gpt-3.5-turbo", "temperature": 0.8}
+    })
+    assert model_params["model"] == "gpt-3.5-turbo"
+    assert model_params["temperature"] == 0.8
+
+
+@pytest.mark.requires("langchain_openai")
+@mock.patch.dict(
+    os.environ,
+    {"OPENAI_API_KEY": "foo"},
+    clear=True,
+)
+def test_context_schema_with_configurable() -> None:
+    """Test that context_schema works alongside configurable fields."""
+    from typing import NamedTuple
+
+    class ModelContext(NamedTuple):
+        model: str
+        temperature: float
+
+    # Create model with both configurable and context_schema
+    model = init_chat_model(
+        configurable_fields=["max_tokens"],
+        context_schema=ModelContext,
+        model_provider="openai"
+    )
+
+    # Test that both configurable and context work together
+    model_params = model._model_params({
+        "configurable": {"max_tokens": 100},
+        "context": {"model": "gpt-4o", "temperature": 0.5}
+    })
+    assert model_params["max_tokens"] == 100  # From configurable
+    assert model_params["model"] == "gpt-4o"  # From context
+    assert model_params["temperature"] == 0.5  # From context

From 8f8fba33e306e16bd2a1c5a84a56d2ae553155e7 Mon Sep 17 00:00:00 2001
From: saheer
Date: Sun, 21 Sep 2025 11:34:18 +0530
Subject: [PATCH 2/2] fix: address line length linting issues

- Break long lines in docstrings and parameter lists
- Shorten inline comments for better readability
- Maintain backward compatibility logic consistency
---
 libs/langchain/langchain/chat_models/base.py | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/libs/langchain/langchain/chat_models/base.py b/libs/langchain/langchain/chat_models/base.py
index 940ab98988200..e6f39832df236 100644
--- a/libs/langchain/langchain/chat_models/base.py
+++ b/libs/langchain/langchain/chat_models/base.py
@@ -83,11 +83,13 @@ def init_chat_model(
     .. note:: Must have the integration package corresponding to the model provider
         installed.
 
-        You should look at the `provider integration's API reference `__
+        You should look at the `provider integration's API reference
+        `__
         to see what parameters are supported by the model.
 
     Args:
-        model: The name of the model, e.g. ``'o3-mini'``, ``'claude-3-5-sonnet-latest'``. You can
+        model: The name of the model, e.g. ``'o3-mini'``,
+            ``'claude-3-5-sonnet-latest'``. You can
             also specify model and model provider in a single argument using
             ``'{model_provider}:{model}'`` format, e.g. ``'openai:o1'``.
         model_provider: The model provider if not specified as part of model arg (see
@@ -226,7 +228,7 @@ def init_chat_model(
             configurable_model_with_default = init_chat_model(
                 "openai:gpt-4o",
-                configurable_fields="any",  # this allows us to configure other params like temperature, max_tokens, etc at runtime.
+                configurable_fields="any",  # configure params at runtime
                 config_prefix="foo",
                 temperature=0,
             )
@@ -698,8 +700,10 @@ def _model_params(self, config: Optional[RunnableConfig]) -> dict:
 
                 # If context is a dict, try to access as dict
                 if isinstance(context, dict):
-                    for param in ["model", "model_provider", "temperature", "max_tokens",
-                                  "timeout", "max_retries", "base_url"]:
+                    for param in [
+                        "model", "model_provider", "temperature", "max_tokens",
+                        "timeout", "max_retries", "base_url"
+                    ]:
                         if param in context:
                             context_params[param] = context[param]
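
A minimal, self-contained sketch (not part of either patch) of the extraction pattern the new _model_params logic relies on: key lookup for dict contexts, attribute access for schema objects. The helper name extract_context_params and the _CONTEXT_PARAMS tuple are illustrative only.

from typing import Any, NamedTuple

# Parameters the patch's context handling recognizes.
_CONTEXT_PARAMS = (
    "model",
    "model_provider",
    "temperature",
    "max_tokens",
    "timeout",
    "max_retries",
    "base_url",
)


def extract_context_params(context: Any) -> dict:
    """Collect known model parameters from a dict or attribute-style context."""
    if isinstance(context, dict):
        return {k: context[k] for k in _CONTEXT_PARAMS if k in context}
    return {k: getattr(context, k) for k in _CONTEXT_PARAMS if hasattr(context, k)}


class ModelContext(NamedTuple):
    model: str
    temperature: float


# Dict-style and object-style contexts yield the same parameter mapping.
assert extract_context_params({"model": "gpt-4o", "temperature": 0.5}) == {
    "model": "gpt-4o",
    "temperature": 0.5,
}
assert extract_context_params(ModelContext("gpt-4o", 0.5)) == {
    "model": "gpt-4o",
    "temperature": 0.5,
}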