Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 29 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -104,6 +104,35 @@ For either method, you can set the required credentials as environment variables
export GOOGLE_APPLICATION_CREDENTIALS='/path/to/your/service-account-key.json'
```

#### Option 3: [MiniMax](https://www.minimaxi.com/) (Alternative LLM provider)

The samples also support [MiniMax](https://www.minimaxi.com/) as an
alternative LLM backend. MiniMax offers powerful models such as MiniMax-M2.7,
with a context window of up to 1M tokens, accessible via an OpenAI-compatible API.

1. Obtain a MiniMax API key from [MiniMax Platform](https://platform.minimaxi.com/).
2. Set the required environment variables.

- **As environment variables:**

```sh
export LLM_PROVIDER='minimax'
export MINIMAX_API_KEY='your_minimax_key'
```

- **In a `.env` file:**

```sh
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

low

For .env file examples, it's more accurate to use the env language specifier or no specifier at all, rather than sh. .env files have their own simple key-value syntax and are not shell scripts.

Suggested change
```sh
```env

LLM_PROVIDER='minimax'
MINIMAX_API_KEY='your_minimax_key'
```

You can optionally override the default model (MiniMax-M2.7):

```sh
export LLM_MODEL='MiniMax-M2.7-highspeed'
```

### How to Run a Scenario

To run a specific scenario, follow the instructions in its `README.md`. It will
Expand Down
1 change: 1 addition & 0 deletions samples/python/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ dependencies = [
"google-adk",
"google-genai",
"httpx",
"openai",
"requests",
"ap2"
]
Expand Down
12 changes: 10 additions & 2 deletions samples/python/src/common/base_server_executor.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,12 +37,13 @@
from a2a.types import TextPart
from a2a.utils import message
from ap2.types.mandate import PAYMENT_MANDATE_DATA_KEY
from google import genai
from ap2.types.mandate import PaymentMandate
from common import message_utils
from common import watch_log
from common.a2a_extension_utils import EXTENSION_URI
from common.function_call_resolver import FunctionCallResolver
from common.llm_config import LLMProvider
from common.llm_config import get_provider
from common.validation import validate_payment_mandate_signature

DataPartContent = dict[str, Any]
Expand All @@ -68,7 +69,14 @@ def __init__(
self._supported_extension_uris = {ext.uri for ext in supported_extensions}
else:
self._supported_extension_uris = set()
self._client = genai.Client()

provider = get_provider()
if provider == LLMProvider.GOOGLE:
from google import genai
self._client = genai.Client()
else:
self._client = None

self._tools = tools
self._tool_resolver = FunctionCallResolver(
self._client, self._tools, system_prompt
Expand Down
71 changes: 51 additions & 20 deletions samples/python/src/common/function_call_resolver.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,9 @@

The FunctionCallResolver uses a LLM to determine which tool to
use based on the instructions provided.

Supports both Google GenAI and MiniMax (via OpenAI-compatible API) backends.
Set ``LLM_PROVIDER=minimax`` and ``MINIMAX_API_KEY`` to use MiniMax.
"""

import logging
Expand All @@ -26,6 +29,10 @@
from google import genai
from google.genai import types

from common.llm_config import LLMProvider
from common.llm_config import get_model
from common.llm_config import get_provider


DataPartContent = dict[str, Any]
Tool = Callable[[list[DataPartContent], TaskUpdater, Task | None], Any]
Expand All @@ -36,35 +43,42 @@ class FunctionCallResolver:

def __init__(
self,
llm_client: genai.Client,
llm_client: genai.Client | None,
tools: list[Tool],
instructions: str = "You are a helpful assistant.",
):
"""Initialization.

Args:
llm_client: The LLM client.
llm_client: The LLM client. May be ``None`` when using a non-Google
provider (e.g. MiniMax).
tools: The list of tools that a request can be resolved to.
instructions: The instructions to guide the LLM.
"""
self._provider = get_provider()
self._model = get_model()
self._tools = tools
self._instructions = instructions
self._client = llm_client
function_declarations = [
types.FunctionDeclaration(
name=tool.__name__, description=tool.__doc__
)
for tool in tools
]
self._config = types.GenerateContentConfig(
system_instruction=instructions,
tools=[types.Tool(function_declarations=function_declarations)],
automatic_function_calling=types.AutomaticFunctionCallingConfig(
disable=True
),
# Force the model to call 'any' function, instead of chatting.
tool_config=types.ToolConfig(
function_calling_config=types.FunctionCallingConfig(mode="ANY")
),
)

if self._provider == LLMProvider.GOOGLE:
function_declarations = [
types.FunctionDeclaration(
name=tool.__name__, description=tool.__doc__
)
for tool in tools
]
self._config = types.GenerateContentConfig(
system_instruction=instructions,
tools=[types.Tool(function_declarations=function_declarations)],
automatic_function_calling=types.AutomaticFunctionCallingConfig(
disable=True
),
# Force the model to call 'any' function, instead of chatting.
tool_config=types.ToolConfig(
function_calling_config=types.FunctionCallingConfig(mode="ANY")
),
)

def determine_tool_to_use(self, prompt: str) -> str:
"""Determines which tool to use based on a user's prompt.
Expand All @@ -79,9 +93,15 @@ def determine_tool_to_use(self, prompt: str) -> str:
The name of the tool function that the model has determined should be
called. If no suitable tool is found, it returns "Unknown".
"""
if self._provider == LLMProvider.MINIMAX:
return self._determine_tool_minimax(prompt)

return self._determine_tool_google(prompt)

def _determine_tool_google(self, prompt: str) -> str:
"""Resolve the tool using Google GenAI."""
response = self._client.models.generate_content(
model="gemini-2.5-flash",
model=self._model,
contents=prompt,
config=self._config,
)
Expand All @@ -98,3 +118,14 @@ def determine_tool_to_use(self, prompt: str) -> str:
return part.function_call.name

return "Unknown"

def _determine_tool_minimax(self, prompt: str) -> str:
    """Resolve the tool using MiniMax via OpenAI-compatible API."""
    # Imported lazily so that merely loading this module does not require
    # the MiniMax client (and its openai dependency) when the Google
    # provider is active — NOTE(review): confirm this is the intent.
    from common.minimax_client import minimax_resolve_function_call

    return minimax_resolve_function_call(
        model=self._model,
        tools=self._tools,
        system_prompt=self._instructions,
        user_prompt=prompt,
    )
67 changes: 67 additions & 0 deletions samples/python/src/common/llm_config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Centralized LLM provider configuration.

Reads the LLM_PROVIDER and LLM_MODEL environment variables to determine which
LLM backend to use. Supported providers:

* ``google`` – Google GenAI / Gemini (default)
* ``minimax`` – MiniMax via OpenAI-compatible API
"""

import enum
import os


class LLMProvider(enum.Enum):
    """Supported LLM provider backends.

    The enum values match the accepted (lower-cased) settings of the
    ``LLM_PROVIDER`` environment variable.
    """

    GOOGLE = "google"  # Google GenAI / Gemini (default)
    MINIMAX = "minimax"  # MiniMax via OpenAI-compatible API


# Default model names per provider, used by get_model() when the LLM_MODEL
# environment variable is not set.
_DEFAULT_MODELS: dict[LLMProvider, str] = {
    LLMProvider.GOOGLE: "gemini-2.5-flash",
    LLMProvider.MINIMAX: "MiniMax-M2.7",
}


def get_provider() -> LLMProvider:
    """Return the configured LLM provider.

    Reads the ``LLM_PROVIDER`` environment variable (case-insensitive,
    surrounding whitespace ignored). Falls back to ``LLMProvider.GOOGLE``
    when the variable is unset, empty, or whitespace-only.

    Raises:
        ValueError: If ``LLM_PROVIDER`` is set to an unsupported value.
    """
    # Treat an empty or whitespace-only value the same as unset instead of
    # failing the enum lookup with a confusing error for ''.
    raw = os.environ.get("LLM_PROVIDER", "").strip().lower() or "google"
    try:
        return LLMProvider(raw)
    except ValueError:
        # 'from None' suppresses the enum's internal ValueError so the
        # traceback shows only this message, which already lists the
        # supported values.
        raise ValueError(
            f"Unsupported LLM_PROVIDER '{raw}'. "
            f"Supported values: {[p.value for p in LLMProvider]}"
        ) from None


def get_model() -> str:
    """Return the configured model name.

    Prefers the ``LLM_MODEL`` environment variable when it holds a
    non-empty value; otherwise falls back to the default model registered
    for the active provider.
    """
    override = os.environ.get("LLM_MODEL", "").strip()
    return override if override else _DEFAULT_MODELS[get_provider()]
Loading