Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
136 changes: 136 additions & 0 deletions samples/python/scenarios/a2a/human-present/x402/run.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,136 @@
#!/bin/bash

# A script to automate the execution of the x402 payment example.
# It starts all necessary servers and agents in the background,
# and then runs the client.
#
# Usage: run.sh [--payment-method METHOD]
#   --payment-method  Payment method to use (default: x402).

# Exit immediately if any command exits with a non-zero status, and make a
# pipeline fail when any of its stages fails (not just the last one).
set -eo pipefail

# Default payment method; may be overridden with --payment-method.
PAYMENT_METHOD="x402"

# Parse command-line arguments.
while [[ "$#" -gt 0 ]]; do
  case "$1" in
    --payment-method) PAYMENT_METHOD="${2:?--payment-method requires an argument}"; shift ;;
    *) echo "Unknown parameter passed: $1" >&2; exit 1 ;;
  esac
  shift
done

# Exported so the agent processes started below can read it.
export PAYMENT_METHOD

# Locations used throughout the script.
AGENTS_DIR="samples/python/src/roles"
LOG_DIR=".logs"

# The script must be launched from the repository root so that the
# relative agent path resolves.
if [[ ! -d "$AGENTS_DIR" ]]; then
  echo "Error: Directory '$AGENTS_DIR' not found."
  echo "Please run this script from the root of the repository."
  exit 1
fi

# Load local environment overrides, exporting every assignment.
if [[ -f .env ]]; then
  set -a
  source .env
  set +a
fi

# Require either an API key or an explicit opt-in to Vertex AI with ADC.
USE_VERTEXAI=$(printf "%s" "${GOOGLE_GENAI_USE_VERTEXAI}" | tr '[:upper:]' '[:lower:]')
if [[ -z "${GOOGLE_API_KEY}" && "${USE_VERTEXAI}" != "true" ]]; then
  echo "Please set your GOOGLE_API_KEY environment variable before running."
  echo "Alternatively, set GOOGLE_GENAI_USE_VERTEXAI=true to use Vertex AI with ADC."
  exit 1
fi

# Prepare and activate a Python virtual environment.
echo "Setting up the Python virtual environment..."

# Create the venv only on the first run.
[[ -d ".venv" ]] || uv venv

# The activation script lives in a different subdirectory on Windows.
if [[ "$OSTYPE" == msys* || "$OSTYPE" == cygwin* ]]; then
  # Windows (Git Bash, MSYS2, or Cygwin)
  source .venv/Scripts/activate
else
  # Unix/Linux/macOS
  source .venv/bin/activate
fi
echo "Virtual environment activated."

echo "Installing project in editable mode..."
uv pip install -e .

# Make sure the log directory exists.
mkdir -p "$LOG_DIR"

# This function is called automatically when the script exits (for any reason)
# to ensure all background processes are terminated.
# Terminate every background process this script started. Registered on the
# EXIT trap, so it runs on success, failure, or interruption alike.
cleanup() {
  echo ""
  echo "Shutting down background processes..."
  if [[ ${#pids[@]} -gt 0 ]]; then
    # 2>/dev/null hides "Terminated" noise and errors for processes that
    # have already exited on their own.
    kill "${pids[@]}" 2>/dev/null
    wait "${pids[@]}" 2>/dev/null
  fi
  echo "Cleanup complete."
}

# Run cleanup on EXIT so background processes are reaped no matter how the
# script terminates (success, failure, or Ctrl-C).
trap cleanup EXIT

# Sync once, explicitly, before any agent starts.
echo "Syncing virtual environment with uv sync..."
uv sync --package ap2-samples || {
  echo "Error: uv sync failed. Aborting deployment."
  exit 1
}
echo "Virtual environment synced successfully."

# Start from an empty log directory.
echo "Clearing the logs directory..."
rm -rf "$LOG_DIR"
mkdir -p "$LOG_DIR"

# PIDs of every background process, consumed by cleanup().
pids=()

echo ""
echo "Starting remote servers and agents as background processes..."

# The sync already happened above; --no-sync stops the servers launched in
# parallel below from colliding on a second, concurrent sync.
UV_RUN_CMD=("uv" "run" "--no-sync")

# Forward the local .env file to each agent when one is present.
if [[ -f ".env" ]]; then
  UV_RUN_CMD+=("--env-file" ".env")
fi

# Launch one remote agent in the background.
#   $1 - human-readable agent name   $2 - port it listens on
#   $3 - log file name               $4 - python module to run
# stdout/stderr go to the log file; the PID is recorded for cleanup().
start_agent() {
  local label=$1 port=$2 log_file=$3 module=$4
  echo "-> Starting the $label (port:$port log:$LOG_DIR/$log_file)..."
  "${UV_RUN_CMD[@]}" --package ap2-samples python -m "$module" \
    >"$LOG_DIR/$log_file" 2>&1 &
  pids+=("$!")
}

start_agent "Merchant Agent" 8001 merchant_agent.log roles.merchant_agent
start_agent "Credentials Provider" 8002 credentials_provider_agent.log roles.credentials_provider_agent
start_agent "Card Processor Agent" 8003 mpp_agent.log roles.merchant_payment_processor_agent

echo ""
echo "All remote servers are starting."

# The Shopping Agent runs in the foreground; the script blocks here until it
# exits, at which point the EXIT trap shuts the background agents down.
echo "Starting the Shopping Agent..."
"${UV_RUN_CMD[@]}" --package ap2-samples adk web --host 0.0.0.0 "$AGENTS_DIR"
20 changes: 20 additions & 0 deletions samples/python/src/common/config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Central configuration for LLM model settings."""

import os

DEFAULT_MODEL = "gemini-2.5-flash"
MODEL = os.environ.get("MODEL", DEFAULT_MODEL)
3 changes: 2 additions & 1 deletion samples/python/src/common/function_call_resolver.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
from google import genai
from google.genai import types

from common.config import MODEL

DataPartContent = dict[str, Any]
Tool = Callable[[list[DataPartContent], TaskUpdater, Task | None], Any]
Expand Down Expand Up @@ -81,7 +82,7 @@ def determine_tool_to_use(self, prompt: str) -> str:
"""

response = self._client.models.generate_content(
model="gemini-2.5-flash",
model=MODEL,
contents=prompt,
config=self._config,
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@
from ap2.types.payment_request import PaymentOptions
from ap2.types.payment_request import PaymentRequest
from common import message_utils
from common.config import MODEL
from common.system_utils import DEBUG_MODE_INSTRUCTIONS


Expand All @@ -68,7 +69,7 @@ async def find_items_workflow(
""" % DEBUG_MODE_INSTRUCTIONS

llm_response = llm_client.models.generate_content(
model="gemini-2.5-flash",
model=MODEL,
contents=prompt,
config={
"response_mime_type": "application/json",
Expand Down
3 changes: 2 additions & 1 deletion samples/python/src/roles/shopping_agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,13 +26,14 @@
from .subagents.payment_method_collector.agent import payment_method_collector
from .subagents.shipping_address_collector.agent import shipping_address_collector
from .subagents.shopper.agent import shopper
from common.config import MODEL
from common.retrying_llm_agent import RetryingLlmAgent
from common.system_utils import DEBUG_MODE_INSTRUCTIONS


root_agent = RetryingLlmAgent(
max_retries=5,
model="gemini-2.5-flash",
model=MODEL,
name="root_agent",
instruction="""
You are a shopping agent responsible for helping users find and
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,12 +26,13 @@
"""

from . import tools
from common.config import MODEL
from common.retrying_llm_agent import RetryingLlmAgent
from common.system_utils import DEBUG_MODE_INSTRUCTIONS


payment_method_collector = RetryingLlmAgent(
model="gemini-2.5-flash",
model=MODEL,
name="payment_method_collector",
max_retries=5,
instruction="""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,11 +27,12 @@
"""

from . import tools
from common.config import MODEL
from common.retrying_llm_agent import RetryingLlmAgent
from common.system_utils import DEBUG_MODE_INSTRUCTIONS

shipping_address_collector = RetryingLlmAgent(
model="gemini-2.5-flash",
model=MODEL,
name="shipping_address_collector",
max_retries=5,
instruction="""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,12 +25,13 @@
"""

from . import tools
from common.config import MODEL
from common.retrying_llm_agent import RetryingLlmAgent
from common.system_utils import DEBUG_MODE_INSTRUCTIONS


shopper = RetryingLlmAgent(
model="gemini-2.5-flash",
model=MODEL,
name="shopper",
max_retries=5,
instruction="""
Expand Down
Loading