
Commit bab0dfd

AssistantAgent to support Workbench (#6393)
Finishing up the work on workbench.

```python
import asyncio
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.ui import Console
from autogen_ext.models.openai import OpenAIChatCompletionClient
from autogen_ext.tools.mcp import StdioServerParams, McpWorkbench


async def main() -> None:
    params = StdioServerParams(
        command="uvx",
        args=["mcp-server-fetch"],
        read_timeout_seconds=60,
    )

    # You can also use `start()` and `stop()` to manage the session.
    async with McpWorkbench(server_params=params) as workbench:
        model_client = OpenAIChatCompletionClient(model="gpt-4.1-nano")
        assistant = AssistantAgent(
            name="Assistant",
            model_client=model_client,
            workbench=workbench,
            reflect_on_tool_use=True,
        )
        await Console(assistant.run_stream(task="Go to https://github.com/microsoft/autogen and tell me what you see."))


asyncio.run(main())
```
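The comment in the example above notes that `start()` and `stop()` can be used in place of the `async with` block. A minimal sketch of that variant (assumptions: same `mcp-server-fetch` server as above; the printed schema names are purely illustrative):

```python
import asyncio

from autogen_ext.tools.mcp import McpWorkbench, StdioServerParams


async def main() -> None:
    params = StdioServerParams(command="uvx", args=["mcp-server-fetch"])
    workbench = McpWorkbench(server_params=params)
    await workbench.start()  # open the MCP session explicitly
    try:
        tools = await workbench.list_tools()  # tool schemas served by the MCP server
        print([tool["name"] for tool in tools])
    finally:
        await workbench.stop()  # always close the session


asyncio.run(main())
```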
1 parent 0c9fd64 commit bab0dfd

File tree

3 files changed: +227 −37 lines

python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py

+103 −35 lines changed
```diff
@@ -32,7 +32,7 @@
     ModelFamily,
     SystemMessage,
 )
-from autogen_core.tools import BaseTool, FunctionTool
+from autogen_core.tools import BaseTool, FunctionTool, StaticWorkbench, Workbench
 from pydantic import BaseModel
 from typing_extensions import Self
 
```
```diff
@@ -66,6 +66,7 @@ class AssistantAgentConfig(BaseModel):
     name: str
     model_client: ComponentModel
     tools: List[ComponentModel] | None
+    workbench: ComponentModel | None = None
     handoffs: List[HandoffBase | str] | None = None
     model_context: ComponentModel | None = None
     memory: List[ComponentModel] | None = None
```
```diff
@@ -168,6 +169,8 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
         name (str): The name of the agent.
         model_client (ChatCompletionClient): The model client to use for inference.
         tools (List[BaseTool[Any, Any] | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None, optional): The tools to register with the agent.
+        workbench (Workbench | None, optional): The workbench to use for the agent.
+            Tools cannot be used when workbench is set and vice versa.
         handoffs (List[HandoffBase | str] | None, optional): The handoff configurations for the agent,
             allowing it to transfer to other agents by responding with a :class:`HandoffMessage`.
             The transfer is only executed when the team is in :class:`~autogen_agentchat.teams.Swarm`.
```
```diff
@@ -334,7 +337,45 @@ async def main() -> None:
 
         asyncio.run(main())
 
-    **Example 4: agent with structured output and tool**
+    **Example 4: agent with Model-Context Protocol (MCP) workbench**
+
+    The following example demonstrates how to create an assistant agent with
+    a model client and an :class:`~autogen_ext.tools.mcp.McpWorkbench` for
+    interacting with a Model-Context Protocol (MCP) server.
+
+    .. code-block:: python
+
+        import asyncio
+        from autogen_agentchat.agents import AssistantAgent
+        from autogen_agentchat.ui import Console
+        from autogen_ext.models.openai import OpenAIChatCompletionClient
+        from autogen_ext.tools.mcp import StdioServerParams, McpWorkbench
+
+
+        async def main() -> None:
+            params = StdioServerParams(
+                command="uvx",
+                args=["mcp-server-fetch"],
+                read_timeout_seconds=60,
+            )
+
+            # You can also use `start()` and `stop()` to manage the session.
+            async with McpWorkbench(server_params=params) as workbench:
+                model_client = OpenAIChatCompletionClient(model="gpt-4.1-nano")
+                assistant = AssistantAgent(
+                    name="Assistant",
+                    model_client=model_client,
+                    workbench=workbench,
+                    reflect_on_tool_use=True,
+                )
+                await Console(
+                    assistant.run_stream(task="Go to https://github.com/microsoft/autogen and tell me what you see.")
+                )
+
+
+        asyncio.run(main())
+
+    **Example 5: agent with structured output and tool**
 
     The following example demonstrates how to create an assistant agent with
     a model client configured to use structured output and a tool.
```
```diff
@@ -404,7 +445,7 @@ async def main() -> None:
         ---------- assistant ----------
         {"thoughts":"The user expresses a clear positive emotion by stating they are happy today, suggesting an upbeat mood.","response":"happy"}
 
-    **Example 5: agent with bounded model context**
+    **Example 6: agent with bounded model context**
 
     The following example shows how to use a
     :class:`~autogen_core.model_context.BufferedChatCompletionContext`
@@ -465,7 +506,7 @@ async def main() -> None:
         That's great! Blue is often associated with calmness and serenity. Do you have a specific shade of blue that you like, or any particular reason why it's your favorite?
         No, you didn't ask a question. I apologize for any misunderstanding. If you have something specific you'd like to discuss or ask, feel free to let me know!
 
-    **Example 6: agent with memory**
+    **Example 7: agent with memory**
 
     The following example shows how to use a list-based memory with the assistant agent.
     The memory is preloaded with some initial content.
@@ -525,7 +566,7 @@ async def main() -> None:
 
         Serve it with a side salad or some garlic bread to complete the meal! Enjoy your dinner!
 
-    **Example 7: agent with `o1-mini`**
+    **Example 8: agent with `o1-mini`**
 
     The following example shows how to use `o1-mini` model with the assistant agent.
 
@@ -561,7 +602,7 @@ async def main() -> None:
     See `o1 beta limitations <https://platform.openai.com/docs/guides/reasoning#beta-limitations>`_ for more details.
 
 
-    **Example 8: agent using reasoning model with custom model context.**
+    **Example 9: agent using reasoning model with custom model context.**
 
     The following example shows how to use a reasoning model (DeepSeek R1) with the assistant agent.
     The model context is used to filter out the thought field from the assistant message.
```
```diff
@@ -628,6 +669,7 @@ def __init__(
         model_client: ChatCompletionClient,
         *,
         tools: List[BaseTool[Any, Any] | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None = None,
+        workbench: Workbench | None = None,
         handoffs: List[HandoffBase | str] | None = None,
         model_context: ChatCompletionContext | None = None,
         description: str = "An agent that provides assistance with ability to use tools.",
```
```diff
@@ -711,6 +753,13 @@ def __init__(
                 f"Handoff names: {handoff_tool_names}; tool names: {tool_names}"
             )
 
+        if workbench is not None:
+            if self._tools:
+                raise ValueError("Tools cannot be used with a workbench.")
+            self._workbench = workbench
+        else:
+            self._workbench = StaticWorkbench(self._tools)
+
         if model_context is not None:
             self._model_context = model_context
         else:
```
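As a reviewer aid, here is a quick sketch of the behavior this check enforces (the `get_time` tool and the model name are illustrative, not part of the commit): `tools` alone is wrapped in a `StaticWorkbench`, while `tools` together with `workbench` is rejected.

```python
from autogen_agentchat.agents import AssistantAgent
from autogen_core.tools import FunctionTool, StaticWorkbench
from autogen_ext.models.openai import OpenAIChatCompletionClient


def get_time() -> str:
    """A trivial tool used only for illustration."""
    return "12:00"


model_client = OpenAIChatCompletionClient(model="gpt-4.1-nano")
tool = FunctionTool(get_time, description="Get the current time.")

# tools only: wrapped internally as StaticWorkbench([tool]).
agent = AssistantAgent(name="helper", model_client=model_client, tools=[tool])

# tools and workbench together: rejected by the check above.
try:
    AssistantAgent(
        name="helper2",
        model_client=model_client,
        tools=[tool],
        workbench=StaticWorkbench([tool]),
    )
except ValueError as e:
    print(e)  # Tools cannot be used with a workbench.
```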
```diff
@@ -774,7 +823,7 @@ async def on_messages_stream(
         model_context = self._model_context
         memory = self._memory
         system_messages = self._system_messages
-        tools = self._tools
+        workbench = self._workbench
         handoff_tools = self._handoff_tools
         handoffs = self._handoffs
         model_client = self._model_client
@@ -807,7 +856,7 @@
             model_client_stream=model_client_stream,
             system_messages=system_messages,
             model_context=model_context,
-            tools=tools,
+            workbench=workbench,
             handoff_tools=handoff_tools,
             agent_name=agent_name,
             cancellation_token=cancellation_token,
@@ -844,7 +893,7 @@
             agent_name=agent_name,
             system_messages=system_messages,
             model_context=model_context,
-            tools=tools,
+            workbench=workbench,
             handoff_tools=handoff_tools,
             handoffs=handoffs,
             model_client=model_client,
```
```diff
@@ -898,7 +947,7 @@ async def _call_llm(
         model_client_stream: bool,
         system_messages: List[SystemMessage],
         model_context: ChatCompletionContext,
-        tools: List[BaseTool[Any, Any]],
+        workbench: Workbench,
         handoff_tools: List[BaseTool[Any, Any]],
         agent_name: str,
         cancellation_token: CancellationToken,
@@ -910,13 +959,13 @@
         all_messages = await model_context.get_messages()
         llm_messages = cls._get_compatible_context(model_client=model_client, messages=system_messages + all_messages)
 
-        all_tools = tools + handoff_tools
+        tools = (await workbench.list_tools()) + handoff_tools
 
         if model_client_stream:
             model_result: Optional[CreateResult] = None
             async for chunk in model_client.create_stream(
                 llm_messages,
-                tools=all_tools,
+                tools=tools,
                 json_output=output_content_type,
                 cancellation_token=cancellation_token,
             ):
@@ -932,7 +981,7 @@
         else:
             model_result = await model_client.create(
                 llm_messages,
-                tools=all_tools,
+                tools=tools,
                 cancellation_token=cancellation_token,
                 json_output=output_content_type,
             )
```
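The substantive change in `_call_llm`: tool schemas are now fetched from the workbench on every call via `list_tools()`, so a dynamic workbench (such as an MCP server) can change its tool set between calls. A small sketch of what `list_tools()` hands the model client (the `add` tool is illustrative):

```python
import asyncio

from autogen_core.tools import FunctionTool, StaticWorkbench


def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b


async def main() -> None:
    workbench = StaticWorkbench([FunctionTool(add, description="Add two integers.")])
    # list_tools() returns ToolSchema dicts, which model_client.create() and
    # create_stream() accept directly in their `tools` argument.
    schemas = await workbench.list_tools()
    print([schema["name"] for schema in schemas])  # ['add']


asyncio.run(main())
```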
```diff
@@ -947,7 +996,7 @@ async def _process_model_result(
         agent_name: str,
         system_messages: List[SystemMessage],
         model_context: ChatCompletionContext,
-        tools: List[BaseTool[Any, Any]],
+        workbench: Workbench,
         handoff_tools: List[BaseTool[Any, Any]],
         handoffs: Dict[str, HandoffBase],
         model_client: ChatCompletionClient,
@@ -1006,7 +1055,7 @@
             *[
                 cls._execute_tool_call(
                     tool_call=call,
-                    tools=tools,
+                    workbench=workbench,
                     handoff_tools=handoff_tools,
                     agent_name=agent_name,
                     cancellation_token=cancellation_token,
```
```diff
@@ -1238,32 +1287,16 @@ def _summarize_tool_use(
     @staticmethod
     async def _execute_tool_call(
         tool_call: FunctionCall,
-        tools: List[BaseTool[Any, Any]],
+        workbench: Workbench,
         handoff_tools: List[BaseTool[Any, Any]],
         agent_name: str,
         cancellation_token: CancellationToken,
     ) -> Tuple[FunctionCall, FunctionExecutionResult]:
         """Execute a single tool call and return the result."""
+        # Load the arguments from the tool call.
         try:
-            all_tools = tools + handoff_tools
-            if not all_tools:
-                raise ValueError("No tools are available.")
-            tool = next((t for t in all_tools if t.name == tool_call.name), None)
-            if tool is None:
-                raise ValueError(f"The tool '{tool_call.name}' is not available.")
-            arguments: Dict[str, Any] = json.loads(tool_call.arguments) if tool_call.arguments else {}
-            result = await tool.run_json(arguments, cancellation_token)
-            result_as_str = tool.return_value_as_string(result)
-            return (
-                tool_call,
-                FunctionExecutionResult(
-                    content=result_as_str,
-                    call_id=tool_call.id,
-                    is_error=False,
-                    name=tool_call.name,
-                ),
-            )
-        except Exception as e:
+            arguments = json.loads(tool_call.arguments)
+        except json.JSONDecodeError as e:
             return (
                 tool_call,
                 FunctionExecutionResult(
@@ -1274,6 +1307,39 @@ async def _execute_tool_call(
                 ),
             )
 
+        # Check if the tool call is a handoff.
+        # TODO: consider creating a combined workbench to handle both handoff and normal tools.
+        for handoff_tool in handoff_tools:
+            if tool_call.name == handoff_tool.name:
+                # Run handoff tool call.
+                result = await handoff_tool.run_json(arguments, cancellation_token)
+                result_as_str = handoff_tool.return_value_as_string(result)
+                return (
+                    tool_call,
+                    FunctionExecutionResult(
+                        content=result_as_str,
+                        call_id=tool_call.id,
+                        is_error=False,
+                        name=tool_call.name,
+                    ),
+                )
+
+        # Handle normal tool call using workbench.
+        result = await workbench.call_tool(
+            name=tool_call.name,
+            arguments=arguments,
+            cancellation_token=cancellation_token,
+        )
+        return (
+            tool_call,
+            FunctionExecutionResult(
+                content=result.to_text(),
+                call_id=tool_call.id,
+                is_error=result.is_error,
+                name=tool_call.name,
+            ),
+        )
+
     async def on_reset(self, cancellation_token: CancellationToken) -> None:
         """Reset the assistant agent to its initialization state."""
         await self._model_context.clear()
```
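For reference, the surface the new workbench path relies on: `call_tool()` returns a result whose `to_text()` and `is_error` map directly onto `FunctionExecutionResult`, as in the hunk above. A minimal sketch (the `add` tool is again illustrative):

```python
import asyncio

from autogen_core.tools import FunctionTool, StaticWorkbench


def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b


async def main() -> None:
    workbench = StaticWorkbench([FunctionTool(add, description="Add two integers.")])
    result = await workbench.call_tool(name="add", arguments={"a": 1, "b": 2})
    # to_text() and is_error are exactly what feeds FunctionExecutionResult above.
    print(result.to_text(), result.is_error)  # 3 False


asyncio.run(main())
```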
```diff
@@ -1304,6 +1370,7 @@ def _to_config(self) -> AssistantAgentConfig:
             name=self.name,
             model_client=self._model_client.dump_component(),
             tools=[tool.dump_component() for tool in self._tools],
+            workbench=self._workbench.dump_component() if self._workbench else None,
             handoffs=list(self._handoffs.values()) if self._handoffs else None,
             model_context=self._model_context.dump_component(),
             memory=[memory.dump_component() for memory in self._memory] if self._memory else None,
```
```diff
@@ -1336,6 +1403,7 @@ def _from_config(cls, config: AssistantAgentConfig) -> Self:
             name=config.name,
             model_client=ChatCompletionClient.load_component(config.model_client),
             tools=[BaseTool.load_component(tool) for tool in config.tools] if config.tools else None,
+            workbench=Workbench.load_component(config.workbench) if config.workbench else None,
             handoffs=config.handoffs,
             model_context=ChatCompletionContext.load_component(config.model_context) if config.model_context else None,
             memory=[Memory.load_component(memory) for memory in config.memory] if config.memory else None,
```
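Finally, the two config hunks make the workbench survive a component round-trip. A sketch under the assumption that `McpWorkbench` and the OpenAI client are themselves serializable components (model name and server are illustrative):

```python
from autogen_agentchat.agents import AssistantAgent
from autogen_ext.models.openai import OpenAIChatCompletionClient
from autogen_ext.tools.mcp import McpWorkbench, StdioServerParams

agent = AssistantAgent(
    name="Assistant",
    model_client=OpenAIChatCompletionClient(model="gpt-4.1-nano"),
    workbench=McpWorkbench(
        server_params=StdioServerParams(command="uvx", args=["mcp-server-fetch"])
    ),
)
config = agent.dump_component()  # workbench serialized by _to_config above
restored = AssistantAgent.load_component(config)  # rebuilt via Workbench.load_component
```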
