    ModelFamily,
    SystemMessage,
)
- from autogen_core.tools import BaseTool, FunctionTool
+ from autogen_core.tools import BaseTool, FunctionTool, StaticWorkbench, Workbench
from pydantic import BaseModel
from typing_extensions import Self

@@ -66,6 +66,7 @@ class AssistantAgentConfig(BaseModel):
    name: str
    model_client: ComponentModel
    tools: List[ComponentModel] | None
+     workbench: ComponentModel | None = None
    handoffs: List[HandoffBase | str] | None = None
    model_context: ComponentModel | None = None
    memory: List[ComponentModel] | None = None
@@ -168,6 +169,8 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
        name (str): The name of the agent.
        model_client (ChatCompletionClient): The model client to use for inference.
        tools (List[BaseTool[Any, Any] | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None, optional): The tools to register with the agent.
+         workbench (Workbench | None, optional): The workbench to use for the agent.
+             Tools cannot be used when a workbench is set, and vice versa.
        handoffs (List[HandoffBase | str] | None, optional): The handoff configurations for the agent,
            allowing it to transfer to other agents by responding with a :class:`HandoffMessage`.
            The transfer is only executed when the team is in :class:`~autogen_agentchat.teams.Swarm`.
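To make the new `tools`/`workbench` exclusivity concrete, here is a minimal sketch (the `fetch_page` tool is hypothetical, purely for illustration): constructing the agent with either one works, but passing both raises a `ValueError`, per the `__init__` check later in this diff.

```python
from autogen_agentchat.agents import AssistantAgent
from autogen_core.tools import FunctionTool, StaticWorkbench
from autogen_ext.models.openai import OpenAIChatCompletionClient


async def fetch_page(url: str) -> str:
    """Hypothetical tool used only for illustration."""
    return f"Contents of {url}"


model_client = OpenAIChatCompletionClient(model="gpt-4o")
tool = FunctionTool(fetch_page, description="Fetch a web page.")

# Either register plain tools...
agent_with_tools = AssistantAgent("helper", model_client=model_client, tools=[tool])

# ...or hand the agent a workbench, but not both:
agent_with_workbench = AssistantAgent(
    "helper", model_client=model_client, workbench=StaticWorkbench([tool])
)

# Passing tools=[tool] together with workbench=... raises
# ValueError("Tools cannot be used with a workbench.")
```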
@@ -334,7 +337,45 @@ async def main() -> None:

        asyncio.run(main())

-     **Example 4: agent with structured output and tool**
+     **Example 4: agent with Model-Context Protocol (MCP) workbench**
+
+     The following example demonstrates how to create an assistant agent with
+     a model client and an :class:`~autogen_ext.tools.mcp.McpWorkbench` for
+     interacting with a Model-Context Protocol (MCP) server.
+
+     .. code-block:: python
+
+         import asyncio
+         from autogen_agentchat.agents import AssistantAgent
+         from autogen_agentchat.ui import Console
+         from autogen_ext.models.openai import OpenAIChatCompletionClient
+         from autogen_ext.tools.mcp import StdioServerParams, McpWorkbench
+
+
+         async def main() -> None:
+             params = StdioServerParams(
+                 command="uvx",
+                 args=["mcp-server-fetch"],
+                 read_timeout_seconds=60,
+             )
+
+             # You can also use `start()` and `stop()` to manage the session.
+             async with McpWorkbench(server_params=params) as workbench:
+                 model_client = OpenAIChatCompletionClient(model="gpt-4.1-nano")
+                 assistant = AssistantAgent(
+                     name="Assistant",
+                     model_client=model_client,
+                     workbench=workbench,
+                     reflect_on_tool_use=True,
+                 )
+                 await Console(
+                     assistant.run_stream(task="Go to https://github.com/microsoft/autogen and tell me what you see.")
+                 )
+
+
+         asyncio.run(main())
+
+     **Example 5: agent with structured output and tool**

    The following example demonstrates how to create an assistant agent with
    a model client configured to use structured output and a tool.
@@ -404,7 +445,7 @@ async def main() -> None:
        ---------- assistant ----------
        {"thoughts":"The user expresses a clear positive emotion by stating they are happy today, suggesting an upbeat mood.","response":"happy"}

-     **Example 5: agent with bounded model context**
+     **Example 6: agent with bounded model context**

    The following example shows how to use a
    :class:`~autogen_core.model_context.BufferedChatCompletionContext`
@@ -465,7 +506,7 @@ async def main() -> None:
        That's great! Blue is often associated with calmness and serenity. Do you have a specific shade of blue that you like, or any particular reason why it's your favorite?
        No, you didn't ask a question. I apologize for any misunderstanding. If you have something specific you'd like to discuss or ask, feel free to let me know!

-     **Example 6: agent with memory**
+     **Example 7: agent with memory**

    The following example shows how to use a list-based memory with the assistant agent.
    The memory is preloaded with some initial content.
@@ -525,7 +566,7 @@ async def main() -> None:

        Serve it with a side salad or some garlic bread to complete the meal! Enjoy your dinner!

-     **Example 7: agent with `o1-mini`**
+     **Example 8: agent with `o1-mini`**

    The following example shows how to use the `o1-mini` model with the assistant agent.

@@ -561,7 +602,7 @@ async def main() -> None:
    See `o1 beta limitations <https://platform.openai.com/docs/guides/reasoning#beta-limitations>`_ for more details.


-     **Example 8: agent using a reasoning model with a custom model context**
+     **Example 9: agent using a reasoning model with a custom model context**

    The following example shows how to use a reasoning model (DeepSeek R1) with the assistant agent.
    The model context is used to filter out the thought field from the assistant message.
@@ -628,6 +669,7 @@ def __init__(
        model_client: ChatCompletionClient,
        *,
        tools: List[BaseTool[Any, Any] | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None = None,
+         workbench: Workbench | None = None,
        handoffs: List[HandoffBase | str] | None = None,
        model_context: ChatCompletionContext | None = None,
        description: str = "An agent that provides assistance with ability to use tools.",
@@ -711,6 +753,13 @@ def __init__(
                f"Handoff names: {handoff_tool_names}; tool names: {tool_names}"
            )

+         if workbench is not None:
+             if self._tools:
+                 raise ValueError("Tools cannot be used with a workbench.")
+             self._workbench = workbench
+         else:
+             self._workbench = StaticWorkbench(self._tools)
+
        if model_context is not None:
            self._model_context = model_context
        else:
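Note the fallback branch above: when no workbench is supplied, the agent wraps its (possibly empty) tool list in a `StaticWorkbench`, so the rest of the class can target a single `Workbench` interface. A minimal sketch of the equivalent wrapping, using an assumed `add` function as the tool:

```python
import asyncio

from autogen_core.tools import FunctionTool, StaticWorkbench


async def add(a: int, b: int) -> int:
    """Assumed example tool."""
    return a + b


async def main() -> None:
    # Equivalent to what __init__ now does when only `tools` is given.
    workbench = StaticWorkbench([FunctionTool(add, description="Add two integers.")])
    # Downstream code only ever sees the Workbench interface.
    print(await workbench.list_tools())


asyncio.run(main())
```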
@@ -774,7 +823,7 @@ async def on_messages_stream(
        model_context = self._model_context
        memory = self._memory
        system_messages = self._system_messages
-         tools = self._tools
+         workbench = self._workbench
        handoff_tools = self._handoff_tools
        handoffs = self._handoffs
        model_client = self._model_client
@@ -807,7 +856,7 @@ async def on_messages_stream(
            model_client_stream=model_client_stream,
            system_messages=system_messages,
            model_context=model_context,
-             tools=tools,
+             workbench=workbench,
            handoff_tools=handoff_tools,
            agent_name=agent_name,
            cancellation_token=cancellation_token,
@@ -844,7 +893,7 @@ async def on_messages_stream(
            agent_name=agent_name,
            system_messages=system_messages,
            model_context=model_context,
-             tools=tools,
+             workbench=workbench,
            handoff_tools=handoff_tools,
            handoffs=handoffs,
            model_client=model_client,
@@ -898,7 +947,7 @@ async def _call_llm(
        model_client_stream: bool,
        system_messages: List[SystemMessage],
        model_context: ChatCompletionContext,
-         tools: List[BaseTool[Any, Any]],
+         workbench: Workbench,
        handoff_tools: List[BaseTool[Any, Any]],
        agent_name: str,
        cancellation_token: CancellationToken,
@@ -910,13 +959,13 @@ async def _call_llm(
        all_messages = await model_context.get_messages()
        llm_messages = cls._get_compatible_context(model_client=model_client, messages=system_messages + all_messages)

-         all_tools = tools + handoff_tools
+         tools = (await workbench.list_tools()) + handoff_tools

        if model_client_stream:
            model_result: Optional[CreateResult] = None
            async for chunk in model_client.create_stream(
                llm_messages,
-                 tools=all_tools,
+                 tools=tools,
                json_output=output_content_type,
                cancellation_token=cancellation_token,
            ):
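The key shape change in `_call_llm`: instead of concatenating tool objects, the agent now asks the workbench for its tool schemas on each call and appends the handoff tools; those schemas are what the model client receives as `tools=`. A sketch of the enumeration step (the `echo` tool, and the assumption that each schema is a mapping exposing a `"name"` key, are illustrative):

```python
import asyncio

from autogen_core.tools import FunctionTool, StaticWorkbench


async def echo(text: str) -> str:
    """Assumed example tool."""
    return text


async def main() -> None:
    workbench = StaticWorkbench([FunctionTool(echo, description="Echo the input text.")])
    # Mirrors `tools = (await workbench.list_tools()) + handoff_tools`.
    for schema in await workbench.list_tools():
        print(schema["name"])  # assumption: schemas expose a "name" field


asyncio.run(main())
```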
@@ -932,7 +981,7 @@ async def _call_llm(
        else:
            model_result = await model_client.create(
                llm_messages,
-                 tools=all_tools,
+                 tools=tools,
                cancellation_token=cancellation_token,
                json_output=output_content_type,
            )
@@ -947,7 +996,7 @@ async def _process_model_result(
        agent_name: str,
        system_messages: List[SystemMessage],
        model_context: ChatCompletionContext,
-         tools: List[BaseTool[Any, Any]],
+         workbench: Workbench,
        handoff_tools: List[BaseTool[Any, Any]],
        handoffs: Dict[str, HandoffBase],
        model_client: ChatCompletionClient,
@@ -1006,7 +1055,7 @@ async def _process_model_result(
            *[
                cls._execute_tool_call(
                    tool_call=call,
-                     tools=tools,
+                     workbench=workbench,
                    handoff_tools=handoff_tools,
                    agent_name=agent_name,
                    cancellation_token=cancellation_token,
@@ -1238,32 +1287,16 @@ def _summarize_tool_use(
    @staticmethod
    async def _execute_tool_call(
        tool_call: FunctionCall,
-         tools: List[BaseTool[Any, Any]],
+         workbench: Workbench,
        handoff_tools: List[BaseTool[Any, Any]],
        agent_name: str,
        cancellation_token: CancellationToken,
    ) -> Tuple[FunctionCall, FunctionExecutionResult]:
        """Execute a single tool call and return the result."""
+         # Load the arguments from the tool call.
        try:
-             all_tools = tools + handoff_tools
-             if not all_tools:
-                 raise ValueError("No tools are available.")
-             tool = next((t for t in all_tools if t.name == tool_call.name), None)
-             if tool is None:
-                 raise ValueError(f"The tool '{tool_call.name}' is not available.")
-             arguments: Dict[str, Any] = json.loads(tool_call.arguments) if tool_call.arguments else {}
-             result = await tool.run_json(arguments, cancellation_token)
-             result_as_str = tool.return_value_as_string(result)
-             return (
-                 tool_call,
-                 FunctionExecutionResult(
-                     content=result_as_str,
-                     call_id=tool_call.id,
-                     is_error=False,
-                     name=tool_call.name,
-                 ),
-             )
-         except Exception as e:
+             arguments = json.loads(tool_call.arguments)
+         except json.JSONDecodeError as e:
            return (
                tool_call,
                FunctionExecutionResult(
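Behavioral note on this rewrite: the model-supplied arguments are now parsed once, up front, and malformed JSON comes back as an error `FunctionExecutionResult` (continuing into the context lines below) instead of being swallowed by a blanket `except Exception`. A tiny illustration of the failure mode being trapped:

```python
import json

# The model supplies tool arguments as a JSON string; truncated or invalid
# JSON now surfaces as an is_error=True result rather than an exception
# escaping the agent loop.
try:
    arguments = json.loads('{"url": "https://example.com"')  # truncated on purpose
except json.JSONDecodeError as e:
    print(f"Error: {e}")  # presumably returned as the error result's content
```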
@@ -1274,6 +1307,39 @@ async def _execute_tool_call(
                ),
            )

+         # Check if the tool call is a handoff.
+         # TODO: consider creating a combined workbench to handle both handoff and normal tools.
+         for handoff_tool in handoff_tools:
+             if tool_call.name == handoff_tool.name:
+                 # Run the handoff tool call.
+                 result = await handoff_tool.run_json(arguments, cancellation_token)
+                 result_as_str = handoff_tool.return_value_as_string(result)
+                 return (
+                     tool_call,
+                     FunctionExecutionResult(
+                         content=result_as_str,
+                         call_id=tool_call.id,
+                         is_error=False,
+                         name=tool_call.name,
+                     ),
+                 )
+
+         # Handle a normal tool call using the workbench.
+         result = await workbench.call_tool(
+             name=tool_call.name,
+             arguments=arguments,
+             cancellation_token=cancellation_token,
+         )
+         return (
+             tool_call,
+             FunctionExecutionResult(
+                 content=result.to_text(),
+                 call_id=tool_call.id,
+                 is_error=result.is_error,
+                 name=tool_call.name,
+             ),
+         )
+
    async def on_reset(self, cancellation_token: CancellationToken) -> None:
        """Reset the assistant agent to its initialization state."""
        await self._model_context.clear()
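For intuition on the non-handoff branch, the workbench call above can also be exercised directly; a sketch mirroring how `_execute_tool_call` converts the result with `to_text()` and propagates `is_error` (the `add` tool is assumed):

```python
import asyncio

from autogen_core import CancellationToken
from autogen_core.tools import FunctionTool, StaticWorkbench


async def add(a: int, b: int) -> int:
    """Assumed example tool."""
    return a + b


async def main() -> None:
    workbench = StaticWorkbench([FunctionTool(add, description="Add two integers.")])
    result = await workbench.call_tool(
        name="add",
        arguments={"a": 1, "b": 2},
        cancellation_token=CancellationToken(),
    )
    # Mirrors FunctionExecutionResult(content=result.to_text(), is_error=result.is_error, ...).
    print(result.to_text(), result.is_error)


asyncio.run(main())
```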
@@ -1304,6 +1370,7 @@ def _to_config(self) -> AssistantAgentConfig:
            name=self.name,
            model_client=self._model_client.dump_component(),
            tools=[tool.dump_component() for tool in self._tools],
+             workbench=self._workbench.dump_component() if self._workbench else None,
            handoffs=list(self._handoffs.values()) if self._handoffs else None,
            model_context=self._model_context.dump_component(),
            memory=[memory.dump_component() for memory in self._memory] if self._memory else None,
@@ -1336,6 +1403,7 @@ def _from_config(cls, config: AssistantAgentConfig) -> Self:
            name=config.name,
            model_client=ChatCompletionClient.load_component(config.model_client),
            tools=[BaseTool.load_component(tool) for tool in config.tools] if config.tools else None,
+             workbench=Workbench.load_component(config.workbench) if config.workbench else None,
            handoffs=config.handoffs,
            model_context=ChatCompletionContext.load_component(config.model_context) if config.model_context else None,
            memory=[Memory.load_component(memory) for memory in config.memory] if config.memory else None,
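Since `_to_config`/`_from_config` now round-trip the workbench as a component, a workbench-backed agent should survive serialization; a sketch, assuming `StaticWorkbench` is itself dumpable as a component:

```python
from autogen_agentchat.agents import AssistantAgent
from autogen_core.tools import FunctionTool, StaticWorkbench
from autogen_ext.models.openai import OpenAIChatCompletionClient


async def add(a: int, b: int) -> int:
    """Assumed example tool."""
    return a + b


agent = AssistantAgent(
    "helper",
    model_client=OpenAIChatCompletionClient(model="gpt-4o"),
    workbench=StaticWorkbench([FunctionTool(add, description="Add two integers.")]),
)

# dump_component() serializes the workbench via _to_config;
# load_component() restores it via _from_config.
config = agent.dump_component()
restored = AssistantAgent.load_component(config)
```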