Skip to content

Commit d96aaeb

Browse files
authored
Update agent documentation (#6394)
* Replace on_messages and on_messages_stream with run and run_stream to unify interface documentation with teams * Remove magentic-one-cli from homepage as it has not been maintained and improved for a while.
1 parent bab0dfd commit d96aaeb

File tree

4 files changed

+172
-353
lines changed

4 files changed

+172
-353
lines changed

python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py

+43-70
Original file line numberDiff line numberDiff line change
@@ -90,10 +90,20 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
9090
the inner messages as they are created, and the :class:`~autogen_agentchat.base.Response`
9191
object as the last item before closing the generator.
9292
93+
The :meth:`BaseChatAgent.run` method returns a :class:`~autogen_agentchat.base.TaskResult`
94+
containing the messages produced by the agent. In the list of messages,
95+
:attr:`~autogen_agentchat.base.TaskResult.messages`,
96+
the last message is the final response message.
97+
98+
The :meth:`BaseChatAgent.run_stream` method creates an async generator that produces
99+
the inner messages as they are created, and the :class:`~autogen_agentchat.base.TaskResult`
100+
object as the last item before closing the generator.
101+
93102
.. attention::
94103
95104
The caller must only pass the new messages to the agent on each call
96-
to the :meth:`on_messages` or :meth:`on_messages_stream` method.
105+
to the :meth:`on_messages`, :meth:`on_messages_stream`, :meth:`BaseChatAgent.run`,
106+
or :meth:`BaseChatAgent.run_stream` methods.
97107
The agent maintains its state between calls to these methods.
98108
Do not pass the entire conversation history to the agent on each call.
99109
@@ -215,10 +225,8 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
215225
.. code-block:: python
216226
217227
import asyncio
218-
from autogen_core import CancellationToken
219228
from autogen_ext.models.openai import OpenAIChatCompletionClient
220229
from autogen_agentchat.agents import AssistantAgent
221-
from autogen_agentchat.messages import TextMessage
222230
223231
224232
async def main() -> None:
@@ -228,10 +236,8 @@ async def main() -> None:
228236
)
229237
agent = AssistantAgent(name="assistant", model_client=model_client)
230238
231-
response = await agent.on_messages(
232-
[TextMessage(content="What is the capital of France?", source="user")], CancellationToken()
233-
)
234-
print(response)
239+
result = await agent.run(task="Name two cities in North America.")
240+
print(result)
235241
236242
237243
asyncio.run(main())
@@ -246,8 +252,6 @@ async def main() -> None:
246252
import asyncio
247253
from autogen_ext.models.openai import OpenAIChatCompletionClient
248254
from autogen_agentchat.agents import AssistantAgent
249-
from autogen_agentchat.messages import TextMessage
250-
from autogen_core import CancellationToken
251255
252256
253257
async def main() -> None:
@@ -261,9 +265,7 @@ async def main() -> None:
261265
model_client_stream=True,
262266
)
263267
264-
stream = agent.on_messages_stream(
265-
[TextMessage(content="Name two cities in North America.", source="user")], CancellationToken()
266-
)
268+
stream = agent.run_stream(task="Name two cities in North America.")
267269
async for message in stream:
268270
print(message)
269271
@@ -272,27 +274,23 @@ async def main() -> None:
272274
273275
.. code-block:: text
274276
275-
source='assistant' models_usage=None content='Two' type='ModelClientStreamingChunkEvent'
276-
source='assistant' models_usage=None content=' cities' type='ModelClientStreamingChunkEvent'
277-
source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'
278-
source='assistant' models_usage=None content=' North' type='ModelClientStreamingChunkEvent'
279-
source='assistant' models_usage=None content=' America' type='ModelClientStreamingChunkEvent'
280-
source='assistant' models_usage=None content=' are' type='ModelClientStreamingChunkEvent'
281-
source='assistant' models_usage=None content=' New' type='ModelClientStreamingChunkEvent'
282-
source='assistant' models_usage=None content=' York' type='ModelClientStreamingChunkEvent'
283-
source='assistant' models_usage=None content=' City' type='ModelClientStreamingChunkEvent'
284-
source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'
285-
source='assistant' models_usage=None content=' the' type='ModelClientStreamingChunkEvent'
286-
source='assistant' models_usage=None content=' United' type='ModelClientStreamingChunkEvent'
287-
source='assistant' models_usage=None content=' States' type='ModelClientStreamingChunkEvent'
288-
source='assistant' models_usage=None content=' and' type='ModelClientStreamingChunkEvent'
289-
source='assistant' models_usage=None content=' Toronto' type='ModelClientStreamingChunkEvent'
290-
source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'
291-
source='assistant' models_usage=None content=' Canada' type='ModelClientStreamingChunkEvent'
292-
source='assistant' models_usage=None content='.' type='ModelClientStreamingChunkEvent'
293-
source='assistant' models_usage=None content=' TERMIN' type='ModelClientStreamingChunkEvent'
294-
source='assistant' models_usage=None content='ATE' type='ModelClientStreamingChunkEvent'
295-
Response(chat_message=TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), content='Two cities in North America are New York City in the United States and Toronto in Canada. TERMINATE', type='TextMessage'), inner_messages=[])
277+
source='user' models_usage=None metadata={} content='Name two cities in North America.' type='TextMessage'
278+
source='assistant' models_usage=None metadata={} content='Two' type='ModelClientStreamingChunkEvent'
279+
source='assistant' models_usage=None metadata={} content=' cities' type='ModelClientStreamingChunkEvent'
280+
source='assistant' models_usage=None metadata={} content=' in' type='ModelClientStreamingChunkEvent'
281+
source='assistant' models_usage=None metadata={} content=' North' type='ModelClientStreamingChunkEvent'
282+
source='assistant' models_usage=None metadata={} content=' America' type='ModelClientStreamingChunkEvent'
283+
source='assistant' models_usage=None metadata={} content=' are' type='ModelClientStreamingChunkEvent'
284+
source='assistant' models_usage=None metadata={} content=' New' type='ModelClientStreamingChunkEvent'
285+
source='assistant' models_usage=None metadata={} content=' York' type='ModelClientStreamingChunkEvent'
286+
source='assistant' models_usage=None metadata={} content=' City' type='ModelClientStreamingChunkEvent'
287+
source='assistant' models_usage=None metadata={} content=' and' type='ModelClientStreamingChunkEvent'
288+
source='assistant' models_usage=None metadata={} content=' Toronto' type='ModelClientStreamingChunkEvent'
289+
source='assistant' models_usage=None metadata={} content='.' type='ModelClientStreamingChunkEvent'
290+
source='assistant' models_usage=None metadata={} content=' TERMIN' type='ModelClientStreamingChunkEvent'
291+
source='assistant' models_usage=None metadata={} content='ATE' type='ModelClientStreamingChunkEvent'
292+
source='assistant' models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0) metadata={} content='Two cities in North America are New York City and Toronto. TERMINATE' type='TextMessage'
293+
messages=[TextMessage(source='user', models_usage=None, metadata={}, content='Name two cities in North America.', type='TextMessage'), TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), metadata={}, content='Two cities in North America are New York City and Toronto. TERMINATE', type='TextMessage')] stop_reason=None
296294
297295
298296
**Example 3: agent with tools**
@@ -312,9 +310,7 @@ async def main() -> None:
312310
import asyncio
313311
from autogen_ext.models.openai import OpenAIChatCompletionClient
314312
from autogen_agentchat.agents import AssistantAgent
315-
from autogen_agentchat.messages import TextMessage
316313
from autogen_agentchat.ui import Console
317-
from autogen_core import CancellationToken
318314
319315
320316
async def get_current_time() -> str:
@@ -327,12 +323,7 @@ async def main() -> None:
327323
# api_key = "your_openai_api_key"
328324
)
329325
agent = AssistantAgent(name="assistant", model_client=model_client, tools=[get_current_time])
330-
331-
await Console(
332-
agent.on_messages_stream(
333-
[TextMessage(content="What is the current time?", source="user")], CancellationToken()
334-
)
335-
)
326+
await Console(agent.run_stream(task="What is the current time?"))
336327
337328
338329
asyncio.run(main())
@@ -390,9 +381,7 @@ async def main() -> None:
390381
from typing import Literal
391382
392383
from autogen_agentchat.agents import AssistantAgent
393-
from autogen_agentchat.messages import TextMessage
394384
from autogen_agentchat.ui import Console
395-
from autogen_core import CancellationToken
396385
from autogen_core.tools import FunctionTool
397386
from autogen_ext.models.openai import OpenAIChatCompletionClient
398387
from pydantic import BaseModel
@@ -430,7 +419,7 @@ def sentiment_analysis(text: str) -> str:
430419
431420
432421
async def main() -> None:
433-
stream = agent.on_messages_stream([TextMessage(content="I am happy today!", source="user")], CancellationToken())
422+
stream = agent.run_stream(task="I am happy today!")
434423
await Console(stream)
435424
436425
@@ -458,8 +447,6 @@ async def main() -> None:
458447
import asyncio
459448
460449
from autogen_agentchat.agents import AssistantAgent
461-
from autogen_agentchat.messages import TextMessage
462-
from autogen_core import CancellationToken
463450
from autogen_core.model_context import BufferedChatCompletionContext
464451
from autogen_ext.models.openai import OpenAIChatCompletionClient
465452
@@ -482,20 +469,14 @@ async def main() -> None:
482469
system_message="You are a helpful assistant.",
483470
)
484471
485-
response = await agent.on_messages(
486-
[TextMessage(content="Name two cities in North America.", source="user")], CancellationToken()
487-
)
488-
print(response.chat_message.content) # type: ignore
472+
result = await agent.run(task="Name two cities in North America.")
473+
print(result.messages[-1].content) # type: ignore
489474
490-
response = await agent.on_messages(
491-
[TextMessage(content="My favorite color is blue.", source="user")], CancellationToken()
492-
)
493-
print(response.chat_message.content) # type: ignore
475+
result = await agent.run(task="My favorite color is blue.")
476+
print(result.messages[-1].content) # type: ignore
494477
495-
response = await agent.on_messages(
496-
[TextMessage(content="Did I ask you any question?", source="user")], CancellationToken()
497-
)
498-
print(response.chat_message.content) # type: ignore
478+
result = await agent.run(task="Did I ask you any question?")
479+
print(result.messages[-1].content) # type: ignore
499480
500481
501482
asyncio.run(main())
@@ -518,8 +499,6 @@ async def main() -> None:
518499
import asyncio
519500
520501
from autogen_agentchat.agents import AssistantAgent
521-
from autogen_agentchat.messages import TextMessage
522-
from autogen_core import CancellationToken
523502
from autogen_core.memory import ListMemory, MemoryContent
524503
from autogen_ext.models.openai import OpenAIChatCompletionClient
525504
@@ -544,10 +523,8 @@ async def main() -> None:
544523
system_message="You are a helpful assistant.",
545524
)
546525
547-
response = await agent.on_messages(
548-
[TextMessage(content="One idea for a dinner.", source="user")], CancellationToken()
549-
)
550-
print(response.chat_message.content) # type: ignore
526+
result = await agent.run(task="What is a good dinner idea?")
527+
print(result.messages[-1].content) # type: ignore
551528
552529
553530
asyncio.run(main())
@@ -573,10 +550,8 @@ async def main() -> None:
573550
.. code-block:: python
574551
575552
import asyncio
576-
from autogen_core import CancellationToken
577553
from autogen_ext.models.openai import OpenAIChatCompletionClient
578554
from autogen_agentchat.agents import AssistantAgent
579-
from autogen_agentchat.messages import TextMessage
580555
581556
582557
async def main() -> None:
@@ -587,10 +562,8 @@ async def main() -> None:
587562
# The system message is not supported by the o1 series model.
588563
agent = AssistantAgent(name="assistant", model_client=model_client, system_message=None)
589564
590-
response = await agent.on_messages(
591-
[TextMessage(content="What is the capital of France?", source="user")], CancellationToken()
592-
)
593-
print(response)
565+
result = await agent.run(task="What is the capital of France?")
566+
print(result.messages[-1].content) # type: ignore
594567
595568
596569
asyncio.run(main())

python/packages/autogen-core/docs/src/index.md

+7-33
Original file line numberDiff line numberDiff line change
@@ -46,47 +46,21 @@ A framework for building AI agents and applications
4646
::::{grid}
4747
:gutter: 2
4848

49-
:::{grid-item-card}
50-
:shadow: none
51-
:margin: 2 0 0 0
52-
:columns: 12 12 6 6
53-
54-
<div class="sd-card-title sd-font-weight-bold docutils">
55-
56-
{fas}`book;pst-color-primary`
57-
Magentic-One CLI [![PyPi magentic-one-cli](https://img.shields.io/badge/PyPi-magentic--one--cli-blue?logo=pypi)](https://pypi.org/project/magentic-one-cli/)
58-
</div>
59-
A console-based multi-agent assistant for web and file-based tasks.
60-
Built on AgentChat.
61-
62-
```bash
63-
pip install -U magentic-one-cli
64-
m1 "Find flights from Seattle to Paris and format the result in a table"
65-
```
66-
67-
+++
68-
69-
```{button-ref} user-guide/agentchat-user-guide/magentic-one
70-
:color: secondary
71-
72-
Get Started
73-
```
74-
75-
:::
76-
7749
:::{grid-item-card} {fas}`palette;pst-color-primary` Studio [![PyPi autogenstudio](https://img.shields.io/badge/PyPi-autogenstudio-blue?logo=pypi)](https://pypi.org/project/autogenstudio/)
7850
:shadow: none
7951
:margin: 2 0 0 0
80-
:columns: 12 12 6 6
52+
:columns: 12 12 12 12
8153

82-
An app for prototyping and managing agents without writing code.
54+
A web-based UI for prototyping with agents without writing code.
8355
Built on AgentChat.
8456

8557
```bash
8658
pip install -U autogenstudio
8759
autogenstudio ui --port 8080 --appdir ./myapp
8860
```
8961

62+
_Start here if you are new to AutoGen and want to prototype with agents without writing code._
63+
9064
+++
9165

9266
```{button-ref} user-guide/autogenstudio-user-guide/index
@@ -124,7 +98,7 @@ async def main() -> None:
12498
asyncio.run(main())
12599
```
126100

127-
_Start here if you are building conversational agents. [Migrating from AutoGen 0.2?](./user-guide/agentchat-user-guide/migration-guide.md)._
101+
_Start here if you are prototyping with agents using Python. [Migrating from AutoGen 0.2?](./user-guide/agentchat-user-guide/migration-guide.md)._
128102

129103
+++
130104

@@ -147,7 +121,7 @@ An event-driven programming framework for building scalable multi-agent AI syste
147121
* Research on multi-agent collaboration.
148122
* Distributed agents for multi-language applications.
149123

150-
_Start here if you are building workflows or distributed agent systems._
124+
_Start here if you are getting serious about building multi-agent systems._
151125

152126
+++
153127

@@ -167,7 +141,7 @@ Get Started
167141
Implementations of Core and AgentChat components that interface with external services or other libraries.
168142
You can find and use community extensions or create your own. Examples of built-in extensions:
169143

170-
* {py:class}`~autogen_ext.tools.langchain.LangChainToolAdapter` for using LangChain tools.
144+
* {py:class}`~autogen_ext.tools.mcp.McpWorkbench` for using Model-Context Protocol (MCP) servers.
171145
* {py:class}`~autogen_ext.agents.openai.OpenAIAssistantAgent` for using Assistant API.
172146
* {py:class}`~autogen_ext.code_executors.docker.DockerCommandLineCodeExecutor` for running model-generated code in a Docker container.
173147
* {py:class}`~autogen_ext.runtimes.grpc.GrpcWorkerAgentRuntime` for distributed agents.

python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb

+4-2
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,9 @@
1616
"- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: The abstract method that resets the agent to its initial state. This method is called when the agent is asked to reset itself.\n",
1717
"- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.produced_message_types`: The list of possible {py:class}`~autogen_agentchat.messages.BaseChatMessage` message types the agent can produce in its response.\n",
1818
"\n",
19-
"Optionally, you can implement the the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` method to stream messages as they are generated by the agent. If this method is not implemented, the agent\n",
19+
"Optionally, you can implement the the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` method to stream messages as they are generated by the agent.\n",
20+
"This method is called by {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream` to stream messages.\n",
21+
"If this method is not implemented, the agent\n",
2022
"uses the default implementation of {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`\n",
2123
"that calls the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` method and\n",
2224
"yields all messages in the response."
@@ -731,7 +733,7 @@
731733
"name": "python",
732734
"nbconvert_exporter": "python",
733735
"pygments_lexer": "ipython3",
734-
"version": "3.12.3"
736+
"version": "3.12.7"
735737
}
736738
},
737739
"nbformat": 4,

0 commit comments

Comments
 (0)