diff --git a/.openpublishing.publish.config.json b/.openpublishing.publish.config.json index e7ee10dd..70a1d247 100644 --- a/.openpublishing.publish.config.json +++ b/.openpublishing.publish.config.json @@ -10,7 +10,10 @@ "type_mapping": { "Conceptual": "Content" }, - "build_entry_point": "docs" + "build_entry_point": "docs", + "xref_query_tags": [ + "/dotnet" + ] }, { "docset_name": "semantic-kernel", @@ -25,7 +28,10 @@ "Conceptual": "Content" }, "build_entry_point": "docs", - "template_folder": "_themes" + "template_folder": "_themes", + "xref_query_tags": [ + "/dotnet" + ] } ], "notification_subscribers": [ @@ -75,4 +81,4 @@ "template_folder": "_themes.pdf" } } -} \ No newline at end of file +} diff --git a/agent-framework/TOC.yml b/agent-framework/TOC.yml index 52a59ee2..78b60f69 100644 --- a/agent-framework/TOC.yml +++ b/agent-framework/TOC.yml @@ -1,7 +1,7 @@ items: - name: Agent Framework href: overview/agent-framework-overview.md -- name: Quickstart Guide +- name: Quick Start Guide href: tutorials/quick-start.md expanded: true - name: Tutorials @@ -19,4 +19,5 @@ items: - name: Migration Guide href: migration-guide/TOC.yml - name: API Reference Guide - items: + href: api-docs/TOC.yml + expanded: true diff --git a/agent-framework/api-docs/TOC.yml b/agent-framework/api-docs/TOC.yml new file mode 100644 index 00000000..77ddc364 --- /dev/null +++ b/agent-framework/api-docs/TOC.yml @@ -0,0 +1,4 @@ +- name: .NET API reference + href: /dotnet/api/microsoft.agents.ai +- name: Python API reference + href: /python/api/agent-framework-core/agent_framework diff --git a/agent-framework/index.yml b/agent-framework/index.yml index 4978fd08..26fb80a3 100644 --- a/agent-framework/index.yml +++ b/agent-framework/index.yml @@ -17,16 +17,16 @@ metadata: productDirectory: items: - - title: Getting Started - imageSrc: /agent-framework/media/getting-started.svg + - title: Overview + imageSrc: /agent-framework/media/overview.svg links: - url: /agent-framework/overview/agent-framework-overview text: Introduction to Agent Framework - - title: Quick Start + - title: Get Started imageSrc: /agent-framework/media/quick-start.svg links: - url: /agent-framework/tutorials/quick-start - text: Agent Framework Quick Start + text: Agent Framework Quick-Start Guide - url: /agent-framework/tutorials/overview text: Learn how to use Agent Framework - url: https://github.com/microsoft/agent-framework/tree/main/python/samples diff --git a/agent-framework/media/getting-started.svg b/agent-framework/media/overview.svg similarity index 100% rename from agent-framework/media/getting-started.svg rename to agent-framework/media/overview.svg diff --git a/agent-framework/migration-guide/from-autogen/index.md b/agent-framework/migration-guide/from-autogen/index.md index 43e259ab..2a2d6838 100644 --- a/agent-framework/migration-guide/from-autogen/index.md +++ b/agent-framework/migration-guide/from-autogen/index.md @@ -68,11 +68,11 @@ many important features came from external contributors. [Microsoft Agent Framework](https://github.com/microsoft/agent-framework) is a new multi-language SDK for building AI agents and workflows using LLMs. It represents a significant evolution of the ideas pioneered in AutoGen -and incorporates lessons learned from real-world usage. It is developed -by the core AutoGen team and Semantic Kernel team at Microsoft, +and incorporates lessons learned from real-world usage. 
It's developed +by the core AutoGen and Semantic Kernel teams at Microsoft, and is designed to be a new foundation for building AI applications going forward. -What follows is a practical migration path: we’ll start by grounding on what stays the same and what changes at a glance, then cover model client setup, single‑agent features, and finally multi‑agent orchestration with concrete code side‑by‑side. Along the way, links to runnable samples in the Agent Framework repo help you validate each step. +This guide describes a practical migration path: it starts by covering what stays the same and what changes at a glance. Then, it covers model client setup, single‑agent features, and finally multi‑agent orchestration with concrete code side‑by‑side. Along the way, links to runnable samples in the Agent Framework repo help you validate each step. ## Key Similarities and Differences @@ -277,10 +277,10 @@ Stateless by default: quick demo ```python # Without a thread (two independent invocations) r1 = await agent.run("What's 2+2?") -print(r1.text) # e.g., "4" +print(r1.text) # for example, "4" r2 = await agent.run("What about that number times 10?") -print(r2.text) # Likely ambiguous without prior context; may not be "40" +print(r2.text) # Likely ambiguous without prior context; cannot be "40" # With a thread (shared context across calls) thread = agent.get_new_thread() @@ -487,9 +487,10 @@ Requirements and caveats: - Hosted tools are only available on models/accounts that support them. Verify entitlements and model support for your provider before enabling these tools. - Configuration differs by provider; follow the prerequisites in each sample for setup and permissions. -- Not every model supports every hosted tool (e.g., web search vs code interpreter). Choose a compatible model in your environment. +- Not every model supports every hosted tool (for example, web search vs code interpreter). Choose a compatible model in your environment. -**Note**: AutoGen supports local code execution tools, but this feature is planned for future Agent Framework versions. +> [!NOTE] +> AutoGen supports local code execution tools, but this feature is planned for future Agent Framework versions. **Key Difference**: Agent Framework handles tool iteration automatically at the agent level. Unlike AutoGen's `max_tool_iterations` parameter, Agent Framework agents continue tool execution until completion by default, with built-in safety mechanisms to prevent infinite loops. @@ -599,7 +600,7 @@ coordinator = ChatAgent( ) ``` -Explicit migration note: In AutoGen, set `parallel_tool_calls=False` on the coordinator’s model client when wrapping agents as tools to avoid concurrency issues when invoking the same agent instance. +Explicit migration note: In AutoGen, set `parallel_tool_calls=False` on the coordinator's model client when wrapping agents as tools to avoid concurrency issues when invoking the same agent instance. In Agent Framework, `as_tool()` does not require disabling parallel tool calls as agents are stateless by default. @@ -652,7 +653,7 @@ For detailed middleware examples, see: ### Custom Agents -Sometimes you don’t want a model-backed agent at all—you want a deterministic or API-backed agent with custom logic. Both frameworks support building custom agents, but the patterns differ. +Sometimes you don't want a model-backed agent at all—you want a deterministic or API-backed agent with custom logic. Both frameworks support building custom agents, but the patterns differ. 
#### AutoGen: Subclass BaseChatAgent @@ -739,7 +740,7 @@ Notes: --- -Next, let’s look at multi‑agent orchestration—the area where the frameworks differ most. +Next, let's look at multi‑agent orchestration—the area where the frameworks differ most. ## Multi-Agent Feature Mapping @@ -1033,8 +1034,8 @@ workflow = ( What to notice: -- GraphFlow broadcasts messages and uses conditional transitions. Join behavior is configured via target‑side `activation` and per‑edge `activation_group`/`activation_condition` (e.g., group both edges into `join_d` with `activation_condition="any"`). -- Workflow routes data explicitly; use `target_id` to select downstream executors. Join behavior lives in the receiving executor (e.g., yield on first input vs wait for all), or via orchestration builders/aggregators. +- GraphFlow broadcasts messages and uses conditional transitions. Join behavior is configured via target‑side `activation` and per‑edge `activation_group`/`activation_condition` (for example, group both edges into `join_d` with `activation_condition="any"`). +- Workflow routes data explicitly; use `target_id` to select downstream executors. Join behavior lives in the receiving executor (for example, yield on first input vs wait for all), or via orchestration builders/aggregators. - Executors in Workflow are free‑form: wrap a `ChatAgent`, a function, or a sub‑workflow and mix them within the same graph. #### Key Differences diff --git a/agent-framework/migration-guide/from-semantic-kernel/index.md b/agent-framework/migration-guide/from-semantic-kernel/index.md index 74fc0d2f..d41ee6f1 100644 --- a/agent-framework/migration-guide/from-semantic-kernel/index.md +++ b/agent-framework/migration-guide/from-semantic-kernel/index.md @@ -11,43 +11,41 @@ ms.service: agent-framework # Semantic Kernel to Agent Framework Migration Guide -## Benefits of Microsoft Agent Framework compared to Semantic Kernel Agent Framework +## Benefits of Microsoft Agent Framework -- **Simplified API**: Reduced complexity and boilerplate code -- **Better Performance**: Optimized object creation and memory usage -- **Unified Interface**: Consistent patterns across different AI providers -- **Enhanced Developer Experience**: More intuitive and discoverable APIs +- **Simplified API**: Reduced complexity and boilerplate code. +- **Better Performance**: Optimized object creation and memory usage. +- **Unified Interface**: Consistent patterns across different AI providers. +- **Enhanced Developer Experience**: More intuitive and discoverable APIs. ::: zone pivot="programming-language-csharp" -## Key differences - -Here is a summary of the key differences between the Semantic Kernel Agent Framework and the Microsoft Agent Framework to help you migrate your code. +The following sections summarize the key differences between Semantic Kernel Agent Framework and Microsoft Agent Framework to help you migrate your code. -### 1. Namespace Updates +## 1. Namespace Updates -#### Semantic Kernel +### Semantic Kernel ```csharp using Microsoft.SemanticKernel; using Microsoft.SemanticKernel.Agents; ``` -#### Agent Framework +### Agent Framework Agent Framework namespaces are under `Microsoft.Agents.AI`. -Agent Framework uses the core AI message and content types from `Microsoft.Extensions.AI` for communication between components. +Agent Framework uses the core AI message and content types from for communication between components. ```csharp using Microsoft.Extensions.AI; using Microsoft.Agents.AI; ``` -### 2. Agent Creation Simplification +## 2. 
Agent Creation Simplification -#### Semantic Kernel +### Semantic Kernel -Every agent in Semantic Kernel depends on a `Kernel` instance and will have +Every agent in Semantic Kernel depends on a `Kernel` instance and has an empty `Kernel` if not provided. ```csharp @@ -70,7 +68,7 @@ PersistentAgent definition = await azureAgentClient.Administration.CreateAgentAs AzureAIAgent agent = new(definition, azureAgentClient); ``` -#### Agent Framework +### Agent Framework Agent creation in Agent Framework is made simpler with extensions provided by all main providers. @@ -80,15 +78,15 @@ AIAgent azureFoundryAgent = await persistentAgentsClient.CreateAIAgentAsync(inst AIAgent openAIAssistantAgent = await assistantClient.CreateAIAgentAsync(instructions: ParrotInstructions); ``` -Additionally for hosted agent providers you can also use the `GetAIAgent` to retrieve an agent from an existing hosted agent. +Additionally, for hosted agent providers you can also use the `GetAIAgent` method to retrieve an agent from an existing hosted agent. ```csharp AIAgent azureFoundryAgent = await persistentAgentsClient.GetAIAgentAsync(agentId); ``` -### 3. Agent Thread Creation +## 3. Agent Thread Creation -#### Semantic Kernel +### Semantic Kernel The caller has to know the thread type and create it manually. @@ -99,52 +97,54 @@ AgentThread thread = new AzureAIAgentThread(this.Client); AgentThread thread = new OpenAIResponseAgentThread(this.Client); ``` -#### Agent Framework +### Agent Framework The agent is responsible for creating the thread. ```csharp -// New +// New. AgentThread thread = agent.GetNewThread(); ``` -### 4. Hosted Agent Thread Cleanup +## 4. Hosted Agent Thread Cleanup This case applies exclusively to a few AI providers that still provide hosted threads. -#### Semantic Kernel +### Semantic Kernel -Threads have a `self` deletion method +Threads have a `self` deletion method. + +OpenAI Assistants Provider: -i.e: OpenAI Assistants Provider ```csharp await thread.DeleteAsync(); ``` -#### Agent Framework +### Agent Framework > [!NOTE] -> OpenAI Responses introduced a new conversation model that simplifies how conversations are handled. This simplifies hosted thread management compared to the now deprecated OpenAI Assistants model. For more information see the [OpenAI Assistants migration guide](https://platform.openai.com/docs/assistants/migration). +> OpenAI Responses introduced a new conversation model that simplifies how conversations are handled. This change simplifies hosted thread management compared to the now deprecated OpenAI Assistants model. For more information, see the [OpenAI Assistants migration guide](https://platform.openai.com/docs/assistants/migration). -Agent Framework doesn't have a thread deletion API in the `AgentThread` type as not all providers support hosted threads or thread deletion and this will become more common as more providers shift to responses based architectures. +Agent Framework doesn't have a thread deletion API in the `AgentThread` type as not all providers support hosted threads or thread deletion. This design will become more common as more providers shift to responses-based architectures. -If you require thread deletion and the provider allows this, the caller **should** keep track of the created threads and delete them later when necessary via the provider's sdk. +If you require thread deletion and the provider allows it, the caller **should** keep track of the created threads and delete them later when necessary via the provider's SDK. 
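For example, a minimal sketch of this tracking pattern might look like the following. It assumes the OpenAI Assistants provider shown in the next snippet, where `assistantClient` is the provider SDK's assistant client, `AgentThread.ConversationId` identifies the hosted thread, and `agent` was created as in the earlier examples; adapt the cleanup point to your own application.

```csharp
// Sketch only: track hosted threads as they are created so they can be cleaned up later.
List<AgentThread> createdThreads = [];

AgentThread thread = agent.GetNewThread();
createdThreads.Add(thread);

AgentRunResponse response = await agent.RunAsync("Hello!", thread);

// When the conversation is finished, delete each hosted thread via the provider's SDK.
foreach (AgentThread created in createdThreads)
{
    // ConversationId is assumed to be null for threads that were never persisted by the service.
    if (created.ConversationId is not null)
    {
        await assistantClient.DeleteThreadAsync(created.ConversationId);
    }
}
```

This keeps thread lifetime management in the caller's hands, matching the guidance above.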
+ +OpenAI Assistants Provider: -i.e: OpenAI Assistants Provider ```csharp await assistantClient.DeleteThreadAsync(thread.ConversationId); ``` -### 5. Tool Registration +## 5. Tool Registration -#### Semantic Kernel +### Semantic Kernel -In semantic kernel to expose a function as a tool you must: +To expose a function as a tool, you must: 1. Decorate the function with a `[KernelFunction]` attribute. -2. Have a `Plugin` class or use the `KernelPluginFactory` to wrap the function. -3. Have a `Kernel` to add your plugin to. -4. Pass the `Kernel` to the agent. +1. Have a `Plugin` class or use the `KernelPluginFactory` to wrap the function. +1. Have a `Kernel` to add your plugin to. +1. Pass the `Kernel` to the agent. ```csharp KernelFunction function = KernelFunctionFactory.CreateFromMethod(GetWeather); @@ -155,19 +155,19 @@ kernel.Plugins.Add(plugin); ChatCompletionAgent agent = new() { Kernel = kernel, ... }; ``` -#### Agent Framework +### Agent Framework -In agent framework in a single call you can register tools directly in the agent creation process. +In Agent Framework, in a single call you can register tools directly in the agent creation process. ```csharp AIAgent agent = chatClient.CreateAIAgent(tools: [AIFunctionFactory.Create(GetWeather)]); ``` -### 6. Agent Non-Streaming Invocation +## 6. Agent Non-Streaming Invocation -Key differences can be seen in the method names from `Invoke` to `Run`, return types and parameters `AgentRunOptions`. +Key differences can be seen in the method names from `Invoke` to `Run`, return types, and parameters `AgentRunOptions`. -#### Semantic Kernel +### Semantic Kernel The Non-Streaming uses a streaming pattern `IAsyncEnumerable>` for returning multiple agent messages. @@ -178,22 +178,22 @@ await foreach (AgentResponseItem result in agent.InvokeAsync } ``` -#### Agent Framework +### Agent Framework The Non-Streaming returns a single `AgentRunResponse` with the agent response that can contain multiple messages. The text result of the run is available in `AgentRunResponse.Text` or `AgentRunResponse.ToString()`. -All messages created as part of the response is returned in the `AgentRunResponse.Messages` list. -This may include tool call messages, function results, reasoning updates and final results. +All messages created as part of the response are returned in the `AgentRunResponse.Messages` list. +This might include tool call messages, function results, reasoning updates, and final results. ```csharp AgentRunResponse agentResponse = await agent.RunAsync(userInput, thread); ``` -### 7. Agent Streaming Invocation +## 7. Agent Streaming Invocation -Key differences in the method names from `Invoke` to `Run`, return types and parameters `AgentRunOptions`. +The key differences are in the method names from `Invoke` to `Run`, return types, and parameters `AgentRunOptions`. -#### Semantic Kernel +### Semantic Kernel ```csharp await foreach (StreamingChatMessageContent update in agent.InvokeStreamingAsync(userInput, thread)) @@ -202,11 +202,11 @@ await foreach (StreamingChatMessageContent update in agent.InvokeStreamingAsync( } ``` -#### Agent Framework +### Agent Framework -Similar streaming API pattern with the key difference being that it returns `AgentRunResponseUpdate` objects including more agent related information per update. +Agent Framework has a similar streaming API pattern, with the key difference being that it returns `AgentRunResponseUpdate` objects that include more agent-related information per update. 
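For instance, a minimal sketch that concatenates the per-update text into the full response (as the next paragraph describes) might look like this. It assumes each update exposes a `Text` property and reuses the `agent`, `userInput`, and `thread` variables from the earlier examples.

```csharp
// Sketch only: accumulate the text from each streamed update into the full response text.
var fullText = new System.Text.StringBuilder();
await foreach (AgentRunResponseUpdate update in agent.RunStreamingAsync(userInput, thread))
{
    fullText.Append(update.Text);
}
Console.WriteLine(fullText.ToString());
```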
-All updates produced by any service underlying the AIAgent is returned. The textual result of the agent is available by concatenating the `AgentRunResponse.Text` values. +All updates produced by any service underlying the AIAgent are returned. The textual result of the agent is available by concatenating the `AgentRunResponse.Text` values. ```csharp await foreach (AgentRunResponseUpdate update in agent.RunStreamingAsync(userInput, thread)) @@ -215,53 +215,52 @@ await foreach (AgentRunResponseUpdate update in agent.RunStreamingAsync(userInpu } ``` -### 8. Tool Function Signatures +## 8. Tool Function Signatures -**Problem**: SK plugin methods need `[KernelFunction]` attributes +**Problem**: Semantic Kernel plugin methods need `[KernelFunction]` attributes. ```csharp public class MenuPlugin { - [KernelFunction] // Required for SK + [KernelFunction] // Required. public static MenuItem[] GetMenu() => ...; } ``` -**Solution**: AF can use methods directly without attributes +**Solution**: Agent Framework can use methods directly without attributes. ```csharp public class MenuTools { - [Description("Get menu items")] // Optional description + [Description("Get menu items")] // Optional description. public static MenuItem[] GetMenu() => ...; } ``` -### 9. Options Configuration +## 9. Options Configuration -**Problem**: Complex options setup in SK +**Problem**: Complex options setup in Semantic Kernel. ```csharp OpenAIPromptExecutionSettings settings = new() { MaxTokens = 1000 }; AgentInvokeOptions options = new() { KernelArguments = new(settings) }; ``` -**Solution**: Simplified options in AF +**Solution**: Simplified options in Agent Framework. ```csharp ChatClientAgentRunOptions options = new(new() { MaxOutputTokens = 1000 }); ``` > [!IMPORTANT] -> This example shows passing implementation specific options to a `ChatClientAgent`. Not all `AIAgents` support `ChatClientAgentRunOptions`. -> `ChatClientAgent` is provided to build agents based on underlying inference services, and therefore supports inference options like `MaxOutputTokens`. +> This example shows passing implementation-specific options to a `ChatClientAgent`. Not all `AIAgents` support `ChatClientAgentRunOptions`. `ChatClientAgent` is provided to build agents based on underlying inference services, and therefore supports inference options like `MaxOutputTokens`. -### 10. Dependency Injection +## 10. Dependency Injection -#### Semantic Kernel +### Semantic Kernel -A `Kernel` registration is required in the service container to be able to create an agent -as every agent abstractions needs to be initialized with a `Kernel` property. +A `Kernel` registration is required in the service container to be able to create an agent, +as every agent abstraction needs to be initialized with a `Kernel` property. Semantic Kernel uses the `Agent` type as the base abstraction class for agents. @@ -272,45 +271,45 @@ serviceContainer.AddKeyedSingleton( (sp, key) => new ChatCompletionAgent() { - // Passing the kernel is required + // Passing the kernel is required. Kernel = sp.GetRequiredService(), }); ``` -#### Agent Framework +### Agent Framework -The Agent framework provides the `AIAgent` type as the base abstraction class. +Agent Framework provides the `AIAgent` type as the base abstraction class. ```csharp services.AddKeyedSingleton(() => client.CreateAIAgent(...)); ``` -### 11. **Agent Type Consolidation** +## 11. 
Agent Type Consolidation -#### Semantic Kernel +### Semantic Kernel -Semantic kernel provides specific agent classes for various services, e.g. +Semantic Kernel provides specific agent classes for various services, for example: - `ChatCompletionAgent` for use with chat-completion-based inference services. - `OpenAIAssistantAgent` for use with the OpenAI Assistants service. - `AzureAIAgent` for use with the Azure AI Foundry Agents service. -#### Agent Framework +### Agent Framework -The agent framework supports all the above mentioned services via a single agent type, `ChatClientAgent`. +Agent Framework supports all the mentioned services via a single agent type, `ChatClientAgent`. -`ChatClientAgent` can be used to build agents using any underlying service that provides an SDK implementing the `Microsoft.Extensions.AI.IChatClient` interface. +`ChatClientAgent` can be used to build agents using any underlying service that provides an SDK that implements the `IChatClient` interface. ::: zone-end ::: zone pivot="programming-language-python" ## Key differences -Here is a summary of the key differences between the Semantic Kernel Agent Framework and the Microsoft Agent Framework to help you migrate your code. +Here is a summary of the key differences between the Semantic Kernel Agent Framework and Microsoft Agent Framework to help you migrate your code. -### 1. Package and Import Updates +## 1. Package and import updates -#### Semantic Kernel +### Semantic Kernel Semantic Kernel packages are installed as `semantic-kernel` and imported as `semantic_kernel`. The package also has a number of `extras` that you can install to install the different dependencies for different AI providers and other features. @@ -319,7 +318,7 @@ from semantic_kernel import Kernel from semantic_kernel.agents import ChatCompletionAgent ``` -#### Agent Framework +### Agent Framework Agent Framework package is installed as `agent-framework` and imported as `agent_framework`. Agent Framework is built up differently, it has a core package `agent-framework-core` that contains the core functionality, and then there are multiple packages that rely on that core package, such as `agent-framework-azure-ai`, `agent-framework-mem0`, `agent-framework-copilotstudio`, etc. When you run `pip install agent-framework` it will install the core package and *all* packages, so that you can get started with all the features quickly. When you are ready to reduce the number of packages because you know what you need, you can install only the packages you need, so for instance if you only plan to use Azure AI Foundry and Mem0 you can install only those two packages: `pip install agent-framework-azure-ai agent-framework-mem0`, `agent-framework-core` is a dependency to those two, so will automatically be installed. @@ -336,19 +335,21 @@ Many of the most commonly used types are imported directly from `agent_framework from agent_framework import ChatMessage, ChatAgent ``` -### 2. Agent Type Consolidation +## 2. Agent Type Consolidation -#### Semantic Kernel -Semantic Kernel provides specific agent classes for various services, e.g. ChatCompletionAgent, AzureAIAgent, OpenAIAssistantAgent, etc. See [Agent types in Semantic Kernel](/semantic-kernel/Frameworks/agent/agent-types/azure-ai-agent). +### Semantic Kernel -#### Agent Framework -In Agent Framework the majority of agents are built using the `ChatAgent` which can be used with all the `ChatClient` based services, such as Azure AI Foundry, OpenAI ChatCompletion and OpenAI Responses. 
We currently have two other agents, `CopilotStudioAgent` for use with Copilot Studio and `A2AAgent` for use with A2A. +Semantic Kernel provides specific agent classes for various services, for example, ChatCompletionAgent, AzureAIAgent, OpenAIAssistantAgent, etc. See [Agent types in Semantic Kernel](/semantic-kernel/Frameworks/agent/agent-types/azure-ai-agent). + +### Agent Framework + +In Agent Framework, the majority of agents are built using the `ChatAgent` which can be used with all the `ChatClient` based services, such as Azure AI Foundry, OpenAI ChatCompletion, and OpenAI Responses. There are two additional agents: `CopilotStudioAgent` for use with Copilot Studio and `A2AAgent` for use with A2A. All the built-in agents are based on the BaseAgent (`from agent_framework import BaseAgent`). And all agents are consistent with the `AgentProtocol` (`from agent_framework import AgentProtocol`) interface. -### 2. Agent Creation Simplification +## 3. Agent Creation Simplification -#### Semantic Kernel +### Semantic Kernel Every agent in Semantic Kernel depends on a `Kernel` instance and will have an empty `Kernel` if not provided. @@ -364,8 +365,7 @@ agent = ChatCompletionAgent( ) ``` - -#### Agent Framework +### Agent Framework Agent creation in Agent Framework can be done in two ways, directly: @@ -375,7 +375,8 @@ from agent_framework import ChatMessage, ChatAgent agent = ChatAgent(chat_client=AzureAIAgentClient(credential=AzureCliCredential()), instructions="You are a helpful assistant") ``` -or, with the convenience methods provided by chat clients: + +Or, with the convenience methods provided by chat clients: ```python from agent_framework.azure import AzureOpenAIChatClient @@ -383,11 +384,11 @@ from azure.identity import AzureCliCredential agent = AzureOpenAIChatClient(credential=AzureCliCredential()).create_agent(instructions="You are a helpful assistant") ``` -The direct method, exposes all possible parameters you can set for your agent, while the convenience method has a subset, you can still pass in the same set of parameters, because internally we call the direct method. +The direct method exposes all possible parameters you can set for your agent. While the convenience method has a subset, you can still pass in the same set of parameters, because it calls the direct method internally. -### 3. Agent Thread Creation +## 4. Agent Thread Creation -#### Semantic Kernel +### Semantic Kernel The caller has to know the thread type and create it manually. @@ -397,7 +398,7 @@ from semantic_kernel.agents import ChatHistoryAgentThread thread = ChatHistoryAgentThread() ``` -#### Agent Framework +### Agent Framework The agent can be asked to create a new thread for you. @@ -406,15 +407,13 @@ agent = ... thread = agent.get_new_thread() ``` -a thread is then created in one of three ways: -1. if the agent has a thread_id (or conversation_id or something similar) set, it will create a thread in the underlying service with that id. - Once a thread has a `service_thread_id`, you can no longer use it to store messages in memory. - And this only applies to agents that have a service-side thread concept. such as Azure AI Foundry Agents and OpenAI Assistants. -2. if the agent has a `chat_message_store_factory` set, it will use that factory to create a message store and use that to create an in-memory thread. - It can then no longer be used with a agent with the `store` parameter set to `True`. -3. 
if neither of the above is set, we consider it `uninitialized` and depending on how it is used, it will either become a in-memory thread or a service thread. +A thread is then created in one of three ways: + +1. If the agent has a `thread_id` (or `conversation_id` or something similar) set, it will create a thread in the underlying service with that ID. Once a thread has a `service_thread_id`, you can no longer use it to store messages in memory. This only applies to agents that have a service-side thread concept. such as Azure AI Foundry Agents and OpenAI Assistants. +2. If the agent has a `chat_message_store_factory` set, it will use that factory to create a message store and use that to create an in-memory thread. It can then no longer be used with a agent with the `store` parameter set to `True`. +3. If neither of the previous settings is set, it's considered `uninitialized` and depending on how it is used, it will either become a in-memory thread or a service thread. -#### Agent Framework +### Agent Framework > [!NOTE] > OpenAI Responses introduced a new conversation model that simplifies how conversations are handled. This simplifies hosted thread management compared to the now deprecated OpenAI Assistants model. For more information see the [OpenAI Assistants migration guide](https://platform.openai.com/docs/assistants/migration). @@ -423,22 +422,23 @@ Agent Framework doesn't have a thread deletion API in the `AgentThread` type as If you require thread deletion and the provider allows this, the caller **should** keep track of the created threads and delete them later when necessary via the provider's sdk. -i.e: OpenAI Assistants Provider +OpenAI Assistants Provider: + ```python -# OpenAI Assistants threads have self-deletion method in SK +# OpenAI Assistants threads have self-deletion method in Semantic Kernel await thread.delete_async() ``` -### 5. Tool Registration +## 5. Tool Registration -#### Semantic Kernel +### Semantic Kernel -In semantic kernel to expose a function as a tool you must: +To expose a function as a tool, you must: 1. Decorate the function with a `@kernel_function` decorator. -2. Have a `Plugin` class or use the kernel plugin factory to wrap the function. -3. Have a `Kernel` to add your plugin to. -4. Pass the `Kernel` to the agent. +1. Have a `Plugin` class or use the kernel plugin factory to wrap the function. +1. Have a `Kernel` to add your plugin to. +1. Pass the `Kernel` to the agent. ```python from semantic_kernel.functions import kernel_function @@ -456,11 +456,11 @@ agent = ChatCompletionAgent( ) ``` -#### Agent Framework +### Agent Framework -In agent framework in a single call you can register tools directly in the agent creation process. But we no longer have the concept of a plugin, to wrap multiple functions, but you can still do that if you want to. +In a single call, you can register tools directly in the agent creation process. Agent Framework doesn't have the concept of a plugin to wrap multiple functions, but you can still do that if desired. 
-The simplest way to create a tool is just to create a python function: +The simplest way to create a tool is just to create a Python function: ```python def get_weather(location: str) -> str: @@ -469,7 +469,9 @@ def get_weather(location: str) -> str: agent = chat_client.create_agent(tools=get_weather) ``` -> Note: the `tools` parameter is present on both the agent creation, the `run` and `run_stream` methods, as well as the `get_response` and `get_streaming_response` methods, it allows you to supply tools both as a list or a single function. + +> [!NOTE] +> The `tools` parameter is present on both the agent creation, the `run` and `run_stream` methods, as well as the `get_response` and `get_streaming_response` methods, it allows you to supply tools both as a list or a single function. The name of the function will then become the name of the tool, and the docstring will become the description of the tool, you can also add a description to the parameters: @@ -495,7 +497,7 @@ def get_weather(location: Annotated[str, "The location to get the weather for."] This also works when you create a class with multiple tools as methods. -When creating the agent, we can now provide the function tool to the agent, by passing it to the `tools` parameter. +When creating the agent, you can now provide the function tool to the agent by passing it to the `tools` parameter. ```python class Plugin: @@ -520,15 +522,17 @@ agent = chat_client.create_agent(tools=[plugin.get_weather, plugin.get_weather_d print("Plugin state:", plugin.state) ``` -> Note: the functions within the class can also be decorated with `@ai_function` to customize the name and description of the tools. + +> [!NOTE] +> The functions within the class can also be decorated with `@ai_function` to customize the name and description of the tools. This mechanism is also useful for tools that need additional input that cannot be supplied by the LLM, such as connections, secrets, etc. -### 6. Agent Non-Streaming Invocation +## 6. Agent Non-Streaming Invocation -Key differences can be seen in the method names from `invoke` to `run`, return types (e.g. `AgentRunResponse`) and parameters. +Key differences can be seen in the method names from `invoke` to `run`, return types (for example, `AgentRunResponse`) and parameters. -#### Semantic Kernel +### Semantic Kernel The Non-Streaming invoke uses an async iterator pattern for returning multiple agent messages. @@ -540,18 +544,20 @@ async for response in agent.invoke( print(f"# {response.role}: {response}") thread = response.thread ``` -And we had a convenience method to get the final response: + +And there was a convenience method to get the final response: + ```python response = await agent.get_response(messages="How do I reset my bike tire?", thread=thread) print(f"# {response.role}: {response}") ``` -#### Agent Framework +### Agent Framework The Non-Streaming run returns a single `AgentRunResponse` with the agent response that can contain multiple messages. The text result of the run is available in `response.text` or `str(response)`. All messages created as part of the response are returned in the `response.messages` list. -This may include tool call messages, function results, reasoning updates and final results. +This might include tool call messages, function results, reasoning updates and final results. ```python agent = ... @@ -561,11 +567,11 @@ print("Agent response:", response.text) ``` -### 7. Agent Streaming Invocation +## 7. 
Agent Streaming Invocation Key differences in the method names from `invoke` to `run_stream`, return types (`AgentRunResponseUpdate`) and parameters. -#### Semantic Kernel +### Semantic Kernel ```python async for update in agent.invoke_stream( @@ -576,7 +582,7 @@ async for update in agent.invoke_stream( print(update.message.content, end="", flush=True) ``` -#### Agent Framework +### Agent Framework Similar streaming API pattern with the key difference being that it returns `AgentRunResponseUpdate` objects including more agent related information per update. @@ -603,10 +609,9 @@ full_response = AgentRunResponse.from_agent_response_generator(agent.run_stream( print("Full agent response:", full_response.text) ``` +## 8. Options Configuration -### 9. Options Configuration - -**Problem**: Complex options setup in SK +**Problem**: Complex options setup in Semantic Kernel ```python from semantic_kernel.connectors.ai.open_ai import OpenAIPromptExecutionSettings @@ -617,9 +622,9 @@ arguments = KernelArguments(settings) response = await agent.get_response(user_input, thread=thread, arguments=arguments) ``` -**Solution**: Simplified options in AF +**Solution**: Simplified options in Agent Framework -In agent framework, we allow the passing of all parameters directly to the relevant methods, so that you do not have to import anything extra, or create any options objects, unless you want to. Internally we use a `ChatOptions` object for `ChatClients` and `ChatAgents`, that you can also create and pass in if you want to. This is also created in a `ChatAgent` to hold the options and can be overridden per call. +Agent Framework allows the passing of all parameters directly to the relevant methods, so that you don't have to import anything extra, or create any options objects, unless you want to. Internally, it uses a `ChatOptions` object for `ChatClients` and `ChatAgents`, which you can also create and pass in if you want to. This is also created in a `ChatAgent` to hold the options and can be overridden per call. ```python agent = ... @@ -627,7 +632,8 @@ agent = ... response = await agent.run(user_input, thread, max_tokens=1000, frequency_penalty=0.5) ``` -> Note: The above is specific to a `ChatAgent`, because other agents may have different options, they should all accepts `messages` as a parameter, since that is defined in the `AgentProtocol`. +> [!NOTE] +> The above is specific to a `ChatAgent`, because other agents might have different options, they should all accepts `messages` as a parameter, since that is defined in the `AgentProtocol`. ::: zone-end diff --git a/agent-framework/overview/agent-framework-overview.md b/agent-framework/overview/agent-framework-overview.md index 3cbd5c16..f3d0f471 100644 --- a/agent-framework/overview/agent-framework-overview.md +++ b/agent-framework/overview/agent-framework-overview.md @@ -10,20 +10,16 @@ ms.service: agent-framework # Microsoft Agent Framework -The [Microsoft Agent Framework](https://github.com/microsoft/agent-framework) +[Microsoft Agent Framework](https://github.com/microsoft/agent-framework) is an open-source development kit for building **AI agents** and **multi-agent workflows** for .NET and Python. -It brings together and extends ideas from the [Semantic Kernel](https://github.com/microsoft/semantic-kernel) +It brings together and extends ideas from [Semantic Kernel](https://github.com/microsoft/semantic-kernel) and [AutoGen](https://github.com/microsoft/autogen) projects, combining their strengths while adding new capabilities. 
Built by the same teams, it is the unified foundation for building AI agents going forward. -The Agent Framework offers two primary categories of capabilities: +Agent Framework offers two primary categories of capabilities: -- [AI Agents](#ai-agents): individual agents that use LLMs to process user inputs, - call tools and MCP servers to perform actions, and generate responses. Agents support - model providers including Azure OpenAI, OpenAI, and Azure AI. -- [Workflows](#workflows): graph-based workflows that connect multiple agents - and functions to perform complex, multi-step tasks. Workflows support type-based routing, - nesting, checkpointing, and request/response patterns for human-in-the-loop scenarios. +- [AI agents](#ai-agents): Individual agents that use LLMs to process user inputs, call tools and MCP servers to perform actions, and generate responses. Agents support model providers including Azure OpenAI, OpenAI, and Azure AI. +- [Workflows](#workflows): Graph-based workflows that connect multiple agents and functions to perform complex, multi-step tasks. Workflows support type-based routing, nesting, checkpointing, and request/response patterns for human-in-the-loop scenarios. The framework also provides foundational building blocks, including model clients (chat completions and responses), an agent thread for state management, context providers for agent memory, @@ -37,10 +33,10 @@ interactive, robust, and safe AI applications. and [AutoGen](https://github.com/microsoft/autogen) pioneered the concepts of AI agents and multi-agent orchestration. The Agent Framework is the direct successor, created by the same teams. It combines AutoGen's simple abstractions for single- and multi-agent patterns with Semantic Kernel's enterprise-grade features such as thread-based state management, type safety, filters, telemetry, and extensive model and embedding support. Beyond merging the two, -the Agent Framework introduces workflows that give developers explicit control over +Agent Framework introduces workflows that give developers explicit control over multi-agent execution paths, plus a robust state management system for long-running and human-in-the-loop scenarios. -In short, the Agent Framework is the next generation of +In short, Agent Framework is the next generation of both Semantic Kernel and AutoGen. To learn more about migrating from either Semantic Kernel or AutoGen, @@ -48,13 +44,13 @@ see the [Migration Guide from Semantic Kernel](../migration-guide/from-semantic- and [Migration Guide from AutoGen](../migration-guide/from-autogen/index.md). Both Semantic Kernel and AutoGen have benefited significantly from the open-source community, -and we expect the same for the Agent Framework. The Microsoft Agent Framework will continue to welcome contributions and will keep improving with new features and capabilities. +and the same is expected for Agent Framework. Microsoft Agent Framework welcomes contributions and will keep improving with new features and capabilities. > [!NOTE] > Microsoft Agent Framework is currently in public preview. Please submit any feedback or issues on the [GitHub repository](https://github.com/microsoft/agent-framework). > [!IMPORTANT] -> If you use the Microsoft Agent Framework to build applications that operate with third-party servers or agents, you do so at your own risk. We recommend reviewing all data being shared with third-party servers or agents and being cognizant of third-party practices for retention and location of data. 
It is your responsibility to manage whether your data will flow outside of your organization’s Azure compliance and geographic boundaries and any related implications. +> If you use Microsoft Agent Framework to build applications that operate with third-party servers or agents, you do so at your own risk. We recommend reviewing all data being shared with third-party servers or agents and being cognizant of third-party practices for retention and location of data. It is your responsibility to manage whether your data will flow outside of your organization's Azure compliance and geographic boundaries and any related implications. ## Installation @@ -66,7 +62,7 @@ pip install agent-framework .NET: -```bash +```dotnetcli dotnet add package Microsoft.Agents.AI ``` @@ -107,20 +103,20 @@ Here are some common scenarios where AI agents excel: The key is that AI agents are designed to operate in a dynamic and underspecified setting, where the exact sequence of steps to fulfill a user request is not known -in advance and may require exploration and close collaboration with users. +in advance and might require exploration and close collaboration with users. ### When not to use an AI agent? AI agents are not well-suited for tasks that are highly structured and require strict adherence to predefined rules. If your application anticipates a specific kind of input and has a well-defined -sequence of operations to perform, using AI agents may introduce unnecessary +sequence of operations to perform, using AI agents might introduce unnecessary uncertainty, latency, and cost. _If you can write a function to handle the task, do that instead of using an AI agent. You can use AI to help you write that function._ -A single AI agent may struggle with complex tasks that involve multiple steps -and decision points. Such tasks may require a large number of tools (e.g., over 20), +A single AI agent might struggle with complex tasks that involve multiple steps +and decision points. Such tasks might require a large number of tools (for example, over 20), which a single agent cannot feasibly manage. In these cases, consider using workflows instead. @@ -129,7 +125,7 @@ In these cases, consider using workflows instead. ### What is a Workflow? -A **workflow** can express a predefined sequence of operations that can include AI agents as components while maintaining consistency and reliability. Workflows are designed to handle complex and long-running processes that may involve multiple agents, human interactions, and integrations with external systems. +A **workflow** can express a predefined sequence of operations that can include AI agents as components while maintaining consistency and reliability. Workflows are designed to handle complex and long-running processes that might involve multiple agents, human interactions, and integrations with external systems. The execution sequence of a workflow can be explicitly defined, allowing for more control over the execution path. The following diagram illustrates an example of a workflow that connects two AI agents and a function: @@ -137,7 +133,7 @@ The execution sequence of a workflow can be explicitly defined, allowing for mor Workflows can also express dynamic sequences using conditional routing, model-based decision making, and concurrent -execution. This is how our [multi-agent orchestration patterns](../user-guide/workflows/orchestrations/overview.md) are implemented. +execution. 
This is how [multi-agent orchestration patterns](../user-guide/workflows/orchestrations/overview.md) are implemented. The orchestration patterns provide mechanisms to coordinate multiple agents to work on complex tasks that require multiple steps and decision points, addressing the limitations of single agents. diff --git a/agent-framework/tutorials/agents/agent-as-function-tool.md b/agent-framework/tutorials/agents/agent-as-function-tool.md index d3b09339..d116b9ef 100644 --- a/agent-framework/tutorials/agents/agent-as-function-tool.md +++ b/agent-framework/tutorials/agents/agent-as-function-tool.md @@ -17,14 +17,14 @@ This tutorial shows you how to use an agent as a function tool, so that one agen ## Prerequisites -For prerequisites and installing nuget packages, see the [Create and run a simple agent](./run-agent.md) step in this tutorial. +For prerequisites and installing NuGet packages, see the [Create and run a simple agent](./run-agent.md) step in this tutorial. -## Creating and using an agent as a function tool +## Create and use an agent as a function tool You can use an `AIAgent` as a function tool by calling `.AsAIFunction()` on the agent and providing it as a tool to another agent. This allows you to compose agents and build more advanced workflows. First, create a function tool as a C# method, and decorate it with descriptions if needed. -This tool will be used by our agent that is exposed as a function. +This tool will be used by your agent that's exposed as a function. ```csharp using System.ComponentModel; @@ -80,11 +80,11 @@ This tutorial shows you how to use an agent as a function tool, so that one agen For prerequisites and installing packages, see the [Create and run a simple agent](./run-agent.md) step in this tutorial. -## Creating and using an agent as a function tool +## Create and use an agent as a function tool You can use a `ChatAgent` as a function tool by calling `.as_tool()` on the agent and providing it as a tool to another agent. This allows you to compose agents and build more advanced workflows. -First, create a function tool that will be used by our agent that is exposed as a function. +First, create a function tool that will be used by your agent that's exposed as a function. ```python from typing import Annotated @@ -134,7 +134,7 @@ You can also customize the tool name, description, and argument name when conver weather_tool = weather_agent.as_tool( name="WeatherLookup", description="Look up weather information for any location", - arg_name="query", + arg_name="query", arg_description="The weather query or location" ) diff --git a/agent-framework/tutorials/agents/agent-as-mcp-tool.md b/agent-framework/tutorials/agents/agent-as-mcp-tool.md index 43714d6b..eef8e221 100644 --- a/agent-framework/tutorials/agents/agent-as-mcp-tool.md +++ b/agent-framework/tutorials/agents/agent-as-mcp-tool.md @@ -8,7 +8,7 @@ ms.date: 09/24/2025 ms.service: agent-framework --- -# Exposing an agent as an MCP tool +# Expose an agent as an MCP tool This tutorial shows you how to expose an agent as a tool over the Model Context Protocol (MCP), so it can be used by other systems that support MCP tools. @@ -16,28 +16,28 @@ This tutorial shows you how to expose an agent as a tool over the Model Context For prerequisites see the [Create and run a simple agent](./run-agent.md) step in this tutorial. 
-## Installing Nuget packages +## Install NuGet packages -To use the Microsoft Agent Framework with Azure OpenAI, you need to install the following NuGet packages: +To use Microsoft Agent Framework with Azure OpenAI, you need to install the following NuGet packages: -```powershell +```dotnetcli dotnet add package Azure.Identity dotnet add package Azure.AI.OpenAI dotnet add package Microsoft.Agents.AI.OpenAI --prerelease ``` -To add support for hosting a tool over the Model Context Protocol (MCP), add the following Nuget packages +To add support for hosting a tool over the Model Context Protocol (MCP), add the following NuGet packages -```powershell +```dotnetcli dotnet add package Microsoft.Extensions.Hosting --prerelease dotnet add package ModelContextProtocol --prerelease ``` -## Exposing an agent as an MCP tool +## Expose an agent as an MCP tool You can expose an `AIAgent` as an MCP tool by wrapping it in a function and using `McpServerTool`. You then need to register it with an MCP server. This allows the agent to be invoked as a tool by any MCP-compatible client. -First, create an agent that we will expose as an MCP tool. +First, create an agent that you'll expose as an MCP tool. ```csharp using System; diff --git a/agent-framework/tutorials/agents/enable-observability.md b/agent-framework/tutorials/agents/enable-observability.md index aa6d2a6d..4e4f27aa 100644 --- a/agent-framework/tutorials/agents/enable-observability.md +++ b/agent-framework/tutorials/agents/enable-observability.md @@ -17,17 +17,17 @@ This tutorial shows how to enable OpenTelemetry on an agent so that interactions In this tutorial, output is written to the console using the OpenTelemetry console exporter. > [!NOTE] -> See [Semantic Conventions for GenAI agent and framework spans](https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-agent-spans/) from Open Telemetry for more information about the standards followed by the Microsoft Agent Framework. +> For more information about the standards followed by Microsoft Agent Framework, see [Semantic Conventions for GenAI agent and framework spans](https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-agent-spans/) from Open Telemetry. ## Prerequisites For prerequisites, see the [Create and run a simple agent](./run-agent.md) step in this tutorial. -## Installing Nuget packages +## Install NuGet packages -To use the Agent Framework with Azure OpenAI, you need to install the following NuGet packages: +To use Agent Framework with Azure OpenAI, you need to install the following NuGet packages: -```powershell +```dotnetcli dotnet add package Azure.Identity dotnet add package Azure.AI.OpenAI dotnet add package Microsoft.Agents.AI.OpenAI --prerelease @@ -35,15 +35,15 @@ dotnet add package Microsoft.Agents.AI.OpenAI --prerelease To also add OpenTelemetry support, with support for writing to the console, install these additional packages: -```powershell +```dotnetcli dotnet add package OpenTelemetry dotnet add package OpenTelemetry.Exporter.Console ``` ## Enable OpenTelemetry in your app -Enable the agent framework telemetry and create an OpenTelemetry `TracerProvider` that exports to the console. -Note that the `TracerProvider` must remain alive while you run the agent so traces are exported. +Enable Agent Framework telemetry and create an OpenTelemetry `TracerProvider` that exports to the console. +The `TracerProvider` must remain alive while you run the agent so traces are exported. 
```csharp using System; @@ -60,8 +60,8 @@ using var tracerProvider = Sdk.CreateTracerProviderBuilder() ## Create and instrument the agent Create an agent, and using the builder pattern, call `UseOpenTelemetry` to provide a source name. -Note that the string literal "agent-telemetry-source" is the OpenTelemetry source name -that we used above, when we created the tracer provider. +Note that the string literal `agent-telemetry-source` is the OpenTelemetry source name +that you used when you created the tracer provider. ```csharp using System; @@ -134,9 +134,9 @@ In this tutorial, output is written to the console using the OpenTelemetry conso For prerequisites, see the [Create and run a simple agent](./run-agent.md) step in this tutorial. -## Installing packages +## Install packages -To use the Agent Framework with Azure OpenAI, you need to install the following packages. The agent framework automatically includes all necessary OpenTelemetry dependencies: +To use Agent Framework with Azure OpenAI, you need to install the following packages. Agent Framework automatically includes all necessary OpenTelemetry dependencies: ```bash pip install agent-framework @@ -154,14 +154,14 @@ opentelemetry-semantic-conventions-ai ## Enable OpenTelemetry in your app -The agent framework provides a convenient `setup_observability` function that configures OpenTelemetry with sensible defaults. +Agent Frameworkagent framework provides a convenient `setup_observability` function that configures OpenTelemetry with sensible defaults. By default, it exports to the console if no specific exporter is configured. ```python import asyncio from agent_framework.observability import setup_observability -# Enable agent framework telemetry with console output (default behavior) +# Enable Agent Framework telemetry with console output (default behavior) setup_observability(enable_sensitive_data=True) ``` @@ -171,7 +171,7 @@ The `setup_observability` function accepts the following parameters to customize - **`enable_otel`** (bool, optional): Enables OpenTelemetry tracing and metrics. Default is `False` when using environment variables only, but is assumed `True` when calling `setup_observability()` programmatically. When using environment variables, set `ENABLE_OTEL=true`. -- **`enable_sensitive_data`** (bool, optional): Controls whether sensitive data like prompts, responses, function call arguments, and results are included in traces. Default is `False`. Set to `True` to see actual prompts and responses in your traces. **Warning**: Be careful with this setting as it may expose sensitive data in your logs. Can also be set via `ENABLE_SENSITIVE_DATA=true` environment variable. +- **`enable_sensitive_data`** (bool, optional): Controls whether sensitive data like prompts, responses, function call arguments, and results are included in traces. Default is `False`. Set to `True` to see actual prompts and responses in your traces. **Warning**: Be careful with this setting as it might expose sensitive data in your logs. Can also be set via `ENABLE_SENSITIVE_DATA=true` environment variable. - **`otlp_endpoint`** (str, optional): The OTLP endpoint URL for exporting telemetry data. Default is `None`. Commonly set to `http://localhost:4317`. This creates an OTLPExporter for spans, metrics, and logs. Can be used with any OTLP-compliant endpoint such as [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/), [Aspire Dashboard](/dotnet/aspire/fundamentals/dashboard/overview?tabs=bash), or other OTLP endpoints. 
Can also be set via `OTLP_ENDPOINT` environment variable. @@ -242,7 +242,7 @@ counter.add(1, {"key": "value"}) ## Create and run the agent -Create an agent using the agent framework. The observability will be automatically enabled for the agent once `setup_observability` has been called. +Create an agent using Agent Framework. The observability will be automatically enabled for the agent once `setup_observability` has been called. ```python from agent_framework import ChatAgent @@ -304,7 +304,7 @@ Because he wanted to improve his "arrr-ticulation"! ⛵ ## Understanding the telemetry output -Once observability is enabled, the agent framework automatically creates the following spans: +Once observability is enabled, Agent Framework automatically creates the following spans: - **`invoke_agent `**: The top-level span for each agent invocation. Contains all other spans as children and includes metadata like agent ID, name, and instructions. diff --git a/agent-framework/tutorials/agents/function-tools-approvals.md b/agent-framework/tutorials/agents/function-tools-approvals.md index d25a5911..06cddd25 100644 --- a/agent-framework/tutorials/agents/function-tools-approvals.md +++ b/agent-framework/tutorials/agents/function-tools-approvals.md @@ -19,15 +19,14 @@ The caller of the agent is then responsible for getting the required input from ## Prerequisites -For prerequisites and installing nuget packages, see the [Create and run a simple agent](./run-agent.md) step in this tutorial. +For prerequisites and installing NuGet packages, see the [Create and run a simple agent](./run-agent.md) step in this tutorial. -## Creating the agent with function tools +## Create the agent with function tools When using functions, it's possible to indicate for each function, whether it requires human approval before being executed. This is done by wrapping the `AIFunction` instance in an `ApprovalRequiredAIFunction` instance. Here is an example of a simple function tool that fakes getting the weather for a given location. -For simplicity we are also listing all required usings for this sample here. ```csharp using System; @@ -51,7 +50,7 @@ AIFunction weatherFunction = AIFunctionFactory.Create(GetWeather); AIFunction approvalRequiredWeatherFunction = new ApprovalRequiredAIFunction(weatherFunction); ``` -When creating the agent, we can now provide the approval requiring function tool to the agent, by passing a list of tools to the `CreateAIAgent` method. +When creating the agent, you can now provide the approval requiring function tool to the agent, by passing a list of tools to the `CreateAIAgent` method. ```csharp AIAgent agent = new AzureOpenAIClient( @@ -61,8 +60,8 @@ AIAgent agent = new AzureOpenAIClient( .CreateAIAgent(instructions: "You are a helpful assistant", tools: [approvalRequiredWeatherFunction]); ``` -Since we now have a function that requires approval, the agent may respond with a request for approval, instead of executing the function directly and returning the result. -We can check the response content for any `FunctionApprovalRequestContent` instances, which indicates that the agent requires user approval for a function. +Since you now have a function that requires approval, the agent might respond with a request for approval, instead of executing the function directly and returning the result. +You can check the response content for any `FunctionApprovalRequestContent` instances, which indicates that the agent requires user approval for a function. 
```csharp AgentThread thread = agent.GetNewThread(); @@ -76,14 +75,14 @@ var functionApprovalRequests = response.Messages If there are any function approval requests, the detail of the function call including name and arguments can be found in the `FunctionCall` property on the `FunctionApprovalRequestContent` instance. This can be shown to the user, so that they can decide whether to approve or reject the function call. -For our example, we will assume there is one request. +For this example, assume there is one request. ```csharp FunctionApprovalRequestContent requestContent = functionApprovalRequests.First(); Console.WriteLine($"We require approval to execute '{requestContent.FunctionCall.Name}'"); ``` -Once the user has provided their input, we can create a `FunctionApprovalResponseContent` instance using the `CreateResponse` method on the `FunctionApprovalRequestContent`. +Once the user has provided their input, you can create a `FunctionApprovalResponseContent` instance using the `CreateResponse` method on the `FunctionApprovalRequestContent`. Pass `true` to approve the function call, or `false` to reject it. The response content can then be passed to the agent in a new `User` `ChatMessage`, along with the same thread object to get the result back from the agent. diff --git a/agent-framework/tutorials/agents/function-tools.md b/agent-framework/tutorials/agents/function-tools.md index feda80e3..0d09f047 100644 --- a/agent-framework/tutorials/agents/function-tools.md +++ b/agent-framework/tutorials/agents/function-tools.md @@ -16,13 +16,13 @@ This tutorial step shows you how to use function tools with an agent, where the ::: zone pivot="programming-language-csharp" > [!IMPORTANT] -> Not all agent types support function tools. Some may only support custom built-in tools, without allowing the caller to provide their own functions. In this step we are using a `ChatClientAgent`, which does support function tools. +> Not all agent types support function tools. Some might only support custom built-in tools, without allowing the caller to provide their own functions. This step uses a `ChatClientAgent`, which does support function tools. ## Prerequisites -For prerequisites and installing nuget packages, see the [Create and run a simple agent](./run-agent.md) step in this tutorial. +For prerequisites and installing NuGet packages, see the [Create and run a simple agent](./run-agent.md) step in this tutorial. -## Creating the agent with function tools +## Create the agent with function tools Function tools are just custom code that you want the agent to be able to call when needed. You can turn any C# method into a function tool, by using the `AIFunctionFactory.Create` method to create an `AIFunction` instance from the method. @@ -40,7 +40,7 @@ static string GetWeather([Description("The location to get the weather for.")] s => $"The weather in {location} is cloudy with a high of 15°C."; ``` -When creating the agent, we can now provide the function tool to the agent, by passing a list of tools to the `CreateAIAgent` method. +When creating the agent, you can now provide the function tool to the agent, by passing a list of tools to the `CreateAIAgent` method. ```csharp using System; @@ -57,7 +57,7 @@ AIAgent agent = new AzureOpenAIClient( .CreateAIAgent(instructions: "You are a helpful assistant", tools: [AIFunctionFactory.Create(GetWeather)]); ``` -Now we can just run the agent as normal, and the agent will be able to call the `GetWeather` function tool when needed. 
+Now you can just run the agent as normal, and the agent will be able to call the `GetWeather` function tool when needed. ```csharp Console.WriteLine(await agent.RunAsync("What is the weather like in Amsterdam?")); @@ -67,13 +67,13 @@ Console.WriteLine(await agent.RunAsync("What is the weather like in Amsterdam?") ::: zone pivot="programming-language-python" > [!IMPORTANT] -> Not all agent types support function tools. Some may only support custom built-in tools, without allowing the caller to provide their own functions. In this step we are using agents created via chat clients, which do support function tools. +> Not all agent types support function tools. Some might only support custom built-in tools, without allowing the caller to provide their own functions. This step uses agents created via chat clients, which do support function tools. ## Prerequisites For prerequisites and installing Python packages, see the [Create and run a simple agent](./run-agent.md) step in this tutorial. -## Creating the agent with function tools +## Create the agent with function tools Function tools are just custom code that you want the agent to be able to call when needed. You can turn any Python function into a function tool by passing it to the agent's `tools` parameter when creating the agent. @@ -110,7 +110,7 @@ def get_weather( If you don't specify the `name` and `description` parameters in the `ai_function` decorator, the framework will automatically use the function's name and docstring as fallbacks. -When creating the agent, we can now provide the function tool to the agent, by passing it to the `tools` parameter. +When creating the agent, you can now provide the function tool to the agent, by passing it to the `tools` parameter. ```python import asyncio @@ -123,7 +123,7 @@ agent = AzureOpenAIChatClient(credential=AzureCliCredential()).create_agent( ) ``` -Now we can just run the agent as normal, and the agent will be able to call the `get_weather` function tool when needed. +Now you can just run the agent as normal, and the agent will be able to call the `get_weather` function tool when needed. ```python async def main(): @@ -133,7 +133,7 @@ async def main(): asyncio.run(main()) ``` -## Creating a class with multiple function tools +## Create a class with multiple function tools You can also create a class that contains multiple function tools as methods. This can be useful for organizing related functions together or when you want to pass state between them. @@ -159,7 +159,7 @@ class WeatherTools: ``` -When creating the agent, we can now provide all the methods of the class as functions: +When creating the agent, you can now provide all the methods of the class as functions: ```python tools = WeatherTools() diff --git a/agent-framework/tutorials/agents/images.md b/agent-framework/tutorials/agents/images.md index df2cc4c0..1fd6d51c 100644 --- a/agent-framework/tutorials/agents/images.md +++ b/agent-framework/tutorials/agents/images.md @@ -15,7 +15,7 @@ This tutorial shows you how to use images with an agent, allowing the agent to a ## Prerequisites -For prerequisites and installing nuget packages, see the [Create and run a simple agent](./run-agent.md) step in this tutorial. +For prerequisites and installing NuGet packages, see the [Create and run a simple agent](./run-agent.md) step in this tutorial. ::: zone pivot="programming-language-csharp" @@ -78,7 +78,7 @@ Next, create a `ChatMessage` that contains both a text prompt and an image URL. 
from agent_framework import ChatMessage, TextContent, UriContent, Role message = ChatMessage( - role=Role.USER, + role=Role.USER, contents=[ TextContent(text="What do you see in this image?"), UriContent( @@ -99,7 +99,7 @@ with open("path/to/your/image.jpg", "rb") as f: image_bytes = f.read() message = ChatMessage( - role=Role.USER, + role=Role.USER, contents=[ TextContent(text="What do you see in this image?"), DataContent( diff --git a/agent-framework/tutorials/agents/memory.md b/agent-framework/tutorials/agents/memory.md index e29370bc..79b2d06f 100644 --- a/agent-framework/tutorials/agents/memory.md +++ b/agent-framework/tutorials/agents/memory.md @@ -15,14 +15,14 @@ ms.service: agent-framework This tutorial shows how to add memory to an agent by implementing an `AIContextProvider` and attaching it to the agent. > [!IMPORTANT] -> Not all agent types support `AIContextProvider`. In this step we are using a `ChatClientAgent`, which does support `AIContextProvider`. +> Not all agent types support `AIContextProvider`. This step uses a `ChatClientAgent`, which does support `AIContextProvider`. ## Prerequisites -For prerequisites and installing nuget packages, see the [Create and run a simple agent](./run-agent.md) step in this tutorial. +For prerequisites and installing NuGet packages, see the [Create and run a simple agent](./run-agent.md) step in this tutorial. -## Creating an AIContextProvider +## Create an AIContextProvider `AIContextProvider` is an abstract class that you can inherit from, and which can be associated with the `AgentThread` for a `ChatClientAgent`. It allows you to: @@ -42,15 +42,15 @@ The `AIContextProvider` class has two methods that you can override to run custo `AIContextProvider` instances are created and attached to an `AgentThread` when the thread is created, and when a thread is resumed from a serialized state. -The `AIContextProvider` instance may have its own state that needs to be persisted between invocations of the agent. E.g. a memory component that remembers information about the user may have memories as part of its state. +The `AIContextProvider` instance might have its own state that needs to be persisted between invocations of the agent. For example, a memory component that remembers information about the user might have memories as part of its state. To allow persisting threads, you need to implement the `SerializeAsync` method of the `AIContextProvider` class. You also need to provide a constructor that takes a `JsonElement` parameter, which can be used to deserialize the state when resuming a thread. ### Sample AIContextProvider implementation -Let's look at an example of a custom memory component that remembers a user's name and age, and provides it to the agent before each invocation. +The following example of a custom memory component remembers a user's name and age and provides it to the agent before each invocation. -First we'll create a model class to hold the memories. +First, create a model class to hold the memories. ```csharp internal sealed class UserInfo @@ -60,7 +60,7 @@ internal sealed class UserInfo } ``` -Then we can implement the `AIContextProvider` to manage the memories. +Then you can implement the `AIContextProvider` to manage the memories. The `UserInfoMemory` class below contains the following behavior: 1. It uses a `IChatClient` to look for the user's name and age in user messages when new messages are added to the thread at the end of each run. 
@@ -178,13 +178,13 @@ Console.WriteLine($"MEMORY - User Age: {userInfo?.UserAge}"); This tutorial shows how to add memory to an agent by implementing a `ContextProvider` and attaching it to the agent. > [!IMPORTANT] -> Not all agent types support `ContextProvider`. In this step we are using a `ChatAgent`, which does support `ContextProvider`. +> Not all agent types support `ContextProvider`. This step uses a `ChatAgent`, which does support `ContextProvider`. ## Prerequisites For prerequisites and installing packages, see the [Create and run a simple agent](./run-agent.md) step in this tutorial. -## Creating a ContextProvider +## Create a ContextProvider `ContextProvider` is an abstract class that you can inherit from, and which can be associated with an `AgentThread` for a `ChatAgent`. It allows you to: @@ -204,15 +204,15 @@ The `ContextProvider` class has two methods that you can override to run custom `ContextProvider` instances are created and attached to an `AgentThread` when the thread is created, and when a thread is resumed from a serialized state. -The `ContextProvider` instance may have its own state that needs to be persisted between invocations of the agent. E.g. a memory component that remembers information about the user may have memories as part of its state. +The `ContextProvider` instance might have its own state that needs to be persisted between invocations of the agent. For example, a memory component that remembers information about the user might have memories as part of its state. To allow persisting threads, you need to implement serialization for the `ContextProvider` class. You also need to provide a constructor that can restore state from serialized data when resuming a thread. ### Sample ContextProvider implementation -Let's look at an example of a custom memory component that remembers a user's name and age, and provides it to the agent before each invocation. +The following example of a custom memory component remembers a user's name and age and provides it to the agent before each invocation. -First we'll create a model class to hold the memories. +First, create a model class to hold the memories. ```python from pydantic import BaseModel @@ -222,7 +222,7 @@ class UserInfo(BaseModel): age: int | None = None ``` -Then we can implement the `ContextProvider` to manage the memories. +Then you can implement the `ContextProvider` to manage the memories. The `UserInfoMemory` class below contains the following behavior: 1. It uses a chat client to look for the user's name and age in user messages when new messages are added to the thread at the end of each run. diff --git a/agent-framework/tutorials/agents/middleware.md b/agent-framework/tutorials/agents/middleware.md index 864e965b..76b2b122 100644 --- a/agent-framework/tutorials/agents/middleware.md +++ b/agent-framework/tutorials/agents/middleware.md @@ -17,11 +17,11 @@ Learn how to add middleware to your agents in a few simple steps. Middleware all ## Prerequisites -For prerequisites and installing nuget packages, see the [Create and run a simple agent](./run-agent.md) step in this tutorial. +For prerequisites and installing NuGet packages, see the [Create and run a simple agent](./run-agent.md) step in this tutorial. ## Step 1: Create a Simple Agent -First, let's create a basic agent with a function tool. +First, create a basic agent with a function tool. 
```csharp
using System;
@@ -46,8 +46,8 @@ AIAgent baseAgent = new AzureOpenAIClient(

## Step 2: Create Your Agent Run Middleware

-Next, we'll create a function that will get invoked for each agent run.
-It allows us to inspect the input and output from the agent.
+Next, create a function that will get invoked for each agent run.
+It allows you to inspect the input and output from the agent.

Unless the intention is to use the middleware to stop executing the run, the function
should call `RunAsync` on the provided `innerAgent`.

@@ -72,8 +72,7 @@ async Task CustomAgentRunMiddleware(

## Step 3: Add Agent Run Middleware to Your Agent

-To add this middleware function to the `baseAgent` we created in step 1,
-we should use the builder pattern.
+To add this middleware function to the `baseAgent` you created in step 1, use the builder pattern.
This creates a new agent that has the middleware applied.
The original `baseAgent` is not modified.

@@ -87,13 +86,12 @@ var middlewareEnabledAgent = baseAgent

## Step 4: Create Function calling Middleware

> [!NOTE]
-> Function calling middleware is currently only supported with an `AIAgent` that uses `Microsoft.Extensions.AI.FunctionInvokingChatClient`, e.g. `ChatClientAgent`.
+> Function calling middleware is currently only supported with an `AIAgent` that uses `Microsoft.Extensions.AI.FunctionInvokingChatClient`, for example, `ChatClientAgent`.

-We can also create middleware that gets called for each function tool that is invoked.
-Here is an example of function calling middleware, that can inspect and/or modify the function being called, and the result from the function call.
+You can also create middleware that gets called for each function tool that's invoked.
+Here's an example of function-calling middleware that can inspect and/or modify the function being called and the result from the function call.

-Unless the intention is to use the middleware to not execute the function tool, the middleware
-should call the provided `next` `Func`.
+Unless the intention is to use the middleware to not execute the function tool, the middleware should call the provided `next` `Func`.

```csharp
async ValueTask CustomFunctionCallingMiddleware(
@@ -112,7 +110,7 @@ async ValueTask CustomFunctionCallingMiddleware(

## Step 5: Add Function calling Middleware to Your Agent

-Same as with adding agent run middleware, we can add function calling middleware as follows:
+Same as with adding agent-run middleware, you can add function calling middleware as follows:

```csharp
var middlewareEnabledAgent = baseAgent
@@ -130,10 +128,10 @@ await middlewareEnabledAgent.RunAsync("What's the current time?");

## Step 6: Create Chat Client Middleware

-For agents that are built using `IChatClient` developers may want to intercept calls going from the agent to the `IChatClient`.
-In this case it is possible to use middleware for the `IChatClient`.
+For agents that are built using `IChatClient`, you might want to intercept calls going from the agent to the `IChatClient`.
+In this case, it's possible to use middleware for the `IChatClient`.

-Here is an example of chat client middleware, that can inspect and/or modify the input and output for the request to the inference service that the chat client provides.
+Here is an example of chat client middleware that can inspect and/or modify the input and output for the request to the inference service that the chat client provides.
```csharp
async Task CustomChatClientMiddleware(
@@ -151,12 +149,11 @@
```

> [!NOTE]
-> For more information about `IChatClient` middleware, see [Custom IChatClient middleware](/dotnet/ai/microsoft-extensions-ai#custom-ichatclient-middleware)
-> in the Microsoft.Extensions.AI documentation.
+> For more information about `IChatClient` middleware, see [Custom IChatClient middleware](/dotnet/ai/microsoft-extensions-ai#custom-ichatclient-middleware).

## Step 7: Add Chat client Middleware to an `IChatClient`

-To add middleware to your `IChatClient`, you can use the builder pattern.
+To add middleware to your `IChatClient`, you can use the builder pattern.
After adding the middleware, you can use the `IChatClient` with your agent as usual.

```csharp
@@ -189,7 +186,7 @@ var agent = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential())

## Step 1: Create a Simple Agent

-First, let's create a basic agent:
+First, create a basic agent:

```python
import asyncio
@@ -294,11 +291,11 @@ result = await agent.run(

## What's Next?

-For more advanced scenarios, check out the [Agent Middleware User Guide](../../user-guide/agents/agent-middleware.md) which covers:
+For more advanced scenarios, see the [Agent Middleware User Guide](../../user-guide/agents/agent-middleware.md), which covers:

-- Different types of middleware (agent, function, chat)
-- Class-based middleware for complex scenarios
-- Middleware termination and result overrides
-- Advanced middleware patterns and best practices
+- Different types of middleware (agent, function, chat).
+- Class-based middleware for complex scenarios.
+- Middleware termination and result overrides.
+- Advanced middleware patterns and best practices.

::: zone-end
diff --git a/agent-framework/tutorials/agents/multi-turn-conversation.md b/agent-framework/tutorials/agents/multi-turn-conversation.md
index 4fd93b0e..d2685b73 100644
--- a/agent-framework/tutorials/agents/multi-turn-conversation.md
+++ b/agent-framework/tutorials/agents/multi-turn-conversation.md
@@ -14,7 +14,7 @@ ms.service: agent-framework
This tutorial step shows you how to have a multi-turn conversation with an agent, where the agent is built on the Azure OpenAI Chat Completion service.

> [!IMPORTANT]
-> The agent framework supports many different types of agents. This tutorial uses an agent based on a Chat Completion service, but all other agent types are run in the same way. See the [Agent Framework user guide](../../user-guide/overview.md) for more information on other agent types and how to construct them.
+> Agent Framework supports many different types of agents. This tutorial uses an agent based on a Chat Completion service, but all other agent types are run in the same way. For more information on other agent types and how to construct them, see the [Agent Framework user guide](../../user-guide/overview.md).

## Prerequisites

@@ -43,7 +43,7 @@ Console.WriteLine(await agent.RunAsync("Now add some emojis to the joke and tell

This will maintain the conversation state between the calls, and the agent will be able to refer to previous input and response messages in the conversation when responding to new input.

> [!IMPORTANT]
-> The type of service that is used by the `AIAgent` will determine how conversation history is stored. E.g. when using a ChatCompletion service, like in this example, the conversation history is stored in the AgentThread object and sent to the service on each call.
When using the Azure AI Agent service on the other hand, the conversation history is stored in the Azure AI Agent service and only a reference to the conversation is sent to the service on each call. +> The type of service that is used by the `AIAgent` will determine how conversation history is stored. For example, when using a ChatCompletion service, like in this example, the conversation history is stored in the AgentThread object and sent to the service on each call. When using the Azure AI Agent service on the other hand, the conversation history is stored in the Azure AI Agent service and only a reference to the conversation is sent to the service on each call. ## Single agent with multiple conversations @@ -80,7 +80,7 @@ You can then pass this thread object to the `run` and `run_stream` methods on th async def main(): result1 = await agent.run("Tell me a joke about a pirate.", thread=thread) print(result1.text) - + result2 = await agent.run("Now add some emojis to the joke and tell it in the voice of a pirate's parrot.", thread=thread) print(result2.text) @@ -90,7 +90,7 @@ asyncio.run(main()) This will maintain the conversation state between the calls, and the agent will be able to refer to previous input and response messages in the conversation when responding to new input. > [!IMPORTANT] -> The type of service that is used by the agent will determine how conversation history is stored. E.g. when using a Chat Completion service, like in this example, the conversation history is stored in the AgentThread object and sent to the service on each call. When using the Azure AI Agent service on the other hand, the conversation history is stored in the Azure AI Agent service and only a reference to the conversation is sent to the service on each call. +> The type of service that is used by the agent will determine how conversation history is stored. For example, when using a Chat Completion service, like in this example, the conversation history is stored in the AgentThread object and sent to the service on each call. When using the Azure AI Agent service on the other hand, the conversation history is stored in the Azure AI Agent service and only a reference to the conversation is sent to the service on each call. 
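To put the pieces of this step together, here's a minimal sketch of an interactive multi-turn loop. It assumes the `agent` you created earlier in this tutorial step; the console input handling and exit keywords are illustrative only.

```python
import asyncio

async def chat_loop() -> None:
    # Reuse one thread so every turn shares the same conversation history.
    thread = agent.get_new_thread()
    while True:
        user_input = input("You: ")
        if user_input.strip().lower() in ("exit", "quit"):
            break
        result = await agent.run(user_input, thread=thread)
        print(f"Agent: {result.text}")

asyncio.run(chat_loop())
```
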
## Single agent with multiple conversations @@ -102,16 +102,16 @@ The conversations will be fully independent of each other, since the agent does async def main(): thread1 = agent.get_new_thread() thread2 = agent.get_new_thread() - + result1 = await agent.run("Tell me a joke about a pirate.", thread=thread1) print(result1.text) - + result2 = await agent.run("Tell me a joke about a robot.", thread=thread2) print(result2.text) - + result3 = await agent.run("Now add some emojis to the joke and tell it in the voice of a pirate's parrot.", thread=thread1) print(result3.text) - + result4 = await agent.run("Now add some emojis to the joke and tell it in the voice of a robot.", thread=thread2) print(result4.text) @@ -123,4 +123,4 @@ asyncio.run(main()) ## Next steps > [!div class="nextstepaction"] -> [Using function tools with an agent](./function-tools.md) \ No newline at end of file +> [Using function tools with an agent](./function-tools.md) diff --git a/agent-framework/tutorials/agents/persisted-conversation.md b/agent-framework/tutorials/agents/persisted-conversation.md index 42982609..bdb438bb 100644 --- a/agent-framework/tutorials/agents/persisted-conversation.md +++ b/agent-framework/tutorials/agents/persisted-conversation.md @@ -19,7 +19,7 @@ When hosting an agent in a service or even in a client application, you often wa ## Prerequisites -For prerequisites and installing nuget packages, see the [Create and run a simple agent](./run-agent.md) step in this tutorial. +For prerequisites and installing NuGet packages, see the [Create and run a simple agent](./run-agent.md) step in this tutorial. ## Persisting and resuming the conversation @@ -65,9 +65,9 @@ await File.WriteAllTextAsync(filePath, serializedJson); ``` Load the persisted JSON from storage and recreate the AgentThread instance from it. -Note that the thread must be deserialized using an agent instance. This should be the +The thread must be deserialized using an agent instance. This should be the same agent type that was used to create the original thread. -This is because agents may have their own thread types and may construct threads with +This is because agents might have their own thread types and might construct threads with additional functionality that is specific to that agent type. ```csharp @@ -147,9 +147,9 @@ with open(file_path, "w") as f: ``` Load the persisted JSON from storage and recreate the AgentThread instance from it. -Note that the thread must be deserialized using an agent instance. This should be the +The thread must be deserialized using an agent instance. This should be the same agent type that was used to create the original thread. -This is because agents may have their own thread types and may construct threads with +This is because agents might have their own thread types and might construct threads with additional functionality that is specific to that agent type. ```python diff --git a/agent-framework/tutorials/agents/run-agent.md b/agent-framework/tutorials/agents/run-agent.md index 49807ba3..447fe949 100644 --- a/agent-framework/tutorials/agents/run-agent.md +++ b/agent-framework/tutorials/agents/run-agent.md @@ -13,10 +13,10 @@ ms.service: agent-framework ::: zone pivot="programming-language-csharp" -This tutorial shows you how to create and run an agent with the Agent Framework, based on the Azure OpenAI Chat Completion service. +This tutorial shows you how to create and run an agent with Agent Framework, based on the Azure OpenAI Chat Completion service. 
> [!IMPORTANT]
-> The agent framework supports many different types of agents. This tutorial uses an agent based on a Chat Completion service, but all other agent types are run in the same way. See the [Agent Framework user guide](../../user-guide/overview.md) for more information on other agent types and how to construct them.
+> Agent Framework supports many different types of agents. This tutorial uses an agent based on a Chat Completion service, but all other agent types are run in the same way. For more information on other agent types and how to construct them, see the [Agent Framework user guide](../../user-guide/overview.md).

## Prerequisites

@@ -28,25 +28,26 @@ Before you begin, ensure you have the following prerequisites:
- [User has the `Cognitive Services OpenAI User` or `Cognitive Services OpenAI Contributor` roles for the Azure OpenAI resource.](/azure/ai-foundry/openai/how-to/role-based-access-control)

> [!NOTE]
-> The Microsoft Agent Framework is supported with all actively supported versions of .net. For the purposes of this sample we are recommending the .NET 8.0 SDK or higher.
+> Microsoft Agent Framework is supported with all actively supported versions of .NET. For the purposes of this sample, we recommend the .NET 8 SDK or a later version.
+
> [!IMPORTANT]
-> For this tutorial we are using Azure OpenAI for the Chat Completion service, but you can use any inference service that provides a [Microsoft.Extensions.AI.IChatClient](/dotnet/api/microsoft.extensions.ai.ichatclient) implementation.
+> This tutorial uses Azure OpenAI for the Chat Completion service, but you can use any inference service that provides a [Microsoft.Extensions.AI.IChatClient](/dotnet/api/microsoft.extensions.ai.ichatclient) implementation.

-## Installing Nuget packages
+## Install NuGet packages

-To use the Microsoft Agent Framework with Azure OpenAI, you need to install the following NuGet packages:
+To use Microsoft Agent Framework with Azure OpenAI, you need to install the following NuGet packages:

-```powershell
+```dotnetcli
dotnet add package Azure.Identity
dotnet add package Azure.AI.OpenAI
dotnet add package Microsoft.Agents.AI.OpenAI --prerelease
```

-## Creating the agent
+## Create the agent

-- First we create a client for Azure OpenAI, by providing the Azure OpenAI endpoint and using the same login as was used when authenticating with the Azure CLI in the [Prerequisites](#prerequisites) step.
-- Then we get a chat client for communicating with the chat completion service, where we also specify the specific model deployment to use. Use one of the deployments that you created in the [Prerequisites](#prerequisites) step.
-- Finally we create the agent, providing instructions and a name for the agent.
+- First, create a client for Azure OpenAI by providing the Azure OpenAI endpoint and using the same login as you used when authenticating with the Azure CLI in the [Prerequisites](#prerequisites) step.
+- Then, get a chat client for communicating with the chat completion service, where you also specify the specific model deployment to use. Use one of the deployments that you created in the [Prerequisites](#prerequisites) step.
+- Finally, create the agent, providing instructions and a name for the agent.

```csharp
using System;
@@ -157,16 +158,16 @@ Console.WriteLine(await agent.RunAsync([systemMessage, userMessage]));

Sample output:

```text
-I’m not a clown, but I can share an interesting fact! Did you know that pirates often revised the Jolly Roger flag?
Depending on the pirate captain, it could feature different symbols like skulls, bones, or hourglasses, each representing their unique approach to piracy. +I'm not a clown, but I can share an interesting fact! Did you know that pirates often revised the Jolly Roger flag? Depending on the pirate captain, it could feature different symbols like skulls, bones, or hourglasses, each representing their unique approach to piracy. ``` ::: zone-end ::: zone pivot="programming-language-python" -This tutorial shows you how to create and run an agent with the Agent Framework, based on the Azure OpenAI Chat Completion service. +This tutorial shows you how to create and run an agent with Agent Framework, based on the Azure OpenAI Chat Completion service. > [!IMPORTANT] -> The agent framework supports many different types of agents. This tutorial uses an agent based on a Chat Completion service, but all other agent types are run in the same way. See the [Agent Framework user guide](../../user-guide/overview.md) for more information on other agent types and how to construct them. +> Agent Framework supports many different types of agents. This tutorial uses an agent based on a Chat Completion service, but all other agent types are run in the same way. For more information on other agent types and how to construct them, see the [Agent Framework user guide](../../user-guide/overview.md). ## Prerequisites @@ -178,20 +179,20 @@ Before you begin, ensure you have the following prerequisites: - [User has the `Cognitive Services OpenAI User` or `Cognitive Services OpenAI Contributor` roles for the Azure OpenAI resource.](/azure/ai-foundry/openai/how-to/role-based-access-control) > [!IMPORTANT] -> For this tutorial we are using Azure OpenAI for the Chat Completion service, but you can use any inference service that is compatible with the Agent Framework's chat client protocol. +> This tutorial uses Azure OpenAI for the Chat Completion service, but you can use any inference service that is compatible with Agent Framework's chat client protocol. -## Installing Python packages +## Install Python packages -To use the Microsoft Agent Framework with Azure OpenAI, you need to install the following Python packages: +To use Microsoft Agent Framework with Azure OpenAI, you need to install the following Python packages: ```bash pip install agent-framework ``` -## Creating the agent +## Create the agent -- First we create a chat client for communicating with Azure OpenAI, where we use the same login as was used when authenticating with the Azure CLI in the [Prerequisites](#prerequisites) step. -- Then we create the agent, providing instructions and a name for the agent. +- First, create a chat client for communicating with Azure OpenAI and use the same login as you used when authenticating with the Azure CLI in the [Prerequisites](#prerequisites) step. +- Then, create the agent, providing instructions and a name for the agent. 
```python
import asyncio
@@ -240,7 +241,7 @@ Instead of a simple string, you can also provide one or more `ChatMessage` objec

from agent_framework import ChatMessage, TextContent, UriContent, Role

message = ChatMessage(
-    role=Role.USER, 
+    role=Role.USER,
    contents=[
        TextContent(text="What do you see in this image?"),
        UriContent(uri="https://samplesite.org/clown.jpg", media_type="image/jpeg")
diff --git a/agent-framework/tutorials/agents/structured-output.md b/agent-framework/tutorials/agents/structured-output.md
index e4c60490..fff72788 100644
--- a/agent-framework/tutorials/agents/structured-output.md
+++ b/agent-framework/tutorials/agents/structured-output.md
@@ -16,27 +16,27 @@ ms.service: agent-framework

This tutorial step shows you how to produce structured output with an agent, where the agent is built on the Azure OpenAI Chat Completion service.

> [!IMPORTANT]
-> Not all agent types support structured output. In this step we are using a `ChatClientAgent`, which does support structured output.
+> Not all agent types support structured output. This step uses a `ChatClientAgent`, which does support structured output.

## Prerequisites

-For prerequisites and installing nuget packages, see the [Create and run a simple agent](./run-agent.md) step in this tutorial.
+For prerequisites and installing NuGet packages, see the [Create and run a simple agent](./run-agent.md) step in this tutorial.

-## Creating the agent with structured output
+## Create the agent with structured output

-The `ChatClientAgent` is built on top of any `Microsoft.Extensions.AI.IChatClient` implementation.
-The `ChatClientAgent` uses the support for structured output that is provided by the underlying chat client.
+The `ChatClientAgent` is built on top of any `Microsoft.Extensions.AI.IChatClient` implementation.
+The `ChatClientAgent` uses the support for structured output that's provided by the underlying chat client.

-When creating the agent, we have the option to provide the default `ChatOptions` instance to use for the underlying chat client.
-This `ChatOptions` instance allows us to pick a preferred [`ChatResponseFormat`](/dotnet/api/microsoft.extensions.ai.chatresponseformat).
+When creating the agent, you have the option to provide the default `ChatOptions` instance to use for the underlying chat client.
+This `ChatOptions` instance allows you to pick a preferred [`ChatResponseFormat`](/dotnet/api/microsoft.extensions.ai.chatresponseformat).

Various options are supported:

-- `ChatResponseFormat.Text`: The response will be plain text.
-- `ChatResponseFormat.Json`: The response will be a JSON object without any particular schema.
-- `ChatResponseFormatJson.ForJsonSchema`: The response will be a JSON object that conforms to the provided schema.
+- `ChatResponseFormat.Text`: The response will be plain text.
+- `ChatResponseFormat.Json`: The response will be a JSON object without any particular schema.
+- `ChatResponseFormatJson.ForJsonSchema`: The response will be a JSON object that conforms to the provided schema.

-Let's look at an example of creating an agent that produces structured output in the form of a JSON object that conforms to a specific schema.
+This example creates an agent that produces structured output in the form of a JSON object that conforms to a specific schema.

The easiest way to produce the schema is to define a C# class that represents the structure of the output you want from the agent, and then use the `AIJsonUtilities.CreateJsonSchema` method to create a schema from the type.

@@ -60,7 +60,7 @@ public class PersonInfo

JsonElement schema = AIJsonUtilities.CreateJsonSchema(typeof(PersonInfo));
```

-We can then create a `ChatOptions` instance that uses this schema for the response format.
+You can then create a `ChatOptions` instance that uses this schema for the response format.

```csharp
using Microsoft.Extensions.AI;
@@ -95,7 +95,7 @@ AIAgent agent = new AzureOpenAIClient(
});
```

-Now we can just run the agent with some textual information that the agent can use to fill in the structured output.
+Now you can just run the agent with some textual information that the agent can use to fill in the structured output.

```csharp
var response = await agent.RunAsync("Please provide information about John Smith, who is a 35-year-old software engineer.");
@@ -108,8 +108,8 @@ var personInfo = response.Deserialize(JsonSerializerOptions.Web);
Console.WriteLine($"Name: {personInfo.Name}, Age: {personInfo.Age}, Occupation: {personInfo.Occupation}");
```

-When streaming, the agent response is streamed as a series of updates, and we can only deserialize the response once we have received all the updates.
-We therefore need to assemble all the updates into a single response, before deserializing it.
+When streaming, the agent response is streamed as a series of updates, and you can only deserialize the response once all the updates have been received.
+You must assemble all the updates into a single response before deserializing it.

```csharp
var updates = agent.RunStreamingAsync("Please provide information about John Smith, who is a 35-year-old software engineer.");
@@ -128,16 +128,16 @@ This tutorial step shows you how to produce structured output with an agent, whe

## Prerequisites

For prerequisites and installing packages, see the [Create and run a simple agent](./run-agent.md) step in this tutorial.

-## Creating the agent with structured output
+## Create the agent with structured output

The `ChatAgent` is built on top of any chat client implementation that supports structured output.
The `ChatAgent` uses the `response_format` parameter to specify the desired output schema.

-When creating or running the agent, we can provide a Pydantic model that defines the structure of the expected output.
+When creating or running the agent, you can provide a Pydantic model that defines the structure of the expected output.

Various response formats are supported based on the underlying chat client capabilities.

-Let's look at an example of creating an agent that produces structured output in the form of a JSON object that conforms to a Pydantic model schema.
+This example creates an agent that produces structured output in the form of a JSON object that conforms to a Pydantic model schema.
First, define a Pydantic model that represents the structure of the output you want from the agent: @@ -151,7 +151,7 @@ class PersonInfo(BaseModel): occupation: str | None = None ``` -Now we can create an agent using the Azure OpenAI Chat Client: +Now you can create an agent using the Azure OpenAI Chat Client: ```python from agent_framework.azure import AzureOpenAIChatClient @@ -164,11 +164,11 @@ agent = AzureOpenAIChatClient(credential=AzureCliCredential()).create_agent( ) ``` -Now we can run the agent with some textual information and specify the structured output format using the `response_format` parameter: +Now you can run the agent with some textual information and specify the structured output format using the `response_format` parameter: ```python response = await agent.run( - "Please provide information about John Smith, who is a 35-year-old software engineer.", + "Please provide information about John Smith, who is a 35-year-old software engineer.", response_format=PersonInfo ) ``` @@ -183,7 +183,7 @@ else: print("No structured data found in response") ``` -When streaming, the agent response is streamed as a series of updates. To get the structured output, we need to collect all the updates and then access the final response value: +When streaming, the agent response is streamed as a series of updates. To get the structured output, you must collect all the updates and then access the final response value: ```python from agent_framework import AgentRunResponse diff --git a/agent-framework/tutorials/agents/third-party-chat-history-storage.md b/agent-framework/tutorials/agents/third-party-chat-history-storage.md index cfc10695..46bf67e8 100644 --- a/agent-framework/tutorials/agents/third-party-chat-history-storage.md +++ b/agent-framework/tutorials/agents/third-party-chat-history-storage.md @@ -15,7 +15,6 @@ ms.service: agent-framework This tutorial shows how to store agent chat history in external storage by implementing a custom `ChatMessageStore` and using it with a `ChatClientAgent`. - By default, when using `ChatClientAgent`, chat history is stored either in memory in the `AgentThread` object or the underlying inference service, if the service supports it. Where services do not require chat history to be stored in the service, it is possible to provide a custom store for persisting chat history instead of relying on the default in-memory behavior. @@ -24,24 +23,24 @@ Where services do not require chat history to be stored in the service, it is po For prerequisites, see the [Create and run a simple agent](./run-agent.md) step in this tutorial. -## Installing Nuget packages +## Install NuGet packages -To use the Microsoft Agent Framework with Azure OpenAI, you need to install the following NuGet packages: +To use Microsoft Agent Framework with Azure OpenAI, you need to install the following NuGet packages: -```powershell +```dotnetcli dotnet add package Azure.Identity dotnet add package Azure.AI.OpenAI dotnet add package Microsoft.Agents.AI.OpenAI --prerelease ``` -In addition to this, we will use the in-memory vector store to store chat messages and a utility package for async LINQ operations. +In addition, you'll use the in-memory vector store to store chat messages and a utility package for async LINQ operations. 
-```powershell
+```dotnetcli
dotnet add package Microsoft.SemanticKernel.Connectors.InMemory --prerelease
dotnet add package System.Linq.Async
```

-## Creating a custom ChatMessage Store
+## Create a custom ChatMessage Store

To create a custom `ChatMessageStore`, you need to implement the abstract `ChatMessageStore` class and provide implementations for the required methods.

@@ -52,7 +51,7 @@ The most important methods to implement are:
- `AddMessagesAsync` - called to add new messages to the store.
- `GetMessagesAsync` - called to retrieve the messages from the store.

-`GetMessagesAsync` should return the messages in ascending chronological order. All messages returned by it will be used by the `ChatClientAgent` when making calls to the underlying `IChatClient`. It's therefore important that this method considers the limits of the underlying model, and only returns as many messages as can be handled by the model.
+`GetMessagesAsync` should return the messages in ascending chronological order. All messages returned by it will be used by the `ChatClientAgent` when making calls to the underlying `IChatClient`. It's therefore important that this method considers the limits of the underlying model, and only returns as many messages as can be handled by the model.

Any chat history reduction logic, such as summarization or trimming, should be done before returning messages from `GetMessagesAsync`.

@@ -60,15 +59,15 @@ Any chat history reduction logic, such as summarization or trimming, should be d

`ChatMessageStore` instances are created and attached to an `AgentThread` when the thread is created, and when a thread is resumed from a serialized state.

-While the actual messages making up the chat history are stored externally, the `ChatMessageStore` instance may need to store keys or other state to identify the chat history in the external store.
+While the actual messages making up the chat history are stored externally, the `ChatMessageStore` instance might need to store keys or other state to identify the chat history in the external store.

-To allow persisting threads, you need to implement the `SerializeStateAsync` method of the `ChatMessageStore` class. You also need to provide a constructor that takes a `JsonElement` parameter, which can be used to deserialize the state when resuming a thread.
+To allow persisting threads, you need to implement the `SerializeStateAsync` method of the `ChatMessageStore` class. You also need to provide a constructor that takes a `JsonElement` parameter, which can be used to deserialize the state when resuming a thread.

### Sample ChatMessageStore implementation

-Let's look at a sample implementation that stores chat messages in a vector store.
+The following sample implementation stores chat messages in a vector store.

-In `AddMessagesAsync` it upserts messages into the vector store, using a unique key for each message.
+`AddMessagesAsync` upserts messages into the vector store, using a unique key for each message.

`GetMessagesAsync` retrieves the messages for the current thread from the vector store, orders them by timestamp, and returns them in ascending order.

@@ -140,7 +139,7 @@ internal sealed class VectorChatMessageStore : ChatMessageStore
}

public override JsonElement Serialize(JsonSerializerOptions? jsonSerializerOptions = null) =>
-        // We have to serialize the thread id, so that on deserialization we can retrieve the messages using the same thread id.
+        // We have to serialize the thread id, so that on deserialization you can retrieve the messages using the same thread id.
JsonSerializer.SerializeToElement(this.ThreadDbKey); private sealed class ChatHistoryItem @@ -198,7 +197,7 @@ Where services do not require or are not capable of the chat history to be store For prerequisites, see the [Create and run a simple agent](./run-agent.md) step in this tutorial. -## Creating a custom ChatMessage Store +## Create a custom ChatMessage Store To create a custom `ChatMessageStore`, you need to implement the `ChatMessageStore` protocol and provide implementations for the required methods. @@ -217,15 +216,15 @@ Any chat history reduction logic, such as summarization or trimming, should be d `ChatMessageStore` instances are created and attached to an `AgentThread` when the thread is created, and when a thread is resumed from a serialized state. -While the actual messages making up the chat history are stored externally, the `ChatMessageStore` instance may need to store keys or other state to identify the chat history in the external store. +While the actual messages making up the chat history are stored externally, the `ChatMessageStore` instance might need to store keys or other state to identify the chat history in the external store. To allow persisting threads, you need to implement the `serialize_state` and `deserialize_state` methods of the `ChatMessageStore` protocol. These methods allow the store's state to be persisted and restored when resuming a thread. ### Sample ChatMessageStore implementation -Let's look at a sample implementation that stores chat messages in Redis using the Redis Lists data structure. +The following sample implementation stores chat messages in Redis using the Redis Lists data structure. -In `add_messages` it stores messages in Redis using RPUSH to append them to the end of the list in chronological order. +In `add_messages`, it stores messages in Redis using RPUSH to append them to the end of the list in chronological order. `list_messages` retrieves the messages for the current thread from Redis using LRANGE, and returns them in ascending chronological order. @@ -266,7 +265,7 @@ class RedisChatMessageStore: """Initialize the Redis chat message store. Args: - redis_url: Redis connection URL (e.g., "redis://localhost:6379"). + redis_url: Redis connection URL (for example, "redis://localhost:6379"). thread_id: Unique identifier for this conversation thread. If not provided, a UUID will be auto-generated. key_prefix: Prefix for Redis keys to namespace different applications. diff --git a/agent-framework/tutorials/overview.md b/agent-framework/tutorials/overview.md index 41e9c5ab..9b02b81a 100644 --- a/agent-framework/tutorials/overview.md +++ b/agent-framework/tutorials/overview.md @@ -10,9 +10,9 @@ ms.service: agent-framework # Agent Framework Tutorials -Welcome to the Agent Framework tutorials! This section is designed to help you quickly learn how to build, run, and extend agents using the Agent Framework. Whether you're new to agents or looking to deepen your understanding, these step-by-step guides will walk you through essential concepts such as creating agents, managing conversations, integrating function tools, handling approvals, producing structured output, persisting state, and adding telemetry. Start with the basics and progress to more advanced scenarios to unlock the full potential of agent-based solutions. +Welcome to the Agent Framework tutorials! This section is designed to help you quickly learn how to build, run, and extend agents using Agent Framework. 
Whether you're new to agents or looking to deepen your understanding, these step-by-step guides will walk you through essential concepts such as creating agents, managing conversations, integrating function tools, handling approvals, producing structured output, persisting state, and adding telemetry. Start with the basics and progress to more advanced scenarios to unlock the full potential of agent-based solutions. ## Agent getting started tutorials -These samples cover the essential capabilities of the Agent Framework. You'll learn how to create agents, enable multi-turn conversations, integrate function tools, add human-in-the-loop approvals, generate structured outputs, persist conversation history, and monitor agent activity with telemetry. Each tutorial is designed to help you build practical solutions and understand the core features step by step. +These samples cover the essential capabilities of Agent Framework. You'll learn how to create agents, enable multi-turn conversations, integrate function tools, add human-in-the-loop approvals, generate structured outputs, persist conversation history, and monitor agent activity with telemetry. Each tutorial is designed to help you build practical solutions and understand the core features step by step. diff --git a/agent-framework/tutorials/quick-start.md b/agent-framework/tutorials/quick-start.md index 3857d19c..19c8c339 100644 --- a/agent-framework/tutorials/quick-start.md +++ b/agent-framework/tutorials/quick-start.md @@ -1,6 +1,6 @@ --- -title: Quick Start -description: Quick start guide for the Agent Framework. +title: Microsoft Agent Framework Quick Start +description: Quick Start guide for Agent Framework. ms.service: agent-framework ms.topic: tutorial ms.date: 09/04/2025 @@ -10,9 +10,9 @@ author: TaoChenOSU ms.author: taochen --- -# Microsoft Agent Framework Quick Start +# Microsoft Agent Framework Quick-Start Guide -This guide will help you get up and running quickly with a basic agent using the Agent Framework and Azure OpenAI. +This guide will help you get up and running quickly with a basic agent using Agent Framework and Azure OpenAI. ::: zone pivot="programming-language-csharp" @@ -21,22 +21,24 @@ This guide will help you get up and running quickly with a basic agent using the Before you begin, ensure you have the following: - [.NET 8.0 SDK or later](https://dotnet.microsoft.com/download) -- [Azure OpenAI resource](/azure/ai-foundry/openai/how-to/create-resource) with a deployed model (e.g., `gpt-4o-mini`) +- [Azure OpenAI resource](/azure/ai-foundry/openai/how-to/create-resource) with a deployed model (for example, `gpt-4o-mini`) - [Azure CLI installed](/cli/azure/install-azure-cli) and [authenticated](/cli/azure/authenticate-azure-cli) (`az login`) - [User has the `Cognitive Services OpenAI User` or `Cognitive Services OpenAI Contributor` roles for the Azure OpenAI resource.](/azure/ai-foundry/openai/how-to/role-based-access-control) -**Note**: The Microsoft Agent Framework is supported with all actively supported versions of .Net. For the purposes of this sample we are recommending the .NET 8.0 SDK or higher. +> [!NOTE] +> Microsoft Agent Framework is supported with all actively supported versions of .NET. For the purposes of this sample, we recommend the .NET 8 SDK or a later version. -**Note**: This demo uses Azure CLI credentials for authentication. Make sure you're logged in with `az login` and have access to the Azure OpenAI resource. 
For more information, see the [Azure CLI documentation](/cli/azure/authenticate-azure-cli-interactively). It is also possible to replace the `AzureCliCredential` with an `ApiKeyCredential` if you
+> [!NOTE]
+> This demo uses Azure CLI credentials for authentication. Make sure you're logged in with `az login` and have access to the Azure OpenAI resource. For more information, see the [Azure CLI documentation](/cli/azure/authenticate-azure-cli-interactively). It is also possible to replace the `AzureCliCredential` with an `ApiKeyCredential` if you
have an api key and do not wish to use role based authentication, in which case `az login` is not required.

-## Installing Packages
+## Install Packages

-Packages will be published to [NuGet Gallery | MicrosoftAgentFramework](https://www.nuget.org/profiles/MicrosoftAgentFramework).
+Packages will be published to [NuGet Gallery | MicrosoftAgentFramework](https://www.nuget.org/profiles/MicrosoftAgentFramework).

First, add the following Microsoft Agent Framework NuGet packages into your application, using the following commands:

-```powershell
+```dotnetcli
dotnet add package Azure.AI.OpenAI
dotnet add package Azure.Identity
dotnet add package Microsoft.Agents.AI.OpenAI --prerelease
@@ -66,15 +68,15 @@ AIAgent agent = new AzureOpenAIClient(
Console.WriteLine(await agent.RunAsync("Tell me a joke about a pirate."));
```

-## (Optional) Installing Nightly Packages
+## (Optional) Install Nightly Packages

-If you need to get a package containing the latest enhancements or fixes nightly builds of the Agent Framework are available [here](https://github.com/orgs/microsoft/packages?repo_name=agent-framework).
+If you need to get a package containing the latest enhancements or fixes, nightly builds of Agent Framework are available on [GitHub Packages](https://github.com/orgs/microsoft/packages?repo_name=agent-framework).

-To download nightly builds follow the following steps:
+To download nightly builds, follow these steps:

1. You will need a GitHub account to complete these steps.
1. Create a GitHub Personal Access Token with the `read:packages` scope using these [instructions](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-personal-access-token-classic).
-1. If your account is part of the Microsoft organization then you must authorize the `Microsoft` organization as a single sign-on organization.
+1. If your account is part of the Microsoft organization, then you must authorize the `Microsoft` organization as a single sign-on organization.
1. Click the "Configure SSO" next to the Personal Access Token you just created and then authorize `Microsoft`.
1. Use the following command to add the Microsoft GitHub Packages source to your NuGet configuration:

@@ -91,7 +93,7 @@ To download nightly builds follow the following steps:

-
+

@@ -101,7 +103,7 @@ To download nightly builds follow the following steps:

-
+

@@ -111,14 +113,18 @@ To download nightly builds follow the following steps:

  ```

-   * If you place this file in your project folder make sure to have Git (or whatever source control you use) ignore it.
-   * For more information on where to store this file go [here](/nuget/reference/nuget-config-file).
+   - If you place this file in your project folder, make sure to have Git (or whatever source control you use) ignore it.
+   - For more information on where to store this file, see [nuget.config reference](/nuget/reference/nuget-config-file).
+
1. You can now add packages from the nightly build to your project.
-   * E.g.
use this command `dotnet add package Microsoft.Agents.AI --prerelease` + + For example, use this command `dotnet add package Microsoft.Agents.AI --prerelease` + 1. And the latest package release can be referenced in the project like this: - * `` -For more information see: + `` + +For more information, see . ::: zone-end @@ -129,10 +135,11 @@ For more information see: [!NOTE] +> This demo uses Azure CLI credentials for authentication. Make sure you're logged in with `az login` and have access to the Azure AI project. For more information, see the [Azure CLI documentation](/cli/azure/authenticate-azure-cli-interactively). ## Running a Basic Agent Sample diff --git a/agent-framework/tutorials/workflows/agents-in-workflows.md b/agent-framework/tutorials/workflows/agents-in-workflows.md index 7eceaefa..73a536a2 100644 --- a/agent-framework/tutorials/workflows/agents-in-workflows.md +++ b/agent-framework/tutorials/workflows/agents-in-workflows.md @@ -1,6 +1,6 @@ --- title: Agents in Workflows -description: Learn how to integrate agents into workflows using the Agent Framework. +description: Learn how to integrate agents into workflows using Agent Framework. zone_pivot_groups: programming-languages author: TaoChenOSU ms.topic: tutorial @@ -11,7 +11,7 @@ ms.service: agent-framework # Agents in Workflows -This tutorial demonstrates how to integrate AI agents into workflows using the Agent Framework. You'll learn to create workflows that leverage the power of specialized AI agents for content creation, review, and other collaborative tasks. +This tutorial demonstrates how to integrate AI agents into workflows using Agent Framework. You'll learn to create workflows that leverage the power of specialized AI agents for content creation, review, and other collaborative tasks. ::: zone pivot="programming-language-csharp" @@ -217,7 +217,7 @@ async def create_azure_ai_agent() -> tuple[Callable[..., Awaitable[Any]], Callab """ stack = AsyncExitStack() cred = await stack.enter_async_context(AzureCliCredential()) - + client = await stack.enter_async_context(AzureAIAgentClient(async_credential=cred)) async def agent(**kwargs: Any) -> Any: @@ -244,7 +244,7 @@ async def main() -> None: "You are an excellent content writer. You create new content and edit contents based on the feedback." ), ) - + # Create a Reviewer agent that provides feedback reviewer = await agent( name="Reviewer", @@ -317,7 +317,7 @@ if __name__ == "__main__": ## Complete Implementation -For the complete working implementation of this Azure AI agents workflow, see the [azure_ai_agents_streaming.py](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/workflow/agents/azure_ai_agents_streaming.py) sample in the Agent Framework repository. +For the complete working implementation of this Azure AI agents workflow, see the [azure_ai_agents_streaming.py](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py) sample in the Agent Framework repository. 
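As a usage note on the factory pattern above: `create_azure_ai_agent` returns both an agent factory and a cleanup callable so that the `AsyncExitStack`-managed credential and client can be released when the workflow finishes. The sketch below shows one way the pair might be used together; it assumes the `(agent, cleanup)` return shape implied by the function's signature, and the workflow-building step is elided.

```python
async def run_sample() -> None:
    # Assumed return shape: (agent factory, cleanup callable).
    agent, cleanup = await create_azure_ai_agent()
    try:
        writer = await agent(
            name="Writer",
            instructions="You are an excellent content writer.",
        )
        # ... build the workflow with `writer` (and other agents) and run it here ...
    finally:
        # Releases the AsyncExitStack-managed credential and client.
        await cleanup()
```
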
::: zone-end diff --git a/agent-framework/tutorials/workflows/checkpointing-and-resuming.md b/agent-framework/tutorials/workflows/checkpointing-and-resuming.md index 98a26b85..bdcf0b23 100644 --- a/agent-framework/tutorials/workflows/checkpointing-and-resuming.md +++ b/agent-framework/tutorials/workflows/checkpointing-and-resuming.md @@ -1,6 +1,6 @@ --- title: Checkpointing and Resuming Workflows -description: Learn how to implement checkpointing and resuming in workflows using the Agent Framework. +description: Learn how to implement checkpointing and resuming in workflows using Agent Framework. zone_pivot_groups: programming-languages author: TaoChenOSU ms.topic: tutorial @@ -57,7 +57,7 @@ Executors can persist local state that survives checkpoints using the `Reflectin internal sealed class GuessNumberExecutor : ReflectingExecutor, IMessageHandler { private static readonly StateKey StateKey = new("GuessNumberExecutor.State"); - + public int LowerBound { get; private set; } public int UpperBound { get; private set; } @@ -106,7 +106,7 @@ await foreach (WorkflowEvent evt in checkpointedRun.Run.WatchStreamAsync()) Console.WriteLine($"Checkpoint created at step {checkpoints.Count}."); } break; - + case WorkflowOutputEvent workflowOutputEvt: Console.WriteLine($"Workflow completed with result: {workflowOutputEvt.Data}"); break; @@ -168,7 +168,7 @@ await foreach (WorkflowEvent evt in resumedRun.Run.WatchStreamAsync()) case ExecutorCompletedEvent executorCompletedEvt: Console.WriteLine($"Executor {executorCompletedEvt.ExecutorId} completed."); break; - + case WorkflowOutputEvent workflowOutputEvt: Console.WriteLine($"Workflow completed with result: {workflowOutputEvt.Data}"); return; @@ -247,7 +247,7 @@ await foreach (WorkflowEvent evt in checkpointedRun.Run.WatchStreamAsync()) ExternalResponse response = HandleExternalRequest(requestInputEvt.Request); await checkpointedRun.Run.SendResponseAsync(response); break; - + case SuperStepCompletedEvent superStepCompletedEvt: // Save checkpoint after each interaction CheckpointInfo? 
checkpoint = superStepCompletedEvt.CompletionInfo!.Checkpoint; @@ -257,7 +257,7 @@ await foreach (WorkflowEvent evt in checkpointedRun.Run.WatchStreamAsync()) Console.WriteLine($"Checkpoint created after human interaction."); } break; - + case WorkflowOutputEvent workflowOutputEvt: Console.WriteLine($"Workflow completed: {workflowOutputEvt.Data}"); return; @@ -269,7 +269,7 @@ if (checkpoints.Count > 0) { var selectedCheckpoint = checkpoints[1]; // Select specific checkpoint await checkpointedRun.RestoreCheckpointAsync(selectedCheckpoint); - + // Continue from that point await foreach (WorkflowEvent evt in checkpointedRun.Run.WatchStreamAsync()) { @@ -311,7 +311,7 @@ public static class CheckpointingExample case ExecutorCompletedEvent executorEvt: Console.WriteLine($"Executor {executorEvt.ExecutorId} completed."); break; - + case SuperStepCompletedEvent superStepEvt: var checkpoint = superStepEvt.CompletionInfo!.Checkpoint; if (checkpoint is not null) @@ -320,7 +320,7 @@ public static class CheckpointingExample Console.WriteLine($"Checkpoint {checkpoints.Count} created."); } break; - + case WorkflowOutputEvent outputEvt: Console.WriteLine($"Workflow completed: {outputEvt.Data}"); goto FinishExecution; @@ -354,7 +354,7 @@ public static class CheckpointingExample { var newWorkflow = await WorkflowHelper.GetWorkflowAsync(); var rehydrationCheckpoint = checkpoints[3]; - + Console.WriteLine("Rehydrating from checkpoint 4 with new workflow instance..."); Checkpointed newRun = await InProcessExecution @@ -433,7 +433,7 @@ class UpperCaseExecutor(Executor): @handler async def to_upper_case(self, text: str, ctx: WorkflowContext[str]) -> None: result = text.upper() - + # Persist executor-local state for checkpoints prev = await ctx.get_state() or {} count = int(prev.get("count", 0)) + 1 @@ -442,7 +442,7 @@ class UpperCaseExecutor(Executor): "last_input": text, "last_output": result, }) - + # Send result to next executor await ctx.send_message(result) ``` @@ -458,7 +458,7 @@ class ProcessorExecutor(Executor): # Write to shared state for cross-executor visibility await ctx.set_shared_state("original_input", text) await ctx.set_shared_state("processed_output", text.upper()) - + await ctx.send_message(text.upper()) ``` @@ -489,7 +489,7 @@ from agent_framework import RequestInfoExecutor for checkpoint in checkpoints: # Get human-readable summary summary = RequestInfoExecutor.checkpoint_summary(checkpoint) - + print(f"Checkpoint: {summary.checkpoint_id}") print(f"Iteration: {summary.iteration_count}") print(f"Status: {summary.status}") @@ -511,7 +511,7 @@ async for event in workflow.run_stream_from_checkpoint( checkpoint_storage=checkpoint_storage ): print(f"Resumed Event: {event}") - + if isinstance(event, WorkflowOutputEvent): print(f"Final Result: {event.data}") break @@ -563,27 +563,27 @@ async def select_and_resume_checkpoint(workflow, storage): if not checkpoints: print("No checkpoints available") return - + # Sort and display options sorted_cps = sorted(checkpoints, key=lambda cp: cp.timestamp) print("Available checkpoints:") for i, cp in enumerate(sorted_cps): summary = RequestInfoExecutor.checkpoint_summary(cp) print(f"[{i}] {summary.checkpoint_id[:8]}... 
iter={summary.iteration_count}") - + # Get user selection try: idx = int(input("Enter checkpoint index: ")) selected = sorted_cps[idx] - + # Resume from selected checkpoint print(f"Resuming from checkpoint: {selected.checkpoint_id}") async for event in workflow.run_stream_from_checkpoint( - selected.checkpoint_id, + selected.checkpoint_id, checkpoint_storage=storage ): print(f"Event: {event}") - + except (ValueError, IndexError): print("Invalid selection") ``` @@ -596,7 +596,7 @@ Here's a typical checkpointing workflow pattern: import asyncio from pathlib import Path from agent_framework import ( - WorkflowBuilder, FileCheckpointStorage, + WorkflowBuilder, FileCheckpointStorage, WorkflowOutputEvent, RequestInfoExecutor ) @@ -605,7 +605,7 @@ async def main(): checkpoint_dir = Path("./checkpoints") checkpoint_dir.mkdir(exist_ok=True) storage = FileCheckpointStorage(checkpoint_dir) - + # Build workflow with checkpointing workflow = ( WorkflowBuilder() @@ -614,23 +614,23 @@ async def main(): .with_checkpointing(storage) .build() ) - + # Initial run print("Running workflow...") async for event in workflow.run_stream("input data"): print(f"Event: {event}") - + # List and inspect checkpoints checkpoints = await storage.list_checkpoints() for cp in sorted(checkpoints, key=lambda c: c.timestamp): summary = RequestInfoExecutor.checkpoint_summary(cp) print(f"Checkpoint: {summary.checkpoint_id[:8]}... iter={summary.iteration_count}") - + # Resume from a checkpoint if checkpoints: latest = max(checkpoints, key=lambda cp: cp.timestamp) print(f"Resuming from: {latest.checkpoint_id}") - + async for event in workflow.run_stream_from_checkpoint(latest.checkpoint_id): print(f"Resumed: {event}") diff --git a/agent-framework/tutorials/workflows/requests-and-responses.md b/agent-framework/tutorials/workflows/requests-and-responses.md index 76f7db7e..d0b008f5 100644 --- a/agent-framework/tutorials/workflows/requests-and-responses.md +++ b/agent-framework/tutorials/workflows/requests-and-responses.md @@ -1,6 +1,6 @@ --- title: Handle Requests and Responses in Workflows -description: Learn how to handle requests and responses in workflows using the Agent Framework. +description: Learn how to handle requests and responses in workflows using Agent Framework. zone_pivot_groups: programming-languages author: TaoChenOSU ms.topic: tutorial @@ -11,7 +11,7 @@ ms.service: agent-framework # Handle Requests and Responses in Workflows -This tutorial demonstrates how to handle requests and responses in workflows using the Agent Framework Workflows. You'll learn how to create interactive workflows that can pause execution to request input from external sources (like humans or other systems) and then resume once a response is provided. +This tutorial demonstrates how to handle requests and responses in workflows using Agent Framework Workflows. You'll learn how to create interactive workflows that can pause execution to request input from external sources (like humans or other systems) and then resume once a response is provided. ::: zone pivot="programming-language-csharp" @@ -251,7 +251,7 @@ You'll create an interactive number guessing game workflow that demonstrates req #### Request-Response Flow 1. Workflow sends a `RequestInfoMessage` to `RequestInfoExecutor` -2. `RequestInfoExecutor` emits a `RequestInfoEvent` +2. `RequestInfoExecutor` emits a `RequestInfoEvent` 3. External system (human, API, etc.) processes the request 4. Response is sent back via `send_responses_streaming()` 5. 
Workflow resumes with the response data @@ -320,7 +320,7 @@ The turn manager coordinates the flow between the AI agent and human: ```python class TurnManager(Executor): """Coordinates turns between the AI agent and human player. - + Responsibilities: - Start the game by requesting the agent's first guess - Process agent responses and request human feedback @@ -429,11 +429,11 @@ async def run_interactive_workflow(workflow): # First iteration uses run_stream("start") # Subsequent iterations use send_responses_streaming with pending responses stream = ( - workflow.send_responses_streaming(pending_responses) - if pending_responses + workflow.send_responses_streaming(pending_responses) + if pending_responses else workflow.run_stream("start") ) - + # Collect events for this turn events = [event async for event in stream] pending_responses = None @@ -470,7 +470,7 @@ async def run_interactive_workflow(workflow): for req_id, prompt in requests: print(f"\n🤖 {prompt}") answer = input("👤 Enter higher/lower/correct/exit: ").lower() - + if answer == "exit": print("👋 Exiting...") return @@ -483,7 +483,7 @@ async def run_interactive_workflow(workflow): ### Running the Example -For the complete working implementation, see the [Human-in-the-Loop Guessing Game sample](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/workflow/human-in-the-loop/guessing_game_with_human_input.py). +For the complete working implementation, see the [Human-in-the-Loop Guessing Game sample](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py). ### How It Works diff --git a/agent-framework/tutorials/workflows/simple-concurrent-workflow.md b/agent-framework/tutorials/workflows/simple-concurrent-workflow.md index 4cee170d..bfc73228 100644 --- a/agent-framework/tutorials/workflows/simple-concurrent-workflow.md +++ b/agent-framework/tutorials/workflows/simple-concurrent-workflow.md @@ -1,6 +1,6 @@ --- title: Create a Simple Concurrent Workflow -description: Learn how to create a simple concurrent workflow using the Agent Framework. +description: Learn how to create a simple concurrent workflow using Agent Framework. zone_pivot_groups: programming-languages author: TaoChenOSU ms.topic: tutorial @@ -11,7 +11,7 @@ ms.service: agent-framework # Create a Simple Concurrent Workflow -This tutorial demonstrates how to create a concurrent workflow using the Agent Framework. You'll learn to implement fan-out and fan-in patterns that enable parallel processing, allowing multiple executors or agents to work simultaneously and then aggregate their results. +This tutorial demonstrates how to create a concurrent workflow using Agent Framework. You'll learn to implement fan-out and fan-in patterns that enable parallel processing, allowing multiple executors or agents to work simultaneously and then aggregate their results. 
::: zone pivot="programming-language-csharp" @@ -19,7 +19,7 @@ This tutorial demonstrates how to create a concurrent workflow using the Agent F You'll create a workflow that: -- Takes a question as input (e.g., "What is temperature?") +- Takes a question as input (for example, "What is temperature?") - Sends the same question to two expert AI agents simultaneously (Physicist and Chemist) - Collects and combines responses from both agents into a single output - Demonstrates concurrent execution with AI agents using fan-out/fan-in patterns @@ -51,7 +51,7 @@ public static class Program private static async Task Main() { // Set up the Azure OpenAI client - var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? + var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; var chatClient = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()) @@ -69,7 +69,7 @@ Create two specialized AI agents that will provide expert perspectives: name: "Physicist", instructions: "You are an expert in physics. You answer questions from a physics perspective." ); - + ChatClientAgent chemist = new( chatClient, name: "Chemist", @@ -138,7 +138,7 @@ internal sealed class ConcurrentAggregationExecutor() : if (this._messages.Count == 2) { - var formattedMessages = string.Join(Environment.NewLine, + var formattedMessages = string.Join(Environment.NewLine, this._messages.Select(m => $"{m.AuthorName}: {m.Text}")); await context.YieldOutputAsync(formattedMessages); } @@ -179,18 +179,18 @@ Run the workflow and capture the streaming output: ## How It Works -1. **Fan-Out**: The `ConcurrentStartExecutor` receives the input question and the fan-out edge sends it to both the Physicist and Chemist agents simultaneously -2. **Parallel Processing**: Both AI agents process the same question concurrently, each providing their expert perspective -3. **Fan-In**: The `ConcurrentAggregationExecutor` collects `ChatMessage` responses from both agents -4. **Aggregation**: Once both responses are received, the aggregator combines them into a formatted output +1. **Fan-Out**: The `ConcurrentStartExecutor` receives the input question and the fan-out edge sends it to both the Physicist and Chemist agents simultaneously. +2. **Parallel Processing**: Both AI agents process the same question concurrently, each providing their expert perspective. +3. **Fan-In**: The `ConcurrentAggregationExecutor` collects `ChatMessage` responses from both agents. +4. **Aggregation**: Once both responses are received, the aggregator combines them into a formatted output. ## Key Concepts -- **Fan-Out Edges**: Use `AddFanOutEdge()` to distribute the same input to multiple executors or agents -- **Fan-In Edges**: Use `AddFanInEdge()` to collect results from multiple source executors -- **AI Agent Integration**: AI agents can be used directly as executors in workflows -- **ReflectingExecutor**: Base class for creating custom executors with automatic message handling -- **Streaming Execution**: Use `StreamAsync()` to get real-time updates as the workflow progresses +- **Fan-Out Edges**: Use `AddFanOutEdge()` to distribute the same input to multiple executors or agents. +- **Fan-In Edges**: Use `AddFanInEdge()` to collect results from multiple source executors. 
+- **AI Agent Integration**: AI agents can be used directly as executors in workflows. +- **ReflectingExecutor**: Base class for creating custom executors with automatic message handling. +- **Streaming Execution**: Use `StreamAsync()` to get real-time updates as the workflow progresses. ## Complete Implementation @@ -218,7 +218,7 @@ You'll create a workflow that: ## Step 1: Import Required Dependencies -Start by importing the necessary components from the Agent Framework: +Start by importing the necessary components from Agent Framework: ```python import asyncio @@ -352,7 +352,7 @@ if __name__ == "__main__": ## Complete Implementation -For the complete working implementation of this concurrent workflow, see the [aggregate_results_of_different_types.py](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/workflow/parallelism/aggregate_results_of_different_types.py) sample in the Agent Framework repository. +For the complete working implementation of this concurrent workflow, see the [aggregate_results_of_different_types.py](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/workflows/parallelism/aggregate_results_of_different_types.py) sample in the Agent Framework repository. ::: zone-end diff --git a/agent-framework/tutorials/workflows/simple-sequential-workflow.md b/agent-framework/tutorials/workflows/simple-sequential-workflow.md index fa967b5b..36f42b5a 100644 --- a/agent-framework/tutorials/workflows/simple-sequential-workflow.md +++ b/agent-framework/tutorials/workflows/simple-sequential-workflow.md @@ -1,6 +1,6 @@ --- title: Create a Simple Sequential Workflow -description: Learn how to create a simple sequential workflow using the Agent Framework. +description: Learn how to create a simple sequential workflow using Agent Framework. zone_pivot_groups: programming-languages author: TaoChenOSU ms.topic: tutorial @@ -11,7 +11,7 @@ ms.service: agent-framework # Create a Simple Sequential Workflow -This tutorial demonstrates how to create a simple sequential workflow using the Agent Framework Workflows. +This tutorial demonstrates how to create a simple sequential workflow using Agent Framework Workflows. Sequential workflows are the foundation of building complex AI agent systems. This tutorial shows how to create a simple two-step workflow where each step processes data and passes it to the next step. @@ -39,7 +39,7 @@ The workflow demonstrates core concepts like: ## Step-by-Step Implementation -Let's build the sequential workflow step by step. +The following sections show how to build the sequential workflow step by step. ### Step 1: Add Required Using Statements @@ -60,7 +60,7 @@ Create an executor that converts text to uppercase: /// /// First executor: converts input text to uppercase. /// -internal sealed class UppercaseExecutor() : ReflectingExecutor("UppercaseExecutor"), +internal sealed class UppercaseExecutor() : ReflectingExecutor("UppercaseExecutor"), IMessageHandler { public ValueTask HandleAsync(string input, IWorkflowContext context) @@ -86,7 +86,7 @@ Create an executor that reverses the text: /// /// Second executor: reverses the input text and completes the workflow. 
/// -internal sealed class ReverseTextExecutor() : ReflectingExecutor("ReverseTextExecutor"), +internal sealed class ReverseTextExecutor() : ReflectingExecutor("ReverseTextExecutor"), IMessageHandler { public ValueTask HandleAsync(string input, IWorkflowContext context) @@ -99,9 +99,9 @@ internal sealed class ReverseTextExecutor() : ReflectingExecutor None: """Transform the input to uppercase and forward it to the next step.""" result = text.upper() - + # Send the intermediate result to the next executor await ctx.send_message(result) ``` @@ -267,7 +267,7 @@ Create an executor that reverses the text and yields the final output: async def reverse_text(text: str, ctx: WorkflowContext[Never, str]) -> None: """Reverse the input and yield the workflow output.""" result = text[::-1] - + # Yield the final output for this workflow run await ctx.yield_output(result) ``` @@ -361,7 +361,7 @@ The workflow will process the input "hello world" through both executors and dis ## Complete Example -For the complete, ready-to-run implementation, see the [sequential_streaming.py sample](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/workflow/control-flow/sequential_streaming.py) in the Agent Framework repository. +For the complete, ready-to-run implementation, see the [sequential_streaming.py sample](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/workflows/control-flow/sequential_streaming.py) in the Agent Framework repository. This sample includes: diff --git a/agent-framework/tutorials/workflows/visualization.md b/agent-framework/tutorials/workflows/visualization.md index e2a41ba7..6e2866c8 100644 --- a/agent-framework/tutorials/workflows/visualization.md +++ b/agent-framework/tutorials/workflows/visualization.md @@ -1,6 +1,6 @@ --- title: Workflow Visualization -description: Learn how to visualize workflows using the Agent Framework. +description: Learn how to visualize workflows using Agent Framework. author: TaoChenOSU ms.topic: tutorial ms.author: taochen @@ -111,19 +111,19 @@ try: # Export as SVG (vector format, recommended) svg_file = viz.export(format="svg") print(f"SVG exported to: {svg_file}") - + # Export as PNG (raster format) png_file = viz.export(format="png") print(f"PNG exported to: {png_file}") - + # Export as PDF (vector format) pdf_file = viz.export(format="pdf") print(f"PDF exported to: {pdf_file}") - + # Export raw DOT file dot_file = viz.export(format="dot") print(f"DOT file exported to: {dot_file}") - + except ImportError: print("Install 'viz' extra and GraphViz for image export:") print("pip install agent-framework[viz]") diff --git a/agent-framework/tutorials/workflows/workflow-with-branching-logic.md b/agent-framework/tutorials/workflows/workflow-with-branching-logic.md index 1fe3b7a0..b37815f4 100644 --- a/agent-framework/tutorials/workflows/workflow-with-branching-logic.md +++ b/agent-framework/tutorials/workflows/workflow-with-branching-logic.md @@ -1,6 +1,6 @@ --- title: Create a Workflow with Branching Logic -description: Learn how to create a workflow with branching logic using the Agent Framework. +description: Learn how to create a workflow with branching logic using Agent Framework. zone_pivot_groups: programming-languages author: TaoChenOSU ms.topic: tutorial @@ -11,7 +11,7 @@ ms.service: agent-framework # Create a Workflow with Branching Logic -In this tutorial, you will learn how to create a workflow with branching logic using the Agent Framework. 
Branching logic allows your workflow to make decisions based on certain conditions, enabling more complex and dynamic behavior. +In this tutorial, you will learn how to create a workflow with branching logic using Agent Framework. Branching logic allows your workflow to make decisions based on certain conditions, enabling more complex and dynamic behavior. ## Conditional Edges @@ -23,24 +23,24 @@ Conditional edges allow your workflow to make routing decisions based on the con You'll create an email processing workflow that demonstrates conditional routing: -- A spam detection agent that analyzes incoming emails and returns structured JSON -- Conditional edges that route emails to different handlers based on classification -- A legitimate email handler that drafts professional responses -- A spam handler that marks suspicious emails -- Shared state management to persist email data between workflow steps +- A spam detection agent that analyzes incoming emails and returns structured JSON. +- Conditional edges that route emails to different handlers based on classification. +- A legitimate email handler that drafts professional responses. +- A spam handler that marks suspicious emails. +- Shared state management to persist email data between workflow steps. ### Prerequisites -- .NET 9.0 or later -- Azure OpenAI deployment with structured output support -- Azure CLI authentication configured (`az login`) -- Basic understanding of C# and async programming +- .NET 9.0 or later. +- Azure OpenAI deployment with structured output support. +- Azure CLI authentication configured (`az login`). +- Basic understanding of C# and async programming. ### Setting Up the Environment First, install the required packages for your .NET project: -```bash +```dotnetcli dotnet add package Microsoft.Agents.AI.Workflows --prerelease dotnet add package Microsoft.Agents.AI.Workflows.Reflection --prerelease dotnet add package Azure.AI.OpenAI @@ -269,7 +269,7 @@ public static class Program private static async Task Main() { // Set up the Azure OpenAI client - var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") + var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; var chatClient = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()) @@ -299,7 +299,7 @@ public static class Program string emailContent = "Congratulations! You've won $1,000,000! 
Click here to claim your prize now!"; StreamingRun run = await InProcessExecution.StreamAsync(workflow, new ChatMessage(ChatRole.User, emailContent)); await run.TrySendMessageAsync(new TurnToken(emitEvents: true)); - + await foreach (WorkflowEvent evt in run.WatchStreamAsync().ConfigureAwait(false)) { if (evt is WorkflowOutputEvent outputEvent) @@ -491,10 +491,10 @@ async def to_email_assistant_request( """Transform spam detection response into a request for the email assistant.""" # Parse the detection result and extract the email content for the assistant detection = DetectionResult.model_validate_json(response.agent_run_response.text) - + # Create a new request for the email assistant with the original email content request = AgentExecutorRequest( - messages=[ChatMessage(Role.USER, text=detection.email_content)], + messages=[ChatMessage(Role.USER, text=detection.email_content)], should_respond=True ) await ctx.send_message(request) @@ -529,7 +529,7 @@ async def main() -> None: chat_client.create_agent( instructions=( "You are an email assistant that helps users draft professional responses to emails. " - "Your input may be a JSON object that includes 'email_content'; base your reply on that content. " + "Your input might be a JSON object that includes 'email_content'; base your reply on that content. " "Return JSON with a single field 'response' containing the drafted reply." ), response_format=EmailResponse, @@ -605,7 +605,7 @@ if __name__ == "__main__": ### Complete Implementation -For the complete working implementation, see the [edge_condition.py](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/workflow/control-flow/edge_condition.py) sample in the Agent Framework repository. +For the complete working implementation, see the [edge_condition.py](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/workflows/control-flow/edge_condition.py) sample in the Agent Framework repository. ::: zone-end @@ -621,7 +621,7 @@ The previous conditional edges example demonstrated two-way routing (spam vs. le You'll extend the email processing workflow to handle three decision paths: -- **NotSpam** → Email Assistant → Send Email +- **NotSpam** → Email Assistant → Send Email - **Spam** → Handle Spam Executor - **Uncertain** → Handle Uncertain Executor (default case) @@ -699,7 +699,7 @@ Create a reusable condition factory that generates predicates for each spam deci /// /// The expected spam detection decision /// A function that evaluates whether a message meets the expected result -private static Func GetCondition(SpamDecision expectedDecision) => +private static Func GetCondition(SpamDecision expectedDecision) => detectionResult => detectionResult is DetectionResult result && result.spamDecision == expectedDecision; ``` @@ -959,7 +959,7 @@ The switch-case pattern scales much better as the number of routing decisions gr When you run this workflow with ambiguous email content: ```text -Email marked as uncertain: This email contains promotional language but may be from a legitimate business contact, requiring human review for proper classification. +Email marked as uncertain: This email contains promotional language but might be from a legitimate business contact, requiring human review for proper classification. ``` Try changing the email content to something clearly spam or clearly legitimate to see the different routing paths in action. 
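To make the routing mechanics concrete, here is a small, self-contained illustration of how predicates produced by a factory like `GetCondition` decide which branch receives a message. The types below are simplified stand-ins for illustration, not the sample's actual definitions.

```csharp
using System;

// Simplified stand-ins for illustration only; the real sample defines richer types.
internal enum SpamDecision { NotSpam, Spam, Uncertain }

internal sealed record DetectionResult(SpamDecision spamDecision, string reason);

internal static class RoutingDemo
{
    // Same shape as the factory in this section: returns a predicate for one expected decision.
    private static Func<object?, bool> GetCondition(SpamDecision expectedDecision) =>
        detectionResult => detectionResult is DetectionResult result && result.spamDecision == expectedDecision;

    private static void Main()
    {
        object message = new DetectionResult(SpamDecision.Uncertain, "Promotional tone, unknown sender.");

        // Each case evaluates its own predicate; only the matching branch receives the message.
        Console.WriteLine(GetCondition(SpamDecision.NotSpam)(message));   // False
        Console.WriteLine(GetCondition(SpamDecision.Spam)(message));      // False
        Console.WriteLine(GetCondition(SpamDecision.Uncertain)(message)); // True -> routed to the uncertain handler
    }
}
```

At workflow build time, each case holds one of these predicates, so only the matching downstream executor handles a given `DetectionResult`.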
@@ -980,7 +980,7 @@ The previous conditional edges example demonstrated two-way routing (spam vs. le You'll extend the email processing workflow to handle three decision paths: -- **NotSpam** → Email Assistant → Send Email +- **NotSpam** → Email Assistant → Send Email - **Spam** → Mark as Spam - **Uncertain** → Flag for Manual Review (default case) @@ -995,20 +995,20 @@ from typing import Literal class DetectionResultAgent(BaseModel): """Structured output returned by the spam detection agent.""" - + # The agent classifies the email into one of three categories spam_decision: Literal["NotSpam", "Spam", "Uncertain"] reason: str class EmailResponse(BaseModel): """Structured output returned by the email assistant agent.""" - + response: str @dataclass class DetectionResult: """Internal typed payload used for routing and downstream handling.""" - + spam_decision: str reason: str email_id: str @@ -1016,7 +1016,7 @@ class DetectionResult: @dataclass class Email: """In memory record of the email content stored in shared state.""" - + email_id: str email_content: str ``` @@ -1028,11 +1028,11 @@ Create a reusable condition factory that generates predicates for each spam deci ```python def get_case(expected_decision: str): """Factory that returns a predicate matching a specific spam_decision value.""" - + def condition(message: Any) -> bool: # Only match when the upstream payload is a DetectionResult with the expected decision return isinstance(message, DetectionResult) and message.spam_decision == expected_decision - + return condition ``` @@ -1053,12 +1053,12 @@ CURRENT_EMAIL_ID_KEY = "current_email_id" @executor(id="store_email") async def store_email(email_text: str, ctx: WorkflowContext[AgentExecutorRequest]) -> None: """Store email content once and pass around a lightweight ID reference.""" - + # Persist the raw email content in shared state new_email = Email(email_id=str(uuid4()), email_content=email_text) await ctx.set_shared_state(f"{EMAIL_STATE_PREFIX}{new_email.email_id}", new_email) await ctx.set_shared_state(CURRENT_EMAIL_ID_KEY, new_email.email_id) - + # Forward email to spam detection agent await ctx.send_message( AgentExecutorRequest(messages=[ChatMessage(Role.USER, text=new_email.email_content)], should_respond=True) @@ -1067,26 +1067,26 @@ async def store_email(email_text: str, ctx: WorkflowContext[AgentExecutorRequest @executor(id="to_detection_result") async def to_detection_result(response: AgentExecutorResponse, ctx: WorkflowContext[DetectionResult]) -> None: """Transform agent response into a typed DetectionResult with email ID.""" - + # Parse the agent's structured JSON output parsed = DetectionResultAgent.model_validate_json(response.agent_run_response.text) email_id: str = await ctx.get_shared_state(CURRENT_EMAIL_ID_KEY) - + # Create typed message for switch-case routing await ctx.send_message(DetectionResult( - spam_decision=parsed.spam_decision, - reason=parsed.reason, + spam_decision=parsed.spam_decision, + reason=parsed.reason, email_id=email_id )) @executor(id="submit_to_email_assistant") async def submit_to_email_assistant(detection: DetectionResult, ctx: WorkflowContext[AgentExecutorRequest]) -> None: """Handle NotSpam emails by forwarding to the email assistant.""" - + # Guard against misrouting if detection.spam_decision != "NotSpam": raise RuntimeError("This executor should only handle NotSpam messages.") - + # Retrieve original email content from shared state email: Email = await ctx.get_shared_state(f"{EMAIL_STATE_PREFIX}{detection.email_id}") await 
ctx.send_message( @@ -1096,14 +1096,14 @@ async def submit_to_email_assistant(detection: DetectionResult, ctx: WorkflowCon @executor(id="finalize_and_send") async def finalize_and_send(response: AgentExecutorResponse, ctx: WorkflowContext[Never, str]) -> None: """Parse email assistant response and yield final output.""" - + parsed = EmailResponse.model_validate_json(response.agent_run_response.text) await ctx.yield_output(f"Email sent: {parsed.response}") @executor(id="handle_spam") async def handle_spam(detection: DetectionResult, ctx: WorkflowContext[Never, str]) -> None: """Handle confirmed spam emails.""" - + if detection.spam_decision == "Spam": await ctx.yield_output(f"Email marked as spam: {detection.reason}") else: @@ -1112,7 +1112,7 @@ async def handle_spam(detection: DetectionResult, ctx: WorkflowContext[Never, st @executor(id="handle_uncertain") async def handle_uncertain(detection: DetectionResult, ctx: WorkflowContext[Never, str]) -> None: """Handle uncertain classifications that need manual review.""" - + if detection.spam_decision == "Uncertain": # Include original content for human review email: Email | None = await ctx.get_shared_state(f"{EMAIL_STATE_PREFIX}{detection.email_id}") @@ -1130,7 +1130,7 @@ Update the spam detection agent to be less confident and return three-way classi ```python async def main(): chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) - + # Enhanced spam detection agent with three-way classification spam_detection_agent = AgentExecutor( chat_client.create_agent( @@ -1144,7 +1144,7 @@ async def main(): ), id="spam_detection_agent", ) - + # Email assistant remains the same email_assistant_agent = AgentExecutor( chat_client.create_agent( @@ -1194,7 +1194,7 @@ Run the workflow with ambiguous email content that demonstrates the three-way ro "Hey there, I noticed you might be interested in our latest offer—no pressure, but it expires soon. " "Let me know if you'd like more details." 
) - + # Execute and display results events = await workflow.run(email) outputs = events.get_outputs() @@ -1217,7 +1217,7 @@ Run the workflow with ambiguous email content that demonstrates the three-way ro ```python .add_edge(detector, handler_a, condition=lambda x: x.result == "A") -.add_edge(detector, handler_b, condition=lambda x: x.result == "B") +.add_edge(detector, handler_b, condition=lambda x: x.result == "B") .add_edge(detector, handler_c, condition=lambda x: x.result == "C") ``` @@ -1459,7 +1459,7 @@ internal sealed class EmailSummaryExecutor : ReflectingExecutor(response.Text); - + // Enrich the analysis result with the summary message.EmailSummary = emailSummary!.Summary; @@ -1619,14 +1619,14 @@ public static class Program ) // Email assistant branch .AddEdge(emailAssistantExecutor, sendEmailExecutor) - + // Database persistence: conditional routing .AddEdge( emailAnalysisExecutor, databaseAccessExecutor, condition: analysisResult => analysisResult?.EmailLength <= LongEmailThreshold) // Short emails .AddEdge(emailSummaryExecutor, databaseAccessExecutor) // Long emails with summary - + .WithOutputFrom(handleUncertainExecutor, handleSpamExecutor, sendEmailExecutor); var workflow = builder.Build(); @@ -1739,24 +1739,24 @@ Extend the data models to support email length analysis and summarization: ```python class AnalysisResultAgent(BaseModel): """Enhanced structured output from email analysis agent.""" - + spam_decision: Literal["NotSpam", "Spam", "Uncertain"] reason: str class EmailResponse(BaseModel): """Response from email assistant.""" - + response: str class EmailSummaryModel(BaseModel): """Summary generated by email summary agent.""" - + summary: str @dataclass class AnalysisResult: """Internal analysis result with email metadata for routing decisions.""" - + spam_decision: str reason: str email_length: int # Used for conditional routing @@ -1766,7 +1766,7 @@ class AnalysisResult: @dataclass class Email: """Email content stored in shared state.""" - + email_id: str email_content: str @@ -1785,24 +1785,24 @@ LONG_EMAIL_THRESHOLD = 100 def select_targets(analysis: AnalysisResult, target_ids: list[str]) -> list[str]: """Intelligent routing based on spam decision and email characteristics.""" - + # Target order: [handle_spam, submit_to_email_assistant, summarize_email, handle_uncertain] handle_spam_id, submit_to_email_assistant_id, summarize_email_id, handle_uncertain_id = target_ids - + if analysis.spam_decision == "Spam": # Route only to spam handler return [handle_spam_id] - + elif analysis.spam_decision == "NotSpam": # Always route to email assistant targets = [submit_to_email_assistant_id] - + # Conditionally add summarizer for long emails if analysis.email_length > LONG_EMAIL_THRESHOLD: targets.append(summarize_email_id) - + return targets - + else: # Uncertain # Route only to uncertain handler return [handle_uncertain_id] @@ -1826,11 +1826,11 @@ CURRENT_EMAIL_ID_KEY = "current_email_id" @executor(id="store_email") async def store_email(email_text: str, ctx: WorkflowContext[AgentExecutorRequest]) -> None: """Store email and initiate analysis.""" - + new_email = Email(email_id=str(uuid4()), email_content=email_text) await ctx.set_shared_state(f"{EMAIL_STATE_PREFIX}{new_email.email_id}", new_email) await ctx.set_shared_state(CURRENT_EMAIL_ID_KEY, new_email.email_id) - + await ctx.send_message( AgentExecutorRequest(messages=[ChatMessage(Role.USER, text=new_email.email_content)], should_respond=True) ) @@ -1838,11 +1838,11 @@ async def store_email(email_text: str, ctx: 
WorkflowContext[AgentExecutorRequest @executor(id="to_analysis_result") async def to_analysis_result(response: AgentExecutorResponse, ctx: WorkflowContext[AnalysisResult]) -> None: """Transform agent response into enriched analysis result.""" - + parsed = AnalysisResultAgent.model_validate_json(response.agent_run_response.text) email_id: str = await ctx.get_shared_state(CURRENT_EMAIL_ID_KEY) email: Email = await ctx.get_shared_state(f"{EMAIL_STATE_PREFIX}{email_id}") - + # Create enriched analysis result with email length for routing decisions await ctx.send_message( AnalysisResult( @@ -1857,10 +1857,10 @@ async def to_analysis_result(response: AgentExecutorResponse, ctx: WorkflowConte @executor(id="submit_to_email_assistant") async def submit_to_email_assistant(analysis: AnalysisResult, ctx: WorkflowContext[AgentExecutorRequest]) -> None: """Handle legitimate emails by forwarding to email assistant.""" - + if analysis.spam_decision != "NotSpam": raise RuntimeError("This executor should only handle NotSpam messages.") - + email: Email = await ctx.get_shared_state(f"{EMAIL_STATE_PREFIX}{analysis.email_id}") await ctx.send_message( AgentExecutorRequest(messages=[ChatMessage(Role.USER, text=email.email_content)], should_respond=True) @@ -1869,14 +1869,14 @@ async def submit_to_email_assistant(analysis: AnalysisResult, ctx: WorkflowConte @executor(id="finalize_and_send") async def finalize_and_send(response: AgentExecutorResponse, ctx: WorkflowContext[Never, str]) -> None: """Final step for email assistant branch.""" - + parsed = EmailResponse.model_validate_json(response.agent_run_response.text) await ctx.yield_output(f"Email sent: {parsed.response}") @executor(id="summarize_email") async def summarize_email(analysis: AnalysisResult, ctx: WorkflowContext[AgentExecutorRequest]) -> None: """Generate summary for long emails (parallel branch).""" - + # Only called for long NotSpam emails by selection function email: Email = await ctx.get_shared_state(f"{EMAIL_STATE_PREFIX}{analysis.email_id}") await ctx.send_message( @@ -1886,11 +1886,11 @@ async def summarize_email(analysis: AnalysisResult, ctx: WorkflowContext[AgentEx @executor(id="merge_summary") async def merge_summary(response: AgentExecutorResponse, ctx: WorkflowContext[AnalysisResult]) -> None: """Merge summary back into analysis result for database persistence.""" - + summary = EmailSummaryModel.model_validate_json(response.agent_run_response.text) email_id: str = await ctx.get_shared_state(CURRENT_EMAIL_ID_KEY) email: Email = await ctx.get_shared_state(f"{EMAIL_STATE_PREFIX}{email_id}") - + # Create analysis result with summary for database storage await ctx.send_message( AnalysisResult( @@ -1905,7 +1905,7 @@ async def merge_summary(response: AgentExecutorResponse, ctx: WorkflowContext[An @executor(id="handle_spam") async def handle_spam(analysis: AnalysisResult, ctx: WorkflowContext[Never, str]) -> None: """Handle spam emails (single target like switch-case).""" - + if analysis.spam_decision == "Spam": await ctx.yield_output(f"Email marked as spam: {analysis.reason}") else: @@ -1914,7 +1914,7 @@ async def handle_spam(analysis: AnalysisResult, ctx: WorkflowContext[Never, str] @executor(id="handle_uncertain") async def handle_uncertain(analysis: AnalysisResult, ctx: WorkflowContext[Never, str]) -> None: """Handle uncertain emails (single target like switch-case).""" - + if analysis.spam_decision == "Uncertain": email: Email | None = await ctx.get_shared_state(f"{EMAIL_STATE_PREFIX}{analysis.email_id}") await ctx.yield_output( @@ -1926,7 
+1926,7 @@ async def handle_uncertain(analysis: AnalysisResult, ctx: WorkflowContext[Never, @executor(id="database_access") async def database_access(analysis: AnalysisResult, ctx: WorkflowContext[Never, str]) -> None: """Simulate database persistence with custom events.""" - + await asyncio.sleep(0.05) # Simulate DB operation await ctx.add_event(DatabaseEvent(f"Email {analysis.email_id} saved to database.")) ``` @@ -1938,7 +1938,7 @@ Create agents for analysis, assistance, and summarization: ```python async def main() -> None: chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) - + # Enhanced analysis agent email_analysis_agent = AgentExecutor( chat_client.create_agent( @@ -1951,7 +1951,7 @@ async def main() -> None: ), id="email_analysis_agent", ) - + # Email assistant (same as before) email_assistant_agent = AgentExecutor( chat_client.create_agent( @@ -1962,7 +1962,7 @@ async def main() -> None: ), id="email_assistant_agent", ) - + # New: Email summary agent for long emails email_summary_agent = AgentExecutor( chat_client.create_agent( @@ -1983,27 +1983,27 @@ Construct the workflow with sophisticated routing and parallel processing: .set_start_executor(store_email) .add_edge(store_email, email_analysis_agent) .add_edge(email_analysis_agent, to_analysis_result) - + # Multi-selection edge group: intelligent fan-out based on content .add_multi_selection_edge_group( to_analysis_result, [handle_spam, submit_to_email_assistant, summarize_email, handle_uncertain], selection_func=select_targets, ) - + # Email assistant branch (always for NotSpam) .add_edge(submit_to_email_assistant, email_assistant_agent) .add_edge(email_assistant_agent, finalize_and_send) - + # Summary branch (only for long NotSpam emails) .add_edge(summarize_email, email_summary_agent) .add_edge(email_summary_agent, merge_summary) - + # Database persistence: conditional routing - .add_edge(to_analysis_result, database_access, + .add_edge(to_analysis_result, database_access, condition=lambda r: r.email_length <= LONG_EMAIL_THRESHOLD) # Short emails .add_edge(merge_summary, database_access) # Long emails with summary - + .build() ) ``` @@ -2026,7 +2026,7 @@ Run the workflow and observe parallel execution through custom events: Best regards, Alex """ - + # Stream events to see parallel execution async for event in workflow.run_stream(email): if isinstance(event, DatabaseEvent): diff --git a/agent-framework/user-guide/agents/agent-memory.md b/agent-framework/user-guide/agents/agent-memory.md index 1365594c..23b0c3eb 100644 --- a/agent-framework/user-guide/agents/agent-memory.md +++ b/agent-framework/user-guide/agents/agent-memory.md @@ -48,6 +48,38 @@ IList? messages = thread.GetService>(); > [!NOTE] > Retrieving messages from the `AgentThread` object in this way will only work if in-memory storage is being used. +##### Chat History reduction with In-Memory storage + +The built-in `InMemoryChatMessageStore` that is used by default when the underlying service does not support in-service storage, +can be configured with a reducer to manage the size of the chat history. +This is useful to avoid exceeding the context size limits of the underlying service. + +The `InMemoryChatMessageStore` can take an optional `Microsoft.Extensions.AI.IChatReducer` implementation to reduce the size of the chat history. +It also allows you to configure the event during which the reducer is invoked, either after a message is added to the chat history +or before the chat history is returned for the next invocation. 
+ +To configure the `InMemoryChatMessageStore` with a reducer, you can provide a factory to construct a new `InMemoryChatMessageStore` +for each new `AgentThread` and pass it a reducer of your choice. The `InMemoryChatMessageStore` can also be passed an optional trigger event +which can be set to either `InMemoryChatMessageStore.ChatReducerTriggerEvent.AfterMessageAdded` or `InMemoryChatMessageStore.ChatReducerTriggerEvent.BeforeMessagesRetrieval`. + +```csharp +AIAgent agent = new OpenAIClient("") + .GetChatClient(modelName) + .CreateAIAgent(new ChatClientAgentOptions + { + Name = JokerName, + Instructions = JokerInstructions, + ChatMessageStoreFactory = ctx => new InMemoryChatMessageStore( + new MessageCountingChatReducer(2), + ctx.SerializedState, + ctx.JsonSerializerOptions, + InMemoryChatMessageStore.ChatReducerTriggerEvent.AfterMessageAdded) + }); +``` + +> [!NOTE] +> This feature is only supported when using the `InMemoryChatMessageStore`. When a service has in-service chat history storage, it is up to the service itself to manage the size of the chat history. Similarly, when using 3rd party storage (see below), it is up to the 3rd party storage solution to manage the chat history size. If you provide a `ChatMessageStoreFactory` for a message store but you use a service with built-in chat history storage, the factory will not be used. + #### Inference service chat history storage When using a service that requires in-service storage of chat history, the Agent Framework will storage the id of the remote chat history in the `AgentThread` object. diff --git a/agent-framework/user-guide/agents/agent-types/TOC.yml b/agent-framework/user-guide/agents/agent-types/TOC.yml index f4c0ede6..f451c6f5 100644 --- a/agent-framework/user-guide/agents/agent-types/TOC.yml +++ b/agent-framework/user-guide/agents/agent-types/TOC.yml @@ -2,6 +2,8 @@ href: index.md - name: Azure AI Foundry Agents href: azure-ai-foundry-agent.md +- name: Azure AI Foundry Models Agents + href: azure-ai-foundry-models.md - name: Azure OpenAI ChatCompletion Agents href: azure-openai-chat-completion-agent.md - name: Azure OpenAI Responses Agents diff --git a/agent-framework/user-guide/agents/agent-types/azure-ai-foundry-agent.md b/agent-framework/user-guide/agents/agent-types/azure-ai-foundry-agent.md index e2cb7954..571134dc 100644 --- a/agent-framework/user-guide/agents/agent-types/azure-ai-foundry-agent.md +++ b/agent-framework/user-guide/agents/agent-types/azure-ai-foundry-agent.md @@ -341,4 +341,4 @@ See the [Agent getting started tutorials](../../../tutorials/overview.md) for mo ## Next steps > [!div class="nextstepaction"] -> [OpenAI ChatCompletion Agents](./azure-openai-chat-completion-agent.md) +> [Azure AI Foundry Models based Agents](./azure-ai-foundry-models.md) diff --git a/agent-framework/user-guide/agents/agent-types/azure-ai-foundry-models.md b/agent-framework/user-guide/agents/agent-types/azure-ai-foundry-models.md new file mode 100644 index 00000000..8350a1ea --- /dev/null +++ b/agent-framework/user-guide/agents/agent-types/azure-ai-foundry-models.md @@ -0,0 +1,90 @@ +--- +title: Azure AI Foundry Models Agents +description: Learn how to use the Microsoft Agent Framework with Azure AI Foundry Models service. 
+zone_pivot_groups: programming-languages
+author: westey-m
+ms.topic: tutorial
+ms.author: westey
+ms.date: 10/07/2025
+ms.service: agent-framework
+---
+
+# Azure AI Foundry Models Agents
+
+[Azure AI Foundry supports deploying](/azure/ai-foundry/foundry-models/how-to/create-model-deployments?pivots=ai-foundry-portal) a wide range of models, including open source models.
+Microsoft Agent Framework can create agents that use these models.
+
+> [!NOTE]
+> The capabilities of these models might limit the functionality of the agents. For example, many open source models don't support function calling, so agents based on such models can't use function tools.
+
+::: zone pivot="programming-language-csharp"
+
+## Getting Started
+
+Foundry supports accessing models via an OpenAI Chat Completion compatible API, so the OpenAI client libraries can be used to access Foundry models.
+
+Add the required NuGet packages to your project:
+
+```dotnetcli
+dotnet add package Microsoft.Agents.AI.OpenAI --prerelease
+```
+
+## Creating an OpenAI ChatCompletion Agent with Foundry Models
+
+As a first step, you need to create a client to connect to the OpenAI service.
+
+Since the code isn't using the default OpenAI service, the URI of the OpenAI-compatible Foundry service needs to be provided via `OpenAIClientOptions`.
+
+```csharp
+using System;
+using Microsoft.Agents.AI;
+using OpenAI;
+
+var clientOptions = new OpenAIClientOptions() { Endpoint = new Uri("https://ai-foundry-.services.ai.azure.com/openai/v1/") };
+```
+
+There are different options for constructing the `OpenAIClient` depending on the desired authentication method. Let's look at two common options.
+
+The first option uses an API key:
+
+```csharp
+OpenAIClient client = new OpenAIClient(new ApiKeyCredential(""), clientOptions);
+```
+
+The second option uses token-based authentication, in this case using the Azure CLI credential to get a token:
+
+```csharp
+OpenAIClient client = new OpenAIClient(new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), clientOptions);
+```
+
+A client for chat completions can then be created using the model deployment name:
+
+```csharp
+var chatCompletionClient = client.GetChatClient("gpt-4o-mini");
+```
+
+Finally, the agent can be created using the `CreateAIAgent` extension method on the `ChatCompletionClient`:
+
+```csharp
+AIAgent agent = chatCompletionClient.CreateAIAgent(
+    instructions: "You are good at telling jokes.",
+    name: "Joker");
+```
+
+## Using the Agent
+
+The agent is a standard `AIAgent` and supports all standard `AIAgent` operations.
+
+For more information on how to run and interact with agents, see the [Agent getting started tutorials](../../../tutorials/overview.md).
+
+::: zone-end
+::: zone pivot="programming-language-python"
+
+More docs coming soon.
+
+::: zone-end
+
+## Next steps
+
+> [!div class="nextstepaction"]
+> [Azure OpenAI ChatCompletion Agents](./azure-openai-chat-completion-agent.md)
diff --git a/agent-framework/user-guide/agents/agent-types/index.md b/agent-framework/user-guide/agents/agent-types/index.md
index 5cc61137..ef644c3a 100644
--- a/agent-framework/user-guide/agents/agent-types/index.md
+++ b/agent-framework/user-guide/agents/agent-types/index.md
@@ -48,6 +48,7 @@ See the documentation for each service, for more information:
 |Underlying Inference Service|Description|Service Chat History storage supported|Custom Chat History storage supported|
 |---|---|---|---|
 |[Azure AI Foundry Agent](./azure-ai-foundry-agent.md)|An agent that uses the Azure AI Foundry Agents Service as its backend.|Yes|No|
+|[Azure AI Foundry Models](./azure-ai-foundry-models.md)|An agent that uses any of the models deployed in the Azure AI Foundry Service as its backend.|No|Yes|
 |[Azure OpenAI ChatCompletion](./azure-openai-chat-completion-agent.md)|An agent that uses the Azure OpenAI ChatCompletion service.|No|Yes|
 |[Azure OpenAI Responses](./azure-openai-responses-agent.md)|An agent that uses the Azure OpenAI Responses service.|Yes|Yes|
 |[OpenAI ChatCompletion](./openai-chat-completion-agent.md)|An agent that uses the OpenAI ChatCompletion service.|No|Yes|