Skip to content

Commit 8f737de

Browse files
authored
Add client close (#5871)
Fixes #4821 by adding a `close()` method to all clients. Additionally: * The m1 CLI is updated to close the client before exiting. * The PlaywrightController is updated to suppress some other unrelated chatty warnings (e.g., those produced by markitdown when encountering conversions that require external utilities)
1 parent dd82883 commit 8f737de

File tree

12 files changed

+37
-7
lines changed

12 files changed

+37
-7
lines changed

python/packages/autogen-core/src/autogen_core/models/_model_client.py

+3
Original file line numberDiff line numberDiff line change
@@ -152,6 +152,9 @@ def create_stream(
152152
cancellation_token: Optional[CancellationToken] = None,
153153
) -> AsyncGenerator[Union[str, CreateResult], None]: ...
154154

155+
@abstractmethod
156+
async def close(self) -> None: ...
157+
155158
@abstractmethod
156159
def actual_usage(self) -> RequestUsage: ...
157160

python/packages/autogen-core/tests/test_tool_agent.py

+3
Original file line numberDiff line numberDiff line change
@@ -126,6 +126,9 @@ def create_stream(
126126
) -> AsyncGenerator[Union[str, CreateResult], None]:
127127
raise NotImplementedError()
128128

129+
async def close(self) -> None:
130+
pass
131+
129132
def actual_usage(self) -> RequestUsage:
130133
return RequestUsage(prompt_tokens=0, completion_tokens=0)
131134

python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/playwright_controller.py

+3
Original file line numberDiff line numberDiff line change
@@ -3,10 +3,13 @@
33
import io
44
import os
55
import random
6+
import warnings
67
from typing import Any, Callable, Dict, Optional, Tuple, Union, cast
78

89
# TODO: Fix unfollowed import
910
try:
11+
# Suppress warnings from markitdown -- which is pretty chatty
12+
warnings.filterwarnings(action="ignore", module="markitdown")
1013
from markitdown import MarkItDown # type: ignore
1114
except ImportError:
1215
MarkItDown = None

python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/chat_completion_client_recorder.py

+3
Original file line numberDiff line numberDiff line change
@@ -166,6 +166,9 @@ def create_stream(
166166
cancellation_token=cancellation_token,
167167
)
168168

169+
async def close(self) -> None:
170+
await self.base_client.close()
171+
169172
def actual_usage(self) -> RequestUsage:
170173
# Calls base_client.actual_usage() and returns the result.
171174
return self.base_client.actual_usage()

python/packages/autogen-ext/src/autogen_ext/models/anthropic/_anthropic_client.py

+3
Original file line numberDiff line numberDiff line change
@@ -775,6 +775,9 @@ async def create_stream(
775775

776776
yield result
777777

778+
async def close(self) -> None:
779+
await self._client.close()
780+
778781
def count_tokens(self, messages: Sequence[LLMMessage], *, tools: Sequence[Tool | ToolSchema] = []) -> int:
779782
"""
780783
Estimate the number of tokens used by messages and tools.

python/packages/autogen-ext/src/autogen_ext/models/azure/_azure_ai_client.py

+3
Original file line numberDiff line numberDiff line change
@@ -490,6 +490,9 @@ async def create_stream(
490490

491491
yield result
492492

493+
async def close(self) -> None:
494+
await self._client.close()
495+
493496
def actual_usage(self) -> RequestUsage:
494497
return self._actual_usage
495498

python/packages/autogen-ext/src/autogen_ext/models/cache/_chat_completion_cache.py

+3
Original file line numberDiff line numberDiff line change
@@ -206,6 +206,9 @@ async def _generator() -> AsyncGenerator[Union[str, CreateResult], None]:
206206

207207
return _generator()
208208

209+
async def close(self) -> None:
210+
await self.client.close()
211+
209212
def actual_usage(self) -> RequestUsage:
210213
return self.client.actual_usage()
211214

python/packages/autogen-ext/src/autogen_ext/models/ollama/_ollama_client.py

+3
Original file line numberDiff line numberDiff line change
@@ -772,6 +772,9 @@ async def create_stream(
772772

773773
yield result
774774

775+
async def close(self) -> None:
776+
pass # ollama has no close method?
777+
775778
def actual_usage(self) -> RequestUsage:
776779
return self._actual_usage
777780

python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py

+3
Original file line numberDiff line numberDiff line change
@@ -944,6 +944,9 @@ async def _create_stream_chunks_beta_client(
944944
except StopAsyncIteration:
945945
break
946946

947+
async def close(self) -> None:
948+
await self._client.close()
949+
947950
def actual_usage(self) -> RequestUsage:
948951
return self._actual_usage
949952

python/packages/autogen-ext/src/autogen_ext/models/replay/_replay_chat_completion_client.py

+4-1
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@
33
import logging
44
import warnings
55
from typing import Any, AsyncGenerator, Dict, List, Mapping, Optional, Sequence, Union
6-
from typing_extensions import Self
76

87
from autogen_core import EVENT_LOGGER_NAME, CancellationToken, Component
98
from autogen_core.models import (
@@ -18,6 +17,7 @@
1817
)
1918
from autogen_core.tools import Tool, ToolSchema
2019
from pydantic import BaseModel
20+
from typing_extensions import Self
2121

2222
logger = logging.getLogger(EVENT_LOGGER_NAME)
2323

@@ -229,6 +229,9 @@ async def create_stream(
229229

230230
self._current_index += 1
231231

232+
async def close(self) -> None:
233+
pass
234+
232235
def actual_usage(self) -> RequestUsage:
233236
return self._cur_usage
234237

python/packages/autogen-ext/src/autogen_ext/models/semantic_kernel/_sk_chat_completion_adapter.py

+3
Original file line numberDiff line numberDiff line change
@@ -654,6 +654,9 @@ async def create_stream(
654654
thought=thought,
655655
)
656656

657+
async def close(self) -> None:
658+
pass # No explicit close method in SK client?
659+
657660
def actual_usage(self) -> RequestUsage:
658661
return RequestUsage(prompt_tokens=self._total_prompt_tokens, completion_tokens=self._total_completion_tokens)
659662

python/packages/magentic-one-cli/src/magentic_one_cli/_m1.py

+3-6
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@
22
import asyncio
33
import os
44
import sys
5-
import warnings
65
from typing import Any, Dict, Optional
76

87
import yaml
@@ -13,9 +12,6 @@
1312
from autogen_ext.teams.magentic_one import MagenticOne
1413
from autogen_ext.ui import RichConsole
1514

16-
# Suppress warnings about the requests.Session() not being closed
17-
warnings.filterwarnings(action="ignore", message="unclosed", category=ResourceWarning)
18-
1915
DEFAULT_CONFIG_FILE = "config.yaml"
2016
DEFAULT_CONFIG_CONTENTS = """# config.yaml
2117
#
@@ -109,10 +105,9 @@ def main() -> None:
109105
with open(args.config if isinstance(args.config, str) else args.config[0], "r") as f:
110106
config = yaml.safe_load(f)
111107

112-
client = ChatCompletionClient.load_component(config["client"])
113-
114108
# Run the task
115109
async def run_task(task: str, hil_mode: bool, use_rich_console: bool) -> None:
110+
client = ChatCompletionClient.load_component(config["client"])
116111
input_manager = UserInputManager(callback=cancellable_input)
117112

118113
async with DockerCommandLineCodeExecutor(work_dir=os.getcwd()) as code_executor:
@@ -128,6 +123,8 @@ async def run_task(task: str, hil_mode: bool, use_rich_console: bool) -> None:
128123
else:
129124
await Console(m1.run_stream(task=task), output_stats=False, user_input_manager=input_manager)
130125

126+
await client.close()
127+
131128
task = args.task if isinstance(args.task, str) else args.task[0]
132129
asyncio.run(run_task(task, not args.no_hil, args.rich))
133130

0 commit comments

Comments
 (0)