diff --git a/pyproject.toml b/pyproject.toml index c1f5e46..693af3e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,7 +3,7 @@ name = "letta-client" [tool.poetry] name = "letta-client" -version = "0.1.99" +version = "0.1.100" description = "" readme = "README.md" authors = [] diff --git a/reference.md b/reference.md index 2ec0ddd..7539898 100644 --- a/reference.md +++ b/reference.md @@ -6563,6 +6563,147 @@ client.voice.create_voice_chat_completions( + + + + +## Messages +
### `client.messages.list_batch_results(...)`

#### 📝 Description

Stream back responses from the batch request.

#### 🔌 Usage

```python
from letta_client import Letta

client = Letta(
    token="YOUR_TOKEN",
)
client.messages.list_batch_results(
    run_id="run_id",
)
```

#### ⚙️ Parameters

**run_id:** `str`

**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
### `client.messages.cancel_batch_run(...)`

#### 📝 Description

Cancel a batch run.

#### 🔌 Usage

```python
from letta_client import Letta

client = Letta(
    token="YOUR_TOKEN",
)
client.messages.cancel_batch_run(
    run_id="run_id",
)
```

#### ⚙️ Parameters

**run_id:** `str`

**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
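Both methods in this section are typed as returning `typing.Optional[typing.Any]`, so the payload shape is not pinned down by the SDK. Below is a minimal defensive sketch for consuming `list_batch_results`; the list handling is an assumption, not a documented schema:

```python
from letta_client import Letta

client = Letta(
    token="YOUR_TOKEN",
)

# list_batch_results returns typing.Optional[typing.Any], so handle
# None and unknown shapes defensively rather than assuming a schema.
results = client.messages.list_batch_results(
    run_id="run_id",
)
if results is None:
    print("no results available yet")
elif isinstance(results, list):
    for item in results:
        print(item)  # each item's schema is server-defined
else:
    print(results)
```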
@@ -9357,6 +9498,277 @@ client.groups.messages.modify(

## Messages Batches
### `client.messages.batches.list()`

#### 📝 Description

List all batch runs.

#### 🔌 Usage

```python
from letta_client import Letta

client = Letta(
    token="YOUR_TOKEN",
)
client.messages.batches.list()
```

#### ⚙️ Parameters

**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
### `client.messages.batches.create(...)`

#### 📝 Description

Process a batch of requests.

#### 🔌 Usage

```python
from letta_client import Letta, LettaBatchRequest, MessageCreate, TextContent

client = Letta(
    token="YOUR_TOKEN",
)
client.messages.batches.create(
    requests=[
        LettaBatchRequest(
            messages=[
                MessageCreate(
                    role="user",
                    content=[
                        TextContent(
                            text="text",
                        )
                    ],
                )
            ],
            agent_id="agent_id",
        )
    ],
)
```

#### ⚙️ Parameters

**requests:** `typing.Sequence[LettaBatchRequest]` — List of requests to be processed in batch.

**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
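Each `LettaBatchRequest` also accepts an optional `custom_id` (see the model definition later in this diff), which can tie a batch entry back to your own bookkeeping. A sketch, with the agent IDs and correlation IDs being placeholders:

```python
from letta_client import Letta, LettaBatchRequest, MessageCreate, TextContent

client = Letta(
    token="YOUR_TOKEN",
)

# One request per agent; custom_id is optional and lets you correlate
# each batch entry with your own records (IDs here are placeholders).
run = client.messages.batches.create(
    requests=[
        LettaBatchRequest(
            agent_id=agent_id,
            custom_id=f"job-{index}",
            messages=[
                MessageCreate(
                    role="user",
                    content=[TextContent(text="Summarize today's tasks.")],
                )
            ],
        )
        for index, agent_id in enumerate(["agent_a", "agent_b"])
    ],
)
print(run)
```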
### `client.messages.batches.retrieve(...)`

#### 📝 Description

Get the status of a batch run.

#### 🔌 Usage

```python
from letta_client import Letta

client = Letta(
    token="YOUR_TOKEN",
)
client.messages.batches.retrieve(
    run_id="run_id",
)
```

#### ⚙️ Parameters

**run_id:** `str`

**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
### `client.messages.batches.cancel(...)`

#### 🔌 Usage

```python
from letta_client import Letta

client = Letta(
    token="YOUR_TOKEN",
)
client.messages.batches.cancel(
    run_id="run_id",
)
```

#### ⚙️ Parameters

**run_id:** `str`

**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
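Taken together, a plausible end-to-end workflow is create → poll → fetch results. The sketch below assumes the returned `Run` exposes `id` and `status` attributes and that `status` takes the `JobStatus` values updated elsewhere in this diff (`completed`, `failed`, `cancelled`, ...); both the attribute names and the polling cadence are assumptions, since the `Run` model is not shown here:

```python
import time

from letta_client import Letta, LettaBatchRequest, MessageCreate, TextContent

client = Letta(
    token="YOUR_TOKEN",
)

run = client.messages.batches.create(
    requests=[
        LettaBatchRequest(
            agent_id="agent_id",
            messages=[
                MessageCreate(role="user", content=[TextContent(text="text")])
            ],
        )
    ],
)

# Poll until the run reaches a terminal JobStatus; the .status and .id
# attribute names are assumed from the Run model, not shown in this diff.
while getattr(run, "status", None) not in ("completed", "failed", "cancelled"):
    time.sleep(5)
    run = client.messages.batches.retrieve(run_id=run.id)

if run.status == "completed":
    print(client.messages.list_batch_results(run_id=run.id))
```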
diff --git a/src/letta_client/__init__.py b/src/letta_client/__init__.py index d6438a1..d428a2f 100644 --- a/src/letta_client/__init__.py +++ b/src/letta_client/__init__.py @@ -113,6 +113,7 @@ JobStatus, JobType, JsonSchema, + LettaBatchRequest, LettaMessageContentUnion, LettaMessageUnion, LettaRequest, @@ -221,6 +222,7 @@ health, identities, jobs, + messages, models, providers, runs, @@ -386,6 +388,7 @@ "JobType", "JsonSchema", "Letta", + "LettaBatchRequest", "LettaEnvironment", "LettaMessageContentUnion", "LettaMessageUnion", @@ -498,6 +501,7 @@ "health", "identities", "jobs", + "messages", "models", "providers", "runs", diff --git a/src/letta_client/base_client.py b/src/letta_client/base_client.py index ec73805..20c93f2 100644 --- a/src/letta_client/base_client.py +++ b/src/letta_client/base_client.py @@ -18,6 +18,7 @@ from .steps.client import StepsClient from .tag.client import TagClient from .voice.client import VoiceClient +from .messages.client import MessagesClient from .templates.client import TemplatesClient from .core.client_wrapper import AsyncClientWrapper from .tools.client import AsyncToolsClient @@ -34,6 +35,7 @@ from .steps.client import AsyncStepsClient from .tag.client import AsyncTagClient from .voice.client import AsyncVoiceClient +from .messages.client import AsyncMessagesClient from .templates.client import AsyncTemplatesClient @@ -109,6 +111,7 @@ def __init__( self.steps = StepsClient(client_wrapper=self._client_wrapper) self.tag = TagClient(client_wrapper=self._client_wrapper) self.voice = VoiceClient(client_wrapper=self._client_wrapper) + self.messages = MessagesClient(client_wrapper=self._client_wrapper) self.templates = TemplatesClient(client_wrapper=self._client_wrapper) @@ -184,6 +187,7 @@ def __init__( self.steps = AsyncStepsClient(client_wrapper=self._client_wrapper) self.tag = AsyncTagClient(client_wrapper=self._client_wrapper) self.voice = AsyncVoiceClient(client_wrapper=self._client_wrapper) + self.messages = AsyncMessagesClient(client_wrapper=self._client_wrapper) self.templates = AsyncTemplatesClient(client_wrapper=self._client_wrapper) diff --git a/src/letta_client/core/client_wrapper.py b/src/letta_client/core/client_wrapper.py index 9902bc0..cf9a5e0 100644 --- a/src/letta_client/core/client_wrapper.py +++ b/src/letta_client/core/client_wrapper.py @@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "letta-client", - "X-Fern-SDK-Version": "0.1.99", + "X-Fern-SDK-Version": "0.1.100", } if self.token is not None: headers["Authorization"] = f"Bearer {self.token}" diff --git a/src/letta_client/messages/__init__.py b/src/letta_client/messages/__init__.py new file mode 100644 index 0000000..2f65c52 --- /dev/null +++ b/src/letta_client/messages/__init__.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +from . import batches + +__all__ = ["batches"] diff --git a/src/letta_client/messages/batches/__init__.py b/src/letta_client/messages/batches/__init__.py new file mode 100644 index 0000000..f3ea265 --- /dev/null +++ b/src/letta_client/messages/batches/__init__.py @@ -0,0 +1,2 @@ +# This file was auto-generated by Fern from our API Definition. 
+ diff --git a/src/letta_client/messages/batches/client.py b/src/letta_client/messages/batches/client.py new file mode 100644 index 0000000..19ed9a3 --- /dev/null +++ b/src/letta_client/messages/batches/client.py @@ -0,0 +1,519 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...core.client_wrapper import SyncClientWrapper +from ...core.request_options import RequestOptions +from ...types.run import Run +from ...core.unchecked_base_model import construct_type +from ...errors.unprocessable_entity_error import UnprocessableEntityError +from ...types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ...core.api_error import ApiError +from ...types.letta_batch_request import LettaBatchRequest +from ...core.serialization import convert_and_respect_annotation_metadata +from ...core.jsonable_encoder import jsonable_encoder +from ...core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class BatchesClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.List[Run]: + """ + List all batch runs. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[Run] + Successful Response + + Examples + -------- + from letta_client import Letta + + client = Letta( + token="YOUR_TOKEN", + ) + client.messages.batches.list() + """ + _response = self._client_wrapper.httpx_client.request( + "v1/messages/batches", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[Run], + construct_type( + type_=typing.List[Run], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create( + self, *, requests: typing.Sequence[LettaBatchRequest], request_options: typing.Optional[RequestOptions] = None + ) -> Run: + """ + Process a batch of requests + + Parameters + ---------- + requests : typing.Sequence[LettaBatchRequest] + List of requests to be processed in batch. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + Run + Successful Response + + Examples + -------- + from letta_client import Letta, LettaBatchRequest, MessageCreate, TextContent + + client = Letta( + token="YOUR_TOKEN", + ) + client.messages.batches.create( + requests=[ + LettaBatchRequest( + messages=[ + MessageCreate( + role="user", + content=[ + TextContent( + text="text", + ) + ], + ) + ], + agent_id="agent_id", + ) + ], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v1/messages/batches", + method="POST", + json={ + "requests": convert_and_respect_annotation_metadata( + object_=requests, annotation=typing.Sequence[LettaBatchRequest], direction="write" + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + Run, + construct_type( + type_=Run, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def retrieve(self, run_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> Run: + """ + Get the status of a batch run. + + Parameters + ---------- + run_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + Run + Successful Response + + Examples + -------- + from letta_client import Letta + + client = Letta( + token="YOUR_TOKEN", + ) + client.messages.batches.retrieve( + run_id="run_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/messages/batches/{jsonable_encoder(run_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + Run, + construct_type( + type_=Run, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def cancel(self, run_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Parameters + ---------- + run_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + None + + Examples + -------- + from letta_client import Letta + + client = Letta( + token="YOUR_TOKEN", + ) + client.messages.batches.cancel( + run_id="run_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/messages/batches/{jsonable_encoder(run_id)}", + method="PATCH", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncBatchesClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.List[Run]: + """ + List all batch runs. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[Run] + Successful Response + + Examples + -------- + import asyncio + + from letta_client import AsyncLetta + + client = AsyncLetta( + token="YOUR_TOKEN", + ) + + + async def main() -> None: + await client.messages.batches.list() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v1/messages/batches", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[Run], + construct_type( + type_=typing.List[Run], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def create( + self, *, requests: typing.Sequence[LettaBatchRequest], request_options: typing.Optional[RequestOptions] = None + ) -> Run: + """ + Process a batch of requests + + Parameters + ---------- + requests : typing.Sequence[LettaBatchRequest] + List of requests to be processed in batch. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + Run + Successful Response + + Examples + -------- + import asyncio + + from letta_client import ( + AsyncLetta, + LettaBatchRequest, + MessageCreate, + TextContent, + ) + + client = AsyncLetta( + token="YOUR_TOKEN", + ) + + + async def main() -> None: + await client.messages.batches.create( + requests=[ + LettaBatchRequest( + messages=[ + MessageCreate( + role="user", + content=[ + TextContent( + text="text", + ) + ], + ) + ], + agent_id="agent_id", + ) + ], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v1/messages/batches", + method="POST", + json={ + "requests": convert_and_respect_annotation_metadata( + object_=requests, annotation=typing.Sequence[LettaBatchRequest], direction="write" + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + Run, + construct_type( + type_=Run, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def retrieve(self, run_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> Run: + """ + Get the status of a batch run. + + Parameters + ---------- + run_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + Run + Successful Response + + Examples + -------- + import asyncio + + from letta_client import AsyncLetta + + client = AsyncLetta( + token="YOUR_TOKEN", + ) + + + async def main() -> None: + await client.messages.batches.retrieve( + run_id="run_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/messages/batches/{jsonable_encoder(run_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + Run, + construct_type( + type_=Run, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def cancel(self, run_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Parameters + ---------- + run_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + None + + Examples + -------- + import asyncio + + from letta_client import AsyncLetta + + client = AsyncLetta( + token="YOUR_TOKEN", + ) + + + async def main() -> None: + await client.messages.batches.cancel( + run_id="run_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/messages/batches/{jsonable_encoder(run_id)}", + method="PATCH", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/letta_client/messages/client.py b/src/letta_client/messages/client.py new file mode 100644 index 0000000..eb1c1a3 --- /dev/null +++ b/src/letta_client/messages/client.py @@ -0,0 +1,274 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.client_wrapper import SyncClientWrapper +from .batches.client import BatchesClient +import typing +from ..core.request_options import RequestOptions +from ..core.jsonable_encoder import jsonable_encoder +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper +from .batches.client import AsyncBatchesClient + + +class MessagesClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + self.batches = BatchesClient(client_wrapper=self._client_wrapper) + + def list_batch_results( + self, run_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + Stream back responses from the batch request + + Parameters + ---------- + run_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful response + + Examples + -------- + from letta_client import Letta + + client = Letta( + token="YOUR_TOKEN", + ) + client.messages.list_batch_results( + run_id="run_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/messages/batches/{jsonable_encoder(run_id)}/results", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def cancel_batch_run( + self, run_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + Cancel a batch run. + + Parameters + ---------- + run_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from letta_client import Letta + + client = Letta( + token="YOUR_TOKEN", + ) + client.messages.cancel_batch_run( + run_id="run_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/messages/batches/{jsonable_encoder(run_id)}/cancel", + method="PATCH", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncMessagesClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + self.batches = AsyncBatchesClient(client_wrapper=self._client_wrapper) + + async def list_batch_results( + self, run_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + Stream back responses from the batch request + + Parameters + ---------- + run_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful response + + Examples + -------- + import asyncio + + from letta_client import AsyncLetta + + client = AsyncLetta( + token="YOUR_TOKEN", + ) + + + async def main() -> None: + await client.messages.list_batch_results( + run_id="run_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/messages/batches/{jsonable_encoder(run_id)}/results", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def cancel_batch_run( + self, run_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + Cancel a batch run. + + Parameters + ---------- + run_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from letta_client import AsyncLetta + + client = AsyncLetta( + token="YOUR_TOKEN", + ) + + + async def main() -> None: + await client.messages.cancel_batch_run( + run_id="run_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/messages/batches/{jsonable_encoder(run_id)}/cancel", + method="PATCH", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/letta_client/types/__init__.py b/src/letta_client/types/__init__.py index e1ff7c7..9c8f681 100644 --- a/src/letta_client/types/__init__.py +++ b/src/letta_client/types/__init__.py @@ -112,6 +112,7 @@ from .job_status import JobStatus from .job_type import JobType from .json_schema import JsonSchema +from .letta_batch_request import LettaBatchRequest from .letta_message_content_union import LettaMessageContentUnion from .letta_message_union import LettaMessageUnion from .letta_request import LettaRequest @@ -329,6 +330,7 @@ "JobStatus", "JobType", "JsonSchema", + "LettaBatchRequest", "LettaMessageContentUnion", "LettaMessageUnion", "LettaRequest", diff --git a/src/letta_client/types/app_auth_scheme_auth_mode.py b/src/letta_client/types/app_auth_scheme_auth_mode.py index 0dc1288..41a858e 100644 --- a/src/letta_client/types/app_auth_scheme_auth_mode.py +++ b/src/letta_client/types/app_auth_scheme_auth_mode.py @@ -13,7 +13,6 @@ "GOOGLE_SERVICE_ACCOUNT", "GOOGLEADS_AUTH", "NO_AUTH", - "COMPOSIO_LINK", "CALCOM_AUTH", ], typing.Any, diff --git a/src/letta_client/types/chat_completion_audio_param_voice.py b/src/letta_client/types/chat_completion_audio_param_voice.py index 002b9a5..fe1e8d7 100644 --- a/src/letta_client/types/chat_completion_audio_param_voice.py +++ b/src/letta_client/types/chat_completion_audio_param_voice.py @@ -3,5 +3,16 @@ import typing ChatCompletionAudioParamVoice = typing.Union[ - typing.Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"], typing.Any + str, + typing.Literal["alloy"], + typing.Literal["ash"], + typing.Literal["ballad"], + typing.Literal["coral"], + typing.Literal["echo"], + typing.Literal["fable"], + typing.Literal["onyx"], + typing.Literal["nova"], + typing.Literal["sage"], + typing.Literal["shimmer"], + typing.Literal["verse"], ] diff --git a/src/letta_client/types/job_status.py b/src/letta_client/types/job_status.py index f7e03f1..bd61a22 100644 --- a/src/letta_client/types/job_status.py +++ b/src/letta_client/types/job_status.py @@ -3,5 +3,5 @@ import typing JobStatus = typing.Union[ - typing.Literal["not_started", "created", "running", "completed", "failed", "pending"], typing.Any + typing.Literal["not_started", "created", "running", "completed", "failed", "pending", "cancelled"], typing.Any ] diff --git a/src/letta_client/types/letta_batch_request.py 
b/src/letta_client/types/letta_batch_request.py new file mode 100644 index 0000000..c68b067 --- /dev/null +++ b/src/letta_client/types/letta_batch_request.py @@ -0,0 +1,48 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .message_create import MessageCreate +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class LettaBatchRequest(UncheckedBaseModel): + messages: typing.List[MessageCreate] = pydantic.Field() + """ + The messages to be sent to the agent. + """ + + use_assistant_message: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the server should parse specific tool call arguments (default `send_message`) as `AssistantMessage` objects. + """ + + assistant_message_tool_name: typing.Optional[str] = pydantic.Field(default=None) + """ + The name of the designated message tool. + """ + + assistant_message_tool_kwarg: typing.Optional[str] = pydantic.Field(default=None) + """ + The name of the message argument in the designated message tool. + """ + + agent_id: str = pydantic.Field() + """ + The ID of the agent to process the requests for. + """ + + custom_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Custom ID for request. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow
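The new `LettaBatchRequest` model defined above carries the same assistant-message options as the single-request path. A short construction sketch; the tool name and kwarg values are illustrative assumptions, not documented defaults:

```python
from letta_client import LettaBatchRequest, MessageCreate, TextContent

# All assistant-message fields are optional (default None); the values
# below are illustrative, not documented defaults.
request = LettaBatchRequest(
    agent_id="agent_id",
    messages=[
        MessageCreate(role="user", content=[TextContent(text="text")]),
    ],
    use_assistant_message=True,
    assistant_message_tool_name="send_message",
    assistant_message_tool_kwarg="message",
    custom_id="my-correlation-id",
)
print(request.agent_id)  # instances are frozen, so fields are read-only
```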