From 7fa40391134e26c07c2f2d19bce91a331a91f2e4 Mon Sep 17 00:00:00 2001
From: Elastic Machine
Date: Mon, 18 Aug 2025 06:03:14 +0000
Subject: [PATCH] Auto-generated API code
---
elasticsearch/_async/client/__init__.py | 10 +-
elasticsearch/_async/client/cat.py | 134 ++++++++++-
elasticsearch/_async/client/cluster.py | 9 +-
elasticsearch/_async/client/esql.py | 19 +-
elasticsearch/_async/client/indices.py | 2 +-
elasticsearch/_async/client/inference.py | 283 ++++++++++++++++++++++-
elasticsearch/_async/client/simulate.py | 9 +
elasticsearch/_async/client/sql.py | 2 +-
elasticsearch/_sync/client/__init__.py | 10 +-
elasticsearch/_sync/client/cat.py | 134 ++++++++++-
elasticsearch/_sync/client/cluster.py | 9 +-
elasticsearch/_sync/client/esql.py | 19 +-
elasticsearch/_sync/client/indices.py | 2 +-
elasticsearch/_sync/client/inference.py | 283 ++++++++++++++++++++++-
elasticsearch/_sync/client/simulate.py | 9 +
elasticsearch/_sync/client/sql.py | 2 +-
elasticsearch/dsl/types.py | 22 +-
17 files changed, 902 insertions(+), 56 deletions(-)
diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py
index 0874e120f..157282b75 100644
--- a/elasticsearch/_async/client/__init__.py
+++ b/elasticsearch/_async/client/__init__.py
@@ -608,6 +608,7 @@ async def bulk(
JavaScript: Check out client.helpers.*
.NET: Check out BulkAllObservable
PHP: Check out bulk indexing.
+ Ruby: Check out Elasticsearch::Helpers::BulkHelper
Submitting bulk requests with cURL
If you're providing text file input to curl
, you must use the --data-binary
flag instead of plain -d
.
@@ -3875,6 +3876,13 @@ async def reindex(
In this case, the response includes a count of the version conflicts that were encountered.
Note that the handling of other error types is unaffected by the conflicts
property.
Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than max_docs
until it has successfully indexed max_docs
documents into the target or it has gone through every document in the source query.
+ It's recommended to reindex on indices with a green status. Reindexing can fail when a node shuts down or crashes.
+
+ - When requested with
wait_for_completion=true
(default), the request fails if the node shuts down.
+ - When requested with
wait_for_completion=false
, a task id is returned, which can be used via the task management API to monitor, debug, or cancel the task. The task may disappear or fail if the node shuts down.
+ When retrying a failed reindex operation, it might be necessary to set conflicts=proceed
or to first delete the partial destination index.
+ Additionally, dry runs, checking disk space, and fetching index recovery information can help address the root cause.
+
Refer to the linked documentation for examples of how to reindex documents.
@@ -5659,7 +5667,7 @@ async def termvectors(
doc: t.Optional[t.Mapping[str, t.Any]] = None,
error_trace: t.Optional[bool] = None,
field_statistics: t.Optional[bool] = None,
- fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ fields: t.Optional[t.Sequence[str]] = None,
filter: t.Optional[t.Mapping[str, t.Any]] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
diff --git a/elasticsearch/_async/client/cat.py b/elasticsearch/_async/client/cat.py
index 148ed721f..4a870f6ab 100644
--- a/elasticsearch/_async/client/cat.py
+++ b/elasticsearch/_async/client/cat.py
@@ -47,7 +47,34 @@ async def aliases(
] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
format: t.Optional[str] = None,
- h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ h: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[
+ str,
+ t.Literal[
+ "alias",
+ "filter",
+ "index",
+ "is_write_index",
+ "routing.index",
+ "routing.search",
+ ],
+ ]
+ ],
+ t.Union[
+ str,
+ t.Literal[
+ "alias",
+ "filter",
+ "index",
+ "is_write_index",
+ "routing.index",
+ "routing.search",
+ ],
+ ],
+ ]
+ ] = None,
help: t.Optional[bool] = None,
human: t.Optional[bool] = None,
master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
@@ -74,7 +101,8 @@ async def aliases(
values, such as `open,hidden`.
:param format: Specifies the format to return the columnar data in, can be set
to `text`, `json`, `cbor`, `yaml`, or `smile`.
- :param h: List of columns to appear in the response. Supports simple wildcards.
+ :param h: A comma-separated list of column names to display. It supports simple
+ wildcards.
:param help: When set to `true` will output available columns. This option can't
be combined with any other query string option.
:param master_timeout: The period to wait for a connection to the master node.
@@ -137,7 +165,48 @@ async def allocation(
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
format: t.Optional[str] = None,
- h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ h: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[
+ str,
+ t.Literal[
+ "disk.avail",
+ "disk.indices",
+ "disk.indices.forecast",
+ "disk.percent",
+ "disk.total",
+ "disk.used",
+ "host",
+ "ip",
+ "node",
+ "node.role",
+ "shards",
+ "shards.undesired",
+ "write_load.forecast",
+ ],
+ ]
+ ],
+ t.Union[
+ str,
+ t.Literal[
+ "disk.avail",
+ "disk.indices",
+ "disk.indices.forecast",
+ "disk.percent",
+ "disk.total",
+ "disk.used",
+ "host",
+ "ip",
+ "node",
+ "node.role",
+ "shards",
+ "shards.undesired",
+ "write_load.forecast",
+ ],
+ ],
+ ]
+ ] = None,
help: t.Optional[bool] = None,
human: t.Optional[bool] = None,
local: t.Optional[bool] = None,
@@ -161,7 +230,8 @@ async def allocation(
:param bytes: The unit used to display byte values.
:param format: Specifies the format to return the columnar data in, can be set
to `text`, `json`, `cbor`, `yaml`, or `smile`.
- :param h: List of columns to appear in the response. Supports simple wildcards.
+ :param h: A comma-separated list of column names to display. It supports simple
+ wildcards.
:param help: When set to `true` will output available columns. This option can't
be combined with any other query string option.
:param local: If `true`, the request computes the list of selected nodes from
@@ -224,7 +294,36 @@ async def component_templates(
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
format: t.Optional[str] = None,
- h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ h: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[
+ str,
+ t.Literal[
+ "alias_count",
+ "included_in",
+ "mapping_count",
+ "metadata_count",
+ "name",
+ "settings_count",
+ "version",
+ ],
+ ]
+ ],
+ t.Union[
+ str,
+ t.Literal[
+ "alias_count",
+ "included_in",
+ "mapping_count",
+ "metadata_count",
+ "name",
+ "settings_count",
+ "version",
+ ],
+ ],
+ ]
+ ] = None,
help: t.Optional[bool] = None,
human: t.Optional[bool] = None,
local: t.Optional[bool] = None,
@@ -249,7 +348,8 @@ async def component_templates(
If it is omitted, all component templates are returned.
:param format: Specifies the format to return the columnar data in, can be set
to `text`, `json`, `cbor`, `yaml`, or `smile`.
- :param h: List of columns to appear in the response. Supports simple wildcards.
+ :param h: A comma-separated list of column names to display. It supports simple
+ wildcards.
:param help: When set to `true` will output available columns. This option can't
be combined with any other query string option.
:param local: If `true`, the request computes the list of selected nodes from
@@ -310,7 +410,12 @@ async def count(
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
format: t.Optional[str] = None,
- h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ h: t.Optional[
+ t.Union[
+ t.Sequence[t.Union[str, t.Literal["count", "epoch", "timestamp"]]],
+ t.Union[str, t.Literal["count", "epoch", "timestamp"]],
+ ]
+ ] = None,
help: t.Optional[bool] = None,
human: t.Optional[bool] = None,
pretty: t.Optional[bool] = None,
@@ -334,7 +439,8 @@ async def count(
and indices, omit this parameter or use `*` or `_all`.
:param format: Specifies the format to return the columnar data in, can be set
to `text`, `json`, `cbor`, `yaml`, or `smile`.
- :param h: List of columns to appear in the response. Supports simple wildcards.
+ :param h: A comma-separated list of column names to display. It supports simple
+ wildcards.
:param help: When set to `true` will output available columns. This option can't
be combined with any other query string option.
:param s: List of columns that determine how the table should be sorted. Sorting
@@ -389,7 +495,14 @@ async def fielddata(
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
format: t.Optional[str] = None,
- h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ h: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[str, t.Literal["field", "host", "id", "ip", "node", "size"]]
+ ],
+ t.Union[str, t.Literal["field", "host", "id", "ip", "node", "size"]],
+ ]
+ ] = None,
help: t.Optional[bool] = None,
human: t.Optional[bool] = None,
pretty: t.Optional[bool] = None,
@@ -412,7 +525,8 @@ async def fielddata(
:param bytes: The unit used to display byte values.
:param format: Specifies the format to return the columnar data in, can be set
to `text`, `json`, `cbor`, `yaml`, or `smile`.
- :param h: List of columns to appear in the response. Supports simple wildcards.
+ :param h: A comma-separated list of column names to display. It supports simple
+ wildcards.
:param help: When set to `true` will output available columns. This option can't
be combined with any other query string option.
:param s: List of columns that determine how the table should be sorted. Sorting
diff --git a/elasticsearch/_async/client/cluster.py b/elasticsearch/_async/client/cluster.py
index 91956f7c4..a6efa8529 100644
--- a/elasticsearch/_async/client/cluster.py
+++ b/elasticsearch/_async/client/cluster.py
@@ -374,8 +374,13 @@ async def get_settings(
``_
:param flat_settings: If `true`, returns settings in flat format.
- :param include_defaults: If `true`, returns default cluster settings from the
- local node.
+ :param include_defaults: If `true`, also returns default values for all other
+ cluster settings, reflecting the values in the `elasticsearch.yml` file of
+ one of the nodes in the cluster. If the nodes in your cluster do not all
+ have the same values in their `elasticsearch.yml` config files then the values
+ returned by this API may vary from invocation to invocation and may not reflect
+ the values that Elasticsearch uses in all situations. Use the `GET _nodes/settings`
+ API to fetch the settings for each individual node in your cluster.
:param master_timeout: Period to wait for a connection to the master node. If
no response is received before the timeout expires, the request fails and
returns an error.
diff --git a/elasticsearch/_async/client/esql.py b/elasticsearch/_async/client/esql.py
index 9999f1db1..bc03e0db6 100644
--- a/elasticsearch/_async/client/esql.py
+++ b/elasticsearch/_async/client/esql.py
@@ -50,7 +50,7 @@ class EsqlClient(NamespacedClient):
async def async_query(
self,
*,
- query: t.Optional[str] = None,
+ query: t.Optional[t.Union[str, "ESQLBase"]] = None,
allow_partial_results: t.Optional[bool] = None,
columnar: t.Optional[bool] = None,
delimiter: t.Optional[str] = None,
@@ -111,7 +111,12 @@ async def async_query(
which has the name of all the columns.
:param filter: Specify a Query DSL query in the filter parameter to filter the
set of documents that an ES|QL query runs on.
- :param format: A short version of the Accept header, for example `json` or `yaml`.
+ :param format: A short version of the Accept header, e.g. json, yaml. `csv`,
+ `tsv`, and `txt` formats will return results in a tabular format, excluding
+ other metadata fields from the response. For async requests, nothing will
+ be returned if the async query doesn't finish within the timeout. The query
+ ID and running status are available in the `X-Elasticsearch-Async-Id` and
+ `X-Elasticsearch-Async-Is-Running` HTTP headers of the response, respectively.
:param include_ccs_metadata: When set to `true` and performing a cross-cluster
query, the response will include an extra `_clusters` object with information
about the clusters that participated in the search along with info such as
@@ -165,7 +170,7 @@ async def async_query(
__query["pretty"] = pretty
if not __body:
if query is not None:
- __body["query"] = query
+ __body["query"] = str(query)
if columnar is not None:
__body["columnar"] = columnar
if filter is not None:
@@ -484,7 +489,7 @@ async def list_queries(
async def query(
self,
*,
- query: t.Optional[str] = None,
+ query: t.Optional[t.Union[str, "ESQLBase"]] = None,
allow_partial_results: t.Optional[bool] = None,
columnar: t.Optional[bool] = None,
delimiter: t.Optional[str] = None,
@@ -539,7 +544,9 @@ async def query(
`all_columns` which has the name of all columns.
:param filter: Specify a Query DSL query in the filter parameter to filter the
set of documents that an ES|QL query runs on.
- :param format: A short version of the Accept header, e.g. json, yaml.
+ :param format: A short version of the Accept header, e.g. json, yaml. `csv`,
+ `tsv`, and `txt` formats will return results in a tabular format, excluding
+ other metadata fields from the response.
:param include_ccs_metadata: When set to `true` and performing a cross-cluster
query, the response will include an extra `_clusters` object with information
about the clusters that participated in the search along with info such as
@@ -579,7 +586,7 @@ async def query(
__query["pretty"] = pretty
if not __body:
if query is not None:
- __body["query"] = query
+ __body["query"] = str(query)
if columnar is not None:
__body["columnar"] = columnar
if filter is not None:
diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py
index 2a40027c2..ec8d9d9c3 100644
--- a/elasticsearch/_async/client/indices.py
+++ b/elasticsearch/_async/client/indices.py
@@ -4179,7 +4179,7 @@ async def put_mapping(
Change a field's mapping using reindexing
Rename a field using a field alias
- Learn how to use the update mapping API with practical examples in the Update mapping API examples guide.
+ Learn how to use the update mapping API with practical examples in the Update mapping API examples guide.
``_
diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py
index 58b51a72e..686d99d27 100644
--- a/elasticsearch/_async/client/inference.py
+++ b/elasticsearch/_async/client/inference.py
@@ -389,23 +389,26 @@ async def put(
However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
The following integrations are available through the inference API. You can find the available task types next to the integration name:
+ - AI21 (
chat_completion
, completion
)
- AlibabaCloud AI Search (
completion
, rerank
, sparse_embedding
, text_embedding
)
- Amazon Bedrock (
completion
, text_embedding
)
+ - Amazon SageMaker (
chat_completion
, completion
, rerank
, sparse_embedding
, text_embedding
)
- Anthropic (
completion
)
- Azure AI Studio (
completion
, 'rerank', text_embedding
)
- Azure OpenAI (
completion
, text_embedding
)
- Cohere (
completion
, rerank
, text_embedding
)
- - DeepSeek (
completion
, chat_completion
)
+ - DeepSeek (
chat_completion
, completion
)
- Elasticsearch (
rerank
, sparse_embedding
, text_embedding
- this service is for built-in models and models uploaded through Eland)
- ELSER (
sparse_embedding
)
- Google AI Studio (
completion
, text_embedding
)
- - Google Vertex AI (
rerank
, text_embedding
)
+ - Google Vertex AI (
chat_completion
, completion
, rerank
, text_embedding
)
- Hugging Face (
chat_completion
, completion
, rerank
, text_embedding
)
+ - JinaAI (
rerank
, text_embedding
)
+ - Llama (
chat_completion
, completion
, text_embedding
)
- Mistral (
chat_completion
, completion
, text_embedding
)
- OpenAI (
chat_completion
, completion
, text_embedding
)
- - VoyageAI (
text_embedding
, rerank
)
+ - VoyageAI (
rerank
, text_embedding
)
- Watsonx inference integration (
text_embedding
)
- - JinaAI (
text_embedding
, rerank
)
@@ -461,6 +464,86 @@ async def put(
path_parts=__path_parts,
)
+ @_rewrite_parameters(
+ body_fields=("service", "service_settings"),
+ )
+ async def put_ai21(
+ self,
+ *,
+ task_type: t.Union[str, t.Literal["chat_completion", "completion"]],
+ ai21_inference_id: str,
+ service: t.Optional[t.Union[str, t.Literal["ai21"]]] = None,
+ service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+ error_trace: t.Optional[bool] = None,
+ filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ human: t.Optional[bool] = None,
+ pretty: t.Optional[bool] = None,
+ timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ body: t.Optional[t.Dict[str, t.Any]] = None,
+ ) -> ObjectApiResponse[t.Any]:
+ """
+ .. raw:: html
+
+ Create an AI21 inference endpoint.
+ Create an inference endpoint to perform an inference task with the ai21
service.
+
+
+ ``_
+
+ :param task_type: The type of the inference task that the model will perform.
+ :param ai21_inference_id: The unique identifier of the inference endpoint.
+ :param service: The type of service supported for the specified task type. In
+ this case, `ai21`.
+ :param service_settings: Settings used to install the inference model. These
+ settings are specific to the `ai21` service.
+ :param timeout: Specifies the amount of time to wait for the inference endpoint
+ to be created.
+ """
+ if task_type in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'task_type'")
+ if ai21_inference_id in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'ai21_inference_id'")
+ if service is None and body is None:
+ raise ValueError("Empty value passed for parameter 'service'")
+ if service_settings is None and body is None:
+ raise ValueError("Empty value passed for parameter 'service_settings'")
+ __path_parts: t.Dict[str, str] = {
+ "task_type": _quote(task_type),
+ "ai21_inference_id": _quote(ai21_inference_id),
+ }
+ __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["ai21_inference_id"]}'
+ __query: t.Dict[str, t.Any] = {}
+ __body: t.Dict[str, t.Any] = body if body is not None else {}
+ if error_trace is not None:
+ __query["error_trace"] = error_trace
+ if filter_path is not None:
+ __query["filter_path"] = filter_path
+ if human is not None:
+ __query["human"] = human
+ if pretty is not None:
+ __query["pretty"] = pretty
+ if timeout is not None:
+ __query["timeout"] = timeout
+ if not __body:
+ if service is not None:
+ __body["service"] = service
+ if service_settings is not None:
+ __body["service_settings"] = service_settings
+ if not __body:
+ __body = None # type: ignore[assignment]
+ __headers = {"accept": "application/json"}
+ if __body is not None:
+ __headers["content-type"] = "application/json"
+ return await self.perform_request( # type: ignore[return-value]
+ "PUT",
+ __path,
+ params=__query,
+ headers=__headers,
+ body=__body,
+ endpoint_id="inference.put_ai21",
+ path_parts=__path_parts,
+ )
+
@_rewrite_parameters(
body_fields=(
"service",
@@ -659,6 +742,112 @@ async def put_amazonbedrock(
path_parts=__path_parts,
)
+ @_rewrite_parameters(
+ body_fields=(
+ "service",
+ "service_settings",
+ "chunking_settings",
+ "task_settings",
+ ),
+ )
+ async def put_amazonsagemaker(
+ self,
+ *,
+ task_type: t.Union[
+ str,
+ t.Literal[
+ "chat_completion",
+ "completion",
+ "rerank",
+ "sparse_embedding",
+ "text_embedding",
+ ],
+ ],
+ amazonsagemaker_inference_id: str,
+ service: t.Optional[t.Union[str, t.Literal["amazon_sagemaker"]]] = None,
+ service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+ chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+ error_trace: t.Optional[bool] = None,
+ filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ human: t.Optional[bool] = None,
+ pretty: t.Optional[bool] = None,
+ task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+ timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ body: t.Optional[t.Dict[str, t.Any]] = None,
+ ) -> ObjectApiResponse[t.Any]:
+ """
+ .. raw:: html
+
+ Create an Amazon SageMaker inference endpoint.
+ Create an inference endpoint to perform an inference task with the amazon_sagemaker
service.
+
+
+ ``_
+
+ :param task_type: The type of the inference task that the model will perform.
+ :param amazonsagemaker_inference_id: The unique identifier of the inference endpoint.
+ :param service: The type of service supported for the specified task type. In
+ this case, `amazon_sagemaker`.
+ :param service_settings: Settings used to install the inference model. These
+ settings are specific to the `amazon_sagemaker` service and `service_settings.api`
+ you specified.
+ :param chunking_settings: The chunking configuration object.
+ :param task_settings: Settings to configure the inference task. These settings
+ are specific to the task type and `service_settings.api` you specified.
+ :param timeout: Specifies the amount of time to wait for the inference endpoint
+ to be created.
+ """
+ if task_type in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'task_type'")
+ if amazonsagemaker_inference_id in SKIP_IN_PATH:
+ raise ValueError(
+ "Empty value passed for parameter 'amazonsagemaker_inference_id'"
+ )
+ if service is None and body is None:
+ raise ValueError("Empty value passed for parameter 'service'")
+ if service_settings is None and body is None:
+ raise ValueError("Empty value passed for parameter 'service_settings'")
+ __path_parts: t.Dict[str, str] = {
+ "task_type": _quote(task_type),
+ "amazonsagemaker_inference_id": _quote(amazonsagemaker_inference_id),
+ }
+ __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["amazonsagemaker_inference_id"]}'
+ __query: t.Dict[str, t.Any] = {}
+ __body: t.Dict[str, t.Any] = body if body is not None else {}
+ if error_trace is not None:
+ __query["error_trace"] = error_trace
+ if filter_path is not None:
+ __query["filter_path"] = filter_path
+ if human is not None:
+ __query["human"] = human
+ if pretty is not None:
+ __query["pretty"] = pretty
+ if timeout is not None:
+ __query["timeout"] = timeout
+ if not __body:
+ if service is not None:
+ __body["service"] = service
+ if service_settings is not None:
+ __body["service_settings"] = service_settings
+ if chunking_settings is not None:
+ __body["chunking_settings"] = chunking_settings
+ if task_settings is not None:
+ __body["task_settings"] = task_settings
+ if not __body:
+ __body = None # type: ignore[assignment]
+ __headers = {"accept": "application/json"}
+ if __body is not None:
+ __headers["content-type"] = "application/json"
+ return await self.perform_request( # type: ignore[return-value]
+ "PUT",
+ __path,
+ params=__query,
+ headers=__headers,
+ body=__body,
+ endpoint_id="inference.put_amazonsagemaker",
+ path_parts=__path_parts,
+ )
+
@_rewrite_parameters(
body_fields=(
"service",
@@ -1887,6 +2076,92 @@ async def put_jinaai(
path_parts=__path_parts,
)
+ @_rewrite_parameters(
+ body_fields=("service", "service_settings", "chunking_settings"),
+ )
+ async def put_llama(
+ self,
+ *,
+ task_type: t.Union[
+ str, t.Literal["chat_completion", "completion", "text_embedding"]
+ ],
+ llama_inference_id: str,
+ service: t.Optional[t.Union[str, t.Literal["llama"]]] = None,
+ service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+ chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+ error_trace: t.Optional[bool] = None,
+ filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ human: t.Optional[bool] = None,
+ pretty: t.Optional[bool] = None,
+ timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ body: t.Optional[t.Dict[str, t.Any]] = None,
+ ) -> ObjectApiResponse[t.Any]:
+ """
+ .. raw:: html
+
+ Create a Llama inference endpoint.
+ Create an inference endpoint to perform an inference task with the llama
service.
+
+
+ ``_
+
+ :param task_type: The type of the inference task that the model will perform.
+ :param llama_inference_id: The unique identifier of the inference endpoint.
+ :param service: The type of service supported for the specified task type. In
+ this case, `llama`.
+ :param service_settings: Settings used to install the inference model. These
+ settings are specific to the `llama` service.
+ :param chunking_settings: The chunking configuration object.
+ :param timeout: Specifies the amount of time to wait for the inference endpoint
+ to be created.
+ """
+ if task_type in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'task_type'")
+ if llama_inference_id in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'llama_inference_id'")
+ if service is None and body is None:
+ raise ValueError("Empty value passed for parameter 'service'")
+ if service_settings is None and body is None:
+ raise ValueError("Empty value passed for parameter 'service_settings'")
+ __path_parts: t.Dict[str, str] = {
+ "task_type": _quote(task_type),
+ "llama_inference_id": _quote(llama_inference_id),
+ }
+ __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["llama_inference_id"]}'
+ __query: t.Dict[str, t.Any] = {}
+ __body: t.Dict[str, t.Any] = body if body is not None else {}
+ if error_trace is not None:
+ __query["error_trace"] = error_trace
+ if filter_path is not None:
+ __query["filter_path"] = filter_path
+ if human is not None:
+ __query["human"] = human
+ if pretty is not None:
+ __query["pretty"] = pretty
+ if timeout is not None:
+ __query["timeout"] = timeout
+ if not __body:
+ if service is not None:
+ __body["service"] = service
+ if service_settings is not None:
+ __body["service_settings"] = service_settings
+ if chunking_settings is not None:
+ __body["chunking_settings"] = chunking_settings
+ if not __body:
+ __body = None # type: ignore[assignment]
+ __headers = {"accept": "application/json"}
+ if __body is not None:
+ __headers["content-type"] = "application/json"
+ return await self.perform_request( # type: ignore[return-value]
+ "PUT",
+ __path,
+ params=__query,
+ headers=__headers,
+ body=__body,
+ endpoint_id="inference.put_llama",
+ path_parts=__path_parts,
+ )
+
@_rewrite_parameters(
body_fields=("service", "service_settings", "chunking_settings"),
)
diff --git a/elasticsearch/_async/client/simulate.py b/elasticsearch/_async/client/simulate.py
index bb636ddb6..e9a3fc277 100644
--- a/elasticsearch/_async/client/simulate.py
+++ b/elasticsearch/_async/client/simulate.py
@@ -56,6 +56,7 @@ async def ingest(
t.Mapping[str, t.Mapping[str, t.Any]]
] = None,
mapping_addition: t.Optional[t.Mapping[str, t.Any]] = None,
+ merge_type: t.Optional[t.Union[str, t.Literal["index", "template"]]] = None,
pipeline: t.Optional[str] = None,
pipeline_substitutions: t.Optional[
t.Mapping[str, t.Mapping[str, t.Any]]
@@ -93,6 +94,12 @@ async def ingest(
:param index_template_substitutions: A map of index template names to substitute
index template definition objects.
:param mapping_addition:
+ :param merge_type: The method to be used when merging mapping_addition into existing
+ mappings. Mappings can be merged in the way mapping changes are merged into
+ an existing index, or in the way mapping changes are merged into existing
+ templates. Some changes are allowed to templates that are not allowed to
+ indices. For example, a field cannot be changed to an incompatible type in
+ an index, but can in a template.
:param pipeline: The pipeline to use as the default pipeline. This value can
be used to override the default pipeline of the index.
:param pipeline_substitutions: Pipelines to test. If you don’t specify the `pipeline`
@@ -116,6 +123,8 @@ async def ingest(
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
+ if merge_type is not None:
+ __query["merge_type"] = merge_type
if pipeline is not None:
__query["pipeline"] = pipeline
if pretty is not None:
diff --git a/elasticsearch/_async/client/sql.py b/elasticsearch/_async/client/sql.py
index 3eb37a6cc..de423ea66 100644
--- a/elasticsearch/_async/client/sql.py
+++ b/elasticsearch/_async/client/sql.py
@@ -283,7 +283,7 @@ async def query(
keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
keep_on_completion: t.Optional[bool] = None,
page_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
- params: t.Optional[t.Mapping[str, t.Any]] = None,
+ params: t.Optional[t.Sequence[t.Any]] = None,
pretty: t.Optional[bool] = None,
query: t.Optional[str] = None,
request_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py
index 5f7a4313d..5081722bc 100644
--- a/elasticsearch/_sync/client/__init__.py
+++ b/elasticsearch/_sync/client/__init__.py
@@ -606,6 +606,7 @@ def bulk(
JavaScript: Check out client.helpers.*
.NET: Check out BulkAllObservable
PHP: Check out bulk indexing.
+ Ruby: Check out Elasticsearch::Helpers::BulkHelper
Submitting bulk requests with cURL
If you're providing text file input to curl
, you must use the --data-binary
flag instead of plain -d
.
@@ -3873,6 +3874,13 @@ def reindex(
In this case, the response includes a count of the version conflicts that were encountered.
Note that the handling of other error types is unaffected by the conflicts
property.
Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than max_docs
until it has successfully indexed max_docs
documents into the target or it has gone through every document in the source query.
+ It's recommended to reindex on indices with a green status. Reindexing can fail when a node shuts down or crashes.
+
+ - When requested with
wait_for_completion=true
(default), the request fails if the node shuts down.
+ - When requested with
wait_for_completion=false
, a task id is returned, which can be used via the task management API to monitor, debug, or cancel the task. The task may disappear or fail if the node shuts down.
+ When retrying a failed reindex operation, it might be necessary to set conflicts=proceed
or to first delete the partial destination index.
+ Additionally, dry runs, checking disk space, and fetching index recovery information can help address the root cause.
+
Refer to the linked documentation for examples of how to reindex documents.
@@ -5657,7 +5665,7 @@ def termvectors(
doc: t.Optional[t.Mapping[str, t.Any]] = None,
error_trace: t.Optional[bool] = None,
field_statistics: t.Optional[bool] = None,
- fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ fields: t.Optional[t.Sequence[str]] = None,
filter: t.Optional[t.Mapping[str, t.Any]] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
diff --git a/elasticsearch/_sync/client/cat.py b/elasticsearch/_sync/client/cat.py
index b1ab46d99..fad330ae1 100644
--- a/elasticsearch/_sync/client/cat.py
+++ b/elasticsearch/_sync/client/cat.py
@@ -47,7 +47,34 @@ def aliases(
] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
format: t.Optional[str] = None,
- h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ h: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[
+ str,
+ t.Literal[
+ "alias",
+ "filter",
+ "index",
+ "is_write_index",
+ "routing.index",
+ "routing.search",
+ ],
+ ]
+ ],
+ t.Union[
+ str,
+ t.Literal[
+ "alias",
+ "filter",
+ "index",
+ "is_write_index",
+ "routing.index",
+ "routing.search",
+ ],
+ ],
+ ]
+ ] = None,
help: t.Optional[bool] = None,
human: t.Optional[bool] = None,
master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
@@ -74,7 +101,8 @@ def aliases(
values, such as `open,hidden`.
:param format: Specifies the format to return the columnar data in, can be set
to `text`, `json`, `cbor`, `yaml`, or `smile`.
- :param h: List of columns to appear in the response. Supports simple wildcards.
+ :param h: A comma-separated list of column names to display. It supports simple
+ wildcards.
:param help: When set to `true` will output available columns. This option can't
be combined with any other query string option.
:param master_timeout: The period to wait for a connection to the master node.
@@ -137,7 +165,48 @@ def allocation(
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
format: t.Optional[str] = None,
- h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ h: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[
+ str,
+ t.Literal[
+ "disk.avail",
+ "disk.indices",
+ "disk.indices.forecast",
+ "disk.percent",
+ "disk.total",
+ "disk.used",
+ "host",
+ "ip",
+ "node",
+ "node.role",
+ "shards",
+ "shards.undesired",
+ "write_load.forecast",
+ ],
+ ]
+ ],
+ t.Union[
+ str,
+ t.Literal[
+ "disk.avail",
+ "disk.indices",
+ "disk.indices.forecast",
+ "disk.percent",
+ "disk.total",
+ "disk.used",
+ "host",
+ "ip",
+ "node",
+ "node.role",
+ "shards",
+ "shards.undesired",
+ "write_load.forecast",
+ ],
+ ],
+ ]
+ ] = None,
help: t.Optional[bool] = None,
human: t.Optional[bool] = None,
local: t.Optional[bool] = None,
@@ -161,7 +230,8 @@ def allocation(
:param bytes: The unit used to display byte values.
:param format: Specifies the format to return the columnar data in, can be set
to `text`, `json`, `cbor`, `yaml`, or `smile`.
- :param h: List of columns to appear in the response. Supports simple wildcards.
+ :param h: A comma-separated list of column names to display. It supports simple
+ wildcards.
:param help: When set to `true` will output available columns. This option can't
be combined with any other query string option.
:param local: If `true`, the request computes the list of selected nodes from
@@ -224,7 +294,36 @@ def component_templates(
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
format: t.Optional[str] = None,
- h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ h: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[
+ str,
+ t.Literal[
+ "alias_count",
+ "included_in",
+ "mapping_count",
+ "metadata_count",
+ "name",
+ "settings_count",
+ "version",
+ ],
+ ]
+ ],
+ t.Union[
+ str,
+ t.Literal[
+ "alias_count",
+ "included_in",
+ "mapping_count",
+ "metadata_count",
+ "name",
+ "settings_count",
+ "version",
+ ],
+ ],
+ ]
+ ] = None,
help: t.Optional[bool] = None,
human: t.Optional[bool] = None,
local: t.Optional[bool] = None,
@@ -249,7 +348,8 @@ def component_templates(
If it is omitted, all component templates are returned.
:param format: Specifies the format to return the columnar data in, can be set
to `text`, `json`, `cbor`, `yaml`, or `smile`.
- :param h: List of columns to appear in the response. Supports simple wildcards.
+ :param h: A comma-separated list of column names to display. It supports simple
+ wildcards.
:param help: When set to `true` will output available columns. This option can't
be combined with any other query string option.
:param local: If `true`, the request computes the list of selected nodes from
@@ -310,7 +410,12 @@ def count(
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
format: t.Optional[str] = None,
- h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ h: t.Optional[
+ t.Union[
+ t.Sequence[t.Union[str, t.Literal["count", "epoch", "timestamp"]]],
+ t.Union[str, t.Literal["count", "epoch", "timestamp"]],
+ ]
+ ] = None,
help: t.Optional[bool] = None,
human: t.Optional[bool] = None,
pretty: t.Optional[bool] = None,
@@ -334,7 +439,8 @@ def count(
and indices, omit this parameter or use `*` or `_all`.
:param format: Specifies the format to return the columnar data in, can be set
to `text`, `json`, `cbor`, `yaml`, or `smile`.
- :param h: List of columns to appear in the response. Supports simple wildcards.
+ :param h: A comma-separated list of column names to display. It supports simple
+ wildcards.
:param help: When set to `true` will output available columns. This option can't
be combined with any other query string option.
:param s: List of columns that determine how the table should be sorted. Sorting
@@ -389,7 +495,14 @@ def fielddata(
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
format: t.Optional[str] = None,
- h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ h: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[str, t.Literal["field", "host", "id", "ip", "node", "size"]]
+ ],
+ t.Union[str, t.Literal["field", "host", "id", "ip", "node", "size"]],
+ ]
+ ] = None,
help: t.Optional[bool] = None,
human: t.Optional[bool] = None,
pretty: t.Optional[bool] = None,
@@ -412,7 +525,8 @@ def fielddata(
:param bytes: The unit used to display byte values.
:param format: Specifies the format to return the columnar data in, can be set
to `text`, `json`, `cbor`, `yaml`, or `smile`.
- :param h: List of columns to appear in the response. Supports simple wildcards.
+ :param h: A comma-separated list of column names to display. It supports simple
+ wildcards.
:param help: When set to `true` will output available columns. This option can't
be combined with any other query string option.
:param s: List of columns that determine how the table should be sorted. Sorting
diff --git a/elasticsearch/_sync/client/cluster.py b/elasticsearch/_sync/client/cluster.py
index a56892d54..46cb6059f 100644
--- a/elasticsearch/_sync/client/cluster.py
+++ b/elasticsearch/_sync/client/cluster.py
@@ -374,8 +374,13 @@ def get_settings(
``_
:param flat_settings: If `true`, returns settings in flat format.
- :param include_defaults: If `true`, returns default cluster settings from the
- local node.
+ :param include_defaults: If `true`, also returns default values for all other
+ cluster settings, reflecting the values in the `elasticsearch.yml` file of
+ one of the nodes in the cluster. If the nodes in your cluster do not all
+ have the same values in their `elasticsearch.yml` config files then the values
+ returned by this API may vary from invocation to invocation and may not reflect
+ the values that Elasticsearch uses in all situations. Use the `GET _nodes/settings`
+ API to fetch the settings for each individual node in your cluster.
:param master_timeout: Period to wait for a connection to the master node. If
no response is received before the timeout expires, the request fails and
returns an error.
diff --git a/elasticsearch/_sync/client/esql.py b/elasticsearch/_sync/client/esql.py
index 0744a81bd..089079e0d 100644
--- a/elasticsearch/_sync/client/esql.py
+++ b/elasticsearch/_sync/client/esql.py
@@ -50,7 +50,7 @@ class EsqlClient(NamespacedClient):
def async_query(
self,
*,
- query: t.Optional[str] = None,
+ query: t.Optional[t.Union[str, "ESQLBase"]] = None,
allow_partial_results: t.Optional[bool] = None,
columnar: t.Optional[bool] = None,
delimiter: t.Optional[str] = None,
@@ -111,7 +111,12 @@ def async_query(
which has the name of all the columns.
:param filter: Specify a Query DSL query in the filter parameter to filter the
set of documents that an ES|QL query runs on.
- :param format: A short version of the Accept header, for example `json` or `yaml`.
+ :param format: A short version of the Accept header, e.g. json, yaml. `csv`,
+ `tsv`, and `txt` formats will return results in a tabular format, excluding
+ other metadata fields from the response. For async requests, nothing will
+ be returned if the async query doesn't finish within the timeout. The query
+ ID and running status are available in the `X-Elasticsearch-Async-Id` and
+ `X-Elasticsearch-Async-Is-Running` HTTP headers of the response, respectively.
:param include_ccs_metadata: When set to `true` and performing a cross-cluster
query, the response will include an extra `_clusters` object with information
about the clusters that participated in the search along with info such as
@@ -165,7 +170,7 @@ def async_query(
__query["pretty"] = pretty
if not __body:
if query is not None:
- __body["query"] = query
+ __body["query"] = str(query)
if columnar is not None:
__body["columnar"] = columnar
if filter is not None:
@@ -484,7 +489,7 @@ def list_queries(
def query(
self,
*,
- query: t.Optional[str] = None,
+ query: t.Optional[t.Union[str, "ESQLBase"]] = None,
allow_partial_results: t.Optional[bool] = None,
columnar: t.Optional[bool] = None,
delimiter: t.Optional[str] = None,
@@ -539,7 +544,9 @@ def query(
`all_columns` which has the name of all columns.
:param filter: Specify a Query DSL query in the filter parameter to filter the
set of documents that an ES|QL query runs on.
- :param format: A short version of the Accept header, e.g. json, yaml.
+ :param format: A short version of the Accept header, e.g. json, yaml. `csv`,
+ `tsv`, and `txt` formats will return results in a tabular format, excluding
+ other metadata fields from the response.
:param include_ccs_metadata: When set to `true` and performing a cross-cluster
query, the response will include an extra `_clusters` object with information
about the clusters that participated in the search along with info such as
@@ -579,7 +586,7 @@ def query(
__query["pretty"] = pretty
if not __body:
if query is not None:
- __body["query"] = query
+ __body["query"] = str(query)
if columnar is not None:
__body["columnar"] = columnar
if filter is not None:
diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py
index 8697d971b..4fb65847f 100644
--- a/elasticsearch/_sync/client/indices.py
+++ b/elasticsearch/_sync/client/indices.py
@@ -4179,7 +4179,7 @@ def put_mapping(
Change a field's mapping using reindexing
Rename a field using a field alias
- Learn how to use the update mapping API with practical examples in the Update mapping API examples guide.
+ Learn how to use the update mapping API with practical examples in the Update mapping API examples guide.
``_
diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py
index b7bab7f1c..47fc86fd5 100644
--- a/elasticsearch/_sync/client/inference.py
+++ b/elasticsearch/_sync/client/inference.py
@@ -389,23 +389,26 @@ def put(
However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
The following integrations are available through the inference API. You can find the available task types next to the integration name:
+ - AI21 (chat_completion, completion)
- AlibabaCloud AI Search (completion, rerank, sparse_embedding, text_embedding)
- Amazon Bedrock (completion, text_embedding)
+ - Amazon SageMaker (chat_completion, completion, rerank, sparse_embedding, text_embedding)
- Anthropic (completion)
- Azure AI Studio (completion, 'rerank', text_embedding)
- Azure OpenAI (completion, text_embedding)
- Cohere (completion, rerank, text_embedding)
- - DeepSeek (completion, chat_completion)
+ - DeepSeek (chat_completion, completion)
- Elasticsearch (rerank, sparse_embedding, text_embedding - this service is for built-in models and models uploaded through Eland)
- ELSER (sparse_embedding)
- Google AI Studio (completion, text_embedding)
- - Google Vertex AI (rerank, text_embedding)
+ - Google Vertex AI (chat_completion, completion, rerank, text_embedding)
- Hugging Face (chat_completion, completion, rerank, text_embedding)
+ - JinaAI (rerank, text_embedding)
+ - Llama (chat_completion, completion, text_embedding)
- Mistral (chat_completion, completion, text_embedding)
- OpenAI (chat_completion, completion, text_embedding)
- - VoyageAI (text_embedding, rerank)
+ - VoyageAI (rerank, text_embedding)
- Watsonx inference integration (text_embedding)
- - JinaAI (text_embedding, rerank)
@@ -461,6 +464,86 @@ def put(
path_parts=__path_parts,
)
+ @_rewrite_parameters(
+ body_fields=("service", "service_settings"),
+ )
+ def put_ai21(
+ self,
+ *,
+ task_type: t.Union[str, t.Literal["chat_completion", "completion"]],
+ ai21_inference_id: str,
+ service: t.Optional[t.Union[str, t.Literal["ai21"]]] = None,
+ service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+ error_trace: t.Optional[bool] = None,
+ filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ human: t.Optional[bool] = None,
+ pretty: t.Optional[bool] = None,
+ timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ body: t.Optional[t.Dict[str, t.Any]] = None,
+ ) -> ObjectApiResponse[t.Any]:
+ """
+ .. raw:: html
+
+ Create an AI21 inference endpoint.
+ Create an inference endpoint to perform an inference task with the ai21 service.
+
+
+ ``_
+
+ :param task_type: The type of the inference task that the model will perform.
+ :param ai21_inference_id: The unique identifier of the inference endpoint.
+ :param service: The type of service supported for the specified task type. In
+ this case, `ai21`.
+ :param service_settings: Settings used to install the inference model. These
+ settings are specific to the `ai21` service.
+ :param timeout: Specifies the amount of time to wait for the inference endpoint
+ to be created.
+ """
+ if task_type in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'task_type'")
+ if ai21_inference_id in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'ai21_inference_id'")
+ if service is None and body is None:
+ raise ValueError("Empty value passed for parameter 'service'")
+ if service_settings is None and body is None:
+ raise ValueError("Empty value passed for parameter 'service_settings'")
+ __path_parts: t.Dict[str, str] = {
+ "task_type": _quote(task_type),
+ "ai21_inference_id": _quote(ai21_inference_id),
+ }
+ __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["ai21_inference_id"]}'
+ __query: t.Dict[str, t.Any] = {}
+ __body: t.Dict[str, t.Any] = body if body is not None else {}
+ if error_trace is not None:
+ __query["error_trace"] = error_trace
+ if filter_path is not None:
+ __query["filter_path"] = filter_path
+ if human is not None:
+ __query["human"] = human
+ if pretty is not None:
+ __query["pretty"] = pretty
+ if timeout is not None:
+ __query["timeout"] = timeout
+ if not __body:
+ if service is not None:
+ __body["service"] = service
+ if service_settings is not None:
+ __body["service_settings"] = service_settings
+ if not __body:
+ __body = None # type: ignore[assignment]
+ __headers = {"accept": "application/json"}
+ if __body is not None:
+ __headers["content-type"] = "application/json"
+ return self.perform_request( # type: ignore[return-value]
+ "PUT",
+ __path,
+ params=__query,
+ headers=__headers,
+ body=__body,
+ endpoint_id="inference.put_ai21",
+ path_parts=__path_parts,
+ )
+
@_rewrite_parameters(
body_fields=(
"service",
@@ -659,6 +742,112 @@ def put_amazonbedrock(
path_parts=__path_parts,
)
+ @_rewrite_parameters(
+ body_fields=(
+ "service",
+ "service_settings",
+ "chunking_settings",
+ "task_settings",
+ ),
+ )
+ def put_amazonsagemaker(
+ self,
+ *,
+ task_type: t.Union[
+ str,
+ t.Literal[
+ "chat_completion",
+ "completion",
+ "rerank",
+ "sparse_embedding",
+ "text_embedding",
+ ],
+ ],
+ amazonsagemaker_inference_id: str,
+ service: t.Optional[t.Union[str, t.Literal["amazon_sagemaker"]]] = None,
+ service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+ chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+ error_trace: t.Optional[bool] = None,
+ filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ human: t.Optional[bool] = None,
+ pretty: t.Optional[bool] = None,
+ task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+ timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ body: t.Optional[t.Dict[str, t.Any]] = None,
+ ) -> ObjectApiResponse[t.Any]:
+ """
+ .. raw:: html
+
+ Create an Amazon SageMaker inference endpoint.
+ Create an inference endpoint to perform an inference task with the amazon_sagemaker service.
+
+
+ ``_
+
+ :param task_type: The type of the inference task that the model will perform.
+ :param amazonsagemaker_inference_id: The unique identifier of the inference endpoint.
+ :param service: The type of service supported for the specified task type. In
+ this case, `amazon_sagemaker`.
+ :param service_settings: Settings used to install the inference model. These
+ settings are specific to the `amazon_sagemaker` service and `service_settings.api`
+ you specified.
+ :param chunking_settings: The chunking configuration object.
+ :param task_settings: Settings to configure the inference task. These settings
+ are specific to the task type and `service_settings.api` you specified.
+ :param timeout: Specifies the amount of time to wait for the inference endpoint
+ to be created.
+ """
+ if task_type in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'task_type'")
+ if amazonsagemaker_inference_id in SKIP_IN_PATH:
+ raise ValueError(
+ "Empty value passed for parameter 'amazonsagemaker_inference_id'"
+ )
+ if service is None and body is None:
+ raise ValueError("Empty value passed for parameter 'service'")
+ if service_settings is None and body is None:
+ raise ValueError("Empty value passed for parameter 'service_settings'")
+ __path_parts: t.Dict[str, str] = {
+ "task_type": _quote(task_type),
+ "amazonsagemaker_inference_id": _quote(amazonsagemaker_inference_id),
+ }
+ __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["amazonsagemaker_inference_id"]}'
+ __query: t.Dict[str, t.Any] = {}
+ __body: t.Dict[str, t.Any] = body if body is not None else {}
+ if error_trace is not None:
+ __query["error_trace"] = error_trace
+ if filter_path is not None:
+ __query["filter_path"] = filter_path
+ if human is not None:
+ __query["human"] = human
+ if pretty is not None:
+ __query["pretty"] = pretty
+ if timeout is not None:
+ __query["timeout"] = timeout
+ if not __body:
+ if service is not None:
+ __body["service"] = service
+ if service_settings is not None:
+ __body["service_settings"] = service_settings
+ if chunking_settings is not None:
+ __body["chunking_settings"] = chunking_settings
+ if task_settings is not None:
+ __body["task_settings"] = task_settings
+ if not __body:
+ __body = None # type: ignore[assignment]
+ __headers = {"accept": "application/json"}
+ if __body is not None:
+ __headers["content-type"] = "application/json"
+ return self.perform_request( # type: ignore[return-value]
+ "PUT",
+ __path,
+ params=__query,
+ headers=__headers,
+ body=__body,
+ endpoint_id="inference.put_amazonsagemaker",
+ path_parts=__path_parts,
+ )
+
@_rewrite_parameters(
body_fields=(
"service",
@@ -1887,6 +2076,92 @@ def put_jinaai(
path_parts=__path_parts,
)
+ @_rewrite_parameters(
+ body_fields=("service", "service_settings", "chunking_settings"),
+ )
+ def put_llama(
+ self,
+ *,
+ task_type: t.Union[
+ str, t.Literal["chat_completion", "completion", "text_embedding"]
+ ],
+ llama_inference_id: str,
+ service: t.Optional[t.Union[str, t.Literal["llama"]]] = None,
+ service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+ chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+ error_trace: t.Optional[bool] = None,
+ filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ human: t.Optional[bool] = None,
+ pretty: t.Optional[bool] = None,
+ timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ body: t.Optional[t.Dict[str, t.Any]] = None,
+ ) -> ObjectApiResponse[t.Any]:
+ """
+ .. raw:: html
+
+ Create a Llama inference endpoint.
+ Create an inference endpoint to perform an inference task with the llama service.
+
+
+ ``_
+
+ :param task_type: The type of the inference task that the model will perform.
+ :param llama_inference_id: The unique identifier of the inference endpoint.
+ :param service: The type of service supported for the specified task type. In
+ this case, `llama`.
+ :param service_settings: Settings used to install the inference model. These
+ settings are specific to the `llama` service.
+ :param chunking_settings: The chunking configuration object.
+ :param timeout: Specifies the amount of time to wait for the inference endpoint
+ to be created.
+ """
+ if task_type in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'task_type'")
+ if llama_inference_id in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'llama_inference_id'")
+ if service is None and body is None:
+ raise ValueError("Empty value passed for parameter 'service'")
+ if service_settings is None and body is None:
+ raise ValueError("Empty value passed for parameter 'service_settings'")
+ __path_parts: t.Dict[str, str] = {
+ "task_type": _quote(task_type),
+ "llama_inference_id": _quote(llama_inference_id),
+ }
+ __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["llama_inference_id"]}'
+ __query: t.Dict[str, t.Any] = {}
+ __body: t.Dict[str, t.Any] = body if body is not None else {}
+ if error_trace is not None:
+ __query["error_trace"] = error_trace
+ if filter_path is not None:
+ __query["filter_path"] = filter_path
+ if human is not None:
+ __query["human"] = human
+ if pretty is not None:
+ __query["pretty"] = pretty
+ if timeout is not None:
+ __query["timeout"] = timeout
+ if not __body:
+ if service is not None:
+ __body["service"] = service
+ if service_settings is not None:
+ __body["service_settings"] = service_settings
+ if chunking_settings is not None:
+ __body["chunking_settings"] = chunking_settings
+ if not __body:
+ __body = None # type: ignore[assignment]
+ __headers = {"accept": "application/json"}
+ if __body is not None:
+ __headers["content-type"] = "application/json"
+ return self.perform_request( # type: ignore[return-value]
+ "PUT",
+ __path,
+ params=__query,
+ headers=__headers,
+ body=__body,
+ endpoint_id="inference.put_llama",
+ path_parts=__path_parts,
+ )
+
@_rewrite_parameters(
body_fields=("service", "service_settings", "chunking_settings"),
)
diff --git a/elasticsearch/_sync/client/simulate.py b/elasticsearch/_sync/client/simulate.py
index 5f22ae433..8eb19ebd3 100644
--- a/elasticsearch/_sync/client/simulate.py
+++ b/elasticsearch/_sync/client/simulate.py
@@ -56,6 +56,7 @@ def ingest(
t.Mapping[str, t.Mapping[str, t.Any]]
] = None,
mapping_addition: t.Optional[t.Mapping[str, t.Any]] = None,
+ merge_type: t.Optional[t.Union[str, t.Literal["index", "template"]]] = None,
pipeline: t.Optional[str] = None,
pipeline_substitutions: t.Optional[
t.Mapping[str, t.Mapping[str, t.Any]]
@@ -93,6 +94,12 @@ def ingest(
:param index_template_substitutions: A map of index template names to substitute
index template definition objects.
:param mapping_addition:
+ :param merge_type: The method to be used when merging mapping_additions into existing
+ mappings. Mappings can be merged in the way mapping changes are merged into
+ an existing index, or in the way mapping changes are merged into existing
+ templates. Some changes are allowed to templates that are not allowed to
+ indices. For example, a field cannot be changed to an incompatible type in
+ an index, but can in a template.
:param pipeline: The pipeline to use as the default pipeline. This value can
be used to override the default pipeline of the index.
:param pipeline_substitutions: Pipelines to test. If you don’t specify the `pipeline`
@@ -116,6 +123,8 @@ def ingest(
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
+ if merge_type is not None:
+ __query["merge_type"] = merge_type
if pipeline is not None:
__query["pipeline"] = pipeline
if pretty is not None:
diff --git a/elasticsearch/_sync/client/sql.py b/elasticsearch/_sync/client/sql.py
index 90cb01681..b2750ede1 100644
--- a/elasticsearch/_sync/client/sql.py
+++ b/elasticsearch/_sync/client/sql.py
@@ -283,7 +283,7 @@ def query(
keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
keep_on_completion: t.Optional[bool] = None,
page_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
- params: t.Optional[t.Mapping[str, t.Any]] = None,
+ params: t.Optional[t.Sequence[t.Any]] = None,
pretty: t.Optional[bool] = None,
query: t.Optional[str] = None,
request_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
diff --git a/elasticsearch/dsl/types.py b/elasticsearch/dsl/types.py
index 383a69d83..1ec2aecad 100644
--- a/elasticsearch/dsl/types.py
+++ b/elasticsearch/dsl/types.py
@@ -4569,7 +4569,7 @@ class ArrayPercentilesItem(AttrDict[Any]):
:arg value_as_string:
"""
- key: str
+ key: float
value: Union[float, None]
value_as_string: str
@@ -5415,7 +5415,9 @@ class HdrPercentileRanksAggregate(AttrDict[Any]):
:arg meta:
"""
- values: Union[Mapping[str, Union[str, int, None]], Sequence["ArrayPercentilesItem"]]
+ values: Union[
+ Mapping[str, Union[str, float, None]], Sequence["ArrayPercentilesItem"]
+ ]
meta: Mapping[str, Any]
@@ -5425,7 +5427,9 @@ class HdrPercentilesAggregate(AttrDict[Any]):
:arg meta:
"""
- values: Union[Mapping[str, Union[str, int, None]], Sequence["ArrayPercentilesItem"]]
+ values: Union[
+ Mapping[str, Union[str, float, None]], Sequence["ArrayPercentilesItem"]
+ ]
meta: Mapping[str, Any]
@@ -5932,7 +5936,9 @@ class PercentilesBucketAggregate(AttrDict[Any]):
:arg meta:
"""
- values: Union[Mapping[str, Union[str, int, None]], Sequence["ArrayPercentilesItem"]]
+ values: Union[
+ Mapping[str, Union[str, float, None]], Sequence["ArrayPercentilesItem"]
+ ]
meta: Mapping[str, Any]
@@ -6467,7 +6473,9 @@ class TDigestPercentileRanksAggregate(AttrDict[Any]):
:arg meta:
"""
- values: Union[Mapping[str, Union[str, int, None]], Sequence["ArrayPercentilesItem"]]
+ values: Union[
+ Mapping[str, Union[str, float, None]], Sequence["ArrayPercentilesItem"]
+ ]
meta: Mapping[str, Any]
@@ -6477,7 +6485,9 @@ class TDigestPercentilesAggregate(AttrDict[Any]):
:arg meta:
"""
- values: Union[Mapping[str, Union[str, int, None]], Sequence["ArrayPercentilesItem"]]
+ values: Union[
+ Mapping[str, Union[str, float, None]], Sequence["ArrayPercentilesItem"]
+ ]
meta: Mapping[str, Any]