diff --git a/elasticsearch_serverless/_async/client/__init__.py b/elasticsearch_serverless/_async/client/__init__.py index cb4aedb..1bcbe9f 100644 --- a/elasticsearch_serverless/_async/client/__init__.py +++ b/elasticsearch_serverless/_async/client/__init__.py @@ -2271,6 +2271,7 @@ async def msearch( human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, + include_named_queries_score: t.Optional[bool] = None, max_concurrent_searches: t.Optional[int] = None, max_concurrent_shard_requests: t.Optional[int] = None, pre_filter_shard_size: t.Optional[int] = None, @@ -2304,6 +2305,13 @@ async def msearch( when frozen. :param ignore_unavailable: If true, missing or closed indices are not included in the response. + :param include_named_queries_score: Indicates whether hit.matched_queries should + be rendered as a map that includes the name of the matched query associated + with its score (true) or as an array containing the name of the matched queries + (false) This functionality reruns each named query on every hit in a search + response. Typically, this adds a small overhead to a request. However, using + computationally expensive named queries on a large number of hits may add + significant overhead. :param max_concurrent_searches: Maximum number of concurrent searches the multi search API can execute. :param max_concurrent_shard_requests: Maximum number of concurrent shard requests @@ -2353,6 +2361,8 @@ async def msearch( __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable + if include_named_queries_score is not None: + __query["include_named_queries_score"] = include_named_queries_score if max_concurrent_searches is not None: __query["max_concurrent_searches"] = max_concurrent_searches if max_concurrent_shard_requests is not None: @@ -2585,7 +2595,9 @@ async def mtermvectors( path_parts=__path_parts, ) - @_rewrite_parameters() + @_rewrite_parameters( + body_fields=("index_filter",), + ) async def open_point_in_time( self, *, @@ -2603,9 +2615,11 @@ async def open_point_in_time( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, + index_filter: t.Optional[t.Mapping[str, t.Any]] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, routing: t.Optional[str] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ A search request by default executes against the most recent visible data of @@ -2627,17 +2641,20 @@ async def open_point_in_time( as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. + :param index_filter: Allows to filter indices if the provided query rewrites + to `match_none` on every shard. :param preference: Specifies the node or shard the operation should be performed on. Random by default. :param routing: Custom value used to route operations to a specific shard. 
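# --- Illustrative usage sketch, not taken from the diff above ---
# Shows how the new `include_named_queries_score` flag added to search()/msearch()
# might be called. The endpoint URL, API key, index name and queries below are
# placeholder assumptions; the sync client in this diff gains the same parameter.
from elasticsearch_serverless import Elasticsearch

client = Elasticsearch("https://my-project.es.example.io:443", api_key="...")  # placeholder credentials

# Name each clause with `_name`; with include_named_queries_score=True each hit
# reports matched_queries as a map of query name -> score instead of a plain list.
resp = client.search(
    index="my-index",  # hypothetical index
    query={
        "bool": {
            "should": [
                {"match": {"title": {"query": "elasticsearch", "_name": "title_match"}}},
                {"match": {"body": {"query": "serverless", "_name": "body_match"}}},
            ]
        }
    },
    include_named_queries_score=True,
)
for hit in resp["hits"]["hits"]:
    print(hit.get("matched_queries"))  # e.g. {"title_match": 1.2, "body_match": 0.4}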
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") - if keep_alive is None: + if keep_alive is None and body is None: raise ValueError("Empty value passed for parameter 'keep_alive'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_pit' __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} if keep_alive is not None: __query["keep_alive"] = keep_alive if error_trace is not None: @@ -2656,12 +2673,20 @@ async def open_point_in_time( __query["pretty"] = pretty if routing is not None: __query["routing"] = routing + if not __body: + if index_filter is not None: + __body["index_filter"] = index_filter + if not __body: + __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, + body=__body, endpoint_id="open_point_in_time", path_parts=__path_parts, ) @@ -3221,6 +3246,7 @@ async def search( human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, + include_named_queries_score: t.Optional[bool] = None, indices_boost: t.Optional[t.Sequence[t.Mapping[str, float]]] = None, knn: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] @@ -3348,6 +3374,13 @@ async def search( be ignored when frozen. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. + :param include_named_queries_score: Indicates whether hit.matched_queries should + be rendered as a map that includes the name of the matched query associated + with its score (true) or as an array containing the name of the matched queries + (false) This functionality reruns each named query on every hit in a search + response. Typically, this adds a small overhead to a request. However, using + computationally expensive named queries on a large number of hits may add + significant overhead. :param indices_boost: Boosts the _score of documents from specified indices. :param knn: Defines the approximate kNN search to run. :param lenient: If `true`, format-based query failures (such as providing text @@ -3529,6 +3562,8 @@ async def search( __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable + if include_named_queries_score is not None: + __query["include_named_queries_score"] = include_named_queries_score if lenient is not None: __query["lenient"] = lenient if max_concurrent_shard_requests is not None: @@ -4389,6 +4424,7 @@ async def update_by_query( pipeline: t.Optional[str] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, + q: t.Optional[str] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, refresh: t.Optional[bool] = None, request_cache: t.Optional[bool] = None, @@ -4455,6 +4491,7 @@ async def update_by_query( parameter. :param preference: Specifies the node or shard the operation should be performed on. Random by default. + :param q: Query in the Lucene query string syntax. :param query: Specifies the documents to update using the Query DSL. :param refresh: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search. 
@@ -4539,6 +4576,8 @@ async def update_by_query( __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty + if q is not None: + __query["q"] = q if refresh is not None: __query["refresh"] = refresh if request_cache is not None: diff --git a/elasticsearch_serverless/_async/client/cat.py b/elasticsearch_serverless/_async/client/cat.py index 849a45c..d391d8f 100644 --- a/elasticsearch_serverless/_async/client/cat.py +++ b/elasticsearch_serverless/_async/client/cat.py @@ -223,7 +223,7 @@ async def count( ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ Get a document count. Provides quick access to a document count for a data stream, - an index, or an entire cluster.n/ The document count only includes live documents, + an index, or an entire cluster. The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, diff --git a/elasticsearch_serverless/_async/client/cluster.py b/elasticsearch_serverless/_async/client/cluster.py index 815dad6..7dab99d 100644 --- a/elasticsearch_serverless/_async/client/cluster.py +++ b/elasticsearch_serverless/_async/client/cluster.py @@ -284,7 +284,7 @@ async def put_component_template( ``_ :param name: Name of the component template to create. Elasticsearch includes - the following built-in component templates: `logs-mappings`; 'logs-settings`; + the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. Elastic Agent uses these templates to configure backing indices for its data streams. If you use Elastic Agent and want to overwrite one of these templates, diff --git a/elasticsearch_serverless/_async/client/esql.py b/elasticsearch_serverless/_async/client/esql.py index d084964..f708a1a 100644 --- a/elasticsearch_serverless/_async/client/esql.py +++ b/elasticsearch_serverless/_async/client/esql.py @@ -47,7 +47,14 @@ async def query( error_trace: t.Optional[bool] = None, filter: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - format: t.Optional[str] = None, + format: t.Optional[ + t.Union[ + str, + t.Literal[ + "arrow", "cbor", "csv", "json", "smile", "tsv", "txt", "yaml" + ], + ] + ] = None, human: t.Optional[bool] = None, locale: t.Optional[str] = None, params: t.Optional[ diff --git a/elasticsearch_serverless/_async/client/indices.py b/elasticsearch_serverless/_async/client/indices.py index a2766ae..b992182 100644 --- a/elasticsearch_serverless/_async/client/indices.py +++ b/elasticsearch_serverless/_async/client/indices.py @@ -303,7 +303,9 @@ async def create_data_stream( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Create a data stream. Creates a data stream. You must have a matching index template @@ -316,6 +318,11 @@ async def create_data_stream( `#`, `:`, or a space character; Cannot start with `-`, `_`, `+`, or `.ds-`; Cannot be `.` or `..`; Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. 
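# --- Illustrative usage sketch, not taken from the diff above ---
# Shows the new `q` (Lucene query string) parameter on update_by_query() and one
# of the `format` values now spelled out in the esql.query() signature. Client
# details, index names and the ES|QL statement are placeholder assumptions.
from elasticsearch_serverless import Elasticsearch

client = Elasticsearch("https://my-project.es.example.io:443", api_key="...")  # placeholder credentials

# Select documents with a Lucene query string instead of a Query DSL body.
client.update_by_query(
    index="my-index",  # hypothetical index
    q="status:stale",
    script={"source": "ctx._source.status = 'archived'"},
    refresh=True,
)

# Request ES|QL results as CSV rather than the default JSON.
csv_rows = client.esql.query(query="FROM my-index | LIMIT 10", format="csv")
print(csv_rows)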
+ :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -328,8 +335,12 @@ async def create_data_stream( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", @@ -340,63 +351,6 @@ async def create_data_stream( path_parts=__path_parts, ) - @_rewrite_parameters() - async def data_streams_stats( - self, - *, - name: t.Optional[str] = None, - error_trace: t.Optional[bool] = None, - expand_wildcards: t.Optional[ - t.Union[ - t.Sequence[ - t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] - ], - t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], - ] - ] = None, - filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - human: t.Optional[bool] = None, - pretty: t.Optional[bool] = None, - ) -> ObjectApiResponse[t.Any]: - """ - Get data stream stats. Retrieves statistics for one or more data streams. - - ``_ - - :param name: Comma-separated list of data streams used to limit the request. - Wildcard expressions (`*`) are supported. To target all data streams in a - cluster, omit this parameter or use `*`. - :param expand_wildcards: Type of data stream that wildcard patterns can match. - Supports comma-separated values, such as `open,hidden`. - """ - __path_parts: t.Dict[str, str] - if name not in SKIP_IN_PATH: - __path_parts = {"name": _quote(name)} - __path = f'/_data_stream/{__path_parts["name"]}/_stats' - else: - __path_parts = {} - __path = "/_data_stream/_stats" - __query: t.Dict[str, t.Any] = {} - if error_trace is not None: - __query["error_trace"] = error_trace - if expand_wildcards is not None: - __query["expand_wildcards"] = expand_wildcards - if filter_path is not None: - __query["filter_path"] = filter_path - if human is not None: - __query["human"] = human - if pretty is not None: - __query["pretty"] = pretty - __headers = {"accept": "application/json"} - return await self.perform_request( # type: ignore[return-value] - "GET", - __path, - params=__query, - headers=__headers, - endpoint_id="indices.data_streams_stats", - path_parts=__path_parts, - ) - @_rewrite_parameters() async def delete( self, @@ -611,6 +565,7 @@ async def delete_data_stream( ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -622,6 +577,9 @@ async def delete_data_stream( are supported. :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values,such as `open,hidden`. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -636,6 +594,8 @@ async def delete_data_stream( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -1162,6 +1122,7 @@ async def get_data_lifecycle( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1177,6 +1138,9 @@ async def get_data_lifecycle( Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. :param include_defaults: If `true`, return all default settings in the response. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -1193,6 +1157,8 @@ async def get_data_lifecycle( __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -1222,7 +1188,9 @@ async def get_data_stream( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ Get data streams. Retrieves information about one or more data streams. @@ -1236,6 +1204,11 @@ async def get_data_stream( Supports comma-separated values, such as `open,hidden`. :param include_defaults: If true, returns all relevant default configurations for the index template. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param verbose: Whether the maximum timestamp for each data stream should be + calculated and returned. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: @@ -1255,8 +1228,12 @@ async def get_data_stream( __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if verbose is not None: + __query["verbose"] = verbose __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", @@ -1522,7 +1499,9 @@ async def migrate_to_data_stream( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Convert an index alias to a data stream. Converts an index alias to a data stream. 
@@ -1537,6 +1516,11 @@ async def migrate_to_data_stream( ``_ :param name: Name of the index alias to convert to a data stream. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -1549,8 +1533,12 @@ async def migrate_to_data_stream( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", @@ -2383,6 +2371,7 @@ async def resolve_index( self, *, name: t.Union[str, t.Sequence[str]], + allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ @@ -2394,6 +2383,7 @@ async def resolve_index( ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -2405,16 +2395,25 @@ async def resolve_index( :param name: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the ``:`` syntax. + :param allow_no_indices: If `false`, the request returns an error if any wildcard + expression, index alias, or `_all` value targets only missing or closed indices. + This behavior applies even if the request targets other open indices. For + example, a request targeting `foo*,bar*` returns an error if an index starts + with `foo` but no index starts with `bar`. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + :param ignore_unavailable: If `false`, the request returns an error if it targets + a missing or closed index. 
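# --- Illustrative usage sketch, not taken from the diff above ---
# Shows the new `allow_no_indices` and `ignore_unavailable` flags on
# indices.resolve_index(). The wildcard patterns and client details are
# placeholder assumptions.
from elasticsearch_serverless import Elasticsearch

client = Elasticsearch("https://my-project.es.example.io:443", api_key="...")  # placeholder credentials

resolved = client.indices.resolve_index(
    name="logs-*,metrics-*",  # hypothetical patterns
    expand_wildcards="open,hidden",
    allow_no_indices=True,     # don't fail if one pattern matches nothing
    ignore_unavailable=True,   # skip missing or closed indices
)
print(resolved["indices"], resolved["aliases"], resolved["data_streams"])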
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_resolve/index/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} + if allow_no_indices is not None: + __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: @@ -2423,6 +2422,8 @@ async def resolve_index( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if ignore_unavailable is not None: + __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} diff --git a/elasticsearch_serverless/_async/client/ingest.py b/elasticsearch_serverless/_async/client/ingest.py index 97611e6..ea70cc7 100644 --- a/elasticsearch_serverless/_async/client/ingest.py +++ b/elasticsearch_serverless/_async/client/ingest.py @@ -171,13 +171,21 @@ async def processor_grok( ) @_rewrite_parameters( - body_fields=("description", "meta", "on_failure", "processors", "version"), + body_fields=( + "deprecated", + "description", + "meta", + "on_failure", + "processors", + "version", + ), parameter_aliases={"_meta": "meta"}, ) async def put_pipeline( self, *, id: str, + deprecated: t.Optional[bool] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, @@ -199,6 +207,10 @@ async def put_pipeline( ``_ :param id: ID of the ingest pipeline to create or update. + :param deprecated: Marks this ingest pipeline as deprecated. When a deprecated + ingest pipeline is referenced as the default or final pipeline when creating + or updating a non-deprecated index template, Elasticsearch will emit a deprecation + warning. :param description: Description of the ingest pipeline. :param if_version: Required version for optimistic concurrency control for pipeline updates @@ -242,6 +254,8 @@ async def put_pipeline( if timeout is not None: __query["timeout"] = timeout if not __body: + if deprecated is not None: + __body["deprecated"] = deprecated if description is not None: __body["description"] = description if meta is not None: @@ -269,8 +283,8 @@ async def put_pipeline( async def simulate( self, *, - id: t.Optional[str] = None, docs: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, + id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, @@ -284,15 +298,17 @@ async def simulate( ``_ + :param docs: Sample documents to test in the pipeline. :param id: Pipeline to test. If you don’t specify a `pipeline` in the request body, this parameter is required. - :param docs: Sample documents to test in the pipeline. :param pipeline: Pipeline to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. :param verbose: If `true`, the response includes output data for each processor in the executed pipeline. 
""" + if docs is None and body is None: + raise ValueError("Empty value passed for parameter 'docs'") __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} diff --git a/elasticsearch_serverless/_async/client/ml.py b/elasticsearch_serverless/_async/client/ml.py index 8b5ffd5..e8dd9be 100644 --- a/elasticsearch_serverless/_async/client/ml.py +++ b/elasticsearch_serverless/_async/client/ml.py @@ -688,14 +688,14 @@ async def flush_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Forces any buffered data to be processed by the job. The flush jobs API is only - applicable when sending data for analysis using the post data API. Depending - on the content of the buffer, then it might additionally calculate new results. - Both flush and close operations are similar, however the flush is more efficient - if you are expecting to send more data for analysis. When flushing, the job remains - open and is available to continue analyzing data. A close operation additionally - prunes and persists the model state to disk and the job must be opened again - before analyzing further data. + Force buffered data to be processed. The flush jobs API is only applicable when + sending data for analysis using the post data API. Depending on the content of + the buffer, then it might additionally calculate new results. Both flush and + close operations are similar, however the flush is more efficient if you are + expecting to send more data for analysis. When flushing, the job remains open + and is available to continue analyzing data. A close operation additionally prunes + and persists the model state to disk and the job must be opened again before + analyzing further data. ``_ @@ -764,7 +764,7 @@ async def get_calendar_events( start: t.Optional[t.Union[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the scheduled events in calendars. + Get info about events in calendars. ``_ @@ -830,7 +830,7 @@ async def get_calendars( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for calendars. + Get calendar configuration info. ``_ @@ -900,9 +900,9 @@ async def get_data_frame_analytics( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for data frame analytics jobs. You can get - information for multiple data frame analytics jobs in a single API request by - using a comma-separated list of data frame analytics jobs or a wildcard expression. + Get data frame analytics job configuration info. You can get information for + multiple data frame analytics jobs in a single API request by using a comma-separated + list of data frame analytics jobs or a wildcard expression. ``_ @@ -974,7 +974,7 @@ async def get_data_frame_analytics_stats( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for data frame analytics jobs. + Get data frame analytics jobs usage info. ``_ @@ -1039,12 +1039,12 @@ async def get_datafeed_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for datafeeds. You can get statistics for multiple - datafeeds in a single API request by using a comma-separated list of datafeeds - or a wildcard expression. You can get statistics for all datafeeds by using `_all`, - by specifying `*` as the ``, or by omitting the ``. 
If the - datafeed is stopped, the only information you receive is the `datafeed_id` and - the `state`. This API returns a maximum of 10,000 datafeeds. + Get datafeeds usage info. You can get statistics for multiple datafeeds in a + single API request by using a comma-separated list of datafeeds or a wildcard + expression. You can get statistics for all datafeeds by using `_all`, by specifying + `*` as the ``, or by omitting the ``. If the datafeed is stopped, + the only information you receive is the `datafeed_id` and the `state`. This API + returns a maximum of 10,000 datafeeds. ``_ @@ -1100,11 +1100,11 @@ async def get_datafeeds( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for datafeeds. You can get information for - multiple datafeeds in a single API request by using a comma-separated list of - datafeeds or a wildcard expression. You can get information for all datafeeds - by using `_all`, by specifying `*` as the ``, or by omitting the ``. - This API returns a maximum of 10,000 datafeeds. + Get datafeeds configuration info. You can get information for multiple datafeeds + in a single API request by using a comma-separated list of datafeeds or a wildcard + expression. You can get information for all datafeeds by using `_all`, by specifying + `*` as the ``, or by omitting the ``. This API returns a maximum + of 10,000 datafeeds. ``_ @@ -1167,7 +1167,7 @@ async def get_filters( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves filters. You can get a single filter or all filters. + Get filters. You can get a single filter or all filters. ``_ @@ -1217,7 +1217,7 @@ async def get_job_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for anomaly detection jobs. + Get anomaly detection jobs usage info. ``_ @@ -1274,11 +1274,11 @@ async def get_jobs( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for anomaly detection jobs. You can get information - for multiple anomaly detection jobs in a single API request by using a group - name, a comma-separated list of jobs, or a wildcard expression. You can get information - for all anomaly detection jobs by using `_all`, by specifying `*` as the ``, - or by omitting the ``. + Get anomaly detection jobs configuration info. You can get information for multiple + anomaly detection jobs in a single API request by using a group name, a comma-separated + list of jobs, or a wildcard expression. You can get information for all anomaly + detection jobs by using `_all`, by specifying `*` as the ``, or by omitting + the ``. ``_ @@ -1355,19 +1355,19 @@ async def get_overall_buckets( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves overall bucket results that summarize the bucket results of multiple - anomaly detection jobs. The `overall_score` is calculated by combining the scores - of all the buckets within the overall bucket span. First, the maximum `anomaly_score` - per anomaly detection job in the overall bucket is calculated. Then the `top_n` - of those scores are averaged to result in the `overall_score`. This means that - you can fine-tune the `overall_score` so that it is more or less sensitive to - the number of jobs that detect an anomaly at the same time. For example, if you - set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall - bucket. 
Alternatively, if you set `top_n` to the number of jobs, the `overall_score` - is high only when all jobs detect anomalies in that overall bucket. If you set - the `bucket_span` parameter (to a value greater than its default), the `overall_score` - is the maximum `overall_score` of the overall buckets that have a span equal - to the jobs' largest bucket span. + Get overall bucket results. Retrievs overall bucket results that summarize the + bucket results of multiple anomaly detection jobs. The `overall_score` is calculated + by combining the scores of all the buckets within the overall bucket span. First, + the maximum `anomaly_score` per anomaly detection job in the overall bucket is + calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. + This means that you can fine-tune the `overall_score` so that it is more or less + sensitive to the number of jobs that detect an anomaly at the same time. For + example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket + score in the overall bucket. Alternatively, if you set `top_n` to the number + of jobs, the `overall_score` is high only when all jobs detect anomalies in that + overall bucket. If you set the `bucket_span` parameter (to a value greater than + its default), the `overall_score` is the maximum `overall_score` of the overall + buckets that have a span equal to the jobs' largest bucket span. ``_ @@ -1463,7 +1463,7 @@ async def get_trained_models( tags: t.Optional[t.Union[str, t.Sequence[str]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for a trained model. + Get trained model configuration info. ``_ @@ -1545,9 +1545,9 @@ async def get_trained_models_stats( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for trained models. You can get usage information - for multiple trained models in a single API request by using a comma-separated - list of model IDs or a wildcard expression. + Get trained models usage info. You can get usage information for multiple trained + models in a single API request by using a comma-separated list of model IDs or + a wildcard expression. ``_ @@ -1610,7 +1610,7 @@ async def infer_trained_model( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Evaluates a trained model. + Evaluate a trained model. ``_ @@ -1672,12 +1672,12 @@ async def open_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Open anomaly detection jobs. An anomaly detection job must be opened in order - for it to be ready to receive and analyze data. It can be opened and closed multiple - times throughout its lifecycle. When you open a new job, it starts with an empty - model. When you open an existing job, the most recent model state is automatically - loaded. The job is ready to resume its analysis from where it left off, once - new data is received. + Open anomaly detection jobs. An anomaly detection job must be opened to be ready + to receive and analyze data. It can be opened and closed multiple times throughout + its lifecycle. When you open a new job, it starts with an empty model. When you + open an existing job, the most recent model state is automatically loaded. The + job is ready to resume its analysis from where it left off, once new data is + received. ``_ @@ -1731,7 +1731,7 @@ async def post_calendar_events( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds scheduled events to a calendar. 
+ Add scheduled events to the calendar. ``_ @@ -1785,7 +1785,8 @@ async def preview_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Previews the extracted features used by a data frame analytics config. + Preview features used by data frame analytics. Previews the extracted features + used by a data frame analytics config. ``_ @@ -1847,7 +1848,7 @@ async def preview_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Previews a datafeed. This API returns the first "page" of search results from + Preview a datafeed. This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When @@ -1931,7 +1932,7 @@ async def put_calendar( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a calendar. + Create a calendar. ``_ @@ -1985,7 +1986,7 @@ async def put_calendar_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds an anomaly detection job to a calendar. + Add anomaly detection job to calendar. ``_ @@ -2057,9 +2058,9 @@ async def put_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a data frame analytics job. This API creates a data frame analytics - job that performs an analysis on the source indices and stores the outcome in - a destination index. + Create a data frame analytics job. This API creates a data frame analytics job + that performs an analysis on the source indices and stores the outcome in a destination + index. ``_ @@ -2227,8 +2228,8 @@ async def put_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a datafeed. Datafeeds retrieve data from Elasticsearch for analysis - by an anomaly detection job. You can associate only one datafeed with each anomaly + Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by + an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay') at each interval. When Elasticsearch security features are enabled, your datafeed @@ -2390,9 +2391,9 @@ async def put_filter( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a filter. A filter contains a list of strings. It can be used by - one or more anomaly detection jobs. Specifically, filters are referenced in the - `custom_rules` property of detector configuration objects. + Create a filter. A filter contains a list of strings. It can be used by one or + more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` + property of detector configuration objects. ``_ @@ -2659,7 +2660,8 @@ async def put_trained_model( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables you to supply a trained model that is not created by data frame analytics. + Create a trained model. Enable you to supply a trained model that is not created + by data frame analytics. 
``_ @@ -2761,15 +2763,15 @@ async def put_trained_model_alias( reassign: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a trained model alias. A trained model alias is a logical - name used to reference a single trained model. You can use aliases instead of - trained model identifiers to make it easier to reference your models. For example, - you can use aliases in inference aggregations and processors. An alias must be - unique and refer to only a single trained model. However, you can have multiple - aliases for each trained model. If you use this API to update an alias such that - it references a different trained model ID and the model uses a different type - of data frame analytics, an error occurs. For example, this situation occurs - if you have a trained model for regression analysis and a trained model for classification + Create or update a trained model alias. A trained model alias is a logical name + used to reference a single trained model. You can use aliases instead of trained + model identifiers to make it easier to reference your models. For example, you + can use aliases in inference aggregations and processors. An alias must be unique + and refer to only a single trained model. However, you can have multiple aliases + for each trained model. If you use this API to update an alias such that it references + a different trained model ID and the model uses a different type of data frame + analytics, an error occurs. For example, this situation occurs if you have a + trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns @@ -2831,7 +2833,7 @@ async def put_trained_model_definition_part( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates part of a trained model definition. + Create part of a trained model definition. ``_ @@ -2908,7 +2910,7 @@ async def put_trained_model_vocabulary( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a trained model vocabulary. This API is supported only for natural language + Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. @@ -2966,7 +2968,7 @@ async def reset_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resets an anomaly detection job. All model state and results are deleted. The + Reset an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list. @@ -3018,16 +3020,16 @@ async def start_data_frame_analytics( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts a data frame analytics job. A data frame analytics job can be started - and stopped multiple times throughout its lifecycle. If the destination index - does not exist, it is created automatically the first time you start the data - frame analytics job. 
The `index.number_of_shards` and `index.number_of_replicas` - settings for the destination index are copied from the source index. If there - are multiple source indices, the destination index copies the highest setting - values. The mappings for the destination index are also copied from the source - indices. If there are any mapping conflicts, the job fails to start. If the destination - index exists, it is used as is. You can therefore set up the destination index - in advance with custom settings and mappings. + Start a data frame analytics job. A data frame analytics job can be started and + stopped multiple times throughout its lifecycle. If the destination index does + not exist, it is created automatically the first time you start the data frame + analytics job. The `index.number_of_shards` and `index.number_of_replicas` settings + for the destination index are copied from the source index. If there are multiple + source indices, the destination index copies the highest setting values. The + mappings for the destination index are also copied from the source indices. If + there are any mapping conflicts, the job fails to start. If the destination index + exists, it is used as is. You can therefore set up the destination index in advance + with custom settings and mappings. ``_ @@ -3079,17 +3081,17 @@ async def start_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts one or more datafeeds. A datafeed must be started in order to retrieve - data from Elasticsearch. A datafeed can be started and stopped multiple times - throughout its lifecycle. Before you can start a datafeed, the anomaly detection - job must be open. Otherwise, an error occurs. If you restart a stopped datafeed, - it continues processing input data from the next millisecond after it was stopped. - If new data was indexed for that exact millisecond between stopping and starting, - it will be ignored. When Elasticsearch security features are enabled, your datafeed - remembers which roles the last user to create or update it had at the time of - creation or update and runs the query using those same roles. If you provided - secondary authorization headers when you created or updated the datafeed, those - credentials are used instead. + Start datafeeds. A datafeed must be started in order to retrieve data from Elasticsearch. + A datafeed can be started and stopped multiple times throughout its lifecycle. + Before you can start a datafeed, the anomaly detection job must be open. Otherwise, + an error occurs. If you restart a stopped datafeed, it continues processing input + data from the next millisecond after it was stopped. If new data was indexed + for that exact millisecond between stopping and starting, it will be ignored. + When Elasticsearch security features are enabled, your datafeed remembers which + roles the last user to create or update it had at the time of creation or update + and runs the query using those same roles. If you provided secondary authorization + headers when you created or updated the datafeed, those credentials are used + instead. ``_ @@ -3158,8 +3160,8 @@ async def start_trained_model_deployment( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts a trained model deployment, which allocates the model to every machine - learning node. + Start a trained model deployment. It allocates the model to every machine learning + node. 
``_ @@ -3242,8 +3244,8 @@ async def stop_data_frame_analytics( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops one or more data frame analytics jobs. A data frame analytics job can be - started and stopped multiple times throughout its lifecycle. + Stop data frame analytics jobs. A data frame analytics job can be started and + stopped multiple times throughout its lifecycle. ``_ @@ -3308,9 +3310,8 @@ async def stop_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops one or more datafeeds. A datafeed that is stopped ceases to retrieve data - from Elasticsearch. A datafeed can be started and stopped multiple times throughout - its lifecycle. + Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. + A datafeed can be started and stopped multiple times throughout its lifecycle. ``_ @@ -3372,7 +3373,7 @@ async def stop_trained_model_deployment( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops a trained model deployment. + Stop a trained model deployment. ``_ @@ -3437,7 +3438,7 @@ async def update_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates an existing data frame analytics job. + Update a data frame analytics job. ``_ @@ -3545,11 +3546,11 @@ async def update_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the properties of a datafeed. You must stop and start the datafeed for - the changes to be applied. When Elasticsearch security features are enabled, - your datafeed remembers which roles the user who updated it had at the time of - the update and runs the query using those same roles. If you provide secondary - authorization headers, those credentials are used instead. + Update a datafeed. You must stop and start the datafeed for the changes to be + applied. When Elasticsearch security features are enabled, your datafeed remembers + which roles the user who updated it had at the time of the update and runs the + query using those same roles. If you provide secondary authorization headers, + those credentials are used instead. ``_ @@ -3712,7 +3713,8 @@ async def update_filter( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the description of a filter, adds items, or removes items from the list. + Update a filter. Updates the description of a filter, adds items, or removes + items from the list. ``_ @@ -3802,7 +3804,8 @@ async def update_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates certain properties of an anomaly detection job. + Update an anomaly detection job. Updates certain properties of an anomaly detection + job. ``_ @@ -3928,8 +3931,7 @@ async def update_trained_model_deployment( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts a trained model deployment, which allocates the model to every machine - learning node. + Update a trained model deployment. 
``_ diff --git a/elasticsearch_serverless/_async/client/query_rules.py b/elasticsearch_serverless/_async/client/query_rules.py index f371704..f37fbf4 100644 --- a/elasticsearch_serverless/_async/client/query_rules.py +++ b/elasticsearch_serverless/_async/client/query_rules.py @@ -262,7 +262,7 @@ async def put_rule( criteria: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, - type: t.Optional[t.Union[str, t.Literal["pinned"]]] = None, + type: t.Optional[t.Union[str, t.Literal["exclude", "pinned"]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, diff --git a/elasticsearch_serverless/_async/client/search_application.py b/elasticsearch_serverless/_async/client/search_application.py index 73fc368..f35714b 100644 --- a/elasticsearch_serverless/_async/client/search_application.py +++ b/elasticsearch_serverless/_async/client/search_application.py @@ -36,7 +36,8 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a search application. + Delete a search application. Remove a search application and its associated alias. + Indices attached to the search application are not removed. ``_ @@ -76,7 +77,8 @@ async def delete_behavioral_analytics( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a behavioral analytics collection. + Delete a behavioral analytics collection. The associated data stream is also + deleted. ``_ @@ -116,7 +118,7 @@ async def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the details about a search application + Get search application details. ``_ @@ -156,7 +158,7 @@ async def get_behavioral_analytics( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the existing behavioral analytics collections. + Get behavioral analytics collections. ``_ @@ -254,7 +256,7 @@ async def put( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a search application. + Create or update a search application. ``_ @@ -307,7 +309,7 @@ async def put_behavioral_analytics( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a behavioral analytics collection. + Create a behavioral analytics collection. ``_ @@ -353,7 +355,10 @@ async def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Perform a search against a search application. + Run a search application search. Generate and run an Elasticsearch query that + uses the specified query parameteter and the search template associated with + the search application or default template. Unspecified template parameters are + assigned their default values if applicable. 
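# --- Illustrative usage sketch, not taken from the diff above ---
# Shows the new "exclude" rule type accepted by query_rules.put_rule(). The
# ruleset and rule ids, criteria and document ids are placeholder assumptions,
# and the exact body layout follows the query rules API rather than this diff.
from elasticsearch_serverless import Elasticsearch

client = Elasticsearch("https://my-project.es.example.io:443", api_key="...")  # placeholder credentials

client.query_rules.put_rule(
    ruleset_id="my-ruleset",       # hypothetical ruleset
    rule_id="hide-outdated-doc",
    type="exclude",                # previously only "pinned" appeared in the signature
    criteria=[{"type": "exact", "metadata": "user_query", "values": ["pricing"]}],
    actions={"ids": ["outdated-pricing-page"]},
)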
``_ diff --git a/elasticsearch_serverless/_async/client/sql.py b/elasticsearch_serverless/_async/client/sql.py index c089ed4..a376d52 100644 --- a/elasticsearch_serverless/_async/client/sql.py +++ b/elasticsearch_serverless/_async/client/sql.py @@ -251,7 +251,11 @@ async def query( field_multi_value_leniency: t.Optional[bool] = None, filter: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - format: t.Optional[str] = None, + format: t.Optional[ + t.Union[ + str, t.Literal["cbor", "csv", "json", "smile", "tsv", "txt", "yaml"] + ] + ] = None, human: t.Optional[bool] = None, index_using_frozen: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, diff --git a/elasticsearch_serverless/_async/client/synonyms.py b/elasticsearch_serverless/_async/client/synonyms.py index 0f6ec91..153c552 100644 --- a/elasticsearch_serverless/_async/client/synonyms.py +++ b/elasticsearch_serverless/_async/client/synonyms.py @@ -262,7 +262,9 @@ async def put_synonym( self, *, id: str, - synonyms_set: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, + synonyms_set: t.Optional[ + t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] + ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, diff --git a/elasticsearch_serverless/_sync/client/__init__.py b/elasticsearch_serverless/_sync/client/__init__.py index 40639a4..8065532 100644 --- a/elasticsearch_serverless/_sync/client/__init__.py +++ b/elasticsearch_serverless/_sync/client/__init__.py @@ -2269,6 +2269,7 @@ def msearch( human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, + include_named_queries_score: t.Optional[bool] = None, max_concurrent_searches: t.Optional[int] = None, max_concurrent_shard_requests: t.Optional[int] = None, pre_filter_shard_size: t.Optional[int] = None, @@ -2302,6 +2303,13 @@ def msearch( when frozen. :param ignore_unavailable: If true, missing or closed indices are not included in the response. + :param include_named_queries_score: Indicates whether hit.matched_queries should + be rendered as a map that includes the name of the matched query associated + with its score (true) or as an array containing the name of the matched queries + (false) This functionality reruns each named query on every hit in a search + response. Typically, this adds a small overhead to a request. However, using + computationally expensive named queries on a large number of hits may add + significant overhead. :param max_concurrent_searches: Maximum number of concurrent searches the multi search API can execute. 
:param max_concurrent_shard_requests: Maximum number of concurrent shard requests @@ -2351,6 +2359,8 @@ def msearch( __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable + if include_named_queries_score is not None: + __query["include_named_queries_score"] = include_named_queries_score if max_concurrent_searches is not None: __query["max_concurrent_searches"] = max_concurrent_searches if max_concurrent_shard_requests is not None: @@ -2583,7 +2593,9 @@ def mtermvectors( path_parts=__path_parts, ) - @_rewrite_parameters() + @_rewrite_parameters( + body_fields=("index_filter",), + ) def open_point_in_time( self, *, @@ -2601,9 +2613,11 @@ def open_point_in_time( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, + index_filter: t.Optional[t.Mapping[str, t.Any]] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, routing: t.Optional[str] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ A search request by default executes against the most recent visible data of @@ -2625,17 +2639,20 @@ def open_point_in_time( as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. + :param index_filter: Allows to filter indices if the provided query rewrites + to `match_none` on every shard. :param preference: Specifies the node or shard the operation should be performed on. Random by default. :param routing: Custom value used to route operations to a specific shard. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") - if keep_alive is None: + if keep_alive is None and body is None: raise ValueError("Empty value passed for parameter 'keep_alive'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_pit' __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} if keep_alive is not None: __query["keep_alive"] = keep_alive if error_trace is not None: @@ -2654,12 +2671,20 @@ def open_point_in_time( __query["pretty"] = pretty if routing is not None: __query["routing"] = routing + if not __body: + if index_filter is not None: + __body["index_filter"] = index_filter + if not __body: + __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, + body=__body, endpoint_id="open_point_in_time", path_parts=__path_parts, ) @@ -3219,6 +3244,7 @@ def search( human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, + include_named_queries_score: t.Optional[bool] = None, indices_boost: t.Optional[t.Sequence[t.Mapping[str, float]]] = None, knn: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] @@ -3346,6 +3372,13 @@ def search( be ignored when frozen. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. 
+ :param include_named_queries_score: Indicates whether hit.matched_queries should + be rendered as a map that includes the name of the matched query associated + with its score (true) or as an array containing the name of the matched queries + (false) This functionality reruns each named query on every hit in a search + response. Typically, this adds a small overhead to a request. However, using + computationally expensive named queries on a large number of hits may add + significant overhead. :param indices_boost: Boosts the _score of documents from specified indices. :param knn: Defines the approximate kNN search to run. :param lenient: If `true`, format-based query failures (such as providing text @@ -3527,6 +3560,8 @@ def search( __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable + if include_named_queries_score is not None: + __query["include_named_queries_score"] = include_named_queries_score if lenient is not None: __query["lenient"] = lenient if max_concurrent_shard_requests is not None: @@ -4387,6 +4422,7 @@ def update_by_query( pipeline: t.Optional[str] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, + q: t.Optional[str] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, refresh: t.Optional[bool] = None, request_cache: t.Optional[bool] = None, @@ -4453,6 +4489,7 @@ def update_by_query( parameter. :param preference: Specifies the node or shard the operation should be performed on. Random by default. + :param q: Query in the Lucene query string syntax. :param query: Specifies the documents to update using the Query DSL. :param refresh: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search. @@ -4537,6 +4574,8 @@ def update_by_query( __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty + if q is not None: + __query["q"] = q if refresh is not None: __query["refresh"] = refresh if request_cache is not None: diff --git a/elasticsearch_serverless/_sync/client/cat.py b/elasticsearch_serverless/_sync/client/cat.py index f5ae4bf..ebecec3 100644 --- a/elasticsearch_serverless/_sync/client/cat.py +++ b/elasticsearch_serverless/_sync/client/cat.py @@ -223,7 +223,7 @@ def count( ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ Get a document count. Provides quick access to a document count for a data stream, - an index, or an entire cluster.n/ The document count only includes live documents, + an index, or an entire cluster. The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, diff --git a/elasticsearch_serverless/_sync/client/cluster.py b/elasticsearch_serverless/_sync/client/cluster.py index 400f290..23d7318 100644 --- a/elasticsearch_serverless/_sync/client/cluster.py +++ b/elasticsearch_serverless/_sync/client/cluster.py @@ -284,7 +284,7 @@ def put_component_template( ``_ :param name: Name of the component template to create. Elasticsearch includes - the following built-in component templates: `logs-mappings`; 'logs-settings`; + the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. 
Elastic Agent uses these templates to configure backing indices for its data streams. If you use Elastic Agent and want to overwrite one of these templates, diff --git a/elasticsearch_serverless/_sync/client/esql.py b/elasticsearch_serverless/_sync/client/esql.py index 8442238..19d8c71 100644 --- a/elasticsearch_serverless/_sync/client/esql.py +++ b/elasticsearch_serverless/_sync/client/esql.py @@ -47,7 +47,14 @@ def query( error_trace: t.Optional[bool] = None, filter: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - format: t.Optional[str] = None, + format: t.Optional[ + t.Union[ + str, + t.Literal[ + "arrow", "cbor", "csv", "json", "smile", "tsv", "txt", "yaml" + ], + ] + ] = None, human: t.Optional[bool] = None, locale: t.Optional[str] = None, params: t.Optional[ diff --git a/elasticsearch_serverless/_sync/client/indices.py b/elasticsearch_serverless/_sync/client/indices.py index e7ff2f2..015a524 100644 --- a/elasticsearch_serverless/_sync/client/indices.py +++ b/elasticsearch_serverless/_sync/client/indices.py @@ -303,7 +303,9 @@ def create_data_stream( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Create a data stream. Creates a data stream. You must have a matching index template @@ -316,6 +318,11 @@ def create_data_stream( `#`, `:`, or a space character; Cannot start with `-`, `_`, `+`, or `.ds-`; Cannot be `.` or `..`; Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -328,8 +335,12 @@ def create_data_stream( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", @@ -340,63 +351,6 @@ def create_data_stream( path_parts=__path_parts, ) - @_rewrite_parameters() - def data_streams_stats( - self, - *, - name: t.Optional[str] = None, - error_trace: t.Optional[bool] = None, - expand_wildcards: t.Optional[ - t.Union[ - t.Sequence[ - t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] - ], - t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], - ] - ] = None, - filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - human: t.Optional[bool] = None, - pretty: t.Optional[bool] = None, - ) -> ObjectApiResponse[t.Any]: - """ - Get data stream stats. Retrieves statistics for one or more data streams. - - ``_ - - :param name: Comma-separated list of data streams used to limit the request. - Wildcard expressions (`*`) are supported. To target all data streams in a - cluster, omit this parameter or use `*`. 
- :param expand_wildcards: Type of data stream that wildcard patterns can match. - Supports comma-separated values, such as `open,hidden`. - """ - __path_parts: t.Dict[str, str] - if name not in SKIP_IN_PATH: - __path_parts = {"name": _quote(name)} - __path = f'/_data_stream/{__path_parts["name"]}/_stats' - else: - __path_parts = {} - __path = "/_data_stream/_stats" - __query: t.Dict[str, t.Any] = {} - if error_trace is not None: - __query["error_trace"] = error_trace - if expand_wildcards is not None: - __query["expand_wildcards"] = expand_wildcards - if filter_path is not None: - __query["filter_path"] = filter_path - if human is not None: - __query["human"] = human - if pretty is not None: - __query["pretty"] = pretty - __headers = {"accept": "application/json"} - return self.perform_request( # type: ignore[return-value] - "GET", - __path, - params=__query, - headers=__headers, - endpoint_id="indices.data_streams_stats", - path_parts=__path_parts, - ) - @_rewrite_parameters() def delete( self, @@ -611,6 +565,7 @@ def delete_data_stream( ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -622,6 +577,9 @@ def delete_data_stream( are supported. :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values,such as `open,hidden`. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -636,6 +594,8 @@ def delete_data_stream( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -1162,6 +1122,7 @@ def get_data_lifecycle( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1177,6 +1138,9 @@ def get_data_lifecycle( Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. :param include_defaults: If `true`, return all default settings in the response. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -1193,6 +1157,8 @@ def get_data_lifecycle( __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -1222,7 +1188,9 @@ def get_data_stream( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ Get data streams. Retrieves information about one or more data streams. @@ -1236,6 +1204,11 @@ def get_data_stream( Supports comma-separated values, such as `open,hidden`. :param include_defaults: If true, returns all relevant default configurations for the index template. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param verbose: Whether the maximum timestamp for each data stream should be + calculated and returned. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: @@ -1255,8 +1228,12 @@ def get_data_stream( __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if verbose is not None: + __query["verbose"] = verbose __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", @@ -1522,7 +1499,9 @@ def migrate_to_data_stream( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Convert an index alias to a data stream. Converts an index alias to a data stream. @@ -1537,6 +1516,11 @@ def migrate_to_data_stream( ``_ :param name: Name of the index alias to convert to a data stream. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -1549,8 +1533,12 @@ def migrate_to_data_stream( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", @@ -2383,6 +2371,7 @@ def resolve_index( self, *, name: t.Union[str, t.Sequence[str]], + allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ @@ -2394,6 +2383,7 @@ def resolve_index( ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -2405,16 +2395,25 @@ def resolve_index( :param name: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the ``:`` syntax. + :param allow_no_indices: If `false`, the request returns an error if any wildcard + expression, index alias, or `_all` value targets only missing or closed indices. + This behavior applies even if the request targets other open indices. For + example, a request targeting `foo*,bar*` returns an error if an index starts + with `foo` but no index starts with `bar`. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + :param ignore_unavailable: If `false`, the request returns an error if it targets + a missing or closed index. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_resolve/index/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} + if allow_no_indices is not None: + __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: @@ -2423,6 +2422,8 @@ def resolve_index( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if ignore_unavailable is not None: + __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} diff --git a/elasticsearch_serverless/_sync/client/ingest.py b/elasticsearch_serverless/_sync/client/ingest.py index 583e928..c91b102 100644 --- a/elasticsearch_serverless/_sync/client/ingest.py +++ b/elasticsearch_serverless/_sync/client/ingest.py @@ -171,13 +171,21 @@ def processor_grok( ) @_rewrite_parameters( - body_fields=("description", "meta", "on_failure", "processors", "version"), + body_fields=( + "deprecated", + "description", + "meta", + "on_failure", + "processors", + "version", + ), parameter_aliases={"_meta": "meta"}, ) def put_pipeline( self, *, id: str, + deprecated: t.Optional[bool] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, @@ -199,6 +207,10 @@ def put_pipeline( ``_ :param id: ID of the ingest pipeline to create or update. + :param deprecated: Marks this ingest pipeline as deprecated. When a deprecated + ingest pipeline is referenced as the default or final pipeline when creating + or updating a non-deprecated index template, Elasticsearch will emit a deprecation + warning. :param description: Description of the ingest pipeline. :param if_version: Required version for optimistic concurrency control for pipeline updates @@ -242,6 +254,8 @@ def put_pipeline( if timeout is not None: __query["timeout"] = timeout if not __body: + if deprecated is not None: + __body["deprecated"] = deprecated if description is not None: __body["description"] = description if meta is not None: @@ -269,8 +283,8 @@ def put_pipeline( def simulate( self, *, - id: t.Optional[str] = None, docs: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, + id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, @@ -284,15 +298,17 @@ def simulate( ``_ + :param docs: Sample documents to test in the pipeline. :param id: Pipeline to test. If you don’t specify a `pipeline` in the request body, this parameter is required. - :param docs: Sample documents to test in the pipeline. :param pipeline: Pipeline to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. :param verbose: If `true`, the response includes output data for each processor in the executed pipeline. 
""" + if docs is None and body is None: + raise ValueError("Empty value passed for parameter 'docs'") __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} diff --git a/elasticsearch_serverless/_sync/client/ml.py b/elasticsearch_serverless/_sync/client/ml.py index 14070dd..1e4afac 100644 --- a/elasticsearch_serverless/_sync/client/ml.py +++ b/elasticsearch_serverless/_sync/client/ml.py @@ -688,14 +688,14 @@ def flush_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Forces any buffered data to be processed by the job. The flush jobs API is only - applicable when sending data for analysis using the post data API. Depending - on the content of the buffer, then it might additionally calculate new results. - Both flush and close operations are similar, however the flush is more efficient - if you are expecting to send more data for analysis. When flushing, the job remains - open and is available to continue analyzing data. A close operation additionally - prunes and persists the model state to disk and the job must be opened again - before analyzing further data. + Force buffered data to be processed. The flush jobs API is only applicable when + sending data for analysis using the post data API. Depending on the content of + the buffer, then it might additionally calculate new results. Both flush and + close operations are similar, however the flush is more efficient if you are + expecting to send more data for analysis. When flushing, the job remains open + and is available to continue analyzing data. A close operation additionally prunes + and persists the model state to disk and the job must be opened again before + analyzing further data. ``_ @@ -764,7 +764,7 @@ def get_calendar_events( start: t.Optional[t.Union[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the scheduled events in calendars. + Get info about events in calendars. ``_ @@ -830,7 +830,7 @@ def get_calendars( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for calendars. + Get calendar configuration info. ``_ @@ -900,9 +900,9 @@ def get_data_frame_analytics( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for data frame analytics jobs. You can get - information for multiple data frame analytics jobs in a single API request by - using a comma-separated list of data frame analytics jobs or a wildcard expression. + Get data frame analytics job configuration info. You can get information for + multiple data frame analytics jobs in a single API request by using a comma-separated + list of data frame analytics jobs or a wildcard expression. ``_ @@ -974,7 +974,7 @@ def get_data_frame_analytics_stats( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for data frame analytics jobs. + Get data frame analytics jobs usage info. ``_ @@ -1039,12 +1039,12 @@ def get_datafeed_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for datafeeds. You can get statistics for multiple - datafeeds in a single API request by using a comma-separated list of datafeeds - or a wildcard expression. You can get statistics for all datafeeds by using `_all`, - by specifying `*` as the ``, or by omitting the ``. If the - datafeed is stopped, the only information you receive is the `datafeed_id` and - the `state`. 
This API returns a maximum of 10,000 datafeeds. + Get datafeeds usage info. You can get statistics for multiple datafeeds in a + single API request by using a comma-separated list of datafeeds or a wildcard + expression. You can get statistics for all datafeeds by using `_all`, by specifying + `*` as the ``, or by omitting the ``. If the datafeed is stopped, + the only information you receive is the `datafeed_id` and the `state`. This API + returns a maximum of 10,000 datafeeds. ``_ @@ -1100,11 +1100,11 @@ def get_datafeeds( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for datafeeds. You can get information for - multiple datafeeds in a single API request by using a comma-separated list of - datafeeds or a wildcard expression. You can get information for all datafeeds - by using `_all`, by specifying `*` as the ``, or by omitting the ``. - This API returns a maximum of 10,000 datafeeds. + Get datafeeds configuration info. You can get information for multiple datafeeds + in a single API request by using a comma-separated list of datafeeds or a wildcard + expression. You can get information for all datafeeds by using `_all`, by specifying + `*` as the ``, or by omitting the ``. This API returns a maximum + of 10,000 datafeeds. ``_ @@ -1167,7 +1167,7 @@ def get_filters( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves filters. You can get a single filter or all filters. + Get filters. You can get a single filter or all filters. ``_ @@ -1217,7 +1217,7 @@ def get_job_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for anomaly detection jobs. + Get anomaly detection jobs usage info. ``_ @@ -1274,11 +1274,11 @@ def get_jobs( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for anomaly detection jobs. You can get information - for multiple anomaly detection jobs in a single API request by using a group - name, a comma-separated list of jobs, or a wildcard expression. You can get information - for all anomaly detection jobs by using `_all`, by specifying `*` as the ``, - or by omitting the ``. + Get anomaly detection jobs configuration info. You can get information for multiple + anomaly detection jobs in a single API request by using a group name, a comma-separated + list of jobs, or a wildcard expression. You can get information for all anomaly + detection jobs by using `_all`, by specifying `*` as the ``, or by omitting + the ``. ``_ @@ -1355,19 +1355,19 @@ def get_overall_buckets( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves overall bucket results that summarize the bucket results of multiple - anomaly detection jobs. The `overall_score` is calculated by combining the scores - of all the buckets within the overall bucket span. First, the maximum `anomaly_score` - per anomaly detection job in the overall bucket is calculated. Then the `top_n` - of those scores are averaged to result in the `overall_score`. This means that - you can fine-tune the `overall_score` so that it is more or less sensitive to - the number of jobs that detect an anomaly at the same time. For example, if you - set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall - bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` - is high only when all jobs detect anomalies in that overall bucket. 
If you set - the `bucket_span` parameter (to a value greater than its default), the `overall_score` - is the maximum `overall_score` of the overall buckets that have a span equal - to the jobs' largest bucket span. + Get overall bucket results. Retrieves overall bucket results that summarize the + bucket results of multiple anomaly detection jobs. The `overall_score` is calculated + by combining the scores of all the buckets within the overall bucket span. First, + the maximum `anomaly_score` per anomaly detection job in the overall bucket is + calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. + This means that you can fine-tune the `overall_score` so that it is more or less + sensitive to the number of jobs that detect an anomaly at the same time. For + example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket + score in the overall bucket. Alternatively, if you set `top_n` to the number + of jobs, the `overall_score` is high only when all jobs detect anomalies in that + overall bucket. If you set the `bucket_span` parameter (to a value greater than + its default), the `overall_score` is the maximum `overall_score` of the overall + buckets that have a span equal to the jobs' largest bucket span. ``_ @@ -1463,7 +1463,7 @@ def get_trained_models( tags: t.Optional[t.Union[str, t.Sequence[str]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for a trained model. + Get trained model configuration info. ``_ @@ -1545,9 +1545,9 @@ def get_trained_models_stats( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for trained models. You can get usage information - for multiple trained models in a single API request by using a comma-separated - list of model IDs or a wildcard expression. + Get trained models usage info. You can get usage information for multiple trained + models in a single API request by using a comma-separated list of model IDs or + a wildcard expression. ``_ @@ -1610,7 +1610,7 @@ def infer_trained_model( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Evaluates a trained model. + Evaluate a trained model. ``_ @@ -1672,12 +1672,12 @@ def open_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Open anomaly detection jobs. An anomaly detection job must be opened in order - for it to be ready to receive and analyze data. It can be opened and closed multiple - times throughout its lifecycle. When you open a new job, it starts with an empty - model. When you open an existing job, the most recent model state is automatically - loaded. The job is ready to resume its analysis from where it left off, once - new data is received. + Open anomaly detection jobs. An anomaly detection job must be opened to be ready + to receive and analyze data. It can be opened and closed multiple times throughout + its lifecycle. When you open a new job, it starts with an empty model. When you + open an existing job, the most recent model state is automatically loaded. The + job is ready to resume its analysis from where it left off, once new data is + received. ``_ @@ -1731,7 +1731,7 @@ def post_calendar_events( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds scheduled events to a calendar. + Add scheduled events to the calendar.
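The reworded `get_overall_buckets` summary above explains how `top_n` and `bucket_span` shape the `overall_score`. A sketch of a call that exercises that behaviour; the keyword arguments and job ids are assumptions based on the existing method, not part of this diff:

# `client` as constructed in the earlier sketch.
buckets = client.ml.get_overall_buckets(
    job_id="job-1,job-2",
    top_n=2,              # average the two highest per-job scores in each overall bucket
    bucket_span="1h",     # should be at least the largest bucket span of the jobs
    overall_score=75.0,   # only return overall buckets at or above this score
)
for bucket in buckets["overall_buckets"]:
    print(bucket["timestamp"], bucket["overall_score"])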
``_ @@ -1785,7 +1785,8 @@ def preview_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Previews the extracted features used by a data frame analytics config. + Preview features used by data frame analytics. Previews the extracted features + used by a data frame analytics config. ``_ @@ -1847,7 +1848,7 @@ def preview_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Previews a datafeed. This API returns the first "page" of search results from + Preview a datafeed. This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When @@ -1931,7 +1932,7 @@ def put_calendar( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a calendar. + Create a calendar. ``_ @@ -1985,7 +1986,7 @@ def put_calendar_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds an anomaly detection job to a calendar. + Add anomaly detection job to calendar. ``_ @@ -2057,9 +2058,9 @@ def put_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a data frame analytics job. This API creates a data frame analytics - job that performs an analysis on the source indices and stores the outcome in - a destination index. + Create a data frame analytics job. This API creates a data frame analytics job + that performs an analysis on the source indices and stores the outcome in a destination + index. ``_ @@ -2227,8 +2228,8 @@ def put_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a datafeed. Datafeeds retrieve data from Elasticsearch for analysis - by an anomaly detection job. You can associate only one datafeed with each anomaly + Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by + an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay') at each interval. When Elasticsearch security features are enabled, your datafeed @@ -2390,9 +2391,9 @@ def put_filter( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a filter. A filter contains a list of strings. It can be used by - one or more anomaly detection jobs. Specifically, filters are referenced in the - `custom_rules` property of detector configuration objects. + Create a filter. A filter contains a list of strings. It can be used by one or + more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` + property of detector configuration objects. ``_ @@ -2659,7 +2660,8 @@ def put_trained_model( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables you to supply a trained model that is not created by data frame analytics. + Create a trained model. Enable you to supply a trained model that is not created + by data frame analytics. ``_ @@ -2761,15 +2763,15 @@ def put_trained_model_alias( reassign: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a trained model alias. 
A trained model alias is a logical - name used to reference a single trained model. You can use aliases instead of - trained model identifiers to make it easier to reference your models. For example, - you can use aliases in inference aggregations and processors. An alias must be - unique and refer to only a single trained model. However, you can have multiple - aliases for each trained model. If you use this API to update an alias such that - it references a different trained model ID and the model uses a different type - of data frame analytics, an error occurs. For example, this situation occurs - if you have a trained model for regression analysis and a trained model for classification + Create or update a trained model alias. A trained model alias is a logical name + used to reference a single trained model. You can use aliases instead of trained + model identifiers to make it easier to reference your models. For example, you + can use aliases in inference aggregations and processors. An alias must be unique + and refer to only a single trained model. However, you can have multiple aliases + for each trained model. If you use this API to update an alias such that it references + a different trained model ID and the model uses a different type of data frame + analytics, an error occurs. For example, this situation occurs if you have a + trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns @@ -2831,7 +2833,7 @@ def put_trained_model_definition_part( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates part of a trained model definition. + Create part of a trained model definition. ``_ @@ -2908,7 +2910,7 @@ def put_trained_model_vocabulary( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a trained model vocabulary. This API is supported only for natural language + Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. @@ -2966,7 +2968,7 @@ def reset_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resets an anomaly detection job. All model state and results are deleted. The + Reset an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list. @@ -3018,16 +3020,16 @@ def start_data_frame_analytics( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts a data frame analytics job. A data frame analytics job can be started - and stopped multiple times throughout its lifecycle. If the destination index - does not exist, it is created automatically the first time you start the data - frame analytics job. The `index.number_of_shards` and `index.number_of_replicas` - settings for the destination index are copied from the source index. If there - are multiple source indices, the destination index copies the highest setting - values. The mappings for the destination index are also copied from the source - indices. 
If there are any mapping conflicts, the job fails to start. If the destination - index exists, it is used as is. You can therefore set up the destination index - in advance with custom settings and mappings. + Start a data frame analytics job. A data frame analytics job can be started and + stopped multiple times throughout its lifecycle. If the destination index does + not exist, it is created automatically the first time you start the data frame + analytics job. The `index.number_of_shards` and `index.number_of_replicas` settings + for the destination index are copied from the source index. If there are multiple + source indices, the destination index copies the highest setting values. The + mappings for the destination index are also copied from the source indices. If + there are any mapping conflicts, the job fails to start. If the destination index + exists, it is used as is. You can therefore set up the destination index in advance + with custom settings and mappings. ``_ @@ -3079,17 +3081,17 @@ def start_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts one or more datafeeds. A datafeed must be started in order to retrieve - data from Elasticsearch. A datafeed can be started and stopped multiple times - throughout its lifecycle. Before you can start a datafeed, the anomaly detection - job must be open. Otherwise, an error occurs. If you restart a stopped datafeed, - it continues processing input data from the next millisecond after it was stopped. - If new data was indexed for that exact millisecond between stopping and starting, - it will be ignored. When Elasticsearch security features are enabled, your datafeed - remembers which roles the last user to create or update it had at the time of - creation or update and runs the query using those same roles. If you provided - secondary authorization headers when you created or updated the datafeed, those - credentials are used instead. + Start datafeeds. A datafeed must be started in order to retrieve data from Elasticsearch. + A datafeed can be started and stopped multiple times throughout its lifecycle. + Before you can start a datafeed, the anomaly detection job must be open. Otherwise, + an error occurs. If you restart a stopped datafeed, it continues processing input + data from the next millisecond after it was stopped. If new data was indexed + for that exact millisecond between stopping and starting, it will be ignored. + When Elasticsearch security features are enabled, your datafeed remembers which + roles the last user to create or update it had at the time of creation or update + and runs the query using those same roles. If you provided secondary authorization + headers when you created or updated the datafeed, those credentials are used + instead. ``_ @@ -3158,8 +3160,8 @@ def start_trained_model_deployment( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts a trained model deployment, which allocates the model to every machine - learning node. + Start a trained model deployment. It allocates the model to every machine learning + node. ``_ @@ -3242,8 +3244,8 @@ def stop_data_frame_analytics( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops one or more data frame analytics jobs. A data frame analytics job can be - started and stopped multiple times throughout its lifecycle. + Stop data frame analytics jobs. A data frame analytics job can be started and + stopped multiple times throughout its lifecycle. 
``_ @@ -3308,9 +3310,8 @@ def stop_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops one or more datafeeds. A datafeed that is stopped ceases to retrieve data - from Elasticsearch. A datafeed can be started and stopped multiple times throughout - its lifecycle. + Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. + A datafeed can be started and stopped multiple times throughout its lifecycle. ``_ @@ -3372,7 +3373,7 @@ def stop_trained_model_deployment( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops a trained model deployment. + Stop a trained model deployment. ``_ @@ -3437,7 +3438,7 @@ def update_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates an existing data frame analytics job. + Update a data frame analytics job. ``_ @@ -3545,11 +3546,11 @@ def update_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the properties of a datafeed. You must stop and start the datafeed for - the changes to be applied. When Elasticsearch security features are enabled, - your datafeed remembers which roles the user who updated it had at the time of - the update and runs the query using those same roles. If you provide secondary - authorization headers, those credentials are used instead. + Update a datafeed. You must stop and start the datafeed for the changes to be + applied. When Elasticsearch security features are enabled, your datafeed remembers + which roles the user who updated it had at the time of the update and runs the + query using those same roles. If you provide secondary authorization headers, + those credentials are used instead. ``_ @@ -3712,7 +3713,8 @@ def update_filter( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the description of a filter, adds items, or removes items from the list. + Update a filter. Updates the description of a filter, adds items, or removes + items from the list. ``_ @@ -3802,7 +3804,8 @@ def update_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates certain properties of an anomaly detection job. + Update an anomaly detection job. Updates certain properties of an anomaly detection + job. ``_ @@ -3928,8 +3931,7 @@ def update_trained_model_deployment( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts a trained model deployment, which allocates the model to every machine - learning node. + Update a trained model deployment. 
``_ diff --git a/elasticsearch_serverless/_sync/client/query_rules.py b/elasticsearch_serverless/_sync/client/query_rules.py index 06dda16..72150e9 100644 --- a/elasticsearch_serverless/_sync/client/query_rules.py +++ b/elasticsearch_serverless/_sync/client/query_rules.py @@ -262,7 +262,7 @@ def put_rule( criteria: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, - type: t.Optional[t.Union[str, t.Literal["pinned"]]] = None, + type: t.Optional[t.Union[str, t.Literal["exclude", "pinned"]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, diff --git a/elasticsearch_serverless/_sync/client/search_application.py b/elasticsearch_serverless/_sync/client/search_application.py index e6483fe..69a186e 100644 --- a/elasticsearch_serverless/_sync/client/search_application.py +++ b/elasticsearch_serverless/_sync/client/search_application.py @@ -36,7 +36,8 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a search application. + Delete a search application. Remove a search application and its associated alias. + Indices attached to the search application are not removed. ``_ @@ -76,7 +77,8 @@ def delete_behavioral_analytics( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a behavioral analytics collection. + Delete a behavioral analytics collection. The associated data stream is also + deleted. ``_ @@ -116,7 +118,7 @@ def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the details about a search application + Get search application details. ``_ @@ -156,7 +158,7 @@ def get_behavioral_analytics( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the existing behavioral analytics collections. + Get behavioral analytics collections. ``_ @@ -254,7 +256,7 @@ def put( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a search application. + Create or update a search application. ``_ @@ -307,7 +309,7 @@ def put_behavioral_analytics( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a behavioral analytics collection. + Create a behavioral analytics collection. ``_ @@ -353,7 +355,10 @@ def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Perform a search against a search application. + Run a search application search. Generate and run an Elasticsearch query that + uses the specified query parameter and the search template associated with + the search application or default template. Unspecified template parameters are + assigned their default values if applicable.
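Two of the smaller changes above are easy to miss: query rules may now use the `exclude` type, and the search-application `search` summary spells out how template parameters are applied. A hedged sketch; the ruleset, rule, and application names, as well as the `actions` argument of `put_rule`, come from the existing API rather than from this diff:

# `client` as constructed in the earlier sketch.
client.query_rules.put_rule(
    ruleset_id="my-ruleset",
    rule_id="hide-outdated-doc",
    type="exclude",   # newly allowed alongside "pinned"
    criteria=[{"type": "contains", "metadata": "user_query", "values": ["pricing"]}],
    actions={"ids": ["outdated-pricing-doc"]},
)

resp = client.search_application.search(
    name="my-search-app",
    params={"query_string": "pricing", "size": 5},  # unspecified template params use defaults
)
print(resp["hits"]["total"])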
``_ diff --git a/elasticsearch_serverless/_sync/client/sql.py b/elasticsearch_serverless/_sync/client/sql.py index 3635bfc..bd8afc0 100644 --- a/elasticsearch_serverless/_sync/client/sql.py +++ b/elasticsearch_serverless/_sync/client/sql.py @@ -251,7 +251,11 @@ def query( field_multi_value_leniency: t.Optional[bool] = None, filter: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - format: t.Optional[str] = None, + format: t.Optional[ + t.Union[ + str, t.Literal["cbor", "csv", "json", "smile", "tsv", "txt", "yaml"] + ] + ] = None, human: t.Optional[bool] = None, index_using_frozen: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, diff --git a/elasticsearch_serverless/_sync/client/synonyms.py b/elasticsearch_serverless/_sync/client/synonyms.py index 2455c3f..9e2b66e 100644 --- a/elasticsearch_serverless/_sync/client/synonyms.py +++ b/elasticsearch_serverless/_sync/client/synonyms.py @@ -262,7 +262,9 @@ def put_synonym( self, *, id: str, - synonyms_set: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, + synonyms_set: t.Optional[ + t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] + ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None,
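To close, `sql.query` (like `esql.query` earlier in the diff) now types `format` as a fixed set of literals, and `put_synonym` accepts either a single rule object or a sequence of them. A final sketch with placeholder data:

# `client` as constructed in the earlier sketch.
csv_rows = client.sql.query(
    query="SELECT page_count, name FROM library ORDER BY page_count DESC LIMIT 5",
    format="csv",   # one of: cbor, csv, json, smile, tsv, txt, yaml
)
print(csv_rows)

client.synonyms.put_synonym(
    id="product-synonyms",
    synonyms_set=[
        {"id": "tv", "synonyms": "tv, television, flat screen"},
        {"synonyms": "laptop, notebook"},  # the per-rule id is optional
    ],
)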