diff --git a/elasticsearch_serverless/_async/client/__init__.py b/elasticsearch_serverless/_async/client/__init__.py index 8eb78fa..3031fef 100644 --- a/elasticsearch_serverless/_async/client/__init__.py +++ b/elasticsearch_serverless/_async/client/__init__.py @@ -2126,9 +2126,9 @@ async def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get cluster info. Returns basic information about the cluster. + Get cluster info. Get basic build, version, and cluster information. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/" diff --git a/elasticsearch_serverless/_async/client/cluster.py b/elasticsearch_serverless/_async/client/cluster.py index 7dab99d..1b7a004 100644 --- a/elasticsearch_serverless/_async/client/cluster.py +++ b/elasticsearch_serverless/_async/client/cluster.py @@ -38,9 +38,8 @@ async def delete_component_template( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete component templates. Deletes component templates. Component templates - are building blocks for constructing index templates that specify index mappings, - settings, and aliases. + Delete component templates. Component templates are building blocks for constructing + index templates that specify index mappings, settings, and aliases. ``_ @@ -148,7 +147,7 @@ async def get_component_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get component templates. Retrieves information about component templates. + Get component templates. Get information about component templates. ``_ @@ -266,20 +265,21 @@ async def put_component_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a component template. Creates or updates a component template. - Component templates are building blocks for constructing index templates that - specify index mappings, settings, and aliases. An index template can be composed - of multiple component templates. To use a component template, specify it in an - index template’s `composed_of` list. Component templates are only applied to - new data streams and indices as part of a matching index template. Settings and - mappings specified directly in the index template or the create index request - override any settings or mappings specified in a component template. Component - templates are only used during index creation. For data streams, this includes - data stream creation and the creation of a stream’s backing indices. Changes - to component templates do not affect existing indices, including a stream’s backing - indices. You can use C-style `/* *\\/` block comments in component templates. + Create or update a component template. Component templates are building blocks + for constructing index templates that specify index mappings, settings, and aliases. + An index template can be composed of multiple component templates. To use a component + template, specify it in an index template’s `composed_of` list. Component templates + are only applied to new data streams and indices as part of a matching index + template. Settings and mappings specified directly in the index template or the + create index request override any settings or mappings specified in a component + template. Component templates are only used during index creation. For data streams, + this includes data stream creation and the creation of a stream’s backing indices. 
+ Changes to component templates do not affect existing indices, including a stream’s + backing indices. You can use C-style `/* *\\/` block comments in component templates. You can include comments anywhere in the request body except before the opening - curly bracket. + curly bracket. **Applying component templates** You cannot directly apply a component + template to a data stream or index. To be applied, a component template must + be included in an index template's `composed_of` list. ``_ @@ -302,8 +302,8 @@ async def put_component_template( :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - :param meta: Optional user metadata about the component template. May have any - contents. This map is not automatically generated by Elasticsearch. This + :param meta: Optional user metadata about the component template. It may have + any contents. This map is not automatically generated by Elasticsearch. This information is stored in the cluster state, so keeping it short is preferable. To unset `_meta`, replace the template without specifying this information. :param version: Version number used to manage component templates externally. diff --git a/elasticsearch_serverless/_async/client/enrich.py b/elasticsearch_serverless/_async/client/enrich.py index 33de86b..b544bf0 100644 --- a/elasticsearch_serverless/_async/client/enrich.py +++ b/elasticsearch_serverless/_async/client/enrich.py @@ -33,6 +33,7 @@ async def delete_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -41,6 +42,7 @@ async def delete_policy( ``_ :param name: Enrich policy to delete. + :param master_timeout: Period to wait for a connection to the master node. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -53,6 +55,8 @@ async def delete_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -73,6 +77,7 @@ async def execute_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: @@ -82,6 +87,7 @@ async def execute_policy( ``_ :param name: Enrich policy to execute. + :param master_timeout: Period to wait for a connection to the master node. :param wait_for_completion: If `true`, the request blocks other enrich policy execution requests until complete. 
""" @@ -96,6 +102,8 @@ async def execute_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if wait_for_completion is not None: @@ -118,6 +126,7 @@ async def get_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -127,6 +136,7 @@ async def get_policy( :param name: Comma-separated list of enrich policy names used to limit the request. To return information for all enrich policies, omit this parameter. + :param master_timeout: Period to wait for a connection to the master node. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: @@ -142,6 +152,8 @@ async def get_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -165,6 +177,7 @@ async def put_policy( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, geo_match: t.Optional[t.Mapping[str, t.Any]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, match: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, range: t.Optional[t.Mapping[str, t.Any]] = None, @@ -178,6 +191,7 @@ async def put_policy( :param name: Name of the enrich policy to create or update. :param geo_match: Matches enrich data to incoming documents based on a `geo_shape` query. + :param master_timeout: Period to wait for a connection to the master node. :param match: Matches enrich data to incoming documents based on a `term` query. :param range: Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. @@ -194,6 +208,8 @@ async def put_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: diff --git a/elasticsearch_serverless/_async/client/indices.py b/elasticsearch_serverless/_async/client/indices.py index 218c5ab..7cc94d4 100644 --- a/elasticsearch_serverless/_async/client/indices.py +++ b/elasticsearch_serverless/_async/client/indices.py @@ -137,8 +137,12 @@ async def analyze( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get tokens from text analysis. The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) - on a text string and returns the resulting tokens. + Get tokens from text analysis. The analyze API performs analysis on a text string + and returns the resulting tokens. Generating excessive amount of tokens may cause + a node to run out of memory. The `index.analyze.max_token_count` setting enables + you to limit the number of tokens that can be produced. If more than this limit + of tokens gets generated, an error occurs. The `_analyze` endpoint without a + specified index will always use `10000` as its limit. 
``_ @@ -236,7 +240,26 @@ async def create( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an index. Creates a new index. + Create an index. You can use the create index API to add a new index to an Elasticsearch + cluster. When creating an index, you can specify the following: * Settings for + the index. * Mappings for fields in the index. * Index aliases. **Wait for active + shards** By default, index creation will only return a response to the client + when the primary copies of each shard have been started, or the request times + out. The index creation response will indicate what happened. For example, `acknowledged` + indicates whether the index was successfully created in the cluster, while `shards_acknowledged` + indicates whether the requisite number of shard copies were started for each + shard in the index before timing out. Note that it is still possible for either + `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation + to be successful. These values simply indicate whether the operation completed + before the timeout. If `acknowledged` is `false`, the request timed out before + the cluster state was updated with the newly created index, but it probably will + be created sometime soon. If `shards_acknowledged` is `false`, then the request + timed out before the requisite number of shards were started (by default just + the primaries), even if the cluster state was successfully updated to reflect + the newly created index (that is to say, `acknowledged` is `true`). You can change + the default of only waiting for the primary shards to start through the index + setting `index.write.wait_for_active_shards`. Note that changing this setting + will also affect the `wait_for_active_shards` value on all subsequent write operations. ``_ @@ -375,7 +398,11 @@ async def delete( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete indices. Deletes one or more indices. + Delete indices. Deleting an index deletes its documents, shards, and metadata. + It does not delete related Kibana components, such as data views, visualizations, + or dashboards. You cannot delete the current write index of a data stream. To + delete the index, you must roll over the data stream so a new write index is + created. You can then use the delete index API to delete the previous write index. ``_ @@ -447,7 +474,7 @@ async def delete_alias( """ Delete an alias. Removes a data stream or index from an alias. - ``_ + ``_ :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). @@ -626,8 +653,7 @@ async def exists( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Check indices. Checks if one or more indices, index aliases, or data streams - exist. + Check indices. Check if one or more indices, index aliases, or data streams exist. ``_ @@ -894,8 +920,8 @@ async def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index information. Returns information about one or more indices. For data - streams, the API returns information about the stream’s backing indices. + Get index information. Get information about one or more indices. For data streams, + the API returns information about the stream’s backing indices. ``_ @@ -986,8 +1012,6 @@ async def get_alias( """ Get aliases. Retrieves information for one or more data stream or index aliases.
- ``_ - :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. @@ -1200,7 +1224,7 @@ async def get_index_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index templates. Returns information about one or more index templates. + Get index templates. Get information about one or more index templates. ``_ @@ -1273,8 +1297,8 @@ async def get_mapping( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get mapping definitions. Retrieves mapping definitions for one or more indices. - For data streams, the API retrieves mappings for the stream’s backing indices. + Get mapping definitions. For data streams, the API retrieves mappings for the + stream’s backing indices. ``_ @@ -1358,8 +1382,8 @@ async def get_settings( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index settings. Returns setting information for one or more indices. For - data streams, returns setting information for the stream’s backing indices. + Get index settings. Get setting information for one or more indices. For data + streams, it returns setting information for the stream's backing indices. ``_ @@ -1762,7 +1786,33 @@ async def put_index_template( ) -> ObjectApiResponse[t.Any]: """ Create or update an index template. Index templates define settings, mappings, - and aliases that can be applied automatically to new indices. + and aliases that can be applied automatically to new indices. Elasticsearch applies + templates to new indices based on a wildcard pattern that matches the index + name. Index templates are applied during data stream or index creation. For data + streams, these settings and mappings are applied when the stream's backing indices + are created. Settings and mappings specified in a create index API request override + any settings or mappings specified in an index template. Changes to index templates + do not affect existing indices, including the existing backing indices of a data + stream. You can use C-style `/* *\\/` block comments in index templates. You + can include comments anywhere in the request body, except before the opening + curly bracket. **Multiple matching templates** If multiple index templates match + the name of a new index or data stream, the template with the highest priority + is used. Multiple templates with overlapping index patterns at the same priority + are not allowed and an error will be thrown when attempting to create a template + matching an existing index template at identical priorities. **Composing aliases, + mappings, and settings** When multiple component templates are specified in the + `composed_of` field for an index template, they are merged in the order specified, + meaning that later component templates override earlier component templates. + Any mappings, settings, or aliases from the parent index template are merged + in next. Finally, any configuration on the index request itself is merged. Mapping + definitions are merged recursively, which means that later mapping components + can introduce new field mappings and update the mapping configuration. If a field + mapping is already contained in an earlier component, its definition will be + completely overwritten by the later one. This recursive merging strategy applies + not only to field mappings, but also root options like `dynamic_templates` and + `meta`.
If an earlier component contains a `dynamic_templates` block, then by + default new `dynamic_templates` entries are appended onto the end. If an entry + already exists with the same key, then it is overwritten by the new definition. ``_ @@ -1792,8 +1842,11 @@ async def put_index_template( :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - :param meta: Optional user metadata about the index template. May have any contents. - This map is not automatically generated by Elasticsearch. + :param meta: Optional user metadata about the index template. It may have any + contents. It is not automatically generated or used by Elasticsearch. This + user-defined object is stored in the cluster state, so keeping it short is + preferable. To unset the metadata, replace the template without specifying + it. :param priority: Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. If no priority is specified the template is treated as though @@ -1802,7 +1855,9 @@ :param template: Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration. :param version: Version number used to manage index templates externally. This - number is not automatically generated by Elasticsearch. + number is not automatically generated by Elasticsearch. External systems + can use these version numbers to simplify template management. To unset a + version, replace the template without specifying one. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -1921,9 +1976,27 @@ async def put_mapping( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update field mappings. Adds new fields to an existing data stream or index. You - can also use this API to change the search settings of existing fields. For data - streams, these changes are applied to all backing indices by default. + Update field mappings. Add new fields to an existing data stream or index. You + can also use this API to change the search settings of existing fields and add + new properties to existing object fields. For data streams, these changes are + applied to all backing indices by default. **Add multi-fields to an existing + field** Multi-fields let you index the same field in different ways. You can + use this API to update the `fields` mapping parameter and enable multi-fields for + an existing field. WARNING: If an index (or data stream) contains documents when + you add a multi-field, those documents will not have values for the new multi-field. + You can populate the new multi-field with the update by query API. **Change supported + mapping parameters for an existing field** The documentation for each mapping + parameter indicates whether you can update it for an existing field using this + API. For example, you can use the update mapping API to update the `ignore_above` + parameter. **Change the mapping of an existing field** Except for supported mapping + parameters, you can't change the mapping or field type of an existing field. + Changing an existing field could invalidate data that's already indexed. If you + need to change the mapping of a field in a data stream's backing indices, refer + to documentation about modifying data streams.
If you need to change the mapping + of a field in other indices, create a new index with the correct mapping and + reindex your data into that index. **Rename a field** Renaming a field would + invalidate data already indexed under the old field name. Instead, add an alias + field to create an alternate field name. ``_ @@ -2054,6 +2127,19 @@ async def put_settings( """ Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. + To revert a setting to the default value, use a null value. The list of per-index + settings that can be updated dynamically on live indices can be found in index + module documentation. To preserve existing settings from being updated, set the + `preserve_existing` parameter to `true`. NOTE: You can only define new analyzers + on closed indices. To add an analyzer, you must close the index, define the analyzer, + and reopen the index. You cannot close the write index of a data stream. To update + the analyzer for a data stream's write index and future backing indices, update + the analyzer in the index template used by the stream. Then roll over the data + stream to apply the new analyzer to the stream's write index and future backing + indices. This affects searches and any new data added to the stream after the + rollover. However, it does not affect the data stream's backing indices or their + existing data. To change the analyzer for existing backing indices, you must + create a new data stream and reindex your data into it. ``_ @@ -2150,7 +2236,17 @@ async def refresh( """ Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation - on the stream’s backing indices. + on the stream’s backing indices. By default, Elasticsearch periodically refreshes + indices every second, but only on indices that have received one search request + or more in the last 30 seconds. You can change this default interval with the + `index.refresh_interval` setting. Refresh requests are synchronous and do not + return a response until the refresh operation completes. Refreshes are resource-intensive. + To ensure good cluster performance, it's recommended to wait for Elasticsearch's + periodic refresh rather than performing an explicit refresh when possible. If + your application workflow indexes documents and then runs a search to retrieve + the indexed document, it's recommended to use the index API's `refresh=wait_for` + query parameter option. This option ensures the indexing operation waits for + a periodic refresh before running the search. ``_ @@ -2294,7 +2390,33 @@ async def rollover( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Roll over to a new index. Creates a new index for a data stream or index alias. + Roll over to a new index. TIP: It is recommended to use the index lifecycle rollover + action to automate rollovers. The rollover API creates a new index for a data + stream or index alias. The API behavior depends on the rollover target. **Roll + over a data stream** If you roll over a data stream, the API creates a new write + index for the stream. The stream's previous write index becomes a regular backing + index. A rollover also increments the data stream's generation. **Roll over an + index alias with a write index** TIP: Prior to Elasticsearch 7.9, you'd typically + use an index alias with a write index to manage time series data. 
Data streams + replace this functionality, require less maintenance, and automatically integrate + with data tiers. If an index alias points to multiple indices, one of the indices + must be a write index. The rollover API creates a new write index for the alias + with `is_write_index` set to `true`. The API also sets `is_write_index` to `false` + for the previous write index. **Roll over an index alias with one index** If + you roll over an index alias that points to only one index, the API creates a + new index for the alias and removes the original index from the alias. NOTE: + A rollover creates a new index and is subject to the `wait_for_active_shards` + setting. **Increment index names for an alias** When you roll over an index alias, + you can specify a name for the new index. If you don't specify a name and the + current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, + the new index name increments that number. For example, if you roll over an alias + with a current index of `my-index-000001`, the rollover creates a new index named + `my-index-000002`. This number is always six characters and zero-padded, regardless + of the previous index's name. If you use an index alias for time series data, + you can use date math in the index name to track the rollover date. For example, + you can create an alias that points to an index named `<my-index-{now/d}-000001>`. + If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`. + If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`. ``_ @@ -2390,8 +2512,8 @@ async def simulate_index_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate an index. Returns the index configuration that would be applied to the - specified index from an existing index template. + Simulate an index. Get the index configuration that would be applied to the specified + index from an existing index template. ``_ @@ -2468,7 +2590,7 @@ async def simulate_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate an index template. Returns the index configuration that would be applied + Simulate an index template. Get the index configuration that would be applied by a particular index template. ``_ diff --git a/elasticsearch_serverless/_async/client/logstash.py b/elasticsearch_serverless/_async/client/logstash.py index 25e9f82..406aea3 100644 --- a/elasticsearch_serverless/_async/client/logstash.py +++ b/elasticsearch_serverless/_async/client/logstash.py @@ -37,7 +37,8 @@ async def delete_pipeline( ) -> ObjectApiResponse[t.Any]: """ Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central - Management. + Management. If the request succeeds, you receive an empty response with an appropriate + status code. ``_ diff --git a/elasticsearch_serverless/_async/client/ml.py b/elasticsearch_serverless/_async/client/ml.py index e4dfb5b..f2846f4 100644 --- a/elasticsearch_serverless/_async/client/ml.py +++ b/elasticsearch_serverless/_async/client/ml.py @@ -457,6 +457,7 @@ async def delete_trained_model( force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Delete an unreferenced trained model. The request deletes a trained inference @@ -467,6 +468,8 @@ :param model_id: The unique identifier of the trained model.
:param force: Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") @@ -483,6 +486,8 @@ async def delete_trained_model( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", @@ -2074,7 +2079,11 @@ async def put_data_frame_analytics( """ Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination - index. + index. By default, the query used in the source configuration is `{"match_all": + {}}`. If the destination index does not exist, it is created automatically when + you start the job. If you supply only a subset of the regression or classification + parameters, hyperparameter optimization occurs. It determines a value for each + of the undefined parameters. ``_ @@ -2250,8 +2259,9 @@ async def put_datafeed( Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval - (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) - at each interval. When Elasticsearch security features are enabled, your datafeed + (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) + at each interval. By default, the datafeed uses the following query: `{"match_all": + {"boost": 1}}`. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or @@ -2514,7 +2524,8 @@ async def put_job( ) -> ObjectApiResponse[t.Any]: """ Create an anomaly detection job. If you include a `datafeed_config`, you must - have read index privileges on the source index. + have read index privileges on the source index. If you include a `datafeed_config` + but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`. ``_ diff --git a/elasticsearch_serverless/_async/client/query_rules.py b/elasticsearch_serverless/_async/client/query_rules.py index 6622cf0..4218557 100644 --- a/elasticsearch_serverless/_async/client/query_rules.py +++ b/elasticsearch_serverless/_async/client/query_rules.py @@ -38,7 +38,9 @@ async def delete_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a query rule. Delete a query rule within a query ruleset. + Delete a query rule. Delete a query rule within a query ruleset. This is a destructive + action that is only recoverable by re-adding the same rule with the create or + update query rule API. ``_ @@ -86,7 +88,8 @@ async def delete_ruleset( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a query ruleset. + Delete a query ruleset. Remove a query ruleset and its associated data. This + is a destructive action that is not recoverable.
``_ @@ -222,8 +225,8 @@ async def list_rulesets( ``_ - :param from_: Starting offset (default: 0) - :param size: specifies a max number of results to get + :param from_: The offset from the first result to fetch. + :param size: The maximum number of results to retrieve. """ __path_parts: t.Dict[str, str] = {} __path = "/_query_rules" @@ -272,16 +275,25 @@ async def put_rule( ) -> ObjectApiResponse[t.Any]: """ Create or update a query rule. Create or update a query rule within a query ruleset. + IMPORTANT: Due to limitations within pinned queries, you can only pin documents + using `ids` or `docs`, but cannot use both in a single rule. It is advised to use one + or the other in query rulesets to avoid errors. Additionally, pinned queries + have a maximum limit of 100 pinned hits. If multiple matching rules pin more + than 100 documents, only the first 100 documents are pinned in the order they + are specified in the ruleset. ``_ :param ruleset_id: The unique identifier of the query ruleset containing the - rule to be created or updated + rule to be created or updated. :param rule_id: The unique identifier of the query rule within the specified - ruleset to be created or updated - :param actions: - :param criteria: - :param type: + ruleset to be created or updated. + :param actions: The actions to take when the rule is matched. The format of this + action depends on the rule type. + :param criteria: The criteria that must be met for the rule to be applied. If + multiple criteria are specified for a rule, all criteria must be met for + the rule to be applied. + :param type: The type of rule. :param priority: """ if ruleset_id in SKIP_IN_PATH: @@ -346,12 +358,19 @@ async def put_ruleset( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a query ruleset. + Create or update a query ruleset. There is a limit of 100 rules per ruleset. + This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` + cluster setting. IMPORTANT: Due to limitations within pinned queries, you can + only select documents using `ids` or `docs`, but cannot use both in a single rule. + It is advised to use one or the other in query rulesets to avoid errors. Additionally, + pinned queries have a maximum limit of 100 pinned hits. If multiple matching + rules pin more than 100 documents, only the first 100 documents are pinned in + the order they are specified in the ruleset. ``_ :param ruleset_id: The unique identifier of the query ruleset to be created or - updated + updated. :param rules: """ if ruleset_id in SKIP_IN_PATH: @@ -406,7 +425,9 @@ async def test( :param ruleset_id: The unique identifier of the query ruleset to be created or updated - :param match_criteria: + :param match_criteria: The match criteria to apply to rules in the given query + ruleset. Match criteria should match the keys defined in the `criteria.metadata` + field of the rule.
""" if ruleset_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'ruleset_id'") diff --git a/elasticsearch_serverless/_async/client/security.py b/elasticsearch_serverless/_async/client/security.py index 2390f0a..fa9bcf4 100644 --- a/elasticsearch_serverless/_async/client/security.py +++ b/elasticsearch_serverless/_async/client/security.py @@ -903,6 +903,91 @@ async def query_api_keys( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("from_", "query", "search_after", "size", "sort"), + parameter_aliases={"from": "from_"}, + ) + async def query_role( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + from_: t.Optional[int] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + query: t.Optional[t.Mapping[str, t.Any]] = None, + search_after: t.Optional[ + t.Sequence[t.Union[None, bool, float, int, str, t.Any]] + ] = None, + size: t.Optional[int] = None, + sort: t.Optional[ + t.Union[ + t.Sequence[t.Union[str, t.Mapping[str, t.Any]]], + t.Union[str, t.Mapping[str, t.Any]], + ] + ] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Find roles with a query. Get roles in a paginated manner. You can optionally + filter the results with a query. + + ``_ + + :param from_: Starting document offset. By default, you cannot page through more + than 10,000 hits using the from and size parameters. To page through more + hits, use the `search_after` parameter. + :param query: A query to filter which roles to return. If the query parameter + is missing, it is equivalent to a `match_all` query. The query supports a + subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, + `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. + You can query the following information associated with roles: `name`, `description`, + `metadata`, `applications.application`, `applications.privileges`, `applications.resources`. + :param search_after: Search after definition + :param size: The number of hits to return. By default, you cannot page through + more than 10,000 hits using the `from` and `size` parameters. To page through + more hits, use the `search_after` parameter. + :param sort: All public fields of a role are eligible for sorting. In addition, + sort can also be applied to the `_doc` field to sort by index order. 
+ """ + __path_parts: t.Dict[str, str] = {} + __path = "/_security/_query/role" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if from_ is not None: + __body["from"] = from_ + if query is not None: + __body["query"] = query + if search_after is not None: + __body["search_after"] = search_after + if size is not None: + __body["size"] = size + if sort is not None: + __body["sort"] = sort + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.query_role", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("expiration", "metadata", "role_descriptors"), ) diff --git a/elasticsearch_serverless/_async/client/sql.py b/elasticsearch_serverless/_async/client/sql.py index c041681..239df90 100644 --- a/elasticsearch_serverless/_async/client/sql.py +++ b/elasticsearch_serverless/_async/client/sql.py @@ -85,11 +85,14 @@ async def delete_async( ) -> ObjectApiResponse[t.Any]: """ Delete an async SQL search. Delete an async SQL search or a stored synchronous - SQL search. If the search is still running, the API cancels it. + SQL search. If the search is still running, the API cancels it. If the Elasticsearch + security features are enabled, only the following users can use this API to delete + a search: * Users with the `cancel_task` cluster privilege. * The user who first + submitted the search. ``_ - :param id: Identifier for the search. + :param id: The identifier for the search. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -132,20 +135,23 @@ async def get_async( ) -> ObjectApiResponse[t.Any]: """ Get async SQL search results. Get the current status and available results for - an async SQL search or stored synchronous SQL search. + an async SQL search or stored synchronous SQL search. If the Elasticsearch security + features are enabled, only the user who first submitted the SQL search can retrieve + the search using this API. ``_ - :param id: Identifier for the search. - :param delimiter: Separator for CSV results. The API only supports this parameter - for CSV responses. - :param format: Format for the response. You must specify a format using this - parameter or the Accept HTTP header. If you specify both, the API uses this - parameter. - :param keep_alive: Retention period for the search and its results. Defaults + :param id: The identifier for the search. + :param delimiter: The separator for CSV results. The API supports this parameter + only for CSV responses. + :param format: The format for the response. You must specify a format using this + parameter or the `Accept` HTTP header. If you specify both, the API uses + this parameter. + :param keep_alive: The retention period for the search and its results. It defaults to the `keep_alive` period for the original SQL search. - :param wait_for_completion_timeout: Period to wait for complete results. Defaults - to no timeout, meaning the request waits for complete search results. 
+ :param wait_for_completion_timeout: The period to wait for complete results. + It defaults to no timeout, meaning the request waits for complete search + results. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -194,7 +200,7 @@ async def get_async_status( ``_ - :param id: Identifier for the search. + :param id: The identifier for the search. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -221,6 +227,7 @@ async def get_async_status( @_rewrite_parameters( body_fields=( + "allow_partial_search_results", "catalog", "columnar", "cursor", @@ -243,6 +250,7 @@ async def get_async_status( async def query( self, *, + allow_partial_search_results: t.Optional[bool] = None, catalog: t.Optional[str] = None, columnar: t.Optional[bool] = None, cursor: t.Optional[str] = None, @@ -277,36 +285,45 @@ async def query( ``_ - :param catalog: Default catalog (cluster) for queries. If unspecified, the queries - execute on the data in the local cluster only. - :param columnar: If true, the results in a columnar fashion: one row represents - all the values of a certain column from the current page of results. - :param cursor: Cursor used to retrieve a set of paginated results. If you specify - a cursor, the API only uses the `columnar` and `time_zone` request body parameters. - It ignores other request body parameters. - :param fetch_size: The maximum number of rows (or entries) to return in one response - :param field_multi_value_leniency: Throw an exception when encountering multiple - values for a field (default) or be lenient and return the first value from - the list (without any guarantees of what that will be - typically the first - in natural ascending order). - :param filter: Elasticsearch query DSL for additional filtering. - :param format: Format for the response. - :param index_using_frozen: If true, the search can run on frozen indices. Defaults - to false. - :param keep_alive: Retention period for an async or saved synchronous search. - :param keep_on_completion: If true, Elasticsearch stores synchronous searches - if you also specify the wait_for_completion_timeout parameter. If false, - Elasticsearch only stores async searches that don’t finish before the wait_for_completion_timeout. - :param page_timeout: The timeout before a pagination request fails. - :param params: Values for parameters in the query. - :param query: SQL query to run. + :param allow_partial_search_results: If `true`, the response has partial results + when there are shard request timeouts or shard failures. If `false`, the + API returns an error with no partial results. + :param catalog: The default catalog (cluster) for queries. If unspecified, the + queries execute on the data in the local cluster only. + :param columnar: If `true`, the results are in a columnar fashion: one row represents + all the values of a certain column from the current page of results. The + API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. + :param cursor: The cursor used to retrieve a set of paginated results. If you + specify a cursor, the API only uses the `columnar` and `time_zone` request + body parameters. It ignores other request body parameters. + :param fetch_size: The maximum number of rows (or entries) to return in one response. + :param field_multi_value_leniency: If `false`, the API returns an exception when + encountering multiple values for a field. 
If `true`, the API is lenient and + returns the first value from the array with no guarantee of consistent results. + :param filter: The Elasticsearch query DSL for additional filtering. + :param format: The format for the response. You can also specify a format using + the `Accept` HTTP header. If you specify both this parameter and the `Accept` + HTTP header, this parameter takes precedence. + :param index_using_frozen: If `true`, the search can run on frozen indices. + :param keep_alive: The retention period for an async or saved synchronous search. + :param keep_on_completion: If `true`, Elasticsearch stores synchronous searches + if you also specify the `wait_for_completion_timeout` parameter. If `false`, + Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. + :param page_timeout: The minimum retention period for the scroll cursor. After + this time period, a pagination request might fail because the scroll cursor + is no longer available. Subsequent scroll requests prolong the lifetime of + the scroll cursor by the duration of `page_timeout` in the scroll request. + :param params: The values for parameters in the query. + :param query: The SQL query to run. :param request_timeout: The timeout before the request fails. - :param runtime_mappings: Defines one or more runtime fields in the search request. - These fields take precedence over mapped fields with the same name. - :param time_zone: ISO-8601 time zone ID for the search. - :param wait_for_completion_timeout: Period to wait for complete results. Defaults - to no timeout, meaning the request waits for complete search results. If - the search doesn’t finish within this period, the search becomes async. + :param runtime_mappings: One or more runtime fields for the search request. These + fields take precedence over mapped fields with the same name. + :param time_zone: The ISO-8601 time zone ID for the search. + :param wait_for_completion_timeout: The period to wait for complete results. + It defaults to no timeout, meaning the request waits for complete search + results. If the search doesn't finish within this period, the search becomes + async. To save a synchronous search, you must specify this parameter and + the `keep_on_completion` parameter. """ __path_parts: t.Dict[str, str] = {} __path = "/_sql" @@ -323,6 +340,8 @@ async def query( if pretty is not None: __query["pretty"] = pretty if not __body: + if allow_partial_search_results is not None: + __body["allow_partial_search_results"] = allow_partial_search_results if catalog is not None: __body["catalog"] = catalog if columnar is not None: @@ -384,14 +403,15 @@ async def translate( ) -> ObjectApiResponse[t.Any]: """ Translate SQL into Elasticsearch queries. Translate an SQL search into a search - API request containing Query DSL. + API request containing Query DSL. It accepts the same request body parameters + as the SQL search API, excluding `cursor`. ``_ - :param query: SQL query to run. + :param query: The SQL query to run. :param fetch_size: The maximum number of rows (or entries) to return in one response. - :param filter: Elasticsearch query DSL for additional filtering. - :param time_zone: ISO-8601 time zone ID for the search. + :param filter: The Elasticsearch query DSL for additional filtering. + :param time_zone: The ISO-8601 time zone ID for the search. 
""" if query is None and body is None: raise ValueError("Empty value passed for parameter 'query'") diff --git a/elasticsearch_serverless/_async/client/synonyms.py b/elasticsearch_serverless/_async/client/synonyms.py index ee6e657..9cb6d9a 100644 --- a/elasticsearch_serverless/_async/client/synonyms.py +++ b/elasticsearch_serverless/_async/client/synonyms.py @@ -36,11 +36,25 @@ async def delete_synonym( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a synonym set. + Delete a synonym set. You can only delete a synonyms set that is not in use by + any index analyzer. Synonyms sets can be used in synonym graph token filters + and synonym token filters. These synonym filters can be used as part of search + analyzers. Analyzers need to be loaded when an index is restored (such as when + a node starts, or the index becomes open). Even if the analyzer is not used on + any field mapping, it still needs to be loaded on the index recovery phase. If + any analyzers cannot be loaded, the index becomes unavailable and the cluster + status becomes red or yellow as index shards are not available. To prevent that, + synonyms sets that are used in analyzers can't be deleted. A delete request in + this case will return a 400 response code. To remove a synonyms set, you must + first remove all indices that contain analyzers using it. You can migrate an + index by creating a new index that does not contain the token filter with the + synonyms set, and use the reindex API in order to copy over the index data. Once + finished, you can delete the index. When the synonyms set is not used in analyzers, + you will be able to delete it. ``_ - :param id: The id of the synonyms set to be deleted + :param id: The synonyms set identifier to delete. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -81,8 +95,8 @@ async def delete_synonym_rule( ``_ - :param set_id: The id of the synonym set to be updated - :param rule_id: The id of the synonym rule to be deleted + :param set_id: The ID of the synonym set to update. + :param rule_id: The ID of the synonym rule to delete. """ if set_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'set_id'") @@ -131,9 +145,9 @@ async def get_synonym( ``_ - :param id: "The id of the synonyms set to be retrieved - :param from_: Starting offset for query rules to be retrieved - :param size: specifies a max number of query rules to retrieve + :param id: The synonyms set identifier to retrieve. + :param from_: The starting offset for query rules to retrieve. + :param size: The max number of query rules to retrieve. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -178,8 +192,8 @@ async def get_synonym_rule( ``_ - :param set_id: The id of the synonym set to retrieve the synonym rule from - :param rule_id: The id of the synonym rule to retrieve + :param set_id: The ID of the synonym set to retrieve the synonym rule from. + :param rule_id: The ID of the synonym rule to retrieve. """ if set_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'set_id'") @@ -225,10 +239,10 @@ async def get_synonyms_sets( """ Get all synonym sets. Get a summary of all defined synonym sets. - ``_ + ``_ - :param from_: Starting offset - :param size: specifies a max number of results to get + :param from_: The starting offset for synonyms sets to retrieve. + :param size: The maximum number of synonyms sets to retrieve. 
""" __path_parts: t.Dict[str, str] = {} __path = "/_synonyms" @@ -274,12 +288,15 @@ async def put_synonym( """ Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create - multiple synonym sets. + multiple synonym sets. When an existing synonyms set is updated, the search analyzers + that use the synonyms set are reloaded automatically for all indices. This is + equivalent to invoking the reload search analyzers API for all indices that use + the synonyms set. ``_ - :param id: The id of the synonyms set to be created or updated - :param synonyms_set: The synonym set information to update + :param id: The ID of the synonyms set to be created or updated. + :param synonyms_set: The synonym rules definitions for the synonyms set. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -328,13 +345,16 @@ async def put_synonym_rule( ) -> ObjectApiResponse[t.Any]: """ Create or update a synonym rule. Create or update a synonym rule in a synonym - set. + set. If any of the synonym rules included is invalid, the API returns an error. + When you update a synonym rule, all analyzers using the synonyms set will be + reloaded automatically to reflect the new rule. ``_ - :param set_id: The id of the synonym set to be updated with the synonym rule - :param rule_id: The id of the synonym rule to be updated or created - :param synonyms: + :param set_id: The ID of the synonym set. + :param rule_id: The ID of the synonym rule to be updated or created. + :param synonyms: The synonym rule information definition, which must be in Solr + format. """ if set_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'set_id'") diff --git a/elasticsearch_serverless/_async/client/transform.py b/elasticsearch_serverless/_async/client/transform.py index eebc2b3..9e52df1 100644 --- a/elasticsearch_serverless/_async/client/transform.py +++ b/elasticsearch_serverless/_async/client/transform.py @@ -489,6 +489,7 @@ async def reset_transform( force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Reset a transform. Resets a transform. Before you can reset it, you must stop @@ -503,6 +504,8 @@ async def reset_transform( :param force: If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform must be stopped before it can be reset. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ if transform_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'transform_id'") @@ -519,6 +522,8 @@ async def reset_transform( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", diff --git a/elasticsearch_serverless/_sync/client/__init__.py b/elasticsearch_serverless/_sync/client/__init__.py index 569a774..74bca5e 100644 --- a/elasticsearch_serverless/_sync/client/__init__.py +++ b/elasticsearch_serverless/_sync/client/__init__.py @@ -2124,9 +2124,9 @@ def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get cluster info. Returns basic information about the cluster. 
+ Get cluster info. Get basic build, version, and cluster information. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/" diff --git a/elasticsearch_serverless/_sync/client/cluster.py b/elasticsearch_serverless/_sync/client/cluster.py index 23d7318..622b30a 100644 --- a/elasticsearch_serverless/_sync/client/cluster.py +++ b/elasticsearch_serverless/_sync/client/cluster.py @@ -38,9 +38,8 @@ def delete_component_template( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete component templates. Deletes component templates. Component templates - are building blocks for constructing index templates that specify index mappings, - settings, and aliases. + Delete component templates. Component templates are building blocks for constructing + index templates that specify index mappings, settings, and aliases. ``_ @@ -148,7 +147,7 @@ def get_component_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get component templates. Retrieves information about component templates. + Get component templates. Get information about component templates. ``_ @@ -266,20 +265,21 @@ def put_component_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a component template. Creates or updates a component template. - Component templates are building blocks for constructing index templates that - specify index mappings, settings, and aliases. An index template can be composed - of multiple component templates. To use a component template, specify it in an - index template’s `composed_of` list. Component templates are only applied to - new data streams and indices as part of a matching index template. Settings and - mappings specified directly in the index template or the create index request - override any settings or mappings specified in a component template. Component - templates are only used during index creation. For data streams, this includes - data stream creation and the creation of a stream’s backing indices. Changes - to component templates do not affect existing indices, including a stream’s backing - indices. You can use C-style `/* *\\/` block comments in component templates. + Create or update a component template. Component templates are building blocks + for constructing index templates that specify index mappings, settings, and aliases. + An index template can be composed of multiple component templates. To use a component + template, specify it in an index template’s `composed_of` list. Component templates + are only applied to new data streams and indices as part of a matching index + template. Settings and mappings specified directly in the index template or the + create index request override any settings or mappings specified in a component + template. Component templates are only used during index creation. For data streams, + this includes data stream creation and the creation of a stream’s backing indices. + Changes to component templates do not affect existing indices, including a stream’s + backing indices. You can use C-style `/* *\\/` block comments in component templates. You can include comments anywhere in the request body except before the opening - curly bracket. + curly bracket. **Applying component templates** You cannot directly apply a component + template to a data stream or index. To be applied, a component template must + be included in an index template's `composed_of` list. 
``_ @@ -302,8 +302,8 @@ def put_component_template( :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - :param meta: Optional user metadata about the component template. May have any - contents. This map is not automatically generated by Elasticsearch. This + :param meta: Optional user metadata about the component template. It may have + any contents. This map is not automatically generated by Elasticsearch. This information is stored in the cluster state, so keeping it short is preferable. To unset `_meta`, replace the template without specifying this information. :param version: Version number used to manage component templates externally. diff --git a/elasticsearch_serverless/_sync/client/enrich.py b/elasticsearch_serverless/_sync/client/enrich.py index 591142e..3114828 100644 --- a/elasticsearch_serverless/_sync/client/enrich.py +++ b/elasticsearch_serverless/_sync/client/enrich.py @@ -33,6 +33,7 @@ def delete_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -41,6 +42,7 @@ def delete_policy( ``_ :param name: Enrich policy to delete. + :param master_timeout: Period to wait for a connection to the master node. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -53,6 +55,8 @@ def delete_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -73,6 +77,7 @@ def execute_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: @@ -82,6 +87,7 @@ def execute_policy( ``_ :param name: Enrich policy to execute. + :param master_timeout: Period to wait for a connection to the master node. :param wait_for_completion: If `true`, the request blocks other enrich policy execution requests until complete. """ @@ -96,6 +102,8 @@ def execute_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if wait_for_completion is not None: @@ -118,6 +126,7 @@ def get_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -127,6 +136,7 @@ def get_policy( :param name: Comma-separated list of enrich policy names used to limit the request. To return information for all enrich policies, omit this parameter. + :param master_timeout: Period to wait for a connection to the master node. 
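The new `master_timeout` parameter threads through to the query string on each enrich call; a sketch with a hypothetical policy, reusing the `client` from the first sketch:

```python
# Durations are strings such as "30s"; -1 and 0 are the special values that the
# t.Literal types in the signatures above allow.
client.enrich.put_policy(
    name="users-policy",
    match={
        "indices": "users",
        "match_field": "email",
        "enrich_fields": ["first_name", "last_name", "city"],
    },
    master_timeout="30s",
)
client.enrich.execute_policy(
    name="users-policy",
    wait_for_completion=True,
    master_timeout="30s",
)
client.enrich.delete_policy(name="users-policy", master_timeout="30s")
```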
""" __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: @@ -142,6 +152,8 @@ def get_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -165,6 +177,7 @@ def put_policy( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, geo_match: t.Optional[t.Mapping[str, t.Any]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, match: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, range: t.Optional[t.Mapping[str, t.Any]] = None, @@ -178,6 +191,7 @@ def put_policy( :param name: Name of the enrich policy to create or update. :param geo_match: Matches enrich data to incoming documents based on a `geo_shape` query. + :param master_timeout: Period to wait for a connection to the master node. :param match: Matches enrich data to incoming documents based on a `term` query. :param range: Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. @@ -194,6 +208,8 @@ def put_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: diff --git a/elasticsearch_serverless/_sync/client/indices.py b/elasticsearch_serverless/_sync/client/indices.py index dbd2c78..23d81bc 100644 --- a/elasticsearch_serverless/_sync/client/indices.py +++ b/elasticsearch_serverless/_sync/client/indices.py @@ -137,8 +137,12 @@ def analyze( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get tokens from text analysis. The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) - on a text string and returns the resulting tokens. + Get tokens from text analysis. The analyze API performs analysis on a text string + and returns the resulting tokens. Generating excessive amount of tokens may cause + a node to run out of memory. The `index.analyze.max_token_count` setting enables + you to limit the number of tokens that can be produced. If more than this limit + of tokens gets generated, an error occurs. The `_analyze` endpoint without a + specified index will always use `10000` as its limit. ``_ @@ -236,7 +240,26 @@ def create( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an index. Creates a new index. + Create an index. You can use the create index API to add a new index to an Elasticsearch + cluster. When creating an index, you can specify the following: * Settings for + the index. * Mappings for fields in the index. * Index aliases **Wait for active + shards** By default, index creation will only return a response to the client + when the primary copies of each shard have been started, or the request times + out. The index creation response will indicate what happened. For example, `acknowledged` + indicates whether the index was successfully created in the cluster, `while shards_acknowledged` + indicates whether the requisite number of shard copies were started for each + shard in the index before timing out. Note that it is still possible for either + `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation + to be successful. 
These values simply indicate whether the operation completed + before the timeout. If `acknowledged` is false, the request timed out before + the cluster state was updated with the newly created index, but it probably will + be created sometime soon. If `shards_acknowledged` is false, then the request + timed out before the requisite number of shards were started (by default just + the primaries), even if the cluster state was successfully updated to reflect + the newly created index (that is to say, `acknowledged` is `true`). You can change + the default of only waiting for the primary shards to start through the index + setting `index.write.wait_for_active_shards`. Note that changing this setting + will also affect the `wait_for_active_shards` value on all subsequent write operations. ``_ @@ -375,7 +398,11 @@ def delete( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete indices. Deletes one or more indices. + Delete indices. Deleting an index deletes its documents, shards, and metadata. + It does not delete related Kibana components, such as data views, visualizations, + or dashboards. You cannot delete the current write index of a data stream. To + delete the index, you must roll over the data stream so a new write index is + created. You can then use the delete index API to delete the previous write index. ``_ @@ -447,7 +474,7 @@ def delete_alias( """ Delete an alias. Removes a data stream or index from an alias. - ``_ + ``_ :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). @@ -626,8 +653,7 @@ def exists( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Check indices. Checks if one or more indices, index aliases, or data streams - exist. + Check indices. Check if one or more indices, index aliases, or data streams exist. ``_ @@ -894,8 +920,8 @@ def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index information. Returns information about one or more indices. For data - streams, the API returns information about the stream’s backing indices. + Get index information. Get information about one or more indices. For data streams, + the API returns information about the stream’s backing indices. ``_ @@ -986,8 +1012,6 @@ def get_alias( """ Get aliases. Retrieves information for one or more data stream or index aliases. - ``_ - :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. @@ -1200,7 +1224,7 @@ def get_index_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index templates. Returns information about one or more index templates. + Get index templates. Get information about one or more index templates. ``_ @@ -1273,8 +1297,8 @@ def get_mapping( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get mapping definitions. Retrieves mapping definitions for one or more indices. - For data streams, the API retrieves mappings for the stream’s backing indices. + Get mapping definitions. For data streams, the API retrieves mappings for the + stream’s backing indices. ``_ @@ -1358,8 +1382,8 @@ def get_settings( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index settings. Returns setting information for one or more indices. For - data streams, returns setting information for the stream’s backing indices. 
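The `acknowledged`/`shards_acknowledged` discussion above translates to a response check like this sketch (hypothetical index; reusing the `client` from the first sketch):

```python
resp = client.indices.create(
    index="my-index",
    settings={"number_of_shards": 1},
    mappings={"properties": {"field1": {"type": "keyword"}}},
    wait_for_active_shards=1,
)
# Either flag can be false on a timeout even though the index may still be
# created successfully a moment later.
if not resp["acknowledged"]:
    print("cluster state update timed out; the index may appear shortly")
elif not resp["shards_acknowledged"]:
    print("index created, but shard startup timed out")
```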
+ Get index settings. Get setting information for one or more indices. For data + streams, it returns setting information for the stream's backing indices. ``_ @@ -1762,7 +1786,33 @@ def put_index_template( ) -> ObjectApiResponse[t.Any]: """ Create or update an index template. Index templates define settings, mappings, - and aliases that can be applied automatically to new indices. + and aliases that can be applied automatically to new indices. Elasticsearch applies + templates to new indices based on a wildcard pattern that matches the index + name. Index templates are applied during data stream or index creation. For data + streams, these settings and mappings are applied when the stream's backing indices + are created. Settings and mappings specified in a create index API request override + any settings or mappings specified in an index template. Changes to index templates + do not affect existing indices, including the existing backing indices of a data + stream. You can use C-style `/* *\\/` block comments in index templates. You + can include comments anywhere in the request body, except before the opening + curly bracket. **Multiple matching templates** If multiple index templates match + the name of a new index or data stream, the template with the highest priority + is used. Multiple templates with overlapping index patterns at the same priority + are not allowed and an error will be thrown when attempting to create a template + matching an existing index template at identical priorities. **Composing aliases, + mappings, and settings** When multiple component templates are specified in the + `composed_of` field for an index template, they are merged in the order specified, + meaning that later component templates override earlier component templates. + Any mappings, settings, or aliases from the parent index template are merged + in next. Finally, any configuration on the index request itself is merged. Mapping + definitions are merged recursively, which means that later mapping components + can introduce new field mappings and update the mapping configuration. If a field + mapping is already contained in an earlier component, its definition will be + completely overwritten by the later one. This recursive merging strategy applies + not only to field mappings, but also root options like `dynamic_templates` and + `meta`. If an earlier component contains a `dynamic_templates` block, then by + default new `dynamic_templates` entries are appended onto the end. If an entry + already exists with the same key, then it is overwritten by the new definition. ``_ @@ -1792,8 +1842,11 @@ def put_index_template( :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - :param meta: Optional user metadata about the index template. May have any contents. - This map is not automatically generated by Elasticsearch. + :param meta: Optional user metadata about the index template. It may have any + contents. It is not automatically generated or used by Elasticsearch. This + user-defined object is stored in the cluster state, so keeping it short is + preferable. To unset the metadata, replace the template without specifying + it. :param priority: Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen.
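To illustrate the "Multiple matching templates" rule above, a sketch with two hypothetical templates at different priorities:

```python
# Overlapping patterns must use distinct priorities; the highest priority wins
# for any new index that matches both.
client.indices.put_index_template(
    name="logs-default",
    index_patterns=["logs-*"],
    priority=100,
    template={"settings": {"number_of_replicas": 1}},
)
client.indices.put_index_template(
    name="logs-critical",
    index_patterns=["logs-critical-*"],
    priority=200,
    template={"settings": {"number_of_replicas": 2}},
)
```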
If no priority is specified, the template is treated as though @@ -1802,7 +1855,9 @@ def put_index_template( :param template: Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration. :param version: Version number used to manage index templates externally. This - number is not automatically generated by Elasticsearch. + number is not automatically generated by Elasticsearch. External systems + can use these version numbers to simplify template management. To unset a + version, replace the template without specifying one. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -1921,9 +1976,27 @@ def put_mapping( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update field mappings. Adds new fields to an existing data stream or index. You - can also use this API to change the search settings of existing fields. For data - streams, these changes are applied to all backing indices by default. + Update field mappings. Add new fields to an existing data stream or index. You + can also use this API to change the search settings of existing fields and add + new properties to existing object fields. For data streams, these changes are + applied to all backing indices by default. **Add multi-fields to an existing + field** Multi-fields let you index the same field in different ways. You can + use this API to update the `fields` mapping parameter and enable multi-fields for + an existing field. WARNING: If an index (or data stream) contains documents when + you add a multi-field, those documents will not have values for the new multi-field. + You can populate the new multi-field with the update by query API. **Change supported + mapping parameters for an existing field** The documentation for each mapping + parameter indicates whether you can update it for an existing field using this + API. For example, you can use the update mapping API to update the `ignore_above` + parameter. **Change the mapping of an existing field** Except for supported mapping + parameters, you can't change the mapping or field type of an existing field. + Changing an existing field could invalidate data that's already indexed. If you + need to change the mapping of a field in a data stream's backing indices, refer + to documentation about modifying data streams. If you need to change the mapping + of a field in other indices, create a new index with the correct mapping and + reindex your data into that index. **Rename a field** Renaming a field would + invalidate data already indexed under the old field name. Instead, add an alias + field to create an alternate field name. ``_ @@ -2054,6 +2127,19 @@ def put_settings( """ Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. + To revert a setting to the default value, use a null value. The list of per-index + settings that can be updated dynamically on live indices can be found in index + module documentation. To preserve existing settings from being updated, set the + `preserve_existing` parameter to `true`. NOTE: You can only define new analyzers + on closed indices. To add an analyzer, you must close the index, define the analyzer, + and reopen the index. You cannot close the write index of a data stream. To update + the analyzer for a data stream's write index and future backing indices, update + the analyzer in the index template used by the stream.
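A sketch of the multi-field workflow described in the put_mapping docstring above (index and field names are hypothetical):

```python
# Add a keyword multi-field to an existing `city` text field.
client.indices.put_mapping(
    index="my-index",
    properties={
        "city": {
            "type": "text",
            "fields": {"raw": {"type": "keyword"}},  # the new multi-field
        }
    },
)
# Existing documents gain `city.raw` values only after an update by query.
client.update_by_query(index="my-index", conflicts="proceed")
```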
Then roll over the data + stream to apply the new analyzer to the stream's write index and future backing + indices. This affects searches and any new data added to the stream after the + rollover. However, it does not affect the data stream's backing indices or their + existing data. To change the analyzer for existing backing indices, you must + create a new data stream and reindex your data into it. ``_ @@ -2150,7 +2236,17 @@ def refresh( """ Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation - on the stream’s backing indices. + on the stream’s backing indices. By default, Elasticsearch periodically refreshes + indices every second, but only on indices that have received one search request + or more in the last 30 seconds. You can change this default interval with the + `index.refresh_interval` setting. Refresh requests are synchronous and do not + return a response until the refresh operation completes. Refreshes are resource-intensive. + To ensure good cluster performance, it's recommended to wait for Elasticsearch's + periodic refresh rather than performing an explicit refresh when possible. If + your application workflow indexes documents and then runs a search to retrieve + the indexed document, it's recommended to use the index API's `refresh=wait_for` + query parameter option. This option ensures the indexing operation waits for + a periodic refresh before running the search. ``_ @@ -2294,7 +2390,33 @@ def rollover( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Roll over to a new index. Creates a new index for a data stream or index alias. + Roll over to a new index. TIP: It is recommended to use the index lifecycle rollover + action to automate rollovers. The rollover API creates a new index for a data + stream or index alias. The API behavior depends on the rollover target. **Roll + over a data stream** If you roll over a data stream, the API creates a new write + index for the stream. The stream's previous write index becomes a regular backing + index. A rollover also increments the data stream's generation. **Roll over an + index alias with a write index** TIP: Prior to Elasticsearch 7.9, you'd typically + use an index alias with a write index to manage time series data. Data streams + replace this functionality, require less maintenance, and automatically integrate + with data tiers. If an index alias points to multiple indices, one of the indices + must be a write index. The rollover API creates a new write index for the alias + with `is_write_index` set to `true`. The API also sets `is_write_index` to `false` + for the previous write index. **Roll over an index alias with one index** If + you roll over an index alias that points to only one index, the API creates a + new index for the alias and removes the original index from the alias. NOTE: + A rollover creates a new index and is subject to the `wait_for_active_shards` + setting. **Increment index names for an alias** When you roll over an index alias, + you can specify a name for the new index. If you don't specify a name and the + current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, + the new index name increments that number. For example, if you roll over an alias + with a current index of `my-index-000001`, the rollover creates a new index named + `my-index-000002`.
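The refresh guidance above, as a sketch: prefer the index API's `refresh="wait_for"` over an explicit refresh (hypothetical index, reusing `client`):

```python
# The index call returns once a periodic refresh has made the document
# searchable, avoiding a costly explicit refresh.
client.index(
    index="my-index",
    id="1",
    document={"user": "kim"},
    refresh="wait_for",
)
resp = client.search(index="my-index", query={"term": {"user": "kim"}})

# An explicit refresh remains available but is resource-intensive.
client.indices.refresh(index="my-index")
```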
This number is always six characters and zero-padded, regardless + of the previous index's name. If you use an index alias for time series data, + you can use date math in the index name to track the rollover date. For example, + you can create an alias that points to an index named `<my-index-{now/d}-000001>`. + If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`. + If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`. ``_ @@ -2390,8 +2512,8 @@ def simulate_index_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate an index. Returns the index configuration that would be applied to the - specified index from an existing index template. + Simulate an index. Get the index configuration that would be applied to the specified + index from an existing index template. ``_ @@ -2468,7 +2590,7 @@ def simulate_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate an index template. Returns the index configuration that would be applied + Simulate an index template. Get the index configuration that would be applied by a particular index template. ``_ diff --git a/elasticsearch_serverless/_sync/client/logstash.py b/elasticsearch_serverless/_sync/client/logstash.py index 0d5585d..b006c34 100644 --- a/elasticsearch_serverless/_sync/client/logstash.py +++ b/elasticsearch_serverless/_sync/client/logstash.py @@ -37,7 +37,8 @@ def delete_pipeline( ) -> ObjectApiResponse[t.Any]: """ Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central - Management. + Management. If the request succeeds, you receive an empty response with an appropriate + status code. ``_ diff --git a/elasticsearch_serverless/_sync/client/ml.py b/elasticsearch_serverless/_sync/client/ml.py index 64709c9..6dfa9b4 100644 --- a/elasticsearch_serverless/_sync/client/ml.py +++ b/elasticsearch_serverless/_sync/client/ml.py @@ -457,6 +457,7 @@ def delete_trained_model( force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Delete an unreferenced trained model. The request deletes a trained inference @@ -467,6 +468,8 @@ def delete_trained_model( :param model_id: The unique identifier of the trained model. :param force: Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") @@ -483,6 +486,8 @@ def delete_trained_model( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", @@ -2074,7 +2079,11 @@ def put_data_frame_analytics( """ Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination - index. + index. By default, the query used in the source configuration is `{"match_all": + {}}`. If the destination index does not exist, it is created automatically when + you start the job.
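The alias rollover naming rules above, sketched with a hypothetical alias; the date-math index name is passed as a raw string, on the assumption that the client URL-encodes it on the wire:

```python
# Initial write index behind an alias, using date math in the name.
client.indices.create(
    index="<my-index-{now/d}-000001>",
    aliases={"my-alias": {"is_write_index": True}},
)
# Rolling over increments the zero-padded trailing number, for example to
# my-index-2099.05.07-000002 when rolled over on May 7, 2099.
client.indices.rollover(alias="my-alias")
```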
If you supply only a subset of the regression or classification + parameters, hyperparameter optimization occurs. It determines a value for each + of the undefined parameters. ``_ @@ -2250,8 +2259,9 @@ def put_datafeed( Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval - (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) - at each interval. When Elasticsearch security features are enabled, your datafeed + (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) + at each interval. By default, the datafeed uses the following query: `{"match_all": + {"boost": 1}}`. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or @@ -2514,7 +2524,8 @@ def put_job( ) -> ObjectApiResponse[t.Any]: """ Create an anomaly detection job. If you include a `datafeed_config`, you must - have read index privileges on the source index. + have read index privileges on the source index. If you include a `datafeed_config` + but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`. ``_ diff --git a/elasticsearch_serverless/_sync/client/query_rules.py b/elasticsearch_serverless/_sync/client/query_rules.py index 57483d7..fa1e742 100644 --- a/elasticsearch_serverless/_sync/client/query_rules.py +++ b/elasticsearch_serverless/_sync/client/query_rules.py @@ -38,7 +38,9 @@ def delete_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a query rule. Delete a query rule within a query ruleset. + Delete a query rule. Delete a query rule within a query ruleset. This is a destructive + action that is only recoverable by re-adding the same rule with the create or + update query rule API. ``_ @@ -86,7 +88,8 @@ def delete_ruleset( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a query ruleset. + Delete a query ruleset. Remove a query ruleset and its associated data. This + is a destructive action that is not recoverable. ``_ @@ -222,8 +225,8 @@ def list_rulesets( ``_ - :param from_: Starting offset (default: 0) - :param size: specifies a max number of results to get + :param from_: The offset from the first result to fetch. + :param size: The maximum number of results to retrieve. """ __path_parts: t.Dict[str, str] = {} __path = "/_query_rules" @@ -272,16 +275,25 @@ def put_rule( ) -> ObjectApiResponse[t.Any]: """ Create or update a query rule. Create or update a query rule within a query ruleset. + IMPORTANT: Due to limitations within pinned queries, you can only pin documents + using `ids` or `docs`, but cannot use both in a single rule. It is advised to use one + or the other in query rulesets, to avoid errors. Additionally, pinned queries + have a maximum limit of 100 pinned hits. If multiple matching rules pin more + than 100 documents, only the first 100 documents are pinned in the order they + are specified in the ruleset. ``_ :param ruleset_id: The unique identifier of the query ruleset containing the - rule to be created or updated.
:param rule_id: The unique identifier of the query rule within the specified - ruleset to be created or updated - :param actions: - :param criteria: - :param type: + ruleset to be created or updated. + :param actions: The actions to take when the rule is matched. The format of this + action depends on the rule type. + :param criteria: The criteria that must be met for the rule to be applied. If + multiple criteria are specified for a rule, all criteria must be met for + the rule to be applied. + :param type: The type of rule. :param priority: """ if ruleset_id in SKIP_IN_PATH: @@ -346,12 +358,19 @@ def put_ruleset( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a query ruleset. + Create or update a query ruleset. There is a limit of 100 rules per ruleset. + This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` + cluster setting. IMPORTANT: Due to limitations within pinned queries, you can + only select documents using `ids` or `docs`, but cannot use both in a single rule. + It is advised to use one or the other in query rulesets, to avoid errors. Additionally, + pinned queries have a maximum limit of 100 pinned hits. If multiple matching + rules pin more than 100 documents, only the first 100 documents are pinned in + the order they are specified in the ruleset. ``_ :param ruleset_id: The unique identifier of the query ruleset to be created or - updated + updated. :param rules: """ if ruleset_id in SKIP_IN_PATH: @@ -406,7 +425,9 @@ def test( :param ruleset_id: The unique identifier of the query ruleset to be created or updated - :param match_criteria: + :param match_criteria: The match criteria to apply to rules in the given query + ruleset. Match criteria should match the keys defined in the `criteria.metadata` + field of the rule. """ if ruleset_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'ruleset_id'") diff --git a/elasticsearch_serverless/_sync/client/security.py b/elasticsearch_serverless/_sync/client/security.py index 2fc8519..0050d31 100644 --- a/elasticsearch_serverless/_sync/client/security.py +++ b/elasticsearch_serverless/_sync/client/security.py @@ -903,6 +903,91 @@ def query_api_keys( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("from_", "query", "search_after", "size", "sort"), + parameter_aliases={"from": "from_"}, + ) + def query_role( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + from_: t.Optional[int] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + query: t.Optional[t.Mapping[str, t.Any]] = None, + search_after: t.Optional[ + t.Sequence[t.Union[None, bool, float, int, str, t.Any]] + ] = None, + size: t.Optional[int] = None, + sort: t.Optional[ + t.Union[ + t.Sequence[t.Union[str, t.Mapping[str, t.Any]]], + t.Union[str, t.Mapping[str, t.Any]], + ] + ] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Find roles with a query. Get roles in a paginated manner. You can optionally + filter the results with a query. + + ``_ + + :param from_: Starting document offset. By default, you cannot page through more + than 10,000 hits using the `from` and `size` parameters. To page through more + hits, use the `search_after` parameter. + :param query: A query to filter which roles to return. If the query parameter + is missing, it is equivalent to a `match_all` query.
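A sketch of a pinned query rule honoring the `ids`/`docs` restriction noted above (ruleset, rule, and document IDs are hypothetical):

```python
# Pin with `ids` only; mixing `ids` and `docs` in one rule is rejected, and at
# most 100 pinned hits take effect across matching rules.
client.query_rules.put_rule(
    ruleset_id="my-ruleset",
    rule_id="promote-pugs",
    type="pinned",
    criteria=[{"type": "exact", "metadata": "user_query", "values": ["pugs"]}],
    actions={"ids": ["doc-1", "doc-2"]},
)

# Check which rules in the ruleset would match a given criteria value.
client.query_rules.test(
    ruleset_id="my-ruleset",
    match_criteria={"user_query": "pugs"},
)
```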
The query supports a + subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, + `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. + You can query the following information associated with roles: `name`, `description`, + `metadata`, `applications.application`, `applications.privileges`, `applications.resources`. + :param search_after: Search after definition + :param size: The number of hits to return. By default, you cannot page through + more than 10,000 hits using the `from` and `size` parameters. To page through + more hits, use the `search_after` parameter. + :param sort: All public fields of a role are eligible for sorting. In addition, + sort can also be applied to the `_doc` field to sort by index order. + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_security/_query/role" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if from_ is not None: + __body["from"] = from_ + if query is not None: + __body["query"] = query + if search_after is not None: + __body["search_after"] = search_after + if size is not None: + __body["size"] = size + if sort is not None: + __body["sort"] = sort + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.query_role", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("expiration", "metadata", "role_descriptors"), ) diff --git a/elasticsearch_serverless/_sync/client/sql.py b/elasticsearch_serverless/_sync/client/sql.py index bf19021..9ab4f94 100644 --- a/elasticsearch_serverless/_sync/client/sql.py +++ b/elasticsearch_serverless/_sync/client/sql.py @@ -85,11 +85,14 @@ def delete_async( ) -> ObjectApiResponse[t.Any]: """ Delete an async SQL search. Delete an async SQL search or a stored synchronous - SQL search. If the search is still running, the API cancels it. + SQL search. If the search is still running, the API cancels it. If the Elasticsearch + security features are enabled, only the following users can use this API to delete + a search: * Users with the `cancel_task` cluster privilege. * The user who first + submitted the search. ``_ - :param id: Identifier for the search. + :param id: The identifier for the search. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -132,20 +135,23 @@ def get_async( ) -> ObjectApiResponse[t.Any]: """ Get async SQL search results. Get the current status and available results for - an async SQL search or stored synchronous SQL search. + an async SQL search or stored synchronous SQL search. If the Elasticsearch security + features are enabled, only the user who first submitted the SQL search can retrieve + the search using this API. ``_ - :param id: Identifier for the search. - :param delimiter: Separator for CSV results. The API only supports this parameter - for CSV responses. - :param format: Format for the response. You must specify a format using this - parameter or the Accept HTTP header. 
If you specify both, the API uses this - parameter. - :param keep_alive: Retention period for the search and its results. Defaults + :param id: The identifier for the search. + :param delimiter: The separator for CSV results. The API supports this parameter + only for CSV responses. + :param format: The format for the response. You must specify a format using this + parameter or the `Accept` HTTP header. If you specify both, the API uses + this parameter. + :param keep_alive: The retention period for the search and its results. It defaults to the `keep_alive` period for the original SQL search. - :param wait_for_completion_timeout: Period to wait for complete results. Defaults - to no timeout, meaning the request waits for complete search results. + :param wait_for_completion_timeout: The period to wait for complete results. + It defaults to no timeout, meaning the request waits for complete search + results. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -194,7 +200,7 @@ def get_async_status( ``_ - :param id: Identifier for the search. + :param id: The identifier for the search. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -221,6 +227,7 @@ def get_async_status( @_rewrite_parameters( body_fields=( + "allow_partial_search_results", "catalog", "columnar", "cursor", @@ -243,6 +250,7 @@ def get_async_status( def query( self, *, + allow_partial_search_results: t.Optional[bool] = None, catalog: t.Optional[str] = None, columnar: t.Optional[bool] = None, cursor: t.Optional[str] = None, @@ -277,36 +285,45 @@ def query( ``_ - :param catalog: Default catalog (cluster) for queries. If unspecified, the queries - execute on the data in the local cluster only. - :param columnar: If true, the results in a columnar fashion: one row represents - all the values of a certain column from the current page of results. - :param cursor: Cursor used to retrieve a set of paginated results. If you specify - a cursor, the API only uses the `columnar` and `time_zone` request body parameters. - It ignores other request body parameters. - :param fetch_size: The maximum number of rows (or entries) to return in one response - :param field_multi_value_leniency: Throw an exception when encountering multiple - values for a field (default) or be lenient and return the first value from - the list (without any guarantees of what that will be - typically the first - in natural ascending order). - :param filter: Elasticsearch query DSL for additional filtering. - :param format: Format for the response. - :param index_using_frozen: If true, the search can run on frozen indices. Defaults - to false. - :param keep_alive: Retention period for an async or saved synchronous search. - :param keep_on_completion: If true, Elasticsearch stores synchronous searches - if you also specify the wait_for_completion_timeout parameter. If false, - Elasticsearch only stores async searches that don’t finish before the wait_for_completion_timeout. - :param page_timeout: The timeout before a pagination request fails. - :param params: Values for parameters in the query. - :param query: SQL query to run. + :param allow_partial_search_results: If `true`, the response has partial results + when there are shard request timeouts or shard failures. If `false`, the + API returns an error with no partial results. + :param catalog: The default catalog (cluster) for queries. If unspecified, the + queries execute on the data in the local cluster only. 
+ :param columnar: If `true`, the results are in a columnar fashion: one row represents + all the values of a certain column from the current page of results. The + API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. + :param cursor: The cursor used to retrieve a set of paginated results. If you + specify a cursor, the API only uses the `columnar` and `time_zone` request + body parameters. It ignores other request body parameters. + :param fetch_size: The maximum number of rows (or entries) to return in one response. + :param field_multi_value_leniency: If `false`, the API returns an exception when + encountering multiple values for a field. If `true`, the API is lenient and + returns the first value from the array with no guarantee of consistent results. + :param filter: The Elasticsearch query DSL for additional filtering. + :param format: The format for the response. You can also specify a format using + the `Accept` HTTP header. If you specify both this parameter and the `Accept` + HTTP header, this parameter takes precedence. + :param index_using_frozen: If `true`, the search can run on frozen indices. + :param keep_alive: The retention period for an async or saved synchronous search. + :param keep_on_completion: If `true`, Elasticsearch stores synchronous searches + if you also specify the `wait_for_completion_timeout` parameter. If `false`, + Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. + :param page_timeout: The minimum retention period for the scroll cursor. After + this time period, a pagination request might fail because the scroll cursor + is no longer available. Subsequent scroll requests prolong the lifetime of + the scroll cursor by the duration of `page_timeout` in the scroll request. + :param params: The values for parameters in the query. + :param query: The SQL query to run. :param request_timeout: The timeout before the request fails. - :param runtime_mappings: Defines one or more runtime fields in the search request. - These fields take precedence over mapped fields with the same name. - :param time_zone: ISO-8601 time zone ID for the search. - :param wait_for_completion_timeout: Period to wait for complete results. Defaults - to no timeout, meaning the request waits for complete search results. If - the search doesn’t finish within this period, the search becomes async. + :param runtime_mappings: One or more runtime fields for the search request. These + fields take precedence over mapped fields with the same name. + :param time_zone: The ISO-8601 time zone ID for the search. + :param wait_for_completion_timeout: The period to wait for complete results. + It defaults to no timeout, meaning the request waits for complete search + results. If the search doesn't finish within this period, the search becomes + async. To save a synchronous search, you must specify this parameter and + the `keep_on_completion` parameter. """ __path_parts: t.Dict[str, str] = {} __path = "/_sql" @@ -323,6 +340,8 @@ def query( if pretty is not None: __query["pretty"] = pretty if not __body: + if allow_partial_search_results is not None: + __body["allow_partial_search_results"] = allow_partial_search_results if catalog is not None: __body["catalog"] = catalog if columnar is not None: @@ -384,14 +403,15 @@ def translate( ) -> ObjectApiResponse[t.Any]: """ Translate SQL into Elasticsearch queries. Translate an SQL search into a search - API request containing Query DSL. + API request containing Query DSL. 
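Tying the SQL parameters above together, a sketch that runs a query and pages with the cursor (the `logs` index is hypothetical):

```python
resp = client.sql.query(
    query="SELECT page, AVG(duration) FROM logs GROUP BY page",
    fetch_size=1000,
    columnar=True,                       # honored for CBOR/JSON/SMILE/YAML only
    allow_partial_search_results=False,  # fail rather than return partial data
    time_zone="UTC",
)

# Only `columnar` and `time_zone` are honored alongside a cursor.
while resp.body.get("cursor"):
    resp = client.sql.query(cursor=resp.body["cursor"], columnar=True)
```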
It accepts the same request body parameters + as the SQL search API, excluding `cursor`. ``_ - :param query: SQL query to run. + :param query: The SQL query to run. :param fetch_size: The maximum number of rows (or entries) to return in one response. - :param filter: Elasticsearch query DSL for additional filtering. - :param time_zone: ISO-8601 time zone ID for the search. + :param filter: The Elasticsearch query DSL for additional filtering. + :param time_zone: The ISO-8601 time zone ID for the search. """ if query is None and body is None: raise ValueError("Empty value passed for parameter 'query'") diff --git a/elasticsearch_serverless/_sync/client/synonyms.py b/elasticsearch_serverless/_sync/client/synonyms.py index 453a85a..a13c3ba 100644 --- a/elasticsearch_serverless/_sync/client/synonyms.py +++ b/elasticsearch_serverless/_sync/client/synonyms.py @@ -36,11 +36,25 @@ def delete_synonym( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a synonym set. + Delete a synonym set. You can only delete a synonyms set that is not in use by + any index analyzer. Synonyms sets can be used in synonym graph token filters + and synonym token filters. These synonym filters can be used as part of search + analyzers. Analyzers need to be loaded when an index is restored (such as when + a node starts, or the index becomes open). Even if the analyzer is not used on + any field mapping, it still needs to be loaded on the index recovery phase. If + any analyzers cannot be loaded, the index becomes unavailable and the cluster + status becomes red or yellow as index shards are not available. To prevent that, + synonyms sets that are used in analyzers can't be deleted. A delete request in + this case will return a 400 response code. To remove a synonyms set, you must + first remove all indices that contain analyzers using it. You can migrate an + index by creating a new index that does not contain the token filter with the + synonyms set, and use the reindex API in order to copy over the index data. Once + finished, you can delete the index. When the synonyms set is not used in analyzers, + you will be able to delete it. ``_ - :param id: The id of the synonyms set to be deleted + :param id: The synonyms set identifier to delete. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -81,8 +95,8 @@ def delete_synonym_rule( ``_ - :param set_id: The id of the synonym set to be updated - :param rule_id: The id of the synonym rule to be deleted + :param set_id: The ID of the synonym set to update. + :param rule_id: The ID of the synonym rule to delete. """ if set_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'set_id'") @@ -131,9 +145,9 @@ def get_synonym( ``_ - :param id: "The id of the synonyms set to be retrieved - :param from_: Starting offset for query rules to be retrieved - :param size: specifies a max number of query rules to retrieve + :param id: The synonyms set identifier to retrieve. + :param from_: The starting offset for query rules to retrieve. + :param size: The max number of query rules to retrieve. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -178,8 +192,8 @@ def get_synonym_rule( ``_ - :param set_id: The id of the synonym set to retrieve the synonym rule from - :param rule_id: The id of the synonym rule to retrieve + :param set_id: The ID of the synonym set to retrieve the synonym rule from. + :param rule_id: The ID of the synonym rule to retrieve. 
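The deletion constraint described above implies a migration like this sketch before a synonyms set can be removed (index and set names are hypothetical):

```python
# A synonyms set still used by any index analyzer cannot be deleted (HTTP 400).
client.indices.create(index="new-index")  # mappings omit the synonym filter
client.reindex(
    source={"index": "old-index"},
    dest={"index": "new-index"},
    wait_for_completion=True,
)
client.indices.delete(index="old-index")
client.synonyms.delete_synonym(id="my-synonyms-set")  # now succeeds
```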
""" if set_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'set_id'") @@ -225,10 +239,10 @@ def get_synonyms_sets( """ Get all synonym sets. Get a summary of all defined synonym sets. - ``_ + ``_ - :param from_: Starting offset - :param size: specifies a max number of results to get + :param from_: The starting offset for synonyms sets to retrieve. + :param size: The maximum number of synonyms sets to retrieve. """ __path_parts: t.Dict[str, str] = {} __path = "/_synonyms" @@ -274,12 +288,15 @@ def put_synonym( """ Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create - multiple synonym sets. + multiple synonym sets. When an existing synonyms set is updated, the search analyzers + that use the synonyms set are reloaded automatically for all indices. This is + equivalent to invoking the reload search analyzers API for all indices that use + the synonyms set. ``_ - :param id: The id of the synonyms set to be created or updated - :param synonyms_set: The synonym set information to update + :param id: The ID of the synonyms set to be created or updated. + :param synonyms_set: The synonym rules definitions for the synonyms set. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -328,13 +345,16 @@ def put_synonym_rule( ) -> ObjectApiResponse[t.Any]: """ Create or update a synonym rule. Create or update a synonym rule in a synonym - set. + set. If any of the synonym rules included is invalid, the API returns an error. + When you update a synonym rule, all analyzers using the synonyms set will be + reloaded automatically to reflect the new rule. ``_ - :param set_id: The id of the synonym set to be updated with the synonym rule - :param rule_id: The id of the synonym rule to be updated or created - :param synonyms: + :param set_id: The ID of the synonym set. + :param rule_id: The ID of the synonym rule to be updated or created. + :param synonyms: The synonym rule information definition, which must be in Solr + format. """ if set_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'set_id'") diff --git a/elasticsearch_serverless/_sync/client/transform.py b/elasticsearch_serverless/_sync/client/transform.py index c09881b..f1c8c34 100644 --- a/elasticsearch_serverless/_sync/client/transform.py +++ b/elasticsearch_serverless/_sync/client/transform.py @@ -489,6 +489,7 @@ def reset_transform( force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Reset a transform. Resets a transform. Before you can reset it, you must stop @@ -503,6 +504,8 @@ def reset_transform( :param force: If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform must be stopped before it can be reset. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ if transform_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'transform_id'") @@ -519,6 +522,8 @@ def reset_transform( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST",