diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index fcad59b33..3ad11a9b8 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -872,7 +872,7 @@ async def count( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns number of documents matching a query. + Count search results. Get the number of documents matching a query. ``_ @@ -2274,7 +2274,26 @@ async def health_report( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the health of the cluster. + Get the cluster health. Get a report with the health status of an Elasticsearch + cluster. The report contains a list of indicators that compose Elasticsearch + functionality. Each indicator has a health status of: green, unknown, yellow + or red. The indicator will provide an explanation and metadata describing the + reason for its current health status. The cluster’s status is controlled by the + worst indicator status. In the event that an indicator’s status is non-green, + a list of impacts may be present in the indicator result which detail the functionalities + that are negatively affected by the health issue. Each impact carries with it + a severity level, an area of the system that is affected, and a simple description + of the impact on the system. Some health indicators can determine the root cause + of a health problem and prescribe a set of steps that can be performed in order + to improve the health of the system. The root cause and remediation steps are + encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause + analysis, an action containing a brief description of the steps to take to fix + the problem, the list of affected resources (if applicable), and a detailed step-by-step + troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators + perform root cause analysis of non-green health statuses. This can be computationally + expensive when called frequently. When setting up automated polling of the API + for health status, set verbose to false to disable the more expensive analysis + logic. ``_ @@ -3079,6 +3098,7 @@ async def open_point_in_time( *, index: t.Union[str, t.Sequence[str]], keep_alive: t.Union[str, t.Literal[-1], t.Literal[0]], + allow_partial_search_results: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ @@ -3113,6 +3133,10 @@ async def open_point_in_time( :param index: A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices :param keep_alive: Extends the time to live of the corresponding point in time. + :param allow_partial_search_results: If `false`, creating a point in time request + when a shard is missing or unavailable will throw an exception. If `true`, + the point in time will contain all the shards that are available at the time + of the request. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. 
Supports comma-separated values, such @@ -3135,6 +3159,8 @@ async def open_point_in_time( __body: t.Dict[str, t.Any] = body if body is not None else {} if keep_alive is not None: __query["keep_alive"] = keep_alive + if allow_partial_search_results is not None: + __query["allow_partial_search_results"] = allow_partial_search_results if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: diff --git a/elasticsearch/_async/client/async_search.py b/elasticsearch/_async/client/async_search.py index 4369b8224..2379b5002 100644 --- a/elasticsearch/_async/client/async_search.py +++ b/elasticsearch/_async/client/async_search.py @@ -145,6 +145,7 @@ async def status( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -156,6 +157,9 @@ async def status( ``_ :param id: A unique identifier for the async search. + :param keep_alive: Specifies how long the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -168,6 +172,8 @@ async def status( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if keep_alive is not None: + __query["keep_alive"] = keep_alive if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -258,7 +264,6 @@ async def submit( ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, indices_boost: t.Optional[t.Sequence[t.Mapping[str, float]]] = None, - keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, keep_on_completion: t.Optional[bool] = None, knn: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, @@ -268,7 +273,6 @@ async def submit( min_score: t.Optional[float] = None, pit: t.Optional[t.Mapping[str, t.Any]] = None, post_filter: t.Optional[t.Mapping[str, t.Any]] = None, - pre_filter_shard_size: t.Optional[int] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, profile: t.Optional[bool] = None, @@ -282,7 +286,6 @@ async def submit( routing: t.Optional[str] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, - scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, search_after: t.Optional[ t.Sequence[t.Union[None, bool, float, int, str, t.Any]] ] = None, @@ -375,9 +378,6 @@ async def submit( :param ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :param indices_boost: Boosts the _score of documents from specified indices. - :param keep_alive: Specifies how long the async search needs to be available. - Ongoing async searches and any saved search results are deleted after this - period. :param keep_on_completion: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. :param knn: Defines the approximate kNN search to run. @@ -392,10 +392,6 @@ async def submit( :param pit: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an `<index>` in the request path.
:param post_filter: - :param pre_filter_shard_size: The default value cannot be changed, which enforces - the execution of a pre-filter roundtrip to retrieve statistics from each - shard so that the ones that surely don’t hold any document matching the query - get skipped. :param preference: Specify the node or shard the operation should be performed on (default: random) :param profile: @@ -404,13 +400,13 @@ async def submit( :param request_cache: Specify if request cache should be used for this request or not, defaults to true :param rescore: - :param rest_total_hits_as_int: + :param rest_total_hits_as_int: Indicates whether hits.total should be rendered + as an integer or an object in the rest search response :param routing: A comma-separated list of specific routing values :param runtime_mappings: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. :param script_fields: Retrieve a script evaluation (based on different fields) for each hit. - :param scroll: :param search_after: :param search_type: Search operation type :param seq_no_primary_term: If true, returns sequence number and primary term @@ -507,16 +503,12 @@ async def submit( __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable - if keep_alive is not None: - __query["keep_alive"] = keep_alive if keep_on_completion is not None: __query["keep_on_completion"] = keep_on_completion if lenient is not None: __query["lenient"] = lenient if max_concurrent_shard_requests is not None: __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests - if pre_filter_shard_size is not None: - __query["pre_filter_shard_size"] = pre_filter_shard_size if preference is not None: __query["preference"] = preference if pretty is not None: @@ -529,8 +521,6 @@ async def submit( __query["rest_total_hits_as_int"] = rest_total_hits_as_int if routing is not None: __query["routing"] = routing - if scroll is not None: - __query["scroll"] = scroll if search_type is not None: __query["search_type"] = search_type if source_excludes is not None: diff --git a/elasticsearch/_async/client/autoscaling.py b/elasticsearch/_async/client/autoscaling.py index a2f869867..4b3c1943c 100644 --- a/elasticsearch/_async/client/autoscaling.py +++ b/elasticsearch/_async/client/autoscaling.py @@ -33,7 +33,9 @@ async def delete_autoscaling_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Delete an autoscaling policy. NOTE: This feature is designed for indirect use @@ -43,6 +45,11 @@ async def delete_autoscaling_policy( ``_ :param name: the name of the autoscaling policy + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -55,8 +62,12 @@ async def delete_autoscaling_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", @@ -74,6 +85,7 @@ async def get_autoscaling_capacity( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -91,6 +103,10 @@ async def get_autoscaling_capacity( use this information to make autoscaling decisions. ``_ + + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_autoscaling/capacity" @@ -101,6 +117,8 @@ async def get_autoscaling_capacity( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -121,6 +139,7 @@ async def get_autoscaling_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -131,6 +150,9 @@ async def get_autoscaling_policy( ``_ :param name: the name of the autoscaling policy + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -143,6 +165,8 @@ async def get_autoscaling_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -167,7 +191,9 @@ async def put_autoscaling_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Create or update an autoscaling policy. NOTE: This feature is designed for indirect @@ -178,6 +204,11 @@ async def put_autoscaling_policy( :param name: the name of the autoscaling policy :param policy: + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -196,8 +227,12 @@ async def put_autoscaling_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __body = policy if policy is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] diff --git a/elasticsearch/_async/client/cat.py b/elasticsearch/_async/client/cat.py index 9cb71c95f..44c4a7929 100644 --- a/elasticsearch/_async/client/cat.py +++ b/elasticsearch/_async/client/cat.py @@ -140,9 +140,10 @@ async def allocation( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Provides a snapshot of the number of shards allocated to each data node and their - disk space. IMPORTANT: cat APIs are only intended for human consumption using - the command line or Kibana console. They are not intended for use by applications. + Get shard allocation information. Get a snapshot of the number of shards allocated + to each data node and their disk space. IMPORTANT: cat APIs are only intended + for human consumption using the command line or Kibana console. They are not + intended for use by applications. ``_ @@ -388,10 +389,11 @@ async def fielddata( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns the amount of heap memory currently used by the field data cache on every - data node in the cluster. IMPORTANT: cat APIs are only intended for human consumption - using the command line or Kibana console. They are not intended for use by applications. - For application consumption, use the nodes stats API. + Get field data cache information. Get the amount of heap memory currently used + by the field data cache on every data node in the cluster. IMPORTANT: cat APIs + are only intended for human consumption using the command line or Kibana console. + They are not intended for use by applications. For application consumption, use + the nodes stats API. ``_ @@ -469,17 +471,17 @@ async def health( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns the health status of a cluster, similar to the cluster health API. IMPORTANT: - cat APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the cluster health API. This API is often used to check malfunctioning clusters. - To help you track cluster health alongside log files and alerting systems, the - API returns timestamps in two formats: `HH:MM:SS`, which is human-readable but - includes no date information; `Unix epoch time`, which is machine-sortable and - includes date information. The latter format is useful for cluster recoveries - that take multiple days. You can use the cat health API to verify cluster health - across multiple nodes. You also can use the API to track the recovery of a large - cluster over a longer period of time. + Get the cluster health status. IMPORTANT: cat APIs are only intended for human + consumption using the command line or Kibana console. They are not intended for + use by applications. For application consumption, use the cluster health API. 
+ This API is often used to check malfunctioning clusters. To help you track cluster + health alongside log files and alerting systems, the API returns timestamps in + two formats: `HH:MM:SS`, which is human-readable but includes no date information; + `Unix epoch time`, which is machine-sortable and includes date information. The + latter format is useful for cluster recoveries that take multiple days. You can + use the cat health API to verify cluster health across multiple nodes. You also + can use the API to track the recovery of a large cluster over a longer period + of time. ``_ @@ -733,10 +735,10 @@ async def master( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about the master node, including the ID, bound IP address, - and name. IMPORTANT: cat APIs are only intended for human consumption using the - command line or Kibana console. They are not intended for use by applications. - For application consumption, use the nodes info API. + Get master node information. Get information about the master node, including + the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for + human consumption using the command line or Kibana console. They are not intended + for use by applications. For application consumption, use the nodes info API. ``_ @@ -1713,10 +1715,10 @@ async def nodeattrs( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about custom node attributes. IMPORTANT: cat APIs are only - intended for human consumption using the command line or Kibana console. They - are not intended for use by applications. For application consumption, use the - nodes info API. + Get node attribute information. Get information about custom node attributes. + IMPORTANT: cat APIs are only intended for human consumption using the command + line or Kibana console. They are not intended for use by applications. For application + consumption, use the nodes info API. ``_ @@ -1791,10 +1793,10 @@ async def nodes( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about the nodes in a cluster. IMPORTANT: cat APIs are only - intended for human consumption using the command line or Kibana console. They - are not intended for use by applications. For application consumption, use the - nodes info API. + Get node information. Get information about the nodes in a cluster. IMPORTANT: + cat APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. For application consumption, + use the nodes info API. ``_ @@ -1870,10 +1872,10 @@ async def pending_tasks( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns cluster-level changes that have not yet been executed. IMPORTANT: cat - APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the pending cluster tasks API. + Get pending task information. Get information about cluster-level changes that + have not yet taken effect. IMPORTANT: cat APIs are only intended for human consumption + using the command line or Kibana console. They are not intended for use by applications. + For application consumption, use the pending cluster tasks API. 
``_ @@ -1944,10 +1946,10 @@ async def plugins( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns a list of plugins running on each node of a cluster. IMPORTANT: cat APIs - are only intended for human consumption using the command line or Kibana console. - They are not intended for use by applications. For application consumption, use - the nodes info API. + Get plugin information. Get a list of plugins running on each node of a cluster. + IMPORTANT: cat APIs are only intended for human consumption using the command + line or Kibana console. They are not intended for use by applications. For application + consumption, use the nodes info API. ``_ @@ -2023,14 +2025,14 @@ async def recovery( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about ongoing and completed shard recoveries. Shard recovery - is the process of initializing a shard copy, such as restoring a primary shard - from a snapshot or syncing a replica shard from a primary shard. When a shard - recovery completes, the recovered shard is available for search and indexing. - For data streams, the API returns information about the stream’s backing indices. - IMPORTANT: cat APIs are only intended for human consumption using the command - line or Kibana console. They are not intended for use by applications. For application - consumption, use the index recovery API. + Get shard recovery information. Get information about ongoing and completed shard + recoveries. Shard recovery is the process of initializing a shard copy, such + as restoring a primary shard from a snapshot or syncing a replica shard from + a primary shard. When a shard recovery completes, the recovered shard is available + for search and indexing. For data streams, the API returns information about + the stream’s backing indices. IMPORTANT: cat APIs are only intended for human + consumption using the command line or Kibana console. They are not intended for + use by applications. For application consumption, use the index recovery API. ``_ @@ -2112,10 +2114,10 @@ async def repositories( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns the snapshot repositories for a cluster. IMPORTANT: cat APIs are only - intended for human consumption using the command line or Kibana console. They - are not intended for use by applications. For application consumption, use the - get snapshot repository API. + Get snapshot repository information. Get a list of snapshot repositories for + a cluster. IMPORTANT: cat APIs are only intended for human consumption using + the command line or Kibana console. They are not intended for use by applications. + For application consumption, use the get snapshot repository API. ``_ @@ -2184,11 +2186,11 @@ async def segments( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns low-level information about the Lucene segments in index shards. For - data streams, the API returns information about the backing indices. IMPORTANT: - cat APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the index segments API. + Get segment information. Get low-level information about the Lucene segments + in index shards. For data streams, the API returns information about the backing + indices. 
IMPORTANT: cat APIs are only intended for human consumption using the + command line or Kibana console. They are not intended for use by applications. + For application consumption, use the index segments API. ``_ @@ -2273,10 +2275,10 @@ async def shards( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about the shards in a cluster. For data streams, the API - returns information about the backing indices. IMPORTANT: cat APIs are only intended - for human consumption using the command line or Kibana console. They are not - intended for use by applications. + Get shard information. Get information about the shards in a cluster. For data + streams, the API returns information about the backing indices. IMPORTANT: cat + APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. ``_ @@ -2353,11 +2355,11 @@ async def snapshots( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about the snapshots stored in one or more repositories. A - snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: - cat APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the get snapshot API. + Get snapshot information. Get information about the snapshots stored in one or + more repositories. A snapshot is a backup of an index or running Elasticsearch + cluster. IMPORTANT: cat APIs are only intended for human consumption using the + command line or Kibana console. They are not intended for use by applications. + For application consumption, use the get snapshot API. ``_ @@ -2438,10 +2440,10 @@ async def tasks( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about tasks currently executing in the cluster. IMPORTANT: - cat APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the task management API. + Get task information. Get information about tasks currently running in the cluster. + IMPORTANT: cat APIs are only intended for human consumption using the command + line or Kibana console. They are not intended for use by applications. For application + consumption, use the task management API. ``_ @@ -2521,11 +2523,11 @@ async def templates( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about index templates in a cluster. You can use index templates - to apply index settings and field mappings to new indices at creation. IMPORTANT: - cat APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the get index template API. + Get index template information. Get information about the index templates in + a cluster. You can use index templates to apply index settings and field mappings + to new indices at creation. IMPORTANT: cat APIs are only intended for human consumption + using the command line or Kibana console. They are not intended for use by applications. + For application consumption, use the get index template API.
``_ @@ -2607,11 +2609,11 @@ async def thread_pool( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns thread pool statistics for each node in a cluster. Returned information - includes all built-in thread pools and custom thread pools. IMPORTANT: cat APIs - are only intended for human consumption using the command line or Kibana console. - They are not intended for use by applications. For application consumption, use - the nodes info API. + Get thread pool statistics. Get thread pool statistics for each node in a cluster. + Returned information includes all built-in thread pools and custom thread pools. + IMPORTANT: cat APIs are only intended for human consumption using the command + line or Kibana console. They are not intended for use by applications. For application + consumption, use the nodes info API. ``_ @@ -2862,7 +2864,7 @@ async def transforms( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get transforms. Returns configuration and usage information about transforms. + Get transform information. Get configuration and usage information about transforms. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get transform statistics API. diff --git a/elasticsearch/_async/client/cluster.py b/elasticsearch/_async/client/cluster.py index 806a3d8bd..0ccd6ba5d 100644 --- a/elasticsearch/_async/client/cluster.py +++ b/elasticsearch/_async/client/cluster.py @@ -44,7 +44,13 @@ async def allocation_explain( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides explanations for shard allocations in the cluster. + Explain the shard allocations. Get explanations for shard allocations in the + cluster. For unassigned shards, it provides an explanation for why the shard + is unassigned. For assigned shards, it provides an explanation for why the shard + is remaining on its current node and has not moved or rebalanced to another node. + This API can be very useful when attempting to diagnose why a shard is unassigned + or why a shard continues to remain on its current node when you might expect + otherwise. ``_ @@ -165,7 +171,8 @@ async def delete_voting_config_exclusions( wait_for_removal: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears cluster voting config exclusions. + Clear cluster voting config exclusions. Remove master-eligible nodes from the + voting configuration exclusion list. ``_ @@ -331,8 +338,8 @@ async def get_settings( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster-wide settings. By default, it returns only settings that have - been explicitly defined. + Get cluster-wide settings. By default, it returns only settings that have been + explicitly defined. ``_ @@ -414,14 +421,15 @@ async def health( ] = None, ) -> ObjectApiResponse[t.Any]: """ - The cluster health API returns a simple status on the health of the cluster. - You can also use the API to get the health status of only specified data streams - and indices. For data streams, the API retrieves the health status of the stream’s - backing indices. The cluster health status is: green, yellow or red. 
On the shard - level, a red status indicates that the specific shard is not allocated in the - cluster, yellow means that the primary shard is allocated but replicas are not, - and green means that all shards are allocated. The index level status is controlled - by the worst shard status. The cluster status is controlled by the worst index + Get the cluster health status. You can also use the API to get the health status + of only specified data streams and indices. For data streams, the API retrieves + the health status of the stream’s backing indices. The cluster health status + is: green, yellow or red. On the shard level, a red status indicates that the + specific shard is not allocated in the cluster. Yellow means that the primary + shard is allocated but replicas are not. Green means that all shards are allocated. + The index level status is controlled by the worst shard status. One of the main + benefits of the API is the ability to wait until the cluster reaches a certain + high watermark health level. The cluster status is controlled by the worst index status. ``_ @@ -568,14 +576,14 @@ async def pending_tasks( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster-level changes (such as create index, update mapping, allocate - or fail shard) that have not yet been executed. NOTE: This API returns a list - of any pending updates to the cluster state. These are distinct from the tasks - reported by the Task Management API which include periodic tasks and tasks initiated - by the user, such as node stats, search queries, or create index requests. However, - if a user-initiated task such as a create index command causes a cluster state - update, the activity of this task might be reported by both task api and pending - cluster tasks API. + Get the pending cluster tasks. Get information about cluster-level changes (such + as create index, update mapping, allocate or fail shard) that have not yet taken + effect. NOTE: This API returns a list of any pending updates to the cluster state. + These are distinct from the tasks reported by the task management API which include + periodic tasks and tasks initiated by the user, such as node stats, search queries, + or create index requests. However, if a user-initiated task such as a create + index command causes a cluster state update, the activity of this task might + be reported by both task api and pending cluster tasks API. ``_ @@ -623,7 +631,33 @@ async def post_voting_config_exclusions( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the cluster voting config exclusions by node ids or node names. + Update voting configuration exclusions. Update the cluster voting config exclusions + by node IDs or node names. By default, if there are more than three master-eligible + nodes in the cluster and you remove fewer than half of the master-eligible nodes + in the cluster at once, the voting configuration automatically shrinks. If you + want to shrink the voting configuration to contain fewer than three nodes or + to remove half or more of the master-eligible nodes in the cluster at once, use + this API to remove departing nodes from the voting configuration manually. The + API adds an entry for each specified node to the cluster’s voting configuration + exclusions list. It then waits until the cluster has reconfigured its voting + configuration to exclude the specified nodes. 
Clusters should have no voting + configuration exclusions in normal operation. Once the excluded nodes have stopped, + clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. + This API waits for the nodes to be fully removed from the cluster before it returns. + If your cluster has voting configuration exclusions for nodes that you no longer + intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` + to clear the voting configuration exclusions without waiting for the nodes to + leave the cluster. A response to `POST /_cluster/voting_config_exclusions` with + an HTTP status code of 200 OK guarantees that the node has been removed from + the voting configuration and will not be reinstated until the voting configuration + exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. + If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response + with an HTTP status code other than 200 OK then the node may not have been removed + from the voting configuration. In that case, you may safely retry the call. NOTE: + Voting exclusions are required only when you remove at least half of the master-eligible + nodes from a cluster in a short time period. They are not required when removing + master-ineligible nodes or when removing fewer than half of the master-eligible + nodes. ``_ @@ -787,7 +821,26 @@ async def put_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the cluster settings. + Update the cluster settings. Configure and update dynamic settings on a running + cluster. You can also configure dynamic settings locally on an unstarted or shut + down node in `elasticsearch.yml`. Updates made with this API can be persistent, + which apply across cluster restarts, or transient, which reset after a cluster + restart. You can also reset transient or persistent settings by assigning them + a null value. If you configure the same setting using multiple methods, Elasticsearch + applies the settings in following order of precedence: 1) Transient setting; + 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. + For example, you can apply a transient setting to override a persistent setting + or `elasticsearch.yml` setting. However, a change to an `elasticsearch.yml` setting + will not override a defined transient or persistent setting. TIP: In Elastic + Cloud, use the user settings feature to configure all cluster settings. This + method automatically rejects unsafe settings that could break your cluster. If + you run Elasticsearch on your own hardware, use this API to configure dynamic + cluster settings. Only use `elasticsearch.yml` for static cluster settings and + node settings. The API doesn’t require a restart and ensures a setting’s value + is the same on all nodes. WARNING: Transient cluster settings are no longer recommended. + Use persistent cluster settings instead. If a cluster becomes unstable, transient + settings can clear unexpectedly, resulting in a potentially undesired cluster + configuration. ``_ @@ -841,9 +894,9 @@ async def remote_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The cluster remote info API allows you to retrieve all of the configured remote - cluster information. It returns connection and endpoint information keyed by - the configured remote cluster alias. + Get remote cluster information. Get all of the configured remote cluster information. 
+ This API returns connection and endpoint information keyed by the configured + remote cluster alias. ``_ """ @@ -888,15 +941,35 @@ async def reroute( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to manually change the allocation of individual shards in the cluster. + Reroute the cluster. Manually change the allocation of individual shards in the + cluster. For example, a shard can be moved from one node to another explicitly, + an allocation can be canceled, and an unassigned shard can be explicitly allocated + to a specific node. It is important to note that after processing any reroute + commands Elasticsearch will perform rebalancing as normal (respecting the values + of settings such as `cluster.routing.rebalance.enable`) in order to remain in + a balanced state. For example, if the requested allocation includes moving a + shard from node1 to node2 then this may cause a shard to be moved from node2 + back to node1 to even things out. The cluster can be set to disable allocations + using the `cluster.routing.allocation.enable` setting. If allocations are disabled + then the only allocations that will be performed are explicit ones given using + the reroute command, and consequent allocations due to rebalancing. The cluster + will attempt to allocate a shard a maximum of `index.allocation.max_retries` + times in a row (defaults to `5`), before giving up and leaving the shard unallocated. + This scenario can be caused by structural problems such as having an analyzer + which refers to a stopwords file which doesn’t exist on all nodes. Once the problem + has been corrected, allocation can be manually retried by calling the reroute + API with the `?retry_failed` URI query parameter, which will attempt a single + retry round for these shards. ``_ :param commands: Defines the commands to perform. - :param dry_run: If true, then the request simulates the operation only and returns - the resulting state. + :param dry_run: If true, then the request simulates the operation. It will calculate + the result of applying the commands to the current cluster state and return + the resulting cluster state after the commands (and rebalancing) have been + applied; it will not actually perform the requested changes. :param explain: If true, then the response contains an explanation of why the - commands can or cannot be executed. + commands can or cannot run. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -975,7 +1048,26 @@ async def state( wait_for_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a comprehensive information about the state of the cluster. + Get the cluster state. Get comprehensive information about the state of the cluster. + The cluster state is an internal data structure which keeps track of a variety + of information needed by every node, including the identity and attributes of + the other nodes in the cluster; cluster-wide settings; index metadata, including + the mapping and settings for each index; the location and status of every shard + copy in the cluster. The elected master node ensures that every node in the cluster + has a copy of the same cluster state. This API lets you retrieve a representation + of this internal state for debugging or diagnostic purposes. 
You may need to + consult the Elasticsearch source code to determine the precise meaning of the + response. By default the API will route requests to the elected master node since + this node is the authoritative source of cluster states. You can also retrieve + the cluster state held on the node handling the API request by adding the `?local=true` + query parameter. Elasticsearch may need to expend significant effort to compute + a response to this API in larger clusters, and the response may comprise a very + large quantity of data. If you use this API repeatedly, your cluster may become + unstable. WARNING: The response is a representation of an internal data structure. + Its format is not subject to the same compatibility guarantees as other more + stable APIs and may change from version to version. Do not query this API using + external monitoring tools. Instead, obtain the information you require using + other more stable cluster APIs. ``_ @@ -1059,9 +1151,9 @@ async def stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster statistics. It returns basic index metrics (shard numbers, store - size, memory usage) and information about the current nodes that form the cluster - (number, roles, os, jvm versions, memory usage, cpu and installed plugins). + Get cluster statistics. Get basic index metrics (shard numbers, store size, memory + usage) and information about the current nodes that form the cluster (number, + roles, os, jvm versions, memory usage, cpu and installed plugins). ``_ diff --git a/elasticsearch/_async/client/enrich.py b/elasticsearch/_async/client/enrich.py index 6663826b9..59edfeed9 100644 --- a/elasticsearch/_async/client/enrich.py +++ b/elasticsearch/_async/client/enrich.py @@ -77,7 +77,7 @@ async def execute_policy( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates the enrich index for an existing enrich policy. + Run an enrich policy. Create the enrich index for an existing enrich policy. ``_ diff --git a/elasticsearch/_async/client/eql.py b/elasticsearch/_async/client/eql.py index ed21ddb3d..705a799f6 100644 --- a/elasticsearch/_async/client/eql.py +++ b/elasticsearch/_async/client/eql.py @@ -36,8 +36,8 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async EQL search or a stored synchronous EQL search. The API also - deletes results for the search. + Delete an async EQL search. Delete an async EQL search or a stored synchronous + EQL search. The API also deletes results for the search. ``_ @@ -83,8 +83,8 @@ async def get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current status and available results for an async EQL search or a - stored synchronous EQL search. + Get async EQL search results. Get the current status and available results for + an async EQL search or a stored synchronous EQL search. ``_ @@ -134,8 +134,8 @@ async def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current status for an async EQL search or a stored synchronous EQL - search without returning results. + Get the async EQL status. Get the current status for an async EQL search or a + stored synchronous EQL search without returning results. ``_ @@ -225,7 +225,9 @@ async def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns results matching a query expressed in Event Query Language (EQL) + Get EQL search results. 
Returns search results for an Event Query Language (EQL) + query. EQL assumes each document in a data stream or index corresponds to an + event. ``_ diff --git a/elasticsearch/_async/client/esql.py b/elasticsearch/_async/client/esql.py index f708a1a12..43b14a964 100644 --- a/elasticsearch/_async/client/esql.py +++ b/elasticsearch/_async/client/esql.py @@ -68,7 +68,8 @@ async def query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes an ES|QL request + Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) + query. ``_ diff --git a/elasticsearch/_async/client/fleet.py b/elasticsearch/_async/client/fleet.py index 03d4a8521..b9683c00c 100644 --- a/elasticsearch/_async/client/fleet.py +++ b/elasticsearch/_async/client/fleet.py @@ -46,8 +46,8 @@ async def global_checkpoints( wait_for_index: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current global checkpoints for an index. This API is design for internal - use by the fleet server project. + Get global checkpoints. Get the current global checkpoints for an index. This + API is designed for internal use by the Fleet server project. ``_ @@ -132,10 +132,9 @@ async def msearch( wait_for_checkpoints: t.Optional[t.Sequence[int]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes several [fleet searches](https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html) - with a single API request. The API follows the same structure as the [multi search](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html) - API. However, similar to the fleet search API, it supports the wait_for_checkpoints - parameter. + Run multiple Fleet searches. Run several Fleet searches with a single API request. + The API follows the same structure as the multi search API. However, similar + to the Fleet search API, it supports the `wait_for_checkpoints` parameter. :param searches: :param index: A single target to search. If the target is an index alias, it @@ -377,9 +376,9 @@ async def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The purpose of the fleet search api is to provide a search api where the search - will only be executed after provided checkpoint has been processed and is visible - for searches inside of Elasticsearch. + Run a Fleet search. The purpose of the Fleet search API is to provide an API + where the search will be run only after the provided checkpoint has been processed + and is visible for searches inside of Elasticsearch. :param index: A single target to search. If the target is an index alias, it must resolve to a single index. diff --git a/elasticsearch/_async/client/graph.py b/elasticsearch/_async/client/graph.py index 963428a45..1cda9f1e1 100644 --- a/elasticsearch/_async/client/graph.py +++ b/elasticsearch/_async/client/graph.py @@ -45,8 +45,14 @@ async def explore( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Extracts and summarizes information about the documents and terms in an Elasticsearch - data stream or index. + Explore graph analytics. Extract and summarize information about the documents + and terms in an Elasticsearch data stream or index. The easiest way to understand + the behavior of this API is to use the Graph UI to explore connections. 
An initial + request to the `_explore` API contains a seed query that identifies the documents + of interest and specifies the fields that define the vertices and connections + you want to include in the graph. Subsequent requests enable you to spider out + from one or more vertices of interest. You can exclude vertices that have already + been returned. ``_ diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index 399702412..a284f40a8 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -1280,6 +1280,7 @@ async def exists_alias( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ @@ -1300,6 +1301,9 @@ async def exists_alias( as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. :param ignore_unavailable: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -1325,6 +1329,8 @@ async def exists_alias( __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -1349,7 +1355,7 @@ async def exists_index_template( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Returns information about whether a particular index template exists. + Check index templates. Check whether index templates exist. ``_ @@ -1868,6 +1874,7 @@ async def get_alias( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1889,6 +1896,9 @@ async def get_alias( as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH and name not in SKIP_IN_PATH: @@ -1916,6 +1926,8 @@ async def get_alias( __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -3678,8 +3690,8 @@ async def resolve_index( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resolves the specified name(s) and/or index patterns for indices, aliases, and - data streams. Multiple patterns and remote clusters are supported. + Resolve indices. Resolve the names and/or index patterns for indices, aliases, + and data streams.
Multiple patterns and remote clusters are supported. ``_ diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index 29906c000..b7fd1b7a3 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -20,19 +20,12 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import ( - SKIP_IN_PATH, - Stability, - _quote, - _rewrite_parameters, - _stability_warning, -) +from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class InferenceClient(NamespacedClient): @_rewrite_parameters() - @_stability_warning(Stability.EXPERIMENTAL) async def delete( self, *, @@ -100,7 +93,6 @@ async def delete( ) @_rewrite_parameters() - @_stability_warning(Stability.EXPERIMENTAL) async def get( self, *, @@ -159,7 +151,6 @@ async def get( @_rewrite_parameters( body_fields=("input", "query", "task_settings"), ) - @_stability_warning(Stability.EXPERIMENTAL) async def inference( self, *, @@ -246,7 +237,6 @@ async def inference( @_rewrite_parameters( body_name="inference_config", ) - @_stability_warning(Stability.EXPERIMENTAL) async def put( self, *, diff --git a/elasticsearch/_async/client/ingest.py b/elasticsearch/_async/client/ingest.py index f8fa9d3a3..8fbb6876b 100644 --- a/elasticsearch/_async/client/ingest.py +++ b/elasticsearch/_async/client/ingest.py @@ -38,7 +38,8 @@ async def delete_geoip_database( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a geoip database configuration. + Delete GeoIP database configurations. Delete one or more IP geolocation database + configurations. ``_ @@ -89,7 +90,7 @@ async def delete_pipeline( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes one or more existing ingest pipeline. + Delete pipelines. Delete one or more ingest pipelines. ``_ @@ -138,7 +139,8 @@ async def geo_ip_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets download statistics for GeoIP2 databases used with the geoip processor. + Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used + with the GeoIP processor. ``_ """ @@ -175,7 +177,8 @@ async def get_geoip_database( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about one or more geoip database configurations. + Get GeoIP database configurations. Get information about one or more IP geolocation + database configurations. ``_ @@ -227,8 +230,8 @@ async def get_pipeline( summary: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about one or more ingest pipelines. This API returns a local - reference of the pipeline. + Get pipelines. Get information about one or more ingest pipelines. This API returns + a local reference of the pipeline. ``_ @@ -279,10 +282,10 @@ async def processor_grok( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Extracts structured fields out of a single text field within a document. You - choose which field to extract matched fields from, as well as the grok pattern - you expect will match. A grok pattern is like a regular expression that supports - aliased expressions that can be reused. + Run a grok processor. Extract structured fields out of a single text field within + a document. You must choose which field to extract matched fields from, as well + as the grok pattern you expect will match. 
A grok pattern is like a regular expression + that supports aliased expressions that can be reused. ``_ """ @@ -325,7 +328,8 @@ async def put_geoip_database( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about one or more geoip database configurations. + Create or update GeoIP database configurations. Create or update IP geolocation + database configurations. ``_ @@ -411,8 +415,7 @@ async def put_pipeline( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates an ingest pipeline. Changes made using this API take effect - immediately. + Create or update a pipeline. Changes made using this API take effect immediately. ``_ @@ -504,7 +507,9 @@ async def simulate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes an ingest pipeline against a set of provided documents. + Simulate a pipeline. Run an ingest pipeline against a set of provided documents. + You can either specify an existing pipeline to use with the provided documents + or supply a pipeline definition in the body of the request. ``_ diff --git a/elasticsearch/_async/client/nodes.py b/elasticsearch/_async/client/nodes.py index 10774d761..589ad8d7d 100644 --- a/elasticsearch/_async/client/nodes.py +++ b/elasticsearch/_async/client/nodes.py @@ -44,8 +44,8 @@ async def clear_repositories_metering_archive( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - You can use this API to clear the archived repositories metering information - in the cluster. + Clear the archived repositories metering. Clear the archived repositories metering + information in the cluster. ``_ @@ -94,11 +94,11 @@ async def get_repositories_metering_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - You can use the cluster repositories metering API to retrieve repositories metering - information in a cluster. This API exposes monotonically non-decreasing counters - and it’s expected that clients would durably store the information needed to - compute aggregations over a period of time. Additionally, the information exposed - by this API is volatile, meaning that it won’t be present after node restarts. + Get cluster repositories metering. Get repositories metering information for + a cluster. This API exposes monotonically non-decreasing counters and it is expected + that clients would durably store the information needed to compute aggregations + over a period of time. Additionally, the information exposed by this API is volatile, + meaning that it will not be present after node restarts. ``_ @@ -151,8 +151,9 @@ async def hot_threads( ] = None, ) -> TextApiResponse: """ - This API yields a breakdown of the hot threads on each selected node in the cluster. - The output is plain text with a breakdown of each node’s top hot threads. + Get the hot threads for nodes. Get a breakdown of the hot threads on each selected + node in the cluster. The output is plain text with a breakdown of the top hot + threads for each node. ``_ @@ -227,7 +228,8 @@ async def info( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster nodes information. + Get node information. By default, the API returns all attributes and core settings + for cluster nodes. ``_ @@ -296,7 +298,18 @@ async def reload_secure_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reloads the keystore on nodes in the cluster. 
+ Reload the keystore on nodes in the cluster. Secure settings are stored in an + on-disk keystore. Certain of these settings are reloadable. That is, you can + change them on disk and reload them without restarting any nodes in the cluster. + When you have updated reloadable secure settings in your keystore, you can use + this API to reload those settings on each node. When the Elasticsearch keystore + is password protected and not simply obfuscated, you must provide the password + for the keystore when you reload the secure settings. Reloading the settings + for the whole cluster assumes that the keystores for all nodes are protected + with the same password; this method is allowed only when inter-node communications + are encrypted. Alternatively, you can reload the secure settings on each node + by locally accessing the API and passing the node-specific Elasticsearch keystore + password. ``_ @@ -367,7 +380,8 @@ async def stats( types: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster nodes statistics. + Get node statistics. Get statistics for nodes in a cluster. By default, all stats + are returned. You can limit the returned information by using metrics. ``_ @@ -484,7 +498,7 @@ async def usage( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information on the usage of features. + Get feature usage information. ``_ diff --git a/elasticsearch/_async/client/query_rules.py b/elasticsearch/_async/client/query_rules.py index 884d2a7ab..a905a1f73 100644 --- a/elasticsearch/_async/client/query_rules.py +++ b/elasticsearch/_async/client/query_rules.py @@ -37,7 +37,7 @@ async def delete_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a query rule within a query ruleset. + Delete a query rule. Delete a query rule within a query ruleset. ``_ @@ -85,7 +85,7 @@ async def delete_ruleset( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a query ruleset. + Delete a query ruleset. ``_ @@ -126,7 +126,7 @@ async def get_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the details about a query rule within a query ruleset + Get a query rule. Get details about a query rule within a query ruleset. ``_ @@ -174,7 +174,7 @@ async def get_ruleset( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the details about a query ruleset + Get a query ruleset. Get details about a query ruleset. ``_ @@ -217,7 +217,7 @@ async def list_rulesets( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns summarized information about existing query rulesets. + Get all query rulesets. Get summarized information about the query rulesets. ``_ @@ -270,7 +270,7 @@ async def put_rule( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a query rule within a query ruleset. + Create or update a query rule. Create or update a query rule within a query ruleset. ``_ @@ -345,7 +345,7 @@ async def put_ruleset( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a query ruleset. + Create or update a query ruleset. ``_ @@ -398,7 +398,8 @@ async def test( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a query ruleset. + Test a query ruleset. Evaluate match criteria against a query ruleset to identify + the rules that would match those criteria.
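Editor's note: the reworded `test` summary above describes evaluating match criteria against a ruleset. A minimal sketch of the call shape, not part of the diff, assuming a local cluster and a hypothetical ruleset ID `my-ruleset` with a `query_string` criteria key:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# Ask which rules in the ruleset would fire for the given criteria.
# `my-ruleset` and the `query_string` key are illustrative placeholders.
resp = client.query_rules.test(
    ruleset_id="my-ruleset",
    match_criteria={"query_string": "pugs"},
)
print(resp["total_matched_rules"])
```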
``_ diff --git a/elasticsearch/_async/client/sql.py b/elasticsearch/_async/client/sql.py index a376d5296..c041681e9 100644 --- a/elasticsearch/_async/client/sql.py +++ b/elasticsearch/_async/client/sql.py @@ -39,7 +39,7 @@ async def clear_cursor( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears the SQL cursor + Clear an SQL search cursor. ``_ @@ -84,8 +84,8 @@ async def delete_async( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async SQL search or a stored synchronous SQL search. If the search - is still running, the API cancels it. + Delete an async SQL search. Delete an async SQL search or a stored synchronous + SQL search. If the search is still running, the API cancels it. ``_ @@ -131,8 +131,8 @@ async def get_async( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current status and available results for an async SQL search or stored - synchronous SQL search + Get async SQL search results. Get the current status and available results for + an async SQL search or stored synchronous SQL search. ``_ @@ -189,8 +189,8 @@ async def get_async_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current status of an async SQL search or a stored synchronous SQL - search + Get the async SQL search status. Get the current status of an async SQL search + or a stored synchronous SQL search. ``_ @@ -273,7 +273,7 @@ async def query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes a SQL request + Get SQL search results. Run an SQL request. ``_ @@ -383,7 +383,8 @@ async def translate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Translates SQL into Elasticsearch queries + Translate SQL into Elasticsearch queries. Translate an SQL search into a search + API request containing Query DSL. ``_ diff --git a/elasticsearch/_async/client/synonyms.py b/elasticsearch/_async/client/synonyms.py index 153c552af..ee6e65713 100644 --- a/elasticsearch/_async/client/synonyms.py +++ b/elasticsearch/_async/client/synonyms.py @@ -36,7 +36,7 @@ async def delete_synonym( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a synonym set + Delete a synonym set. ``_ @@ -77,7 +77,7 @@ async def delete_synonym_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a synonym rule in a synonym set + Delete a synonym rule. Delete a synonym rule from a synonym set. ``_ @@ -127,7 +127,7 @@ async def get_synonym( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a synonym set + Get a synonym set. ``_ @@ -174,7 +174,7 @@ async def get_synonym_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a synonym rule from a synonym set + Get a synonym rule. Get a synonym rule from a synonym set. ``_ @@ -223,7 +223,7 @@ async def get_synonyms_sets( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a summary of all defined synonym sets + Get all synonym sets. Get a summary of all defined synonym sets. ``_ @@ -272,7 +272,9 @@ async def put_synonym( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a synonym set. + Create or update a synonym set. Synonym sets are limited to a maximum of 10,000 + synonym rules per set. If you need to manage more synonym rules, you can create + multiple synonym sets.
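Editor's note: to make the 10,000-rule limit concrete, here is a minimal sketch of `put_synonym` under assumed names (the set ID and rule are hypothetical); a larger vocabulary would be split across several such sets:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# Create or update a synonym set. Each set is capped at 10,000 rules.
client.synonyms.put_synonym(
    id="my-synonyms",  # illustrative placeholder
    synonyms_set=[
        {"id": "rule-1", "synonyms": "hello, hi, howdy"},
    ],
)
```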
``_ @@ -325,7 +327,8 @@ async def put_synonym_rule( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a synonym rule in a synonym set + Create or update a synonym rule. Create or update a synonym rule in a synonym + set. ``_ diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index e67e2e1ee..8a20bda38 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -870,7 +870,7 @@ def count( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns number of documents matching a query. + Count search results. Get the number of documents matching a query. ``_ @@ -2272,7 +2272,26 @@ def health_report( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the health of the cluster. + Get the cluster health. Get a report with the health status of an Elasticsearch + cluster. The report contains a list of indicators that compose Elasticsearch + functionality. Each indicator has a health status of: green, unknown, yellow + or red. The indicator will provide an explanation and metadata describing the + reason for its current health status. The cluster’s status is controlled by the + worst indicator status. In the event that an indicator’s status is non-green, + a list of impacts may be present in the indicator result which detail the functionalities + that are negatively affected by the health issue. Each impact carries with it + a severity level, an area of the system that is affected, and a simple description + of the impact on the system. Some health indicators can determine the root cause + of a health problem and prescribe a set of steps that can be performed in order + to improve the health of the system. The root cause and remediation steps are + encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause + analysis, an action containing a brief description of the steps to take to fix + the problem, the list of affected resources (if applicable), and a detailed step-by-step + troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators + perform root cause analysis of non-green health statuses. This can be computationally + expensive when called frequently. When setting up automated polling of the API + for health status, set verbose to false to disable the more expensive analysis + logic. ``_ @@ -3077,6 +3096,7 @@ def open_point_in_time( *, index: t.Union[str, t.Sequence[str]], keep_alive: t.Union[str, t.Literal[-1], t.Literal[0]], + allow_partial_search_results: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ @@ -3111,6 +3131,10 @@ def open_point_in_time( :param index: A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices :param keep_alive: Extends the time to live of the corresponding point in time. + :param allow_partial_search_results: If `false`, creating a point in time request + when a shard is missing or unavailable will throw an exception. If `true`, + the point in time will contain all the shards that are available at the time + of the request. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. 
Supports comma-separated values, such @@ -3133,6 +3157,8 @@ def open_point_in_time( __body: t.Dict[str, t.Any] = body if body is not None else {} if keep_alive is not None: __query["keep_alive"] = keep_alive + if allow_partial_search_results is not None: + __query["allow_partial_search_results"] = allow_partial_search_results if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: diff --git a/elasticsearch/_sync/client/async_search.py b/elasticsearch/_sync/client/async_search.py index 1dbca1afa..96138d26c 100644 --- a/elasticsearch/_sync/client/async_search.py +++ b/elasticsearch/_sync/client/async_search.py @@ -145,6 +145,7 @@ def status( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -156,6 +157,9 @@ def status( ``_ :param id: A unique identifier for the async search. + :param keep_alive: Specifies how long the async search needs to be available. + Ongoing async searches and any saved search results are deleted after this + period. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -168,6 +172,8 @@ def status( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if keep_alive is not None: + __query["keep_alive"] = keep_alive if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -258,7 +264,6 @@ def submit( ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, indices_boost: t.Optional[t.Sequence[t.Mapping[str, float]]] = None, - keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, keep_on_completion: t.Optional[bool] = None, knn: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, @@ -268,7 +273,6 @@ def submit( min_score: t.Optional[float] = None, pit: t.Optional[t.Mapping[str, t.Any]] = None, post_filter: t.Optional[t.Mapping[str, t.Any]] = None, - pre_filter_shard_size: t.Optional[int] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, profile: t.Optional[bool] = None, @@ -282,7 +286,6 @@ def submit( routing: t.Optional[str] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, - scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, search_after: t.Optional[ t.Sequence[t.Union[None, bool, float, int, str, t.Any]] ] = None, @@ -375,9 +378,6 @@ def submit( :param ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :param indices_boost: Boosts the _score of documents from specified indices. - :param keep_alive: Specifies how long the async search needs to be available. - Ongoing async searches and any saved search results are deleted after this - period. :param keep_on_completion: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. :param knn: Defines the approximate kNN search to run. @@ -392,10 +392,6 @@ def submit( :param pit: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an `<index>` in the request path.
:param post_filter: - :param pre_filter_shard_size: The default value cannot be changed, which enforces - the execution of a pre-filter roundtrip to retrieve statistics from each - shard so that the ones that surely don’t hold any document matching the query - get skipped. :param preference: Specify the node or shard the operation should be performed on (default: random) :param profile: @@ -404,13 +400,13 @@ def submit( :param request_cache: Specify if request cache should be used for this request or not, defaults to true :param rescore: - :param rest_total_hits_as_int: + :param rest_total_hits_as_int: Indicates whether hits.total should be rendered + as an integer or an object in the rest search response :param routing: A comma-separated list of specific routing values :param runtime_mappings: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. :param script_fields: Retrieve a script evaluation (based on different fields) for each hit. - :param scroll: :param search_after: :param search_type: Search operation type :param seq_no_primary_term: If true, returns sequence number and primary term @@ -507,16 +503,12 @@ def submit( __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable - if keep_alive is not None: - __query["keep_alive"] = keep_alive if keep_on_completion is not None: __query["keep_on_completion"] = keep_on_completion if lenient is not None: __query["lenient"] = lenient if max_concurrent_shard_requests is not None: __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests - if pre_filter_shard_size is not None: - __query["pre_filter_shard_size"] = pre_filter_shard_size if preference is not None: __query["preference"] = preference if pretty is not None: @@ -529,8 +521,6 @@ def submit( __query["rest_total_hits_as_int"] = rest_total_hits_as_int if routing is not None: __query["routing"] = routing - if scroll is not None: - __query["scroll"] = scroll if search_type is not None: __query["search_type"] = search_type if source_excludes is not None: diff --git a/elasticsearch/_sync/client/autoscaling.py b/elasticsearch/_sync/client/autoscaling.py index b271100c4..dbf2495f1 100644 --- a/elasticsearch/_sync/client/autoscaling.py +++ b/elasticsearch/_sync/client/autoscaling.py @@ -33,7 +33,9 @@ def delete_autoscaling_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Delete an autoscaling policy. NOTE: This feature is designed for indirect use @@ -43,6 +45,11 @@ def delete_autoscaling_policy( ``_ :param name: the name of the autoscaling policy + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -55,8 +62,12 @@ def delete_autoscaling_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", @@ -74,6 +85,7 @@ def get_autoscaling_capacity( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -91,6 +103,10 @@ def get_autoscaling_capacity( use this information to make autoscaling decisions. ``_ + + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_autoscaling/capacity" @@ -101,6 +117,8 @@ def get_autoscaling_capacity( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -121,6 +139,7 @@ def get_autoscaling_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -131,6 +150,9 @@ def get_autoscaling_policy( ``_ :param name: the name of the autoscaling policy + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -143,6 +165,8 @@ def get_autoscaling_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -167,7 +191,9 @@ def put_autoscaling_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Create or update an autoscaling policy. NOTE: This feature is designed for indirect @@ -178,6 +204,11 @@ def put_autoscaling_policy( :param name: the name of the autoscaling policy :param policy: + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -196,8 +227,12 @@ def put_autoscaling_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __body = policy if policy is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] diff --git a/elasticsearch/_sync/client/cat.py b/elasticsearch/_sync/client/cat.py index b8af46650..e7028252d 100644 --- a/elasticsearch/_sync/client/cat.py +++ b/elasticsearch/_sync/client/cat.py @@ -140,9 +140,10 @@ def allocation( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Provides a snapshot of the number of shards allocated to each data node and their - disk space. IMPORTANT: cat APIs are only intended for human consumption using - the command line or Kibana console. They are not intended for use by applications. + Get shard allocation information. Get a snapshot of the number of shards allocated + to each data node and their disk space. IMPORTANT: cat APIs are only intended + for human consumption using the command line or Kibana console. They are not + intended for use by applications. ``_ @@ -388,10 +389,11 @@ def fielddata( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns the amount of heap memory currently used by the field data cache on every - data node in the cluster. IMPORTANT: cat APIs are only intended for human consumption - using the command line or Kibana console. They are not intended for use by applications. - For application consumption, use the nodes stats API. + Get field data cache information. Get the amount of heap memory currently used + by the field data cache on every data node in the cluster. IMPORTANT: cat APIs + are only intended for human consumption using the command line or Kibana console. + They are not intended for use by applications. For application consumption, use + the nodes stats API. ``_ @@ -469,17 +471,17 @@ def health( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns the health status of a cluster, similar to the cluster health API. IMPORTANT: - cat APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the cluster health API. This API is often used to check malfunctioning clusters. - To help you track cluster health alongside log files and alerting systems, the - API returns timestamps in two formats: `HH:MM:SS`, which is human-readable but - includes no date information; `Unix epoch time`, which is machine-sortable and - includes date information. The latter format is useful for cluster recoveries - that take multiple days. You can use the cat health API to verify cluster health - across multiple nodes. You also can use the API to track the recovery of a large - cluster over a longer period of time. + Get the cluster health status. IMPORTANT: cat APIs are only intended for human + consumption using the command line or Kibana console. They are not intended for + use by applications. For application consumption, use the cluster health API. + This API is often used to check malfunctioning clusters. 
To help you track cluster + health alongside log files and alerting systems, the API returns timestamps in + two formats: `HH:MM:SS`, which is human-readable but includes no date information; + `Unix epoch time`, which is machine-sortable and includes date information. The + latter format is useful for cluster recoveries that take multiple days. You can + use the cat health API to verify cluster health across multiple nodes. You also + can use the API to track the recovery of a large cluster over a longer period + of time. ``_ @@ -733,10 +735,10 @@ def master( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about the master node, including the ID, bound IP address, - and name. IMPORTANT: cat APIs are only intended for human consumption using the - command line or Kibana console. They are not intended for use by applications. - For application consumption, use the nodes info API. + Get master node information. Get information about the master node, including + the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for + human consumption using the command line or Kibana console. They are not intended + for use by applications. For application consumption, use the nodes info API. ``_ @@ -1713,10 +1715,10 @@ def nodeattrs( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about custom node attributes. IMPORTANT: cat APIs are only - intended for human consumption using the command line or Kibana console. They - are not intended for use by applications. For application consumption, use the - nodes info API. + Get node attribute information. Get information about custom node attributes. + IMPORTANT: cat APIs are only intended for human consumption using the command + line or Kibana console. They are not intended for use by applications. For application + consumption, use the nodes info API. ``_ @@ -1791,10 +1793,10 @@ def nodes( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about the nodes in a cluster. IMPORTANT: cat APIs are only - intended for human consumption using the command line or Kibana console. They - are not intended for use by applications. For application consumption, use the - nodes info API. + Get node information. Get information about the nodes in a cluster. IMPORTANT: + cat APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. For application consumption, + use the nodes info API. ``_ @@ -1870,10 +1872,10 @@ def pending_tasks( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns cluster-level changes that have not yet been executed. IMPORTANT: cat - APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the pending cluster tasks API. + Get pending task information. Get information about cluster-level changes that + have not yet taken effect. IMPORTANT: cat APIs are only intended for human consumption + using the command line or Kibana console. They are not intended for use by applications. + For application consumption, use the pending cluster tasks API. 
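Editor's note: since these docstrings repeatedly steer applications away from the cat endpoints, a short sketch contrasting the human-oriented call with its structured counterpart may help (local cluster assumed):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# Human-readable text table; `v=True` adds column headers.
print(client.cat.pending_tasks(v=True))

# For application consumption the docstrings recommend the cluster API,
# which returns structured JSON instead of text.
resp = client.cluster.pending_tasks()
for task in resp["tasks"]:
    print(task["source"])
```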
``_ @@ -1944,10 +1946,10 @@ def plugins( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns a list of plugins running on each node of a cluster. IMPORTANT: cat APIs - are only intended for human consumption using the command line or Kibana console. - They are not intended for use by applications. For application consumption, use - the nodes info API. + Get plugin information. Get a list of plugins running on each node of a cluster. + IMPORTANT: cat APIs are only intended for human consumption using the command + line or Kibana console. They are not intended for use by applications. For application + consumption, use the nodes info API. ``_ @@ -2023,14 +2025,14 @@ def recovery( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about ongoing and completed shard recoveries. Shard recovery - is the process of initializing a shard copy, such as restoring a primary shard - from a snapshot or syncing a replica shard from a primary shard. When a shard - recovery completes, the recovered shard is available for search and indexing. - For data streams, the API returns information about the stream’s backing indices. - IMPORTANT: cat APIs are only intended for human consumption using the command - line or Kibana console. They are not intended for use by applications. For application - consumption, use the index recovery API. + Get shard recovery information. Get information about ongoing and completed shard + recoveries. Shard recovery is the process of initializing a shard copy, such + as restoring a primary shard from a snapshot or syncing a replica shard from + a primary shard. When a shard recovery completes, the recovered shard is available + for search and indexing. For data streams, the API returns information about + the stream’s backing indices. IMPORTANT: cat APIs are only intended for human + consumption using the command line or Kibana console. They are not intended for + use by applications. For application consumption, use the index recovery API. ``_ @@ -2112,10 +2114,10 @@ def repositories( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns the snapshot repositories for a cluster. IMPORTANT: cat APIs are only - intended for human consumption using the command line or Kibana console. They - are not intended for use by applications. For application consumption, use the - get snapshot repository API. + Get snapshot repository information. Get a list of snapshot repositories for + a cluster. IMPORTANT: cat APIs are only intended for human consumption using + the command line or Kibana console. They are not intended for use by applications. + For application consumption, use the get snapshot repository API. ``_ @@ -2184,11 +2186,11 @@ def segments( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns low-level information about the Lucene segments in index shards. For - data streams, the API returns information about the backing indices. IMPORTANT: - cat APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the index segments API. + Get segment information. Get low-level information about the Lucene segments + in index shards. For data streams, the API returns information about the backing + indices. 
IMPORTANT: cat APIs are only intended for human consumption using the + command line or Kibana console. They are not intended for use by applications. + For application consumption, use the index segments API. ``_ @@ -2273,10 +2275,10 @@ def shards( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about the shards in a cluster. For data streams, the API - returns information about the backing indices. IMPORTANT: cat APIs are only intended - for human consumption using the command line or Kibana console. They are not - intended for use by applications. + Get shard information. Get information about the shards in a cluster. For data + streams, the API returns information about the backing indices. IMPORTANT: cat + APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. ``_ @@ -2353,11 +2355,11 @@ def snapshots( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about the snapshots stored in one or more repositories. A - snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: - cat APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the get snapshot API. + Get snapshot information. Get information about the snapshots stored in one or + more repositories. A snapshot is a backup of an index or running Elasticsearch + cluster. IMPORTANT: cat APIs are only intended for human consumption using the + command line or Kibana console. They are not intended for use by applications. + For application consumption, use the get snapshot API. ``_ @@ -2438,10 +2440,10 @@ def tasks( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about tasks currently executing in the cluster. IMPORTANT: - cat APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the task management API. + Get task information. Get information about tasks currently running in the cluster. + IMPORTANT: cat APIs are only intended for human consumption using the command + line or Kibana console. They are not intended for use by applications. For application + consumption, use the task management API. ``_ @@ -2521,11 +2523,11 @@ def templates( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about index templates in a cluster. You can use index templates - to apply index settings and field mappings to new indices at creation. IMPORTANT: - cat APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the get index template API. + Get index template information. Get information about the index templates in + a cluster. You can use index templates to apply index settings and field mappings + to new indices at creation. IMPORTANT: cat APIs are only intended for human consumption + using the command line or Kibana console. They are not intended for use by applications. + For application consumption, use the get index template API.
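Editor's note: in the same spirit, the `templates` docstring points applications at the get-index-template API; a brief sketch of both, with `logs-*` as a placeholder pattern:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# Text listing of index templates whose names match a pattern.
print(client.cat.templates(name="logs-*", v=True))

# Structured equivalent for applications.
resp = client.indices.get_index_template(name="logs-*")
for tmpl in resp["index_templates"]:
    print(tmpl["name"])
```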
``_ @@ -2607,11 +2609,11 @@ def thread_pool( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns thread pool statistics for each node in a cluster. Returned information - includes all built-in thread pools and custom thread pools. IMPORTANT: cat APIs - are only intended for human consumption using the command line or Kibana console. - They are not intended for use by applications. For application consumption, use - the nodes info API. + Get thread pool statistics. Get thread pool statistics for each node in a cluster. + Returned information includes all built-in thread pools and custom thread pools. + IMPORTANT: cat APIs are only intended for human consumption using the command + line or Kibana console. They are not intended for use by applications. For application + consumption, use the nodes info API. ``_ @@ -2862,7 +2864,7 @@ def transforms( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get transforms. Returns configuration and usage information about transforms. + Get transform information. Get configuration and usage information about transforms. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get transform statistics API. diff --git a/elasticsearch/_sync/client/cluster.py b/elasticsearch/_sync/client/cluster.py index 2df719b90..01a82144d 100644 --- a/elasticsearch/_sync/client/cluster.py +++ b/elasticsearch/_sync/client/cluster.py @@ -44,7 +44,13 @@ def allocation_explain( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides explanations for shard allocations in the cluster. + Explain the shard allocations. Get explanations for shard allocations in the + cluster. For unassigned shards, it provides an explanation for why the shard + is unassigned. For assigned shards, it provides an explanation for why the shard + is remaining on its current node and has not moved or rebalanced to another node. + This API can be very useful when attempting to diagnose why a shard is unassigned + or why a shard continues to remain on its current node when you might expect + otherwise. ``_ @@ -165,7 +171,8 @@ def delete_voting_config_exclusions( wait_for_removal: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears cluster voting config exclusions. + Clear cluster voting config exclusions. Remove master-eligible nodes from the + voting configuration exclusion list. ``_ @@ -331,8 +338,8 @@ def get_settings( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster-wide settings. By default, it returns only settings that have - been explicitly defined. + Get cluster-wide settings. By default, it returns only settings that have been + explicitly defined. ``_ @@ -414,14 +421,15 @@ def health( ] = None, ) -> ObjectApiResponse[t.Any]: """ - The cluster health API returns a simple status on the health of the cluster. - You can also use the API to get the health status of only specified data streams - and indices. For data streams, the API retrieves the health status of the stream’s - backing indices. The cluster health status is: green, yellow or red. On the shard - level, a red status indicates that the specific shard is not allocated in the - cluster, yellow means that the primary shard is allocated but replicas are not, - and green means that all shards are allocated. 
The index level status is controlled - by the worst shard status. The cluster status is controlled by the worst index + Get the cluster health status. You can also use the API to get the health status + of only specified data streams and indices. For data streams, the API retrieves + the health status of the stream’s backing indices. The cluster health status + is: green, yellow or red. On the shard level, a red status indicates that the + specific shard is not allocated in the cluster. Yellow means that the primary + shard is allocated but replicas are not. Green means that all shards are allocated. + The index level status is controlled by the worst shard status. One of the main + benefits of the API is the ability to wait until the cluster reaches a certain + high watermark health level. The cluster status is controlled by the worst index status. ``_ @@ -568,14 +576,14 @@ def pending_tasks( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster-level changes (such as create index, update mapping, allocate - or fail shard) that have not yet been executed. NOTE: This API returns a list - of any pending updates to the cluster state. These are distinct from the tasks - reported by the Task Management API which include periodic tasks and tasks initiated - by the user, such as node stats, search queries, or create index requests. However, - if a user-initiated task such as a create index command causes a cluster state - update, the activity of this task might be reported by both task api and pending - cluster tasks API. + Get the pending cluster tasks. Get information about cluster-level changes (such + as create index, update mapping, allocate or fail shard) that have not yet taken + effect. NOTE: This API returns a list of any pending updates to the cluster state. + These are distinct from the tasks reported by the task management API which include + periodic tasks and tasks initiated by the user, such as node stats, search queries, + or create index requests. However, if a user-initiated task such as a create + index command causes a cluster state update, the activity of this task might + be reported by both the task API and the pending cluster tasks API. ``_ @@ -623,7 +631,33 @@ def post_voting_config_exclusions( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the cluster voting config exclusions by node ids or node names. + Update voting configuration exclusions. Update the cluster voting config exclusions + by node IDs or node names. By default, if there are more than three master-eligible + nodes in the cluster and you remove fewer than half of the master-eligible nodes + in the cluster at once, the voting configuration automatically shrinks. If you + want to shrink the voting configuration to contain fewer than three nodes or + to remove half or more of the master-eligible nodes in the cluster at once, use + this API to remove departing nodes from the voting configuration manually. The + API adds an entry for each specified node to the cluster’s voting configuration + exclusions list. It then waits until the cluster has reconfigured its voting + configuration to exclude the specified nodes. Clusters should have no voting + configuration exclusions in normal operation. Once the excluded nodes have stopped, + clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. + This API waits for the nodes to be fully removed from the cluster before it returns.
+ If your cluster has voting configuration exclusions for nodes that you no longer + intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` + to clear the voting configuration exclusions without waiting for the nodes to + leave the cluster. A response to `POST /_cluster/voting_config_exclusions` with + an HTTP status code of 200 OK guarantees that the node has been removed from + the voting configuration and will not be reinstated until the voting configuration + exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. + If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response + with an HTTP status code other than 200 OK then the node may not have been removed + from the voting configuration. In that case, you may safely retry the call. NOTE: + Voting exclusions are required only when you remove at least half of the master-eligible + nodes from a cluster in a short time period. They are not required when removing + master-ineligible nodes or when removing fewer than half of the master-eligible + nodes. ``_ @@ -787,7 +821,26 @@ def put_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the cluster settings. + Update the cluster settings. Configure and update dynamic settings on a running + cluster. You can also configure dynamic settings locally on an unstarted or shut + down node in `elasticsearch.yml`. Updates made with this API can be persistent, + which apply across cluster restarts, or transient, which reset after a cluster + restart. You can also reset transient or persistent settings by assigning them + a null value. If you configure the same setting using multiple methods, Elasticsearch + applies the settings in the following order of precedence: 1) Transient setting; + 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. + For example, you can apply a transient setting to override a persistent setting + or `elasticsearch.yml` setting. However, a change to an `elasticsearch.yml` setting + will not override a defined transient or persistent setting. TIP: In Elastic + Cloud, use the user settings feature to configure all cluster settings. This + method automatically rejects unsafe settings that could break your cluster. If + you run Elasticsearch on your own hardware, use this API to configure dynamic + cluster settings. Only use `elasticsearch.yml` for static cluster settings and + node settings. The API doesn’t require a restart and ensures a setting’s value + is the same on all nodes. WARNING: Transient cluster settings are no longer recommended. + Use persistent cluster settings instead. If a cluster becomes unstable, transient + settings can clear unexpectedly, resulting in a potentially undesired cluster + configuration. ``_ @@ -841,9 +894,9 @@ def remote_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The cluster remote info API allows you to retrieve all of the configured remote - cluster information. It returns connection and endpoint information keyed by - the configured remote cluster alias. + Get remote cluster information. Get all of the configured remote cluster information. + This API returns connection and endpoint information keyed by the configured + remote cluster alias. ``_ @@ -888,15 +941,35 @@ def reroute( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to manually change the allocation of individual shards in the cluster.
+ Reroute the cluster. Manually change the allocation of individual shards in the + cluster. For example, a shard can be moved from one node to another explicitly, + an allocation can be canceled, and an unassigned shard can be explicitly allocated + to a specific node. It is important to note that after processing any reroute + commands Elasticsearch will perform rebalancing as normal (respecting the values + of settings such as `cluster.routing.rebalance.enable`) in order to remain in + a balanced state. For example, if the requested allocation includes moving a + shard from node1 to node2 then this may cause a shard to be moved from node2 + back to node1 to even things out. The cluster can be set to disable allocations + using the `cluster.routing.allocation.enable` setting. If allocations are disabled + then the only allocations that will be performed are explicit ones given using + the reroute command, and consequent allocations due to rebalancing. The cluster + will attempt to allocate a shard a maximum of `index.allocation.max_retries` + times in a row (defaults to `5`), before giving up and leaving the shard unallocated. + This scenario can be caused by structural problems such as having an analyzer + which refers to a stopwords file which doesn’t exist on all nodes. Once the problem + has been corrected, allocation can be manually retried by calling the reroute + API with the `?retry_failed` URI query parameter, which will attempt a single + retry round for these shards. ``_ :param commands: Defines the commands to perform. - :param dry_run: If true, then the request simulates the operation only and returns - the resulting state. + :param dry_run: If true, then the request simulates the operation. It will calculate + the result of applying the commands to the current cluster state and return + the resulting cluster state after the commands (and rebalancing) have been + applied; it will not actually perform the requested changes. :param explain: If true, then the response contains an explanation of why the - commands can or cannot be executed. + commands can or cannot run. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -975,7 +1048,26 @@ def state( wait_for_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a comprehensive information about the state of the cluster. + Get the cluster state. Get comprehensive information about the state of the cluster. + The cluster state is an internal data structure which keeps track of a variety + of information needed by every node, including the identity and attributes of + the other nodes in the cluster; cluster-wide settings; index metadata, including + the mapping and settings for each index; the location and status of every shard + copy in the cluster. The elected master node ensures that every node in the cluster + has a copy of the same cluster state. This API lets you retrieve a representation + of this internal state for debugging or diagnostic purposes. You may need to + consult the Elasticsearch source code to determine the precise meaning of the + response. By default the API will route requests to the elected master node since + this node is the authoritative source of cluster states. You can also retrieve + the cluster state held on the node handling the API request by adding the `?local=true` + query parameter. 
Elasticsearch may need to expend significant effort to compute + a response to this API in larger clusters, and the response may comprise a very + large quantity of data. If you use this API repeatedly, your cluster may become + unstable. WARNING: The response is a representation of an internal data structure. + Its format is not subject to the same compatibility guarantees as other more + stable APIs and may change from version to version. Do not query this API using + external monitoring tools. Instead, obtain the information you require using + other more stable cluster APIs. ``_ @@ -1059,9 +1151,9 @@ def stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster statistics. It returns basic index metrics (shard numbers, store - size, memory usage) and information about the current nodes that form the cluster - (number, roles, os, jvm versions, memory usage, cpu and installed plugins). + Get cluster statistics. Get basic index metrics (shard numbers, store size, memory + usage) and information about the current nodes that form the cluster (number, + roles, os, jvm versions, memory usage, cpu and installed plugins). ``_ diff --git a/elasticsearch/_sync/client/enrich.py b/elasticsearch/_sync/client/enrich.py index 6a855c402..766134939 100644 --- a/elasticsearch/_sync/client/enrich.py +++ b/elasticsearch/_sync/client/enrich.py @@ -77,7 +77,7 @@ def execute_policy( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates the enrich index for an existing enrich policy. + Run an enrich policy. Create the enrich index for an existing enrich policy. ``_ diff --git a/elasticsearch/_sync/client/eql.py b/elasticsearch/_sync/client/eql.py index 63ef319fb..55d9a6d62 100644 --- a/elasticsearch/_sync/client/eql.py +++ b/elasticsearch/_sync/client/eql.py @@ -36,8 +36,8 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async EQL search or a stored synchronous EQL search. The API also - deletes results for the search. + Delete an async EQL search. Delete an async EQL search or a stored synchronous + EQL search. The API also deletes results for the search. ``_ @@ -83,8 +83,8 @@ def get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current status and available results for an async EQL search or a - stored synchronous EQL search. + Get async EQL search results. Get the current status and available results for + an async EQL search or a stored synchronous EQL search. ``_ @@ -134,8 +134,8 @@ def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current status for an async EQL search or a stored synchronous EQL - search without returning results. + Get the async EQL status. Get the current status for an async EQL search or a + stored synchronous EQL search without returning results. ``_ @@ -225,7 +225,9 @@ def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns results matching a query expressed in Event Query Language (EQL) + Get EQL search results. Returns search results for an Event Query Language (EQL) + query. EQL assumes each document in a data stream or index corresponds to an + event. 
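Editor's note: a minimal sketch of the reworded EQL search call, assuming a local cluster; the data stream name and the process query are illustrative placeholders:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# Each matching document in the data stream is treated as an event.
resp = client.eql.search(
    index="my-data-stream",
    query='process where process.name == "regsvr32.exe"',
)
print(resp["hits"]["total"]["value"])
```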
``_ diff --git a/elasticsearch/_sync/client/esql.py b/elasticsearch/_sync/client/esql.py index 19d8c71d5..6670f262a 100644 --- a/elasticsearch/_sync/client/esql.py +++ b/elasticsearch/_sync/client/esql.py @@ -68,7 +68,8 @@ def query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes an ES|QL request + Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) + query. ``_ diff --git a/elasticsearch/_sync/client/fleet.py b/elasticsearch/_sync/client/fleet.py index 0cd678dbd..786ce206b 100644 --- a/elasticsearch/_sync/client/fleet.py +++ b/elasticsearch/_sync/client/fleet.py @@ -46,8 +46,8 @@ def global_checkpoints( wait_for_index: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current global checkpoints for an index. This API is design for internal - use by the fleet server project. + Get global checkpoints. Get the current global checkpoints for an index. This + API is designed for internal use by the Fleet server project. ``_ @@ -132,10 +132,9 @@ def msearch( wait_for_checkpoints: t.Optional[t.Sequence[int]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes several [fleet searches](https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html) - with a single API request. The API follows the same structure as the [multi search](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html) - API. However, similar to the fleet search API, it supports the wait_for_checkpoints - parameter. + Run multiple Fleet searches. Run several Fleet searches with a single API request. + The API follows the same structure as the multi search API. However, similar + to the Fleet search API, it supports the `wait_for_checkpoints` parameter. :param searches: :param index: A single target to search. If the target is an index alias, it must resolve to a single index. @@ -377,9 +376,9 @@ def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The purpose of the fleet search api is to provide a search api where the search - will only be executed after provided checkpoint has been processed and is visible - for searches inside of Elasticsearch. + Run a Fleet search. The purpose of the Fleet search API is to provide an API + where the search will be run only after the provided checkpoint has been processed + and is visible for searches inside of Elasticsearch. :param index: A single target to search. If the target is an index alias, it must resolve to a single index. diff --git a/elasticsearch/_sync/client/graph.py b/elasticsearch/_sync/client/graph.py index f411f3242..82b95096f 100644 --- a/elasticsearch/_sync/client/graph.py +++ b/elasticsearch/_sync/client/graph.py @@ -45,8 +45,14 @@ def explore( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Extracts and summarizes information about the documents and terms in an Elasticsearch - data stream or index. + Explore graph analytics. Extract and summarize information about the documents + and terms in an Elasticsearch data stream or index. The easiest way to understand + the behavior of this API is to use the Graph UI to explore connections. An initial + request to the `_explore` API contains a seed query that identifies the documents + of interest and specifies the fields that define the vertices and connections + you want to include in the graph. Subsequent requests enable you to spider out + from one or more vertices of interest.
You can exclude vertices that have already + been returned. ``_ diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index b0dae49b4..2d5368773 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -1280,6 +1280,7 @@ def exists_alias( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ @@ -1300,6 +1301,9 @@ def exists_alias( as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. :param ignore_unavailable: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -1325,6 +1329,8 @@ def exists_alias( __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -1349,7 +1355,7 @@ def exists_index_template( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Returns information about whether a particular index template exists. + Check index templates. Check whether index templates exist. ``_ @@ -1868,6 +1874,7 @@ def get_alias( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1889,6 +1896,9 @@ def get_alias( as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH and name not in SKIP_IN_PATH: @@ -1916,6 +1926,8 @@ def get_alias( __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -3678,8 +3690,8 @@ def resolve_index( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resolves the specified name(s) and/or index patterns for indices, aliases, and - data streams. Multiple patterns and remote clusters are supported. + Resolve indices. Resolve the names and/or index patterns for indices, aliases, + and data streams. Multiple patterns and remote clusters are supported. 
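Editor's note: a quick sketch of the renamed `resolve_index` summary in action; the `logs-*` pattern is a placeholder, and a remote pattern such as `cluster*:logs-*` would work the same way:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# Resolve a wildcard into the concrete indices, aliases, and data
# streams it matches.
resp = client.indices.resolve_index(name="logs-*")
for entry in resp["indices"]:
    print(entry["name"])
```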
diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py
index 780db0aec..2fc2a8de6 100644
--- a/elasticsearch/_sync/client/inference.py
+++ b/elasticsearch/_sync/client/inference.py
@@ -20,19 +20,12 @@
from elastic_transport import ObjectApiResponse
from ._base import NamespacedClient
-from .utils import (
- SKIP_IN_PATH,
- Stability,
- _quote,
- _rewrite_parameters,
- _stability_warning,
-)
+from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters
class InferenceClient(NamespacedClient):
@_rewrite_parameters()
- @_stability_warning(Stability.EXPERIMENTAL)
def delete(
self,
*,
@@ -100,7 +93,6 @@
)
@_rewrite_parameters()
- @_stability_warning(Stability.EXPERIMENTAL)
def get(
self,
*,
@@ -159,7 +151,6 @@
@_rewrite_parameters(
body_fields=("input", "query", "task_settings"),
)
- @_stability_warning(Stability.EXPERIMENTAL)
def inference(
self,
*,
@@ -246,7 +237,6 @@
@_rewrite_parameters(
body_name="inference_config",
)
- @_stability_warning(Stability.EXPERIMENTAL)
def put(
self,
*,
diff --git a/elasticsearch/_sync/client/ingest.py b/elasticsearch/_sync/client/ingest.py
index 2874acea5..445b5fe61 100644
--- a/elasticsearch/_sync/client/ingest.py
+++ b/elasticsearch/_sync/client/ingest.py
@@ -38,7 +38,8 @@ def delete_geoip_database(
timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Deletes a geoip database configuration.
+ Delete GeoIP database configurations. Delete one or more IP geolocation database
+ configurations.
``_
@@ -89,7 +90,7 @@ def delete_pipeline(
timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Deletes one or more existing ingest pipeline.
+ Delete pipelines. Delete one or more ingest pipelines.
``_
@@ -138,7 +139,8 @@ def geo_ip_stats(
pretty: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Gets download statistics for GeoIP2 databases used with the geoip processor.
+ Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used
+ with the GeoIP processor.
``_
"""
@@ -175,7 +177,8 @@ def get_geoip_database(
pretty: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Returns information about one or more geoip database configurations.
+ Get GeoIP database configurations. Get information about one or more IP geolocation
+ database configurations.
``_
@@ -227,8 +230,8 @@ def get_pipeline(
summary: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Returns information about one or more ingest pipelines. This API returns a local
- reference of the pipeline.
+ Get pipelines. Get information about one or more ingest pipelines. This API returns
+ a local reference of the pipeline.
``_
@@ -279,10 +282,10 @@ def processor_grok(
pretty: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Extracts structured fields out of a single text field within a document. You
- choose which field to extract matched fields from, as well as the grok pattern
- you expect will match. A grok pattern is like a regular expression that supports
- aliased expressions that can be reused.
+ Run a grok processor. Extract structured fields out of a single text field within
+ a document. You must choose which field to extract matched fields from, as well
+ as the grok pattern you expect will match. A grok pattern is like a regular expression
+ that supports aliased expressions that can be reused.
``_
"""
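Since `processor_grok` takes no arguments and simply returns the bundle of built-in grok patterns, a quick sketch of a call, assuming a local cluster; the `"IP"` lookup is one of the standard pattern aliases:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # hypothetical local cluster

# Fetch the built-in grok patterns; each entry maps an alias such as "IP"
# to the regular expression it expands to.
resp = client.ingest.processor_grok()
print(resp["patterns"]["IP"])
```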
``_ """ @@ -325,7 +328,8 @@ def put_geoip_database( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about one or more geoip database configurations. + Create or update GeoIP database configurations. Create or update IP geolocation + database configurations. ``_ @@ -411,8 +415,7 @@ def put_pipeline( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates an ingest pipeline. Changes made using this API take effect - immediately. + Create or update a pipeline. Changes made using this API take effect immediately. ``_ @@ -504,7 +507,9 @@ def simulate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes an ingest pipeline against a set of provided documents. + Simulate a pipeline. Run an ingest pipeline against a set of provided documents. + You can either specify an existing pipeline to use with the provided documents + or supply a pipeline definition in the body of the request. ``_ diff --git a/elasticsearch/_sync/client/nodes.py b/elasticsearch/_sync/client/nodes.py index 97871cb53..0a6a4af65 100644 --- a/elasticsearch/_sync/client/nodes.py +++ b/elasticsearch/_sync/client/nodes.py @@ -44,8 +44,8 @@ def clear_repositories_metering_archive( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - You can use this API to clear the archived repositories metering information - in the cluster. + Clear the archived repositories metering. Clear the archived repositories metering + information in the cluster. ``_ @@ -94,11 +94,11 @@ def get_repositories_metering_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - You can use the cluster repositories metering API to retrieve repositories metering - information in a cluster. This API exposes monotonically non-decreasing counters - and it’s expected that clients would durably store the information needed to - compute aggregations over a period of time. Additionally, the information exposed - by this API is volatile, meaning that it won’t be present after node restarts. + Get cluster repositories metering. Get repositories metering information for + a cluster. This API exposes monotonically non-decreasing counters and it is expected + that clients would durably store the information needed to compute aggregations + over a period of time. Additionally, the information exposed by this API is volatile, + meaning that it will not be present after node restarts. ``_ @@ -151,8 +151,9 @@ def hot_threads( ] = None, ) -> TextApiResponse: """ - This API yields a breakdown of the hot threads on each selected node in the cluster. - The output is plain text with a breakdown of each node’s top hot threads. + Get the hot threads for nodes. Get a breakdown of the hot threads on each selected + node in the cluster. The output is plain text with a breakdown of the top hot + threads for each node. ``_ @@ -227,7 +228,8 @@ def info( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster nodes information. + Get node information. By default, the API returns all attributes and core settings + for cluster nodes. ``_ @@ -296,7 +298,18 @@ def reload_secure_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reloads the keystore on nodes in the cluster. + Reload the keystore on nodes in the cluster. Secure settings are stored in an + on-disk keystore. Certain of these settings are reloadable. 
@@ -367,7 +380,8 @@ def stats(
types: t.Optional[t.Sequence[str]] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Returns cluster nodes statistics.
+ Get node statistics. Get statistics for nodes in a cluster. By default, all stats
+ are returned. You can limit the returned information by using metrics.
``_
@@ -484,7 +498,7 @@ def usage(
timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Returns information on the usage of features.
+ Get feature usage information.
``_
diff --git a/elasticsearch/_sync/client/query_rules.py b/elasticsearch/_sync/client/query_rules.py
index 48d4ae70a..24d3b2826 100644
--- a/elasticsearch/_sync/client/query_rules.py
+++ b/elasticsearch/_sync/client/query_rules.py
@@ -37,7 +37,7 @@ def delete_rule(
pretty: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Deletes a query rule within a query ruleset.
+ Delete a query rule. Delete a query rule within a query ruleset.
``_
@@ -85,7 +85,7 @@ def delete_ruleset(
pretty: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Deletes a query ruleset.
+ Delete a query ruleset.
``_
@@ -126,7 +126,7 @@ def get_rule(
pretty: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Returns the details about a query rule within a query ruleset
+ Get a query rule. Get details about a query rule within a query ruleset.
``_
@@ -174,7 +174,7 @@ def get_ruleset(
pretty: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Returns the details about a query ruleset
+ Get a query ruleset. Get details about a query ruleset.
``_
@@ -217,7 +217,7 @@ def list_rulesets(
size: t.Optional[int] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Returns summarized information about existing query rulesets.
+ Get all query rulesets. Get summarized information about the query rulesets.
``_
@@ -270,7 +270,7 @@ def put_rule(
body: t.Optional[t.Dict[str, t.Any]] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Creates or updates a query rule within a query ruleset.
+ Create or update a query rule. Create or update a query rule within a query ruleset.
``_
@@ -345,7 +345,7 @@ def put_ruleset(
body: t.Optional[t.Dict[str, t.Any]] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Creates or updates a query ruleset.
+ Create or update a query ruleset.
``_
@@ -398,7 +398,8 @@ def test(
body: t.Optional[t.Dict[str, t.Any]] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Creates or updates a query ruleset.
+ Test a query ruleset. Evaluate match criteria against a query ruleset to identify
+ the rules that would match that criteria.
``_
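The corrected `test` summary translates into a call like the following sketch, which evaluates criteria without running a real search; the ruleset id and criteria values are hypothetical:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # hypothetical local cluster

# Evaluate hypothetical match criteria against an existing ruleset.
resp = client.query_rules.test(
    ruleset_id="my-ruleset",  # hypothetical ruleset
    match_criteria={"query_string": "pugs"},
)
print(resp["total_matched_rules"])
```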
diff --git a/elasticsearch/_sync/client/sql.py b/elasticsearch/_sync/client/sql.py
index bd8afc05e..bf190210a 100644
--- a/elasticsearch/_sync/client/sql.py
+++ b/elasticsearch/_sync/client/sql.py
@@ -39,7 +39,7 @@ def clear_cursor(
body: t.Optional[t.Dict[str, t.Any]] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Clears the SQL cursor
+ Clear an SQL search cursor.
``_
@@ -84,8 +84,8 @@ def delete_async(
pretty: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Deletes an async SQL search or a stored synchronous SQL search. If the search
- is still running, the API cancels it.
+ Delete an async SQL search. Delete an async SQL search or a stored synchronous
+ SQL search. If the search is still running, the API cancels it.
``_
@@ -131,8 +131,8 @@ def get_async(
] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Returns the current status and available results for an async SQL search or stored
- synchronous SQL search
+ Get async SQL search results. Get the current status and available results for
+ an async SQL search or stored synchronous SQL search.
``_
@@ -189,8 +189,8 @@ def get_async_status(
pretty: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Returns the current status of an async SQL search or a stored synchronous SQL
- search
+ Get the async SQL search status. Get the current status of an async SQL search
+ or a stored synchronous SQL search.
``_
@@ -273,7 +273,7 @@ def query(
body: t.Optional[t.Dict[str, t.Any]] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Executes a SQL request
+ Get SQL search results. Run an SQL request.
``_
@@ -383,7 +383,8 @@ def translate(
body: t.Optional[t.Dict[str, t.Any]] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Translates SQL into Elasticsearch queries
+ Translate SQL into Elasticsearch queries. Translate an SQL search into a search
+ API request containing Query DSL.
``_
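A short sketch pairing the two SQL entry points renamed above; the index and SQL statement are hypothetical:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # hypothetical local cluster

sql = "SELECT author, name FROM library ORDER BY page_count DESC LIMIT 5"

# Run an SQL request and fetch tabular results.
resp = client.sql.query(query=sql)
print(resp["columns"], resp["rows"])

# Translate the same SQL into the Query DSL request it would run.
print(client.sql.translate(query=sql))
```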
diff --git a/elasticsearch/_sync/client/synonyms.py b/elasticsearch/_sync/client/synonyms.py
index 9e2b66ee6..453a85a7d 100644
--- a/elasticsearch/_sync/client/synonyms.py
+++ b/elasticsearch/_sync/client/synonyms.py
@@ -36,7 +36,7 @@ def delete_synonym(
pretty: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Deletes a synonym set
+ Delete a synonym set.
``_
@@ -77,7 +77,7 @@ def delete_synonym_rule(
pretty: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Deletes a synonym rule in a synonym set
+ Delete a synonym rule. Delete a synonym rule from a synonym set.
``_
@@ -127,7 +127,7 @@ def get_synonym(
size: t.Optional[int] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Retrieves a synonym set
+ Get a synonym set.
``_
@@ -174,7 +174,7 @@ def get_synonym_rule(
pretty: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Retrieves a synonym rule from a synonym set
+ Get a synonym rule. Get a synonym rule from a synonym set.
``_
@@ -223,7 +223,7 @@ def get_synonyms_sets(
size: t.Optional[int] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Retrieves a summary of all defined synonym sets
+ Get all synonym sets. Get a summary of all defined synonym sets.
``_
@@ -272,7 +272,9 @@ def put_synonym(
body: t.Optional[t.Dict[str, t.Any]] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Creates or updates a synonym set.
+ Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000
+ synonym rules per set. If you need to manage more synonym rules, you can create
+ multiple synonym sets.
``_
@@ -325,7 +327,8 @@ def put_synonym_rule(
body: t.Optional[t.Dict[str, t.Any]] = None,
) -> ObjectApiResponse[t.Any]:
"""
- Creates or updates a synonym rule in a synonym set
+ Create or update a synonym rule. Create or update a synonym rule in a synonym
+ set.
``_
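Finally, a sketch of the create/update pair for synonym sets and rules; the set id, rule id, and synonym strings are hypothetical:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # hypothetical local cluster

# Create (or replace) a small synonym set.
client.synonyms.put_synonym(
    id="my-synonyms",  # hypothetical set id
    synonyms_set=[{"id": "rule-1", "synonyms": "hello, hi"}],
)

# Update a single rule inside that set without resending the whole set.
client.synonyms.put_synonym_rule(
    set_id="my-synonyms",
    rule_id="rule-1",
    synonyms="hello, hi, howdy",
)
```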