From 29c6445dcce616f176f66118f07b4a776e5ee5c2 Mon Sep 17 00:00:00 2001 From: Raman Saparkhan <109868980+roma2023@users.noreply.github.com> Date: Fri, 15 Sep 2023 10:07:52 +0300 Subject: [PATCH 01/21] [CCI][GUIDE] Minor fixes to poetry docs (#494) * minor fixes to poetry docs Signed-off-by: Raman Saparkhan * updated CHANGELOG.md Signed-off-by: Raman Saparkhan * updated CHANGELOG.md Signed-off-by: Raman Saparkhan * fixed CHANGELOG.md Signed-off-by: Raman Saparkhan --------- Signed-off-by: Raman Saparkhan --- CHANGELOG.md | 1 + guides/plugins/knn.md | 6 +++--- samples/README.md | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 040fb8ab..762f2f5a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -56,6 +56,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Include parsed error info in `TransportError` in async connections ([#226](https://github.com/opensearch-project/opensearch-py/pull/226)) - Enhanced existing API generator to use OpenSearch OpenAPI spec ([#412](https://github.com/opensearch-project/opensearch-py/pull/412)) - Fix crash when attempting to authenticate with an async connection ([#424](https://github.com/opensearch-project/opensearch-py/pull/424)) +- Fixed poetry run command issue on Windows/Mac machines ([#494](https://github.com/opensearch-project/opensearch-py/pull/494)) ### Security - Fixed CVE-2022-23491 reported in opensearch-dsl-py ([#295](https://github.com/opensearch-project/opensearch-py/pull/295)) ### Dependencies diff --git a/guides/plugins/knn.md b/guides/plugins/knn.md index 7a3e6977..a7775c88 100644 --- a/guides/plugins/knn.md +++ b/guides/plugins/knn.md @@ -15,7 +15,7 @@ Short for k-nearest neighbors, the k-NN plugin enables users to search for the k In the following example we create a 5-dimensional k-NN index with random data. You can find a synchronous version of this working sample in [samples/knn/knn-basics.py](../../samples/knn/knn-basics.py) and an asynchronous one in [samples/knn/knn-async-basics.py](../../samples/knn/knn-async-basics.py). ```bash -$ poetry run knn/knn-basics.py +$ poetry run python knn/knn-basics.py Searching for [0.61, 0.05, 0.16, 0.75, 0.49] ... {'_index': 'my-index', '_id': '3', '_score': 0.9252405, '_source': {'values': [0.64, 0.3, 0.27, 0.68, 0.51]}} @@ -96,7 +96,7 @@ for hit in results["hits"]["hits"]: In [the boolean-filter.py sample](../../samples/knn/knn-boolean-filter.py) we create a 5-dimensional k-NN index with random data and a `metadata` field that contains a book genre (e.g. `fiction`). The search query is a k-NN search filtered by genre. The filter clause is outside the k-NN query clause and is applied after the k-NN search. ```bash -$ poetry run knn/knn-boolean-filter.py +$ poetry run python knn/knn-boolean-filter.py Searching for [0.08, 0.42, 0.04, 0.76, 0.41] with the 'romance' genre ... @@ -109,7 +109,7 @@ Searching for [0.08, 0.42, 0.04, 0.76, 0.41] with the 'romance' genre ... In [the lucene-filter.py sample](../../samples/knn/knn-efficient-filter.py) we implement the example in [the k-NN documentation](https://opensearch.org/docs/latest/search-plugins/knn/filter-search-knn/), which creates an index that uses the Lucene engine and HNSW as the method in the mapping, containing hotel location and parking data, then search for the top three hotels near the location with the coordinates `[5, 4]` that are rated between 8 and 10, inclusive, and provide parking. 
```bash -$ poetry run knn/knn-efficient-filter.py +$ poetry run python knn/knn-efficient-filter.py {'_index': 'hotels-index', '_id': '3', '_score': 0.72992706, '_source': {'location': [4.9, 3.4], 'parking': 'true', 'rating': 9}} {'_index': 'hotels-index', '_id': '6', '_score': 0.3012048, '_source': {'location': [6.4, 3.4], 'parking': 'true', 'rating': 9}} diff --git a/samples/README.md b/samples/README.md index ad431cd8..b6e72f2c 100644 --- a/samples/README.md +++ b/samples/README.md @@ -15,5 +15,5 @@ Install [poetry](https://python-poetry.org/docs/). ``` poetry install -poetry run hello/hello.py +poetry run python hello/hello.py ``` From 69750e82d8d3e2522d94984ce373a86da821f399 Mon Sep 17 00:00:00 2001 From: "Daniel (dB.) Doubrovkine" Date: Fri, 15 Sep 2023 14:51:55 -0400 Subject: [PATCH 02/21] Fix: typo. (#497) Signed-off-by: dblock --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 4b672604..3287c11b 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -16,7 +16,7 @@ jobs: - { os: 'ubuntu-latest', python-version: "3.11" } - { os: 'macos-latest', python-version: "3.11" } - name: test (ruby=${{ matrix.entry.os }}, python=${{ matrix.entry.python-version }}) + name: test (os=${{ matrix.entry.os }}, python=${{ matrix.entry.python-version }}) continue-on-error: ${{ matrix.entry.experimental || false }} runs-on: ${{ matrix.entry.os }} steps: From c6c7df54613fc1a12ce429201ee031b5b4d55e0b Mon Sep 17 00:00:00 2001 From: Sai Medhini Reddy Maryada <117196660+saimedhi@users.noreply.github.com> Date: Tue, 26 Sep 2023 09:46:18 -0700 Subject: [PATCH 03/21] Updated APIs to match other clients and opensearch openapi spec (#502) Signed-off-by: saimedhi --- CHANGELOG.md | 2 + opensearchpy/_async/client/__init__.py | 89 +++++++----- opensearchpy/_async/client/__init__.pyi | 35 +++-- opensearchpy/_async/client/_patch.py | 135 ++++++++++++++++++ opensearchpy/_async/client/_patch.pyi | 70 +++++++++ opensearchpy/_async/client/security.py | 19 +-- opensearchpy/_async/client/security.pyi | 39 ++++- opensearchpy/client/__init__.py | 88 +++++++----- opensearchpy/client/__init__.pyi | 35 +++-- opensearchpy/client/_patch.py | 133 +++++++++++++++++ opensearchpy/client/_patch.pyi | 69 +++++++++ opensearchpy/client/security.py | 19 +-- opensearchpy/client/security.pyi | 43 ++++-- .../test_security_plugin.py | 52 +++++++ .../test_client/test_point_in_time.py | 17 +++ .../test_security_plugin.py | 52 +++++++ utils/generate-api.py | 16 ++- 17 files changed, 786 insertions(+), 127 deletions(-) create mode 100644 opensearchpy/_async/client/_patch.py create mode 100644 opensearchpy/_async/client/_patch.pyi create mode 100644 opensearchpy/client/_patch.py create mode 100644 opensearchpy/client/_patch.pyi diff --git a/CHANGELOG.md b/CHANGELOG.md index 762f2f5a..5de04d90 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,8 +4,10 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ## [Unreleased] ### Added - Added generating imports and headers to API generator ([#467](https://github.com/opensearch-project/opensearch-py/pull/467)) +- Added point-in-time APIs (create_pit, delete_pit, delete_all_pits, get_all_pits) and Security Client APIs (health and update_audit_configuration) ([#502](https://github.com/opensearch-project/opensearch-py/pull/502)) ### Changed ### Deprecated +- Deprecated point-in-time APIs (list_all_point_in_time, create_point_in_time, delete_point_in_time) and 
Security Client APIs (health_check and update_audit_config) ([#502](https://github.com/opensearch-project/opensearch-py/pull/502)) ### Removed ### Fixed ### Security diff --git a/opensearchpy/_async/client/__init__.py b/opensearchpy/_async/client/__init__.py index 57f56b0f..2440b291 100644 --- a/opensearchpy/_async/client/__init__.py +++ b/opensearchpy/_async/client/__init__.py @@ -172,6 +172,12 @@ def default(self, obj): """ + from ._patch import ( + create_point_in_time, + delete_point_in_time, + list_all_point_in_time, + ) + def __init__(self, hosts=None, transport_class=AsyncTransport, **kwargs): """ :arg hosts: list of nodes, or a single node, we should connect to. @@ -1955,64 +1961,73 @@ async def get_script_languages(self, params=None, headers=None): "GET", "/_script_language", params=params, headers=headers ) - @query_params() - async def list_all_point_in_time(self, params=None, headers=None): + @query_params( + "allow_partial_pit_creation", + "expand_wildcards", + "keep_alive", + "preference", + "routing", + ) + async def create_pit(self, index, params=None, headers=None): """ - Returns the list of point in times which are alive + Creates point in time context. + + + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. + :arg allow_partial_pit_creation: Allow if point in time can be + created with partial failures. + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: all, + open, closed, hidden, none + :arg keep_alive: Specify the keep alive for point in time. + :arg preference: Specify the node or shard the operation should + be performed on. + :arg routing: Comma-separated list of specific routing values. """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + return await self.transport.perform_request( - "GET", - _make_path("_search", "point_in_time", "_all"), + "POST", + _make_path(index, "_search", "point_in_time"), params=params, headers=headers, ) @query_params() - async def delete_point_in_time( - self, body=None, all=False, params=None, headers=None - ): + async def delete_all_pits(self, params=None, headers=None): """ - Delete a point in time - + Deletes all active point in time searches. - :arg body: a point-in-time id to delete - :arg all: set it to `True` to delete all alive point in time. """ - path = ( - _make_path("_search", "point_in_time", "_all") - if all - else _make_path("_search", "point_in_time") - ) return await self.transport.perform_request( - "DELETE", path, params=params, headers=headers, body=body + "DELETE", "/_search/point_in_time/_all", params=params, headers=headers ) - @query_params( - "expand_wildcards", "ignore_unavailable", "keep_alive", "preference", "routing" - ) - async def create_point_in_time(self, index=None, params=None, headers=None): + @query_params() + async def delete_pit(self, body=None, params=None, headers=None): """ - Create a point in time that can be used in subsequent searches + Deletes one or more point in time searches based on the IDs passed. - :arg index: A comma-separated list of index names to create point - in time; use `_all` or empty string to perform the operation on all - indices - :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. 
Valid choices: open, - closed, hidden, none, all Default: open - :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg keep_alive: Specific the time to live for the point in time - :arg preference: Specify the node or shard the operation should - be performed on (default: random) - :arg routing: Specific routing value + :arg body: a point-in-time id to delete """ return await self.transport.perform_request( - "POST", - _make_path(index, "_search", "point_in_time"), + "DELETE", + "/_search/point_in_time", params=params, headers=headers, + body=body, + ) + + @query_params() + async def get_all_pits(self, params=None, headers=None): + """ + Lists all active point in time searches. + """ + return await self.transport.perform_request( + "GET", "/_search/point_in_time/_all", params=params, headers=headers ) @query_params() diff --git a/opensearchpy/_async/client/__init__.pyi b/opensearchpy/_async/client/__init__.pyi index 27a47ed9..a016d791 100644 --- a/opensearchpy/_async/client/__init__.pyi +++ b/opensearchpy/_async/client/__init__.pyi @@ -1057,7 +1057,29 @@ class AsyncOpenSearch(object): params: Optional[MutableMapping[str, Any]] = ..., headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... - async def list_all_point_in_time( + async def create_pit( + self, + index: Any, + *, + allow_partial_pit_creation: Optional[Any] = ..., + expand_wildcards: Optional[Any] = ..., + keep_alive: Optional[Any] = ..., + preference: Optional[Any] = ..., + routing: Optional[Any] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + async def delete_all_pits( self, *, pretty: Optional[bool] = ..., @@ -1073,11 +1095,10 @@ class AsyncOpenSearch(object): params: Optional[MutableMapping[str, Any]] = ..., headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... - async def delete_point_in_time( + async def delete_pit( self, *, body: Optional[Any] = ..., - all: Optional[bool] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -1091,15 +1112,9 @@ class AsyncOpenSearch(object): params: Optional[MutableMapping[str, Any]] = ..., headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... - async def create_point_in_time( + async def get_all_pits( self, *, - index: Optional[Any] = ..., - expand_wildcards: Optional[Any] = ..., - ignore_unavailable: Optional[Any] = ..., - keep_alive: Optional[Any] = ..., - preference: Optional[Any] = ..., - routing: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., diff --git a/opensearchpy/_async/client/_patch.py b/opensearchpy/_async/client/_patch.py new file mode 100644 index 00000000..b1b00942 --- /dev/null +++ b/opensearchpy/_async/client/_patch.py @@ -0,0 +1,135 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. 
+# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. + +import warnings + +from .utils import SKIP_IN_PATH, query_params + + +@query_params() +async def list_all_point_in_time(self, params=None, headers=None): + """ + Returns the list of active point in times searches + + .. warning:: + + This API will be removed in a future version + Use 'get_all_pits' API instead. + + """ + warnings.warn( + "The 'list_all_point_in_time' API is deprecated and will be removed in a future version. Use 'get_all_pits' API instead.", + DeprecationWarning, + ) + + return await self.get_all_pits(params=params, headers=headers) + + +@query_params( + "expand_wildcards", "ignore_unavailable", "keep_alive", "preference", "routing" +) +async def create_point_in_time(self, index, params=None, headers=None): + """ + Create a point in time that can be used in subsequent searches + + + :arg index: A comma-separated list of index names to open point + in time; use `_all` or empty string to perform the operation on all + indices + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg keep_alive: Specific the time to live for the point in time + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg routing: Specific routing value + + .. warning:: + + This API will be removed in a future version + Use 'create_pit' API instead. + + """ + warnings.warn( + "The 'create_point_in_time' API is deprecated and will be removed in a future version. Use 'create_pit' API instead.", + DeprecationWarning, + ) + + return await self.create_pit(index=index, params=params, headers=headers) + + +@query_params() +async def delete_point_in_time(self, body=None, all=False, params=None, headers=None): + """ + Delete a point in time + + + :arg body: a point-in-time id to delete + :arg all: set it to `True` to delete all alive point in time. + + .. warning:: + + This API will be removed in a future version + Use 'delete_all_pits' or 'delete_pit' API instead. + + """ + warnings.warn( + "The 'delete_point_in_time' API is deprecated and will be removed in a future version. Use 'delete_all_pits' or 'delete_pit' API instead.", + DeprecationWarning, + ) + + if all: + return await self.delete_all_pits(params=params, headers=headers) + else: + return await self.delete_pit(body=body, params=params, headers=headers) + + +@query_params() +async def health_check(self, params=None, headers=None): + """ + Checks to see if the Security plugin is up and running. + + .. warning:: + + This API will be removed in a future version + Use 'health' API instead. + + """ + warnings.warn( + "The 'health_check' API in security client is deprecated and will be removed in a future version. Use 'health' API instead.", + DeprecationWarning, + ) + + return await self.health(params=params, headers=headers) + + +@query_params() +async def update_audit_config(self, body, params=None, headers=None): + """ + A PUT call updates the audit configuration. + + .. warning:: + + This API will be removed in a future version + Use 'update_audit_configuration' API instead. + + """ + warnings.warn( + "The 'update_audit_config' API in security client is deprecated and will be removed in a future version. 
Use 'update_audit_configuration' API instead.", + DeprecationWarning, + ) + + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return await self.update_audit_configuration( + params=params, headers=headers, body=body + ) diff --git a/opensearchpy/_async/client/_patch.pyi b/opensearchpy/_async/client/_patch.pyi new file mode 100644 index 00000000..1912c180 --- /dev/null +++ b/opensearchpy/_async/client/_patch.pyi @@ -0,0 +1,70 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. + +from typing import Any, Collection, MutableMapping, Optional, Tuple, Type, Union + +async def list_all_point_in_time( + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., +) -> Any: ... +async def create_point_in_time( + *, + index: Optional[Any] = ..., + expand_wildcards: Optional[Any] = ..., + ignore_unavailable: Optional[Any] = ..., + keep_alive: Optional[Any] = ..., + preference: Optional[Any] = ..., + routing: Optional[Any] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., +) -> Any: ... +async def delete_point_in_time( + *, + body: Optional[Any] = ..., + all: Optional[bool] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., +) -> Any: ... +async def health_check( + params: Union[Any, None] = ..., headers: Union[Any, None] = ... +) -> Union[bool, Any]: ... +async def update_audit_config( + body: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... +) -> Union[bool, Any]: ... diff --git a/opensearchpy/_async/client/security.py b/opensearchpy/_async/client/security.py index 65021765..bc8e8671 100644 --- a/opensearchpy/_async/client/security.py +++ b/opensearchpy/_async/client/security.py @@ -7,10 +7,12 @@ # Modifications Copyright OpenSearch Contributors. See # GitHub history for details. 
-from ..client.utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params +from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params class SecurityClient(NamespacedClient): + from ._patch import health_check, update_audit_config + @query_params() async def get_account_details(self, params=None, headers=None): """ @@ -648,15 +650,13 @@ async def flush_cache(self, params=None, headers=None): ) @query_params() - async def health_check(self, params=None, headers=None): + async def health(self, params=None, headers=None): """ Checks to see if the Security plugin is up and running. + """ return await self.transport.perform_request( - "GET", - _make_path("_plugins", "_security", "health"), - params=params, - headers=headers, + "GET", "/_plugins/_security/health", params=params, headers=headers ) @query_params() @@ -672,16 +672,17 @@ async def get_audit_configuration(self, params=None, headers=None): ) @query_params() - async def update_audit_config(self, body, params=None, headers=None): + async def update_audit_configuration(self, body, params=None, headers=None): """ - A PUT call updates the audit configuration. + Updates the audit configuration. + """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return await self.transport.perform_request( "PUT", - _make_path("_opendistro", "_security", "api", "audit", "config"), + "/_plugins/_security/api/audit/config", params=params, headers=headers, body=body, diff --git a/opensearchpy/_async/client/security.pyi b/opensearchpy/_async/client/security.pyi index 77239296..7840445a 100644 --- a/opensearchpy/_async/client/security.pyi +++ b/opensearchpy/_async/client/security.pyi @@ -6,9 +6,9 @@ # # Modifications Copyright OpenSearch Contributors. See # GitHub history for details. -from typing import Any, Union +from typing import Any, Collection, MutableMapping, Optional, Tuple, Union -from ..client.utils import NamespacedClient as NamespacedClient +from .utils import NamespacedClient as NamespacedClient class SecurityClient(NamespacedClient): async def get_account_details( @@ -192,14 +192,41 @@ class SecurityClient(NamespacedClient): async def flush_cache( self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... ) -> Any: ... - async def health_check( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + async def health( + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def get_audit_configuration( self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... ) -> Any: ... - async def update_audit_config( - self, body: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... 
+ async def update_audit_configuration( + self, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def patch_audit_configuration( self, body: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... diff --git a/opensearchpy/client/__init__.py b/opensearchpy/client/__init__.py index 1fe0c959..8f976879 100644 --- a/opensearchpy/client/__init__.py +++ b/opensearchpy/client/__init__.py @@ -173,6 +173,12 @@ def default(self, obj): """ + from ._patch import ( + create_point_in_time, + delete_point_in_time, + list_all_point_in_time, + ) + def __init__(self, hosts=None, transport_class=Transport, **kwargs): """ :arg hosts: list of nodes, or a single node, we should connect to. @@ -1955,63 +1961,73 @@ def get_script_languages(self, params=None, headers=None): "GET", "/_script_language", params=params, headers=headers ) - @query_params() - def list_all_point_in_time(self, params=None, headers=None): + @query_params( + "allow_partial_pit_creation", + "expand_wildcards", + "keep_alive", + "preference", + "routing", + ) + def create_pit(self, index, params=None, headers=None): """ - Returns the list of active point in times searches + Creates point in time context. + + + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. + :arg allow_partial_pit_creation: Allow if point in time can be + created with partial failures. + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: all, + open, closed, hidden, none + :arg keep_alive: Specify the keep alive for point in time. + :arg preference: Specify the node or shard the operation should + be performed on. + :arg routing: Comma-separated list of specific routing values. """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + return self.transport.perform_request( - "GET", - _make_path("_search", "point_in_time", "_all"), + "POST", + _make_path(index, "_search", "point_in_time"), params=params, headers=headers, ) @query_params() - def delete_point_in_time(self, body=None, all=False, params=None, headers=None): + def delete_all_pits(self, params=None, headers=None): """ - Delete a point in time - + Deletes all active point in time searches. - :arg body: a point-in-time id to delete - :arg all: set it to `True` to delete all alive point in time. 
""" - - path = ( - _make_path("_search", "point_in_time", "_all") - if all - else _make_path("_search", "point_in_time") - ) return self.transport.perform_request( - "DELETE", path, params=params, headers=headers, body=body + "DELETE", "/_search/point_in_time/_all", params=params, headers=headers ) - @query_params( - "expand_wildcards", "ignore_unavailable", "keep_alive", "preference", "routing" - ) - def create_point_in_time(self, index=None, params=None, headers=None): + @query_params() + def delete_pit(self, body=None, params=None, headers=None): """ - Create a point in time that can be used in subsequent searches + Deletes one or more point in time searches based on the IDs passed. - :arg index: A comma-separated list of index names to open point - in time; use `_all` or empty string to perform the operation on all - indices - :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open - :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg keep_alive: Specific the time to live for the point in time - :arg preference: Specify the node or shard the operation should - be performed on (default: random) - :arg routing: Specific routing value + :arg body: a point-in-time id to delete """ return self.transport.perform_request( - "POST", - _make_path(index, "_search", "point_in_time"), + "DELETE", + "/_search/point_in_time", params=params, headers=headers, + body=body, + ) + + @query_params() + def get_all_pits(self, params=None, headers=None): + """ + Lists all active point in time searches. + """ + return self.transport.perform_request( + "GET", "/_search/point_in_time/_all", params=params, headers=headers ) @query_params() diff --git a/opensearchpy/client/__init__.pyi b/opensearchpy/client/__init__.pyi index 64f21ca7..e1d1e359 100644 --- a/opensearchpy/client/__init__.pyi +++ b/opensearchpy/client/__init__.pyi @@ -1054,7 +1054,29 @@ class OpenSearch(object): params: Optional[MutableMapping[str, Any]] = ..., headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... - def list_all_point_in_time( + def create_pit( + self, + index: Any, + *, + allow_partial_pit_creation: Optional[Any] = ..., + expand_wildcards: Optional[Any] = ..., + keep_alive: Optional[Any] = ..., + preference: Optional[Any] = ..., + routing: Optional[Any] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def delete_all_pits( self, *, pretty: Optional[bool] = ..., @@ -1070,11 +1092,10 @@ class OpenSearch(object): params: Optional[MutableMapping[str, Any]] = ..., headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... 
- def delete_point_in_time( + def delete_pit( self, *, body: Optional[Any] = ..., - all: Optional[bool] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -1088,15 +1109,9 @@ class OpenSearch(object): params: Optional[MutableMapping[str, Any]] = ..., headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... - def create_point_in_time( + def get_all_pits( self, *, - index: Optional[Any] = ..., - expand_wildcards: Optional[Any] = ..., - ignore_unavailable: Optional[Any] = ..., - keep_alive: Optional[Any] = ..., - preference: Optional[Any] = ..., - routing: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., diff --git a/opensearchpy/client/_patch.py b/opensearchpy/client/_patch.py new file mode 100644 index 00000000..d92eae5a --- /dev/null +++ b/opensearchpy/client/_patch.py @@ -0,0 +1,133 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. + +import warnings + +from .utils import SKIP_IN_PATH, query_params + + +@query_params() +def list_all_point_in_time(self, params=None, headers=None): + """ + Returns the list of active point in times searches + + .. warning:: + + This API will be removed in a future version + Use 'get_all_pits' API instead. + + """ + warnings.warn( + "The 'list_all_point_in_time' API is deprecated and will be removed in a future version. Use 'get_all_pits' API instead.", + DeprecationWarning, + ) + + return self.get_all_pits(params=params, headers=headers) + + +@query_params( + "expand_wildcards", "ignore_unavailable", "keep_alive", "preference", "routing" +) +def create_point_in_time(self, index, params=None, headers=None): + """ + Create a point in time that can be used in subsequent searches + + + :arg index: A comma-separated list of index names to open point + in time; use `_all` or empty string to perform the operation on all + indices + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg keep_alive: Specific the time to live for the point in time + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg routing: Specific routing value + + .. warning:: + + This API will be removed in a future version + Use 'create_pit' API instead. + + """ + warnings.warn( + "The 'create_point_in_time' API is deprecated and will be removed in a future version. Use 'create_pit' API instead.", + DeprecationWarning, + ) + + return self.create_pit(index=index, params=params, headers=headers) + + +@query_params() +def delete_point_in_time(self, body=None, all=False, params=None, headers=None): + """ + Delete a point in time + + + :arg body: a point-in-time id to delete + :arg all: set it to `True` to delete all alive point in time. + + .. warning:: + + This API will be removed in a future version + Use 'delete_all_pits' or 'delete_pit' API instead. + + """ + warnings.warn( + "The 'delete_point_in_time' API is deprecated and will be removed in a future version. 
Use 'delete_all_pits' or 'delete_pit' API instead.", + DeprecationWarning, + ) + + if all: + return self.delete_all_pits(params=params, headers=headers) + else: + return self.delete_pit(body=body, params=params, headers=headers) + + +@query_params() +def health_check(self, params=None, headers=None): + """ + Checks to see if the Security plugin is up and running. + + .. warning:: + + This API will be removed in a future version + Use 'health' API instead. + + """ + warnings.warn( + "The 'health_check' API in security client is deprecated and will be removed in a future version. Use 'health' API instead.", + DeprecationWarning, + ) + + return self.health(params=params, headers=headers) + + +@query_params() +def update_audit_config(self, body, params=None, headers=None): + """ + A PUT call updates the audit configuration. + + .. warning:: + + This API will be removed in a future version + Use 'update_audit_configuration' API instead. + + """ + warnings.warn( + "The 'update_audit_config' API in security client is deprecated and will be removed in a future version. Use 'update_audit_configuration' API instead.", + DeprecationWarning, + ) + + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.update_audit_configuration(params=params, headers=headers, body=body) diff --git a/opensearchpy/client/_patch.pyi b/opensearchpy/client/_patch.pyi new file mode 100644 index 00000000..be6e12a0 --- /dev/null +++ b/opensearchpy/client/_patch.pyi @@ -0,0 +1,69 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. +from typing import Any, Collection, MutableMapping, Optional, Tuple, Type, Union + +def list_all_point_in_time( + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., +) -> Any: ... +def create_point_in_time( + *, + index: Optional[Any] = ..., + expand_wildcards: Optional[Any] = ..., + ignore_unavailable: Optional[Any] = ..., + keep_alive: Optional[Any] = ..., + preference: Optional[Any] = ..., + routing: Optional[Any] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., +) -> Any: ... 
+def delete_point_in_time( + *, + body: Optional[Any] = ..., + all: Optional[bool] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., +) -> Any: ... +def health_check( + params: Union[Any, None] = ..., headers: Union[Any, None] = ... +) -> Union[bool, Any]: ... +def update_audit_config( + body: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... +) -> Union[bool, Any]: ... diff --git a/opensearchpy/client/security.py b/opensearchpy/client/security.py index 288f6676..32a362b3 100644 --- a/opensearchpy/client/security.py +++ b/opensearchpy/client/security.py @@ -7,10 +7,12 @@ # Modifications Copyright OpenSearch Contributors. See # GitHub history for details. -from ..client.utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params +from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params class SecurityClient(NamespacedClient): + from ._patch import health_check, update_audit_config + @query_params() def get_account_details(self, params=None, headers=None): """ @@ -644,15 +646,13 @@ def flush_cache(self, params=None, headers=None): ) @query_params() - def health_check(self, params=None, headers=None): + def health(self, params=None, headers=None): """ Checks to see if the Security plugin is up and running. + """ return self.transport.perform_request( - "GET", - _make_path("_plugins", "_security", "health"), - params=params, - headers=headers, + "GET", "/_plugins/_security/health", params=params, headers=headers ) @query_params() @@ -668,16 +668,17 @@ def get_audit_configuration(self, params=None, headers=None): ) @query_params() - def update_audit_config(self, body, params=None, headers=None): + def update_audit_configuration(self, body, params=None, headers=None): """ - A PUT call updates the audit configuration. + Updates the audit configuration. + """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return self.transport.perform_request( "PUT", - _make_path("_opendistro", "_security", "api", "audit", "config"), + "/_plugins/_security/api/audit/config", params=params, headers=headers, body=body, diff --git a/opensearchpy/client/security.pyi b/opensearchpy/client/security.pyi index de50b8b2..c729d5d3 100644 --- a/opensearchpy/client/security.pyi +++ b/opensearchpy/client/security.pyi @@ -6,9 +6,9 @@ # # Modifications Copyright OpenSearch Contributors. See # GitHub history for details. -from typing import Any, Union +from typing import Any, Collection, MutableMapping, Optional, Tuple, Union -from ..client.utils import NamespacedClient as NamespacedClient +from .utils import NamespacedClient as NamespacedClient class SecurityClient(NamespacedClient): def get_account_details( @@ -192,15 +192,42 @@ class SecurityClient(NamespacedClient): def flush_cache( self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... ) -> Union[bool, Any]: ... - def health_check( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... 
+ def health( + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... def get_audit_configuration( self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... ) -> Union[bool, Any]: ... - def update_audit_config( - self, body: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... + def update_audit_configuration( + self, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... def patch_audit_configuration( self, body: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... ) -> Union[bool, Any]: ... diff --git a/test_opensearchpy/test_async/test_server_secured/test_security_plugin.py b/test_opensearchpy/test_async/test_server_secured/test_security_plugin.py index 39189c21..9b1f7a5f 100644 --- a/test_opensearchpy/test_async/test_server_secured/test_security_plugin.py +++ b/test_opensearchpy/test_async/test_server_secured/test_security_plugin.py @@ -174,3 +174,55 @@ async def test_delete_user(self): # Try fetching the user with self.assertRaises(NotFoundError): response = await self.client.security.get_user(self.USER_NAME) + + async def test_health_check(self): + response = await self.client.security.health_check() + self.assertNotIn("errors", response) + self.assertEqual("UP", response.get("status")) + + async def test_health(self): + response = await self.client.security.health() + self.assertNotIn("errors", response) + self.assertEqual("UP", response.get("status")) + + AUDIT_CONFIG_SETTINGS = { + "enabled": True, + "audit": { + "ignore_users": [], + "ignore_requests": [], + "disabled_rest_categories": ["AUTHENTICATED", "GRANTED_PRIVILEGES"], + "disabled_transport_categories": ["AUTHENTICATED", "GRANTED_PRIVILEGES"], + "log_request_body": False, + "resolve_indices": False, + "resolve_bulk_requests": False, + "exclude_sensitive_headers": True, + "enable_transport": False, + "enable_rest": True, + }, + "compliance": { + "enabled": True, + "write_log_diffs": False, + "read_watched_fields": {}, + "read_ignore_users": [], + "write_watched_indices": [], + "write_ignore_users": [], + "read_metadata_only": True, + "write_metadata_only": True, + "external_config": False, + "internal_config": True, + }, + } + + async def test_update_audit_config(self): + response = await self.client.security.update_audit_config( + body=self.AUDIT_CONFIG_SETTINGS + ) + self.assertNotIn("errors", response) + self.assertEqual("OK", response.get("status")) + + async def test_update_audit_configuration(self): + 
response = await self.client.security.update_audit_configuration( + body=self.AUDIT_CONFIG_SETTINGS + ) + self.assertNotIn("errors", response) + self.assertEqual("OK", response.get("status")) diff --git a/test_opensearchpy/test_client/test_point_in_time.py b/test_opensearchpy/test_client/test_point_in_time.py index 53742dbe..e8546484 100644 --- a/test_opensearchpy/test_client/test_point_in_time.py +++ b/test_opensearchpy/test_client/test_point_in_time.py @@ -27,3 +27,20 @@ def test_delete_all_point_in_time(self): def test_list_all_point_in_time(self): self.client.list_all_point_in_time() self.assert_url_called("GET", "/_search/point_in_time/_all") + + def test_create_pit(self): + index_name = "test-index" + self.client.create_pit(index=index_name) + self.assert_url_called("POST", "/test-index/_search/point_in_time") + + def test_delete_pit(self): + self.client.delete_pit(body={"pit_id": ["Sample-PIT-ID"]}) + self.assert_url_called("DELETE", "/_search/point_in_time") + + def test_delete_all_pits(self): + self.client.delete_all_pits() + self.assert_url_called("DELETE", "/_search/point_in_time/_all") + + def test_get_all_pits(self): + self.client.get_all_pits() + self.assert_url_called("GET", "/_search/point_in_time/_all") diff --git a/test_opensearchpy/test_server_secured/test_security_plugin.py b/test_opensearchpy/test_server_secured/test_security_plugin.py index 1f46712a..90283af8 100644 --- a/test_opensearchpy/test_server_secured/test_security_plugin.py +++ b/test_opensearchpy/test_server_secured/test_security_plugin.py @@ -164,3 +164,55 @@ def test_delete_user(self): # Try fetching the user with self.assertRaises(NotFoundError): response = self.client.security.get_user(self.USER_NAME) + + def test_health_check(self): + response = self.client.security.health_check() + self.assertNotIn("errors", response) + self.assertEqual("UP", response.get("status")) + + def test_health(self): + response = self.client.security.health() + self.assertNotIn("errors", response) + self.assertEqual("UP", response.get("status")) + + AUDIT_CONFIG_SETTINGS = { + "enabled": True, + "audit": { + "ignore_users": [], + "ignore_requests": [], + "disabled_rest_categories": ["AUTHENTICATED", "GRANTED_PRIVILEGES"], + "disabled_transport_categories": ["AUTHENTICATED", "GRANTED_PRIVILEGES"], + "log_request_body": False, + "resolve_indices": False, + "resolve_bulk_requests": False, + "exclude_sensitive_headers": True, + "enable_transport": False, + "enable_rest": True, + }, + "compliance": { + "enabled": True, + "write_log_diffs": False, + "read_watched_fields": {}, + "read_ignore_users": [], + "write_watched_indices": [], + "write_ignore_users": [], + "read_metadata_only": True, + "write_metadata_only": True, + "external_config": False, + "internal_config": True, + }, + } + + def test_update_audit_config(self): + response = self.client.security.update_audit_config( + body=self.AUDIT_CONFIG_SETTINGS + ) + self.assertNotIn("errors", response) + self.assertEqual("OK", response.get("status")) + + def test_update_audit_configuration(self): + response = self.client.security.update_audit_configuration( + body=self.AUDIT_CONFIG_SETTINGS + ) + self.assertNotIn("errors", response) + self.assertEqual("OK", response.get("status")) diff --git a/utils/generate-api.py b/utils/generate-api.py index 40aea2ba..f13a41b5 100644 --- a/utils/generate-api.py +++ b/utils/generate-api.py @@ -120,6 +120,13 @@ def parse_orig(self): for line in content.split("\n"): header_lines.append(line) if line.startswith("class"): + if ( + "security.py" in 
str(self.filepath) + and not self.filepath.suffix == ".pyi" + ): + header_lines.append( + " from ._patch import health_check, update_audit_config" + ) break self.header = "\n".join(header_lines) self.orders = re.findall( @@ -375,8 +382,12 @@ def method(self): # To adhere to the HTTP RFC we shouldn't send # bodies in GET requests. default_method = self.path["methods"][0] + if self.name == "refresh" or self.name == "flush": + return "POST" if self.body and default_method == "GET" and "POST" in self.path["methods"]: return "POST" + if "POST" and "PUT" in self.path["methods"] and self.name != "bulk": + return "PUT" return default_method @property @@ -437,8 +448,9 @@ def read_modules(): for path in data["paths"]: for x in data["paths"][path]: - data["paths"][path][x].update({"path": path, "method": x}) - list_of_dicts.append(data["paths"][path][x]) + if "deprecated" not in data["paths"][path][x]: + data["paths"][path][x].update({"path": path, "method": x}) + list_of_dicts.append(data["paths"][path][x]) # Update parameters in each endpoint for p in list_of_dicts: From 2feccc22bb8300e9e0d9a4575e2717f2baec17ec Mon Sep 17 00:00:00 2001 From: Sai Medhini Reddy Maryada <117196660+saimedhi@users.noreply.github.com> Date: Tue, 26 Sep 2023 09:49:25 -0700 Subject: [PATCH 04/21] Integrated generated APIs into the existing module, ensuring alignment with the server and maintaining backward compatibility (#508) Signed-off-by: saimedhi --- CHANGELOG.md | 1 + opensearchpy/_async/client/tasks.py | 64 +++++++++++++--------------- opensearchpy/_async/client/tasks.pyi | 9 ++++ opensearchpy/client/tasks.py | 64 +++++++++++++--------------- opensearchpy/client/tasks.pyi | 9 ++++ utils/generated_file_headers.txt | 10 ++--- 6 files changed, 84 insertions(+), 73 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5de04d90..e4646f50 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added generating imports and headers to API generator ([#467](https://github.com/opensearch-project/opensearch-py/pull/467)) - Added point-in-time APIs (create_pit, delete_pit, delete_all_pits, get_all_pits) and Security Client APIs (health and update_audit_configuration) ([#502](https://github.com/opensearch-project/opensearch-py/pull/502)) ### Changed +- Integrated generated `tasks client` APIs into the existing module, ensuring alignment with the server and maintaining backward compatibility ([#508](https://github.com/opensearch-project/opensearch-py/pull/508)) ### Deprecated - Deprecated point-in-time APIs (list_all_point_in_time, create_point_in_time, delete_point_in_time) and Security Client APIs (health_check and update_audit_config) ([#502](https://github.com/opensearch-project/opensearch-py/pull/502)) ### Removed diff --git a/opensearchpy/_async/client/tasks.py b/opensearchpy/_async/client/tasks.py index 2b49ddc0..212b9e56 100644 --- a/opensearchpy/_async/client/tasks.py +++ b/opensearchpy/_async/client/tasks.py @@ -25,6 +25,16 @@ # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. 
+# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + + import warnings from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params @@ -45,24 +55,20 @@ async def list(self, params=None, headers=None): Returns a list of tasks. - .. warning:: - - This API is **experimental** so may include breaking changes - or be removed in a future version - - :arg actions: A comma-separated list of actions that should be + :arg actions: Comma-separated list of actions that should be returned. Leave empty to return all. - :arg detailed: Return detailed task information (default: false) + :arg detailed: Return detailed task information. :arg group_by: Group tasks by nodes or parent/child - relationships Valid choices: nodes, parents, none Default: nodes - :arg nodes: A comma-separated list of node IDs or names to limit + relationships. Valid choices: nodes, parents, none + :arg nodes: Comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the - node you're connecting to, leave empty to get information from all nodes + node you're connecting to, leave empty to get information from all + nodes. :arg parent_task_id: Return tasks with specified parent task id (node_id:task_number). Set to -1 to return all. - :arg timeout: Explicit operation timeout - :arg wait_for_completion: Wait for the matching tasks to - complete (default: false) + :arg timeout: Operation timeout. + :arg wait_for_completion: Should this request wait until the + operation has completed before returning. """ return await self.transport.perform_request( "GET", "/_tasks", params=params, headers=headers @@ -74,23 +80,18 @@ async def cancel(self, task_id=None, params=None, headers=None): Cancels a task, if it can be cancelled through an API. - .. warning:: - - This API is **experimental** so may include breaking changes - or be removed in a future version - :arg task_id: Cancel the task with specified task id - (node_id:task_number) - :arg actions: A comma-separated list of actions that should be + (node_id:task_number). + :arg actions: Comma-separated list of actions that should be cancelled. Leave empty to cancel all. - :arg nodes: A comma-separated list of node IDs or names to limit + :arg nodes: Comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the - node you're connecting to, leave empty to get information from all nodes + node you're connecting to, leave empty to get information from all + nodes. :arg parent_task_id: Cancel tasks with specified parent task id (node_id:task_number). Set to -1 to cancel all. - :arg wait_for_completion: Should the request block until the - cancellation of the task and its descendant tasks is completed. Defaults - to false + :arg wait_for_completion: Should this request wait until the + operation has completed before returning. """ return await self.transport.perform_request( "POST", @@ -105,16 +106,11 @@ async def get(self, task_id=None, params=None, headers=None): Returns information about a task. - .. 
warning:: - - This API is **experimental** so may include breaking changes - or be removed in a future version - :arg task_id: Return the task with specified id - (node_id:task_number) - :arg timeout: Explicit operation timeout - :arg wait_for_completion: Wait for the matching tasks to - complete (default: false) + (node_id:task_number). + :arg timeout: Operation timeout. + :arg wait_for_completion: Should this request wait until the + operation has completed before returning. """ if task_id in SKIP_IN_PATH: warnings.warn( diff --git a/opensearchpy/_async/client/tasks.pyi b/opensearchpy/_async/client/tasks.pyi index ae777158..14081a2d 100644 --- a/opensearchpy/_async/client/tasks.pyi +++ b/opensearchpy/_async/client/tasks.pyi @@ -24,6 +24,15 @@ # specific language governing permissions and limitations # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + from typing import Any, Collection, MutableMapping, Optional, Tuple, Union from .utils import NamespacedClient diff --git a/opensearchpy/client/tasks.py b/opensearchpy/client/tasks.py index fff32dd7..ff76a3a6 100644 --- a/opensearchpy/client/tasks.py +++ b/opensearchpy/client/tasks.py @@ -25,6 +25,16 @@ # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + + import warnings from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params @@ -45,24 +55,20 @@ def list(self, params=None, headers=None): Returns a list of tasks. - .. warning:: - - This API is **experimental** so may include breaking changes - or be removed in a future version - - :arg actions: A comma-separated list of actions that should be + :arg actions: Comma-separated list of actions that should be returned. Leave empty to return all. - :arg detailed: Return detailed task information (default: false) + :arg detailed: Return detailed task information. :arg group_by: Group tasks by nodes or parent/child - relationships Valid choices: nodes, parents, none Default: nodes - :arg nodes: A comma-separated list of node IDs or names to limit + relationships. Valid choices: nodes, parents, none + :arg nodes: Comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the - node you're connecting to, leave empty to get information from all nodes + node you're connecting to, leave empty to get information from all + nodes. :arg parent_task_id: Return tasks with specified parent task id (node_id:task_number). Set to -1 to return all. 
- :arg timeout: Explicit operation timeout - :arg wait_for_completion: Wait for the matching tasks to - complete (default: false) + :arg timeout: Operation timeout. + :arg wait_for_completion: Should this request wait until the + operation has completed before returning. """ return self.transport.perform_request( "GET", "/_tasks", params=params, headers=headers @@ -74,23 +80,18 @@ def cancel(self, task_id=None, params=None, headers=None): Cancels a task, if it can be cancelled through an API. - .. warning:: - - This API is **experimental** so may include breaking changes - or be removed in a future version - :arg task_id: Cancel the task with specified task id - (node_id:task_number) - :arg actions: A comma-separated list of actions that should be + (node_id:task_number). + :arg actions: Comma-separated list of actions that should be cancelled. Leave empty to cancel all. - :arg nodes: A comma-separated list of node IDs or names to limit + :arg nodes: Comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the - node you're connecting to, leave empty to get information from all nodes + node you're connecting to, leave empty to get information from all + nodes. :arg parent_task_id: Cancel tasks with specified parent task id (node_id:task_number). Set to -1 to cancel all. - :arg wait_for_completion: Should the request block until the - cancellation of the task and its descendant tasks is completed. Defaults - to false + :arg wait_for_completion: Should this request wait until the + operation has completed before returning. """ return self.transport.perform_request( "POST", @@ -105,16 +106,11 @@ def get(self, task_id=None, params=None, headers=None): Returns information about a task. - .. warning:: - - This API is **experimental** so may include breaking changes - or be removed in a future version - :arg task_id: Return the task with specified id - (node_id:task_number) - :arg timeout: Explicit operation timeout - :arg wait_for_completion: Wait for the matching tasks to - complete (default: false) + (node_id:task_number). + :arg timeout: Operation timeout. + :arg wait_for_completion: Should this request wait until the + operation has completed before returning. """ if task_id in SKIP_IN_PATH: warnings.warn( diff --git a/opensearchpy/client/tasks.pyi b/opensearchpy/client/tasks.pyi index 3577bae3..50ad69bb 100644 --- a/opensearchpy/client/tasks.pyi +++ b/opensearchpy/client/tasks.pyi @@ -24,6 +24,15 @@ # specific language governing permissions and limitations # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + from typing import Any, Collection, MutableMapping, Optional, Tuple, Union from .utils import NamespacedClient diff --git a/utils/generated_file_headers.txt b/utils/generated_file_headers.txt index 16c8aba5..135828ce 100644 --- a/utils/generated_file_headers.txt +++ b/utils/generated_file_headers.txt @@ -1,8 +1,8 @@ # ---------------------------------------------------- -# THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. 
-# -# To contribute, please make necessary modifications to either "Python generator": +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": # https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py -# or "OpenAPI specs": +# or the "OpenSearch API specification" available at: # https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json -# ----------------------------------------------------- \ No newline at end of file +# ----------------------------------------------------- From 7d3c528dff76bddc212084cc213b1090b8713314 Mon Sep 17 00:00:00 2001 From: Sai Medhini Reddy Maryada <117196660+saimedhi@users.noreply.github.com> Date: Thu, 28 Sep 2023 20:18:53 -0700 Subject: [PATCH 05/21] Integrated generated ingest client APIs into the existing module, ensuring alignment with the server and maintaining backward compatibility (#513) Signed-off-by: saimedhi --- CHANGELOG.md | 1 + opensearchpy/_async/client/ingest.py | 71 ++++++++++++++------------- opensearchpy/_async/client/ingest.pyi | 32 +++++------- opensearchpy/client/ingest.py | 71 ++++++++++++++------------- opensearchpy/client/ingest.pyi | 32 +++++------- utils/generate-api.py | 17 ++++--- utils/templates/base | 2 +- 7 files changed, 109 insertions(+), 117 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e4646f50..ca26dacb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added point-in-time APIs (create_pit, delete_pit, delete_all_pits, get_all_pits) and Security Client APIs (health and update_audit_configuration) ([#502](https://github.com/opensearch-project/opensearch-py/pull/502)) ### Changed - Integrated generated `tasks client` APIs into the existing module, ensuring alignment with the server and maintaining backward compatibility ([#508](https://github.com/opensearch-project/opensearch-py/pull/508)) +- Integrated generated `ingest client` APIs into the existing module, ensuring alignment with the server and maintaining backward compatibility ([#513](https://github.com/opensearch-project/opensearch-py/pull/513)) ### Deprecated - Deprecated point-in-time APIs (list_all_point_in_time, create_point_in_time, delete_point_in_time) and Security Client APIs (health_check and update_audit_config) ([#502](https://github.com/opensearch-project/opensearch-py/pull/502)) ### Removed diff --git a/opensearchpy/_async/client/ingest.py b/opensearchpy/_async/client/ingest.py index dfc8eb1c..cb5253eb 100644 --- a/opensearchpy/_async/client/ingest.py +++ b/opensearchpy/_async/client/ingest.py @@ -25,42 +25,52 @@ # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. 
+# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + + from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params class IngestClient(NamespacedClient): - @query_params("master_timeout", "cluster_manager_timeout", "summary") + @query_params("cluster_manager_timeout", "master_timeout") async def get_pipeline(self, id=None, params=None, headers=None): """ Returns a pipeline. - :arg id: Comma separated list of pipeline ids. Wildcards - supported - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg summary: Return pipelines without their definitions - (default: false) + :arg id: Comma-separated list of pipeline ids. Wildcards + supported. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. """ return await self.transport.perform_request( "GET", _make_path("_ingest", "pipeline", id), params=params, headers=headers ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") async def put_pipeline(self, id, body, params=None, headers=None): """ Creates or updates a pipeline. - :arg id: Pipeline ID + :arg id: Pipeline ID. :arg body: The ingest definition - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg timeout: Explicit operation timeout + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. + :arg timeout: Operation timeout. """ for param in (id, body): if param in SKIP_IN_PATH: @@ -74,18 +84,19 @@ async def put_pipeline(self, id, body, params=None, headers=None): body=body, ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") async def delete_pipeline(self, id, params=None, headers=None): """ Deletes a pipeline. - :arg id: Pipeline ID - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg timeout: Explicit operation timeout + :arg id: Pipeline ID. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. + :arg timeout: Operation timeout. 
""" if id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'id'.") @@ -104,9 +115,9 @@ async def simulate(self, body, id=None, params=None, headers=None): :arg body: The simulate definition - :arg id: Pipeline ID + :arg id: Pipeline ID. :arg verbose: Verbose mode. Display data output for each - processor in executed pipeline + processor in executed pipeline. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -128,13 +139,3 @@ async def processor_grok(self, params=None, headers=None): return await self.transport.perform_request( "GET", "/_ingest/processor/grok", params=params, headers=headers ) - - @query_params() - async def geo_ip_stats(self, params=None, headers=None): - """ - Returns statistical information about geoip databases - - """ - return await self.transport.perform_request( - "GET", "/_ingest/geoip/stats", params=params, headers=headers - ) diff --git a/opensearchpy/_async/client/ingest.pyi b/opensearchpy/_async/client/ingest.pyi index 7e498b6c..40d3c7d9 100644 --- a/opensearchpy/_async/client/ingest.pyi +++ b/opensearchpy/_async/client/ingest.pyi @@ -24,6 +24,15 @@ # specific language governing permissions and limitations # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + from typing import Any, Collection, MutableMapping, Optional, Tuple, Union from .utils import NamespacedClient @@ -33,9 +42,8 @@ class IngestClient(NamespacedClient): self, *, id: Optional[Any] = ..., - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., - summary: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -54,8 +62,8 @@ class IngestClient(NamespacedClient): id: Any, *, body: Any, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -74,8 +82,8 @@ class IngestClient(NamespacedClient): self, id: Any, *, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -125,19 +133,3 @@ class IngestClient(NamespacedClient): params: Optional[MutableMapping[str, Any]] = ..., headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... 
- async def geo_ip_stats( - self, - *, - pretty: Optional[bool] = ..., - human: Optional[bool] = ..., - error_trace: Optional[bool] = ..., - format: Optional[str] = ..., - filter_path: Optional[Union[str, Collection[str]]] = ..., - request_timeout: Optional[Union[int, float]] = ..., - ignore: Optional[Union[int, Collection[int]]] = ..., - opaque_id: Optional[str] = ..., - http_auth: Optional[Union[str, Tuple[str, str]]] = ..., - api_key: Optional[Union[str, Tuple[str, str]]] = ..., - params: Optional[MutableMapping[str, Any]] = ..., - headers: Optional[MutableMapping[str, str]] = ..., - ) -> Any: ... diff --git a/opensearchpy/client/ingest.py b/opensearchpy/client/ingest.py index e40f1a3d..fb9d4f79 100644 --- a/opensearchpy/client/ingest.py +++ b/opensearchpy/client/ingest.py @@ -25,42 +25,52 @@ # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + + from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params class IngestClient(NamespacedClient): - @query_params("master_timeout", "cluster_manager_timeout", "summary") + @query_params("cluster_manager_timeout", "master_timeout") def get_pipeline(self, id=None, params=None, headers=None): """ Returns a pipeline. - :arg id: Comma separated list of pipeline ids. Wildcards - supported - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg summary: Return pipelines without their definitions - (default: false) + :arg id: Comma-separated list of pipeline ids. Wildcards + supported. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. """ return self.transport.perform_request( "GET", _make_path("_ingest", "pipeline", id), params=params, headers=headers ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") def put_pipeline(self, id, body, params=None, headers=None): """ Creates or updates a pipeline. - :arg id: Pipeline ID + :arg id: Pipeline ID. :arg body: The ingest definition - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg timeout: Explicit operation timeout + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. + :arg timeout: Operation timeout. 
""" for param in (id, body): if param in SKIP_IN_PATH: @@ -74,18 +84,19 @@ def put_pipeline(self, id, body, params=None, headers=None): body=body, ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") def delete_pipeline(self, id, params=None, headers=None): """ Deletes a pipeline. - :arg id: Pipeline ID - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg timeout: Explicit operation timeout + :arg id: Pipeline ID. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. + :arg timeout: Operation timeout. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'id'.") @@ -104,9 +115,9 @@ def simulate(self, body, id=None, params=None, headers=None): :arg body: The simulate definition - :arg id: Pipeline ID + :arg id: Pipeline ID. :arg verbose: Verbose mode. Display data output for each - processor in executed pipeline + processor in executed pipeline. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -128,13 +139,3 @@ def processor_grok(self, params=None, headers=None): return self.transport.perform_request( "GET", "/_ingest/processor/grok", params=params, headers=headers ) - - @query_params() - def geo_ip_stats(self, params=None, headers=None): - """ - Returns statistical information about geoip databases - - """ - return self.transport.perform_request( - "GET", "/_ingest/geoip/stats", params=params, headers=headers - ) diff --git a/opensearchpy/client/ingest.pyi b/opensearchpy/client/ingest.pyi index bbc5aba2..251071e3 100644 --- a/opensearchpy/client/ingest.pyi +++ b/opensearchpy/client/ingest.pyi @@ -24,6 +24,15 @@ # specific language governing permissions and limitations # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. 
+# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + from typing import Any, Collection, MutableMapping, Optional, Tuple, Union from .utils import NamespacedClient @@ -33,9 +42,8 @@ class IngestClient(NamespacedClient): self, *, id: Optional[Any] = ..., - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., - summary: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -54,8 +62,8 @@ class IngestClient(NamespacedClient): id: Any, *, body: Any, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -74,8 +82,8 @@ class IngestClient(NamespacedClient): self, id: Any, *, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -125,19 +133,3 @@ class IngestClient(NamespacedClient): params: Optional[MutableMapping[str, Any]] = ..., headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... - def geo_ip_stats( - self, - *, - pretty: Optional[bool] = ..., - human: Optional[bool] = ..., - error_trace: Optional[bool] = ..., - format: Optional[str] = ..., - filter_path: Optional[Union[str, Collection[str]]] = ..., - request_timeout: Optional[Union[int, float]] = ..., - ignore: Optional[Union[int, Collection[int]]] = ..., - opaque_id: Optional[str] = ..., - http_auth: Optional[Union[str, Tuple[str, str]]] = ..., - api_key: Optional[Union[str, Tuple[str, str]]] = ..., - params: Optional[MutableMapping[str, Any]] = ..., - headers: Optional[MutableMapping[str, str]] = ..., - ) -> Any: ... 
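For reviewers, a minimal usage sketch of the regenerated ingest client; the endpoint, credentials, and pipeline id below are illustrative assumptions rather than anything defined in this patch.

from opensearchpy import OpenSearch

# Hypothetical local cluster; adjust the endpoint and credentials for your environment.
client = OpenSearch(
    hosts=["https://localhost:9200"],
    http_auth=("admin", "admin"),
    verify_certs=False,
)

# The regenerated methods keep their existing signatures; `cluster_manager_timeout`
# is the preferred spelling of the deprecated `master_timeout` query parameter.
client.ingest.put_pipeline(
    id="my-pipeline",
    body={
        "description": "demo pipeline",
        "processors": [
            {"set": {"field": "indexed_at", "value": "{{_ingest.timestamp}}"}}
        ],
    },
    cluster_manager_timeout="30s",
)
print(client.ingest.get_pipeline(id="my-pipeline"))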
diff --git a/utils/generate-api.py b/utils/generate-api.py index f13a41b5..adab04bf 100644 --- a/utils/generate-api.py +++ b/utils/generate-api.py @@ -448,9 +448,11 @@ def read_modules(): for path in data["paths"]: for x in data["paths"][path]: - if "deprecated" not in data["paths"][path][x]: - data["paths"][path][x].update({"path": path, "method": x}) - list_of_dicts.append(data["paths"][path][x]) + if data["paths"][path][x]["x-operation-group"] == "nodes.hot_threads": + if "deprecated" in data["paths"][path][x]: + continue + data["paths"][path][x].update({"path": path, "method": x}) + list_of_dicts.append(data["paths"][path][x]) # Update parameters in each endpoint for p in list_of_dicts: @@ -484,12 +486,15 @@ def read_modules(): A.update({"type": "enum"}) A.update({"options": m["schema"]["enum"]}) - if "deprecated" in m: - A.update({"deprecated": m["deprecated"]}) + if "deprecated" in m["schema"]: + A.update({"deprecated": m["schema"]["deprecated"]}) + A.update( + {"deprecation_message": m["schema"]["x-deprecation-message"]} + ) params_new.update({m["name"]: A}) # Removing the deprecated "type" - if "type" in params_new: + if p["x-operation-group"] != "nodes.hot_threads" and "type" in params_new: params_new.pop("type") if bool(params_new): diff --git a/utils/templates/base b/utils/templates/base index 4a1249ed..971efbc9 100644 --- a/utils/templates/base +++ b/utils/templates/base @@ -21,7 +21,7 @@ {% for p, info in api.params %} {% filter wordwrap(72, wrapstring="\n ") %} - :arg {{ p }}: {{ info.description }}{% if info.options %} Valid choices: {{ info.options|join(", ") }}{% endif %}{% if info.default %} Default: {{ info.default }}{% endif %} + :arg {{ p }}{% if info.deprecated %} (Deprecated: {{ info['deprecation_message'][:-1] }}){% endif %}: {{ info.description }}{% if info.options %} Valid choices: {{ info.options|join(", ") }}{% endif %}{% if info.default %} Default: {{ info.default }}{% endif %} {% endfilter %} {% endfor %} From 9fe5f5ad625067178e9f8dc6e8f3a11cbb3b2cd2 Mon Sep 17 00:00:00 2001 From: Sai Medhini Reddy Maryada <117196660+saimedhi@users.noreply.github.com> Date: Tue, 3 Oct 2023 07:40:51 -0700 Subject: [PATCH 06/21] Integrated generated dangling_indices client APIs into the existing module, ensuring alignment with the server and maintaining backward compatibility (#511) Signed-off-by: saimedhi Signed-off-by: Sai Medhini Reddy Maryada <117196660+saimedhi@users.noreply.github.com> --- CHANGELOG.md | 1 + .../_async/client/dangling_indices.py | 44 +++++++++++++------ .../_async/client/dangling_indices.pyi | 13 +++++- opensearchpy/client/dangling_indices.py | 44 +++++++++++++------ opensearchpy/client/dangling_indices.pyi | 13 +++++- 5 files changed, 83 insertions(+), 32 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ca26dacb..3a2fe62a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Changed - Integrated generated `tasks client` APIs into the existing module, ensuring alignment with the server and maintaining backward compatibility ([#508](https://github.com/opensearch-project/opensearch-py/pull/508)) - Integrated generated `ingest client` APIs into the existing module, ensuring alignment with the server and maintaining backward compatibility ([#513](https://github.com/opensearch-project/opensearch-py/pull/513)) +- Integrated generated `dangling_indices client` APIs into the existing module, ensuring alignment with the server and maintaining backward compatibility 
([#511](https://github.com/opensearch-project/opensearch-py/pull/511)) ### Deprecated - Deprecated point-in-time APIs (list_all_point_in_time, create_point_in_time, delete_point_in_time) and Security Client APIs (health_check and update_audit_config) ([#502](https://github.com/opensearch-project/opensearch-py/pull/502)) ### Removed diff --git a/opensearchpy/_async/client/dangling_indices.py b/opensearchpy/_async/client/dangling_indices.py index ff9e533f..cf382c52 100644 --- a/opensearchpy/_async/client/dangling_indices.py +++ b/opensearchpy/_async/client/dangling_indices.py @@ -25,24 +25,37 @@ # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + + from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params class DanglingIndicesClient(NamespacedClient): @query_params( - "accept_data_loss", "master_timeout", "cluster_manager_timeout", "timeout" + "accept_data_loss", "cluster_manager_timeout", "master_timeout", "timeout" ) async def delete_dangling_index(self, index_uuid, params=None, headers=None): """ - Deletes the specified dangling index + Deletes the specified dangling index. - :arg index_uuid: The UUID of the dangling index + :arg index_uuid: The UUID of the dangling index. :arg accept_data_loss: Must be set to true in order to delete - the dangling index - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + the dangling index. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. + :arg timeout: Operation timeout. """ if index_uuid in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index_uuid'.") @@ -55,19 +68,22 @@ async def delete_dangling_index(self, index_uuid, params=None, headers=None): ) @query_params( - "accept_data_loss", "master_timeout", "cluster_manager_timeout", "timeout" + "accept_data_loss", "cluster_manager_timeout", "master_timeout", "timeout" ) async def import_dangling_index(self, index_uuid, params=None, headers=None): """ - Imports the specified dangling index + Imports the specified dangling index. - :arg index_uuid: The UUID of the dangling index + :arg index_uuid: The UUID of the dangling index. :arg accept_data_loss: Must be set to true in order to import - the dangling index - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + the dangling index. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. 
+ :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. + :arg timeout: Operation timeout. """ if index_uuid in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index_uuid'.") diff --git a/opensearchpy/_async/client/dangling_indices.pyi b/opensearchpy/_async/client/dangling_indices.pyi index c9bb9ec1..17ab1ac8 100644 --- a/opensearchpy/_async/client/dangling_indices.pyi +++ b/opensearchpy/_async/client/dangling_indices.pyi @@ -24,6 +24,15 @@ # specific language governing permissions and limitations # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + from typing import Any, Collection, MutableMapping, Optional, Tuple, Union from .utils import NamespacedClient @@ -34,8 +43,8 @@ class DanglingIndicesClient(NamespacedClient): index_uuid: Any, *, accept_data_loss: Optional[Any] = ..., - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -55,8 +64,8 @@ class DanglingIndicesClient(NamespacedClient): index_uuid: Any, *, accept_data_loss: Optional[Any] = ..., - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., diff --git a/opensearchpy/client/dangling_indices.py b/opensearchpy/client/dangling_indices.py index 4d1b5a36..b04698ad 100644 --- a/opensearchpy/client/dangling_indices.py +++ b/opensearchpy/client/dangling_indices.py @@ -25,24 +25,37 @@ # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + + from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params class DanglingIndicesClient(NamespacedClient): @query_params( - "accept_data_loss", "master_timeout", "cluster_manager_timeout", "timeout" + "accept_data_loss", "cluster_manager_timeout", "master_timeout", "timeout" ) def delete_dangling_index(self, index_uuid, params=None, headers=None): """ - Deletes the specified dangling index + Deletes the specified dangling index. - :arg index_uuid: The UUID of the dangling index + :arg index_uuid: The UUID of the dangling index. 
:arg accept_data_loss: Must be set to true in order to delete - the dangling index - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + the dangling index. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. + :arg timeout: Operation timeout. """ if index_uuid in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index_uuid'.") @@ -55,19 +68,22 @@ def delete_dangling_index(self, index_uuid, params=None, headers=None): ) @query_params( - "accept_data_loss", "master_timeout", "cluster_manager_timeout", "timeout" + "accept_data_loss", "cluster_manager_timeout", "master_timeout", "timeout" ) def import_dangling_index(self, index_uuid, params=None, headers=None): """ - Imports the specified dangling index + Imports the specified dangling index. - :arg index_uuid: The UUID of the dangling index + :arg index_uuid: The UUID of the dangling index. :arg accept_data_loss: Must be set to true in order to import - the dangling index - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + the dangling index. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. + :arg timeout: Operation timeout. """ if index_uuid in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index_uuid'.") diff --git a/opensearchpy/client/dangling_indices.pyi b/opensearchpy/client/dangling_indices.pyi index 56e4a72f..203805a1 100644 --- a/opensearchpy/client/dangling_indices.pyi +++ b/opensearchpy/client/dangling_indices.pyi @@ -24,6 +24,15 @@ # specific language governing permissions and limitations # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. 
+# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + from typing import Any, Collection, MutableMapping, Optional, Tuple, Union from .utils import NamespacedClient @@ -34,8 +43,8 @@ class DanglingIndicesClient(NamespacedClient): index_uuid: Any, *, accept_data_loss: Optional[Any] = ..., - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -55,8 +64,8 @@ class DanglingIndicesClient(NamespacedClient): index_uuid: Any, *, accept_data_loss: Optional[Any] = ..., - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., From 781744c6b142193528b101eb05a758932cba00de Mon Sep 17 00:00:00 2001 From: Sai Medhini Reddy Maryada <117196660+saimedhi@users.noreply.github.com> Date: Tue, 3 Oct 2023 07:41:41 -0700 Subject: [PATCH 07/21] Bumps from >=1.21.1 to >=1.26.9 (#518) Signed-off-by: saimedhi --- CHANGELOG.md | 1 + setup.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a2fe62a..4807f079 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Dependencies - Bumps `sphinx` from <7.1 to <7.3 - Bumps `urllib3` from >=1.21.1, <2 to >=1.21.1 ([#466](https://github.com/opensearch-project/opensearch-py/pull/466)) +- Bumps `urllib3` from >=1.21.1 to >=1.26.9 ([#518](https://github.com/opensearch-project/opensearch-py/pull/518)) ## [2.3.1] ### Added diff --git a/setup.py b/setup.py index 3bce64d1..c21e053f 100644 --- a/setup.py +++ b/setup.py @@ -50,7 +50,7 @@ if package == module_dir or package.startswith(module_dir + ".") ] install_requires = [ - "urllib3>=1.21.1", + "urllib3>=1.26.9", "requests>=2.4.0, <3.0.0", "six", "python-dateutil", From 70db37a3c091c10befd34c80d94ab0f48a399a86 Mon Sep 17 00:00:00 2001 From: Sai Medhini Reddy Maryada <117196660+saimedhi@users.noreply.github.com> Date: Tue, 3 Oct 2023 07:43:05 -0700 Subject: [PATCH 08/21] Modified generator to generate 'options' and 'default value' for parameters in description (#519) Signed-off-by: saimedhi --- utils/generate-api.py | 7 +++++++ utils/templates/base | 3 ++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/utils/generate-api.py b/utils/generate-api.py index adab04bf..cfe12af4 100644 --- a/utils/generate-api.py +++ b/utils/generate-api.py @@ -482,6 +482,10 @@ def read_modules(): for m in params: A = dict(type=m["schema"]["type"], description=m["description"]) + + if "default" in m["schema"]: + A.update({"default": m["schema"]["default"]}) + if "enum" in m["schema"]: A.update({"type": "enum"}) A.update({"options": m["schema"]["enum"]}) @@ -508,6 +512,9 @@ def read_modules(): if "description" in n: B.update({"description": n["description"]}) + if "x-enum-options" in n["schema"]: + B.update({"options": n["schema"]["x-enum-options"]}) + deprecated_new = {} if "deprecated" in n: B.update({"deprecated": n["deprecated"]}) diff --git 
a/utils/templates/base b/utils/templates/base index 971efbc9..9b58b6c2 100644 --- a/utils/templates/base +++ b/utils/templates/base @@ -21,7 +21,8 @@ {% for p, info in api.params %} {% filter wordwrap(72, wrapstring="\n ") %} - :arg {{ p }}{% if info.deprecated %} (Deprecated: {{ info['deprecation_message'][:-1] }}){% endif %}: {{ info.description }}{% if info.options %} Valid choices: {{ info.options|join(", ") }}{% endif %}{% if info.default %} Default: {{ info.default }}{% endif %} + :arg {{ p }}{% if info.deprecated %} (Deprecated: {{ info['deprecation_message'][:-1] }}){% endif %}: {{ info.description }}{% if info.options %} Valid choices: {{ info.options|join(", ") }}{% endif %} + {% if info.default is defined %}{% if info.default is not none %}{% if info.default is sameas(false) %} (default: false){% else %} (default: {{ info.default }}){% endif %}{% endif %}{% endif %} {% endfilter %} {% endfor %} From 1a47e6fa1f802c6bd9eb0185e23e506079e5be59 Mon Sep 17 00:00:00 2001 From: Bhavani Ravi Date: Tue, 3 Oct 2023 20:15:35 +0530 Subject: [PATCH 09/21] fixes: #512 Wrong return type hint in async_scan (#520) * fixes: #512 Wrong return type hint in async_scan Signed-off-by: Bhavani Ravi * add: changelog Signed-off-by: Bhavani Ravi --------- Signed-off-by: Bhavani Ravi --- CHANGELOG.md | 1 + opensearchpy/_async/helpers/actions.pyi | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4807f079..9ddb62a0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -125,6 +125,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Removed patch versions in integration tests for OpenSearch 1.0.0 - 2.3.0 to reduce Github Action jobs ([#262](https://github.com/opensearch-project/opensearch-py/pull/262)) ### Fixed - Fixed DeprecationWarning emitted from urllib3 1.26.13+ ([#246](https://github.com/opensearch-project/opensearch-py/pull/246)) +- Fixed Wrong return type hint in `async_scan` ([520](https://github.com/opensearch-project/opensearch-py/pull/520)) ### Security [Unreleased]: https://github.com/opensearch-project/opensearch-py/compare/v2.3.1...HEAD diff --git a/opensearchpy/_async/helpers/actions.pyi b/opensearchpy/_async/helpers/actions.pyi index be000ae8..cd6b6974 100644 --- a/opensearchpy/_async/helpers/actions.pyi +++ b/opensearchpy/_async/helpers/actions.pyi @@ -100,7 +100,7 @@ def async_scan( clear_scroll: bool = ..., scroll_kwargs: Optional[Mapping[str, Any]] = ..., **kwargs: Any -) -> AsyncGenerator[int, None]: ... +) -> AsyncGenerator[dict[str, Any], None]: ... async def async_reindex( client: AsyncOpenSearch, source_index: Union[str, Collection[str]], From 80b96812a786b5c504df883023e8a2c885ba0015 Mon Sep 17 00:00:00 2001 From: Sai Medhini Reddy Maryada <117196660+saimedhi@users.noreply.github.com> Date: Tue, 3 Oct 2023 15:00:49 -0700 Subject: [PATCH 10/21] Integrated generated 'nodes' client APIs into the existing module, ensuring alignment with the server and maintaining backward compatibility (#514) Signed-off-by: saimedhi Signed-off-by: Sai Medhini Reddy Maryada <117196660+saimedhi@users.noreply.github.com> Signed-off-by: Daniel (dB.) Doubrovkine Co-authored-by: Daniel (dB.) 
Doubrovkine --- CHANGELOG.md | 1 + opensearchpy/_async/client/nodes.py | 118 ++++++++++++++------------- opensearchpy/_async/client/nodes.pyi | 10 ++- opensearchpy/client/nodes.py | 118 ++++++++++++++------------- opensearchpy/client/nodes.pyi | 10 ++- 5 files changed, 143 insertions(+), 114 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9ddb62a0..f41f54c8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Integrated generated `tasks client` APIs into the existing module, ensuring alignment with the server and maintaining backward compatibility ([#508](https://github.com/opensearch-project/opensearch-py/pull/508)) - Integrated generated `ingest client` APIs into the existing module, ensuring alignment with the server and maintaining backward compatibility ([#513](https://github.com/opensearch-project/opensearch-py/pull/513)) - Integrated generated `dangling_indices client` APIs into the existing module, ensuring alignment with the server and maintaining backward compatibility ([#511](https://github.com/opensearch-project/opensearch-py/pull/511)) +- Integrated generated `nodes client` APIs into the existing module, ensuring alignment with the server and maintaining backward compatibility ([#514](https://github.com/opensearch-project/opensearch-py/pull/514)) ### Deprecated - Deprecated point-in-time APIs (list_all_point_in_time, create_point_in_time, delete_point_in_time) and Security Client APIs (health_check and update_audit_config) ([#502](https://github.com/opensearch-project/opensearch-py/pull/502)) ### Removed diff --git a/opensearchpy/_async/client/nodes.py b/opensearchpy/_async/client/nodes.py index d437fd17..e0e8b06b 100644 --- a/opensearchpy/_async/client/nodes.py +++ b/opensearchpy/_async/client/nodes.py @@ -25,6 +25,16 @@ # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + + from .utils import NamespacedClient, _make_path, query_params @@ -37,12 +47,12 @@ async def reload_secure_settings( Reloads secure settings. - :arg body: An object containing the password for the - opensearch keystore - :arg node_id: A comma-separated list of node IDs to span the + :arg body: An object containing the password for the opensearch + keystore + :arg node_id: Comma-separated list of node IDs to span the reload/reinit call. Should stay empty because reloading usually involves all cluster nodes. - :arg timeout: Explicit operation timeout + :arg timeout: Operation timeout. """ return await self.transport.perform_request( "POST", @@ -58,16 +68,16 @@ async def info(self, node_id=None, metric=None, params=None, headers=None): Returns information about nodes in the cluster. - :arg node_id: A comma-separated list of node IDs or names to - limit the returned information; use `_local` to return information from - the node you're connecting to, leave empty to get information from all - nodes - :arg metric: A comma-separated list of metrics you wish - returned. Leave empty to return all. 
Valid choices: settings, os, - process, jvm, thread_pool, transport, http, plugins, ingest - :arg flat_settings: Return settings in flat format (default: + :arg node_id: Comma-separated list of node IDs or names to limit + the returned information; use `_local` to return information from the + node you're connecting to, leave empty to get information from all + nodes. + :arg metric: Comma-separated list of metrics you wish returned. + Leave empty to return all. Valid choices: settings, os, process, jvm, + thread_pool, transport, http, plugins, ingest + :arg flat_settings: Return settings in flat format. (default: false) - :arg timeout: Explicit operation timeout + :arg timeout: Operation timeout. """ return await self.transport.perform_request( "GET", _make_path("_nodes", node_id, metric), params=params, headers=headers @@ -79,7 +89,6 @@ async def info(self, node_id=None, metric=None, params=None, headers=None): "fields", "groups", "include_segment_file_sizes", - "include_unloaded_segments", "level", "timeout", "types", @@ -91,37 +100,34 @@ async def stats( Returns statistical information about nodes in the cluster. - :arg node_id: A comma-separated list of node IDs or names to - limit the returned information; use `_local` to return information from - the node you're connecting to, leave empty to get information from all - nodes + :arg node_id: Comma-separated list of node IDs or names to limit + the returned information; use `_local` to return information from the + node you're connecting to, leave empty to get information from all + nodes. :arg metric: Limit the information returned to the specified - metrics Valid choices: _all, breaker, fs, http, indices, jvm, os, + metrics. Valid choices: _all, breaker, fs, http, indices, jvm, os, process, thread_pool, transport, discovery, indexing_pressure :arg index_metric: Limit the information returned for `indices` metric to the specific index metrics. Isn't used if `indices` (or `all`) - metric isn't specified. Valid choices: _all, completion, docs, - fielddata, query_cache, flush, get, indexing, merge, request_cache, - refresh, search, segments, store, warmer, suggest - :arg completion_fields: A comma-separated list of fields for - `fielddata` and `suggest` index metric (supports wildcards) - :arg fielddata_fields: A comma-separated list of fields for - `fielddata` index metric (supports wildcards) - :arg fields: A comma-separated list of fields for `fielddata` - and `completion` index metric (supports wildcards) - :arg groups: A comma-separated list of search groups for - `search` index metric + metric isn't specified. Valid choices: _all, store, indexing, get, + search, merge, flush, refresh, query_cache, fielddata, docs, warmer, + completion, segments, translog, suggest, request_cache, recovery + :arg completion_fields: Comma-separated list of fields for + `fielddata` and `suggest` index metric (supports wildcards). + :arg fielddata_fields: Comma-separated list of fields for + `fielddata` index metric (supports wildcards). + :arg fields: Comma-separated list of fields for `fielddata` and + `completion` index metric (supports wildcards). + :arg groups: Comma-separated list of search groups for `search` + index metric. 
:arg include_segment_file_sizes: Whether to report the aggregated disk usage of each one of the Lucene index files (only - applies if segment stats are requested) - :arg include_unloaded_segments: If set to true segment stats - will include stats for segments that are not currently loaded into - memory + applies if segment stats are requested). (default: false) :arg level: Return indices stats aggregated at index, node or - shard level Valid choices: indices, node, shards Default: node - :arg timeout: Explicit operation timeout - :arg types: A comma-separated list of document types for the - `indexing` index metric + shard level. Valid choices: indices, node, shards + :arg timeout: Operation timeout. + :arg types: Comma-separated list of document types for the + `indexing` index metric. """ return await self.transport.perform_request( "GET", @@ -138,21 +144,21 @@ async def hot_threads(self, node_id=None, params=None, headers=None): Returns information about hot threads on each node in the cluster. - :arg node_id: A comma-separated list of node IDs or names to - limit the returned information; use `_local` to return information from - the node you're connecting to, leave empty to get information from all - nodes - :arg doc_type: The type to sample (default: cpu) Valid choices: - cpu, wait, block + :arg node_id: Comma-separated list of node IDs or names to limit + the returned information; use `_local` to return information from the + node you're connecting to, leave empty to get information from all + nodes. + :arg doc_type: The type to sample. Valid choices: cpu, wait, + block :arg ignore_idle_threads: Don't show threads that are in known- idle places, such as waiting on a socket select or pulling from an empty - task queue (default: true) - :arg interval: The interval for the second sampling of threads - :arg snapshots: Number of samples of thread stacktrace (default: - 10) + task queue. (default: True) + :arg interval: The interval for the second sampling of threads. + :arg snapshots: Number of samples of thread stacktrace. + (default: 10) :arg threads: Specify the number of threads to provide - information for (default: 3) - :arg timeout: Explicit operation timeout + information for. (default: 3) + :arg timeout: Operation timeout. """ # type is a reserved word so it cannot be used, use doc_type instead if "doc_type" in params: @@ -171,13 +177,13 @@ async def usage(self, node_id=None, metric=None, params=None, headers=None): Returns low-level information about REST actions usage on nodes. - :arg node_id: A comma-separated list of node IDs or names to - limit the returned information; use `_local` to return information from - the node you're connecting to, leave empty to get information from all - nodes + :arg node_id: Comma-separated list of node IDs or names to limit + the returned information; use `_local` to return information from the + node you're connecting to, leave empty to get information from all + nodes. :arg metric: Limit the information returned to the specified - metrics Valid choices: _all, rest_actions - :arg timeout: Explicit operation timeout + metrics. Valid choices: _all, rest_actions + :arg timeout: Operation timeout. 
""" return await self.transport.perform_request( "GET", diff --git a/opensearchpy/_async/client/nodes.pyi b/opensearchpy/_async/client/nodes.pyi index 5f108df4..b34a7ba9 100644 --- a/opensearchpy/_async/client/nodes.pyi +++ b/opensearchpy/_async/client/nodes.pyi @@ -24,6 +24,15 @@ # specific language governing permissions and limitations # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + from typing import Any, Collection, MutableMapping, Optional, Tuple, Union from .utils import NamespacedClient @@ -79,7 +88,6 @@ class NodesClient(NamespacedClient): fields: Optional[Any] = ..., groups: Optional[Any] = ..., include_segment_file_sizes: Optional[Any] = ..., - include_unloaded_segments: Optional[Any] = ..., level: Optional[Any] = ..., timeout: Optional[Any] = ..., types: Optional[Any] = ..., diff --git a/opensearchpy/client/nodes.py b/opensearchpy/client/nodes.py index 2773002b..9dc1e1ab 100644 --- a/opensearchpy/client/nodes.py +++ b/opensearchpy/client/nodes.py @@ -25,6 +25,16 @@ # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + + from .utils import NamespacedClient, _make_path, query_params @@ -37,12 +47,12 @@ def reload_secure_settings( Reloads secure settings. - :arg body: An object containing the password for the - opensearch keystore - :arg node_id: A comma-separated list of node IDs to span the + :arg body: An object containing the password for the opensearch + keystore + :arg node_id: Comma-separated list of node IDs to span the reload/reinit call. Should stay empty because reloading usually involves all cluster nodes. - :arg timeout: Explicit operation timeout + :arg timeout: Operation timeout. """ return self.transport.perform_request( "POST", @@ -58,16 +68,16 @@ def info(self, node_id=None, metric=None, params=None, headers=None): Returns information about nodes in the cluster. - :arg node_id: A comma-separated list of node IDs or names to - limit the returned information; use `_local` to return information from - the node you're connecting to, leave empty to get information from all - nodes - :arg metric: A comma-separated list of metrics you wish - returned. Leave empty to return all. Valid choices: settings, os, - process, jvm, thread_pool, transport, http, plugins, ingest - :arg flat_settings: Return settings in flat format (default: + :arg node_id: Comma-separated list of node IDs or names to limit + the returned information; use `_local` to return information from the + node you're connecting to, leave empty to get information from all + nodes. 
+ :arg metric: Comma-separated list of metrics you wish returned. + Leave empty to return all. Valid choices: settings, os, process, jvm, + thread_pool, transport, http, plugins, ingest + :arg flat_settings: Return settings in flat format. (default: false) - :arg timeout: Explicit operation timeout + :arg timeout: Operation timeout. """ return self.transport.perform_request( "GET", _make_path("_nodes", node_id, metric), params=params, headers=headers @@ -79,7 +89,6 @@ def info(self, node_id=None, metric=None, params=None, headers=None): "fields", "groups", "include_segment_file_sizes", - "include_unloaded_segments", "level", "timeout", "types", @@ -91,37 +100,34 @@ def stats( Returns statistical information about nodes in the cluster. - :arg node_id: A comma-separated list of node IDs or names to - limit the returned information; use `_local` to return information from - the node you're connecting to, leave empty to get information from all - nodes + :arg node_id: Comma-separated list of node IDs or names to limit + the returned information; use `_local` to return information from the + node you're connecting to, leave empty to get information from all + nodes. :arg metric: Limit the information returned to the specified - metrics Valid choices: _all, breaker, fs, http, indices, jvm, os, + metrics. Valid choices: _all, breaker, fs, http, indices, jvm, os, process, thread_pool, transport, discovery, indexing_pressure :arg index_metric: Limit the information returned for `indices` metric to the specific index metrics. Isn't used if `indices` (or `all`) - metric isn't specified. Valid choices: _all, completion, docs, - fielddata, query_cache, flush, get, indexing, merge, request_cache, - refresh, search, segments, store, warmer, suggest - :arg completion_fields: A comma-separated list of fields for - `fielddata` and `suggest` index metric (supports wildcards) - :arg fielddata_fields: A comma-separated list of fields for - `fielddata` index metric (supports wildcards) - :arg fields: A comma-separated list of fields for `fielddata` - and `completion` index metric (supports wildcards) - :arg groups: A comma-separated list of search groups for - `search` index metric + metric isn't specified. Valid choices: _all, store, indexing, get, + search, merge, flush, refresh, query_cache, fielddata, docs, warmer, + completion, segments, translog, suggest, request_cache, recovery + :arg completion_fields: Comma-separated list of fields for + `fielddata` and `suggest` index metric (supports wildcards). + :arg fielddata_fields: Comma-separated list of fields for + `fielddata` index metric (supports wildcards). + :arg fields: Comma-separated list of fields for `fielddata` and + `completion` index metric (supports wildcards). + :arg groups: Comma-separated list of search groups for `search` + index metric. :arg include_segment_file_sizes: Whether to report the aggregated disk usage of each one of the Lucene index files (only - applies if segment stats are requested) - :arg include_unloaded_segments: If set to true segment stats - will include stats for segments that are not currently loaded into - memory + applies if segment stats are requested). (default: false) :arg level: Return indices stats aggregated at index, node or - shard level Valid choices: indices, node, shards Default: node - :arg timeout: Explicit operation timeout - :arg types: A comma-separated list of document types for the - `indexing` index metric + shard level. Valid choices: indices, node, shards + :arg timeout: Operation timeout. 
+ :arg types: Comma-separated list of document types for the + `indexing` index metric. """ return self.transport.perform_request( "GET", @@ -138,21 +144,21 @@ def hot_threads(self, node_id=None, params=None, headers=None): Returns information about hot threads on each node in the cluster. - :arg node_id: A comma-separated list of node IDs or names to - limit the returned information; use `_local` to return information from - the node you're connecting to, leave empty to get information from all - nodes - :arg doc_type: The type to sample (default: cpu) Valid choices: - cpu, wait, block + :arg node_id: Comma-separated list of node IDs or names to limit + the returned information; use `_local` to return information from the + node you're connecting to, leave empty to get information from all + nodes. + :arg doc_type: The type to sample. Valid choices: cpu, wait, + block :arg ignore_idle_threads: Don't show threads that are in known- idle places, such as waiting on a socket select or pulling from an empty - task queue (default: true) - :arg interval: The interval for the second sampling of threads - :arg snapshots: Number of samples of thread stacktrace (default: - 10) + task queue. (default: True) + :arg interval: The interval for the second sampling of threads. + :arg snapshots: Number of samples of thread stacktrace. + (default: 10) :arg threads: Specify the number of threads to provide - information for (default: 3) - :arg timeout: Explicit operation timeout + information for. (default: 3) + :arg timeout: Operation timeout. """ # type is a reserved word so it cannot be used, use doc_type instead if "doc_type" in params: @@ -171,13 +177,13 @@ def usage(self, node_id=None, metric=None, params=None, headers=None): Returns low-level information about REST actions usage on nodes. - :arg node_id: A comma-separated list of node IDs or names to - limit the returned information; use `_local` to return information from - the node you're connecting to, leave empty to get information from all - nodes + :arg node_id: Comma-separated list of node IDs or names to limit + the returned information; use `_local` to return information from the + node you're connecting to, leave empty to get information from all + nodes. :arg metric: Limit the information returned to the specified - metrics Valid choices: _all, rest_actions - :arg timeout: Explicit operation timeout + metrics. Valid choices: _all, rest_actions + :arg timeout: Operation timeout. """ return self.transport.perform_request( "GET", diff --git a/opensearchpy/client/nodes.pyi b/opensearchpy/client/nodes.pyi index d0f7beb4..67e5a05c 100644 --- a/opensearchpy/client/nodes.pyi +++ b/opensearchpy/client/nodes.pyi @@ -24,6 +24,15 @@ # specific language governing permissions and limitations # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. 
+# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + from typing import Any, Collection, MutableMapping, Optional, Tuple, Union from .utils import NamespacedClient @@ -79,7 +88,6 @@ class NodesClient(NamespacedClient): fields: Optional[Any] = ..., groups: Optional[Any] = ..., include_segment_file_sizes: Optional[Any] = ..., - include_unloaded_segments: Optional[Any] = ..., level: Optional[Any] = ..., timeout: Optional[Any] = ..., types: Optional[Any] = ..., From be26adb3696b8c01fc45d5e29caaa4b3e35d3eda Mon Sep 17 00:00:00 2001 From: Sai Medhini Reddy Maryada <117196660+saimedhi@users.noreply.github.com> Date: Tue, 3 Oct 2023 16:01:41 -0700 Subject: [PATCH 11/21] updated changelog (#522) Signed-off-by: saimedhi --- CHANGELOG.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f41f54c8..c32f5cb0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,10 +6,10 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added generating imports and headers to API generator ([#467](https://github.com/opensearch-project/opensearch-py/pull/467)) - Added point-in-time APIs (create_pit, delete_pit, delete_all_pits, get_all_pits) and Security Client APIs (health and update_audit_configuration) ([#502](https://github.com/opensearch-project/opensearch-py/pull/502)) ### Changed -- Integrated generated `tasks client` APIs into the existing module, ensuring alignment with the server and maintaining backward compatibility ([#508](https://github.com/opensearch-project/opensearch-py/pull/508)) -- Integrated generated `ingest client` APIs into the existing module, ensuring alignment with the server and maintaining backward compatibility ([#513](https://github.com/opensearch-project/opensearch-py/pull/513)) -- Integrated generated `dangling_indices client` APIs into the existing module, ensuring alignment with the server and maintaining backward compatibility ([#511](https://github.com/opensearch-project/opensearch-py/pull/511)) -- Integrated generated `nodes client` APIs into the existing module, ensuring alignment with the server and maintaining backward compatibility ([#514](https://github.com/opensearch-project/opensearch-py/pull/514)) +- Generate `tasks` client from API specs ([#508](https://github.com/opensearch-project/opensearch-py/pull/508)) +- Generate `ingest` client from API specs ([#513](https://github.com/opensearch-project/opensearch-py/pull/513)) +- Generate `dangling_indices` client from API specs ([#511](https://github.com/opensearch-project/opensearch-py/pull/511)) +- Generate `nodes` client from API specs ([#514](https://github.com/opensearch-project/opensearch-py/pull/514)) ### Deprecated - Deprecated point-in-time APIs (list_all_point_in_time, create_point_in_time, delete_point_in_time) and Security Client APIs (health_check and update_audit_config) ([#502](https://github.com/opensearch-project/opensearch-py/pull/502)) ### Removed From 49c2f6e87d7dc8cea4436b8c8d3d34961319dfad Mon Sep 17 00:00:00 2001 From: Sai Medhini Reddy Maryada <117196660+saimedhi@users.noreply.github.com> Date: Fri, 6 Oct 2023 13:37:56 -0700 Subject: [PATCH 12/21] Bump version to 2.3.2 (#524) 
Signed-off-by: saimedhi --- .github/workflows/integration.yml | 2 +- .github/workflows/unified-release.yml | 2 +- CHANGELOG.md | 15 ++++++++++++--- COMPATIBILITY.md | 11 ++++++----- noxfile.py | 2 +- opensearchpy/_version.py | 2 +- 6 files changed, 22 insertions(+), 12 deletions(-) diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 89d9f46c..106e940a 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -9,7 +9,7 @@ jobs: strategy: fail-fast: false matrix: - opensearch_version: [ '1.0.1', '1.1.0', '1.2.4', '1.3.7', '2.0.1', '2.1.0', '2.2.1', '2.3.0', '2.4.0', '2.5.0', '2.6.0', '2.7.0', '2.8.0' ] + opensearch_version: [ '1.0.1', '1.1.0', '1.2.4', '1.3.7', '2.0.1', '2.1.0', '2.2.1', '2.3.0', '2.4.0', '2.5.0', '2.6.0', '2.7.0', '2.8.0', '2.9.0', '2.10.0' ] secured: [ "true", "false" ] steps: diff --git a/.github/workflows/unified-release.yml b/.github/workflows/unified-release.yml index 1551cfe2..cddea14a 100644 --- a/.github/workflows/unified-release.yml +++ b/.github/workflows/unified-release.yml @@ -9,7 +9,7 @@ jobs: strategy: fail-fast: false matrix: - stack_version: ['2.3.1'] + stack_version: ['2.3.2'] steps: - name: Checkout diff --git a/CHANGELOG.md b/CHANGELOG.md index c32f5cb0..61b6e968 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,8 +17,16 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Security ### Dependencies - Bumps `sphinx` from <7.1 to <7.3 -- Bumps `urllib3` from >=1.21.1, <2 to >=1.21.1 ([#466](https://github.com/opensearch-project/opensearch-py/pull/466)) -- Bumps `urllib3` from >=1.21.1 to >=1.26.9 ([#518](https://github.com/opensearch-project/opensearch-py/pull/518)) + +## [2.3.2] +### Added +### Changed +### Deprecated +### Removed +### Fixed +### Security +### Dependencies +- Bumps `urllib3` from >=1.21.1, <2 to >=1.26.9 ([#518](https://github.com/opensearch-project/opensearch-py/pull/518)) ## [2.3.1] ### Added @@ -129,10 +137,11 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fixed Wrong return type hint in `async_scan` ([520](https://github.com/opensearch-project/opensearch-py/pull/520)) ### Security -[Unreleased]: https://github.com/opensearch-project/opensearch-py/compare/v2.3.1...HEAD +[Unreleased]: https://github.com/opensearch-project/opensearch-py/compare/v2.3.2...HEAD [2.0.1]: https://github.com/opensearch-project/opensearch-py/compare/v2.0.0...v2.0.1 [2.1.0]: https://github.com/opensearch-project/opensearch-py/compare/v2.0.1...v2.1.0 [2.1.1]: https://github.com/opensearch-project/opensearch-py/compare/v2.1.0...v2.1.1 [2.2.0]: https://github.com/opensearch-project/opensearch-py/compare/v2.1.1...v2.2.0 [2.3.0]: https://github.com/opensearch-project/opensearch-py/compare/v2.2.0...v2.3.0 [2.3.1]: https://github.com/opensearch-project/opensearch-py/compare/v2.3.0...v2.3.1 +[2.3.2]: https://github.com/opensearch-project/opensearch-py/compare/v2.3.1...v2.3.2 diff --git a/COMPATIBILITY.md b/COMPATIBILITY.md index a72261b3..0634f6cc 100644 --- a/COMPATIBILITY.md +++ b/COMPATIBILITY.md @@ -9,11 +9,12 @@ The below matrix shows the compatibility of the [`opensearch-py`](https://pypi.o | --- | --- | --- | | 1.0.0 | 1.0.0-1.2.4 | | | 1.1.0 | 1.3.0-1.3.7 | | -| 2.0.x | 1.0.0-2.8.0 | client works against Opensearch Version 1.x as long as features removed in 2.0 are not used | -| 2.1.x | 1.0.0-2.8.0 | client works against Opensearch Version 1.x as long as features removed in 2.0 are not used | -| 2.2.0 | 1.0.0-2.8.0 | client works against 
Opensearch Version 1.x as long as features removed in 2.0 are not used | -| 2.3.0 | 1.0.0-2.8.0 | client works against Opensearch Version 1.x as long as features removed in 2.0 are not used | -| 2.3.1 | 1.0.0-2.8.0 | client works against Opensearch Version 1.x as long as features removed in 2.0 are not used | +| 2.0.x | 1.0.0-2.10.0 | client works against Opensearch Version 1.x as long as features removed in 2.0 are not used | +| 2.1.x | 1.0.0-2.10.0 | client works against Opensearch Version 1.x as long as features removed in 2.0 are not used | +| 2.2.0 | 1.0.0-2.10.0 | client works against Opensearch Version 1.x as long as features removed in 2.0 are not used | +| 2.3.0 | 1.0.0-2.10.0 | client works against Opensearch Version 1.x as long as features removed in 2.0 are not used | +| 2.3.1 | 1.0.0-2.10.0 | client works against Opensearch Version 1.x as long as features removed in 2.0 are not used | +| 2.3.2 | 1.0.0-2.10.0 | client works against Opensearch Version 1.x as long as features removed in 2.0 are not used | ## Upgrading diff --git a/noxfile.py b/noxfile.py index 3504ff75..a5da2b60 100644 --- a/noxfile.py +++ b/noxfile.py @@ -36,7 +36,7 @@ ) -@nox.session(python=["2.7", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "3.10", "3.11"]) +@nox.session(python=["2.7", "3.5", "3.6", "3.7", "3.8", "3.9", "3.10", "3.11"]) def test(session): session.install(".") session.install("-r", "dev-requirements.txt") diff --git a/opensearchpy/_version.py b/opensearchpy/_version.py index 3985132a..82fac929 100644 --- a/opensearchpy/_version.py +++ b/opensearchpy/_version.py @@ -24,4 +24,4 @@ # specific language governing permissions and limitations # under the License. -__versionstr__ = "2.3.1" +__versionstr__ = "2.3.2" From 2bfc4091779c11b79f0d9941d4c9a7d24a84f2cc Mon Sep 17 00:00:00 2001 From: "Daniel (dB.) Doubrovkine" Date: Mon, 9 Oct 2023 13:45:18 -0400 Subject: [PATCH 13/21] Fix: typos. (#526) * Fix: typo. Signed-off-by: dblock * Fix: typo. Signed-off-by: dblock * Fixed its. Signed-off-by: dblock * Added Visual Code settings to .gitignore. Signed-off-by: dblock * Added loop type for async client. Signed-off-by: dblock --------- Signed-off-by: dblock --- .gitignore | 5 ++++- opensearchpy/_async/http_aiohttp.pyi | 3 ++- opensearchpy/_async/transport.py | 6 +++--- opensearchpy/connection/base.py | 4 ++-- opensearchpy/connection_pool.py | 14 +++++++------- opensearchpy/helpers/utils.py | 6 +++--- opensearchpy/transport.py | 6 +++--- .../test_server/test_helpers/test_data.py | 2 +- .../test_server/test_plugins/test_alerting.py | 6 +++--- .../test_async/test_server/test_rest_api_spec.py | 6 +++--- .../test_server/test_helpers/test_data.py | 2 +- .../test_server/test_plugins/test_alerting.py | 6 +++--- .../test_server/test_rest_api_spec.py | 2 +- 13 files changed, 36 insertions(+), 32 deletions(-) diff --git a/.gitignore b/.gitignore index 019de716..153eea24 100644 --- a/.gitignore +++ b/.gitignore @@ -148,6 +148,9 @@ test_opensearch/cover test_opensearch/local.py .ci/output -#Vi text editor +# vi text editor .*.swp *~ + +# Visual Studio Code +.vscode \ No newline at end of file diff --git a/opensearchpy/_async/http_aiohttp.pyi b/opensearchpy/_async/http_aiohttp.pyi index 4dea4317..223fdfff 100644 --- a/opensearchpy/_async/http_aiohttp.pyi +++ b/opensearchpy/_async/http_aiohttp.pyi @@ -24,6 +24,7 @@ # specific language governing permissions and limitations # under the License. 
+from asyncio import AbstractEventLoop from typing import Any, Collection, Mapping, Optional, Tuple, Union from ..connection import Connection @@ -65,7 +66,7 @@ class AIOHttpConnection(AsyncConnection): ssl_context: Optional[Any] = ..., http_compress: Optional[bool] = ..., opaque_id: Optional[str] = ..., - loop: Any = ..., + loop: Optional[AbstractEventLoop] = ..., trust_env: bool = ..., **kwargs: Any ) -> None: ... diff --git a/opensearchpy/_async/transport.py b/opensearchpy/_async/transport.py index e93344bc..3db4516c 100644 --- a/opensearchpy/_async/transport.py +++ b/opensearchpy/_async/transport.py @@ -343,14 +343,14 @@ def get_connection(self): async def perform_request(self, method, url, headers=None, params=None, body=None): """ Perform the actual request. Retrieve a connection from the connection - pool, pass all the information to it's perform_request method and + pool, pass all the information to its perform_request method and return the data. If an exception was raised, mark the connection as failed and retry (up to `max_retries` times). If the operation was successful and the connection used was previously - marked as dead, mark it as live, resetting it's failure count. + marked as dead, mark it as live, resetting its failure count. :arg method: HTTP method to use :arg url: absolute url (without host) to target @@ -412,7 +412,7 @@ async def perform_request(self, method, url, headers=None, params=None, body=Non raise e else: - # connection didn't fail, confirm it's live status + # connection didn't fail, confirm its live status self.connection_pool.mark_live(connection) if method == "HEAD": diff --git a/opensearchpy/connection/base.py b/opensearchpy/connection/base.py index 435996b9..05edca73 100644 --- a/opensearchpy/connection/base.py +++ b/opensearchpy/connection/base.py @@ -56,7 +56,7 @@ class Connection(object): """ Class responsible for maintaining a connection to an OpenSearch node. It - holds persistent connection pool to it and it's main interface + holds persistent connection pool to it and its main interface (`perform_request`) is thread-safe. Also responsible for logging. @@ -158,7 +158,7 @@ def _raise_warnings(self, warning_headers): # Format is: '(number) OpenSearch-(version)-(instance) "(message)"' warning_messages = [] for header in warning_headers: - # Because 'Requests' does it's own folding of multiple HTTP headers + # Because 'Requests' does its own folding of multiple HTTP headers # into one header delimited by commas (totally standard compliant, just # annoying for cases like this) we need to expect there may be # more than one message per 'Warning' header. diff --git a/opensearchpy/connection_pool.py b/opensearchpy/connection_pool.py index 0416fbec..61873748 100644 --- a/opensearchpy/connection_pool.py +++ b/opensearchpy/connection_pool.py @@ -55,8 +55,8 @@ class ConnectionSelector(object): process it will be the dictionary returned by the `host_info_callback`. Example of where this would be useful is a zone-aware selector that would - only select connections from it's own zones and only fall back to other - connections where there would be none in it's zones. + only select connections from its own zones and only fall back to other + connections where there would be none in its zones. """ def __init__(self, opts): @@ -112,7 +112,7 @@ class ConnectionPool(object): future reference. Upon each request the `Transport` will ask for a `Connection` via the - `get_connection` method. If the connection fails (it's `perform_request` + `get_connection` method. 
If the connection fails (its `perform_request` raises a `ConnectionError`) it will be marked as dead (via `mark_dead`) and put on a timeout (if it fails N times in a row the timeout is exponentially longer - the formula is `default_timeout * 2 ** (fail_count - 1)`). When @@ -132,7 +132,7 @@ def __init__( ): """ :arg connections: list of tuples containing the - :class:`~opensearchpy.Connection` instance and it's options + :class:`~opensearchpy.Connection` instance and its options :arg dead_timeout: number of seconds a connection should be retired for after a failure, increases on consecutive failures :arg timeout_cutoff: number of consecutive failures after which the @@ -211,7 +211,7 @@ def mark_live(self, connection): def resurrect(self, force=False): """ Attempt to resurrect a connection from the dead pool. It will try to - locate one (not all) eligible (it's timeout is over) connection to + locate one (not all) eligible (its timeout is over) connection to return to the live pool. Any resurrected connection is also returned. :arg force: resurrect a connection even if there is none eligible (used @@ -245,7 +245,7 @@ def resurrect(self, force=False): self.dead.put((timeout, connection)) return - # either we were forced or the connection is elligible to be retried + # either we were forced or the connection is eligible to be retried self.connections.append(connection) logger.info("Resurrecting connection %r (force=%s).", connection, force) return connection @@ -259,7 +259,7 @@ def get_connection(self): no connections are available and passes the list of live connections to the selector instance to choose from. - Returns a connection instance and it's current fail count. + Returns a connection instance and its current fail count. """ self.resurrect() connections = self.connections[:] diff --git a/opensearchpy/helpers/utils.py b/opensearchpy/helpers/utils.py index 3ebea18e..04f2ee37 100644 --- a/opensearchpy/helpers/utils.py +++ b/opensearchpy/helpers/utils.py @@ -222,7 +222,7 @@ class DslMeta(type): It then uses the information from that registry (as well as `name` and `shortcut` attributes from the base class) to construct any subclass based - on it's name. + on its name. For typical use see `QueryMeta` and `Query` in `opensearchpy.query`. """ @@ -235,7 +235,7 @@ def __init__(cls, name, bases, attrs): if not hasattr(cls, "_type_shortcut"): return if cls.name is None: - # abstract base class, register it's shortcut + # abstract base class, register its shortcut cls._types[cls._type_name] = cls._type_shortcut # and create a registry for subclasses if not hasattr(cls, "_classes"): @@ -264,7 +264,7 @@ class DslBase(object): - to_dict method to serialize into dict (to be sent via opensearch-py) - basic logical operators (&, | and ~) using a Bool(Filter|Query) TODO: move into a class specific for Query/Filter - - respects the definition of the class and (de)serializes it's + - respects the definition of the class and (de)serializes its attributes based on the `_param_defs` definition (for example turning all values in the `must` attribute into Query objects) """ diff --git a/opensearchpy/transport.py b/opensearchpy/transport.py index c1d69d2c..32c9baf4 100644 --- a/opensearchpy/transport.py +++ b/opensearchpy/transport.py @@ -341,14 +341,14 @@ def mark_dead(self, connection): def perform_request(self, method, url, headers=None, params=None, body=None): """ Perform the actual request. 
Retrieve a connection from the connection - pool, pass all the information to it's perform_request method and + pool, pass all the information to its perform_request method and return the data. If an exception was raised, mark the connection as failed and retry (up to `max_retries` times). If the operation was successful and the connection used was previously - marked as dead, mark it as live, resetting it's failure count. + marked as dead, mark it as live, resetting its failure count. :arg method: HTTP method to use :arg url: absolute url (without host) to target @@ -409,7 +409,7 @@ def perform_request(self, method, url, headers=None, params=None, body=None): raise e else: - # connection didn't fail, confirm it's live status + # connection didn't fail, confirm its live status self.connection_pool.mark_live(connection) if method == "HEAD": diff --git a/test_opensearchpy/test_async/test_server/test_helpers/test_data.py b/test_opensearchpy/test_async/test_server/test_helpers/test_data.py index d513bcff..1194304e 100644 --- a/test_opensearchpy/test_async/test_server/test_helpers/test_data.py +++ b/test_opensearchpy/test_async/test_server/test_helpers/test_data.py @@ -947,7 +947,7 @@ async def create_git_index(client, index): ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 0, "insertions": 53, "lines": 53, "files": 2}, - "description": "From_dict, Q(dict) and bool query parses it's subqueries", + "description": "From_dict, Q(dict) and bool query parses its subqueries", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["d407f99d1959b7b862a541c066d9fd737ce913f3"], "committed_date": "2014-03-06T20:24:30", diff --git a/test_opensearchpy/test_async/test_server/test_plugins/test_alerting.py b/test_opensearchpy/test_async/test_server/test_plugins/test_alerting.py index f3f7fe32..2ef87bd3 100644 --- a/test_opensearchpy/test_async/test_server/test_plugins/test_alerting.py +++ b/test_opensearchpy/test_async/test_server/test_plugins/test_alerting.py @@ -127,7 +127,7 @@ async def test_search_monitor(self): # Create a dummy monitor await self.test_create_monitor() - # Create a monitor search query by it's name + # Create a monitor search query by its name query = {"query": {"match": {"monitor.name": "test-monitor"}}} # Perform the search with the above query @@ -145,7 +145,7 @@ async def test_get_monitor(self): # Create a dummy monitor await self.test_create_monitor() - # Create a monitor search query by it's name + # Create a monitor search query by its name query = {"query": {"match": {"monitor.name": "test-monitor"}}} # Perform the search with the above query @@ -169,7 +169,7 @@ async def test_run_monitor(self): # Create a dummy monitor await self.test_create_monitor() - # Create a monitor search query by it's name + # Create a monitor search query by its name query = {"query": {"match": {"monitor.name": "test-monitor"}}} # Perform the search with the above query diff --git a/test_opensearchpy/test_async/test_server/test_rest_api_spec.py b/test_opensearchpy/test_async/test_server/test_rest_api_spec.py index 27b20113..0773aab0 100644 --- a/test_opensearchpy/test_async/test_server/test_rest_api_spec.py +++ b/test_opensearchpy/test_async/test_server/test_rest_api_spec.py @@ -26,7 +26,7 @@ """ -Dynamically generated set of TestCases based on set of yaml files decribing +Dynamically generated set of TestCases based on set of yaml files describing some integration tests. 
These files are shared among all official OpenSearch clients. """ @@ -106,7 +106,7 @@ async def run(self): pass async def run_code(self, test): - """Execute an instruction based on it's type.""" + """Execute an instruction based on its type.""" for action in test: assert len(action) == 1 action_type, action = list(action.items())[0] @@ -126,7 +126,7 @@ async def run_do(self, action): assert len(action) == 1 # Remove the x_pack_rest_user authentication - # if it's given via headers. We're already authenticated + # if its given via headers. We're already authenticated # via the 'elastic' user. if ( headers diff --git a/test_opensearchpy/test_server/test_helpers/test_data.py b/test_opensearchpy/test_server/test_helpers/test_data.py index 20b63e39..059a983a 100644 --- a/test_opensearchpy/test_server/test_helpers/test_data.py +++ b/test_opensearchpy/test_server/test_helpers/test_data.py @@ -964,7 +964,7 @@ def create_git_index(client, index): ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 0, "insertions": 53, "lines": 53, "files": 2}, - "description": "From_dict, Q(dict) and bool query parses it's subqueries", + "description": "From_dict, Q(dict) and bool query parses its subqueries", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["d407f99d1959b7b862a541c066d9fd737ce913f3"], "committed_date": "2014-03-06T20:24:30", diff --git a/test_opensearchpy/test_server/test_plugins/test_alerting.py b/test_opensearchpy/test_server/test_plugins/test_alerting.py index 406bd71f..3a503e43 100644 --- a/test_opensearchpy/test_server/test_plugins/test_alerting.py +++ b/test_opensearchpy/test_server/test_plugins/test_alerting.py @@ -123,7 +123,7 @@ def test_search_monitor(self): # Create a dummy monitor self.test_create_monitor() - # Create a monitor search query by it's name + # Create a monitor search query by its name query = {"query": {"match": {"monitor.name": "test-monitor"}}} # Perform the search with the above query @@ -141,7 +141,7 @@ def test_get_monitor(self): # Create a dummy monitor self.test_create_monitor() - # Create a monitor search query by it's name + # Create a monitor search query by its name query = {"query": {"match": {"monitor.name": "test-monitor"}}} # Perform the search with the above query @@ -165,7 +165,7 @@ def test_run_monitor(self): # Create a dummy monitor self.test_create_monitor() - # Create a monitor search query by it's name + # Create a monitor search query by its name query = {"query": {"match": {"monitor.name": "test-monitor"}}} # Perform the search with the above query diff --git a/test_opensearchpy/test_server/test_rest_api_spec.py b/test_opensearchpy/test_server/test_rest_api_spec.py index 7d1cbf51..b5d890ab 100644 --- a/test_opensearchpy/test_server/test_rest_api_spec.py +++ b/test_opensearchpy/test_server/test_rest_api_spec.py @@ -205,7 +205,7 @@ def run(self): pass def run_code(self, test): - """Execute an instruction based on it's type.""" + """Execute an instruction based on its type.""" for action in test: assert len(action) == 1 action_type, action = list(action.items())[0] From e4c59e47320c58c151599f2da796d84a7e4084f2 Mon Sep 17 00:00:00 2001 From: Sai Medhini Reddy Maryada <117196660+saimedhi@users.noreply.github.com> Date: Mon, 9 Oct 2023 13:47:26 -0700 Subject: [PATCH 14/21] Modified generator to generate api deprecation warnings (#527) Signed-off-by: saimedhi --- utils/generate-api.py | 4 ++++ utils/templates/base | 4 ++++ 2 files changed, 8 insertions(+) diff --git 
a/utils/generate-api.py b/utils/generate-api.py index cfe12af4..2cb93eb5 100644 --- a/utils/generate-api.py +++ b/utils/generate-api.py @@ -263,6 +263,7 @@ def __init__(self, namespace, name, definition, is_pyi=False): self.description = "" self.doc_url = "" self.stability = self._def.get("stability", "stable") + self.deprecation_message = self._def.get("deprecation_message") if isinstance(definition["documentation"], str): self.doc_url = definition["documentation"] @@ -560,6 +561,9 @@ def read_modules(): documentation = {"description": z["description"]} api.update({"documentation": documentation}) + if "deprecation_message" not in api and "x-deprecation-message" in z: + api.update({"deprecation_message": z["x-deprecation-message"]}) + if "params" not in api and "params" in z: api.update({"params": z["params"]}) diff --git a/utils/templates/base b/utils/templates/base index 9b58b6c2..47bb5956 100644 --- a/utils/templates/base +++ b/utils/templates/base @@ -28,6 +28,10 @@ {% endfor %} {% endif %} """ + {% if api.deprecation_message %} + from warnings import warn + warn("Deprecated: {{ api.deprecation_message }}") + {% endif %} {% include "substitutions" %} {% include "required" %} {% if api.body.serialize == "bulk" %} From 388fb9d9839f1ecbb7db6bc1a9e81c000d04b693 Mon Sep 17 00:00:00 2001 From: Sai Medhini Reddy Maryada <117196660+saimedhi@users.noreply.github.com> Date: Mon, 9 Oct 2023 14:55:51 -0700 Subject: [PATCH 15/21] Generate cat client from API specs (#529) Signed-off-by: saimedhi --- CHANGELOG.md | 1 + opensearchpy/_async/client/cat.py | 655 ++++++++++++++++------------- opensearchpy/_async/client/cat.pyi | 85 +++- opensearchpy/client/cat.py | 653 +++++++++++++++------------- opensearchpy/client/cat.pyi | 85 +++- 5 files changed, 861 insertions(+), 618 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 61b6e968..06675018 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Generate `ingest` client from API specs ([#513](https://github.com/opensearch-project/opensearch-py/pull/513)) - Generate `dangling_indices` client from API specs ([#511](https://github.com/opensearch-project/opensearch-py/pull/511)) - Generate `nodes` client from API specs ([#514](https://github.com/opensearch-project/opensearch-py/pull/514)) +- Generate `cat` client from API specs ([#529](https://github.com/opensearch-project/opensearch-py/pull/529)) ### Deprecated - Deprecated point-in-time APIs (list_all_point_in_time, create_point_in_time, delete_point_in_time) and Security Client APIs (health_check and update_audit_config) ([#502](https://github.com/opensearch-project/opensearch-py/pull/502)) ### Removed diff --git a/opensearchpy/_async/client/cat.py b/opensearchpy/_async/client/cat.py index f7268308..a4dd9786 100644 --- a/opensearchpy/_async/client/cat.py +++ b/opensearchpy/_async/client/cat.py @@ -25,6 +25,16 @@ # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. 
+# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + + from .utils import NamespacedClient, _make_path, query_params @@ -36,19 +46,19 @@ async def aliases(self, name=None, params=None, headers=None): filter and routing infos. - :arg name: A comma-separated list of alias names to return + :arg name: Comma-separated list of alias names. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: all - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + concrete indices that are open, closed or both. Valid choices: all, + open, closed, hidden, none + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) + from cluster-manager node. (default: false) :arg s: Comma-separated list of column names or column aliases - to sort by - :arg v: Verbose mode. Display column headers + to sort by. + :arg v: Verbose mode. Display column headers. (default: false) """ return await self.transport.perform_request( "GET", _make_path("_cat", "aliases", name), params=params, headers=headers @@ -56,12 +66,12 @@ async def aliases(self, name=None, params=None, headers=None): @query_params( "bytes", + "cluster_manager_timeout", "format", "h", "help", "local", "master_timeout", - "cluster_manager_timeout", "s", "v", ) @@ -71,25 +81,24 @@ async def allocation(self, node_id=None, params=None, headers=None): much disk space they are using. - :arg node_id: A comma-separated list of node IDs or names to - limit the returned information - :arg bytes: The unit in which to display byte values Valid + :arg node_id: Comma-separated list of node IDs or names to limit + the returned information. + :arg bytes: The unit in which to display byte values. Valid choices: b, k, kb, m, mb, g, gb, t, tb, p, pb - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. 
(default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg v: Verbose mode. Display column headers + to sort by. + :arg v: Verbose mode. Display column headers. (default: false) """ return await self.transport.perform_request( "GET", @@ -105,15 +114,15 @@ async def count(self, index=None, params=None, headers=None): individual indices. - :arg index: A comma-separated list of index names to limit the - returned information - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg index: Comma-separated list of indices to limit the + returned information. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg s: Comma-separated list of column names or column aliases - to sort by - :arg v: Verbose mode. Display column headers + to sort by. + :arg v: Verbose mode. Display column headers. (default: false) """ return await self.transport.perform_request( "GET", _make_path("_cat", "count", index), params=params, headers=headers @@ -125,16 +134,16 @@ async def health(self, params=None, headers=None): Returns a concise representation of the cluster health. - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg s: Comma-separated list of column names or column aliases - to sort by - :arg time: The unit in which to display time values Valid + to sort by. + :arg time: The unit in which to display time values. Valid choices: d, h, m, s, ms, micros, nanos - :arg ts: Set to false to disable timestamping Default: True - :arg v: Verbose mode. Display column headers + :arg ts: Set to false to disable timestamping. (default: True) + :arg v: Verbose mode. Display column headers. (default: false) """ return await self.transport.perform_request( "GET", "/_cat/health", params=params, headers=headers @@ -146,9 +155,9 @@ async def help(self, params=None, headers=None): Returns help for the Cat APIs. - :arg help: Return help information + :arg help: Return help information. (default: false) :arg s: Comma-separated list of column names or column aliases - to sort by + to sort by. """ return await self.transport.perform_request( "GET", "/_cat", params=params, headers=headers @@ -156,6 +165,7 @@ async def help(self, params=None, headers=None): @query_params( "bytes", + "cluster_manager_timeout", "expand_wildcards", "format", "h", @@ -164,7 +174,6 @@ async def help(self, params=None, headers=None): "include_unloaded_segments", "local", "master_timeout", - "cluster_manager_timeout", "pri", "s", "time", @@ -176,93 +185,110 @@ async def indices(self, index=None, params=None, headers=None): counts, disk size, ... - :arg index: A comma-separated list of index names to limit the - returned information - :arg bytes: The unit in which to display byte values Valid + :arg index: Comma-separated list of indices to limit the + returned information. 
+ :arg bytes: The unit in which to display byte values. Valid choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: all - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg health: A health status ("green", "yellow", or "red" to - filter only indices matching the specified health status Valid choices: - green, yellow, red - :arg help: Return help information + concrete indices that are open, closed or both. Valid choices: all, + open, closed, hidden, none + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg health: Health status ('green', 'yellow', or 'red') to + filter only indices matching the specified health status. Valid + choices: green, yellow, red + :arg help: Return help information. (default: false) :arg include_unloaded_segments: If set to true segment stats will include stats for segments that are not currently loaded into - memory + memory. (default: false) :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg pri: Set to true to return stats only for primary shards + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. + :arg pri: Set to true to return stats only for primary shards. + (default: false) :arg s: Comma-separated list of column names or column aliases - to sort by - :arg time: The unit in which to display time values Valid + to sort by. + :arg time: The unit in which to display time values. Valid choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers + :arg v: Verbose mode. Display column headers. (default: false) """ return await self.transport.perform_request( "GET", _make_path("_cat", "indices", index), params=params, headers=headers ) @query_params( + "cluster_manager_timeout", "format", "h", "help", "local", "master_timeout", - "cluster_manager_timeout", "s", "v", ) async def master(self, params=None, headers=None): """ - Returns information about the master node. + Returns information about the cluster-manager node. - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. 
(default: false) :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg v: Verbose mode. Display column headers + to sort by. + :arg v: Verbose mode. Display column headers. (default: false) """ from warnings import warn - warn("Deprecated: use `cluster_manager` instead") + warn( + "Deprecated: To promote inclusive language, please use '/_cat/cluster_manager' instead." + ) return await self.transport.perform_request( "GET", "/_cat/master", params=params, headers=headers ) - @query_params("format", "h", "help", "local", "cluster_manager", "s", "v") + @query_params( + "cluster_manager_timeout", + "format", + "h", + "help", + "local", + "master_timeout", + "s", + "v", + ) async def cluster_manager(self, params=None, headers=None): """ - Returns information about the cluster_manager node. + Returns information about the cluster-manager node. - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg v: Verbose mode. Display column headers + to sort by. + :arg v: Verbose mode. Display column headers. (default: false) """ return await self.transport.perform_request( "GET", "/_cat/cluster_manager", params=params, headers=headers @@ -270,14 +296,13 @@ async def cluster_manager(self, params=None, headers=None): @query_params( "bytes", + "cluster_manager_timeout", "format", "full_id", "h", "help", - "include_unloaded_segments", "local", "master_timeout", - "cluster_manager_timeout", "s", "time", "v", @@ -287,28 +312,27 @@ async def nodes(self, params=None, headers=None): Returns basic statistics about performance of cluster nodes. - :arg bytes: The unit in which to display byte values Valid + :arg bytes: The unit in which to display byte values. Valid choices: b, k, kb, m, mb, g, gb, t, tb, p, pb - :arg format: a short version of the Accept header, e.g. json, - yaml + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. 
:arg full_id: Return the full node ID instead of the shortened - version (default: false) - :arg h: Comma-separated list of column names to display - :arg help: Return help information - :arg include_unloaded_segments: If set to true segment stats - will include stats for segments that are not currently loaded into - memory - :arg local: Calculate the selected nodes using the local cluster - state rather than the state from master node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + version. (default: false) + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) + :arg local (Deprecated: This parameter does not cause this API + to act locally): Return local information, do not retrieve the state + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg time: The unit in which to display time values Valid + to sort by. + :arg time: The unit in which to display time values. Valid choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers + :arg v: Verbose mode. Display column headers. (default: false) """ return await self.transport.perform_request( "GET", "/_cat/nodes", params=params, headers=headers @@ -323,22 +347,22 @@ async def recovery(self, index=None, params=None, headers=None): :arg index: Comma-separated list or wildcard expression of index - names to limit the returned information + names to limit the returned information. :arg active_only: If `true`, the response only includes ongoing - shard recoveries - :arg bytes: The unit in which to display byte values Valid + shard recoveries. (default: false) + :arg bytes: The unit in which to display byte values. Valid choices: b, k, kb, m, mb, g, gb, t, tb, p, pb :arg detailed: If `true`, the response includes detailed - information about shard recoveries - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + information about shard recoveries. (default: false) + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg s: Comma-separated list of column names or column aliases - to sort by - :arg time: The unit in which to display time values Valid + to sort by. + :arg time: The unit in which to display time values. Valid choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers + :arg v: Verbose mode. Display column headers. (default: false) """ return await self.transport.perform_request( "GET", _make_path("_cat", "recovery", index), params=params, headers=headers @@ -346,12 +370,12 @@ async def recovery(self, index=None, params=None, headers=None): @query_params( "bytes", + "cluster_manager_timeout", "format", "h", "help", "local", "master_timeout", - "cluster_manager_timeout", "s", "time", "v", @@ -361,59 +385,74 @@ async def shards(self, index=None, params=None, headers=None): Provides a detailed view of shard allocation on nodes. 
- :arg index: A comma-separated list of index names to limit the - returned information - :arg bytes: The unit in which to display byte values Valid + :arg index: Comma-separated list of indices to limit the + returned information. + :arg bytes: The unit in which to display byte values. Valid choices: b, k, kb, m, mb, g, gb, t, tb, p, pb - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg time: The unit in which to display time values Valid + to sort by. + :arg time: The unit in which to display time values. Valid choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers + :arg v: Verbose mode. Display column headers. (default: false) """ return await self.transport.perform_request( "GET", _make_path("_cat", "shards", index), params=params, headers=headers ) - @query_params("bytes", "format", "h", "help", "s", "v") + @query_params( + "bytes", + "cluster_manager_timeout", + "format", + "h", + "help", + "master_timeout", + "s", + "v", + ) async def segments(self, index=None, params=None, headers=None): """ Provides low-level information about the segments in the shards of an index. - :arg index: A comma-separated list of index names to limit the - returned information - :arg bytes: The unit in which to display byte values Valid + :arg index: Comma-separated list of indices to limit the + returned information. + :arg bytes: The unit in which to display byte values. Valid choices: b, k, kb, m, mb, g, gb, t, tb, p, pb - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg v: Verbose mode. Display column headers + to sort by. + :arg v: Verbose mode. Display column headers. 
(default: false) """ return await self.transport.perform_request( "GET", _make_path("_cat", "segments", index), params=params, headers=headers ) @query_params( + "cluster_manager_timeout", "format", "h", "help", "local", "master_timeout", - "cluster_manager_timeout", "s", "time", "v", @@ -423,33 +462,34 @@ async def pending_tasks(self, params=None, headers=None): Returns a concise representation of the cluster pending tasks. - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg time: The unit in which to display time values Valid + to sort by. + :arg time: The unit in which to display time values. Valid choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers + :arg v: Verbose mode. Display column headers. (default: false) """ return await self.transport.perform_request( "GET", "/_cat/pending_tasks", params=params, headers=headers ) @query_params( + "cluster_manager_timeout", "format", "h", "help", "local", "master_timeout", - "cluster_manager_timeout", "s", "size", "v", @@ -460,23 +500,23 @@ async def thread_pool(self, thread_pool_patterns=None, params=None, headers=None queue and rejected statistics are returned for all thread pools. - :arg thread_pool_patterns: A comma-separated list of regular- - expressions to filter the thread pools in the output - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg thread_pool_patterns: Comma-separated list of regular- + expressions to filter the thread pools in the output. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. 
:arg s: Comma-separated list of column names or column aliases - to sort by - :arg size: The multiplier in which to display values Valid - choices: , k, m, g, t, p - :arg v: Verbose mode. Display column headers + to sort by. + :arg size: The multiplier in which to display values. + :arg v: Verbose mode. Display column headers. (default: false) """ return await self.transport.perform_request( "GET", @@ -492,17 +532,17 @@ async def fielddata(self, fields=None, params=None, headers=None): node in the cluster. - :arg fields: A comma-separated list of fields to return in the - output - :arg bytes: The unit in which to display byte values Valid + :arg fields: Comma-separated list of fields to return in the + output. + :arg bytes: The unit in which to display byte values. Valid choices: b, k, kb, m, mb, g, gb, t, tb, p, pb - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg s: Comma-separated list of column names or column aliases - to sort by - :arg v: Verbose mode. Display column headers + to sort by. + :arg v: Verbose mode. Display column headers. (default: false) """ return await self.transport.perform_request( "GET", @@ -512,13 +552,12 @@ async def fielddata(self, fields=None, params=None, headers=None): ) @query_params( + "cluster_manager_timeout", "format", "h", "help", - "include_bootstrap", "local", "master_timeout", - "cluster_manager_timeout", "s", "v", ) @@ -527,33 +566,32 @@ async def plugins(self, params=None, headers=None): Returns information about installed plugins across nodes node. - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information - :arg include_bootstrap: Include bootstrap plugins in the - response + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg v: Verbose mode. Display column headers + to sort by. + :arg v: Verbose mode. Display column headers. (default: false) """ return await self.transport.perform_request( "GET", "/_cat/plugins", params=params, headers=headers ) @query_params( + "cluster_manager_timeout", "format", "h", "help", "local", "master_timeout", - "cluster_manager_timeout", "s", "v", ) @@ -562,31 +600,32 @@ async def nodeattrs(self, params=None, headers=None): Returns information about custom node attributes. - :arg format: a short version of the Accept header, e.g. 
json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg v: Verbose mode. Display column headers + to sort by. + :arg v: Verbose mode. Display column headers. (default: false) """ return await self.transport.perform_request( "GET", "/_cat/nodeattrs", params=params, headers=headers ) @query_params( + "cluster_manager_timeout", "format", "h", "help", "local", "master_timeout", - "cluster_manager_timeout", "s", "v", ) @@ -595,31 +634,32 @@ async def repositories(self, params=None, headers=None): Returns information about snapshot repositories registered in the cluster. - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg local: Return local information, do not retrieve the state - from master node - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg v: Verbose mode. Display column headers + to sort by. + :arg v: Verbose mode. Display column headers. (default: false) """ return await self.transport.perform_request( "GET", "/_cat/repositories", params=params, headers=headers ) @query_params( + "cluster_manager_timeout", "format", "h", "help", "ignore_unavailable", "master_timeout", - "cluster_manager_timeout", "s", "time", "v", @@ -629,23 +669,23 @@ async def snapshots(self, repository=None, params=None, headers=None): Returns all snapshots in a specific repository. - :arg repository: Name of repository from which to fetch the - snapshot information - :arg format: a short version of the Accept header, e.g. 
json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information - :arg ignore_unavailable: Set to true to ignore unavailable - snapshots - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + :arg repository: Comma-separated list of repository names. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed). (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg time: The unit in which to display time values Valid + to sort by. + :arg time: The unit in which to display time values. Valid choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers + :arg v: Verbose mode. Display column headers. (default: false) """ return await self.transport.perform_request( "GET", @@ -672,35 +712,37 @@ async def tasks(self, params=None, headers=None): the cluster. - :arg actions: A comma-separated list of actions that should be + :arg actions: Comma-separated list of actions that should be returned. Leave empty to return all. - :arg detailed: Return detailed task information (default: false) - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information - :arg nodes: A comma-separated list of node IDs or names to limit + :arg detailed: Return detailed task information. (default: + false) + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) + :arg nodes: Comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the - node you're connecting to, leave empty to get information from all nodes + node you're connecting to, leave empty to get information from all + nodes. :arg parent_task_id: Return tasks with specified parent task id (node_id:task_number). Set to -1 to return all. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg time: The unit in which to display time values Valid + to sort by. + :arg time: The unit in which to display time values. Valid choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers + :arg v: Verbose mode. Display column headers. (default: false) """ return await self.transport.perform_request( "GET", "/_cat/tasks", params=params, headers=headers ) @query_params( + "cluster_manager_timeout", "format", "h", "help", "local", "master_timeout", - "cluster_manager_timeout", "s", "v", ) @@ -709,58 +751,91 @@ async def templates(self, name=None, params=None, headers=None): Returns information about existing templates. - :arg name: A pattern that returned template names must match - :arg format: a short version of the Accept header, e.g. 
json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg name: The name of the template. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg v: Verbose mode. Display column headers + to sort by. + :arg v: Verbose mode. Display column headers. (default: false) """ return await self.transport.perform_request( "GET", _make_path("_cat", "templates", name), params=params, headers=headers ) + @query_params() + async def all_pit_segments(self, params=None, headers=None): + """ + Lists all active point-in-time segments. + + """ + return await self.transport.perform_request( + "GET", "/_cat/pit_segments/_all", params=params, headers=headers + ) + + @query_params() + async def pit_segments(self, body=None, params=None, headers=None): + """ + List segments for one or several PITs. + + + :arg body: + """ + return await self.transport.perform_request( + "GET", "/_cat/pit_segments", params=params, headers=headers, body=body + ) + @query_params( - "allow_no_match", "format", "from_", "h", "help", "s", "size", "time", "v" + "active_only", + "bytes", + "completed_only", + "detailed", + "format", + "h", + "help", + "s", + "shards", + "time", + "v", ) - async def transforms(self, transform_id=None, params=None, headers=None): + async def segment_replication(self, index=None, params=None, headers=None): """ - Gets configuration and usage information about transforms. + Returns information about both on-going and latest completed Segment + Replication events. - :arg transform_id: The id of the transform for which to get - stats. '_all' or '*' implies all transforms - :arg allow_no_match: Whether to ignore if a wildcard expression - matches no transforms. (This includes `_all` string or when no - transforms have been specified) - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg from_: skips a number of transform configs, defaults to 0 - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg index: Comma-separated list or wildcard expression of index + names to limit the returned information. + :arg active_only: If `true`, the response only includes ongoing + segment replication events. (default: false) + :arg bytes: The unit in which to display byte values. Valid + choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg completed_only: If `true`, the response only includes + latest completed segment replication events. (default: false) + :arg detailed: If `true`, the response includes detailed + information about segment replications. (default: false) + :arg format: A short version of the Accept header, e.g. json, + yaml. 
+ :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg s: Comma-separated list of column names or column aliases - to sort by - :arg size: specifies a max number of transforms to get, defaults - to 100 - :arg time: The unit in which to display time values Valid + to sort by. + :arg shards: Comma-separated list of shards to display. + :arg time: The unit in which to display time values. Valid choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers + :arg v: Verbose mode. Display column headers. (default: false) """ - # from is a reserved word so it cannot be used, use from_ instead - if "from_" in params: - params["from"] = params.pop("from_") - return await self.transport.perform_request( "GET", - _make_path("_cat", "transforms", transform_id), + _make_path("_cat", "segment_replication", index), params=params, headers=headers, ) diff --git a/opensearchpy/_async/client/cat.pyi b/opensearchpy/_async/client/cat.pyi index c53d43bc..435403e9 100644 --- a/opensearchpy/_async/client/cat.pyi +++ b/opensearchpy/_async/client/cat.pyi @@ -24,6 +24,15 @@ # specific language governing permissions and limitations # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + from typing import Any, Collection, MutableMapping, Optional, Tuple, Union from .utils import NamespacedClient @@ -57,12 +66,12 @@ class CatClient(NamespacedClient): *, node_id: Optional[Any] = ..., bytes: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., s: Optional[Any] = ..., v: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -143,6 +152,7 @@ class CatClient(NamespacedClient): *, index: Optional[Any] = ..., bytes: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., @@ -151,7 +161,6 @@ class CatClient(NamespacedClient): include_unloaded_segments: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pri: Optional[Any] = ..., s: Optional[Any] = ..., time: Optional[Any] = ..., @@ -171,12 +180,12 @@ class CatClient(NamespacedClient): async def master( self, *, + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., s: Optional[Any] = ..., v: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -194,11 +203,12 @@ class CatClient(NamespacedClient): async def cluster_manager( self, *, + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., local: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., + 
master_timeout: Optional[Any] = ..., s: Optional[Any] = ..., v: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -217,14 +227,13 @@ class CatClient(NamespacedClient): self, *, bytes: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., full_id: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., - include_unloaded_segments: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., s: Optional[Any] = ..., time: Optional[Any] = ..., v: Optional[Any] = ..., @@ -270,12 +279,12 @@ class CatClient(NamespacedClient): *, index: Optional[Any] = ..., bytes: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., s: Optional[Any] = ..., time: Optional[Any] = ..., v: Optional[Any] = ..., @@ -296,9 +305,11 @@ class CatClient(NamespacedClient): *, index: Optional[Any] = ..., bytes: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., s: Optional[Any] = ..., v: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -316,12 +327,12 @@ class CatClient(NamespacedClient): async def pending_tasks( self, *, + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., s: Optional[Any] = ..., time: Optional[Any] = ..., v: Optional[Any] = ..., @@ -341,12 +352,12 @@ class CatClient(NamespacedClient): self, *, thread_pool_patterns: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., s: Optional[Any] = ..., size: Optional[Any] = ..., v: Optional[Any] = ..., @@ -387,13 +398,12 @@ class CatClient(NamespacedClient): async def plugins( self, *, + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., - include_bootstrap: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., s: Optional[Any] = ..., v: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -411,12 +421,12 @@ class CatClient(NamespacedClient): async def nodeattrs( self, *, + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., s: Optional[Any] = ..., v: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -434,12 +444,12 @@ class CatClient(NamespacedClient): async def repositories( self, *, + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., s: Optional[Any] = ..., v: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -458,12 +468,12 @@ class CatClient(NamespacedClient): self, 
*, repository: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., s: Optional[Any] = ..., time: Optional[Any] = ..., v: Optional[Any] = ..., @@ -508,12 +518,12 @@ class CatClient(NamespacedClient): self, *, name: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., s: Optional[Any] = ..., v: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -528,17 +538,52 @@ class CatClient(NamespacedClient): params: Optional[MutableMapping[str, Any]] = ..., headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... - async def transforms( + async def all_pit_segments( self, *, - transform_id: Optional[Any] = ..., - allow_no_match: Optional[Any] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + async def pit_segments( + self, + *, + body: Optional[Any] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + async def segment_replication( + self, + *, + index: Optional[Any] = ..., + active_only: Optional[Any] = ..., + bytes: Optional[Any] = ..., + completed_only: Optional[Any] = ..., + detailed: Optional[Any] = ..., format: Optional[Any] = ..., - from_: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., s: Optional[Any] = ..., - size: Optional[Any] = ..., + shards: Optional[Any] = ..., time: Optional[Any] = ..., v: Optional[Any] = ..., pretty: Optional[bool] = ..., diff --git a/opensearchpy/client/cat.py b/opensearchpy/client/cat.py index cc1106d5..8dac68cd 100644 --- a/opensearchpy/client/cat.py +++ b/opensearchpy/client/cat.py @@ -25,6 +25,16 @@ # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. 
+# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + + from .utils import NamespacedClient, _make_path, query_params @@ -36,19 +46,19 @@ def aliases(self, name=None, params=None, headers=None): filter and routing infos. - :arg name: A comma-separated list of alias names to return + :arg name: Comma-separated list of alias names. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: all - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + concrete indices that are open, closed or both. Valid choices: all, + open, closed, hidden, none + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) + from cluster-manager node. (default: false) :arg s: Comma-separated list of column names or column aliases - to sort by - :arg v: Verbose mode. Display column headers + to sort by. + :arg v: Verbose mode. Display column headers. (default: false) """ return self.transport.perform_request( "GET", _make_path("_cat", "aliases", name), params=params, headers=headers @@ -56,12 +66,12 @@ def aliases(self, name=None, params=None, headers=None): @query_params( "bytes", + "cluster_manager_timeout", "format", "h", "help", "local", "master_timeout", - "cluster_manager_timeout", "s", "v", ) @@ -71,23 +81,24 @@ def allocation(self, node_id=None, params=None, headers=None): much disk space they are using. - :arg node_id: A comma-separated list of node IDs or names to - limit the returned information - :arg bytes: The unit in which to display byte values Valid + :arg node_id: Comma-separated list of node IDs or names to limit + the returned information. + :arg bytes: The unit in which to display byte values. Valid choices: b, k, kb, m, mb, g, gb, t, tb, p, pb - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. 
:arg s: Comma-separated list of column names or column aliases - to sort by - :arg v: Verbose mode. Display column headers + to sort by. + :arg v: Verbose mode. Display column headers. (default: false) """ return self.transport.perform_request( "GET", @@ -103,15 +114,15 @@ def count(self, index=None, params=None, headers=None): individual indices. - :arg index: A comma-separated list of index names to limit the - returned information - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg index: Comma-separated list of indices to limit the + returned information. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg s: Comma-separated list of column names or column aliases - to sort by - :arg v: Verbose mode. Display column headers + to sort by. + :arg v: Verbose mode. Display column headers. (default: false) """ return self.transport.perform_request( "GET", _make_path("_cat", "count", index), params=params, headers=headers @@ -123,16 +134,16 @@ def health(self, params=None, headers=None): Returns a concise representation of the cluster health. - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg s: Comma-separated list of column names or column aliases - to sort by - :arg time: The unit in which to display time values Valid + to sort by. + :arg time: The unit in which to display time values. Valid choices: d, h, m, s, ms, micros, nanos - :arg ts: Set to false to disable timestamping Default: True - :arg v: Verbose mode. Display column headers + :arg ts: Set to false to disable timestamping. (default: True) + :arg v: Verbose mode. Display column headers. (default: false) """ return self.transport.perform_request( "GET", "/_cat/health", params=params, headers=headers @@ -144,9 +155,9 @@ def help(self, params=None, headers=None): Returns help for the Cat APIs. - :arg help: Return help information + :arg help: Return help information. (default: false) :arg s: Comma-separated list of column names or column aliases - to sort by + to sort by. """ return self.transport.perform_request( "GET", "/_cat", params=params, headers=headers @@ -154,6 +165,7 @@ def help(self, params=None, headers=None): @query_params( "bytes", + "cluster_manager_timeout", "expand_wildcards", "format", "h", @@ -162,7 +174,6 @@ def help(self, params=None, headers=None): "include_unloaded_segments", "local", "master_timeout", - "cluster_manager_timeout", "pri", "s", "time", @@ -174,93 +185,110 @@ def indices(self, index=None, params=None, headers=None): counts, disk size, ... - :arg index: A comma-separated list of index names to limit the - returned information - :arg bytes: The unit in which to display byte values Valid + :arg index: Comma-separated list of indices to limit the + returned information. + :arg bytes: The unit in which to display byte values. Valid choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. 
:arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: all - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg health: A health status ("green", "yellow", or "red" to - filter only indices matching the specified health status Valid choices: - green, yellow, red - :arg help: Return help information + concrete indices that are open, closed or both. Valid choices: all, + open, closed, hidden, none + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg health: Health status ('green', 'yellow', or 'red') to + filter only indices matching the specified health status. Valid + choices: green, yellow, red + :arg help: Return help information. (default: false) :arg include_unloaded_segments: If set to true segment stats will include stats for segments that are not currently loaded into - memory + memory. (default: false) :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg pri: Set to true to return stats only for primary shards + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. + :arg pri: Set to true to return stats only for primary shards. + (default: false) :arg s: Comma-separated list of column names or column aliases - to sort by - :arg time: The unit in which to display time values Valid + to sort by. + :arg time: The unit in which to display time values. Valid choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers + :arg v: Verbose mode. Display column headers. (default: false) """ return self.transport.perform_request( "GET", _make_path("_cat", "indices", index), params=params, headers=headers ) @query_params( + "cluster_manager_timeout", "format", "h", "help", "local", "master_timeout", - "cluster_manager_timeout", "s", "v", ) def master(self, params=None, headers=None): """ - Returns information about the master node. + Returns information about the cluster-manager node. - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. 
:arg s: Comma-separated list of column names or column aliases - to sort by - :arg v: Verbose mode. Display column headers + to sort by. + :arg v: Verbose mode. Display column headers. (default: false) """ from warnings import warn - warn("Deprecated: use `cluster_manager` instead") + warn( + "Deprecated: To promote inclusive language, please use '/_cat/cluster_manager' instead." + ) return self.transport.perform_request( "GET", "/_cat/master", params=params, headers=headers ) - @query_params("format", "h", "help", "local", "cluster_manager_timeout", "s", "v") + @query_params( + "cluster_manager_timeout", + "format", + "h", + "help", + "local", + "master_timeout", + "s", + "v", + ) def cluster_manager(self, params=None, headers=None): """ - Returns information about the cluster_manager node. + Returns information about the cluster-manager node. - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg v: Verbose mode. Display column headers + to sort by. + :arg v: Verbose mode. Display column headers. (default: false) """ return self.transport.perform_request( "GET", "/_cat/cluster_manager", params=params, headers=headers @@ -268,14 +296,13 @@ def cluster_manager(self, params=None, headers=None): @query_params( "bytes", + "cluster_manager_timeout", "format", "full_id", "h", "help", - "include_unloaded_segments", "local", "master_timeout", - "cluster_manager_timeout", "s", "time", "v", @@ -285,28 +312,27 @@ def nodes(self, params=None, headers=None): Returns basic statistics about performance of cluster nodes. - :arg bytes: The unit in which to display byte values Valid + :arg bytes: The unit in which to display byte values. Valid choices: b, k, kb, m, mb, g, gb, t, tb, p, pb - :arg format: a short version of the Accept header, e.g. json, - yaml + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. :arg full_id: Return the full node ID instead of the shortened - version (default: false) - :arg h: Comma-separated list of column names to display - :arg help: Return help information - :arg include_unloaded_segments: If set to true segment stats - will include stats for segments that are not currently loaded into - memory - :arg local: Calculate the selected nodes using the local cluster - state rather than the state from master node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + version. 
(default: false) + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) + :arg local (Deprecated: This parameter does not cause this API + to act locally): Return local information, do not retrieve the state + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg time: The unit in which to display time values Valid + to sort by. + :arg time: The unit in which to display time values. Valid choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers + :arg v: Verbose mode. Display column headers. (default: false) """ return self.transport.perform_request( "GET", "/_cat/nodes", params=params, headers=headers @@ -321,22 +347,22 @@ def recovery(self, index=None, params=None, headers=None): :arg index: Comma-separated list or wildcard expression of index - names to limit the returned information + names to limit the returned information. :arg active_only: If `true`, the response only includes ongoing - shard recoveries - :arg bytes: The unit in which to display byte values Valid + shard recoveries. (default: false) + :arg bytes: The unit in which to display byte values. Valid choices: b, k, kb, m, mb, g, gb, t, tb, p, pb :arg detailed: If `true`, the response includes detailed - information about shard recoveries - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + information about shard recoveries. (default: false) + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg s: Comma-separated list of column names or column aliases - to sort by - :arg time: The unit in which to display time values Valid + to sort by. + :arg time: The unit in which to display time values. Valid choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers + :arg v: Verbose mode. Display column headers. (default: false) """ return self.transport.perform_request( "GET", _make_path("_cat", "recovery", index), params=params, headers=headers @@ -344,12 +370,12 @@ def recovery(self, index=None, params=None, headers=None): @query_params( "bytes", + "cluster_manager_timeout", "format", "h", "help", "local", "master_timeout", - "cluster_manager_timeout", "s", "time", "v", @@ -359,59 +385,74 @@ def shards(self, index=None, params=None, headers=None): Provides a detailed view of shard allocation on nodes. - :arg index: A comma-separated list of index names to limit the - returned information - :arg bytes: The unit in which to display byte values Valid + :arg index: Comma-separated list of indices to limit the + returned information. + :arg bytes: The unit in which to display byte values. Valid choices: b, k, kb, m, mb, g, gb, t, tb, p, pb - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. 
+ :arg help: Return help information. (default: false) :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg time: The unit in which to display time values Valid + to sort by. + :arg time: The unit in which to display time values. Valid choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers + :arg v: Verbose mode. Display column headers. (default: false) """ return self.transport.perform_request( "GET", _make_path("_cat", "shards", index), params=params, headers=headers ) - @query_params("bytes", "format", "h", "help", "s", "v") + @query_params( + "bytes", + "cluster_manager_timeout", + "format", + "h", + "help", + "master_timeout", + "s", + "v", + ) def segments(self, index=None, params=None, headers=None): """ Provides low-level information about the segments in the shards of an index. - :arg index: A comma-separated list of index names to limit the - returned information - :arg bytes: The unit in which to display byte values Valid + :arg index: Comma-separated list of indices to limit the + returned information. + :arg bytes: The unit in which to display byte values. Valid choices: b, k, kb, m, mb, g, gb, t, tb, p, pb - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg v: Verbose mode. Display column headers + to sort by. + :arg v: Verbose mode. Display column headers. (default: false) """ return self.transport.perform_request( "GET", _make_path("_cat", "segments", index), params=params, headers=headers ) @query_params( + "cluster_manager_timeout", "format", "h", "help", "local", "master_timeout", - "cluster_manager_timeout", "s", "time", "v", @@ -421,33 +462,34 @@ def pending_tasks(self, params=None, headers=None): Returns a concise representation of the cluster pending tasks. - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. 
(default: false) :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg time: The unit in which to display time values Valid + to sort by. + :arg time: The unit in which to display time values. Valid choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers + :arg v: Verbose mode. Display column headers. (default: false) """ return self.transport.perform_request( "GET", "/_cat/pending_tasks", params=params, headers=headers ) @query_params( + "cluster_manager_timeout", "format", "h", "help", "local", "master_timeout", - "cluster_manager_timeout", "s", "size", "v", @@ -458,23 +500,23 @@ def thread_pool(self, thread_pool_patterns=None, params=None, headers=None): queue and rejected statistics are returned for all thread pools. - :arg thread_pool_patterns: A comma-separated list of regular- - expressions to filter the thread pools in the output - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg thread_pool_patterns: Comma-separated list of regular- + expressions to filter the thread pools in the output. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg size: The multiplier in which to display values Valid - choices: , k, m, g, t, p - :arg v: Verbose mode. Display column headers + to sort by. + :arg size: The multiplier in which to display values. + :arg v: Verbose mode. Display column headers. (default: false) """ return self.transport.perform_request( "GET", @@ -490,17 +532,17 @@ def fielddata(self, fields=None, params=None, headers=None): node in the cluster. - :arg fields: A comma-separated list of fields to return in the - output - :arg bytes: The unit in which to display byte values Valid + :arg fields: Comma-separated list of fields to return in the + output. + :arg bytes: The unit in which to display byte values. Valid choices: b, k, kb, m, mb, g, gb, t, tb, p, pb - :arg format: a short version of the Accept header, e.g. 
json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg s: Comma-separated list of column names or column aliases - to sort by - :arg v: Verbose mode. Display column headers + to sort by. + :arg v: Verbose mode. Display column headers. (default: false) """ return self.transport.perform_request( "GET", @@ -510,13 +552,12 @@ def fielddata(self, fields=None, params=None, headers=None): ) @query_params( + "cluster_manager_timeout", "format", "h", "help", - "include_bootstrap", "local", "master_timeout", - "cluster_manager_timeout", "s", "v", ) @@ -525,33 +566,32 @@ def plugins(self, params=None, headers=None): Returns information about installed plugins across nodes node. - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information - :arg include_bootstrap: Include bootstrap plugins in the - response + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg v: Verbose mode. Display column headers + to sort by. + :arg v: Verbose mode. Display column headers. (default: false) """ return self.transport.perform_request( "GET", "/_cat/plugins", params=params, headers=headers ) @query_params( + "cluster_manager_timeout", "format", "h", "help", "local", "master_timeout", - "cluster_manager_timeout", "s", "v", ) @@ -560,31 +600,32 @@ def nodeattrs(self, params=None, headers=None): Returns information about custom node attributes. - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. 
:arg s: Comma-separated list of column names or column aliases - to sort by - :arg v: Verbose mode. Display column headers + to sort by. + :arg v: Verbose mode. Display column headers. (default: false) """ return self.transport.perform_request( "GET", "/_cat/nodeattrs", params=params, headers=headers ) @query_params( + "cluster_manager_timeout", "format", "h", "help", "local", "master_timeout", - "cluster_manager_timeout", "s", "v", ) @@ -593,31 +634,32 @@ def repositories(self, params=None, headers=None): Returns information about snapshot repositories registered in the cluster. - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg local: Return local information, do not retrieve the state - from master node - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg v: Verbose mode. Display column headers + to sort by. + :arg v: Verbose mode. Display column headers. (default: false) """ return self.transport.perform_request( "GET", "/_cat/repositories", params=params, headers=headers ) @query_params( + "cluster_manager_timeout", "format", "h", "help", "ignore_unavailable", "master_timeout", - "cluster_manager_timeout", "s", "time", "v", @@ -627,23 +669,23 @@ def snapshots(self, repository=None, params=None, headers=None): Returns all snapshots in a specific repository. - :arg repository: Name of repository from which to fetch the - snapshot information - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information - :arg ignore_unavailable: Set to true to ignore unavailable - snapshots - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + :arg repository: Comma-separated list of repository names. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed). (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg time: The unit in which to display time values Valid + to sort by. + :arg time: The unit in which to display time values. 
Valid choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers + :arg v: Verbose mode. Display column headers. (default: false) """ return self.transport.perform_request( "GET", @@ -670,35 +712,37 @@ def tasks(self, params=None, headers=None): the cluster. - :arg actions: A comma-separated list of actions that should be + :arg actions: Comma-separated list of actions that should be returned. Leave empty to return all. - :arg detailed: Return detailed task information (default: false) - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information - :arg nodes: A comma-separated list of node IDs or names to limit + :arg detailed: Return detailed task information. (default: + false) + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) + :arg nodes: Comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the - node you're connecting to, leave empty to get information from all nodes + node you're connecting to, leave empty to get information from all + nodes. :arg parent_task_id: Return tasks with specified parent task id (node_id:task_number). Set to -1 to return all. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg time: The unit in which to display time values Valid + to sort by. + :arg time: The unit in which to display time values. Valid choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers + :arg v: Verbose mode. Display column headers. (default: false) """ return self.transport.perform_request( "GET", "/_cat/tasks", params=params, headers=headers ) @query_params( + "cluster_manager_timeout", "format", "h", "help", "local", "master_timeout", - "cluster_manager_timeout", "s", "v", ) @@ -707,58 +751,91 @@ def templates(self, name=None, params=None, headers=None): Returns information about existing templates. - :arg name: A pattern that returned template names must match - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg name: The name of the template. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg s: Comma-separated list of column names or column aliases - to sort by - :arg v: Verbose mode. Display column headers + to sort by. + :arg v: Verbose mode. Display column headers. 
(default: false) """ return self.transport.perform_request( "GET", _make_path("_cat", "templates", name), params=params, headers=headers ) + @query_params() + def all_pit_segments(self, params=None, headers=None): + """ + Lists all active point-in-time segments. + + """ + return self.transport.perform_request( + "GET", "/_cat/pit_segments/_all", params=params, headers=headers + ) + + @query_params() + def pit_segments(self, body=None, params=None, headers=None): + """ + List segments for one or several PITs. + + + :arg body: + """ + return self.transport.perform_request( + "GET", "/_cat/pit_segments", params=params, headers=headers, body=body + ) + @query_params( - "allow_no_match", "format", "from_", "h", "help", "s", "size", "time", "v" + "active_only", + "bytes", + "completed_only", + "detailed", + "format", + "h", + "help", + "s", + "shards", + "time", + "v", ) - def transforms(self, transform_id=None, params=None, headers=None): + def segment_replication(self, index=None, params=None, headers=None): """ - Gets configuration and usage information about transforms. + Returns information about both on-going and latest completed Segment + Replication events. - :arg transform_id: The id of the transform for which to get - stats. '_all' or '*' implies all transforms - :arg allow_no_match: Whether to ignore if a wildcard expression - matches no transforms. (This includes `_all` string or when no - transforms have been specified) - :arg format: a short version of the Accept header, e.g. json, - yaml - :arg from_: skips a number of transform configs, defaults to 0 - :arg h: Comma-separated list of column names to display - :arg help: Return help information + :arg index: Comma-separated list or wildcard expression of index + names to limit the returned information. + :arg active_only: If `true`, the response only includes ongoing + segment replication events. (default: false) + :arg bytes: The unit in which to display byte values. Valid + choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg completed_only: If `true`, the response only includes + latest completed segment replication events. (default: false) + :arg detailed: If `true`, the response includes detailed + information about segment replications. (default: false) + :arg format: A short version of the Accept header, e.g. json, + yaml. + :arg h: Comma-separated list of column names to display. + :arg help: Return help information. (default: false) :arg s: Comma-separated list of column names or column aliases - to sort by - :arg size: specifies a max number of transforms to get, defaults - to 100 - :arg time: The unit in which to display time values Valid + to sort by. + :arg shards: Comma-separated list of shards to display. + :arg time: The unit in which to display time values. Valid choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers + :arg v: Verbose mode. Display column headers. (default: false) """ - # from is a reserved word so it cannot be used, use from_ instead - if "from_" in params: - params["from"] = params.pop("from_") - return self.transport.perform_request( "GET", - _make_path("_cat", "transforms", transform_id), + _make_path("_cat", "segment_replication", index), params=params, headers=headers, ) diff --git a/opensearchpy/client/cat.pyi b/opensearchpy/client/cat.pyi index e29e9253..fc076ef8 100644 --- a/opensearchpy/client/cat.pyi +++ b/opensearchpy/client/cat.pyi @@ -24,6 +24,15 @@ # specific language governing permissions and limitations # under the License. 
+# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + from typing import Any, Collection, MutableMapping, Optional, Tuple, Union from .utils import NamespacedClient @@ -57,12 +66,12 @@ class CatClient(NamespacedClient): *, node_id: Optional[Any] = ..., bytes: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., s: Optional[Any] = ..., v: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -143,6 +152,7 @@ class CatClient(NamespacedClient): *, index: Optional[Any] = ..., bytes: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., @@ -151,7 +161,6 @@ class CatClient(NamespacedClient): include_unloaded_segments: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pri: Optional[Any] = ..., s: Optional[Any] = ..., time: Optional[Any] = ..., @@ -171,12 +180,12 @@ class CatClient(NamespacedClient): def master( self, *, + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., s: Optional[Any] = ..., v: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -194,11 +203,12 @@ class CatClient(NamespacedClient): def cluster_manager( self, *, + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., local: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., s: Optional[Any] = ..., v: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -217,14 +227,13 @@ class CatClient(NamespacedClient): self, *, bytes: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., full_id: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., - include_unloaded_segments: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., s: Optional[Any] = ..., time: Optional[Any] = ..., v: Optional[Any] = ..., @@ -270,12 +279,12 @@ class CatClient(NamespacedClient): *, index: Optional[Any] = ..., bytes: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., s: Optional[Any] = ..., time: Optional[Any] = ..., v: Optional[Any] = ..., @@ -296,9 +305,11 @@ class CatClient(NamespacedClient): *, index: Optional[Any] = ..., bytes: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: 
Optional[Any] = ..., help: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., s: Optional[Any] = ..., v: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -316,12 +327,12 @@ class CatClient(NamespacedClient): def pending_tasks( self, *, + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., s: Optional[Any] = ..., time: Optional[Any] = ..., v: Optional[Any] = ..., @@ -341,12 +352,12 @@ class CatClient(NamespacedClient): self, *, thread_pool_patterns: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., s: Optional[Any] = ..., size: Optional[Any] = ..., v: Optional[Any] = ..., @@ -387,13 +398,12 @@ class CatClient(NamespacedClient): def plugins( self, *, + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., - include_bootstrap: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., s: Optional[Any] = ..., v: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -411,12 +421,12 @@ class CatClient(NamespacedClient): def nodeattrs( self, *, + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., s: Optional[Any] = ..., v: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -434,12 +444,12 @@ class CatClient(NamespacedClient): def repositories( self, *, + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., s: Optional[Any] = ..., v: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -458,12 +468,12 @@ class CatClient(NamespacedClient): self, *, repository: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., s: Optional[Any] = ..., time: Optional[Any] = ..., v: Optional[Any] = ..., @@ -508,12 +518,12 @@ class CatClient(NamespacedClient): self, *, name: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., format: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., s: Optional[Any] = ..., v: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -528,17 +538,52 @@ class CatClient(NamespacedClient): params: Optional[MutableMapping[str, Any]] = ..., headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... 
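A pattern repeated throughout these regenerated stubs is that `cluster_manager_timeout` is now listed as the primary timeout parameter, while `master_timeout` is kept only as a deprecated alias. A small sketch of what that looks like from calling code; the client setup and the template name pattern are placeholders, and security settings are omitted for brevity.

```python
from opensearchpy import OpenSearch

client = OpenSearch(hosts=[{"host": "localhost", "port": 9200}])

# Preferred: the inclusive parameter name used by the regenerated client.
templates = client.cat.templates(
    name="logs-*",  # placeholder template pattern
    params={"cluster_manager_timeout": "30s", "v": "true", "format": "json"},
)

# Still accepted, but deprecated in favour of cluster_manager_timeout.
legacy = client.cat.templates(name="logs-*", params={"master_timeout": "30s"})
```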
- def transforms( + def all_pit_segments( self, *, - transform_id: Optional[Any] = ..., - allow_no_match: Optional[Any] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def pit_segments( + self, + *, + body: Optional[Any] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def segment_replication( + self, + *, + index: Optional[Any] = ..., + active_only: Optional[Any] = ..., + bytes: Optional[Any] = ..., + completed_only: Optional[Any] = ..., + detailed: Optional[Any] = ..., format: Optional[Any] = ..., - from_: Optional[Any] = ..., h: Optional[Any] = ..., help: Optional[Any] = ..., s: Optional[Any] = ..., - size: Optional[Any] = ..., + shards: Optional[Any] = ..., time: Optional[Any] = ..., v: Optional[Any] = ..., pretty: Optional[bool] = ..., From ce835fb59d7333f74ff9f1a32f2d3a6eed36c515 Mon Sep 17 00:00:00 2001 From: Sai Medhini Reddy Maryada <117196660+saimedhi@users.noreply.github.com> Date: Mon, 9 Oct 2023 14:57:09 -0700 Subject: [PATCH 16/21] Generate cluster client from API specs (#530) Signed-off-by: saimedhi --- CHANGELOG.md | 1 + opensearchpy/_async/client/cluster.py | 367 +++++++++++++++++-------- opensearchpy/_async/client/cluster.pyi | 130 ++++++++- opensearchpy/client/cluster.py | 367 +++++++++++++++++-------- opensearchpy/client/cluster.pyi | 130 ++++++++- utils/generate-api.py | 6 + 6 files changed, 765 insertions(+), 236 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 06675018..5d462bba 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Generate `tasks` client from API specs ([#508](https://github.com/opensearch-project/opensearch-py/pull/508)) - Generate `ingest` client from API specs ([#513](https://github.com/opensearch-project/opensearch-py/pull/513)) - Generate `dangling_indices` client from API specs ([#511](https://github.com/opensearch-project/opensearch-py/pull/511)) +- Generate `cluster` client from API specs ([#530](https://github.com/opensearch-project/opensearch-py/pull/530)) - Generate `nodes` client from API specs ([#514](https://github.com/opensearch-project/opensearch-py/pull/514)) - Generate `cat` client from API specs ([#529](https://github.com/opensearch-project/opensearch-py/pull/529)) ### Deprecated diff --git a/opensearchpy/_async/client/cluster.py b/opensearchpy/_async/client/cluster.py index 174245cb..b64bdc5b 100644 --- a/opensearchpy/_async/client/cluster.py +++ b/opensearchpy/_async/client/cluster.py @@ -25,16 +25,27 
@@ # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + + from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params class ClusterClient(NamespacedClient): @query_params( + "awareness_attribute", + "cluster_manager_timeout", "expand_wildcards", "level", "local", "master_timeout", - "cluster_manager_timeout", "timeout", "wait_for_active_shards", "wait_for_events", @@ -48,31 +59,35 @@ async def health(self, index=None, params=None, headers=None): Returns basic information about the health of the cluster. - :arg index: Limit the information returned to a specific index + :arg index: Limit the information returned to specific indicies. + :arg awareness_attribute: The awareness attribute for which the + health is required. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: all - :arg level: Specify the level of detail for returned information - Valid choices: cluster, indices, shards Default: cluster + concrete indices that are open, closed or both. Valid choices: all, + open, closed, hidden, none + :arg level: Specify the level of detail for returned + information. Valid choices: cluster, indices, shards, + awareness_attributes :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg timeout: Explicit operation timeout + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. + :arg timeout: Operation timeout. :arg wait_for_active_shards: Wait until the specified number of - shards is active + shards is active. :arg wait_for_events: Wait until all currently queued events - with the given priority are processed Valid choices: immediate, urgent, - high, normal, low, languid + with the given priority are processed. Valid choices: immediate, + urgent, high, normal, low, languid :arg wait_for_no_initializing_shards: Whether to wait until - there are no initializing shards in the cluster + there are no initializing shards in the cluster. :arg wait_for_no_relocating_shards: Whether to wait until there - are no relocating shards in the cluster + are no relocating shards in the cluster. :arg wait_for_nodes: Wait until the specified number of nodes is - available - :arg wait_for_status: Wait until cluster is in a specific state + available. + :arg wait_for_status: Wait until cluster is in a specific state. 
Valid choices: green, yellow, red """ return await self.transport.perform_request( @@ -82,17 +97,20 @@ async def health(self, index=None, params=None, headers=None): headers=headers, ) - @query_params("local", "master_timeout", "cluster_manager_timeout") + @query_params("cluster_manager_timeout", "local", "master_timeout") async def pending_tasks(self, params=None, headers=None): """ Returns a list of any cluster-level changes (e.g. create index, update mapping, allocate or fail shard) which have not yet been executed. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. """ return await self.transport.perform_request( "GET", "/_cluster/pending_tasks", params=params, headers=headers @@ -100,12 +118,12 @@ async def pending_tasks(self, params=None, headers=None): @query_params( "allow_no_indices", + "cluster_manager_timeout", "expand_wildcards", "flat_settings", "ignore_unavailable", "local", "master_timeout", - "cluster_manager_timeout", "wait_for_metadata_version", "wait_for_timeout", ) @@ -115,28 +133,31 @@ async def state(self, metric=None, index=None, params=None, headers=None): :arg metric: Limit the information returned to the specified - metrics Valid choices: _all, blocks, metadata, nodes, routing_table, - routing_nodes, master_node, version - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices + metrics. Valid choices: _all, blocks, metadata, nodes, routing_table, + routing_nodes, master_node, cluster_manager_node, version + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open - :arg flat_settings: Return settings in flat format (default: + concrete indices that are open, closed or both. Valid choices: all, + open, closed, hidden, none + :arg flat_settings: Return settings in flat format. (default: false) :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager + from cluster-manager node. 
(default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg wait_for_metadata_version: Wait for the metadata version to - be equal or greater than the specified metadata version + be equal or greater than the specified metadata version. :arg wait_for_timeout: The maximum time to wait for - wait_for_metadata_version before timing out + wait_for_metadata_version before timing out. """ if index and metric in SKIP_IN_PATH: metric = "_all" @@ -154,13 +175,13 @@ async def stats(self, node_id=None, params=None, headers=None): Returns high-level overview of cluster statistics. - :arg node_id: A comma-separated list of node IDs or names to - limit the returned information; use `_local` to return information from - the node you're connecting to, leave empty to get information from all - nodes - :arg flat_settings: Return settings in flat format (default: + :arg node_id: Comma-separated list of node IDs or names to limit + the returned information; use `_local` to return information from the + node you're connecting to, leave empty to get information from all + nodes. + :arg flat_settings: Return settings in flat format. (default: false) - :arg timeout: Explicit operation timeout + :arg timeout: Operation timeout. """ return await self.transport.perform_request( "GET", @@ -172,10 +193,10 @@ async def stats(self, node_id=None, params=None, headers=None): ) @query_params( + "cluster_manager_timeout", "dry_run", "explain", "master_timeout", - "cluster_manager_timeout", "metric", "retry_failed", "timeout", @@ -187,30 +208,30 @@ async def reroute(self, body=None, params=None, headers=None): :arg body: The definition of `commands` to perform (`move`, `cancel`, `allocate`) + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg dry_run: Simulate the operation only and return the - resulting state + resulting state. :arg explain: Return an explanation of why the commands can or - cannot be executed - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + cannot be executed. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg metric: Limit the information returned to the specified - metrics. Defaults to all but metadata Valid choices: _all, blocks, - metadata, nodes, routing_table, master_node, version + metrics. Defaults to all but metadata. :arg retry_failed: Retries allocation of shards that are blocked - due to too many subsequent allocation failures - :arg timeout: Explicit operation timeout + due to too many subsequent allocation failures. + :arg timeout: Operation timeout. """ return await self.transport.perform_request( "POST", "/_cluster/reroute", params=params, headers=headers, body=body ) @query_params( + "cluster_manager_timeout", "flat_settings", "include_defaults", "master_timeout", - "cluster_manager_timeout", "timeout", ) async def get_settings(self, params=None, headers=None): @@ -218,22 +239,23 @@ async def get_settings(self, params=None, headers=None): Returns cluster settings. - :arg flat_settings: Return settings in flat format (default: + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. 
+ :arg flat_settings: Return settings in flat format. (default: false) :arg include_defaults: Whether to return all default clusters - setting. - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg timeout: Explicit operation timeout + setting. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. + :arg timeout: Operation timeout. """ return await self.transport.perform_request( "GET", "/_cluster/settings", params=params, headers=headers ) @query_params( - "flat_settings", "master_timeout", "cluster_manager_timeout", "timeout" + "cluster_manager_timeout", "flat_settings", "master_timeout", "timeout" ) async def put_settings(self, body, params=None, headers=None): """ @@ -242,13 +264,14 @@ async def put_settings(self, body, params=None, headers=None): :arg body: The settings to be updated. Can be either `transient` or `persistent` (survives cluster restart). - :arg flat_settings: Return settings in flat format (default: + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg flat_settings: Return settings in flat format. (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg timeout: Explicit operation timeout + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. + :arg timeout: Operation timeout. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -276,9 +299,9 @@ async def allocation_explain(self, body=None, params=None, headers=None): :arg body: The index, shard, and primary flag to explain. Empty means 'explain the first unassigned shard' :arg include_disk_info: Return information about disk usage and - shard sizes (default: false) + shard sizes. (default: false) :arg include_yes_decisions: Return 'YES' decisions in - explanation (default: false) + explanation. (default: false) """ return await self.transport.perform_request( "POST", @@ -288,16 +311,19 @@ async def allocation_explain(self, body=None, params=None, headers=None): body=body, ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") async def delete_component_template(self, name, params=None, headers=None): """ - Deletes a component template + Deletes a component template. - :arg name: The name of the template - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + :arg name: The name of the template. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. + :arg timeout: Operation timeout. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") @@ -309,19 +335,20 @@ async def delete_component_template(self, name, params=None, headers=None): headers=headers, ) - @query_params("local", "master_timeout", "cluster_manager_timeout") + @query_params("cluster_manager_timeout", "local", "master_timeout") async def get_component_template(self, name=None, params=None, headers=None): """ - Returns one or more component templates + Returns one or more component templates. - :arg name: The comma separated names of the component templates + :arg name: The Comma-separated names of the component templates. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. """ return await self.transport.perform_request( "GET", @@ -330,19 +357,22 @@ async def get_component_template(self, name=None, params=None, headers=None): headers=headers, ) - @query_params("create", "master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "create", "master_timeout", "timeout") async def put_component_template(self, name, body, params=None, headers=None): """ - Creates or updates a component template + Creates or updates a component template. - :arg name: The name of the template + :arg name: The name of the template. :arg body: The template definition + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg create: Whether the index template should only be added if - new or can also replace an existing one - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + new or can also replace an existing one. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. + :arg timeout: Operation timeout. """ for param in (name, body): if param in SKIP_IN_PATH: @@ -356,19 +386,18 @@ async def put_component_template(self, name, body, params=None, headers=None): body=body, ) - @query_params("local", "master_timeout", "cluster_manager_timeout") + @query_params("local", "master_timeout") async def exists_component_template(self, name, params=None, headers=None): """ - Returns information about whether a particular component template exist + Returns information about whether a particular component template exist. - :arg name: The name of the template + :arg name: The name of the template. :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. 
(default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") @@ -388,7 +417,7 @@ async def delete_voting_config_exclusions(self, params=None, headers=None): :arg wait_for_removal: Specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting - configuration exclusions list. Default: True + configuration exclusions list. (default: True) """ return await self.transport.perform_request( "DELETE", @@ -403,14 +432,136 @@ async def post_voting_config_exclusions(self, params=None, headers=None): Updates the cluster voting config exclusions by node ids or node names. - :arg node_ids: A comma-separated list of the persistent ids of - the nodes to exclude from the voting configuration. If specified, you - may not also specify ?node_names. - :arg node_names: A comma-separated list of the names of the + :arg node_ids: Comma-separated list of the persistent ids of the nodes to exclude from the voting configuration. If specified, you may - not also specify ?node_ids. - :arg timeout: Explicit operation timeout Default: 30s + not also specify ?node_names. + :arg node_names: Comma-separated list of the names of the nodes + to exclude from the voting configuration. If specified, you may not also + specify ?node_ids. + :arg timeout: Operation timeout. """ return await self.transport.perform_request( "POST", "/_cluster/voting_config_exclusions", params=params, headers=headers ) + + @query_params() + async def delete_decommission_awareness(self, params=None, headers=None): + """ + Delete any existing decommission. + + """ + return await self.transport.perform_request( + "DELETE", + "/_cluster/decommission/awareness/", + params=params, + headers=headers, + ) + + @query_params() + async def delete_weighted_routing(self, params=None, headers=None): + """ + Delete weighted shard routing weights. + + """ + return await self.transport.perform_request( + "DELETE", + "/_cluster/routing/awareness/weights", + params=params, + headers=headers, + ) + + @query_params() + async def get_decommission_awareness( + self, awareness_attribute_name, params=None, headers=None + ): + """ + Get details and status of decommissioned attribute. + + + :arg awareness_attribute_name: Awareness attribute name. + """ + if awareness_attribute_name in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for a required argument 'awareness_attribute_name'." + ) + + return await self.transport.perform_request( + "GET", + _make_path( + "_cluster", + "decommission", + "awareness", + awareness_attribute_name, + "_status", + ), + params=params, + headers=headers, + ) + + @query_params() + async def get_weighted_routing(self, attribute, params=None, headers=None): + """ + Fetches weighted shard routing weights. + + + :arg attribute: Awareness attribute name. + """ + if attribute in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'attribute'.") + + return await self.transport.perform_request( + "GET", + _make_path("_cluster", "routing", "awareness", attribute, "weights"), + params=params, + headers=headers, + ) + + @query_params() + async def put_decommission_awareness( + self, + awareness_attribute_name, + awareness_attribute_value, + params=None, + headers=None, + ): + """ + Decommissions an awareness attribute. 
+ + + :arg awareness_attribute_name: Awareness attribute name. + :arg awareness_attribute_value: Awareness attribute value. + """ + for param in (awareness_attribute_name, awareness_attribute_value): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return await self.transport.perform_request( + "PUT", + _make_path( + "_cluster", + "decommission", + "awareness", + awareness_attribute_name, + awareness_attribute_value, + ), + params=params, + headers=headers, + ) + + @query_params() + async def put_weighted_routing(self, attribute, params=None, headers=None): + """ + Updates weighted shard routing weights. + + + :arg attribute: Awareness attribute name. + """ + if attribute in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'attribute'.") + + return await self.transport.perform_request( + "PUT", + _make_path("_cluster", "routing", "awareness", attribute, "weights"), + params=params, + headers=headers, + ) diff --git a/opensearchpy/_async/client/cluster.pyi b/opensearchpy/_async/client/cluster.pyi index f2cd948c..2685cbb5 100644 --- a/opensearchpy/_async/client/cluster.pyi +++ b/opensearchpy/_async/client/cluster.pyi @@ -24,6 +24,15 @@ # specific language governing permissions and limitations # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + from typing import Any, Collection, MutableMapping, Optional, Tuple, Union from .utils import NamespacedClient @@ -33,11 +42,12 @@ class ClusterClient(NamespacedClient): self, *, index: Optional[Any] = ..., + awareness_attribute: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., level: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., wait_for_active_shards: Optional[Any] = ..., wait_for_events: Optional[Any] = ..., @@ -61,9 +71,9 @@ class ClusterClient(NamespacedClient): async def pending_tasks( self, *, + cluster_manager_timeout: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -83,12 +93,12 @@ class ClusterClient(NamespacedClient): metric: Optional[Any] = ..., index: Optional[Any] = ..., allow_no_indices: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., flat_settings: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., wait_for_metadata_version: Optional[Any] = ..., wait_for_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -127,10 +137,10 @@ class ClusterClient(NamespacedClient): self, *, body: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., dry_run: Optional[Any] = ..., explain: Optional[Any] = ..., master_timeout: 
Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., metric: Optional[Any] = ..., retry_failed: Optional[Any] = ..., timeout: Optional[Any] = ..., @@ -150,10 +160,10 @@ class ClusterClient(NamespacedClient): async def get_settings( self, *, + cluster_manager_timeout: Optional[Any] = ..., flat_settings: Optional[Any] = ..., include_defaults: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -172,9 +182,9 @@ class ClusterClient(NamespacedClient): self, *, body: Any, + cluster_manager_timeout: Optional[Any] = ..., flat_settings: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -228,8 +238,8 @@ class ClusterClient(NamespacedClient): self, name: Any, *, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -248,9 +258,9 @@ class ClusterClient(NamespacedClient): self, *, name: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -269,9 +279,9 @@ class ClusterClient(NamespacedClient): name: Any, *, body: Any, + cluster_manager_timeout: Optional[Any] = ..., create: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -292,7 +302,6 @@ class ClusterClient(NamespacedClient): *, local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -342,3 +351,104 @@ class ClusterClient(NamespacedClient): params: Optional[MutableMapping[str, Any]] = ..., headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... + async def delete_decommission_awareness( + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... 
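The stubs above (and the synchronous client later in this patch) cover the newly generated decommission-awareness and weighted-routing methods. The sketch below uses the synchronous variants and assumes a cluster that is actually configured with a `zone` awareness attribute and a node group `zone-a`, both placeholder names; on an unconfigured cluster these calls will return errors rather than succeed.

```python
from opensearchpy import OpenSearch

client = OpenSearch(
    hosts=[{"host": "localhost", "port": 9200}],
    http_auth=("admin", "admin"),
    use_ssl=True,
    verify_certs=False,
)

# PUT /_cluster/decommission/awareness/zone/zone-a
# Decommission every node whose "zone" attribute is "zone-a".
client.cluster.put_decommission_awareness("zone", "zone-a")

# GET /_cluster/decommission/awareness/zone/_status - poll the status.
print(client.cluster.get_decommission_awareness("zone"))

# DELETE /_cluster/decommission/awareness/ - recommission the nodes.
client.cluster.delete_decommission_awareness()

# GET /_cluster/routing/awareness/zone/weights - current shard routing weights.
print(client.cluster.get_weighted_routing("zone"))

# DELETE /_cluster/routing/awareness/weights - clear any configured weights.
client.cluster.delete_weighted_routing()
```

The async client generated earlier in this patch exposes the same methods as coroutines. Note that `put_weighted_routing`, as generated here, accepts only the attribute name and no request body.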
+ async def delete_weighted_routing( + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + async def get_decommission_awareness( + self, + awareness_attribute_name: Any, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + async def get_weighted_routing( + self, + attribute: Any, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + async def put_decommission_awareness( + self, + awareness_attribute_name: Any, + awareness_attribute_value: Any, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + async def put_weighted_routing( + self, + attribute: Any, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... diff --git a/opensearchpy/client/cluster.py b/opensearchpy/client/cluster.py index fd749cbc..28f1f0e8 100644 --- a/opensearchpy/client/cluster.py +++ b/opensearchpy/client/cluster.py @@ -25,16 +25,27 @@ # under the License. 
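Both the async client above and the synchronous client regenerated below give `cluster.health` an `awareness_attribute` parameter and an `awareness_attributes` level. A brief sketch of a scoped health check, where `zone` is a placeholder awareness attribute assumed to be configured on the cluster and the client setup is again a placeholder:

```python
from opensearchpy import OpenSearch

client = OpenSearch(hosts=[{"host": "localhost", "port": 9200}])

# Cluster-level health, waiting up to 30s for at least yellow status.
print(client.cluster.health(params={"wait_for_status": "yellow", "timeout": "30s"}))

# Per-awareness-attribute health, new in the regenerated client.
print(
    client.cluster.health(
        params={"level": "awareness_attributes", "awareness_attribute": "zone"}
    )
)
```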
+# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + + from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params class ClusterClient(NamespacedClient): @query_params( + "awareness_attribute", + "cluster_manager_timeout", "expand_wildcards", "level", "local", "master_timeout", - "cluster_manager_timeout", "timeout", "wait_for_active_shards", "wait_for_events", @@ -48,31 +59,35 @@ def health(self, index=None, params=None, headers=None): Returns basic information about the health of the cluster. - :arg index: Limit the information returned to a specific index + :arg index: Limit the information returned to specific indicies. + :arg awareness_attribute: The awareness attribute for which the + health is required. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: all - :arg level: Specify the level of detail for returned information - Valid choices: cluster, indices, shards Default: cluster + concrete indices that are open, closed or both. Valid choices: all, + open, closed, hidden, none + :arg level: Specify the level of detail for returned + information. Valid choices: cluster, indices, shards, + awareness_attributes :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg timeout: Explicit operation timeout + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. + :arg timeout: Operation timeout. :arg wait_for_active_shards: Wait until the specified number of - shards is active + shards is active. :arg wait_for_events: Wait until all currently queued events - with the given priority are processed Valid choices: immediate, urgent, - high, normal, low, languid + with the given priority are processed. Valid choices: immediate, + urgent, high, normal, low, languid :arg wait_for_no_initializing_shards: Whether to wait until - there are no initializing shards in the cluster + there are no initializing shards in the cluster. :arg wait_for_no_relocating_shards: Whether to wait until there - are no relocating shards in the cluster + are no relocating shards in the cluster. :arg wait_for_nodes: Wait until the specified number of nodes is - available - :arg wait_for_status: Wait until cluster is in a specific state + available. + :arg wait_for_status: Wait until cluster is in a specific state. 
Valid choices: green, yellow, red """ return self.transport.perform_request( @@ -82,17 +97,20 @@ def health(self, index=None, params=None, headers=None): headers=headers, ) - @query_params("local", "master_timeout", "cluster_manager_timeout") + @query_params("cluster_manager_timeout", "local", "master_timeout") def pending_tasks(self, params=None, headers=None): """ Returns a list of any cluster-level changes (e.g. create index, update mapping, allocate or fail shard) which have not yet been executed. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. """ return self.transport.perform_request( "GET", "/_cluster/pending_tasks", params=params, headers=headers @@ -100,12 +118,12 @@ def pending_tasks(self, params=None, headers=None): @query_params( "allow_no_indices", + "cluster_manager_timeout", "expand_wildcards", "flat_settings", "ignore_unavailable", "local", "master_timeout", - "cluster_manager_timeout", "wait_for_metadata_version", "wait_for_timeout", ) @@ -115,28 +133,31 @@ def state(self, metric=None, index=None, params=None, headers=None): :arg metric: Limit the information returned to the specified - metrics Valid choices: _all, blocks, metadata, nodes, routing_table, - routing_nodes, master_node, version - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices + metrics. Valid choices: _all, blocks, metadata, nodes, routing_table, + routing_nodes, master_node, cluster_manager_node, version + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open - :arg flat_settings: Return settings in flat format (default: + concrete indices that are open, closed or both. Valid choices: all, + open, closed, hidden, none + :arg flat_settings: Return settings in flat format. (default: false) :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. 
:arg wait_for_metadata_version: Wait for the metadata version to - be equal or greater than the specified metadata version + be equal or greater than the specified metadata version. :arg wait_for_timeout: The maximum time to wait for - wait_for_metadata_version before timing out + wait_for_metadata_version before timing out. """ if index and metric in SKIP_IN_PATH: metric = "_all" @@ -154,13 +175,13 @@ def stats(self, node_id=None, params=None, headers=None): Returns high-level overview of cluster statistics. - :arg node_id: A comma-separated list of node IDs or names to - limit the returned information; use `_local` to return information from - the node you're connecting to, leave empty to get information from all - nodes - :arg flat_settings: Return settings in flat format (default: + :arg node_id: Comma-separated list of node IDs or names to limit + the returned information; use `_local` to return information from the + node you're connecting to, leave empty to get information from all + nodes. + :arg flat_settings: Return settings in flat format. (default: false) - :arg timeout: Explicit operation timeout + :arg timeout: Operation timeout. """ return self.transport.perform_request( "GET", @@ -172,10 +193,10 @@ def stats(self, node_id=None, params=None, headers=None): ) @query_params( + "cluster_manager_timeout", "dry_run", "explain", "master_timeout", - "cluster_manager_timeout", "metric", "retry_failed", "timeout", @@ -187,30 +208,30 @@ def reroute(self, body=None, params=None, headers=None): :arg body: The definition of `commands` to perform (`move`, `cancel`, `allocate`) + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg dry_run: Simulate the operation only and return the - resulting state + resulting state. :arg explain: Return an explanation of why the commands can or - cannot be executed - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + cannot be executed. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. :arg metric: Limit the information returned to the specified - metrics. Defaults to all but metadata Valid choices: _all, blocks, - metadata, nodes, routing_table, master_node, version + metrics. Defaults to all but metadata. :arg retry_failed: Retries allocation of shards that are blocked - due to too many subsequent allocation failures - :arg timeout: Explicit operation timeout + due to too many subsequent allocation failures. + :arg timeout: Operation timeout. """ return self.transport.perform_request( "POST", "/_cluster/reroute", params=params, headers=headers, body=body ) @query_params( + "cluster_manager_timeout", "flat_settings", "include_defaults", "master_timeout", - "cluster_manager_timeout", "timeout", ) def get_settings(self, params=None, headers=None): @@ -218,22 +239,23 @@ def get_settings(self, params=None, headers=None): Returns cluster settings. - :arg flat_settings: Return settings in flat format (default: + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg flat_settings: Return settings in flat format. (default: false) :arg include_defaults: Whether to return all default clusters - setting. 
- :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg timeout: Explicit operation timeout + setting. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. + :arg timeout: Operation timeout. """ return self.transport.perform_request( "GET", "/_cluster/settings", params=params, headers=headers ) @query_params( - "flat_settings", "master_timeout", "cluster_manager_timeout", "timeout" + "cluster_manager_timeout", "flat_settings", "master_timeout", "timeout" ) def put_settings(self, body, params=None, headers=None): """ @@ -242,13 +264,14 @@ def put_settings(self, body, params=None, headers=None): :arg body: The settings to be updated. Can be either `transient` or `persistent` (survives cluster restart). - :arg flat_settings: Return settings in flat format (default: + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg flat_settings: Return settings in flat format. (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg timeout: Explicit operation timeout + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. + :arg timeout: Operation timeout. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -276,9 +299,9 @@ def allocation_explain(self, body=None, params=None, headers=None): :arg body: The index, shard, and primary flag to explain. Empty means 'explain the first unassigned shard' :arg include_disk_info: Return information about disk usage and - shard sizes (default: false) + shard sizes. (default: false) :arg include_yes_decisions: Return 'YES' decisions in - explanation (default: false) + explanation. (default: false) """ return self.transport.perform_request( "POST", @@ -288,16 +311,19 @@ def allocation_explain(self, body=None, params=None, headers=None): body=body, ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") def delete_component_template(self, name, params=None, headers=None): """ - Deletes a component template + Deletes a component template. - :arg name: The name of the template - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + :arg name: The name of the template. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. + :arg timeout: Operation timeout. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") @@ -309,19 +335,20 @@ def delete_component_template(self, name, params=None, headers=None): headers=headers, ) - @query_params("local", "master_timeout", "cluster_manager_timeout") + @query_params("cluster_manager_timeout", "local", "master_timeout") def get_component_template(self, name=None, params=None, headers=None): """ - Returns one or more component templates + Returns one or more component templates. - :arg name: The comma separated names of the component templates + :arg name: The Comma-separated names of the component templates. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. """ return self.transport.perform_request( "GET", @@ -330,19 +357,22 @@ def get_component_template(self, name=None, params=None, headers=None): headers=headers, ) - @query_params("create", "master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "create", "master_timeout", "timeout") def put_component_template(self, name, body, params=None, headers=None): """ - Creates or updates a component template + Creates or updates a component template. - :arg name: The name of the template + :arg name: The name of the template. :arg body: The template definition + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg create: Whether the index template should only be added if - new or can also replace an existing one - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + new or can also replace an existing one. (default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. + :arg timeout: Operation timeout. """ for param in (name, body): if param in SKIP_IN_PATH: @@ -356,19 +386,18 @@ def put_component_template(self, name, body, params=None, headers=None): body=body, ) - @query_params("local", "master_timeout", "cluster_manager_timeout") + @query_params("local", "master_timeout") def exists_component_template(self, name, params=None, headers=None): """ - Returns information about whether a particular component template exist + Returns information about whether a particular component template exist. - :arg name: The name of the template + :arg name: The name of the template. :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. 
(default: false) + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead): Operation timeout for connection + to master node. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") @@ -388,7 +417,7 @@ def delete_voting_config_exclusions(self, params=None, headers=None): :arg wait_for_removal: Specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting - configuration exclusions list. Default: True + configuration exclusions list. (default: True) """ return self.transport.perform_request( "DELETE", @@ -403,14 +432,136 @@ def post_voting_config_exclusions(self, params=None, headers=None): Updates the cluster voting config exclusions by node ids or node names. - :arg node_ids: A comma-separated list of the persistent ids of - the nodes to exclude from the voting configuration. If specified, you - may not also specify ?node_names. - :arg node_names: A comma-separated list of the names of the + :arg node_ids: Comma-separated list of the persistent ids of the nodes to exclude from the voting configuration. If specified, you may - not also specify ?node_ids. - :arg timeout: Explicit operation timeout Default: 30s + not also specify ?node_names. + :arg node_names: Comma-separated list of the names of the nodes + to exclude from the voting configuration. If specified, you may not also + specify ?node_ids. + :arg timeout: Operation timeout. """ return self.transport.perform_request( "POST", "/_cluster/voting_config_exclusions", params=params, headers=headers ) + + @query_params() + def delete_decommission_awareness(self, params=None, headers=None): + """ + Delete any existing decommission. + + """ + return self.transport.perform_request( + "DELETE", + "/_cluster/decommission/awareness/", + params=params, + headers=headers, + ) + + @query_params() + def delete_weighted_routing(self, params=None, headers=None): + """ + Delete weighted shard routing weights. + + """ + return self.transport.perform_request( + "DELETE", + "/_cluster/routing/awareness/weights", + params=params, + headers=headers, + ) + + @query_params() + def get_decommission_awareness( + self, awareness_attribute_name, params=None, headers=None + ): + """ + Get details and status of decommissioned attribute. + + + :arg awareness_attribute_name: Awareness attribute name. + """ + if awareness_attribute_name in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for a required argument 'awareness_attribute_name'." + ) + + return self.transport.perform_request( + "GET", + _make_path( + "_cluster", + "decommission", + "awareness", + awareness_attribute_name, + "_status", + ), + params=params, + headers=headers, + ) + + @query_params() + def get_weighted_routing(self, attribute, params=None, headers=None): + """ + Fetches weighted shard routing weights. + + + :arg attribute: Awareness attribute name. + """ + if attribute in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'attribute'.") + + return self.transport.perform_request( + "GET", + _make_path("_cluster", "routing", "awareness", attribute, "weights"), + params=params, + headers=headers, + ) + + @query_params() + def put_decommission_awareness( + self, + awareness_attribute_name, + awareness_attribute_value, + params=None, + headers=None, + ): + """ + Decommissions an awareness attribute. + + + :arg awareness_attribute_name: Awareness attribute name. + :arg awareness_attribute_value: Awareness attribute value. 
+ """ + for param in (awareness_attribute_name, awareness_attribute_value): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path( + "_cluster", + "decommission", + "awareness", + awareness_attribute_name, + awareness_attribute_value, + ), + params=params, + headers=headers, + ) + + @query_params() + def put_weighted_routing(self, attribute, params=None, headers=None): + """ + Updates weighted shard routing weights. + + + :arg attribute: Awareness attribute name. + """ + if attribute in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'attribute'.") + + return self.transport.perform_request( + "PUT", + _make_path("_cluster", "routing", "awareness", attribute, "weights"), + params=params, + headers=headers, + ) diff --git a/opensearchpy/client/cluster.pyi b/opensearchpy/client/cluster.pyi index 49b27c54..ccc3737a 100644 --- a/opensearchpy/client/cluster.pyi +++ b/opensearchpy/client/cluster.pyi @@ -24,6 +24,15 @@ # specific language governing permissions and limitations # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + from typing import Any, Collection, MutableMapping, Optional, Tuple, Union from .utils import NamespacedClient @@ -33,11 +42,12 @@ class ClusterClient(NamespacedClient): self, *, index: Optional[Any] = ..., + awareness_attribute: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., level: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., wait_for_active_shards: Optional[Any] = ..., wait_for_events: Optional[Any] = ..., @@ -61,9 +71,9 @@ class ClusterClient(NamespacedClient): def pending_tasks( self, *, + cluster_manager_timeout: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -83,12 +93,12 @@ class ClusterClient(NamespacedClient): metric: Optional[Any] = ..., index: Optional[Any] = ..., allow_no_indices: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., flat_settings: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., wait_for_metadata_version: Optional[Any] = ..., wait_for_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -127,10 +137,10 @@ class ClusterClient(NamespacedClient): self, *, body: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., dry_run: Optional[Any] = ..., explain: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., metric: Optional[Any] = ..., retry_failed: Optional[Any] = ..., timeout: Optional[Any] = ..., @@ -150,10 +160,10 @@ 
class ClusterClient(NamespacedClient): def get_settings( self, *, + cluster_manager_timeout: Optional[Any] = ..., flat_settings: Optional[Any] = ..., include_defaults: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -172,9 +182,9 @@ class ClusterClient(NamespacedClient): self, *, body: Any, + cluster_manager_timeout: Optional[Any] = ..., flat_settings: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -228,8 +238,8 @@ class ClusterClient(NamespacedClient): self, name: Any, *, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -248,9 +258,9 @@ class ClusterClient(NamespacedClient): self, *, name: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -269,9 +279,9 @@ class ClusterClient(NamespacedClient): name: Any, *, body: Any, + cluster_manager_timeout: Optional[Any] = ..., create: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -292,7 +302,6 @@ class ClusterClient(NamespacedClient): *, local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -342,3 +351,104 @@ class ClusterClient(NamespacedClient): params: Optional[MutableMapping[str, Any]] = ..., headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... + def delete_decommission_awareness( + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def delete_weighted_routing( + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... 
+ def get_decommission_awareness( + self, + awareness_attribute_name: Any, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def get_weighted_routing( + self, + attribute: Any, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def put_decommission_awareness( + self, + awareness_attribute_name: Any, + awareness_attribute_value: Any, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def put_weighted_routing( + self, + attribute: Any, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... diff --git a/utils/generate-api.py b/utils/generate-api.py index 2cb93eb5..fffd0e82 100644 --- a/utils/generate-api.py +++ b/utils/generate-api.py @@ -502,6 +502,12 @@ def read_modules(): if p["x-operation-group"] != "nodes.hot_threads" and "type" in params_new: params_new.pop("type") + if ( + p["x-operation-group"] == "cluster.health" + and "ensure_node_commissioned" in params_new + ): + params_new.pop("ensure_node_commissioned") + if bool(params_new): p.update({"params": params_new}) From 84ac172ddc54b3e6c975d36221d16ec3e78a2fe9 Mon Sep 17 00:00:00 2001 From: DJ Carrillo <60985926+Djcarrillo6@users.noreply.github.com> Date: Wed, 11 Oct 2023 10:11:35 -0700 Subject: [PATCH 17/21] Added new guide & sample module for using index templates. 
(#531) Added index_template guide and sample Signed-off-by: Djcarrillo6 --- CHANGELOG.md | 1 + USER_GUIDE.md | 1 + guides/index_template.md | 184 ++++++++++++++++++ .../index_template/index_template_sample.py | 143 ++++++++++++++ 4 files changed, 329 insertions(+) create mode 100644 guides/index_template.md create mode 100644 samples/index_template/index_template_sample.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 5d462bba..141ca15a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Added - Added generating imports and headers to API generator ([#467](https://github.com/opensearch-project/opensearch-py/pull/467)) - Added point-in-time APIs (create_pit, delete_pit, delete_all_pits, get_all_pits) and Security Client APIs (health and update_audit_configuration) ([#502](https://github.com/opensearch-project/opensearch-py/pull/502)) +- Added new guide for using index templates with the client ([#531](https://github.com/opensearch-project/opensearch-py/pull/531)) ### Changed - Generate `tasks` client from API specs ([#508](https://github.com/opensearch-project/opensearch-py/pull/508)) - Generate `ingest` client from API specs ([#513](https://github.com/opensearch-project/opensearch-py/pull/513)) diff --git a/USER_GUIDE.md b/USER_GUIDE.md index 4b6b89c2..b14ee0ad 100644 --- a/USER_GUIDE.md +++ b/USER_GUIDE.md @@ -153,6 +153,7 @@ print(response) - [Search](guides/search.md) - [Point in Time](guides/point_in_time.md) - [Using a Proxy](guides/proxy.md) +- [Index Templates](guides/index_template.md) ## Plugins diff --git a/guides/index_template.md b/guides/index_template.md new file mode 100644 index 00000000..3afdd1dc --- /dev/null +++ b/guides/index_template.md @@ -0,0 +1,184 @@ +# Index Template +Index templates are a convenient way to define settings, mappings, and aliases for one or more indices when they are created. In this guide, you'll learn how to create an index template and apply it to an index. + +## Setup + +Assuming you have OpenSearch running locally on port 9200, you can create a client instance with the following code: +```python +from opensearchpy import OpenSearch +client = OpenSearch( + hosts=['https://localhost:9200'], + use_ssl=True, + verify_certs=False, + http_auth=('admin', 'admin') +) +``` + +## Index Template API Actions + +### Create an Index Template +You can create an index template to define default settings and mappings for indices of certain patterns. The following example creates an index template named `books` with default settings and mappings for indices of the `books-*` pattern: + +```python +client.indices.put_index_template( + name='books', + body={ + 'index_patterns': ['books-*'], + 'template': { + 'settings': { + 'index': { + 'number_of_shards': 3, + 'number_of_replicas': 0 + } + }, + 'mappings': { + 'properties': { + 'title': { 'type': 'text' }, + 'author': { 'type': 'text' }, + 'published_on': { 'type': 'date' }, + 'pages': { 'type': 'integer' } + } + } + } + } +) +``` + +Now, when you create an index that matches the `books-*` pattern, OpenSearch will automatically apply the template's settings and mappings to the index. 
Let's create an index named `books-nonfiction` and verify that its settings and mappings match those of the template: + +```python +client.indices.create(index='books-nonfiction') +print(client.indices.get(index='books-nonfiction')) +``` + +### Multiple Index Templates +If multiple index templates match the index's name, OpenSearch will apply the template with the highest priority. The following example creates two index templates named `books-*` and `books-fiction-*` with different settings: + +```python +client.indices.put_index_template( + name='books', + body={ + 'index_patterns': ['books-*'], + 'priority': 0, # default priority + 'template': { + 'settings': { + 'index': { + 'number_of_shards': 3, + 'number_of_replicas': 0 + } + } + } + } +) + +client.indices.put_index_template( + name='books-fiction', + body={ + 'index_patterns': ['books-fiction-*'], + 'priority': 1, # higher priority than the `books` template + 'template': { + 'settings': { + 'index': { + 'number_of_shards': 1, + 'number_of_replicas': 1 + } + } + } + } +) +``` + +When we create an index named `books-fiction-romance`, OpenSearch will apply the `books-fiction-*` template's settings to the index: + +```python +client.indices.create(index='books-fiction-romance') +print(client.indices.get(index='books-fiction-romance')) +``` + +### Composable Index Templates +Composable index templates are a new type of index template that allow you to define multiple component templates and compose them into a final template. The following example creates a component template named `books_mappings` with default mappings for indices of the `books-*` and `books-fiction-*` patterns: + +```python +client.cluster.put_component_template( + name='books_mappings', + body={ + 'template': { + 'mappings': { + 'properties': { + 'title': { 'type': 'text' }, + 'author': { 'type': 'text' }, + 'published_on': { 'type': 'date' }, + 'pages': { 'type': 'integer' } + } + } + } + } +) + +client.indices.put_index_template( + name='books', + body={ + 'index_patterns': ['books-*'], + 'composed_of': ['books_mappings'], # use the `books_mappings` component template + 'priority': 0, + 'template': { + 'settings': { + 'index': { + 'number_of_shards': 3, + 'number_of_replicas': 0 + } + } + } + } +) + +client.indices.put_index_template( + name='books', + body={ + 'index_patterns': ['books-*'], + 'composed_of': ['books_mappings'], # use the `books_mappings` component template + 'priority': 1, + 'template': { + 'settings': { + 'index': { + 'number_of_shards': 1, + 'number_of_replicas': 1 + } + } + } + } +) +``` + +When we create an index named `books-fiction-horror`, OpenSearch will apply the `books-fiction-*` template's settings, and `books_mappings` template mappings to the index: + +```python +client.indices.create(index='books-fiction-horror') +print(client.indices.get(index='books-fiction-horror')) +``` + +### Get an Index Template +You can get an index template with the `get_index_template` API action: + +```python +print(client.indices.get_index_template(name='books')) +``` + +### Delete an Index Template +You can delete an index template with the `delete_template` API action: + +```python +client.indices.delete_index_template(name='books') +``` + +## Cleanup +Let's delete all resources created in this guide: + +```python +client.indices.delete(index='books-*') +client.indices.delete_index_template(name='books-fiction') +client.cluster.delete_component_template(name='books_mappings') +``` + +# Sample Code +See 
[index_template_sample.py](/samples/index_template/index_template_sample.py) for a working sample of the concepts in this guide. \ No newline at end of file diff --git a/samples/index_template/index_template_sample.py b/samples/index_template/index_template_sample.py new file mode 100644 index 00000000..dab504be --- /dev/null +++ b/samples/index_template/index_template_sample.py @@ -0,0 +1,143 @@ +from opensearchpy import OpenSearch + +# Create a client instance +client = OpenSearch( + hosts=['https://localhost:9200'], + use_ssl=True, + verify_certs=False, + http_auth=('admin', 'admin') +) + +# You can create an index template to define default settings and mappings for indices of certain patterns. The following example creates an index template named `books` with default settings and mappings for indices of the `books-*` pattern: +client.indices.put_index_template( +name='books', +body={ + 'index_patterns': ['books-*'], + 'priority': 1, + 'template': { + 'settings': { + 'index': { + 'number_of_shards': 3, + 'number_of_replicas': 0 + } + }, + 'mappings': { + 'properties': { + 'title': { 'type': 'text' }, + 'author': { 'type': 'text' }, + 'published_on': { 'type': 'date' }, + 'pages': { 'type': 'integer' } + } + } + } +} +) + +# Now, when you create an index that matches the `books-*` pattern, OpenSearch will automatically apply the template's settings and mappings to the index. Let's create an index named books-nonfiction and verify that its settings and mappings match those of the template: +client.indices.create(index='books-nonfiction') +print(client.indices.get(index='books-nonfiction')) + +# If multiple index templates match the index's name, OpenSearch will apply the template with the highest `priority`. The following example creates two index templates named `books-*` and `books-fiction-*` with different settings: +client.indices.put_index_template( +name='books', +body={ + 'index_patterns': ['books-*'], + 'priority': 1, + 'template': { + 'settings': { + 'index': { + 'number_of_shards': 3, + 'number_of_replicas': 0 + } + } + } +} +) + +client.indices.put_index_template( +name='books-fiction', +body={ + 'index_patterns': ['books-fiction-*'], + 'priority': 2, + 'template': { + 'settings': { + 'index': { + 'number_of_shards': 1, + 'number_of_replicas': 1 + } + } + } +} +) + +# # Test multiple index templates +client.indices.create(index='books-fiction-romance') +print(client.indices.get(index='books-fiction-romance')) + + +# Composable index templates are a new type of index template that allow you to define multiple component templates and compose them into a final template. 
The following example creates a component template named `books_mappings` with default mappings for indices of the `books-*` and `books-fiction-*` patterns: +client.cluster.put_component_template( +name='books_mappings', +body={ + 'template': { + 'mappings': { + 'properties': { + 'title': { 'type': 'text' }, + 'author': { 'type': 'text' }, + 'published_on': { 'type': 'date' }, + 'pages': { 'type': 'integer' } + } + } + } +} +) + +client.indices.put_index_template( +name='books', +body={ + 'index_patterns': ['books-*'], + 'composed_of': ['books_mappings'], + 'priority': 4, + 'template': { + 'settings': { + 'index': { + 'number_of_shards': 3, + 'number_of_replicas': 0 + } + } + } +} +) + +client.indices.put_index_template( +name='books-fiction', +body={ + 'index_patterns': ['books-fiction-*'], + 'composed_of': ['books_mappings'], + 'priority': 5, + 'template': { + 'settings': { + 'index': { + 'number_of_shards': 1, + 'number_of_replicas': 1 + } + } + } +} +) + + +# Test composable index templates +client.indices.create(index='books-fiction-horror') +print(client.indices.get(index='books-fiction-horror')) + +# Get an index template +print(client.indices.get_index_template(name='books')) + +# Delete an index template +client.indices.delete_index_template(name='books') + +# Cleanup +client.indices.delete(index='books-*') +client.indices.delete_index_template(name='books-fiction') +client.cluster.delete_component_template(name='books_mappings') \ No newline at end of file From 62b408bbd11a36051f5b1bd3318620aba4a1a2a2 Mon Sep 17 00:00:00 2001 From: DJ Carrillo <60985926+Djcarrillo6@users.noreply.github.com> Date: Thu, 12 Oct 2023 14:11:21 -0700 Subject: [PATCH 18/21] Removed EOL Python3.5 & bumped urllib3 version to patch security vulnerability (#533) Updated CHANGELOG with pull # Updated CHANGELOG with pull # Updated CHANGELOG removed section. 
Updated CHANGELOG removed section again Signed-off-by: Djcarrillo6 --- .github/workflows/test.yml | 1 - CHANGELOG.md | 2 ++ noxfile.py | 2 +- setup.py | 2 +- 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 3287c11b..bd0ac738 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -7,7 +7,6 @@ jobs: strategy: matrix: entry: - - { os: 'ubuntu-20.04', python-version: "3.5" } - { os: 'ubuntu-20.04', python-version: "3.6" } - { os: 'ubuntu-latest', python-version: "3.7" } - { os: 'ubuntu-latest', python-version: "3.8" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 141ca15a..6658e562 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fixed race condition in AWSV4SignerAuth & AWSV4SignerAsyncAuth when using refreshable credentials ([#470](https://github.com/opensearch-project/opensearch-py/pull/470)) ### Security ### Dependencies +- Bumps `urllib3` from >= 1.26.9 to >= 1.26.17 [#533](https://github.com/opensearch-project/opensearch-py/pull/533) ## [2.3.0] ### Added @@ -66,6 +67,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Deprecated ### Removed - Removed support for Python 2.7 ([#421](https://github.com/opensearch-project/opensearch-py/pull/421)) +- Removed support for Python 3.5 ([#533](https://github.com/opensearch-project/opensearch-py/pull/533)) ### Fixed - Fixed flaky CI tests by replacing httpbin with a simple http_server ([#395](https://github.com/opensearch-project/opensearch-py/pull/395)) - Fixed import cycle when importing async helpers ([#311](https://github.com/opensearch-project/opensearch-py/pull/311)) diff --git a/noxfile.py b/noxfile.py index a5da2b60..80b4e400 100644 --- a/noxfile.py +++ b/noxfile.py @@ -36,7 +36,7 @@ ) -@nox.session(python=["2.7", "3.5", "3.6", "3.7", "3.8", "3.9", "3.10", "3.11"]) +@nox.session(python=["2.7", "3.6", "3.7", "3.8", "3.9", "3.10", "3.11"]) def test(session): session.install(".") session.install("-r", "dev-requirements.txt") diff --git a/setup.py b/setup.py index c21e053f..8bde5f40 100644 --- a/setup.py +++ b/setup.py @@ -50,7 +50,7 @@ if package == module_dir or package.startswith(module_dir + ".") ] install_requires = [ - "urllib3>=1.26.9", + "urllib3>=1.26.17", "requests>=2.4.0, <3.0.0", "six", "python-dateutil", From 7a638cdafcae8c377724b59f238e64d7ff028182 Mon Sep 17 00:00:00 2001 From: "Daniel (dB.) Doubrovkine" Date: Thu, 12 Oct 2023 18:55:31 -0400 Subject: [PATCH 19/21] Align pool_maxsize for different connection pool implementations. (#535) * Align pool_maxsize for different connection pool implementations. Signed-off-by: dblock * Document connection classes and settings. Signed-off-by: dblock * Undo change in async for backwards compatibility. Signed-off-by: dblock * Fix: typo. 
Signed-off-by: dblock --------- Signed-off-by: dblock --- CHANGELOG.md | 1 + USER_GUIDE.md | 1 + guides/auth.md | 1 - guides/connection_classes.md | 81 +++++++++++++++++++ guides/proxy.md | 1 - opensearchpy/connection/http_urllib3.py | 9 ++- opensearchpy/transport.py | 6 ++ .../test_client/test_requests.py | 32 ++++++++ test_opensearchpy/test_client/test_urllib3.py | 39 +++++++++ 9 files changed, 166 insertions(+), 5 deletions(-) create mode 100644 guides/connection_classes.md create mode 100644 test_opensearchpy/test_client/test_requests.py create mode 100644 test_opensearchpy/test_client/test_urllib3.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 6658e562..a2317635 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added generating imports and headers to API generator ([#467](https://github.com/opensearch-project/opensearch-py/pull/467)) - Added point-in-time APIs (create_pit, delete_pit, delete_all_pits, get_all_pits) and Security Client APIs (health and update_audit_configuration) ([#502](https://github.com/opensearch-project/opensearch-py/pull/502)) - Added new guide for using index templates with the client ([#531](https://github.com/opensearch-project/opensearch-py/pull/531)) +- Added `pool_maxsize` for `Urllib3HttpConnection` ([#535](https://github.com/opensearch-project/opensearch-py/pull/535)) ### Changed - Generate `tasks` client from API specs ([#508](https://github.com/opensearch-project/opensearch-py/pull/508)) - Generate `ingest` client from API specs ([#513](https://github.com/opensearch-project/opensearch-py/pull/513)) diff --git a/USER_GUIDE.md b/USER_GUIDE.md index b14ee0ad..416bbc4d 100644 --- a/USER_GUIDE.md +++ b/USER_GUIDE.md @@ -154,6 +154,7 @@ print(response) - [Point in Time](guides/point_in_time.md) - [Using a Proxy](guides/proxy.md) - [Index Templates](guides/index_template.md) +- [Connection Classes](guides/connection_classes.md) ## Plugins diff --git a/guides/auth.md b/guides/auth.md index 4b314764..3e7f4092 100644 --- a/guides/auth.md +++ b/guides/auth.md @@ -113,7 +113,6 @@ client = OpenSearch( ['htps://...'], use_ssl=True, verify_certs=True, - connection_class=RequestsHttpConnection, http_auth=HTTPKerberosAuth(mutual_authentication=OPTIONAL) ) diff --git a/guides/connection_classes.md b/guides/connection_classes.md new file mode 100644 index 00000000..da7357fb --- /dev/null +++ b/guides/connection_classes.md @@ -0,0 +1,81 @@ +- [Connection Classes](#connection-classes) + - [Selecting a Connection Class](#selecting-a-connection-class) + - [Urllib3HttpConnection](#urllib3httpconnection) + - [RequestsHttpConnection](#requestshttpconnection) + - [AsyncHttpConnection](#asynchttpconnection) + - [Connection Pooling](#connection-pooling) + +# Connection Classes + +The OpenSearch Python synchronous client supports both the `Urllib3HttpConnection` connection class (default) from the [urllib3](https://pypi.org/project/urllib3/) library, and `RequestsHttpConnection` from the [requests](https://pypi.org/project/requests/) library. We recommend you use the default, unless your application is standardized on `requests`. + +The faster, asynchronous client, implements a class called `AsyncHttpConnection`, which uses [aiohttp](https://pypi.org/project/aiohttp/). 
+ +## Selecting a Connection Class + +### Urllib3HttpConnection + +```python +from opensearchpy import OpenSearch, Urllib3HttpConnection + +client = OpenSearch( + hosts = [{'host': 'localhost', 'port': 9200}], + http_auth = ('admin', 'admin'), + use_ssl = True, + verify_certs = False, + ssl_show_warn = False, + connection_class = Urllib3HttpConnection +) +``` + +### RequestsHttpConnection + +```python +from opensearchpy import OpenSearch, RequestsHttpConnection + +client = OpenSearch( + hosts = [{'host': 'localhost', 'port': 9200}], + http_auth = ('admin', 'admin'), + use_ssl = True, + verify_certs = False, + ssl_show_warn = False, + connection_class = RequestsHttpConnection +) +``` + +### AsyncHttpConnection + +```python +from opensearchpy import AsyncOpenSearch, AsyncHttpConnection + +async def main(): + client = AsyncOpenSearch( + hosts = [{'host': 'localhost', 'port': 9200}], + http_auth = ('admin', 'admin'), + use_ssl = True, + verify_certs = False, + ssl_show_warn = False, + connection_class = AsyncHttpConnection + ) +``` + +## Connection Pooling + +The OpenSearch Python client has a connection pool for each `host` value specified during initialization, and a connection pool for HTTP connections to each host implemented in the underlying HTTP libraries. You can adjust the max size of the latter connection pool with `pool_maxsize`. + +If you don't set this value, each connection library implementation will provide its default, which is typically `10`. Changing the pool size may improve performance in some multithreaded scenarios. + +The following example sets the number of connections in the connection pool to 12. + +```python +from opensearchpy import OpenSearch + +client = OpenSearch( + hosts = [{'host': 'localhost', 'port': 9200}], + http_auth = ('admin', 'admin'), + use_ssl = True, + verify_certs = False, + ssl_show_warn = False, + pool_maxsize = 12, +) +``` \ No newline at end of file diff --git a/guides/proxy.md b/guides/proxy.md index 5be7edf4..96b7d441 100644 --- a/guides/proxy.md +++ b/guides/proxy.md @@ -13,7 +13,6 @@ OpenSearch( hosts=["htps://..."], use_ssl=True, verify_certs=True, - connection_class=RequestsHttpConnection, trust_env=True, ) ``` diff --git a/opensearchpy/connection/http_urllib3.py b/opensearchpy/connection/http_urllib3.py index 6fc09e72..4bc27bbb 100644 --- a/opensearchpy/connection/http_urllib3.py +++ b/opensearchpy/connection/http_urllib3.py @@ -86,7 +86,7 @@ class Urllib3HttpConnection(Connection): ``ssl`` module for exact options for your environment). :arg ssl_assert_hostname: use hostname verification if not `False` :arg ssl_assert_fingerprint: verify the supplied certificate fingerprint if not `None` - :arg maxsize: the number of connections which will be kept open to this + :arg pool_maxsize: the number of connections which will be kept open to this host. See https://urllib3.readthedocs.io/en/1.4/pools.html#api for more information. 
:arg headers: any custom http headers to be add to requests @@ -109,7 +109,7 @@ def __init__( ssl_version=None, ssl_assert_hostname=None, ssl_assert_fingerprint=None, - maxsize=10, + pool_maxsize=None, headers=None, ssl_context=None, http_compress=None, @@ -203,8 +203,11 @@ def __init__( if not ssl_show_warn: urllib3.disable_warnings() + if pool_maxsize and isinstance(pool_maxsize, int): + kw["maxsize"] = pool_maxsize + self.pool = pool_class( - self.hostname, port=self.port, timeout=self.timeout, maxsize=maxsize, **kw + self.hostname, port=self.port, timeout=self.timeout, **kw ) def perform_request( diff --git a/opensearchpy/transport.py b/opensearchpy/transport.py index 32c9baf4..301955df 100644 --- a/opensearchpy/transport.py +++ b/opensearchpy/transport.py @@ -83,6 +83,7 @@ def __init__( serializers=None, default_mimetype="application/json", max_retries=3, + pool_maxsize=None, retry_on_status=(502, 503, 504), retry_on_timeout=False, send_get_body_as="GET", @@ -120,6 +121,8 @@ def __init__( don't support passing bodies with GET requests. If you set this to 'POST' a POST method will be used instead, if to 'source' then the body will be serialized and passed as a query parameter `source`. + :arg pool_maxsize: Maximum connection pool size used by pool-manager + For custom connection-pooling on current session Any extra keyword arguments will be passed to the `connection_class` when creating and instance unless overridden by that connection's @@ -139,6 +142,7 @@ def __init__( self.deserializer = Deserializer(_serializers, default_mimetype) self.max_retries = max_retries + self.pool_maxsize = pool_maxsize self.retry_on_timeout = retry_on_timeout self.retry_on_status = retry_on_status self.send_get_body_as = send_get_body_as @@ -211,6 +215,8 @@ def _create_connection(host): # previously unseen params, create new connection kwargs = self.kwargs.copy() kwargs.update(host) + if self.pool_maxsize and isinstance(self.pool_maxsize, int): + kwargs["pool_maxsize"] = self.pool_maxsize return self.connection_class(**kwargs) connections = map(_create_connection, hosts) diff --git a/test_opensearchpy/test_client/test_requests.py b/test_opensearchpy/test_client/test_requests.py new file mode 100644 index 00000000..11434a17 --- /dev/null +++ b/test_opensearchpy/test_client/test_requests.py @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. 
+ +from unittest import TestCase + +from opensearchpy import OpenSearch, RequestsHttpConnection + + +class TestRequests(TestCase): + def test_connection_class(self): + client = OpenSearch(connection_class=RequestsHttpConnection) + self.assertEqual(client.transport.pool_maxsize, None) + self.assertEqual(client.transport.connection_class, RequestsHttpConnection) + self.assertIsInstance( + client.transport.connection_pool.connections[0], RequestsHttpConnection + ) + + def test_pool_maxsize(self): + client = OpenSearch(connection_class=RequestsHttpConnection, pool_maxsize=42) + self.assertEqual(client.transport.pool_maxsize, 42) + self.assertEqual( + client.transport.connection_pool.connections[0] + .session.adapters["https://"] + ._pool_maxsize, + 42, + ) diff --git a/test_opensearchpy/test_client/test_urllib3.py b/test_opensearchpy/test_client/test_urllib3.py new file mode 100644 index 00000000..227164eb --- /dev/null +++ b/test_opensearchpy/test_client/test_urllib3.py @@ -0,0 +1,39 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. + +from unittest import TestCase + +from urllib3.connectionpool import HTTPConnectionPool + +from opensearchpy import OpenSearch, Urllib3HttpConnection + + +class TestUrlLib3(TestCase): + def test_default(self): + client = OpenSearch() + self.assertEqual(client.transport.connection_class, Urllib3HttpConnection) + self.assertEqual(client.transport.pool_maxsize, None) + + def test_connection_class(self): + client = OpenSearch(connection_class=Urllib3HttpConnection) + self.assertEqual(client.transport.connection_class, Urllib3HttpConnection) + self.assertIsInstance( + client.transport.connection_pool.connections[0], Urllib3HttpConnection + ) + self.assertIsInstance( + client.transport.connection_pool.connections[0].pool, HTTPConnectionPool + ) + + def test_pool_maxsize(self): + client = OpenSearch(connection_class=Urllib3HttpConnection, pool_maxsize=42) + self.assertEqual(client.transport.pool_maxsize, 42) + # https://github.com/python/cpython/blob/3.12/Lib/queue.py#L35 + self.assertEqual( + client.transport.connection_pool.connections[0].pool.pool.maxsize, 42 + ) From af263a060a11771015ea386565a814286a1d59d7 Mon Sep 17 00:00:00 2001 From: "Daniel (dB.) Doubrovkine" Date: Fri, 13 Oct 2023 14:29:54 -0400 Subject: [PATCH 20/21] Add micro benchmarks. (#537) * Align pool_maxsize for different connection pool implementations. Signed-off-by: dblock * Added benchmarks. Signed-off-by: dblock * Multi-threaded vs. async benchmarks. Signed-off-by: dblock * Set pool size to the number of threads. Signed-off-by: dblock * Added sync/async benchmark. Signed-off-by: dblock * Report client-side latency. Signed-off-by: dblock * Various updates to benchmarks, demonstrating threading improves throughput. Signed-off-by: dblock * Bench info. Signed-off-by: dblock * Fixup format. Signed-off-by: dblock * Undo async maxsize. Signed-off-by: dblock * Moved benchmarks folder. Signed-off-by: dblock * Updated documentation and project description. 
Signed-off-by: dblock --------- Signed-off-by: dblock --- CHANGELOG.md | 1 + README.md | 2 +- benchmarks/README.md | 63 ++ benchmarks/bench_async.py | 101 +++ benchmarks/bench_info_sync.py | 93 +++ benchmarks/bench_sync.py | 132 ++++ benchmarks/bench_sync_async.py | 12 + benchmarks/poetry.lock | 847 +++++++++++++++++++++++++ benchmarks/poetry.toml | 2 + benchmarks/pyproject.toml | 16 + benchmarks/thread_with_return_value.py | 25 + 11 files changed, 1293 insertions(+), 1 deletion(-) create mode 100644 benchmarks/README.md create mode 100644 benchmarks/bench_async.py create mode 100644 benchmarks/bench_info_sync.py create mode 100644 benchmarks/bench_sync.py create mode 100644 benchmarks/bench_sync_async.py create mode 100644 benchmarks/poetry.lock create mode 100644 benchmarks/poetry.toml create mode 100644 benchmarks/pyproject.toml create mode 100644 benchmarks/thread_with_return_value.py diff --git a/CHANGELOG.md b/CHANGELOG.md index a2317635..d8019a78 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added point-in-time APIs (create_pit, delete_pit, delete_all_pits, get_all_pits) and Security Client APIs (health and update_audit_configuration) ([#502](https://github.com/opensearch-project/opensearch-py/pull/502)) - Added new guide for using index templates with the client ([#531](https://github.com/opensearch-project/opensearch-py/pull/531)) - Added `pool_maxsize` for `Urllib3HttpConnection` ([#535](https://github.com/opensearch-project/opensearch-py/pull/535)) +- Added benchmarks ([#537](https://github.com/opensearch-project/opensearch-py/pull/537)) ### Changed - Generate `tasks` client from API specs ([#508](https://github.com/opensearch-project/opensearch-py/pull/508)) - Generate `ingest` client from API specs ([#513](https://github.com/opensearch-project/opensearch-py/pull/513)) diff --git a/README.md b/README.md index 7ecaea56..e4524469 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ For more information, see [opensearch.org](https://opensearch.org/) and the [API ## User Guide -To get started with the OpenSearch Python Client, see [User Guide](https://github.com/opensearch-project/opensearch-py/blob/main/USER_GUIDE.md). +To get started with the OpenSearch Python Client, see [User Guide](https://github.com/opensearch-project/opensearch-py/blob/main/USER_GUIDE.md). This repository also contains [working samples](https://github.com/opensearch-project/opensearch-py/tree/main/samples) and [benchmarks](https://github.com/opensearch-project/opensearch-py/tree/main/benchmarks). ## Compatibility with OpenSearch diff --git a/benchmarks/README.md b/benchmarks/README.md new file mode 100644 index 00000000..1d21d851 --- /dev/null +++ b/benchmarks/README.md @@ -0,0 +1,63 @@ +- [Benchmarks](#benchmarks) + - [Start OpenSearch](#start-opensearch) + - [Install Prerequisites](#install-prerequisites) + - [Run Benchmarks](#run-benchmarks) + +## Benchmarks + +Python client benchmarks using [richbench](https://github.com/tonybaloney/rich-bench). + +### Start OpenSearch + +``` +docker run -p 9200:9200 -e "discovery.type=single-node" opensearchproject/opensearch:latest +``` + +### Install Prerequisites + +Install [poetry](https://python-poetry.org/docs/), then install package dependencies. + +``` +poetry install +``` + +Benchmarks use the code in this repository by specifying the dependency as `opensearch-py = { path = "..", develop=true, extras=["async"] }` in [pyproject.toml](pyproject.toml). 
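+
+The snippet below is a minimal sketch of how that dependency entry might appear in the `[tool.poetry.dependencies]` table of `pyproject.toml`; it is illustrative only and omits the rest of the file.
+
+```
+[tool.poetry.dependencies]
+# Use the local checkout of opensearch-py (one directory up), with the async extras installed.
+opensearch-py = { path = "..", develop=true, extras=["async"] }
+```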
+ +### Run Benchmarks + +Run all benchmarks available as follows. + +``` +poetry run richbench . --repeat 1 --times 1 +``` + +Outputs results from all the runs. + +``` + Benchmarks, repeat=1, number=1 +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓ +┃ Benchmark ┃ Min ┃ Max ┃ Mean ┃ Min (+) ┃ Max (+) ┃ Mean (+) ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩ +│ 1 client vs. more clients (async) │ 1.640 │ 1.640 │ 1.640 │ 1.102 (1.5x) │ 1.102 (1.5x) │ 1.102 (1.5x) │ +│ 1 thread vs. 32 threads (sync) │ 5.526 │ 5.526 │ 5.526 │ 1.626 (3.4x) │ 1.626 (3.4x) │ 1.626 (3.4x) │ +│ 1 thread vs. 32 threads (sync) │ 4.639 │ 4.639 │ 4.639 │ 3.363 (1.4x) │ 3.363 (1.4x) │ 3.363 (1.4x) │ +│ sync vs. async (8) │ 3.198 │ 3.198 │ 3.198 │ 0.966 (3.3x) │ 0.966 (3.3x) │ 0.966 (3.3x) │ +└───────────────────────────────────┴─────────┴─────────┴─────────┴─────────────────┴─────────────────┴─────────────────┘ +``` + +Run a specific benchmark, e.g. [bench_sync.py](bench_sync.py) by specifying `--benchmark [name]`. + +``` +poetry run richbench . --repeat 1 --times 1 --benchmark sync +``` + +Outputs results from one benchmark. + +``` + Benchmarks, repeat=1, number=1 +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓ +┃ Benchmark ┃ Min ┃ Max ┃ Mean ┃ Min (+) ┃ Max (+) ┃ Mean (+) ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩ +│ 1 thread vs. 32 threads (sync) │ 6.804 │ 6.804 │ 6.804 │ 3.409 (2.0x) │ 3.409 (2.0x) │ 3.409 (2.0x) │ +└────────────────────────────────┴─────────┴─────────┴─────────┴─────────────────┴─────────────────┴─────────────────┘ +``` diff --git a/benchmarks/bench_async.py b/benchmarks/bench_async.py new file mode 100644 index 00000000..d08ca634 --- /dev/null +++ b/benchmarks/bench_async.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python + +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. 
+ +import asyncio +import uuid + +from opensearchpy import AsyncHttpConnection, AsyncOpenSearch + +host = "localhost" +port = 9200 +auth = ("admin", "admin") +index_name = "test-index-async" +item_count = 100 + + +async def index_records(client, item_count): + await asyncio.gather( + *[ + client.index( + index=index_name, + body={ + "title": f"Moneyball", + "director": "Bennett Miller", + "year": "2011", + }, + id=uuid.uuid4(), + ) + for j in range(item_count) + ] + ) + + +async def test_async(client_count=1, item_count=1): + clients = [] + for i in range(client_count): + clients.append( + AsyncOpenSearch( + hosts=[{"host": host, "port": port}], + http_auth=auth, + use_ssl=True, + verify_certs=False, + ssl_show_warn=False, + connection_class=AsyncHttpConnection, + pool_maxsize=client_count, + ) + ) + + if await clients[0].indices.exists(index_name): + await clients[0].indices.delete(index_name) + + await clients[0].indices.create(index_name) + + await asyncio.gather( + *[index_records(clients[i], item_count) for i in range(client_count)] + ) + + await clients[0].indices.refresh(index=index_name) + print(await clients[0].count(index=index_name)) + + await clients[0].indices.delete(index_name) + + await asyncio.gather(*[client.close() for client in clients]) + + +def test(item_count=1, client_count=1): + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + loop.run_until_complete(test_async(item_count, client_count)) + loop.close() + + +def test_1(): + test(1, 32 * item_count) + + +def test_2(): + test(2, 16 * item_count) + + +def test_4(): + test(4, 8 * item_count) + + +def test_8(): + test(8, 4 * item_count) + + +def test_16(): + test(16, 2 * item_count) + + +def test_32(): + test(32, item_count) + + +__benchmarks__ = [(test_1, test_8, "1 client vs. more clients (async)")] diff --git a/benchmarks/bench_info_sync.py b/benchmarks/bench_info_sync.py new file mode 100644 index 00000000..03e6f998 --- /dev/null +++ b/benchmarks/bench_info_sync.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python + +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. 
+ +import logging +import sys +import time + +from thread_with_return_value import ThreadWithReturnValue + +from opensearchpy import OpenSearch + +host = "localhost" +port = 9200 +auth = ("admin", "admin") +request_count = 250 + + +root = logging.getLogger() +# root.setLevel(logging.DEBUG) +# logging.getLogger("urllib3.connectionpool").setLevel(logging.DEBUG) + +handler = logging.StreamHandler(sys.stdout) +handler.setLevel(logging.DEBUG) +formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") +handler.setFormatter(formatter) +root.addHandler(handler) + + +def get_info(client, request_count): + tt = 0 + for n in range(request_count): + start = time.time() * 1000 + rc = client.info() + total_time = time.time() * 1000 - start + tt += total_time + return tt + + +def test(thread_count=1, request_count=1, client_count=1): + clients = [] + for i in range(client_count): + clients.append( + OpenSearch( + hosts=[{"host": host, "port": port}], + http_auth=auth, + use_ssl=True, + verify_certs=False, + ssl_show_warn=False, + pool_maxsize=thread_count, + ) + ) + + threads = [] + for thread_id in range(thread_count): + thread = ThreadWithReturnValue( + target=get_info, args=[clients[thread_id % len(clients)], request_count] + ) + threads.append(thread) + thread.start() + + latency = 0 + for t in threads: + latency += t.join() + + print(f"latency={latency}") + + +def test_1(): + test(1, 32 * request_count, 1) + + +def test_2(): + test(2, 16 * request_count, 2) + + +def test_4(): + test(4, 8 * request_count, 3) + + +def test_8(): + test(8, 4 * request_count, 8) + + +def test_32(): + test(32, request_count, 32) + + +__benchmarks__ = [(test_1, test_32, "1 thread vs. 32 threads (sync)")] diff --git a/benchmarks/bench_sync.py b/benchmarks/bench_sync.py new file mode 100644 index 00000000..f20ca9f0 --- /dev/null +++ b/benchmarks/bench_sync.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python + +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. 
+ +import json +import time +import uuid + +from thread_with_return_value import ThreadWithReturnValue + +from opensearchpy import OpenSearch, Urllib3HttpConnection + +host = "localhost" +port = 9200 +auth = ("admin", "admin") +index_name = "test-index-sync" +item_count = 1000 + +import logging +import sys + +root = logging.getLogger() +# root.setLevel(logging.DEBUG) +# logging.getLogger("urllib3.connectionpool").setLevel(logging.DEBUG) + +handler = logging.StreamHandler(sys.stdout) +handler.setLevel(logging.DEBUG) +formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") +handler.setFormatter(formatter) +root.addHandler(handler) + + +def index_records(client, item_count): + tt = 0 + for n in range(10): + data = [] + for i in range(item_count): + data.append( + json.dumps({"index": {"_index": index_name, "_id": str(uuid.uuid4())}}) + ) + data.append(json.dumps({"value": i})) + data = "\n".join(data) + + start = time.time() * 1000 + rc = client.bulk(data) + if rc["errors"]: + raise Exception(rc["errors"]) + + server_time = rc["took"] + total_time = time.time() * 1000 - start + + if total_time < server_time: + raise Exception(f"total={total_time} < server={server_time}") + + tt += total_time - server_time + return tt + + +def test(thread_count=1, item_count=1, client_count=1): + clients = [] + for i in range(client_count): + clients.append( + OpenSearch( + hosts=[{"host": host, "port": port}], + http_auth=auth, + use_ssl=True, + verify_certs=False, + ssl_show_warn=False, + pool_maxsize=thread_count, + connection_class=Urllib3HttpConnection, + ) + ) + + if clients[0].indices.exists(index_name): + clients[0].indices.delete(index_name) + + clients[0].indices.create( + index=index_name, + body={ + "mappings": { + "properties": { + "value": {"type": "float"}, + } + } + }, + ) + + threads = [] + for thread_id in range(thread_count): + thread = ThreadWithReturnValue( + target=index_records, args=[clients[thread_id % len(clients)], item_count] + ) + threads.append(thread) + thread.start() + + latency = 0 + for t in threads: + latency += t.join() + + clients[0].indices.refresh(index=index_name) + count = clients[0].count(index=index_name) + + clients[0].indices.delete(index_name) + + print(f"{count}, latency={latency}") + + +def test_1(): + test(1, 32 * item_count, 1) + + +def test_2(): + test(2, 16 * item_count, 2) + + +def test_4(): + test(4, 8 * item_count, 3) + + +def test_8(): + test(8, 4 * item_count, 8) + + +def test_32(): + test(32, item_count, 32) + + +__benchmarks__ = [(test_1, test_32, "1 thread vs. 32 threads (sync)")] diff --git a/benchmarks/bench_sync_async.py b/benchmarks/bench_sync_async.py new file mode 100644 index 00000000..5fa97f46 --- /dev/null +++ b/benchmarks/bench_sync_async.py @@ -0,0 +1,12 @@ +#!/usr/bin/env python + +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. + +import bench_async +import bench_sync + +__benchmarks__ = [(bench_sync.test_32, bench_async.test_8, "sync vs. async (8)")] diff --git a/benchmarks/poetry.lock b/benchmarks/poetry.lock new file mode 100644 index 00000000..d4992d68 --- /dev/null +++ b/benchmarks/poetry.lock @@ -0,0 +1,847 @@ +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. 
+ +[[package]] +name = "aiohttp" +version = "3.8.6" +description = "Async http client/server framework (asyncio)" +optional = false +python-versions = ">=3.6" +files = [ + {file = "aiohttp-3.8.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:41d55fc043954cddbbd82503d9cc3f4814a40bcef30b3569bc7b5e34130718c1"}, + {file = "aiohttp-3.8.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1d84166673694841d8953f0a8d0c90e1087739d24632fe86b1a08819168b4566"}, + {file = "aiohttp-3.8.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:253bf92b744b3170eb4c4ca2fa58f9c4b87aeb1df42f71d4e78815e6e8b73c9e"}, + {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fd194939b1f764d6bb05490987bfe104287bbf51b8d862261ccf66f48fb4096"}, + {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c5f938d199a6fdbdc10bbb9447496561c3a9a565b43be564648d81e1102ac22"}, + {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2817b2f66ca82ee699acd90e05c95e79bbf1dc986abb62b61ec8aaf851e81c93"}, + {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fa375b3d34e71ccccf172cab401cd94a72de7a8cc01847a7b3386204093bb47"}, + {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9de50a199b7710fa2904be5a4a9b51af587ab24c8e540a7243ab737b45844543"}, + {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e1d8cb0b56b3587c5c01de3bf2f600f186da7e7b5f7353d1bf26a8ddca57f965"}, + {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8e31e9db1bee8b4f407b77fd2507337a0a80665ad7b6c749d08df595d88f1cf5"}, + {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:7bc88fc494b1f0311d67f29fee6fd636606f4697e8cc793a2d912ac5b19aa38d"}, + {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ec00c3305788e04bf6d29d42e504560e159ccaf0be30c09203b468a6c1ccd3b2"}, + {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ad1407db8f2f49329729564f71685557157bfa42b48f4b93e53721a16eb813ed"}, + {file = "aiohttp-3.8.6-cp310-cp310-win32.whl", hash = "sha256:ccc360e87341ad47c777f5723f68adbb52b37ab450c8bc3ca9ca1f3e849e5fe2"}, + {file = "aiohttp-3.8.6-cp310-cp310-win_amd64.whl", hash = "sha256:93c15c8e48e5e7b89d5cb4613479d144fda8344e2d886cf694fd36db4cc86865"}, + {file = "aiohttp-3.8.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e2f9cc8e5328f829f6e1fb74a0a3a939b14e67e80832975e01929e320386b34"}, + {file = "aiohttp-3.8.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e6a00ffcc173e765e200ceefb06399ba09c06db97f401f920513a10c803604ca"}, + {file = "aiohttp-3.8.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:41bdc2ba359032e36c0e9de5a3bd00d6fb7ea558a6ce6b70acedf0da86458321"}, + {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14cd52ccf40006c7a6cd34a0f8663734e5363fd981807173faf3a017e202fec9"}, + {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2d5b785c792802e7b275c420d84f3397668e9d49ab1cb52bd916b3b3ffcf09ad"}, + {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1bed815f3dc3d915c5c1e556c397c8667826fbc1b935d95b0ad680787896a358"}, + {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:96603a562b546632441926cd1293cfcb5b69f0b4159e6077f7c7dbdfb686af4d"}, + {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d76e8b13161a202d14c9584590c4df4d068c9567c99506497bdd67eaedf36403"}, + {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e3f1e3f1a1751bb62b4a1b7f4e435afcdade6c17a4fd9b9d43607cebd242924a"}, + {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:76b36b3124f0223903609944a3c8bf28a599b2cc0ce0be60b45211c8e9be97f8"}, + {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:a2ece4af1f3c967a4390c284797ab595a9f1bc1130ef8b01828915a05a6ae684"}, + {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:16d330b3b9db87c3883e565340d292638a878236418b23cc8b9b11a054aaa887"}, + {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:42c89579f82e49db436b69c938ab3e1559e5a4409eb8639eb4143989bc390f2f"}, + {file = "aiohttp-3.8.6-cp311-cp311-win32.whl", hash = "sha256:efd2fcf7e7b9d7ab16e6b7d54205beded0a9c8566cb30f09c1abe42b4e22bdcb"}, + {file = "aiohttp-3.8.6-cp311-cp311-win_amd64.whl", hash = "sha256:3b2ab182fc28e7a81f6c70bfbd829045d9480063f5ab06f6e601a3eddbbd49a0"}, + {file = "aiohttp-3.8.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:fdee8405931b0615220e5ddf8cd7edd8592c606a8e4ca2a00704883c396e4479"}, + {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d25036d161c4fe2225d1abff2bd52c34ed0b1099f02c208cd34d8c05729882f0"}, + {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5d791245a894be071d5ab04bbb4850534261a7d4fd363b094a7b9963e8cdbd31"}, + {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0cccd1de239afa866e4ce5c789b3032442f19c261c7d8a01183fd956b1935349"}, + {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f13f60d78224f0dace220d8ab4ef1dbc37115eeeab8c06804fec11bec2bbd07"}, + {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8a9b5a0606faca4f6cc0d338359d6fa137104c337f489cd135bb7fbdbccb1e39"}, + {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:13da35c9ceb847732bf5c6c5781dcf4780e14392e5d3b3c689f6d22f8e15ae31"}, + {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:4d4cbe4ffa9d05f46a28252efc5941e0462792930caa370a6efaf491f412bc66"}, + {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:229852e147f44da0241954fc6cb910ba074e597f06789c867cb7fb0621e0ba7a"}, + {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:713103a8bdde61d13490adf47171a1039fd880113981e55401a0f7b42c37d071"}, + {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:45ad816b2c8e3b60b510f30dbd37fe74fd4a772248a52bb021f6fd65dff809b6"}, + {file = "aiohttp-3.8.6-cp36-cp36m-win32.whl", hash = "sha256:2b8d4e166e600dcfbff51919c7a3789ff6ca8b3ecce16e1d9c96d95dd569eb4c"}, + {file = "aiohttp-3.8.6-cp36-cp36m-win_amd64.whl", hash = "sha256:0912ed87fee967940aacc5306d3aa8ba3a459fcd12add0b407081fbefc931e53"}, + {file = "aiohttp-3.8.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e2a988a0c673c2e12084f5e6ba3392d76c75ddb8ebc6c7e9ead68248101cd446"}, + {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ebf3fd9f141700b510d4b190094db0ce37ac6361a6806c153c161dc6c041ccda"}, + {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3161ce82ab85acd267c8f4b14aa226047a6bee1e4e6adb74b798bd42c6ae1f80"}, + {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d95fc1bf33a9a81469aa760617b5971331cdd74370d1214f0b3109272c0e1e3c"}, + {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c43ecfef7deaf0617cee936836518e7424ee12cb709883f2c9a1adda63cc460"}, + {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca80e1b90a05a4f476547f904992ae81eda5c2c85c66ee4195bb8f9c5fb47f28"}, + {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:90c72ebb7cb3a08a7f40061079817133f502a160561d0675b0a6adf231382c92"}, + {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:bb54c54510e47a8c7c8e63454a6acc817519337b2b78606c4e840871a3e15349"}, + {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:de6a1c9f6803b90e20869e6b99c2c18cef5cc691363954c93cb9adeb26d9f3ae"}, + {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:a3628b6c7b880b181a3ae0a0683698513874df63783fd89de99b7b7539e3e8a8"}, + {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:fc37e9aef10a696a5a4474802930079ccfc14d9f9c10b4662169671ff034b7df"}, + {file = "aiohttp-3.8.6-cp37-cp37m-win32.whl", hash = "sha256:f8ef51e459eb2ad8e7a66c1d6440c808485840ad55ecc3cafefadea47d1b1ba2"}, + {file = "aiohttp-3.8.6-cp37-cp37m-win_amd64.whl", hash = "sha256:b2fe42e523be344124c6c8ef32a011444e869dc5f883c591ed87f84339de5976"}, + {file = "aiohttp-3.8.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:9e2ee0ac5a1f5c7dd3197de309adfb99ac4617ff02b0603fd1e65b07dc772e4b"}, + {file = "aiohttp-3.8.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:01770d8c04bd8db568abb636c1fdd4f7140b284b8b3e0b4584f070180c1e5c62"}, + {file = "aiohttp-3.8.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3c68330a59506254b556b99a91857428cab98b2f84061260a67865f7f52899f5"}, + {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89341b2c19fb5eac30c341133ae2cc3544d40d9b1892749cdd25892bbc6ac951"}, + {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71783b0b6455ac8f34b5ec99d83e686892c50498d5d00b8e56d47f41b38fbe04"}, + {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f628dbf3c91e12f4d6c8b3f092069567d8eb17814aebba3d7d60c149391aee3a"}, + {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b04691bc6601ef47c88f0255043df6f570ada1a9ebef99c34bd0b72866c217ae"}, + {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ee912f7e78287516df155f69da575a0ba33b02dd7c1d6614dbc9463f43066e3"}, + {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9c19b26acdd08dd239e0d3669a3dddafd600902e37881f13fbd8a53943079dbc"}, + {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:99c5ac4ad492b4a19fc132306cd57075c28446ec2ed970973bbf036bcda1bcc6"}, + {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:f0f03211fd14a6a0aed2997d4b1c013d49fb7b50eeb9ffdf5e51f23cfe2c77fa"}, + {file = 
"aiohttp-3.8.6-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:8d399dade330c53b4106160f75f55407e9ae7505263ea86f2ccca6bfcbdb4921"}, + {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ec4fd86658c6a8964d75426517dc01cbf840bbf32d055ce64a9e63a40fd7b771"}, + {file = "aiohttp-3.8.6-cp38-cp38-win32.whl", hash = "sha256:33164093be11fcef3ce2571a0dccd9041c9a93fa3bde86569d7b03120d276c6f"}, + {file = "aiohttp-3.8.6-cp38-cp38-win_amd64.whl", hash = "sha256:bdf70bfe5a1414ba9afb9d49f0c912dc524cf60141102f3a11143ba3d291870f"}, + {file = "aiohttp-3.8.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d52d5dc7c6682b720280f9d9db41d36ebe4791622c842e258c9206232251ab2b"}, + {file = "aiohttp-3.8.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4ac39027011414dbd3d87f7edb31680e1f430834c8cef029f11c66dad0670aa5"}, + {file = "aiohttp-3.8.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3f5c7ce535a1d2429a634310e308fb7d718905487257060e5d4598e29dc17f0b"}, + {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b30e963f9e0d52c28f284d554a9469af073030030cef8693106d918b2ca92f54"}, + {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:918810ef188f84152af6b938254911055a72e0f935b5fbc4c1a4ed0b0584aed1"}, + {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:002f23e6ea8d3dd8d149e569fd580c999232b5fbc601c48d55398fbc2e582e8c"}, + {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4fcf3eabd3fd1a5e6092d1242295fa37d0354b2eb2077e6eb670accad78e40e1"}, + {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:255ba9d6d5ff1a382bb9a578cd563605aa69bec845680e21c44afc2670607a95"}, + {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d67f8baed00870aa390ea2590798766256f31dc5ed3ecc737debb6e97e2ede78"}, + {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:86f20cee0f0a317c76573b627b954c412ea766d6ada1a9fcf1b805763ae7feeb"}, + {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:39a312d0e991690ccc1a61f1e9e42daa519dcc34ad03eb6f826d94c1190190dd"}, + {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:e827d48cf802de06d9c935088c2924e3c7e7533377d66b6f31ed175c1620e05e"}, + {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bd111d7fc5591ddf377a408ed9067045259ff2770f37e2d94e6478d0f3fc0c17"}, + {file = "aiohttp-3.8.6-cp39-cp39-win32.whl", hash = "sha256:caf486ac1e689dda3502567eb89ffe02876546599bbf915ec94b1fa424eeffd4"}, + {file = "aiohttp-3.8.6-cp39-cp39-win_amd64.whl", hash = "sha256:3f0e27e5b733803333bb2371249f41cf42bae8884863e8e8965ec69bebe53132"}, + {file = "aiohttp-3.8.6.tar.gz", hash = "sha256:b0cf2a4501bff9330a8a5248b4ce951851e415bdcce9dc158e76cfd55e15085c"}, +] + +[package.dependencies] +aiosignal = ">=1.1.2" +async-timeout = ">=4.0.0a3,<5.0" +asynctest = {version = "0.13.0", markers = "python_version < \"3.8\""} +attrs = ">=17.3.0" +charset-normalizer = ">=2.0,<4.0" +frozenlist = ">=1.1.1" +multidict = ">=4.5,<7.0" +typing-extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""} +yarl = ">=1.0,<2.0" + +[package.extras] +speedups = ["Brotli", "aiodns", "cchardet"] + +[[package]] +name = "aiosignal" +version = "1.3.1" +description = "aiosignal: a list of registered asynchronous callbacks" +optional = false +python-versions = ">=3.7" 
+files = [ + {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, + {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, +] + +[package.dependencies] +frozenlist = ">=1.1.0" + +[[package]] +name = "async-timeout" +version = "4.0.3" +description = "Timeout context manager for asyncio programs" +optional = false +python-versions = ">=3.7" +files = [ + {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, + {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, +] + +[package.dependencies] +typing-extensions = {version = ">=3.6.5", markers = "python_version < \"3.8\""} + +[[package]] +name = "asynctest" +version = "0.13.0" +description = "Enhance the standard unittest package with features for testing asyncio libraries" +optional = false +python-versions = ">=3.5" +files = [ + {file = "asynctest-0.13.0-py3-none-any.whl", hash = "sha256:5da6118a7e6d6b54d83a8f7197769d046922a44d2a99c21382f0a6e4fadae676"}, + {file = "asynctest-0.13.0.tar.gz", hash = "sha256:c27862842d15d83e6a34eb0b2866c323880eb3a75e4485b079ea11748fd77fac"}, +] + +[[package]] +name = "attrs" +version = "23.1.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.7" +files = [ + {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, + {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"}, +] + +[package.dependencies] +importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} + +[package.extras] +cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] +dev = ["attrs[docs,tests]", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs[tests-no-zope]", "zope-interface"] +tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] + +[[package]] +name = "certifi" +version = "2023.7.22" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, + {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.3.0" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.3.0.tar.gz", hash = "sha256:63563193aec44bce707e0c5ca64ff69fa72ed7cf34ce6e11d5127555756fd2f6"}, + {file = "charset_normalizer-3.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:effe5406c9bd748a871dbcaf3ac69167c38d72db8c9baf3ff954c344f31c4cbe"}, + {file = "charset_normalizer-3.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4162918ef3098851fcd8a628bf9b6a98d10c380725df9e04caf5ca6dd48c847a"}, + {file = "charset_normalizer-3.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0570d21da019941634a531444364f2482e8db0b3425fcd5ac0c36565a64142c8"}, + {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5707a746c6083a3a74b46b3a631d78d129edab06195a92a8ece755aac25a3f3d"}, + {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:278c296c6f96fa686d74eb449ea1697f3c03dc28b75f873b65b5201806346a69"}, + {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a4b71f4d1765639372a3b32d2638197f5cd5221b19531f9245fcc9ee62d38f56"}, + {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5969baeaea61c97efa706b9b107dcba02784b1601c74ac84f2a532ea079403e"}, + {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3f93dab657839dfa61025056606600a11d0b696d79386f974e459a3fbc568ec"}, + {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:db756e48f9c5c607b5e33dd36b1d5872d0422e960145b08ab0ec7fd420e9d649"}, + {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:232ac332403e37e4a03d209a3f92ed9071f7d3dbda70e2a5e9cff1c4ba9f0678"}, + {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e5c1502d4ace69a179305abb3f0bb6141cbe4714bc9b31d427329a95acfc8bdd"}, + {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:2502dd2a736c879c0f0d3e2161e74d9907231e25d35794584b1ca5284e43f596"}, + {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23e8565ab7ff33218530bc817922fae827420f143479b753104ab801145b1d5b"}, + {file = "charset_normalizer-3.3.0-cp310-cp310-win32.whl", hash = "sha256:1872d01ac8c618a8da634e232f24793883d6e456a66593135aeafe3784b0848d"}, + {file = "charset_normalizer-3.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:557b21a44ceac6c6b9773bc65aa1b4cc3e248a5ad2f5b914b91579a32e22204d"}, + {file = "charset_normalizer-3.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d7eff0f27edc5afa9e405f7165f85a6d782d308f3b6b9d96016c010597958e63"}, + {file = "charset_normalizer-3.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6a685067d05e46641d5d1623d7c7fdf15a357546cbb2f71b0ebde91b175ffc3e"}, + {file = "charset_normalizer-3.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0d3d5b7db9ed8a2b11a774db2bbea7ba1884430a205dbd54a32d61d7c2a190fa"}, + {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2935ffc78db9645cb2086c2f8f4cfd23d9b73cc0dc80334bc30aac6f03f68f8c"}, + {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fe359b2e3a7729010060fbca442ca225280c16e923b37db0e955ac2a2b72a05"}, + {file = 
"charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:380c4bde80bce25c6e4f77b19386f5ec9db230df9f2f2ac1e5ad7af2caa70459"}, + {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0d1e3732768fecb052d90d62b220af62ead5748ac51ef61e7b32c266cac9293"}, + {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1b2919306936ac6efb3aed1fbf81039f7087ddadb3160882a57ee2ff74fd2382"}, + {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f8888e31e3a85943743f8fc15e71536bda1c81d5aa36d014a3c0c44481d7db6e"}, + {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:82eb849f085624f6a607538ee7b83a6d8126df6d2f7d3b319cb837b289123078"}, + {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7b8b8bf1189b3ba9b8de5c8db4d541b406611a71a955bbbd7385bbc45fcb786c"}, + {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5adf257bd58c1b8632046bbe43ee38c04e1038e9d37de9c57a94d6bd6ce5da34"}, + {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c350354efb159b8767a6244c166f66e67506e06c8924ed74669b2c70bc8735b1"}, + {file = "charset_normalizer-3.3.0-cp311-cp311-win32.whl", hash = "sha256:02af06682e3590ab952599fbadac535ede5d60d78848e555aa58d0c0abbde786"}, + {file = "charset_normalizer-3.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:86d1f65ac145e2c9ed71d8ffb1905e9bba3a91ae29ba55b4c46ae6fc31d7c0d4"}, + {file = "charset_normalizer-3.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:3b447982ad46348c02cb90d230b75ac34e9886273df3a93eec0539308a6296d7"}, + {file = "charset_normalizer-3.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:abf0d9f45ea5fb95051c8bfe43cb40cda383772f7e5023a83cc481ca2604d74e"}, + {file = "charset_normalizer-3.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b09719a17a2301178fac4470d54b1680b18a5048b481cb8890e1ef820cb80455"}, + {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3d9b48ee6e3967b7901c052b670c7dda6deb812c309439adaffdec55c6d7b78"}, + {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:edfe077ab09442d4ef3c52cb1f9dab89bff02f4524afc0acf2d46be17dc479f5"}, + {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3debd1150027933210c2fc321527c2299118aa929c2f5a0a80ab6953e3bd1908"}, + {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86f63face3a527284f7bb8a9d4f78988e3c06823f7bea2bd6f0e0e9298ca0403"}, + {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:24817cb02cbef7cd499f7c9a2735286b4782bd47a5b3516a0e84c50eab44b98e"}, + {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c71f16da1ed8949774ef79f4a0260d28b83b3a50c6576f8f4f0288d109777989"}, + {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:9cf3126b85822c4e53aa28c7ec9869b924d6fcfb76e77a45c44b83d91afd74f9"}, + {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:b3b2316b25644b23b54a6f6401074cebcecd1244c0b8e80111c9a3f1c8e83d65"}, + {file = 
"charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:03680bb39035fbcffe828eae9c3f8afc0428c91d38e7d61aa992ef7a59fb120e"}, + {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4cc152c5dd831641e995764f9f0b6589519f6f5123258ccaca8c6d34572fefa8"}, + {file = "charset_normalizer-3.3.0-cp312-cp312-win32.whl", hash = "sha256:b8f3307af845803fb0b060ab76cf6dd3a13adc15b6b451f54281d25911eb92df"}, + {file = "charset_normalizer-3.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:8eaf82f0eccd1505cf39a45a6bd0a8cf1c70dcfc30dba338207a969d91b965c0"}, + {file = "charset_normalizer-3.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dc45229747b67ffc441b3de2f3ae5e62877a282ea828a5bdb67883c4ee4a8810"}, + {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f4a0033ce9a76e391542c182f0d48d084855b5fcba5010f707c8e8c34663d77"}, + {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ada214c6fa40f8d800e575de6b91a40d0548139e5dc457d2ebb61470abf50186"}, + {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b1121de0e9d6e6ca08289583d7491e7fcb18a439305b34a30b20d8215922d43c"}, + {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1063da2c85b95f2d1a430f1c33b55c9c17ffaf5e612e10aeaad641c55a9e2b9d"}, + {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70f1d09c0d7748b73290b29219e854b3207aea922f839437870d8cc2168e31cc"}, + {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:250c9eb0f4600361dd80d46112213dff2286231d92d3e52af1e5a6083d10cad9"}, + {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:750b446b2ffce1739e8578576092179160f6d26bd5e23eb1789c4d64d5af7dc7"}, + {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:fc52b79d83a3fe3a360902d3f5d79073a993597d48114c29485e9431092905d8"}, + {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:588245972aca710b5b68802c8cad9edaa98589b1b42ad2b53accd6910dad3545"}, + {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e39c7eb31e3f5b1f88caff88bcff1b7f8334975b46f6ac6e9fc725d829bc35d4"}, + {file = "charset_normalizer-3.3.0-cp37-cp37m-win32.whl", hash = "sha256:abecce40dfebbfa6abf8e324e1860092eeca6f7375c8c4e655a8afb61af58f2c"}, + {file = "charset_normalizer-3.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:24a91a981f185721542a0b7c92e9054b7ab4fea0508a795846bc5b0abf8118d4"}, + {file = "charset_normalizer-3.3.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:67b8cc9574bb518ec76dc8e705d4c39ae78bb96237cb533edac149352c1f39fe"}, + {file = "charset_normalizer-3.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ac71b2977fb90c35d41c9453116e283fac47bb9096ad917b8819ca8b943abecd"}, + {file = "charset_normalizer-3.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3ae38d325b512f63f8da31f826e6cb6c367336f95e418137286ba362925c877e"}, + {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:542da1178c1c6af8873e143910e2269add130a299c9106eef2594e15dae5e482"}, + {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:30a85aed0b864ac88309b7d94be09f6046c834ef60762a8833b660139cfbad13"}, + {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aae32c93e0f64469f74ccc730a7cb21c7610af3a775157e50bbd38f816536b38"}, + {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15b26ddf78d57f1d143bdf32e820fd8935d36abe8a25eb9ec0b5a71c82eb3895"}, + {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f5d10bae5d78e4551b7be7a9b29643a95aded9d0f602aa2ba584f0388e7a557"}, + {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:249c6470a2b60935bafd1d1d13cd613f8cd8388d53461c67397ee6a0f5dce741"}, + {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c5a74c359b2d47d26cdbbc7845e9662d6b08a1e915eb015d044729e92e7050b7"}, + {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:b5bcf60a228acae568e9911f410f9d9e0d43197d030ae5799e20dca8df588287"}, + {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:187d18082694a29005ba2944c882344b6748d5be69e3a89bf3cc9d878e548d5a"}, + {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:81bf654678e575403736b85ba3a7867e31c2c30a69bc57fe88e3ace52fb17b89"}, + {file = "charset_normalizer-3.3.0-cp38-cp38-win32.whl", hash = "sha256:85a32721ddde63c9df9ebb0d2045b9691d9750cb139c161c80e500d210f5e26e"}, + {file = "charset_normalizer-3.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:468d2a840567b13a590e67dd276c570f8de00ed767ecc611994c301d0f8c014f"}, + {file = "charset_normalizer-3.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e0fc42822278451bc13a2e8626cf2218ba570f27856b536e00cfa53099724828"}, + {file = "charset_normalizer-3.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:09c77f964f351a7369cc343911e0df63e762e42bac24cd7d18525961c81754f4"}, + {file = "charset_normalizer-3.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:12ebea541c44fdc88ccb794a13fe861cc5e35d64ed689513a5c03d05b53b7c82"}, + {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:805dfea4ca10411a5296bcc75638017215a93ffb584c9e344731eef0dcfb026a"}, + {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:96c2b49eb6a72c0e4991d62406e365d87067ca14c1a729a870d22354e6f68115"}, + {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaf7b34c5bc56b38c931a54f7952f1ff0ae77a2e82496583b247f7c969eb1479"}, + {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:619d1c96099be5823db34fe89e2582b336b5b074a7f47f819d6b3a57ff7bdb86"}, + {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0ac5e7015a5920cfce654c06618ec40c33e12801711da6b4258af59a8eff00a"}, + {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:93aa7eef6ee71c629b51ef873991d6911b906d7312c6e8e99790c0f33c576f89"}, + {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7966951325782121e67c81299a031f4c115615e68046f79b85856b86ebffc4cd"}, + {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = 
"sha256:02673e456dc5ab13659f85196c534dc596d4ef260e4d86e856c3b2773ce09843"}, + {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:c2af80fb58f0f24b3f3adcb9148e6203fa67dd3f61c4af146ecad033024dde43"}, + {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:153e7b6e724761741e0974fc4dcd406d35ba70b92bfe3fedcb497226c93b9da7"}, + {file = "charset_normalizer-3.3.0-cp39-cp39-win32.whl", hash = "sha256:d47ecf253780c90ee181d4d871cd655a789da937454045b17b5798da9393901a"}, + {file = "charset_normalizer-3.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:d97d85fa63f315a8bdaba2af9a6a686e0eceab77b3089af45133252618e70884"}, + {file = "charset_normalizer-3.3.0-py3-none-any.whl", hash = "sha256:e46cd37076971c1040fc8c41273a8b3e2c624ce4f2be3f5dfcb7a430c1d3acc2"}, +] + +[[package]] +name = "frozenlist" +version = "1.3.3" +description = "A list-like structure which implements collections.abc.MutableSequence" +optional = false +python-versions = ">=3.7" +files = [ + {file = "frozenlist-1.3.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff8bf625fe85e119553b5383ba0fb6aa3d0ec2ae980295aaefa552374926b3f4"}, + {file = "frozenlist-1.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dfbac4c2dfcc082fcf8d942d1e49b6aa0766c19d3358bd86e2000bf0fa4a9cf0"}, + {file = "frozenlist-1.3.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b1c63e8d377d039ac769cd0926558bb7068a1f7abb0f003e3717ee003ad85530"}, + {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7fdfc24dcfce5b48109867c13b4cb15e4660e7bd7661741a391f821f23dfdca7"}, + {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2c926450857408e42f0bbc295e84395722ce74bae69a3b2aa2a65fe22cb14b99"}, + {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1841e200fdafc3d51f974d9d377c079a0694a8f06de2e67b48150328d66d5483"}, + {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f470c92737afa7d4c3aacc001e335062d582053d4dbe73cda126f2d7031068dd"}, + {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:783263a4eaad7c49983fe4b2e7b53fa9770c136c270d2d4bbb6d2192bf4d9caf"}, + {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:924620eef691990dfb56dc4709f280f40baee568c794b5c1885800c3ecc69816"}, + {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ae4dc05c465a08a866b7a1baf360747078b362e6a6dbeb0c57f234db0ef88ae0"}, + {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:bed331fe18f58d844d39ceb398b77d6ac0b010d571cba8267c2e7165806b00ce"}, + {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:02c9ac843e3390826a265e331105efeab489ffaf4dd86384595ee8ce6d35ae7f"}, + {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9545a33965d0d377b0bc823dcabf26980e77f1b6a7caa368a365a9497fb09420"}, + {file = "frozenlist-1.3.3-cp310-cp310-win32.whl", hash = "sha256:d5cd3ab21acbdb414bb6c31958d7b06b85eeb40f66463c264a9b343a4e238642"}, + {file = "frozenlist-1.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:b756072364347cb6aa5b60f9bc18e94b2f79632de3b0190253ad770c5df17db1"}, + {file = "frozenlist-1.3.3-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:b4395e2f8d83fbe0c627b2b696acce67868793d7d9750e90e39592b3626691b7"}, + {file = "frozenlist-1.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:14143ae966a6229350021384870458e4777d1eae4c28d1a7aa47f24d030e6678"}, + {file = "frozenlist-1.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5d8860749e813a6f65bad8285a0520607c9500caa23fea6ee407e63debcdbef6"}, + {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23d16d9f477bb55b6154654e0e74557040575d9d19fe78a161bd33d7d76808e8"}, + {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb82dbba47a8318e75f679690190c10a5e1f447fbf9df41cbc4c3afd726d88cb"}, + {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9309869032abb23d196cb4e4db574232abe8b8be1339026f489eeb34a4acfd91"}, + {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a97b4fe50b5890d36300820abd305694cb865ddb7885049587a5678215782a6b"}, + {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c188512b43542b1e91cadc3c6c915a82a5eb95929134faf7fd109f14f9892ce4"}, + {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:303e04d422e9b911a09ad499b0368dc551e8c3cd15293c99160c7f1f07b59a48"}, + {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0771aed7f596c7d73444c847a1c16288937ef988dc04fb9f7be4b2aa91db609d"}, + {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:66080ec69883597e4d026f2f71a231a1ee9887835902dbe6b6467d5a89216cf6"}, + {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:41fe21dc74ad3a779c3d73a2786bdf622ea81234bdd4faf90b8b03cad0c2c0b4"}, + {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f20380df709d91525e4bee04746ba612a4df0972c1b8f8e1e8af997e678c7b81"}, + {file = "frozenlist-1.3.3-cp311-cp311-win32.whl", hash = "sha256:f30f1928162e189091cf4d9da2eac617bfe78ef907a761614ff577ef4edfb3c8"}, + {file = "frozenlist-1.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:a6394d7dadd3cfe3f4b3b186e54d5d8504d44f2d58dcc89d693698e8b7132b32"}, + {file = "frozenlist-1.3.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8df3de3a9ab8325f94f646609a66cbeeede263910c5c0de0101079ad541af332"}, + {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0693c609e9742c66ba4870bcee1ad5ff35462d5ffec18710b4ac89337ff16e27"}, + {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd4210baef299717db0a600d7a3cac81d46ef0e007f88c9335db79f8979c0d3d"}, + {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:394c9c242113bfb4b9aa36e2b80a05ffa163a30691c7b5a29eba82e937895d5e"}, + {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6327eb8e419f7d9c38f333cde41b9ae348bec26d840927332f17e887a8dcb70d"}, + {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e24900aa13212e75e5b366cb9065e78bbf3893d4baab6052d1aca10d46d944c"}, + {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3843f84a6c465a36559161e6c59dce2f2ac10943040c2fd021cfb70d58c4ad56"}, + 
{file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:84610c1502b2461255b4c9b7d5e9c48052601a8957cd0aea6ec7a7a1e1fb9420"}, + {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:c21b9aa40e08e4f63a2f92ff3748e6b6c84d717d033c7b3438dd3123ee18f70e"}, + {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:efce6ae830831ab6a22b9b4091d411698145cb9b8fc869e1397ccf4b4b6455cb"}, + {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:40de71985e9042ca00b7953c4f41eabc3dc514a2d1ff534027f091bc74416401"}, + {file = "frozenlist-1.3.3-cp37-cp37m-win32.whl", hash = "sha256:180c00c66bde6146a860cbb81b54ee0df350d2daf13ca85b275123bbf85de18a"}, + {file = "frozenlist-1.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:9bbbcedd75acdfecf2159663b87f1bb5cfc80e7cd99f7ddd9d66eb98b14a8411"}, + {file = "frozenlist-1.3.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:034a5c08d36649591be1cbb10e09da9f531034acfe29275fc5454a3b101ce41a"}, + {file = "frozenlist-1.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ba64dc2b3b7b158c6660d49cdb1d872d1d0bf4e42043ad8d5006099479a194e5"}, + {file = "frozenlist-1.3.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:47df36a9fe24054b950bbc2db630d508cca3aa27ed0566c0baf661225e52c18e"}, + {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:008a054b75d77c995ea26629ab3a0c0d7281341f2fa7e1e85fa6153ae29ae99c"}, + {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:841ea19b43d438a80b4de62ac6ab21cfe6827bb8a9dc62b896acc88eaf9cecba"}, + {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e235688f42b36be2b6b06fc37ac2126a73b75fb8d6bc66dd632aa35286238703"}, + {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca713d4af15bae6e5d79b15c10c8522859a9a89d3b361a50b817c98c2fb402a2"}, + {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ac5995f2b408017b0be26d4a1d7c61bce106ff3d9e3324374d66b5964325448"}, + {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4ae8135b11652b08a8baf07631d3ebfe65a4c87909dbef5fa0cdde440444ee4"}, + {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4ea42116ceb6bb16dbb7d526e242cb6747b08b7710d9782aa3d6732bd8d27649"}, + {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:810860bb4bdce7557bc0febb84bbd88198b9dbc2022d8eebe5b3590b2ad6c842"}, + {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:ee78feb9d293c323b59a6f2dd441b63339a30edf35abcb51187d2fc26e696d13"}, + {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0af2e7c87d35b38732e810befb9d797a99279cbb85374d42ea61c1e9d23094b3"}, + {file = "frozenlist-1.3.3-cp38-cp38-win32.whl", hash = "sha256:899c5e1928eec13fd6f6d8dc51be23f0d09c5281e40d9cf4273d188d9feeaf9b"}, + {file = "frozenlist-1.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:7f44e24fa70f6fbc74aeec3e971f60a14dde85da364aa87f15d1be94ae75aeef"}, + {file = "frozenlist-1.3.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2b07ae0c1edaa0a36339ec6cce700f51b14a3fc6545fdd32930d2c83917332cf"}, + {file = "frozenlist-1.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ebb86518203e12e96af765ee89034a1dbb0c3c65052d1b0c19bbbd6af8a145e1"}, + {file = 
"frozenlist-1.3.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5cf820485f1b4c91e0417ea0afd41ce5cf5965011b3c22c400f6d144296ccbc0"}, + {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c11e43016b9024240212d2a65043b70ed8dfd3b52678a1271972702d990ac6d"}, + {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8fa3c6e3305aa1146b59a09b32b2e04074945ffcfb2f0931836d103a2c38f936"}, + {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:352bd4c8c72d508778cf05ab491f6ef36149f4d0cb3c56b1b4302852255d05d5"}, + {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65a5e4d3aa679610ac6e3569e865425b23b372277f89b5ef06cf2cdaf1ebf22b"}, + {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e2c1185858d7e10ff045c496bbf90ae752c28b365fef2c09cf0fa309291669"}, + {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f163d2fd041c630fed01bc48d28c3ed4a3b003c00acd396900e11ee5316b56bb"}, + {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:05cdb16d09a0832eedf770cb7bd1fe57d8cf4eaf5aced29c4e41e3f20b30a784"}, + {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:8bae29d60768bfa8fb92244b74502b18fae55a80eac13c88eb0b496d4268fd2d"}, + {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:eedab4c310c0299961ac285591acd53dc6723a1ebd90a57207c71f6e0c2153ab"}, + {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3bbdf44855ed8f0fbcd102ef05ec3012d6a4fd7c7562403f76ce6a52aeffb2b1"}, + {file = "frozenlist-1.3.3-cp39-cp39-win32.whl", hash = "sha256:efa568b885bca461f7c7b9e032655c0c143d305bf01c30caf6db2854a4532b38"}, + {file = "frozenlist-1.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:cfe33efc9cb900a4c46f91a5ceba26d6df370ffddd9ca386eb1d4f0ad97b9ea9"}, + {file = "frozenlist-1.3.3.tar.gz", hash = "sha256:58bcc55721e8a90b88332d6cd441261ebb22342e238296bb330968952fbb3a6a"}, +] + +[[package]] +name = "idna" +version = "3.4" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, + {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, +] + +[[package]] +name = "importlib-metadata" +version = "6.7.0" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.7" +files = [ + {file = "importlib_metadata-6.7.0-py3-none-any.whl", hash = "sha256:cb52082e659e97afc5dac71e79de97d8681de3aa07ff18578330904a9d18e5b5"}, + {file = "importlib_metadata-6.7.0.tar.gz", hash = "sha256:1aaf550d4f73e5d6783e7acb77aec43d49da8017410afae93822cc9cca98c4d4"}, +] + +[package.dependencies] +typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} +zipp = ">=0.5" + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +perf = ["ipython"] +testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", 
"pytest-perf (>=0.9.2)", "pytest-ruff"] + +[[package]] +name = "markdown-it-py" +version = "2.2.0" +description = "Python port of markdown-it. Markdown parsing, done right!" +optional = false +python-versions = ">=3.7" +files = [ + {file = "markdown-it-py-2.2.0.tar.gz", hash = "sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1"}, + {file = "markdown_it_py-2.2.0-py3-none-any.whl", hash = "sha256:5a35f8d1870171d9acc47b99612dc146129b631baf04970128b568f190d0cc30"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" +typing_extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""} + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["attrs", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + +[[package]] +name = "multidict" +version = "6.0.4" +description = "multidict implementation" +optional = false +python-versions = ">=3.7" +files = [ + {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"}, + {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"}, + {file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = 
"sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5"}, + {file = "multidict-6.0.4-cp310-cp310-win32.whl", hash = "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8"}, + {file = "multidict-6.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc"}, + {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03"}, + {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3"}, + {file = "multidict-6.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461"}, + {file = "multidict-6.0.4-cp311-cp311-win32.whl", hash = "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636"}, + {file = "multidict-6.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0"}, + {file = "multidict-6.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac"}, + {file = 
"multidict-6.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d"}, + {file = "multidict-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775"}, + {file = "multidict-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e"}, + {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c"}, + {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161"}, + {file = "multidict-6.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1"}, + {file = "multidict-6.0.4-cp38-cp38-win32.whl", hash = "sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779"}, + 
{file = "multidict-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480"}, + {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664"}, + {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35"}, + {file = "multidict-6.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95"}, + {file = "multidict-6.0.4-cp39-cp39-win32.whl", hash = "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313"}, + {file = "multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"}, + {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"}, +] + +[[package]] +name = "opensearch-py" +version = "2.3.2" +description = "Python client for OpenSearch" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4" +files = [] +develop = true + +[package.dependencies] +aiohttp = {version = ">=3,<4", optional = true, markers = "extra == \"async\""} +certifi = ">=2022.12.07" +python-dateutil = "*" +requests = ">=2.4.0,<3.0.0" +six = "*" +urllib3 = ">=1.26.9" + +[package.extras] +async = ["aiohttp (>=3,<4)"] +develop = ["black", "botocore", "coverage (<7.0.0)", "jinja2", "mock", "myst_parser", "pytest (>=3.0.0)", "pytest-cov", "pytest-mock (<4.0.0)", "pytz", "pyyaml", "requests (>=2.0.0,<3.0.0)", "sphinx", "sphinx_copybutton", "sphinx_rtd_theme"] +docs = ["myst_parser", "sphinx", "sphinx_copybutton", "sphinx_rtd_theme"] +kerberos = ["requests_kerberos"] + +[package.source] +type = "directory" +url = ".." 
+ +[[package]] +name = "pygments" +version = "2.16.1" +description = "Pygments is a syntax highlighting package written in Python." +optional = false +python-versions = ">=3.7" +files = [ + {file = "Pygments-2.16.1-py3-none-any.whl", hash = "sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692"}, + {file = "Pygments-2.16.1.tar.gz", hash = "sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29"}, +] + +[package.extras] +plugins = ["importlib-metadata"] + +[[package]] +name = "pyinstrument" +version = "4.6.0" +description = "Call stack profiler for Python. Shows you why your code is slow!" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pyinstrument-4.6.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:679b5397e3e6c0d6f56df50ba8c683543df4f1f7c1df2e2eb728e275bde2c85b"}, + {file = "pyinstrument-4.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:18479ffa0c922695ba2befab29521b62bfe75debef48d818cea46262cee48a1e"}, + {file = "pyinstrument-4.6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:daba103955d0d0b37b8bc20a4e8cc6477e839ce5984478fcf3f7cee8318e9636"}, + {file = "pyinstrument-4.6.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d93451e9c7650629b0bc12caa7390f81d1a15835c07f7dc170e953d4684ed1e7"}, + {file = "pyinstrument-4.6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01009a7b58a6f11bf5560c23848ea2881acac974b0841fe5d365ef154baabd6f"}, + {file = "pyinstrument-4.6.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:288ea44da6333dacc77b4ba2149dba3dc1e9fbbebd3d5dc51a66c20839d80ef3"}, + {file = "pyinstrument-4.6.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ecc106213146dd90659a1483047b3a1c2e174fb190c0e109234e524a4651e377"}, + {file = "pyinstrument-4.6.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5cd8ab30c8dcd1511e9b3b98f601f17f2c5c9df1d28f8298d215c63d68919bdc"}, + {file = "pyinstrument-4.6.0-cp310-cp310-win32.whl", hash = "sha256:40e3656e6ace5a140880bd980a25f6a356c094c36e28ed1bf935d7349a78b1b6"}, + {file = "pyinstrument-4.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:d9623fc3fde47ae90ad5014737e37034b4abc3fbfb455b7b56cc095f9037d5af"}, + {file = "pyinstrument-4.6.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:beaaa3b647b3a4cbd34b71eacaa31e3eb90e1bf53e15ada3ac7e9df09d737239"}, + {file = "pyinstrument-4.6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0c69ab570609ac93b5f4ab2e5ccbf8add4f69a962b06307eea66ba65b5ad9d38"}, + {file = "pyinstrument-4.6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5992748a74ec7ff445e4b56b5e316673c34b6cdbd3755111f7c023d8a141f001"}, + {file = "pyinstrument-4.6.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bb1ba76c4e912cae159ab9729c7b31bb6d7fe8ed1f0fafce74484a4bb159c240"}, + {file = "pyinstrument-4.6.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:674868ebc3663b01d7d059a6f5cdeff6f18b49e217617720a5d645a6b55ead03"}, + {file = "pyinstrument-4.6.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:514a0ced357ff400988f599b0294d05e3b68468f9ab876f204bf12765f7fdb1b"}, + {file = "pyinstrument-4.6.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:8ccd1f5b4ad35c734dcf2d08d80b5b37205b4e84aa71fe76f95e43bd30c5eef9"}, + {file = 
"pyinstrument-4.6.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:611c6cd33f42f19e46d99eeef3b84a47d33fe34cdb0ce6e3635d2ee5038706a3"}, + {file = "pyinstrument-4.6.0-cp311-cp311-win32.whl", hash = "sha256:d20b5cf79bca1b3d425a7362457621741393b1d5ce2d920583541b947bc8a368"}, + {file = "pyinstrument-4.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:ecd8cf03b04dc1b7f151896228993c6aa0fa897cdd517ea127465bc1c826c5b5"}, + {file = "pyinstrument-4.6.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:3d4bed520c0f689a75bca4951f6b7fbad96851e8461086c98e03eb726f8a412a"}, + {file = "pyinstrument-4.6.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b74745f1d22133da8d4a38dd0c78c02c00154a5b7683bdd5df56a7c7705a979b"}, + {file = "pyinstrument-4.6.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b6ab698400e8401597e39c4816efa247f2b98c9b4e59e3ec25d534ae6887bd93"}, + {file = "pyinstrument-4.6.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de1a36a083b324dafe5e2880e5e04267a1983beb027f12c3dc361ddbe3acf9af"}, + {file = "pyinstrument-4.6.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8adc4f87d4289c1f04f19451b5133b8e307bd9b08c364c48e007ba663fefbf1b"}, + {file = "pyinstrument-4.6.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:08fbc75d3615be6259b7af0c173c7bc48acb6e7bd758678d54eb411ba2903052"}, + {file = "pyinstrument-4.6.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:d86fea6ce117bcff642e24208eb573c00d78b4c2934eb9bd5f915751980cc9bd"}, + {file = "pyinstrument-4.6.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23a3b21373e0c8bf0d00dda79989fcab0bb1d30094f7b210d40d2226fe20e141"}, + {file = "pyinstrument-4.6.0-cp312-cp312-win32.whl", hash = "sha256:a498c82d93621c5cf736e4660142ac0c3bbcb7b059bcbd4278a6364037128656"}, + {file = "pyinstrument-4.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:9116154446b9999f6524e9db29310aee6476a5a471c276928f2b46b6655a2dcc"}, + {file = "pyinstrument-4.6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:704c6d38abef8fca2e1085756c9574ea180f7ac866aab6943b483152c2828c2a"}, + {file = "pyinstrument-4.6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cbebdc11d4fc6f3123c046d84db88c7f605d53247e3f357314d0c5775d1beaf4"}, + {file = "pyinstrument-4.6.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2c7a7bae4cce5f8d084153857cedbce29ca8274c9924884d0461a5db48619c5d"}, + {file = "pyinstrument-4.6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03289b10715e261a5c33b267d0a430d1b408f929922fde0a9fd311835c60351b"}, + {file = "pyinstrument-4.6.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7f83544ff9abfacdf64b39498ca3dcd454956e44aedb5f67626b7212291c9160"}, + {file = "pyinstrument-4.6.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:40640f02fe7865540e8a1e51bf7f9d2403e3364c3b7edfdb9dae5eb5596811da"}, + {file = "pyinstrument-4.6.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:f3719464888d7303e1081996bc56ab75ef5cdf7ef69ccbb7b29f48eb37d8f8b9"}, + {file = "pyinstrument-4.6.0-cp37-cp37m-win32.whl", hash = "sha256:46e16de6bd3b74ef01b6457d862fee751515315edb5e9283205e45299a29ac49"}, + {file = "pyinstrument-4.6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:9ded87ae11cb0a95a767c817908833ec0821fe0e81650968b201a031edf4bc15"}, + {file = 
"pyinstrument-4.6.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8bf16e459a868d9dbaacff4f0a0acd6ad78ce36f2aceabf21e9fd0c3b6aca0d4"}, + {file = "pyinstrument-4.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cb83e445795431c3d867b298c0583ee27717bbc50e5120a4c98575c979ab3ab8"}, + {file = "pyinstrument-4.6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29072b1be183e173d7b0f12caf29f8717d273afbf34df950f5fa0d98127cd3fb"}, + {file = "pyinstrument-4.6.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09502af2a383c59e5a0d3bebfab7e5845f79122348358e9e52b2b0187db84a44"}, + {file = "pyinstrument-4.6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a23c982eb9c4d2f8fe553dacb9bdc0991170a0998b94c84f75c2a052e8af4c74"}, + {file = "pyinstrument-4.6.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f7a38ef482f2151393e729c5582191e4ab05f0ed1fa56b16c2377ff3129107af"}, + {file = "pyinstrument-4.6.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e983e16c2fdfb752387133380859c3414e119e41c14f39f5f869f29dcf6e995c"}, + {file = "pyinstrument-4.6.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d00c87e5cea48a562d67f0436999463b7989cff2e4c196b0e8ba06d515f191a9"}, + {file = "pyinstrument-4.6.0-cp38-cp38-win32.whl", hash = "sha256:a24c95cabf2ca5d79b62dbc8ff17749768b8aafd777841352f59f4ffd6688782"}, + {file = "pyinstrument-4.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:f3d88b66dbbcdc6e4c57bd8574ad9d096cd23285eee0f4a5cf74f0e0df6aa190"}, + {file = "pyinstrument-4.6.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2bcfec45cdbb9edf6d5853debac4a792de589e621be07a71dc76acb36e144a3a"}, + {file = "pyinstrument-4.6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e790515a22844bbccaa388c7715b037c45a8d0155c4a6f2990659998a8920501"}, + {file = "pyinstrument-4.6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93a30e0d93633a28d4adcf7d7e2d158d6331809b95c2c4a155da17ea1e43eaa3"}, + {file = "pyinstrument-4.6.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aa554eb8ef1c54849dbf480965b073f39b39b517e466ce241808a00398f9742a"}, + {file = "pyinstrument-4.6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e289898c644cbbb61d931bbcb6505e2a279ad1122612c9098bfb0958ebf5764"}, + {file = "pyinstrument-4.6.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:20ce0f1612a019888a6b94fa7f1e7862842f0b5219282e3354d5b35aceb363f6"}, + {file = "pyinstrument-4.6.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:4935f3cdb9062fceac65c50de76f07e05cf630bd3a9c663fedc9e88b5efe7d7c"}, + {file = "pyinstrument-4.6.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:dc9c4577ef4b06ae1592c920d0a4f0f0db587a16f530c629ad93e125bc79ebb7"}, + {file = "pyinstrument-4.6.0-cp39-cp39-win32.whl", hash = "sha256:3ec6b04d8cfb34aec48de7fa77aeb919e8e7e19909740ab7a5553339f6f4c53a"}, + {file = "pyinstrument-4.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:8a6d2e5c15f989629fac41536ec2ca1fe81359fadf4dadf2ff24fe96b389f6df"}, + {file = "pyinstrument-4.6.0.tar.gz", hash = "sha256:3e509e879c853dbc5fdc1757f0cfdbf8bee899c80f53d504a7df28898f0fa8ed"}, +] + +[package.extras] +bin = ["click", "nox"] +docs = ["furo (==2021.6.18b36)", "myst-parser (==0.15.1)", "sphinx (==4.2.0)", "sphinxcontrib-programoutput (==0.17)"] +examples = ["django", "numpy"] +test = ["flaky", 
"greenlet (>=3.0.0a1)", "ipython", "pytest", "pytest-asyncio (==0.12.0)", "sphinx-autobuild (==2021.3.14)", "trio"] +types = ["typing-extensions"] + +[[package]] +name = "python-dateutil" +version = "2.8.2" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "requests" +version = "2.31.0" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.7" +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "rich" +version = "13.6.0" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "rich-13.6.0-py3-none-any.whl", hash = "sha256:2b38e2fe9ca72c9a00170a1a2d20c63c790d0e10ef1fe35eba76e1e7b1d7d245"}, + {file = "rich-13.6.0.tar.gz", hash = "sha256:5c14d22737e6d5084ef4771b62d5d4363165b403455a30a1c8ca39dc7b644bef"}, +] + +[package.dependencies] +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" +typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + +[[package]] +name = "richbench" +version = "1.0.3" +description = "Richbench, a little benchmarking tool" +optional = false +python-versions = ">=3.6" +files = [ + {file = "richbench-1.0.3-py3-none-any.whl", hash = "sha256:f52651cc0e0069a1355c5ed8cda214cb3f8961e7aaa431e440071d30f62e3e55"}, + {file = "richbench-1.0.3.tar.gz", hash = "sha256:744afa3e78cbd919721042c11f7b7f9d2f546cebb3333d40290c4a0d88791701"}, +] + +[package.dependencies] +pyinstrument = "*" +rich = "*" + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "typing-extensions" +version = "4.7.1" +description = "Backported and Experimental Type Hints for Python 3.7+" +optional = false +python-versions = ">=3.7" +files = [ + {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, + {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, +] + +[[package]] +name = "urllib3" +version = "2.0.6" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "urllib3-2.0.6-py3-none-any.whl", hash = "sha256:7a7c7003b000adf9e7ca2a377c9688bbc54ed41b985789ed576570342a375cd2"}, + {file = "urllib3-2.0.6.tar.gz", hash = "sha256:b19e1a85d206b56d7df1d5e683df4a7725252a964e3993648dd0fb5a1c157564"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "yarl" +version = "1.9.2" +description = "Yet another URL library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8c2ad583743d16ddbdf6bb14b5cd76bf43b0d0006e918809d5d4ddf7bde8dd82"}, + {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:82aa6264b36c50acfb2424ad5ca537a2060ab6de158a5bd2a72a032cc75b9eb8"}, + {file = "yarl-1.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0c77533b5ed4bcc38e943178ccae29b9bcf48ffd1063f5821192f23a1bd27b9"}, + {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee4afac41415d52d53a9833ebae7e32b344be72835bbb589018c9e938045a560"}, + {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9bf345c3a4f5ba7f766430f97f9cc1320786f19584acc7086491f45524a551ac"}, + {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a96c19c52ff442a808c105901d0bdfd2e28575b3d5f82e2f5fd67e20dc5f4ea"}, + {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:891c0e3ec5ec881541f6c5113d8df0315ce5440e244a716b95f2525b7b9f3608"}, + {file = "yarl-1.9.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c3a53ba34a636a256d767c086ceb111358876e1fb6b50dfc4d3f4951d40133d5"}, + {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:566185e8ebc0898b11f8026447eacd02e46226716229cea8db37496c8cdd26e0"}, + {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2b0738fb871812722a0ac2154be1f049c6223b9f6f22eec352996b69775b36d4"}, + {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:32f1d071b3f362c80f1a7d322bfd7b2d11e33d2adf395cc1dd4df36c9c243095"}, + {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:e9fdc7ac0d42bc3ea78818557fab03af6181e076a2944f43c38684b4b6bed8e3"}, + {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56ff08ab5df8429901ebdc5d15941b59f6253393cb5da07b4170beefcf1b2528"}, + {file = "yarl-1.9.2-cp310-cp310-win32.whl", hash = "sha256:8ea48e0a2f931064469bdabca50c2f578b565fc446f302a79ba6cc0ee7f384d3"}, + {file = "yarl-1.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:50f33040f3836e912ed16d212f6cc1efb3231a8a60526a407aeb66c1c1956dde"}, + {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:646d663eb2232d7909e6601f1a9107e66f9791f290a1b3dc7057818fe44fc2b6"}, + {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aff634b15beff8902d1f918012fc2a42e0dbae6f469fce134c8a0dc51ca423bb"}, + {file = "yarl-1.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a83503934c6273806aed765035716216cc9ab4e0364f7f066227e1aaea90b8d0"}, + {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:b25322201585c69abc7b0e89e72790469f7dad90d26754717f3310bfe30331c2"}, + {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22a94666751778629f1ec4280b08eb11815783c63f52092a5953faf73be24191"}, + {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ec53a0ea2a80c5cd1ab397925f94bff59222aa3cf9c6da938ce05c9ec20428d"}, + {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:159d81f22d7a43e6eabc36d7194cb53f2f15f498dbbfa8edc8a3239350f59fe7"}, + {file = "yarl-1.9.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:832b7e711027c114d79dffb92576acd1bd2decc467dec60e1cac96912602d0e6"}, + {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:95d2ecefbcf4e744ea952d073c6922e72ee650ffc79028eb1e320e732898d7e8"}, + {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d4e2c6d555e77b37288eaf45b8f60f0737c9efa3452c6c44626a5455aeb250b9"}, + {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:783185c75c12a017cc345015ea359cc801c3b29a2966c2655cd12b233bf5a2be"}, + {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:b8cc1863402472f16c600e3e93d542b7e7542a540f95c30afd472e8e549fc3f7"}, + {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:822b30a0f22e588b32d3120f6d41e4ed021806418b4c9f0bc3048b8c8cb3f92a"}, + {file = "yarl-1.9.2-cp311-cp311-win32.whl", hash = "sha256:a60347f234c2212a9f0361955007fcf4033a75bf600a33c88a0a8e91af77c0e8"}, + {file = "yarl-1.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:be6b3fdec5c62f2a67cb3f8c6dbf56bbf3f61c0f046f84645cd1ca73532ea051"}, + {file = "yarl-1.9.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:38a3928ae37558bc1b559f67410df446d1fbfa87318b124bf5032c31e3447b74"}, + {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac9bb4c5ce3975aeac288cfcb5061ce60e0d14d92209e780c93954076c7c4367"}, + {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3da8a678ca8b96c8606bbb8bfacd99a12ad5dd288bc6f7979baddd62f71c63ef"}, + {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13414591ff516e04fcdee8dc051c13fd3db13b673c7a4cb1350e6b2ad9639ad3"}, + {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf74d08542c3a9ea97bb8f343d4fcbd4d8f91bba5ec9d5d7f792dbe727f88938"}, + {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e7221580dc1db478464cfeef9b03b95c5852cc22894e418562997df0d074ccc"}, + {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:494053246b119b041960ddcd20fd76224149cfea8ed8777b687358727911dd33"}, + {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:52a25809fcbecfc63ac9ba0c0fb586f90837f5425edfd1ec9f3372b119585e45"}, + {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:e65610c5792870d45d7b68c677681376fcf9cc1c289f23e8e8b39c1485384185"}, + {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:1b1bba902cba32cdec51fca038fd53f8beee88b77efc373968d1ed021024cc04"}, + {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:662e6016409828ee910f5d9602a2729a8a57d74b163c89a837de3fea050c7582"}, + {file = "yarl-1.9.2-cp37-cp37m-win32.whl", hash = 
"sha256:f364d3480bffd3aa566e886587eaca7c8c04d74f6e8933f3f2c996b7f09bee1b"}, + {file = "yarl-1.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6a5883464143ab3ae9ba68daae8e7c5c95b969462bbe42e2464d60e7e2698368"}, + {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5610f80cf43b6202e2c33ba3ec2ee0a2884f8f423c8f4f62906731d876ef4fac"}, + {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b9a4e67ad7b646cd6f0938c7ebfd60e481b7410f574c560e455e938d2da8e0f4"}, + {file = "yarl-1.9.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:83fcc480d7549ccebe9415d96d9263e2d4226798c37ebd18c930fce43dfb9574"}, + {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fcd436ea16fee7d4207c045b1e340020e58a2597301cfbcfdbe5abd2356c2fb"}, + {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84e0b1599334b1e1478db01b756e55937d4614f8654311eb26012091be109d59"}, + {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3458a24e4ea3fd8930e934c129b676c27452e4ebda80fbe47b56d8c6c7a63a9e"}, + {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:838162460b3a08987546e881a2bfa573960bb559dfa739e7800ceeec92e64417"}, + {file = "yarl-1.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f4e2d08f07a3d7d3e12549052eb5ad3eab1c349c53ac51c209a0e5991bbada78"}, + {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:de119f56f3c5f0e2fb4dee508531a32b069a5f2c6e827b272d1e0ff5ac040333"}, + {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:149ddea5abf329752ea5051b61bd6c1d979e13fbf122d3a1f9f0c8be6cb6f63c"}, + {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:674ca19cbee4a82c9f54e0d1eee28116e63bc6fd1e96c43031d11cbab8b2afd5"}, + {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:9b3152f2f5677b997ae6c804b73da05a39daa6a9e85a512e0e6823d81cdad7cc"}, + {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5415d5a4b080dc9612b1b63cba008db84e908b95848369aa1da3686ae27b6d2b"}, + {file = "yarl-1.9.2-cp38-cp38-win32.whl", hash = "sha256:f7a3d8146575e08c29ed1cd287068e6d02f1c7bdff8970db96683b9591b86ee7"}, + {file = "yarl-1.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:63c48f6cef34e6319a74c727376e95626f84ea091f92c0250a98e53e62c77c72"}, + {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:75df5ef94c3fdc393c6b19d80e6ef1ecc9ae2f4263c09cacb178d871c02a5ba9"}, + {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c027a6e96ef77d401d8d5a5c8d6bc478e8042f1e448272e8d9752cb0aff8b5c8"}, + {file = "yarl-1.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3b078dbe227f79be488ffcfc7a9edb3409d018e0952cf13f15fd6512847f3f7"}, + {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59723a029760079b7d991a401386390c4be5bfec1e7dd83e25a6a0881859e716"}, + {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b03917871bf859a81ccb180c9a2e6c1e04d2f6a51d953e6a5cdd70c93d4e5a2a"}, + {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1012fa63eb6c032f3ce5d2171c267992ae0c00b9e164efe4d73db818465fac3"}, + {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a74dcbfe780e62f4b5a062714576f16c2f3493a0394e555ab141bf0d746bb955"}, + {file = 
"yarl-1.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c56986609b057b4839968ba901944af91b8e92f1725d1a2d77cbac6972b9ed1"}, + {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2c315df3293cd521033533d242d15eab26583360b58f7ee5d9565f15fee1bef4"}, + {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:b7232f8dfbd225d57340e441d8caf8652a6acd06b389ea2d3222b8bc89cbfca6"}, + {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:53338749febd28935d55b41bf0bcc79d634881195a39f6b2f767870b72514caf"}, + {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:066c163aec9d3d073dc9ffe5dd3ad05069bcb03fcaab8d221290ba99f9f69ee3"}, + {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8288d7cd28f8119b07dd49b7230d6b4562f9b61ee9a4ab02221060d21136be80"}, + {file = "yarl-1.9.2-cp39-cp39-win32.whl", hash = "sha256:b124e2a6d223b65ba8768d5706d103280914d61f5cae3afbc50fc3dfcc016623"}, + {file = "yarl-1.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:61016e7d582bc46a5378ffdd02cd0314fb8ba52f40f9cf4d9a5e7dbef88dee18"}, + {file = "yarl-1.9.2.tar.gz", hash = "sha256:04ab9d4b9f587c06d801c2abfe9317b77cdf996c65a90d5e84ecc45010823571"}, +] + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" +typing-extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""} + +[[package]] +name = "zipp" +version = "3.15.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.7" +files = [ + {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"}, + {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] + +[metadata] +lock-version = "2.0" +python-versions = "^3.7" +content-hash = "84953079e0bc825b495f10721d514529becb4fc8eef2b9772562f63b0bd75ef3" diff --git a/benchmarks/poetry.toml b/benchmarks/poetry.toml new file mode 100644 index 00000000..eadfd54b --- /dev/null +++ b/benchmarks/poetry.toml @@ -0,0 +1,2 @@ +[virtualenvs] +create = true \ No newline at end of file diff --git a/benchmarks/pyproject.toml b/benchmarks/pyproject.toml new file mode 100644 index 00000000..c0c82142 --- /dev/null +++ b/benchmarks/pyproject.toml @@ -0,0 +1,16 @@ +[tool.poetry] +name = "package" +version = "0.1.0" +description = "OpenSearch Python client benchmarks." 
+authors = ["Daniel Doubrovkine "] +license = "Apache 2.0" +readme = "README.md" + +[tool.poetry.dependencies] +python = "^3.7" +opensearch-py = { path = "..", develop=true, extras=["async"] } +richbench = "*" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/benchmarks/thread_with_return_value.py b/benchmarks/thread_with_return_value.py new file mode 100644 index 00000000..fb495656 --- /dev/null +++ b/benchmarks/thread_with_return_value.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python + +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. + +from threading import Thread + + +class ThreadWithReturnValue(Thread): + def __init__( + self, group=None, target=None, name=None, args=(), kwargs={}, Verbose=None + ): + Thread.__init__(self, group, target, name, args, kwargs) + self._return = None + + def run(self): + if self._target is not None: + self._return = self._target(*self._args, **self._kwargs) + + def join(self, *args): + Thread.join(self, *args) + return self._return From e8df29ddb923d7e4b6ac7ce5223e5085308bc409 Mon Sep 17 00:00:00 2001 From: Djcarrillo6 Date: Fri, 13 Oct 2023 15:26:30 -0700 Subject: [PATCH 21/21] Added advanced index actions API guide & sample. Signed-off-by: Djcarrillo6 Updated CHANGELOG Signed-off-by: Djcarrillo6 Updated CHANGELOG & link to sample. Signed-off-by: Djcarrillo6 --- CHANGELOG.md | 3 +- USER_GUIDE.md | 1 + guides/advanced_index_actions.md | 113 ++++++++++++++++++ .../advanced_index_actions_sample.py | 82 +++++++++++++ 4 files changed, 198 insertions(+), 1 deletion(-) create mode 100644 guides/advanced_index_actions.md create mode 100644 samples/advanced_index_actions/advanced_index_actions_sample.py diff --git a/CHANGELOG.md b/CHANGELOG.md index d8019a78..718c628c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added new guide for using index templates with the client ([#531](https://github.com/opensearch-project/opensearch-py/pull/531)) - Added `pool_maxsize` for `Urllib3HttpConnection` ([#535](https://github.com/opensearch-project/opensearch-py/pull/535)) - Added benchmarks ([#537](https://github.com/opensearch-project/opensearch-py/pull/537)) +- Added advanced index actions guide ([#539](https://github.com/opensearch-project/opensearch-py/pull/539)) ### Changed - Generate `tasks` client from API specs ([#508](https://github.com/opensearch-project/opensearch-py/pull/508)) - Generate `ingest` client from API specs ([#513](https://github.com/opensearch-project/opensearch-py/pull/513)) @@ -54,7 +55,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added support for the security plugin ([#399](https://github.com/opensearch-project/opensearch-py/pull/399)) - Supports OpenSearch 2.1.0 - 2.6.0 ([#381](https://github.com/opensearch-project/opensearch-py/pull/381)) - Added `allow_redirects` to `RequestsHttpConnection#perform_request` ([#401](https://github.com/opensearch-project/opensearch-py/pull/401)) -- Enhanced YAML test runner to use OpenSearch `rest-api-spec` YAML tests ([#414](https://github.com/opensearch-project/opensearch-py/pull/414) +- Enhanced YAML test runner to use OpenSearch `rest-api-spec` YAML tests ([#414](https://github.com/opensearch-project/opensearch-py/pull/414)) - Added `Search#collapse` 
([#409](https://github.com/opensearch-project/opensearch-py/issues/409)) - Added support for the ISM API ([#398](https://github.com/opensearch-project/opensearch-py/pull/398)) - Added `trust_env` to `AIOHttpConnection` ([#398](https://github.com/opensearch-project/opensearch-py/pull/438)) diff --git a/USER_GUIDE.md b/USER_GUIDE.md index 416bbc4d..18058368 100644 --- a/USER_GUIDE.md +++ b/USER_GUIDE.md @@ -155,6 +155,7 @@ print(response) - [Using a Proxy](guides/proxy.md) - [Index Templates](guides/index_template.md) - [Connection Classes](guides/connection_classes.md) +- [Advanced Index Actions](guides/advanced_index_actions.md) ## Plugins diff --git a/guides/advanced_index_actions.md b/guides/advanced_index_actions.md new file mode 100644 index 00000000..a3a0620e --- /dev/null +++ b/guides/advanced_index_actions.md @@ -0,0 +1,113 @@ +# Advanced Index Actions Guide +- [Advanced Index Actions](#advanced-index-actions) + - [Setup](#setup) + - [API Actions](#api-actions) + - [Clear Index Cache](#clear-index-cache) + - [Flush Index](#flush-index) + - [Refresh Index](#refresh-index) + - [Open or Close Index](#open-or-close-index) + - [Force Merge Index](#force-merge-index) + - [Clone Index](#clone-index) + - [Split Index](#split-index) + - [Cleanup](#cleanup) + + +# Advanced Index Actions +In this guide, we will look at some advanced index actions that are not covered in the [Index Lifecycle](index_lifecycle.md) guide. + +## Setup +Let's create a client instance and an index named `movies`: + +```python +from opensearchpy import OpenSearch +client = OpenSearch( + hosts=['https://localhost:9200'], + use_ssl=True, + verify_certs=False, + http_auth=('admin', 'admin') +) +client.indices.create(index='movies') +``` + +## API Actions +### Clear index cache +You can clear the cache of an index or indices by using the `indices.clear_cache` API action. The following example clears the cache of the `movies` index: + +```python +client.indices.clear_cache(index='movies') +``` + +By default, the `indices.clear_cache` API action clears all types of cache. To clear specific types of cache, pass the `query`, `fielddata`, or `request` parameter to the API action: + +```python +client.indices.clear_cache(index='movies', query=True) +client.indices.clear_cache(index='movies', fielddata=True, request=True) +``` + +### Flush index +Sometimes you might want to flush an index or indices to make sure that all data in the transaction log is persisted to the index. To flush an index or indices, use the `indices.flush` API action. The following example flushes the `movies` index: + +```python +client.indices.flush(index='movies') +``` + +### Refresh index +You can refresh an index or indices to make sure that all changes are available for search. To refresh an index or indices, use the `indices.refresh` API action: + +```python +client.indices.refresh(index='movies') +``` + +### Open or close index +You can close an index to prevent read and write operations on the index. A closed index does not have to maintain certain data structures that an open index requires, reducing the memory and disk space required by the index. The following example closes and reopens the `movies` index: + +```python +client.indices.close(index='movies') +client.indices.open(index='movies') +``` + +### Force merge index +You can force merge an index or indices to reduce the number of segments in the index. This can be useful if you have a large number of small segments in the index. Merging segments reduces the memory footprint of the index.
Do note that this action is resource-intensive and is only recommended for read-only indices. The following example force merges the `movies` index: + +```python +client.indices.forcemerge(index='movies') +``` + +### Clone index +You can clone an index to create a new index with the same mappings, data, and most of the settings. The source index must be in read-only state for cloning. The following example blocks write operations on the `movies` index, clones it to create a new index named `movies_clone`, then re-enables writes: + +```python +client.indices.put_settings(index='movies', body={'index': {'blocks': {'write': True}}}) +client.indices.clone(index='movies', target='movies_clone') +client.indices.put_settings(index='movies', body={'index': {'blocks': {'write': False}}}) +``` + +### Split index +You can split an index into another index with more primary shards. The source index must be in read-only state for splitting. The following example creates the read-only `books` index with 30 routing shards and 5 primary shards (a factor of 30), splits it into `bigger_books` with 10 shards (also a factor of 30), then re-enables writes: + +```python +client.indices.create( + index='books', + body={ 'settings': { + 'index': { 'number_of_shards': 5, + 'number_of_routing_shards': 30, + 'blocks': { 'write': True } } } }) + +client.indices.split( + index='books', + target='bigger_books', + body={ 'settings': { 'index': { 'number_of_shards': 10 } } }) + +client.indices.put_settings(index='books', body={ 'index': { 'blocks': { 'write': False } } }) +``` + +## Cleanup + +Let's delete all the indices we created in this guide: + +```python +client.indices.delete(index=['movies', 'books', 'movies_clone', 'bigger_books']) +``` + +# Sample Code +See [advanced_index_actions_sample.py](/samples/advanced_index_actions/advanced_index_actions_sample.py) for a working sample of the concepts in this guide.
\ No newline at end of file diff --git a/samples/advanced_index_actions/advanced_index_actions_sample.py b/samples/advanced_index_actions/advanced_index_actions_sample.py new file mode 100644 index 00000000..391d36b9 --- /dev/null +++ b/samples/advanced_index_actions/advanced_index_actions_sample.py @@ -0,0 +1,82 @@ +from opensearchpy import OpenSearch +import time + + +# For cleaner output, comment in the two lines below to disable warnings and informational messages +# import urllib3 +# urllib3.disable_warnings() + + +def test_opensearch_examples(): + # Set up + client = OpenSearch( + hosts=['https://localhost:9200'], + use_ssl=True, + verify_certs=False, + http_auth=('admin', 'admin') + ) + client.indices.create(index='movies') + print("'movies' index created!") + + # Test Clear Index Cache + client.indices.clear_cache(index='movies') + print("Cache for 'movies' index cleared!") + client.indices.clear_cache(index='movies', query=True) + print("Query cache for 'movies' index cleared!") + client.indices.clear_cache(index='movies', fielddata=True, request=True) + print("Field data and request cache for 'movies' index cleared!") + + # Test Flush Index + client.indices.flush(index='movies') + print("'movies' index flushed!") + + # Test Refresh Index + client.indices.refresh(index='movies') + print("'movies' index refreshed!") + + # Test Close or Open Index + client.indices.close(index='movies') + print("'movies' index closed!") + time.sleep(2) # add sleep to ensure the index has time to close + client.indices.open(index='movies') + print("'movies' index opened!") + + # Test Force Merge Index + client.indices.forcemerge(index='movies') + print("'movies' index force merged!") + + # Test Clone + client.indices.put_settings(index='movies', body={'index': {'blocks': {'write': True}}}) + print("Write operations blocked for 'movies' index!") + time.sleep(2) + client.indices.clone(index='movies', target='movies_clone') + print("'movies' index cloned to 'movies_clone'!") + client.indices.put_settings(index='movies', body={'index': {'blocks': {'write': False}}}) + print("Write operations enabled for 'movies' index!") + + # Test Split + client.indices.create( + index='books', + body={'settings': { + 'index': {'number_of_shards': 5, 'number_of_routing_shards': 30, 'blocks': {'write': True}}}} + ) + print("'books' index created!") + time.sleep(2) # add sleep to ensure the index has time to become read-only + client.indices.split( + index='books', + target='bigger_books', + body={'settings': {'index': {'number_of_shards': 10 }}} + ) + print("'books' index split into 'bigger_books'!") + client.indices.put_settings(index='books', body={'index': {'blocks': {'write': False}}}) + print("Write operations enabled for 'books' index!") + + # Cleanup + client.indices.delete(index=['movies', 'books', 'movies_clone', 'bigger_books']) + print("All indices deleted!") + + + + +if __name__ == "__main__": + test_opensearch_examples() \ No newline at end of file
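A quick way to exercise the new sample above (an editor's sketch, not part of the patch): assuming an OpenSearch cluster is reachable at https://localhost:9200 with the `admin`/`admin` credentials hard-coded in the sample, and that `opensearch-py` is installed in the active Python environment, the script can be run directly from the repository root. It prints a progress message after each index action and deletes every index it creates during cleanup.

```bash
$ python samples/advanced_index_actions/advanced_index_actions_sample.py
```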