From b07d17ae244c8162acf81f766f42b1f9fdb73084 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 29 Nov 2023 10:54:52 -0500 Subject: [PATCH 1/4] Bump cryptography from 41.0.4 to 41.0.6 (#252) Bumps [cryptography](https://github.com/pyca/cryptography) from 41.0.4 to 41.0.6. - [Changelog](https://github.com/pyca/cryptography/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pyca/cryptography/compare/41.0.4...41.0.6) --- updated-dependencies: - dependency-name: cryptography dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 50 +++++++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/poetry.lock b/poetry.lock index 69b31e9f..0e9a6421 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. [[package]] name = "aiohttp" @@ -436,34 +436,34 @@ files = [ [[package]] name = "cryptography" -version = "41.0.4" +version = "41.0.6" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." optional = false python-versions = ">=3.7" files = [ - {file = "cryptography-41.0.4-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839"}, - {file = "cryptography-41.0.4-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f"}, - {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714"}, - {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb"}, - {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13"}, - {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143"}, - {file = "cryptography-41.0.4-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397"}, - {file = "cryptography-41.0.4-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860"}, - {file = "cryptography-41.0.4-cp37-abi3-win32.whl", hash = "sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd"}, - {file = "cryptography-41.0.4-cp37-abi3-win_amd64.whl", hash = "sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d"}, - {file = "cryptography-41.0.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67"}, - {file = "cryptography-41.0.4-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e"}, - {file = "cryptography-41.0.4-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829"}, - {file = "cryptography-41.0.4-pp310-pypy310_pp73-win_amd64.whl", hash = 
"sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca"}, - {file = "cryptography-41.0.4-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d"}, - {file = "cryptography-41.0.4-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac"}, - {file = "cryptography-41.0.4-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9"}, - {file = "cryptography-41.0.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f"}, - {file = "cryptography-41.0.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91"}, - {file = "cryptography-41.0.4-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8"}, - {file = "cryptography-41.0.4-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6"}, - {file = "cryptography-41.0.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311"}, - {file = "cryptography-41.0.4.tar.gz", hash = "sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a"}, + {file = "cryptography-41.0.6-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:0f27acb55a4e77b9be8d550d762b0513ef3fc658cd3eb15110ebbcbd626db12c"}, + {file = "cryptography-41.0.6-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:ae236bb8760c1e55b7a39b6d4d32d2279bc6c7c8500b7d5a13b6fb9fc97be35b"}, + {file = "cryptography-41.0.6-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afda76d84b053923c27ede5edc1ed7d53e3c9f475ebaf63c68e69f1403c405a8"}, + {file = "cryptography-41.0.6-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da46e2b5df770070412c46f87bac0849b8d685c5f2679771de277a422c7d0b86"}, + {file = "cryptography-41.0.6-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ff369dd19e8fe0528b02e8df9f2aeb2479f89b1270d90f96a63500afe9af5cae"}, + {file = "cryptography-41.0.6-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:b648fe2a45e426aaee684ddca2632f62ec4613ef362f4d681a9a6283d10e079d"}, + {file = "cryptography-41.0.6-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5daeb18e7886a358064a68dbcaf441c036cbdb7da52ae744e7b9207b04d3908c"}, + {file = "cryptography-41.0.6-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:068bc551698c234742c40049e46840843f3d98ad7ce265fd2bd4ec0d11306596"}, + {file = "cryptography-41.0.6-cp37-abi3-win32.whl", hash = "sha256:2132d5865eea673fe6712c2ed5fb4fa49dba10768bb4cc798345748380ee3660"}, + {file = "cryptography-41.0.6-cp37-abi3-win_amd64.whl", hash = "sha256:48783b7e2bef51224020efb61b42704207dde583d7e371ef8fc2a5fb6c0aabc7"}, + {file = "cryptography-41.0.6-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:8efb2af8d4ba9dbc9c9dd8f04d19a7abb5b49eab1f3694e7b5a16a5fc2856f5c"}, + {file = "cryptography-41.0.6-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c5a550dc7a3b50b116323e3d376241829fd326ac47bc195e04eb33a8170902a9"}, + {file = "cryptography-41.0.6-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:85abd057699b98fce40b41737afb234fef05c67e116f6f3650782c10862c43da"}, + {file = "cryptography-41.0.6-pp310-pypy310_pp73-win_amd64.whl", hash = 
"sha256:f39812f70fc5c71a15aa3c97b2bbe213c3f2a460b79bd21c40d033bb34a9bf36"}, + {file = "cryptography-41.0.6-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:742ae5e9a2310e9dade7932f9576606836ed174da3c7d26bc3d3ab4bd49b9f65"}, + {file = "cryptography-41.0.6-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:35f3f288e83c3f6f10752467c48919a7a94b7d88cc00b0668372a0d2ad4f8ead"}, + {file = "cryptography-41.0.6-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4d03186af98b1c01a4eda396b137f29e4e3fb0173e30f885e27acec8823c1b09"}, + {file = "cryptography-41.0.6-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:b27a7fd4229abef715e064269d98a7e2909ebf92eb6912a9603c7e14c181928c"}, + {file = "cryptography-41.0.6-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:398ae1fc711b5eb78e977daa3cbf47cec20f2c08c5da129b7a296055fbb22aed"}, + {file = "cryptography-41.0.6-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7e00fb556bda398b99b0da289ce7053639d33b572847181d6483ad89835115f6"}, + {file = "cryptography-41.0.6-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:60e746b11b937911dc70d164060d28d273e31853bb359e2b2033c9e93e6f3c43"}, + {file = "cryptography-41.0.6-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3288acccef021e3c3c10d58933f44e8602cf04dba96d9796d70d537bb2f4bbc4"}, + {file = "cryptography-41.0.6.tar.gz", hash = "sha256:422e3e31d63743855e43e5a6fcc8b4acab860f560f9321b0ee6269cc7ed70cc3"}, ] [package.dependencies] From bd1f74dd76579b9232875c94ccad12b91144d1ed Mon Sep 17 00:00:00 2001 From: digitalocean-engineering Date: Wed, 29 Nov 2023 11:03:13 -0500 Subject: [PATCH 2/4] [bot] Updated client based on openapi-423c2d3/clientgen (#248) Co-authored-by: API Engineering Co-authored-by: Andrew Starr-Bochicchio --- DO_OPENAPI_COMMIT_SHA.txt | 2 +- src/pydo/aio/operations/_operations.py | 4 ++++ src/pydo/operations/_operations.py | 4 ++++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/DO_OPENAPI_COMMIT_SHA.txt b/DO_OPENAPI_COMMIT_SHA.txt index fcda552c..622654a3 100644 --- a/DO_OPENAPI_COMMIT_SHA.txt +++ b/DO_OPENAPI_COMMIT_SHA.txt @@ -1 +1 @@ -7a717b3 +423c2d3 \ No newline at end of file diff --git a/src/pydo/aio/operations/_operations.py b/src/pydo/aio/operations/_operations.py index bce0eb26..4b43aaea 100644 --- a/src/pydo/aio/operations/_operations.py +++ b/src/pydo/aio/operations/_operations.py @@ -72987,6 +72987,10 @@ async def update_online_migration( body = { "disable_ssl": bool, # Optional. Enables SSL encryption when connecting to the source database. + "ignore_dbs": [ + "str" # Optional. List of databases that should be ignored during + migration. + ], "source": { "dbname": "str", # Optional. The name of the default database. "host": "str", # Optional. The FQDN pointing to the database diff --git a/src/pydo/operations/_operations.py b/src/pydo/operations/_operations.py index 67ae6ee2..fa646cd4 100644 --- a/src/pydo/operations/_operations.py +++ b/src/pydo/operations/_operations.py @@ -79862,6 +79862,10 @@ def update_online_migration( body = { "disable_ssl": bool, # Optional. Enables SSL encryption when connecting to the source database. + "ignore_dbs": [ + "str" # Optional. List of databases that should be ignored during + migration. + ], "source": { "dbname": "str", # Optional. The name of the default database. "host": "str", # Optional. 
From 0043b0f96658eb66604f9894a96c18438d417456 Mon Sep 17 00:00:00 2001 From: digitalocean-engineering Date: Wed, 29 Nov 2023 11:06:11 -0500 Subject: [PATCH 3/4] [bot] Updated client based on openapi-13ed326/clientgen (#249) Co-authored-by: API Engineering Co-authored-by: danaelhe <42972711+danaelhe@users.noreply.github.com> Co-authored-by: Andrew Starr-Bochicchio --- DO_OPENAPI_COMMIT_SHA.txt | 2 +- src/pydo/aio/operations/_operations.py | 54 +++++--------------------- src/pydo/operations/_operations.py | 54 +++++--------------------- 3 files changed, 19 insertions(+), 91 deletions(-) diff --git a/DO_OPENAPI_COMMIT_SHA.txt b/DO_OPENAPI_COMMIT_SHA.txt index 622654a3..55e54a30 100644 --- a/DO_OPENAPI_COMMIT_SHA.txt +++ b/DO_OPENAPI_COMMIT_SHA.txt @@ -1 +1 @@ -423c2d3 \ No newline at end of file +13ed326 diff --git a/src/pydo/aio/operations/_operations.py b/src/pydo/aio/operations/_operations.py index 4b43aaea..122e469f 100644 --- a/src/pydo/aio/operations/_operations.py +++ b/src/pydo/aio/operations/_operations.py @@ -79427,14 +79427,10 @@ async def create_kafka_topic( "segment_jitter_ms": 0, # Optional. Default value is 0. The segment_jitter_ms specifies the maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is 604800000. The + "segment_ms": 604800000 # Optional. Default value is 604800000. The segment_ms specifies the period of time after which the log will be forced to roll if the segment file isn't full. This ensures that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default value is - False. Whether unclean_leader_election_enable specifies whether to allow - replicas that are not insync to be elected as leaders as a last resort. This - may result in data loss since those leaders are not insync. }, "name": "str", # Optional. The name of the Kafka topic. "partition_count": 0, # Optional. The number of partitions available for the @@ -79534,14 +79530,10 @@ async def create_kafka_topic( "segment_jitter_ms": 0, # Optional. Default value is 0. The segment_jitter_ms specifies the maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is + "segment_ms": 604800000 # Optional. Default value is 604800000. The segment_ms specifies the period of time after which the log will be forced to roll if the segment file isn't full. This ensures that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. }, "name": "str", # Optional. The name of the Kafka topic. "partitions": [ @@ -79705,14 +79697,10 @@ async def create_kafka_topic( "segment_jitter_ms": 0, # Optional. Default value is 0. The segment_jitter_ms specifies the maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is + "segment_ms": 604800000 # Optional. Default value is 604800000. The segment_ms specifies the period of time after which the log will be forced to roll if the segment file isn't full.
This ensures that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. }, "name": "str", # Optional. The name of the Kafka topic. "partitions": [ @@ -79874,14 +79862,10 @@ async def create_kafka_topic( "segment_jitter_ms": 0, # Optional. Default value is 0. The segment_jitter_ms specifies the maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is + "segment_ms": 604800000 # Optional. Default value is 604800000. The segment_ms specifies the period of time after which the log will be forced to roll if the segment file isn't full. This ensures that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. }, "name": "str", # Optional. The name of the Kafka topic. "partitions": [ @@ -80124,14 +80108,10 @@ async def get_kafka_topic( "segment_jitter_ms": 0, # Optional. Default value is 0. The segment_jitter_ms specifies the maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is + "segment_ms": 604800000 # Optional. Default value is 604800000. The segment_ms specifies the period of time after which the log will be forced to roll if the segment file isn't full. This ensures that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. }, "name": "str", # Optional. The name of the Kafka topic. "partitions": [ @@ -80361,14 +80341,10 @@ async def update_kafka_topic( "segment_jitter_ms": 0, # Optional. Default value is 0. The segment_jitter_ms specifies the maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is 604800000. The + "segment_ms": 604800000 # Optional. Default value is 604800000. The segment_ms specifies the period of time after which the log will be forced to roll if the segment file isn't full. This ensures that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default value is - False. Whether unclean_leader_election_enable specifies whether to allow - replicas that are not insync to be elected as leaders as a last resort. This - may result in data loss since those leaders are not insync. }, "partition_count": 0, # Optional. The number of partitions available for the topic. On update, this value can only be increased. @@ -80467,14 +80443,10 @@ async def update_kafka_topic( "segment_jitter_ms": 0, # Optional. Default value is 0. The segment_jitter_ms specifies the maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling. 
- "segment_ms": 604800000, # Optional. Default value is + "segment_ms": 604800000 # Optional. Default value is 604800000. The segment_ms specifies the period of time after which the log will be forced to roll if the segment file isn't full. This ensures that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. }, "name": "str", # Optional. The name of the Kafka topic. "partitions": [ @@ -80641,14 +80613,10 @@ async def update_kafka_topic( "segment_jitter_ms": 0, # Optional. Default value is 0. The segment_jitter_ms specifies the maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is + "segment_ms": 604800000 # Optional. Default value is 604800000. The segment_ms specifies the period of time after which the log will be forced to roll if the segment file isn't full. This ensures that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. }, "name": "str", # Optional. The name of the Kafka topic. "partitions": [ @@ -80813,14 +80781,10 @@ async def update_kafka_topic( "segment_jitter_ms": 0, # Optional. Default value is 0. The segment_jitter_ms specifies the maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is + "segment_ms": 604800000 # Optional. Default value is 604800000. The segment_ms specifies the period of time after which the log will be forced to roll if the segment file isn't full. This ensures that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. }, "name": "str", # Optional. The name of the Kafka topic. "partitions": [ diff --git a/src/pydo/operations/_operations.py b/src/pydo/operations/_operations.py index fa646cd4..9b1a4093 100644 --- a/src/pydo/operations/_operations.py +++ b/src/pydo/operations/_operations.py @@ -86294,14 +86294,10 @@ def create_kafka_topic( "segment_jitter_ms": 0, # Optional. Default value is 0. The segment_jitter_ms specifies the maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is 604800000. The + "segment_ms": 604800000 # Optional. Default value is 604800000. The segment_ms specifies the period of time after which the log will be forced to roll if the segment file isn't full. This ensures that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default value is - False. Whether unclean_leader_election_enable specifies whether to allow - replicas that are not insync to be elected as leaders as a last resort. 
This - may result in data loss since those leaders are not insync. }, "name": "str", # Optional. The name of the Kafka topic. "partition_count": 0, # Optional. The number of partitions available for the @@ -86401,14 +86397,10 @@ def create_kafka_topic( "segment_jitter_ms": 0, # Optional. Default value is 0. The segment_jitter_ms specifies the maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is + "segment_ms": 604800000 # Optional. Default value is 604800000. The segment_ms specifies the period of time after which the log will be forced to roll if the segment file isn't full. This ensures that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. }, "name": "str", # Optional. The name of the Kafka topic. "partitions": [ @@ -86572,14 +86564,10 @@ def create_kafka_topic( "segment_jitter_ms": 0, # Optional. Default value is 0. The segment_jitter_ms specifies the maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is + "segment_ms": 604800000 # Optional. Default value is 604800000. The segment_ms specifies the period of time after which the log will be forced to roll if the segment file isn't full. This ensures that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. }, "name": "str", # Optional. The name of the Kafka topic. "partitions": [ @@ -86741,14 +86729,10 @@ def create_kafka_topic( "segment_jitter_ms": 0, # Optional. Default value is 0. The segment_jitter_ms specifies the maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is + "segment_ms": 604800000 # Optional. Default value is 604800000. The segment_ms specifies the period of time after which the log will be forced to roll if the segment file isn't full. This ensures that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. }, "name": "str", # Optional. The name of the Kafka topic. "partitions": [ @@ -86991,14 +86975,10 @@ def get_kafka_topic( "segment_jitter_ms": 0, # Optional. Default value is 0. The segment_jitter_ms specifies the maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is + "segment_ms": 604800000 # Optional. Default value is 604800000. The segment_ms specifies the period of time after which the log will be forced to roll if the segment file isn't full. This ensures that retention can delete or compact old data. 
- "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. }, "name": "str", # Optional. The name of the Kafka topic. "partitions": [ @@ -87228,14 +87208,10 @@ def update_kafka_topic( "segment_jitter_ms": 0, # Optional. Default value is 0. The segment_jitter_ms specifies the maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is 604800000. The + "segment_ms": 604800000 # Optional. Default value is 604800000. The segment_ms specifies the period of time after which the log will be forced to roll if the segment file isn't full. This ensures that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default value is - False. Whether unclean_leader_election_enable specifies whether to allow - replicas that are not insync to be elected as leaders as a last resort. This - may result in data loss since those leaders are not insync. }, "partition_count": 0, # Optional. The number of partitions available for the topic. On update, this value can only be increased. @@ -87334,14 +87310,10 @@ def update_kafka_topic( "segment_jitter_ms": 0, # Optional. Default value is 0. The segment_jitter_ms specifies the maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is + "segment_ms": 604800000 # Optional. Default value is 604800000. The segment_ms specifies the period of time after which the log will be forced to roll if the segment file isn't full. This ensures that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. }, "name": "str", # Optional. The name of the Kafka topic. "partitions": [ @@ -87508,14 +87480,10 @@ def update_kafka_topic( "segment_jitter_ms": 0, # Optional. Default value is 0. The segment_jitter_ms specifies the maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is + "segment_ms": 604800000 # Optional. Default value is 604800000. The segment_ms specifies the period of time after which the log will be forced to roll if the segment file isn't full. This ensures that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. }, "name": "str", # Optional. The name of the Kafka topic. "partitions": [ @@ -87680,14 +87648,10 @@ def update_kafka_topic( "segment_jitter_ms": 0, # Optional. Default value is 0. The segment_jitter_ms specifies the maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is + "segment_ms": 604800000 # Optional. 
Default value is 604800000. The segment_ms specifies the period of time after which the log will be forced to roll if the segment file isn't full. This ensures that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. }, "name": "str", # Optional. The name of the Kafka topic. "partitions": [
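Editor's note: PATCH 3/4 is likewise docstring-only. It removes the retired ``unclean_leader_election_enable`` key from the Kafka topic examples and, because ``segment_ms`` becomes the last entry of the example ``config`` mapping, drops its trailing comma. A sketch of ``create_kafka_topic`` using only the keys that remain documented; the token, cluster UUID, and config values are placeholders, and the exact body shape should be verified against the generated signature:

    import os

    from pydo import Client

    client = Client(token=os.environ["DIGITALOCEAN_TOKEN"])

    # Create a topic; note the config no longer carries an
    # "unclean_leader_election_enable" key.
    topic = client.databases.create_kafka_topic(
        database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",  # placeholder
        body={
            "name": "customer-events",
            "partition_count": 3,
            "replication_factor": 2,
            "config": {
                "retention_ms": 86400000,  # keep records for one day
                "segment_ms": 604800000,  # documented default: one week
            },
        },
    )
    print(topic)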
From bbe5d06b44e118579e94f58cb63cd3f908d57b0b Mon Sep 17 00:00:00 2001 From: digitalocean-engineering Date: Wed, 29 Nov 2023 11:09:09 -0500 Subject: [PATCH 4/4] [bot] Updated client based on openapi-32be7e9/clientgen (#251) Co-authored-by: API Engineering Co-authored-by: Andrew Starr-Bochicchio --- DO_OPENAPI_COMMIT_SHA.txt | 2 +- src/pydo/aio/operations/_operations.py | 114 +++++++++++++++++++++ src/pydo/operations/_operations.py | 136 +++++++++++++++++++++++++ 3 files changed, 251 insertions(+), 1 deletion(-) diff --git a/DO_OPENAPI_COMMIT_SHA.txt b/DO_OPENAPI_COMMIT_SHA.txt index 55e54a30..69a619b8 100644 --- a/DO_OPENAPI_COMMIT_SHA.txt +++ b/DO_OPENAPI_COMMIT_SHA.txt @@ -1 +1 @@ -13ed326 +32be7e9 \ No newline at end of file diff --git a/src/pydo/aio/operations/_operations.py b/src/pydo/aio/operations/_operations.py index 122e469f..f357dee8 100644 --- a/src/pydo/aio/operations/_operations.py +++ b/src/pydo/aio/operations/_operations.py @@ -102,6 +102,7 @@ build_databases_list_backups_request, build_databases_list_clusters_request, build_databases_list_connection_pools_request, + build_databases_list_events_logs_request, build_databases_list_firewall_rules_request, build_databases_list_kafka_topics_request, build_databases_list_options_request, @@ -75110,6 +75111,119 @@ async def create_replica( return cast(JSON, deserialized) + @distributed_trace_async + async def list_events_logs(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + """List all Events Logs. + + To list all of the cluster events, send a GET request to + ``/v2/databases/$DATABASE_ID/events``. + + The result will be a JSON object with an ``events`` key. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "events": [ + { + "cluster_name": "str", # Optional. The name of the cluster. + "create_time": "str", # Optional. The time of the generation + of an event. + "event_type": "str", # Optional. Type of the event. + "id": "str" # Optional. ID of the particular event. + } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 401: lambda response: ClientAuthenticationError(response=response), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls = kwargs.pop("cls", None) # type: ClsType[JSON] + + request = build_databases_list_events_logs_request( + database_cluster_uuid=database_cluster_uuid, + headers=_headers, + params=_params, + ) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + map_error( + status_code=response.status_code, response=response, error_map=error_map + ) + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) + + return cast(JSON, deserialized) + @distributed_trace_async + async def get_replica( + self, database_cluster_uuid: str, replica_name: str, **kwargs: Any diff --git a/src/pydo/operations/_operations.py b/src/pydo/operations/_operations.py index 9b1a4093..0a9b865c 100644 --- a/src/pydo/operations/_operations.py +++ b/src/pydo/operations/_operations.py @@ -1795,6 +1795,29 @@ def build_databases_create_replica_request( return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) +def build_databases_list_events_logs_request( + database_cluster_uuid: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/databases/{database_cluster_uuid}/events" + path_format_arguments = { + "database_cluster_uuid": _SERIALIZER.url( + "database_cluster_uuid", database_cluster_uuid, "str" + ), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs) + + def build_databases_get_replica_request( database_cluster_uuid: str, replica_name: str, **kwargs: Any ) -> HttpRequest: @@ -81983,6 +82006,119 @@ def create_replica( return cast(JSON, deserialized) + @distributed_trace + def list_events_logs(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + """List all Events Logs.
+ + To list all of the cluster events, send a GET request to + ``/v2/databases/$DATABASE_ID/events``. + + The result will be a JSON object with an ``events`` key. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "events": [ + { + "cluster_name": "str", # Optional. The name of the cluster. + "create_time": "str", # Optional. The time of the generation + of an event. + "event_type": "str", # Optional. Type of the event. + "id": "str" # Optional. ID of the particular event. + } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 401: lambda response: ClientAuthenticationError(response=response), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls = kwargs.pop("cls", None) # type: ClsType[JSON] + + request = build_databases_list_events_logs_request( + database_cluster_uuid=database_cluster_uuid, + headers=_headers, + params=_params, + ) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + map_error( + status_code=response.status_code, response=response, error_map=error_map + ) + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) + + return cast(JSON, deserialized) + @distributed_trace def get_replica( self, database_cluster_uuid: str, replica_name: str, **kwargs: Any
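Editor's note: PATCH 4/4 is the only one in the series that adds a new operation, ``list_events_logs``, wired up end to end: a request builder for ``GET /v2/databases/{database_cluster_uuid}/events`` plus sync and asyncio methods that deserialize both the JSON body and the rate-limit headers. A usage sketch follows; the token and cluster UUID are placeholders, and the ``cls`` callback is the standard azure-core hook which, per the generated code above, is invoked as ``cls(pipeline_response, deserialized, response_headers)``:

    import os

    from pydo import Client

    client = Client(token=os.environ["DIGITALOCEAN_TOKEN"])
    cluster_uuid = "9cc10173-e9ea-4176-9dbc-a4cee4c4ff30"  # placeholder

    # Plain call: returns the deserialized JSON body.
    events = client.databases.list_events_logs(cluster_uuid)
    for event in events["events"]:
        print(event["create_time"], event["event_type"], event["id"])

    # Same call, but also capture the deserialized rate-limit headers.
    body, headers = client.databases.list_events_logs(
        cluster_uuid,
        cls=lambda pipeline_response, deserialized, response_headers: (
            deserialized,
            response_headers,
        ),
    )
    print(headers["ratelimit-remaining"], "requests remaining")

The asyncio client under ``pydo.aio`` exposes the same method as an awaitable with an identical signature, as the first hunks of the patch show.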