From 9baa20e4118c34c12dc7005ba1bb69c2388ceb10 Mon Sep 17 00:00:00 2001 From: Mohit Date: Wed, 4 Oct 2023 00:44:34 +0530 Subject: [PATCH 1/2] added test --- tests/mocked/test_droplets.py | 367 ++++++++++++++++++++++++++++++++++ 1 file changed, 367 insertions(+) diff --git a/tests/mocked/test_droplets.py b/tests/mocked/test_droplets.py index 56b37559..a69e71de 100644 --- a/tests/mocked/test_droplets.py +++ b/tests/mocked/test_droplets.py @@ -1,3 +1,6 @@ +# pylint: disable=line-too-long +# pylint: disable=too-many-lines + """Mock tests for the droplets API resource.""" import responses @@ -450,3 +453,367 @@ def test_delete(mock_client: Client, mock_client_url): del_resp = mock_client.droplets.destroy(1) assert del_resp is None + + +@responses.activate +def test_destroy_by_tag(mock_client: Client, mock_client_url): + """Mocks the droplets destroy by tag operation.""" + + tag_name = "awesome" + + responses.add( + responses.DELETE, + f"{mock_client_url}/v2/droplets?tag_name={tag_name}", + status=204, + ) + + del_resp = mock_client.droplets.destroy_by_tag(tag_name=tag_name) + + assert del_resp is None + + +@responses.activate +def test_list_backups(mock_client: Client, mock_client_url): + """Mocks the droplets list backups operation.""" + + expected = { + "backups": [ + { + "id": 67539192, + "name": "web-01- 2020-07-29", + "distribution": "Ubuntu", + "slug": None, + "public": False, + "regions": ["nyc3"], + "created_at": "2020-07-29T01:44:35Z", + "min_disk_size": 50, + "size_gigabytes": 2.34, + "type": "backup", + } + ], + "links": {}, + "meta": {"total": 1}, + } + droplet_id = 1 + + responses.add( + responses.GET, + f"{mock_client_url}/v2/droplets/{droplet_id}/backups", + json=expected, + status=200, + ) + + resp = mock_client.droplets.list_backups(droplet_id) + + assert expected == resp + + +@responses.activate +def test_list_snapshots(mock_client: Client, mock_client_url): + """Mocks the droplets list snapshots operation.""" + + expected = { + "snapshots": [ + { + "id": 6372721, + "name": "web-01-1595954862243", + "created_at": "2020-07-28T16:47:44Z", + "regions": ["nyc3", "sfo3"], + "min_disk_size": 30, + "size_gigabytes": 2.34, + "type": "snapshot", + } + ], + "links": {}, + "meta": {"total": 1}, + } + droplet_id = 3929391 + + responses.add( + responses.GET, + f"{mock_client_url}/v2/droplets/{droplet_id}/snapshots", + json=expected, + status=200, + ) + + resp = mock_client.droplets.list_snapshots(droplet_id) + + assert expected == resp + + +@responses.activate +def test_list_kernels(mock_client: Client, mock_client_url): + """Mocks the droplets list kernels operation.""" + + expected = { + "kernels": [ + { + "id": 7515, + "name": "DigitalOcean GrubLoader v0.2 (20160714)", + "version": "2016.07.13-DigitalOcean_loader_Ubuntu", + } + ], + "links": { + "pages": { + "next": "https://api.digitalocean.com/v2/droplets/3164444/kernels?page=2&per_page=1", + "last": "https://api.digitalocean.com/v2/droplets/3164444/kernels?page=171&per_page=1", + } + }, + "meta": {"total": 171}, + } + droplet_id = 3929391 + + responses.add( + responses.GET, + f"{mock_client_url}/v2/droplets/{droplet_id}/kernels", + json=expected, + status=200, + ) + + resp = mock_client.droplets.list_kernels(droplet_id) + + assert expected == resp + + +@responses.activate +def test_list_firewalls(mock_client: Client, mock_client_url): + """Mocks the droplets list firewalls operation.""" + + expected = { + "firewalls": [ + { + "id": "bb4b2611-3d72-467b-8602-280330ecd65c", + "status": "succeeded", + "created_at": "2020-05-23T21:24:00Z", 
+ "pending_changes": [ + {"droplet_id": 8043964, "removing": True, "status": "waiting"} + ], + "name": "firewall", + "droplet_ids": [89989, 33322], + "tags": ["base-image", "prod"], + "inbound_rules": [ + { + "protocol": "udp", + "ports": "8000-9000", + "sources": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [8282823, 3930392], + "load_balancer_uids": [ + "4de7ac8b-495b-4884-9a69-1050c6793cd6" + ], + "tags": ["base-image", "dev"], + }, + } + ], + "outbound_rules": [ + { + "protocol": "tcp", + "ports": "7000-9000", + "destinations": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [3827493, 213213], + "load_balancer_uids": [ + "4de7ac8b-495b-4884-9a69-1050c6793cd6" + ], + "tags": ["base-image", "prod"], + }, + } + ], + } + ], + "links": {"pages": {}}, + "meta": {"total": 1}, + } + droplet_id = 3164444 + + responses.add( + responses.GET, + f"{mock_client_url}/v2/droplets/{droplet_id}/firewalls", + json=expected, + status=200, + ) + + resp = mock_client.droplets.list_firewalls(droplet_id) + + assert expected == resp + + +@responses.activate +def test_list_neighbors(mock_client: Client, mock_client_url): + """Mocks the droplets delete operation.""" + + expected = { + "droplets": [ + { + "id": 3164444, + "name": "example.com", + "memory": 1024, + "vcpus": 1, + "disk": 25, + "locked": False, + "status": "active", + "kernel": { + "id": 7515, + "name": "DigitalOcean GrubLoader v0.2 (20160714)", + "version": "2016.07.13-DigitalOcean_loader_Ubuntu", + }, + "created_at": "2020-07-21T18:37:44Z", + "features": ["backups", "private_networking", "ipv6"], + "backup_ids": [53893572], + "next_backup_window": { + "start": "2019-12-04T00:00:00Z", + "end": "2019-12-04T23:00:00Z", + }, + "snapshot_ids": [67512819], + "image": { + "id": 7555620, + "name": "Nifty New Snapshot", + "type": "snapshot", + "distribution": "Ubuntu", + "slug": "nifty1", + "public": True, + "regions": ["nyc1", "nyc2"], + "created_at": "2020-05-04T22:23:02Z", + "min_disk_size": 20, + "size_gigabytes": 2.34, + "description": " ", + "tags": ["base-image", "prod"], + "status": "NEW", + "error_message": " ", + }, + "volume_ids": ["506f78a4-e098-11e5-ad9f-000f53306ae1"], + "size": { + "slug": "s-1vcpu-1gb", + "memory": 1024, + "vcpus": 1, + "disk": 25, + "transfer": 1, + "price_monthly": 5, + "price_hourly": 0.00743999984115362, + "regions": [ + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + ], + "available": True, + "description": "Basic", + }, + "size_slug": "s-1vcpu-1gb", + "networks": { + "v4": [ + { + "ip_address": "104.236.32.182", + "netmask": "255.255.192.0", + "gateway": "104.236.0.1", + "type": "public", + } + ], + "v6": [ + { + "ip_address": "2604:a880:0:1010::18a:a001", + "netmask": 64, + "gateway": "2604:a880:0:1010::1", + "type": "public", + } + ], + }, + "region": { + "name": "New York 3", + "slug": "nyc3", + "features": [ + "private_networking", + "backups", + "ipv6", + "metadata", + "install_agent", + "storage", + "image_transfer", + ], + "available": True, + "sizes": [ + "s-1vcpu-1gb", + "s-1vcpu-2gb", + "s-1vcpu-3gb", + "s-2vcpu-2gb", + "s-3vcpu-1gb", + "s-2vcpu-4gb", + "s-4vcpu-8gb", + "s-6vcpu-16gb", + "s-8vcpu-32gb", + "s-12vcpu-48gb", + "s-16vcpu-64gb", + "s-20vcpu-96gb", + "s-24vcpu-128gb", + "s-32vcpu-192g", + ], + }, + "tags": ["web", "env:prod"], + "vpc_uuid": "760e09ef-dc84-11e8-981e-3cfdfeaae000", + } + ] + } + droplet_id = 3164444 + + responses.add( + responses.GET, + 
f"{mock_client_url}/v2/droplets/{droplet_id}/neighbors", + json=expected, + status=200, + ) + + resp = mock_client.droplets.list_neighbors(droplet_id) + + assert expected == resp + + +@responses.activate +def test_list_associated_resources(mock_client: Client, mock_client_url): + """Mocks the droplets delete operation.""" + + expected = { + "reserved_ips": [{"id": "6186916", "name": "45.55.96.47", "cost": "4.00"}], + "floating_ips": [{"id": "6186916", "name": "45.55.96.47", "cost": "4.00"}], + "snapshots": [ + { + "id": "61486916", + "name": "ubuntu-s-1vcpu-1gb-nyc1-01-1585758823330", + "cost": "0.05", + } + ], + "volumes": [ + { + "id": "ba49449a-7435-11ea-b89e-0a58ac14480f", + "name": "volume-nyc1-01", + "cost": "10.00", + } + ], + "volume_snapshots": [ + { + "id": "edb0478d-7436-11ea-86e6-0a58ac144b91", + "name": "volume-nyc1-01-1585758983629", + "cost": "0.04", + } + ], + } + droplet_id = 3164444 + + responses.add( + responses.GET, + f"{mock_client_url}/v2/droplets/{droplet_id}/destroy_with_associated_resources", + json=expected, + status=200, + ) + + resp = mock_client.droplets.list_associated_resources(droplet_id) + + assert expected == resp From 269dfd5e209295e6b37e5c18023f9701206c918b Mon Sep 17 00:00:00 2001 From: Mohit Date: Wed, 4 Oct 2023 11:00:22 +0530 Subject: [PATCH 2/2] Reverted miss pushed --- DO_OPENAPI_COMMIT_SHA.txt | 2 +- src/pydo/aio/operations/_operations.py | 4208 ++++++------------------ src/pydo/operations/_operations.py | 2451 +------------- 3 files changed, 1006 insertions(+), 5655 deletions(-) diff --git a/DO_OPENAPI_COMMIT_SHA.txt b/DO_OPENAPI_COMMIT_SHA.txt index 9f3ddf3e..7f659175 100644 --- a/DO_OPENAPI_COMMIT_SHA.txt +++ b/DO_OPENAPI_COMMIT_SHA.txt @@ -1 +1 @@ -a5a2b6a +6f7c147 diff --git a/src/pydo/aio/operations/_operations.py b/src/pydo/aio/operations/_operations.py index c1b95574..09d144f7 100644 --- a/src/pydo/aio/operations/_operations.py +++ b/src/pydo/aio/operations/_operations.py @@ -79,10 +79,8 @@ build_databases_add_request, build_databases_add_user_request, build_databases_create_cluster_request, - build_databases_create_kafka_topic_request, build_databases_create_replica_request, build_databases_delete_connection_pool_request, - build_databases_delete_kafka_topic_request, build_databases_delete_online_migration_request, build_databases_delete_request, build_databases_delete_user_request, @@ -93,7 +91,6 @@ build_databases_get_config_request, build_databases_get_connection_pool_request, build_databases_get_eviction_policy_request, - build_databases_get_kafka_topic_request, build_databases_get_migration_status_request, build_databases_get_replica_request, build_databases_get_request, @@ -103,7 +100,6 @@ build_databases_list_clusters_request, build_databases_list_connection_pools_request, build_databases_list_firewall_rules_request, - build_databases_list_kafka_topics_request, build_databases_list_options_request, build_databases_list_replicas_request, build_databases_list_request, @@ -115,7 +111,6 @@ build_databases_update_connection_pool_request, build_databases_update_eviction_policy_request, build_databases_update_firewall_rules_request, - build_databases_update_kafka_topic_request, build_databases_update_maintenance_window_request, build_databases_update_major_version_request, build_databases_update_online_migration_request, @@ -68376,30 +68371,6 @@ async def list_options(self, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { "options": { - "kafka": { - "layouts": [ - { - "num_nodes": 0, # Optional. 
An array of - objects, each indicating the node sizes (otherwise referred to as - slugs) that are available with various numbers of nodes in the - database cluster. Each slugs denotes the node's identifier, CPU, - and RAM (in that order). - "sizes": [ - "str" # Optional. An array of - objects containing the slugs available with various node - counts. - ] - } - ], - "regions": [ - "str" # Optional. An array of strings containing the - names of available regions. - ], - "versions": [ - "str" # Optional. An array of strings containing the - names of available regions. - ] - }, "mongodb": { "layouts": [ { @@ -68498,19 +68469,6 @@ async def list_options(self, **kwargs: Any) -> JSON: } }, "version_availability": { - "kafka": [ - { - "end_of_availability": "str", # Optional. A - timestamp referring to the date when the particular version will no - longer be available for creating new clusters. If null, the version - does not have an end of availability timeline. - "end_of_life": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be - supported. If null, the version does not have an end of life - timeline. - "version": "str" # Optional. The engine version. - } - ], "mongodb": [ { "end_of_availability": "str", # Optional. A @@ -68705,9 +68663,8 @@ async def list_clusters( ], "engine": "str", # A slug representing the database engine used for the cluster. The possible values are: "pg" for PostgreSQL, - "mysql" for MySQL, "redis" for Redis, "mongodb" for MongoDB, and "kafka" - for Kafka. Required. Known values are: "pg", "mysql", "redis", "mongodb", - and "kafka". + "mysql" for MySQL, "redis" for Redis, and "mongodb" for MongoDB. + Required. Known values are: "pg", "mysql", "redis", and "mongodb". "id": "str", # Optional. A unique ID that can be used to identify and reference a database cluster. "maintenance_window": { @@ -68786,10 +68743,6 @@ async def list_clusters( ], "users": [ { - "access_cert": "str", # Optional. Access - certificate for TLS client authentication. (Kafka only). - "access_key": "str", # Optional. Access key - for TLS client authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for @@ -68805,31 +68758,10 @@ async def list_clusters( user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An - identifier for the ACL. Required. - "permission": "str", - # Permission set applied to the ACL. 'consume' allows - for messages to be consumed from the topic. 'produce' - allows for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for - 'produceconsume' as well as any operations to - administer the topic (delete, update). Required. - Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str" # A - regex for matching the topic(s) that this ACL should - apply to. Required. - } - ] - } } ], "version": "str", # Optional. A string representing the @@ -68993,8 +68925,8 @@ async def create_cluster( ], "engine": "str", # A slug representing the database engine used for the cluster. 
The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, "redis" - for Redis, "mongodb" for MongoDB, and "kafka" for Kafka. Required. Known values - are: "pg", "mysql", "redis", "mongodb", and "kafka". + for Redis, and "mongodb" for MongoDB. Required. Known values are: "pg", "mysql", + "redis", and "mongodb". "id": "str", # Optional. A unique ID that can be used to identify and reference a database cluster. "maintenance_window": { @@ -69066,10 +68998,6 @@ async def create_cluster( ], "users": [ { - "access_cert": "str", # Optional. Access certificate for TLS - client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -69082,28 +69010,9 @@ async def create_cluster( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the - database user's role. The value will be either"n"primary" or "normal". - Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier for the - ACL. Required. - "permission": "str", # Permission - set applied to the ACL. 'consume' allows for messages to be - consumed from the topic. 'produce' allows for messages to be - published to the topic. 'produceconsume' allows for both - 'consume' and 'produce' permission. 'admin' allows for - 'produceconsume' as well as any operations to administer the - topic (delete, update). Required. Known values are: "admin", - "consume", "produce", and "produceconsume". - "topic": "str" # A regex for - matching the topic(s) that this ACL should apply to. - Required. - } - ] - } + "role": "str" # Optional. A string representing the database + user's role. The value will be either"n"primary" or "normal". Known + values are: "primary" and "normal". } ], "version": "str", # Optional. A string representing the version of the @@ -69144,8 +69053,366 @@ async def create_cluster( ], "engine": "str", # A slug representing the database engine used for the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, - "redis" for Redis, "mongodb" for MongoDB, and "kafka" for Kafka. Required. - Known values are: "pg", "mysql", "redis", "mongodb", and "kafka". + "redis" for Redis, and "mongodb" for MongoDB. Required. Known values are: + "pg", "mysql", "redis", and "mongodb". + "id": "str", # Optional. A unique ID that can be used to identify + and reference a database cluster. + "maintenance_window": { + "day": "str", # The day of the week on which to apply + maintenance updates. Required. + "description": [ + "str" # Optional. A list of strings, each containing + information about a pending maintenance update. + ], + "hour": "str", # The hour in UTC at which maintenance + updates will be applied in 24 hour format. Required. + "pending": bool # Optional. A boolean value indicating + whether any maintenance is scheduled to be performed in the next window. + }, + "name": "str", # A unique, human-readable name referring to a + database cluster. Required. + "num_nodes": 0, # The number of nodes in the database cluster. + Required. + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. 
The randomly generated + password for the default user. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the database. + }, + "private_network_uuid": "str", # Optional. A string specifying the + UUID of the VPC to which the database cluster will be assigned. If excluded, + the cluster when creating a new database cluster, it will be assigned to your + account's default VPC for the region. + "project_id": "str", # Optional. The ID of the project that the + database cluster is assigned to. If excluded when creating a new database + cluster, it will be assigned to your default project. + "region": "str", # The slug identifier for the region where the + database cluster is located. Required. + "rules": [ + { + "cluster_uuid": "str", # Optional. A unique ID for + the database cluster to which the rule is applied. + "created_at": "2020-02-20 00:00:00", # Optional. A + time value given in ISO8601 combined date and time format that + represents when the firewall rule was created. + "type": "str", # The type of resource that the + firewall rule allows to access the database cluster. Required. Known + values are: "droplet", "k8s", "ip_addr", "tag", and "app". + "uuid": "str", # Optional. A unique ID for the + firewall rule itself. + "value": "str" # The ID of the specific resource, + the name of a tag applied to a group of resources, or the IP address + that the firewall rule allows to access the database cluster. + Required. + } + ], + "semantic_version": "str", # Optional. A string representing the + semantic version of the database engine in use for the cluster. + "size": "str", # The slug identifier representing the size of the + nodes in the database cluster. Required. + "status": "str", # Optional. A string representing the current + status of the database cluster. Known values are: "creating", "online", + "resizing", "migrating", and "forking". + "tags": [ + "str" # Optional. An array of tags that have been applied to + the database cluster. + ], + "users": [ + { + "mysql_settings": { + "auth_plugin": "str" # A string specifying + the authentication method to be used for connections"nto the + MySQL user account. The valid values are + ``mysql_native_password``"nor ``caching_sha2_password``. If + excluded when creating a new user, the"ndefault for the version + of MySQL in use will be used. As of MySQL 8.0, the"ndefault is + ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". + }, + "name": "str", # The name of a database user. + Required. + "password": "str", # Optional. A randomly generated + password for the database user. + "role": "str" # Optional. A string representing the + database user's role. The value will be either"n"primary" or + "normal". Known values are: "primary" and "normal". + } + ], + "version": "str", # Optional. A string representing the version of + the database engine in use for the cluster. + "version_end_of_availability": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be available + for creating new clusters. If null, the version does not have an end of + availability timeline. 
+ "version_end_of_life": "str" # Optional. A timestamp referring to + the date when the particular version will no longer be supported. If null, + the version does not have an end of life timeline. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + async def create_cluster( + self, body: IO, *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: + """Create a New Database Cluster. + + To create a database cluster, send a POST request to ``/v2/databases``. + The response will be a JSON object with a key called ``database``. The value of this will be an + object that contains the standard attributes associated with a database cluster. The initial + value of the database cluster's ``status`` attribute will be ``creating``. When the cluster is + ready to receive traffic, this will transition to ``online``. + The embedded ``connection`` and ``private_connection`` objects will contain the information + needed to access the database cluster. + DigitalOcean managed PostgreSQL and MySQL database clusters take automated daily backups. To + create a new database cluster based on a backup of an existing cluster, send a POST request to + ``/v2/databases``. In addition to the standard database cluster attributes, the JSON body must + include a key named ``backup_restore`` with the name of the original database cluster and the + timestamp of the backup to be restored. Creating a database from a backup is the same as + forking a database in the control panel. + Note: Backups are not supported for Redis clusters. + + :param body: Required. + :type body: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 201 + response == { + "database": { + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the database. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the database + cluster was created. + "db_names": [ + "str" # Optional. An array of strings containing the names + of databases created in the database cluster. 
+ ], + "engine": "str", # A slug representing the database engine used for + the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, + "redis" for Redis, and "mongodb" for MongoDB. Required. Known values are: + "pg", "mysql", "redis", and "mongodb". + "id": "str", # Optional. A unique ID that can be used to identify + and reference a database cluster. + "maintenance_window": { + "day": "str", # The day of the week on which to apply + maintenance updates. Required. + "description": [ + "str" # Optional. A list of strings, each containing + information about a pending maintenance update. + ], + "hour": "str", # The hour in UTC at which maintenance + updates will be applied in 24 hour format. Required. + "pending": bool # Optional. A boolean value indicating + whether any maintenance is scheduled to be performed in the next window. + }, + "name": "str", # A unique, human-readable name referring to a + database cluster. Required. + "num_nodes": 0, # The number of nodes in the database cluster. + Required. + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the database. + }, + "private_network_uuid": "str", # Optional. A string specifying the + UUID of the VPC to which the database cluster will be assigned. If excluded, + the cluster when creating a new database cluster, it will be assigned to your + account's default VPC for the region. + "project_id": "str", # Optional. The ID of the project that the + database cluster is assigned to. If excluded when creating a new database + cluster, it will be assigned to your default project. + "region": "str", # The slug identifier for the region where the + database cluster is located. Required. + "rules": [ + { + "cluster_uuid": "str", # Optional. A unique ID for + the database cluster to which the rule is applied. + "created_at": "2020-02-20 00:00:00", # Optional. A + time value given in ISO8601 combined date and time format that + represents when the firewall rule was created. + "type": "str", # The type of resource that the + firewall rule allows to access the database cluster. Required. Known + values are: "droplet", "k8s", "ip_addr", "tag", and "app". + "uuid": "str", # Optional. A unique ID for the + firewall rule itself. + "value": "str" # The ID of the specific resource, + the name of a tag applied to a group of resources, or the IP address + that the firewall rule allows to access the database cluster. + Required. + } + ], + "semantic_version": "str", # Optional. A string representing the + semantic version of the database engine in use for the cluster. + "size": "str", # The slug identifier representing the size of the + nodes in the database cluster. Required. + "status": "str", # Optional. A string representing the current + status of the database cluster. Known values are: "creating", "online", + "resizing", "migrating", and "forking". + "tags": [ + "str" # Optional. 
An array of tags that have been applied to + the database cluster. + ], + "users": [ + { + "mysql_settings": { + "auth_plugin": "str" # A string specifying + the authentication method to be used for connections"nto the + MySQL user account. The valid values are + ``mysql_native_password``"nor ``caching_sha2_password``. If + excluded when creating a new user, the"ndefault for the version + of MySQL in use will be used. As of MySQL 8.0, the"ndefault is + ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". + }, + "name": "str", # The name of a database user. + Required. + "password": "str", # Optional. A randomly generated + password for the database user. + "role": "str" # Optional. A string representing the + database user's role. The value will be either"n"primary" or + "normal". Known values are: "primary" and "normal". + } + ], + "version": "str", # Optional. A string representing the version of + the database engine in use for the cluster. + "version_end_of_availability": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be available + for creating new clusters. If null, the version does not have an end of + availability timeline. + "version_end_of_life": "str" # Optional. A timestamp referring to + the date when the particular version will no longer be supported. If null, + the version does not have an end of life timeline. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace_async + async def create_cluster(self, body: Union[JSON, IO], **kwargs: Any) -> JSON: + """Create a New Database Cluster. + + To create a database cluster, send a POST request to ``/v2/databases``. + The response will be a JSON object with a key called ``database``. The value of this will be an + object that contains the standard attributes associated with a database cluster. The initial + value of the database cluster's ``status`` attribute will be ``creating``. When the cluster is + ready to receive traffic, this will transition to ``online``. + The embedded ``connection`` and ``private_connection`` objects will contain the information + needed to access the database cluster. + DigitalOcean managed PostgreSQL and MySQL database clusters take automated daily backups. To + create a new database cluster based on a backup of an existing cluster, send a POST request to + ``/v2/databases``. In addition to the standard database cluster attributes, the JSON body must + include a key named ``backup_restore`` with the name of the original database cluster and the + timestamp of the backup to be restored. Creating a database from a backup is the same as + forking a database in the control panel. + Note: Backups are not supported for Redis clusters. + + :param body: Is either a model type or a IO type. Required. + :type body: JSON or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. 
+ :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 201 + response == { + "database": { + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the database. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the database + cluster was created. + "db_names": [ + "str" # Optional. An array of strings containing the names + of databases created in the database cluster. + ], + "engine": "str", # A slug representing the database engine used for + the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, + "redis" for Redis, and "mongodb" for MongoDB. Required. Known values are: + "pg", "mysql", "redis", and "mongodb". "id": "str", # Optional. A unique ID that can be used to identify and reference a database cluster. "maintenance_window": { @@ -69220,10 +69487,6 @@ async def create_cluster( ], "users": [ { - "access_cert": "str", # Optional. Access certificate - for TLS client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS - client authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the @@ -69238,435 +69501,9 @@ async def create_cluster( Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier - for the ACL. Required. - "permission": "str", # - Permission set applied to the ACL. 'consume' allows for - messages to be consumed from the topic. 'produce' allows - for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' - permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, - update). Required. Known values are: "admin", "consume", - "produce", and "produceconsume". - "topic": "str" # A regex for - matching the topic(s) that this ACL should apply to. - Required. - } - ] - } - } - ], - "version": "str", # Optional. A string representing the version of - the database engine in use for the cluster. - "version_end_of_availability": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be available - for creating new clusters. If null, the version does not have an end of - availability timeline. - "version_end_of_life": "str" # Optional. A timestamp referring to - the date when the particular version will no longer be supported. 
If null, - the version does not have an end of life timeline. - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - async def create_cluster( - self, body: IO, *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: - """Create a New Database Cluster. - - To create a database cluster, send a POST request to ``/v2/databases``. - The response will be a JSON object with a key called ``database``. The value of this will be an - object that contains the standard attributes associated with a database cluster. The initial - value of the database cluster's ``status`` attribute will be ``creating``. When the cluster is - ready to receive traffic, this will transition to ``online``. - The embedded ``connection`` and ``private_connection`` objects will contain the information - needed to access the database cluster. - DigitalOcean managed PostgreSQL and MySQL database clusters take automated daily backups. To - create a new database cluster based on a backup of an existing cluster, send a POST request to - ``/v2/databases``. In addition to the standard database cluster attributes, the JSON body must - include a key named ``backup_restore`` with the name of the original database cluster and the - timestamp of the backup to be restored. Creating a database from a backup is the same as - forking a database in the control panel. - Note: Backups are not supported for Redis clusters. - - :param body: Required. - :type body: IO - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 201 - response == { - "database": { - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the database. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the database - cluster was created. - "db_names": [ - "str" # Optional. An array of strings containing the names - of databases created in the database cluster. - ], - "engine": "str", # A slug representing the database engine used for - the cluster. 
The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, - "redis" for Redis, "mongodb" for MongoDB, and "kafka" for Kafka. Required. - Known values are: "pg", "mysql", "redis", "mongodb", and "kafka". - "id": "str", # Optional. A unique ID that can be used to identify - and reference a database cluster. - "maintenance_window": { - "day": "str", # The day of the week on which to apply - maintenance updates. Required. - "description": [ - "str" # Optional. A list of strings, each containing - information about a pending maintenance update. - ], - "hour": "str", # The hour in UTC at which maintenance - updates will be applied in 24 hour format. Required. - "pending": bool # Optional. A boolean value indicating - whether any maintenance is scheduled to be performed in the next window. - }, - "name": "str", # A unique, human-readable name referring to a - database cluster. Required. - "num_nodes": 0, # The number of nodes in the database cluster. - Required. - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the database. - }, - "private_network_uuid": "str", # Optional. A string specifying the - UUID of the VPC to which the database cluster will be assigned. If excluded, - the cluster when creating a new database cluster, it will be assigned to your - account's default VPC for the region. - "project_id": "str", # Optional. The ID of the project that the - database cluster is assigned to. If excluded when creating a new database - cluster, it will be assigned to your default project. - "region": "str", # The slug identifier for the region where the - database cluster is located. Required. - "rules": [ - { - "cluster_uuid": "str", # Optional. A unique ID for - the database cluster to which the rule is applied. - "created_at": "2020-02-20 00:00:00", # Optional. A - time value given in ISO8601 combined date and time format that - represents when the firewall rule was created. - "type": "str", # The type of resource that the - firewall rule allows to access the database cluster. Required. Known - values are: "droplet", "k8s", "ip_addr", "tag", and "app". - "uuid": "str", # Optional. A unique ID for the - firewall rule itself. - "value": "str" # The ID of the specific resource, - the name of a tag applied to a group of resources, or the IP address - that the firewall rule allows to access the database cluster. - Required. - } - ], - "semantic_version": "str", # Optional. A string representing the - semantic version of the database engine in use for the cluster. - "size": "str", # The slug identifier representing the size of the - nodes in the database cluster. Required. - "status": "str", # Optional. A string representing the current - status of the database cluster. Known values are: "creating", "online", - "resizing", "migrating", and "forking". - "tags": [ - "str" # Optional. An array of tags that have been applied to - the database cluster. 
- ], - "users": [ - { - "access_cert": "str", # Optional. Access certificate - for TLS client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS - client authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying - the authentication method to be used for connections"nto the - MySQL user account. The valid values are - ``mysql_native_password``"nor ``caching_sha2_password``. If - excluded when creating a new user, the"ndefault for the version - of MySQL in use will be used. As of MySQL 8.0, the"ndefault is - ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". - }, - "name": "str", # The name of a database user. - Required. - "password": "str", # Optional. A randomly generated - password for the database user. - "role": "str", # Optional. A string representing the - database user's role. The value will be either"n"primary" or - "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier - for the ACL. Required. - "permission": "str", # - Permission set applied to the ACL. 'consume' allows for - messages to be consumed from the topic. 'produce' allows - for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' - permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, - update). Required. Known values are: "admin", "consume", - "produce", and "produceconsume". - "topic": "str" # A regex for - matching the topic(s) that this ACL should apply to. - Required. - } - ] - } - } - ], - "version": "str", # Optional. A string representing the version of - the database engine in use for the cluster. - "version_end_of_availability": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be available - for creating new clusters. If null, the version does not have an end of - availability timeline. - "version_end_of_life": "str" # Optional. A timestamp referring to - the date when the particular version will no longer be supported. If null, - the version does not have an end of life timeline. - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @distributed_trace_async - async def create_cluster(self, body: Union[JSON, IO], **kwargs: Any) -> JSON: - """Create a New Database Cluster. - - To create a database cluster, send a POST request to ``/v2/databases``. - The response will be a JSON object with a key called ``database``. The value of this will be an - object that contains the standard attributes associated with a database cluster. The initial - value of the database cluster's ``status`` attribute will be ``creating``. When the cluster is - ready to receive traffic, this will transition to ``online``. - The embedded ``connection`` and ``private_connection`` objects will contain the information - needed to access the database cluster. 
- DigitalOcean managed PostgreSQL and MySQL database clusters take automated daily backups. To - create a new database cluster based on a backup of an existing cluster, send a POST request to - ``/v2/databases``. In addition to the standard database cluster attributes, the JSON body must - include a key named ``backup_restore`` with the name of the original database cluster and the - timestamp of the backup to be restored. Creating a database from a backup is the same as - forking a database in the control panel. - Note: Backups are not supported for Redis clusters. - - :param body: Is either a model type or a IO type. Required. - :type body: JSON or IO - :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. - Default value is None. - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 201 - response == { - "database": { - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the database. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the database - cluster was created. - "db_names": [ - "str" # Optional. An array of strings containing the names - of databases created in the database cluster. - ], - "engine": "str", # A slug representing the database engine used for - the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, - "redis" for Redis, "mongodb" for MongoDB, and "kafka" for Kafka. Required. - Known values are: "pg", "mysql", "redis", "mongodb", and "kafka". - "id": "str", # Optional. A unique ID that can be used to identify - and reference a database cluster. - "maintenance_window": { - "day": "str", # The day of the week on which to apply - maintenance updates. Required. - "description": [ - "str" # Optional. A list of strings, each containing - information about a pending maintenance update. - ], - "hour": "str", # The hour in UTC at which maintenance - updates will be applied in 24 hour format. Required. - "pending": bool # Optional. A boolean value indicating - whether any maintenance is scheduled to be performed in the next window. - }, - "name": "str", # A unique, human-readable name referring to a - database cluster. Required. - "num_nodes": 0, # The number of nodes in the database cluster. - Required. - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. 
- "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the database. - }, - "private_network_uuid": "str", # Optional. A string specifying the - UUID of the VPC to which the database cluster will be assigned. If excluded, - the cluster when creating a new database cluster, it will be assigned to your - account's default VPC for the region. - "project_id": "str", # Optional. The ID of the project that the - database cluster is assigned to. If excluded when creating a new database - cluster, it will be assigned to your default project. - "region": "str", # The slug identifier for the region where the - database cluster is located. Required. - "rules": [ - { - "cluster_uuid": "str", # Optional. A unique ID for - the database cluster to which the rule is applied. - "created_at": "2020-02-20 00:00:00", # Optional. A - time value given in ISO8601 combined date and time format that - represents when the firewall rule was created. - "type": "str", # The type of resource that the - firewall rule allows to access the database cluster. Required. Known - values are: "droplet", "k8s", "ip_addr", "tag", and "app". - "uuid": "str", # Optional. A unique ID for the - firewall rule itself. - "value": "str" # The ID of the specific resource, - the name of a tag applied to a group of resources, or the IP address - that the firewall rule allows to access the database cluster. - Required. - } - ], - "semantic_version": "str", # Optional. A string representing the - semantic version of the database engine in use for the cluster. - "size": "str", # The slug identifier representing the size of the - nodes in the database cluster. Required. - "status": "str", # Optional. A string representing the current - status of the database cluster. Known values are: "creating", "online", - "resizing", "migrating", and "forking". - "tags": [ - "str" # Optional. An array of tags that have been applied to - the database cluster. - ], - "users": [ - { - "access_cert": "str", # Optional. Access certificate - for TLS client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS - client authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying - the authentication method to be used for connections"nto the - MySQL user account. The valid values are - ``mysql_native_password``"nor ``caching_sha2_password``. If - excluded when creating a new user, the"ndefault for the version - of MySQL in use will be used. As of MySQL 8.0, the"ndefault is - ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". - }, - "name": "str", # The name of a database user. - Required. - "password": "str", # Optional. A randomly generated - password for the database user. - "role": "str", # Optional. A string representing the - database user's role. The value will be either"n"primary" or - "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier - for the ACL. Required. - "permission": "str", # - Permission set applied to the ACL. 'consume' allows for - messages to be consumed from the topic. 'produce' allows - for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' - permission. 
'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, - update). Required. Known values are: "admin", "consume", - "produce", and "produceconsume". - "topic": "str" # A regex for - matching the topic(s) that this ACL should apply to. - Required. - } - ] - } } ], "version": "str", # Optional. A string representing the version of @@ -69826,8 +69663,8 @@ async def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: ], "engine": "str", # A slug representing the database engine used for the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, - "redis" for Redis, "mongodb" for MongoDB, and "kafka" for Kafka. Required. - Known values are: "pg", "mysql", "redis", "mongodb", and "kafka". + "redis" for Redis, and "mongodb" for MongoDB. Required. Known values are: + "pg", "mysql", "redis", and "mongodb". "id": "str", # Optional. A unique ID that can be used to identify and reference a database cluster. "maintenance_window": { @@ -69902,10 +69739,6 @@ async def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: ], "users": [ { - "access_cert": "str", # Optional. Access certificate - for TLS client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS - client authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the @@ -69920,29 +69753,9 @@ async def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier - for the ACL. Required. - "permission": "str", # - Permission set applied to the ACL. 'consume' allows for - messages to be consumed from the topic. 'produce' allows - for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' - permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, - update). Required. Known values are: "admin", "consume", - "produce", and "produceconsume". - "topic": "str" # A regex for - matching the topic(s) that this ACL should apply to. - Required. - } - ] - } } ], "version": "str", # Optional. A string representing the version of @@ -73201,10 +73014,6 @@ async def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: response == { "users": [ { - "access_cert": "str", # Optional. Access certificate for TLS - client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -73217,28 +73026,9 @@ async def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the - database user's role. The value will be either"n"primary" or "normal". - Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier for the - ACL. 
Required. - "permission": "str", # Permission - set applied to the ACL. 'consume' allows for messages to be - consumed from the topic. 'produce' allows for messages to be - published to the topic. 'produceconsume' allows for both - 'consume' and 'produce' permission. 'admin' allows for - 'produceconsume' as well as any operations to administer the - topic (delete, update). Required. Known values are: "admin", - "consume", "produce", and "produceconsume". - "topic": "str" # A regex for - matching the topic(s) that this ACL should apply to. - Required. - } - ] - } + "role": "str" # Optional. A string representing the database + user's role. The value will be either"n"primary" or "normal". Known + values are: "primary" and "normal". } ] } @@ -73344,9 +73134,6 @@ async def add_user( When adding a user to a MySQL cluster, additional options can be configured in the ``mysql_settings`` object. - When adding a user to a Kafka cluster, additional options can be configured in - the ``settings`` object. - The response will be a JSON object with a key called ``user``. The value of this will be an object that contains the standard attributes associated with a database user including its randomly generated password. @@ -73367,10 +73154,6 @@ async def add_user( # JSON input template you can fill out and use as your body input. body = { - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client authentication. - (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user account. The valid values are @@ -73385,34 +73168,14 @@ async def add_user( "readonly": bool, # Optional. For MongoDB clusters, set to ``true`` to create a read-only user."nThis option is not currently supported for other database engines. - "role": "str", # Optional. A string representing the database user's role. + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier for the ACL. Required. - "permission": "str", # Permission set applied to the - ACL. 'consume' allows for messages to be consumed from the topic. - 'produce' allows for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' permission. - 'admin' allows for 'produceconsume' as well as any operations to - administer the topic (delete, update). Required. Known values are: - "admin", "consume", "produce", and "produceconsume". - "topic": "str" # A regex for matching the topic(s) - that this ACL should apply to. Required. - } - ] - } } # response body for status code(s): 201 response == { "user": { - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -73425,27 +73188,9 @@ async def add_user( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the database user's + "role": "str" # Optional. A string representing the database user's role. 
The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier for the ACL. - Required. - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str" # A regex for matching the - topic(s) that this ACL should apply to. Required. - } - ] - } } } # response body for status code(s): 404 @@ -73480,9 +73225,6 @@ async def add_user( When adding a user to a MySQL cluster, additional options can be configured in the ``mysql_settings`` object. - When adding a user to a Kafka cluster, additional options can be configured in - the ``settings`` object. - The response will be a JSON object with a key called ``user``. The value of this will be an object that contains the standard attributes associated with a database user including its randomly generated password. @@ -73504,10 +73246,6 @@ async def add_user( # response body for status code(s): 201 response == { "user": { - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -73520,27 +73258,9 @@ async def add_user( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the database user's + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier for the ACL. - Required. - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str" # A regex for matching the - topic(s) that this ACL should apply to. Required. - } - ] - } } } # response body for status code(s): 404 @@ -73570,9 +73290,6 @@ async def add_user( When adding a user to a MySQL cluster, additional options can be configured in the ``mysql_settings`` object. - When adding a user to a Kafka cluster, additional options can be configured in - the ``settings`` object. - The response will be a JSON object with a key called ``user``. The value of this will be an object that contains the standard attributes associated with a database user including its randomly generated password. @@ -73594,10 +73311,6 @@ async def add_user( # response body for status code(s): 201 response == { "user": { - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. 
Access key for TLS client - authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -73610,27 +73323,9 @@ async def add_user( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the database user's + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier for the ACL. - Required. - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str" # A regex for matching the - topic(s) that this ACL should apply to. Required. - } - ] - } } } # response body for status code(s): 404 @@ -73744,11 +73439,9 @@ async def get_user( The response will be a JSON object with a ``user`` key. This will be set to an object containing the standard database user attributes. - For MySQL clusters, additional options will be contained in the ``mysql_settings`` + For MySQL clusters, additional options will be contained in the mysql_settings object. - For Kafka clusters, additional options will be contained in the ``settings`` object. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str :param username: The name of the database user. Required. @@ -73763,10 +73456,6 @@ async def get_user( # response body for status code(s): 200 response == { "user": { - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -73779,27 +73468,9 @@ async def get_user( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the database user's + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier for the ACL. - Required. - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str" # A regex for matching the - topic(s) that this ACL should apply to. Required. 
- } - ] - } } } # response body for status code(s): 404 @@ -74044,10 +73715,6 @@ async def reset_auth( # response body for status code(s): 200 response == { "user": { - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -74060,27 +73727,9 @@ async def reset_auth( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the database user's + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier for the ACL. - Required. - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str" # A regex for matching the - topic(s) that this ACL should apply to. Required. - } - ] - } } } # response body for status code(s): 404 @@ -74137,10 +73786,6 @@ async def reset_auth( # response body for status code(s): 200 response == { "user": { - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -74153,27 +73798,9 @@ async def reset_auth( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the database user's + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier for the ACL. - Required. - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str" # A regex for matching the - topic(s) that this ACL should apply to. Required. - } - ] - } } } # response body for status code(s): 404 @@ -74228,10 +73855,6 @@ async def reset_auth( # response body for status code(s): 200 response == { "user": { - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). 
"mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -74244,380 +73867,362 @@ async def reset_auth( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the database user's + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier for the ACL. - Required. - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str" # A regex for matching the - topic(s) that this ACL should apply to. Required. - } - ] + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 401: lambda response: ClientAuthenticationError(response=response), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) # type: Optional[str] + cls = kwargs.pop("cls", None) # type: ClsType[JSON] + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IO, bytes)): + _content = body + else: + _json = body + + request = build_databases_reset_auth_request( + database_cluster_uuid=database_cluster_uuid, + username=username, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + map_error( + status_code=response.status_code, response=response, error_map=error_map + ) + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) + + return cast(JSON, deserialized) + + @distributed_trace_async + async def list(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + """List All Databases. + + To list all of the databases in a clusters, send a GET request to + ``/v2/databases/$DATABASE_ID/dbs``. + + The result will be a JSON object with a ``dbs`` key. This will be set to an array + of database objects, each of which will contain the standard database attributes. + + Note: Database management is not supported for Redis clusters. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "dbs": [ + { + "name": "str" # The name of the database. Required. } - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". 
Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - error_map = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 401: lambda response: ClientAuthenticationError(response=response), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) # type: Optional[str] - cls = kwargs.pop("cls", None) # type: ClsType[JSON] - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IO, bytes)): - _content = body - else: - _json = body - - request = build_databases_reset_auth_request( - database_cluster_uuid=database_cluster_uuid, - username=username, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - request.url = self._client.format_url(request.url) # type: ignore - - pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access - request, stream=False, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - map_error( - status_code=response.status_code, response=response, error_map=error_map - ) - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) - - return cast(JSON, deserialized) - - @distributed_trace_async - async def list(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: - """List All Databases. - - To list all of the databases in a clusters, send a GET request to - ``/v2/databases/$DATABASE_ID/dbs``. - - The result will be a JSON object with a ``dbs`` key. This will be set to an array - of database objects, each of which will contain the standard database attributes. - - Note: Database management is not supported for Redis clusters. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. 
code-block:: python - - # response body for status code(s): 200 - response == { - "dbs": [ - { - "name": "str" # The name of the database. Required. - } - ] - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - error_map = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 401: lambda response: ClientAuthenticationError(response=response), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls = kwargs.pop("cls", None) # type: ClsType[JSON] - - request = build_databases_list_request( - database_cluster_uuid=database_cluster_uuid, - headers=_headers, - params=_params, - ) - request.url = self._client.format_url(request.url) # type: ignore - - pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access - request, stream=False, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - map_error( - status_code=response.status_code, response=response, error_map=error_map - ) - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) - - return cast(JSON, deserialized) - - @overload - async def add( - self, - database_cluster_uuid: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> JSON: - """Add a New Database. - - To add a new database to an existing cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/dbs``. - - Note: Database management is not supported for Redis clusters. - - The response will be a JSON object with a key called ``db``. The value of this will be - an object that contains the standard attributes associated with a database. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. 
- :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "name": "str" # The name of the database. Required. - } - - # response body for status code(s): 201 - response == { - "db": { - "name": "str" # The name of the database. Required. - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - async def add( - self, - database_cluster_uuid: str, - body: IO, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> JSON: - """Add a New Database. - - To add a new database to an existing cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/dbs``. - - Note: Database management is not supported for Redis clusters. - - The response will be a JSON object with a key called ``db``. The value of this will be - an object that contains the standard attributes associated with a database. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: IO - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 201 - response == { - "db": { - "name": "str" # The name of the database. Required. - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @distributed_trace_async - async def add( - self, database_cluster_uuid: str, body: Union[JSON, IO], **kwargs: Any - ) -> JSON: - """Add a New Database. - - To add a new database to an existing cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/dbs``. - - Note: Database management is not supported for Redis clusters. - - The response will be a JSON object with a key called ``db``. The value of this will be - an object that contains the standard attributes associated with a database. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Is either a model type or a IO type. 
Required. - :type body: JSON or IO - :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. - Default value is None. - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 201 - response == { - "db": { - "name": "str" # The name of the database. Required. + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 401: lambda response: ClientAuthenticationError(response=response), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls = kwargs.pop("cls", None) # type: ClsType[JSON] + + request = build_databases_list_request( + database_cluster_uuid=database_cluster_uuid, + headers=_headers, + params=_params, + ) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + map_error( + status_code=response.status_code, response=response, error_map=error_map + ) + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) + + return cast(JSON, deserialized) + + @overload + async def add( + self, + database_cluster_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + """Add a New Database. + + To add a new database to an existing cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/dbs``. + + Note: Database management is not supported for Redis clusters. + + The response will be a JSON object with a key called ``db``. 
The value of this will be + an object that contains the standard attributes associated with a database. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str" # The name of the database. Required. + } + + # response body for status code(s): 201 + response == { + "db": { + "name": "str" # The name of the database. Required. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + async def add( + self, + database_cluster_uuid: str, + body: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + """Add a New Database. + + To add a new database to an existing cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/dbs``. + + Note: Database management is not supported for Redis clusters. + + The response will be a JSON object with a key called ``db``. The value of this will be + an object that contains the standard attributes associated with a database. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 201 + response == { + "db": { + "name": "str" # The name of the database. Required. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace_async + async def add( + self, database_cluster_uuid: str, body: Union[JSON, IO], **kwargs: Any + ) -> JSON: + """Add a New Database. + + To add a new database to an existing cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/dbs``. + + Note: Database management is not supported for Redis clusters. + + The response will be a JSON object with a key called ``db``. 
The value of this will be + an object that contains the standard attributes associated with a database. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Is either a model type or a IO type. Required. + :type body: JSON or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 201 + response == { + "db": { + "name": "str" # The name of the database. Required. } } # response body for status code(s): 404 @@ -75963,868 +75568,14 @@ async def delete_connection_pool( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls = kwargs.pop("cls", None) # type: ClsType[Optional[JSON]] - - request = build_databases_delete_connection_pool_request( - database_cluster_uuid=database_cluster_uuid, - pool_name=pool_name, - headers=_headers, - params=_params, - ) - request.url = self._client.format_url(request.url) # type: ignore - - pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access - request, stream=False, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - map_error( - status_code=response.status_code, response=response, error_map=error_map - ) - raise HttpResponseError(response=response) - - deserialized = None - response_headers = {} - if response.status_code == 204: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - - @distributed_trace_async - async def get_eviction_policy( - self, database_cluster_uuid: str, **kwargs: Any - ) -> JSON: - """Retrieve the Eviction Policy for a Redis Cluster. - - To retrieve the configured eviction policy for an existing Redis cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/eviction_policy``. - The response will be a JSON object with an ``eviction_policy`` key. This will be set to a - string representing the eviction policy. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. 
code-block:: python - - # response body for status code(s): 200 - response == { - "eviction_policy": "str" # A string specifying the desired eviction policy - for the Redis cluster."n"n"n* ``noeviction``"" : Don't evict any data, returns - error when memory limit is reached."n* ``allkeys_lru:`` Evict any key, least - recently used (LRU) first."n* ``allkeys_random``"" : Evict keys in a random - order."n* ``volatile_lru``"" : Evict keys with expiration only, least recently - used (LRU) first."n* ``volatile_random``"" : Evict keys with expiration only in a - random order."n* ``volatile_ttl``"" : Evict keys with expiration only, shortest - time-to-live (TTL) first. Required. Known values are: "noeviction", - "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and - "volatile_ttl". - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - error_map = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 401: lambda response: ClientAuthenticationError(response=response), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls = kwargs.pop("cls", None) # type: ClsType[JSON] - - request = build_databases_get_eviction_policy_request( - database_cluster_uuid=database_cluster_uuid, - headers=_headers, - params=_params, - ) - request.url = self._client.format_url(request.url) # type: ignore - - pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access - request, stream=False, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - map_error( - status_code=response.status_code, response=response, error_map=error_map - ) - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) - - return cast(JSON, deserialized) - - @overload - async def update_eviction_policy( - self, - database_cluster_uuid: 
str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> Optional[JSON]: - """Configure the Eviction Policy for a Redis Cluster. - - To configure an eviction policy for an existing Redis cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "eviction_policy": "str" # A string specifying the desired eviction policy - for the Redis cluster."n"n"n* ``noeviction``"" : Don't evict any data, returns - error when memory limit is reached."n* ``allkeys_lru:`` Evict any key, least - recently used (LRU) first."n* ``allkeys_random``"" : Evict keys in a random - order."n* ``volatile_lru``"" : Evict keys with expiration only, least recently - used (LRU) first."n* ``volatile_random``"" : Evict keys with expiration only in a - random order."n* ``volatile_ttl``"" : Evict keys with expiration only, shortest - time-to-live (TTL) first. Required. Known values are: "noeviction", - "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and - "volatile_ttl". - } - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - async def update_eviction_policy( - self, - database_cluster_uuid: str, - body: IO, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> Optional[JSON]: - """Configure the Eviction Policy for a Redis Cluster. - - To configure an eviction policy for an existing Redis cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: IO - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. 
Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @distributed_trace_async - async def update_eviction_policy( - self, database_cluster_uuid: str, body: Union[JSON, IO], **kwargs: Any - ) -> Optional[JSON]: - """Configure the Eviction Policy for a Redis Cluster. - - To configure an eviction policy for an existing Redis cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Is either a model type or a IO type. Required. - :type body: JSON or IO - :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. - Default value is None. - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - error_map = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 401: lambda response: ClientAuthenticationError(response=response), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) # type: Optional[str] - cls = kwargs.pop("cls", None) # type: ClsType[Optional[JSON]] - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IO, bytes)): - _content = body - else: - _json = body - - request = build_databases_update_eviction_policy_request( - database_cluster_uuid=database_cluster_uuid, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - request.url = self._client.format_url(request.url) # type: ignore - - pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access - request, stream=False, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - map_error( - status_code=response.status_code, response=response, error_map=error_map - ) - raise HttpResponseError(response=response) - - deserialized = None - response_headers = {} - if response.status_code == 204: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", 
response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - - @distributed_trace_async - async def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: - """Retrieve the SQL Modes for a MySQL Cluster. - - To retrieve the configured SQL modes for an existing MySQL cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/sql_mode``. - The response will be a JSON object with a ``sql_mode`` key. This will be set to a string - representing the configured SQL modes. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "sql_mode": "str" # A string specifying the configured SQL modes for the - MySQL cluster. Required. - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. 
- } - """ - error_map = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 401: lambda response: ClientAuthenticationError(response=response), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls = kwargs.pop("cls", None) # type: ClsType[JSON] - - request = build_databases_get_sql_mode_request( - database_cluster_uuid=database_cluster_uuid, - headers=_headers, - params=_params, - ) - request.url = self._client.format_url(request.url) # type: ignore - - pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access - request, stream=False, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - map_error( - status_code=response.status_code, response=response, error_map=error_map - ) - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) - - return cast(JSON, deserialized) - - @overload - async def update_sql_mode( - self, - database_cluster_uuid: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> Optional[JSON]: - """Update SQL Mode for a Cluster. - - To configure the SQL modes for an existing MySQL cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 - documentation for a `full list of supported SQL modes - `_. - A successful request will receive a 204 No Content status code with no body in response. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "sql_mode": "str" # A string specifying the configured SQL modes for the - MySQL cluster. Required. - } - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. 
- "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - async def update_sql_mode( - self, - database_cluster_uuid: str, - body: IO, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> Optional[JSON]: - """Update SQL Mode for a Cluster. - - To configure the SQL modes for an existing MySQL cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 - documentation for a `full list of supported SQL modes - `_. - A successful request will receive a 204 No Content status code with no body in response. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: IO - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @distributed_trace_async - async def update_sql_mode( - self, database_cluster_uuid: str, body: Union[JSON, IO], **kwargs: Any - ) -> Optional[JSON]: - """Update SQL Mode for a Cluster. - - To configure the SQL modes for an existing MySQL cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 - documentation for a `full list of supported SQL modes - `_. - A successful request will receive a 204 No Content status code with no body in response. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Is either a model type or a IO type. Required. - :type body: JSON or IO - :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. - Default value is None. - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. 
Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - error_map = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 401: lambda response: ClientAuthenticationError(response=response), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) # type: Optional[str] - cls = kwargs.pop("cls", None) # type: ClsType[Optional[JSON]] - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IO, bytes)): - _content = body - else: - _json = body - - request = build_databases_update_sql_mode_request( - database_cluster_uuid=database_cluster_uuid, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - request.url = self._client.format_url(request.url) # type: ignore - - pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access - request, stream=False, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - map_error( - status_code=response.status_code, response=response, error_map=error_map - ) - raise HttpResponseError(response=response) - - deserialized = None - response_headers = {} - if response.status_code == 204: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - - @overload - async def update_major_version( - self, - database_cluster_uuid: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> Optional[JSON]: - """Upgrade Major Version for a Database. - - To upgrade the major version of a database, send a PUT request to - ``/v2/databases/$DATABASE_ID/upgrade``\ , specifying the target version. - A successful request will receive a 204 No Content status code with no body in response. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. 
- body = { - "version": "str" # Optional. A string representing the version of the - database engine in use for the cluster. - } - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - async def update_major_version( - self, - database_cluster_uuid: str, - body: IO, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> Optional[JSON]: - """Upgrade Major Version for a Database. - - To upgrade the major version of a database, send a PUT request to - ``/v2/databases/$DATABASE_ID/upgrade``\ , specifying the target version. - A successful request will receive a 204 No Content status code with no body in response. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: IO - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @distributed_trace_async - async def update_major_version( - self, database_cluster_uuid: str, body: Union[JSON, IO], **kwargs: Any - ) -> Optional[JSON]: - """Upgrade Major Version for a Database. - - To upgrade the major version of a database, send a PUT request to - ``/v2/databases/$DATABASE_ID/upgrade``\ , specifying the target version. - A successful request will receive a 204 No Content status code with no body in response. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Is either a model type or a IO type. Required. - :type body: JSON or IO - :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. - Default value is None. - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. 
- "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - error_map = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 401: lambda response: ClientAuthenticationError(response=response), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) # type: Optional[str] cls = kwargs.pop("cls", None) # type: ClsType[Optional[JSON]] - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IO, bytes)): - _content = body - else: - _json = body - - request = build_databases_update_major_version_request( + request = build_databases_delete_connection_pool_request( database_cluster_uuid=database_cluster_uuid, - content_type=content_type, - json=_json, - content=_content, + pool_name=pool_name, headers=_headers, params=_params, ) @@ -76877,15 +75628,15 @@ async def update_major_version( return deserialized @distributed_trace_async - async def list_kafka_topics( + async def get_eviction_policy( self, database_cluster_uuid: str, **kwargs: Any ) -> JSON: - """List Topics for a Kafka Cluster. - - To list all of a Kafka cluster's topics, send a GET request to - ``/v2/databases/$DATABASE_ID/topics``. + """Retrieve the Eviction Policy for a Redis Cluster. - The result will be a JSON object with a ``topics`` key. + To retrieve the configured eviction policy for an existing Redis cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/eviction_policy``. + The response will be a JSON object with an ``eviction_policy`` key. This will be set to a + string representing the eviction policy. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -76898,17 +75649,16 @@ async def list_kafka_topics( # response body for status code(s): 200 response == { - "topics": [ - { - "name": "str", # Optional. The name of the Kafka topic. - "partition_count": 0, # Optional. The number of partitions - available for the topic. On update, this value can only be increased. - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. - Known values are: "active", "configuring", "deleting", and "unknown". - } - ] + "eviction_policy": "str" # A string specifying the desired eviction policy + for the Redis cluster."n"n"n* ``noeviction``"" : Don't evict any data, returns + error when memory limit is reached."n* ``allkeys_lru:`` Evict any key, least + recently used (LRU) first."n* ``allkeys_random``"" : Evict keys in a random + order."n* ``volatile_lru``"" : Evict keys with expiration only, least recently + used (LRU) first."n* ``volatile_random``"" : Evict keys with expiration only in a + random order."n* ``volatile_ttl``"" : Evict keys with expiration only, shortest + time-to-live (TTL) first. Required. Known values are: "noeviction", + "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and + "volatile_ttl". 
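The eviction-policy wrappers added in this hunk pair a GET that returns an ``eviction_policy`` key with a PUT that returns no body on success. A minimal sketch of calling them, under the same assumptions as above (``pydo.aio`` client construction, token env var, and cluster UUID are placeholders):

    import asyncio
    import os

    from pydo.aio import Client  # async client; construction assumed, not part of this patch


    async def main() -> None:
        client = Client(token=os.environ["DIGITALOCEAN_TOKEN"])  # placeholder token handling
        cluster_uuid = "9cc10173-e9ea-4176-9dbc-a4cee4c4ff30"    # hypothetical Redis cluster ID

        # GET /v2/databases/$DATABASE_ID/eviction_policy
        resp = await client.databases.get_eviction_policy(cluster_uuid)
        print(resp["eviction_policy"])  # e.g. "noeviction"

        # PUT /v2/databases/$DATABASE_ID/eviction_policy -- a 204 response deserializes to None.
        await client.databases.update_eviction_policy(
            cluster_uuid, {"eviction_policy": "allkeys_lru"}
        )


    asyncio.run(main())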
} # response body for status code(s): 404 response == { @@ -76936,7 +75686,7 @@ async def list_kafka_topics( cls = kwargs.pop("cls", None) # type: ClsType[JSON] - request = build_databases_list_kafka_topics_request( + request = build_databases_get_eviction_policy_request( database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, @@ -76994,30 +75744,28 @@ async def list_kafka_topics( return cast(JSON, deserialized) @overload - async def create_kafka_topic( + async def update_eviction_policy( self, database_cluster_uuid: str, - body: Optional[JSON] = None, + body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: - """Create Topic for a Kafka Cluster. - - To create a topic attached to a Kafka cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/topics``. + ) -> Optional[JSON]: + """Configure the Eviction Policy for a Redis Cluster. - The result will be a JSON object with a ``topic`` key. + To configure an eviction policy for an existing Redis cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Default value is None. + :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -77025,231 +75773,18 @@ async def create_kafka_topic( # JSON input template you can fill out and use as your body input. body = { - "config": { - "cleanup_policy": "delete", # Optional. Default value is "delete". - The cleanup_policy sets the retention policy to use on log segments. 'delete' - will discard old segments when retention time/size limits are reached. - 'compact' will enable log compaction, resulting in retention of the latest - value for each key. Known values are: "delete", "compact", and - "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the topic. - Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value is - 86400000. The delete_retention_ms specifies how long (in ms) to retain delete - tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is 60000. - The file_delete_delay_ms specifies the time (in ms) to wait before deleting a - file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default value is - 9223372036854776000. The flush_messages specifies the number of messages to - accumulate on a log partition before messages are flushed to disk. - "flush_ms": 9223372036854776000, # Optional. Default value is - 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a - message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is 4096. The - index_interval_bytes specifies the number of bytes between entries being - added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum - amount of time (in ms) that a message will remain uncompacted. 
This is only - applicable if the logs are have compaction enabled. - "max_message_bytes": 1048588, # Optional. Default value is 1048588. - The max_messages_bytes specifies the largest record batch size (in bytes) - that can be sent to the server. This is calculated after compression if - compression is enabled. - "message_down_conversion_enable": True, # Optional. Default value is - True. The message_down_conversion_enable specifies whether down-conversion of - message formats is enabled to satisfy consumer requests. When 'false', the - broker will not perform conversion for consumers expecting older message - formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for - consume requests from these older clients. - "message_format_version": "3.0-IV1", # Optional. Default value is - "3.0-IV1". The message_format_version specifies the message format version - used by the broker to append messages to the logs. The value of this setting - is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By - setting a particular message format version, all existing messages on disk - must be smaller or equal to the specified version. Known values are: "0.8.0", - "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", - "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", - "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", - "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", - "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1", - "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default value - is "create_time". The message_timestamp_type specifies whether to use the - message create time or log append time as the timestamp on a message. Known - values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5. - The min_cleanable_dirty_ratio specifies the frequency of log compaction (if - enabled) in relation to duplicates present in the logs. For example, at 0.5, - at most 50% of the log could be duplicates before compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. The - min_compaction_lag_ms specifies the minimum time (in ms) that a message will - remain uncompacted in the log. Only relevant if log compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. The - min_insync_replicas specifies the number of replicas that must ACK a write - for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. The - preallocate specifies whether a file should be preallocated on disk when - creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is 604800000. - The retention_ms specifies the maximum amount of time (in ms) to keep a - message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is 209715200. - The segment_bytes specifies the maximum size of a single log file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. 
The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is 604800000. The - segment_ms specifies the period of time after which the log will be forced to - roll if the segment file isn't full. This ensures that retention can delete - or compact old data. - "unclean_leader_election_enable": False # Optional. Default value is - False. Whether unclean_leader_election_enable specifies whether to allow - replicas that are not insync to be elected as leaders as a last resort. This - may result in data loss since those leaders are not insync. - }, - "name": "str", # Optional. The name of the Kafka topic. - "partition_count": 0, # Optional. The number of partitions available for the - topic. On update, this value can only be increased. - "replication_factor": 0 # Optional. The number of nodes to replicate data - across the cluster. + "eviction_policy": "str" # A string specifying the desired eviction policy + for the Redis cluster."n"n"n* ``noeviction``"" : Don't evict any data, returns + error when memory limit is reached."n* ``allkeys_lru:`` Evict any key, least + recently used (LRU) first."n* ``allkeys_random``"" : Evict keys in a random + order."n* ``volatile_lru``"" : Evict keys with expiration only, least recently + used (LRU) first."n* ``volatile_random``"" : Evict keys with expiration only in a + random order."n* ``volatile_ttl``"" : Evict keys with expiration only, shortest + time-to-live (TTL) first. Required. Known values are: "noeviction", + "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and + "volatile_ttl". } - # response body for status code(s): 201 - response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. 
The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. 
The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. - }, - "name": "str", # The name of the Kafka topic. Required. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -77264,163 +75799,33 @@ async def create_kafka_topic( """ @overload - async def create_kafka_topic( + async def update_eviction_policy( self, database_cluster_uuid: str, - body: Optional[IO] = None, + body: IO, *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: - """Create Topic for a Kafka Cluster. - - To create a topic attached to a Kafka cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/topics``. + ) -> Optional[JSON]: + """Configure the Eviction Policy for a Redis Cluster. - The result will be a JSON object with a ``topic`` key. + To configure an eviction policy for an existing Redis cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Default value is None. + :param body: Required. :type body: IO :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 201 - response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. 
Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. 
Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. - }, - "name": "str", # The name of the Kafka topic. Required. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -77435,161 +75840,28 @@ async def create_kafka_topic( """ @distributed_trace_async - async def create_kafka_topic( - self, - database_cluster_uuid: str, - body: Optional[Union[JSON, IO]] = None, - **kwargs: Any - ) -> JSON: - """Create Topic for a Kafka Cluster. - - To create a topic attached to a Kafka cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/topics``. + async def update_eviction_policy( + self, database_cluster_uuid: str, body: Union[JSON, IO], **kwargs: Any + ) -> Optional[JSON]: + """Configure the Eviction Policy for a Redis Cluster. - The result will be a JSON object with a ``topic`` key. 
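The removed docstrings in this region document the Kafka topic wrappers (create, get, update) that the regeneration relocates. Assuming those operations remain available on the ``databases`` group as the removed signatures suggest, a call would look roughly like the sketch below; the client construction, token env var, cluster UUID, topic name, and topic settings are all placeholders:

    import asyncio
    import os

    from pydo.aio import Client  # async client; construction assumed, not part of this patch


    async def main() -> None:
        client = Client(token=os.environ["DIGITALOCEAN_TOKEN"])  # placeholder token handling
        cluster_uuid = "9cc10173-e9ea-4176-9dbc-a4cee4c4ff30"    # hypothetical Kafka cluster ID

        # POST /v2/databases/$DATABASE_ID/topics -- body mirrors the template in the removed docstring.
        created = await client.databases.create_kafka_topic(
            cluster_uuid,
            {"name": "customer-events", "partition_count": 3, "replication_factor": 2},
        )
        print(created["topic"]["state"])  # e.g. "configuring"

        # GET /v2/databases/$DATABASE_ID/topics/$TOPIC_NAME -- returns a JSON object with a "topic" key.
        topic = await client.databases.get_kafka_topic(cluster_uuid, "customer-events")
        print(topic["topic"]["partitions"])


    asyncio.run(main())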
+ To configure an eviction policy for an existing Redis cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Is either a model type or a IO type. Default value is None. + :param body: Is either a model type or a IO type. Required. :type body: JSON or IO :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. Default value is None. :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 201 - response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". 
The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. - }, - "name": "str", # The name of the Kafka topic. Required. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. 
- } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -77617,7 +75889,7 @@ async def create_kafka_topic( content_type = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) # type: Optional[str] - cls = kwargs.pop("cls", None) # type: ClsType[JSON] + cls = kwargs.pop("cls", None) # type: ClsType[Optional[JSON]] content_type = content_type or "application/json" _json = None @@ -77625,12 +75897,9 @@ async def create_kafka_topic( if isinstance(body, (IO, bytes)): _content = body else: - if body is not None: - _json = body - else: - _json = None + _json = body - request = build_databases_create_kafka_topic_request( + request = build_databases_update_eviction_policy_request( database_cluster_uuid=database_cluster_uuid, content_type=content_type, json=_json, @@ -77646,14 +75915,15 @@ async def create_kafka_topic( response = pipeline_response.http_response - if response.status_code not in [201, 404]: + if response.status_code not in [204, 404]: map_error( status_code=response.status_code, response=response, error_map=error_map ) raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 201: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -77664,11 +75934,6 @@ async def create_kafka_topic( "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -77686,25 +75951,21 @@ async def create_kafka_topic( deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) + return cls(pipeline_response, deserialized, response_headers) - return cast(JSON, deserialized) + return deserialized @distributed_trace_async - async def get_kafka_topic( - self, database_cluster_uuid: str, topic_name: str, **kwargs: Any - ) -> JSON: - """Get Topic for a Kafka Cluster. - - To retrieve a given topic by name from the set of a Kafka cluster's topics, - send a GET request to ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + async def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + """Retrieve the SQL Modes for a MySQL Cluster. - The result will be a JSON object with a ``topic`` key. + To retrieve the configured SQL modes for an existing MySQL cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/sql_mode``. + The response will be a JSON object with a ``sql_mode`` key. This will be set to a string + representing the configured SQL modes. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param topic_name: The name used to identify the Kafka topic. 
Required. - :type topic_name: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -77714,131 +75975,8 @@ async def get_kafka_topic( # response body for status code(s): 200 response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. 
Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. - }, - "name": "str", # The name of the Kafka topic. Required. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. 
- } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } + "sql_mode": "str" # A string specifying the configured SQL modes for the + MySQL cluster. Required. } # response body for status code(s): 404 response == { @@ -77866,9 +76004,8 @@ async def get_kafka_topic( cls = kwargs.pop("cls", None) # type: ClsType[JSON] - request = build_databases_get_kafka_topic_request( + request = build_databases_get_sql_mode_request( database_cluster_uuid=database_cluster_uuid, - topic_name=topic_name, headers=_headers, params=_params, ) @@ -77925,33 +76062,31 @@ async def get_kafka_topic( return cast(JSON, deserialized) @overload - async def update_kafka_topic( + async def update_sql_mode( self, database_cluster_uuid: str, - topic_name: str, - body: Optional[JSON] = None, + body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: - """Update Topic for a Kafka Cluster. - - To update a topic attached to a Kafka cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + ) -> Optional[JSON]: + """Update SQL Mode for a Cluster. - The result will be a JSON object with a ``topic`` key. + To configure the SQL modes for an existing MySQL cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 + documentation for a `full list of supported SQL modes + `_. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param topic_name: The name used to identify the Kafka topic. Required. - :type topic_name: str - :param body: Default value is None. + :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -77959,240 +76094,10 @@ async def update_kafka_topic( # JSON input template you can fill out and use as your body input. body = { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. 
The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. 
- The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. - }, - "name": "str", # Optional. The name of the Kafka topic. - "partition_count": 0, # Optional. The number of partitions available - for the topic. On update, this value can only be increased. - "replication_factor": 0 # Optional. The number of nodes to replicate - data across the cluster. - } + "sql_mode": "str" # A string specifying the configured SQL modes for the + MySQL cluster. Required. } - # response body for status code(s): 200 - response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. 
The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. 
The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. - }, - "name": "str", # The name of the Kafka topic. Required. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -78207,166 +76112,36 @@ async def update_kafka_topic( """ @overload - async def update_kafka_topic( + async def update_sql_mode( self, database_cluster_uuid: str, - topic_name: str, - body: Optional[IO] = None, + body: IO, *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: - """Update Topic for a Kafka Cluster. - - To update a topic attached to a Kafka cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + ) -> Optional[JSON]: + """Update SQL Mode for a Cluster. - The result will be a JSON object with a ``topic`` key. + To configure the SQL modes for an existing MySQL cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 + documentation for a `full list of supported SQL modes + `_. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param topic_name: The name used to identify the Kafka topic. Required. - :type topic_name: str - :param body: Default value is None. + :param body: Required. :type body: IO :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 - response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 
'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". 
- "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. - }, - "name": "str", # The name of the Kafka topic. Required. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -78381,164 +76156,31 @@ async def update_kafka_topic( """ @distributed_trace_async - async def update_kafka_topic( - self, - database_cluster_uuid: str, - topic_name: str, - body: Optional[Union[JSON, IO]] = None, - **kwargs: Any - ) -> JSON: - """Update Topic for a Kafka Cluster. - - To update a topic attached to a Kafka cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. 
+ async def update_sql_mode( + self, database_cluster_uuid: str, body: Union[JSON, IO], **kwargs: Any + ) -> Optional[JSON]: + """Update SQL Mode for a Cluster. - The result will be a JSON object with a ``topic`` key. + To configure the SQL modes for an existing MySQL cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 + documentation for a `full list of supported SQL modes + `_. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param topic_name: The name used to identify the Kafka topic. Required. - :type topic_name: str - :param body: Is either a model type or a IO type. Default value is None. + :param body: Is either a model type or a IO type. Required. :type body: JSON or IO :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. Default value is None. :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 - response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. 
The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. 
Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. - }, - "name": "str", # The name of the Kafka topic. Required. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -78566,7 +76208,7 @@ async def update_kafka_topic( content_type = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) # type: Optional[str] - cls = kwargs.pop("cls", None) # type: ClsType[JSON] + cls = kwargs.pop("cls", None) # type: ClsType[Optional[JSON]] content_type = content_type or "application/json" _json = None @@ -78574,14 +76216,10 @@ async def update_kafka_topic( if isinstance(body, (IO, bytes)): _content = body else: - if body is not None: - _json = body - else: - _json = None + _json = body - request = build_databases_update_kafka_topic_request( + request = build_databases_update_sql_mode_request( database_cluster_uuid=database_cluster_uuid, - topic_name=topic_name, content_type=content_type, json=_json, content=_content, @@ -78596,14 +76234,15 @@ async def update_kafka_topic( response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [204, 404]: map_error( status_code=response.status_code, response=response, error_map=error_map ) raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 200: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -78614,11 +76253,6 @@ async def update_kafka_topic( "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -78636,26 +76270,117 @@ async def update_kafka_topic( deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) + return cls(pipeline_response, deserialized, response_headers) - return cast(JSON, deserialized) + return deserialized - @distributed_trace_async - async def delete_kafka_topic( - self, database_cluster_uuid: str, topic_name: str, **kwargs: Any + @overload + async def update_major_version( + self, + database_cluster_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any ) -> Optional[JSON]: - """Delete Topic for a Kafka Cluster. + """Upgrade Major Version for a Database. 
- To delete a single topic within a Kafka cluster, send a DELETE request - to ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + To upgrade the major version of a database, send a PUT request to + ``/v2/databases/$DATABASE_ID/upgrade``\ , specifying the target version. + A successful request will receive a 204 No Content status code with no body in response. - A status of 204 will be given. This indicates that the request was - processed successfully, but that no response body is needed. + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "version": "str" # Optional. A string representing the version of the + database engine in use for the cluster. + } + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + async def update_major_version( + self, + database_cluster_uuid: str, + body: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> Optional[JSON]: + """Upgrade Major Version for a Database. + + To upgrade the major version of a database, send a PUT request to + ``/v2/databases/$DATABASE_ID/upgrade``\ , specifying the target version. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param topic_name: The name used to identify the Kafka topic. Required. - :type topic_name: str + :param body: Required. + :type body: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + + @distributed_trace_async + async def update_major_version( + self, database_cluster_uuid: str, body: Union[JSON, IO], **kwargs: Any + ) -> Optional[JSON]: + """Upgrade Major Version for a Database. + + To upgrade the major version of a database, send a PUT request to + ``/v2/databases/$DATABASE_ID/upgrade``\ , specifying the target version. + A successful request will receive a 204 No Content status code with no body in response. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Is either a model type or a IO type. Required. + :type body: JSON or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str :return: JSON object or None :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: @@ -78684,14 +76409,27 @@ async def delete_kafka_topic( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} + content_type = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) # type: Optional[str] cls = kwargs.pop("cls", None) # type: ClsType[Optional[JSON]] - request = build_databases_delete_kafka_topic_request( + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IO, bytes)): + _content = body + else: + _json = body + + request = build_databases_update_major_version_request( database_cluster_uuid=database_cluster_uuid, - topic_name=topic_name, + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) diff --git a/src/pydo/operations/_operations.py b/src/pydo/operations/_operations.py index 1ddf3015..bea0c691 100644 --- a/src/pydo/operations/_operations.py +++ b/src/pydo/operations/_operations.py @@ -2368,138 +2368,6 @@ def build_databases_update_major_version_request( return HttpRequest(method="PUT", url=_url, headers=_headers, **kwargs) -def build_databases_list_kafka_topics_request( - database_cluster_uuid: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/v2/databases/{database_cluster_uuid}/topics" - path_format_arguments = { - "database_cluster_uuid": _SERIALIZER.url( - "database_cluster_uuid", database_cluster_uuid, "str" - ), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs) - - -def build_databases_create_kafka_topic_request( - database_cluster_uuid: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - - content_type = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) # type: Optional[str] - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/v2/databases/{database_cluster_uuid}/topics" - path_format_arguments = { - "database_cluster_uuid": _SERIALIZER.url( - "database_cluster_uuid", database_cluster_uuid, "str" - ), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header( - "content_type", 
content_type, "str" - ) - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) - - -def build_databases_get_kafka_topic_request( - database_cluster_uuid: str, topic_name: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/v2/databases/{database_cluster_uuid}/topics/{topic_name}" - path_format_arguments = { - "database_cluster_uuid": _SERIALIZER.url( - "database_cluster_uuid", database_cluster_uuid, "str" - ), - "topic_name": _SERIALIZER.url("topic_name", topic_name, "str"), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs) - - -def build_databases_update_kafka_topic_request( - database_cluster_uuid: str, topic_name: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - - content_type = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) # type: Optional[str] - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/v2/databases/{database_cluster_uuid}/topics/{topic_name}" - path_format_arguments = { - "database_cluster_uuid": _SERIALIZER.url( - "database_cluster_uuid", database_cluster_uuid, "str" - ), - "topic_name": _SERIALIZER.url("topic_name", topic_name, "str"), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header( - "content_type", content_type, "str" - ) - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="PUT", url=_url, headers=_headers, **kwargs) - - -def build_databases_delete_kafka_topic_request( - database_cluster_uuid: str, topic_name: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/v2/databases/{database_cluster_uuid}/topics/{topic_name}" - path_format_arguments = { - "database_cluster_uuid": _SERIALIZER.url( - "database_cluster_uuid", database_cluster_uuid, "str" - ), - "topic_name": _SERIALIZER.url("topic_name", topic_name, "str"), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs) - - def build_domains_list_request( *, per_page: int = 20, page: int = 1, **kwargs: Any ) -> HttpRequest: @@ -75159,30 +75027,6 @@ def list_options(self, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { "options": { - "kafka": { - "layouts": [ - { - "num_nodes": 0, # Optional. An array of - objects, each indicating the node sizes (otherwise referred to as - slugs) that are available with various numbers of nodes in the - database cluster. Each slugs denotes the node's identifier, CPU, - and RAM (in that order). - "sizes": [ - "str" # Optional. An array of - objects containing the slugs available with various node - counts. - ] - } - ], - "regions": [ - "str" # Optional. An array of strings containing the - names of available regions. - ], - "versions": [ - "str" # Optional. 
An array of strings containing the - names of available regions. - ] - }, "mongodb": { "layouts": [ { @@ -75281,19 +75125,6 @@ def list_options(self, **kwargs: Any) -> JSON: } }, "version_availability": { - "kafka": [ - { - "end_of_availability": "str", # Optional. A - timestamp referring to the date when the particular version will no - longer be available for creating new clusters. If null, the version - does not have an end of availability timeline. - "end_of_life": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be - supported. If null, the version does not have an end of life - timeline. - "version": "str" # Optional. The engine version. - } - ], "mongodb": [ { "end_of_availability": "str", # Optional. A @@ -75486,9 +75317,8 @@ def list_clusters(self, *, tag_name: Optional[str] = None, **kwargs: Any) -> JSO ], "engine": "str", # A slug representing the database engine used for the cluster. The possible values are: "pg" for PostgreSQL, - "mysql" for MySQL, "redis" for Redis, "mongodb" for MongoDB, and "kafka" - for Kafka. Required. Known values are: "pg", "mysql", "redis", "mongodb", - and "kafka". + "mysql" for MySQL, "redis" for Redis, and "mongodb" for MongoDB. + Required. Known values are: "pg", "mysql", "redis", and "mongodb". "id": "str", # Optional. A unique ID that can be used to identify and reference a database cluster. "maintenance_window": { @@ -75567,10 +75397,6 @@ def list_clusters(self, *, tag_name: Optional[str] = None, **kwargs: Any) -> JSO ], "users": [ { - "access_cert": "str", # Optional. Access - certificate for TLS client authentication. (Kafka only). - "access_key": "str", # Optional. Access key - for TLS client authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for @@ -75586,31 +75412,10 @@ def list_clusters(self, *, tag_name: Optional[str] = None, **kwargs: Any) -> JSO user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An - identifier for the ACL. Required. - "permission": "str", - # Permission set applied to the ACL. 'consume' allows - for messages to be consumed from the topic. 'produce' - allows for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for - 'produceconsume' as well as any operations to - administer the topic (delete, update). Required. - Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str" # A - regex for matching the topic(s) that this ACL should - apply to. Required. - } - ] - } } ], "version": "str", # Optional. A string representing the @@ -75774,8 +75579,8 @@ def create_cluster( ], "engine": "str", # A slug representing the database engine used for the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, "redis" - for Redis, "mongodb" for MongoDB, and "kafka" for Kafka. Required. Known values - are: "pg", "mysql", "redis", "mongodb", and "kafka". + for Redis, and "mongodb" for MongoDB. Required. Known values are: "pg", "mysql", + "redis", and "mongodb". "id": "str", # Optional. A unique ID that can be used to identify and reference a database cluster. 
"maintenance_window": { @@ -75847,10 +75652,6 @@ def create_cluster( ], "users": [ { - "access_cert": "str", # Optional. Access certificate for TLS - client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -75863,28 +75664,9 @@ def create_cluster( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the - database user's role. The value will be either"n"primary" or "normal". - Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier for the - ACL. Required. - "permission": "str", # Permission - set applied to the ACL. 'consume' allows for messages to be - consumed from the topic. 'produce' allows for messages to be - published to the topic. 'produceconsume' allows for both - 'consume' and 'produce' permission. 'admin' allows for - 'produceconsume' as well as any operations to administer the - topic (delete, update). Required. Known values are: "admin", - "consume", "produce", and "produceconsume". - "topic": "str" # A regex for - matching the topic(s) that this ACL should apply to. - Required. - } - ] - } + "role": "str" # Optional. A string representing the database + user's role. The value will be either"n"primary" or "normal". Known + values are: "primary" and "normal". } ], "version": "str", # Optional. A string representing the version of the @@ -75925,8 +75707,8 @@ def create_cluster( ], "engine": "str", # A slug representing the database engine used for the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, - "redis" for Redis, "mongodb" for MongoDB, and "kafka" for Kafka. Required. - Known values are: "pg", "mysql", "redis", "mongodb", and "kafka". + "redis" for Redis, and "mongodb" for MongoDB. Required. Known values are: + "pg", "mysql", "redis", and "mongodb". "id": "str", # Optional. A unique ID that can be used to identify and reference a database cluster. "maintenance_window": { @@ -76001,10 +75783,6 @@ def create_cluster( ], "users": [ { - "access_cert": "str", # Optional. Access certificate - for TLS client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS - client authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the @@ -76019,29 +75797,9 @@ def create_cluster( Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier - for the ACL. Required. - "permission": "str", # - Permission set applied to the ACL. 'consume' allows for - messages to be consumed from the topic. 'produce' allows - for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' - permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, - update). Required. Known values are: "admin", "consume", - "produce", and "produceconsume". 
- "topic": "str" # A regex for - matching the topic(s) that this ACL should apply to. - Required. - } - ] - } } ], "version": "str", # Optional. A string representing the version of @@ -76129,8 +75887,8 @@ def create_cluster( ], "engine": "str", # A slug representing the database engine used for the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, - "redis" for Redis, "mongodb" for MongoDB, and "kafka" for Kafka. Required. - Known values are: "pg", "mysql", "redis", "mongodb", and "kafka". + "redis" for Redis, and "mongodb" for MongoDB. Required. Known values are: + "pg", "mysql", "redis", and "mongodb". "id": "str", # Optional. A unique ID that can be used to identify and reference a database cluster. "maintenance_window": { @@ -76205,10 +75963,6 @@ def create_cluster( ], "users": [ { - "access_cert": "str", # Optional. Access certificate - for TLS client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS - client authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the @@ -76223,29 +75977,9 @@ def create_cluster( Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier - for the ACL. Required. - "permission": "str", # - Permission set applied to the ACL. 'consume' allows for - messages to be consumed from the topic. 'produce' allows - for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' - permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, - update). Required. Known values are: "admin", "consume", - "produce", and "produceconsume". - "topic": "str" # A regex for - matching the topic(s) that this ACL should apply to. - Required. - } - ] - } } ], "version": "str", # Optional. A string representing the version of @@ -76331,8 +76065,8 @@ def create_cluster(self, body: Union[JSON, IO], **kwargs: Any) -> JSON: ], "engine": "str", # A slug representing the database engine used for the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, - "redis" for Redis, "mongodb" for MongoDB, and "kafka" for Kafka. Required. - Known values are: "pg", "mysql", "redis", "mongodb", and "kafka". + "redis" for Redis, and "mongodb" for MongoDB. Required. Known values are: + "pg", "mysql", "redis", and "mongodb". "id": "str", # Optional. A unique ID that can be used to identify and reference a database cluster. "maintenance_window": { @@ -76407,10 +76141,6 @@ def create_cluster(self, body: Union[JSON, IO], **kwargs: Any) -> JSON: ], "users": [ { - "access_cert": "str", # Optional. Access certificate - for TLS client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS - client authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the @@ -76425,29 +76155,9 @@ def create_cluster(self, body: Union[JSON, IO], **kwargs: Any) -> JSON: Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. 
A string representing the + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier - for the ACL. Required. - "permission": "str", # - Permission set applied to the ACL. 'consume' allows for - messages to be consumed from the topic. 'produce' allows - for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' - permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, - update). Required. Known values are: "admin", "consume", - "produce", and "produceconsume". - "topic": "str" # A regex for - matching the topic(s) that this ACL should apply to. - Required. - } - ] - } } ], "version": "str", # Optional. A string representing the version of @@ -76607,8 +76317,8 @@ def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: ], "engine": "str", # A slug representing the database engine used for the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, - "redis" for Redis, "mongodb" for MongoDB, and "kafka" for Kafka. Required. - Known values are: "pg", "mysql", "redis", "mongodb", and "kafka". + "redis" for Redis, and "mongodb" for MongoDB. Required. Known values are: + "pg", "mysql", "redis", and "mongodb". "id": "str", # Optional. A unique ID that can be used to identify and reference a database cluster. "maintenance_window": { @@ -76683,10 +76393,6 @@ def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: ], "users": [ { - "access_cert": "str", # Optional. Access certificate - for TLS client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS - client authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the @@ -76701,29 +76407,9 @@ def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier - for the ACL. Required. - "permission": "str", # - Permission set applied to the ACL. 'consume' allows for - messages to be consumed from the topic. 'produce' allows - for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' - permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, - update). Required. Known values are: "admin", "consume", - "produce", and "produceconsume". - "topic": "str" # A regex for - matching the topic(s) that this ACL should apply to. - Required. - } - ] - } } ], "version": "str", # Optional. A string representing the version of @@ -79978,10 +79664,6 @@ def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: response == { "users": [ { - "access_cert": "str", # Optional. Access certificate for TLS - client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). 
"mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -79994,28 +79676,9 @@ def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the - database user's role. The value will be either"n"primary" or "normal". - Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier for the - ACL. Required. - "permission": "str", # Permission - set applied to the ACL. 'consume' allows for messages to be - consumed from the topic. 'produce' allows for messages to be - published to the topic. 'produceconsume' allows for both - 'consume' and 'produce' permission. 'admin' allows for - 'produceconsume' as well as any operations to administer the - topic (delete, update). Required. Known values are: "admin", - "consume", "produce", and "produceconsume". - "topic": "str" # A regex for - matching the topic(s) that this ACL should apply to. - Required. - } - ] - } + "role": "str" # Optional. A string representing the database + user's role. The value will be either"n"primary" or "normal". Known + values are: "primary" and "normal". } ] } @@ -80121,9 +79784,6 @@ def add_user( When adding a user to a MySQL cluster, additional options can be configured in the ``mysql_settings`` object. - When adding a user to a Kafka cluster, additional options can be configured in - the ``settings`` object. - The response will be a JSON object with a key called ``user``. The value of this will be an object that contains the standard attributes associated with a database user including its randomly generated password. @@ -80144,10 +79804,6 @@ def add_user( # JSON input template you can fill out and use as your body input. body = { - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client authentication. - (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user account. The valid values are @@ -80162,34 +79818,14 @@ def add_user( "readonly": bool, # Optional. For MongoDB clusters, set to ``true`` to create a read-only user."nThis option is not currently supported for other database engines. - "role": "str", # Optional. A string representing the database user's role. + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier for the ACL. Required. - "permission": "str", # Permission set applied to the - ACL. 'consume' allows for messages to be consumed from the topic. - 'produce' allows for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' permission. - 'admin' allows for 'produceconsume' as well as any operations to - administer the topic (delete, update). Required. Known values are: - "admin", "consume", "produce", and "produceconsume". - "topic": "str" # A regex for matching the topic(s) - that this ACL should apply to. Required. - } - ] - } } # response body for status code(s): 201 response == { "user": { - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. 
(Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -80202,27 +79838,9 @@ def add_user( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the database user's + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier for the ACL. - Required. - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str" # A regex for matching the - topic(s) that this ACL should apply to. Required. - } - ] - } } } # response body for status code(s): 404 @@ -80257,9 +79875,6 @@ def add_user( When adding a user to a MySQL cluster, additional options can be configured in the ``mysql_settings`` object. - When adding a user to a Kafka cluster, additional options can be configured in - the ``settings`` object. - The response will be a JSON object with a key called ``user``. The value of this will be an object that contains the standard attributes associated with a database user including its randomly generated password. @@ -80281,10 +79896,6 @@ def add_user( # response body for status code(s): 201 response == { "user": { - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -80297,27 +79908,9 @@ def add_user( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the database user's + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier for the ACL. - Required. - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str" # A regex for matching the - topic(s) that this ACL should apply to. Required. - } - ] - } } } # response body for status code(s): 404 @@ -80347,9 +79940,6 @@ def add_user( When adding a user to a MySQL cluster, additional options can be configured in the ``mysql_settings`` object. 
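A brief sketch of adding a user with the ``mysql_settings`` options described above; the client setup, user name, cluster UUID, and ``auth_plugin`` value are illustrative assumptions rather than part of this patch.

.. code-block:: python

    from pydo import Client

    client = Client(token="<DIGITALOCEAN_TOKEN>")  # placeholder token

    # New database user; mysql_settings is only honored on MySQL clusters.
    body = {
        "name": "reporting-user",
        "mysql_settings": {
            # Authentication method for the user; this value is an assumption
            # based on the auth_plugin description in the docstring above.
            "auth_plugin": "mysql_native_password",
        },
    }

    created = client.databases.add_user(
        "00000000-0000-0000-0000-000000000000",  # placeholder cluster UUID
        body=body,
    )
    # The response nests the user, including its generated password.
    print(created["user"]["name"], created["user"]["password"])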
- When adding a user to a Kafka cluster, additional options can be configured in - the ``settings`` object. - The response will be a JSON object with a key called ``user``. The value of this will be an object that contains the standard attributes associated with a database user including its randomly generated password. @@ -80371,10 +79961,6 @@ def add_user( # response body for status code(s): 201 response == { "user": { - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -80387,27 +79973,9 @@ def add_user( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the database user's + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier for the ACL. - Required. - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str" # A regex for matching the - topic(s) that this ACL should apply to. Required. - } - ] - } } } # response body for status code(s): 404 @@ -80521,11 +80089,9 @@ def get_user( The response will be a JSON object with a ``user`` key. This will be set to an object containing the standard database user attributes. - For MySQL clusters, additional options will be contained in the ``mysql_settings`` + For MySQL clusters, additional options will be contained in the mysql_settings object. - For Kafka clusters, additional options will be contained in the ``settings`` object. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str :param username: The name of the database user. Required. @@ -80540,10 +80106,6 @@ def get_user( # response body for status code(s): 200 response == { "user": { - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -80556,27 +80118,9 @@ def get_user( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the database user's + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier for the ACL. - Required. - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 
'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str" # A regex for matching the - topic(s) that this ACL should apply to. Required. - } - ] - } } } # response body for status code(s): 404 @@ -80821,10 +80365,6 @@ def reset_auth( # response body for status code(s): 200 response == { "user": { - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -80837,27 +80377,9 @@ def reset_auth( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the database user's + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier for the ACL. - Required. - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str" # A regex for matching the - topic(s) that this ACL should apply to. Required. - } - ] - } } } # response body for status code(s): 404 @@ -80914,10 +80436,6 @@ def reset_auth( # response body for status code(s): 200 response == { "user": { - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -80930,27 +80448,9 @@ def reset_auth( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the database user's + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier for the ACL. - Required. - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str" # A regex for matching the - topic(s) that this ACL should apply to. Required. 
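Continuing that sketch, fetching the same user and resetting its authentication would look roughly like the following; the ``mysql_settings`` body for ``reset_auth`` is an assumption based on the MySQL-only note above, not something this hunk spells out:

    # `client` and `cluster_uuid` as constructed in the earlier sketch.
    user = client.databases.get_user(cluster_uuid, "app_rw")["user"]

    reset = client.databases.reset_auth(
        cluster_uuid,
        "app_rw",
        body={"mysql_settings": {"auth_plugin": "mysql_native_password"}},  # assumed body shape
    )
    print(reset["user"]["password"])  # the regenerated password comes back in the user object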
- } - ] - } } } # response body for status code(s): 404 @@ -81005,10 +80505,6 @@ def reset_auth( # response body for status code(s): 200 response == { "user": { - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -81021,27 +80517,9 @@ def reset_auth( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str", # Optional. A string representing the database user's + "role": "str" # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "id": "str", # An identifier for the ACL. - Required. - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str" # A regex for matching the - topic(s) that this ACL should apply to. Required. - } - ] - } } } # response body for status code(s): 404 @@ -83649,1871 +83127,6 @@ def update_major_version( return deserialized - @distributed_trace - def list_kafka_topics(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: - """List Topics for a Kafka Cluster. - - To list all of a Kafka cluster's topics, send a GET request to - ``/v2/databases/$DATABASE_ID/topics``. - - The result will be a JSON object with a ``topics`` key. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "topics": [ - { - "name": "str", # Optional. The name of the Kafka topic. - "partition_count": 0, # Optional. The number of partitions - available for the topic. On update, this value can only be increased. - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. - Known values are: "active", "configuring", "deleting", and "unknown". - } - ] - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. 
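Before this change, listing a Kafka cluster's topics was a single call that returned the decoded ``topics`` payload described above; roughly:

    # `client` and `cluster_uuid` as constructed in the earlier sketch (Kafka cluster assumed).
    topics = client.databases.list_kafka_topics(cluster_uuid)
    for topic in topics.get("topics", []):
        print(topic.get("name"), topic.get("state"), topic.get("partition_count"))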
- } - """ - error_map = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 401: lambda response: ClientAuthenticationError(response=response), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls = kwargs.pop("cls", None) # type: ClsType[JSON] - - request = build_databases_list_kafka_topics_request( - database_cluster_uuid=database_cluster_uuid, - headers=_headers, - params=_params, - ) - request.url = self._client.format_url(request.url) # type: ignore - - pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access - request, stream=False, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - map_error( - status_code=response.status_code, response=response, error_map=error_map - ) - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) - - return cast(JSON, deserialized) - - @overload - def create_kafka_topic( - self, - database_cluster_uuid: str, - body: Optional[JSON] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> JSON: - """Create Topic for a Kafka Cluster. - - To create a topic attached to a Kafka cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/topics``. - - The result will be a JSON object with a ``topic`` key. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Default value is None. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "config": { - "cleanup_policy": "delete", # Optional. Default value is "delete". - The cleanup_policy sets the retention policy to use on log segments. 'delete' - will discard old segments when retention time/size limits are reached. - 'compact' will enable log compaction, resulting in retention of the latest - value for each key. Known values are: "delete", "compact", and - "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". 
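As with the surviving operations in this module, the removed implementation deserializes the ``ratelimit-*`` response headers but only surfaces them through the optional ``cls`` callback; a caller who wanted them could have supplied one, for example:

    # `client` and `cluster_uuid` as constructed in the earlier sketch.
    def keep_headers(pipeline_response, deserialized, headers):
        # `headers` carries the deserialized ratelimit-limit/remaining/reset values.
        return deserialized, headers

    topics, limits = client.databases.list_kafka_topics(cluster_uuid, cls=keep_headers)
    print(limits.get("ratelimit-remaining"))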
The compression_type specifies the compression type of the topic. - Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value is - 86400000. The delete_retention_ms specifies how long (in ms) to retain delete - tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is 60000. - The file_delete_delay_ms specifies the time (in ms) to wait before deleting a - file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default value is - 9223372036854776000. The flush_messages specifies the number of messages to - accumulate on a log partition before messages are flushed to disk. - "flush_ms": 9223372036854776000, # Optional. Default value is - 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a - message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is 4096. The - index_interval_bytes specifies the number of bytes between entries being - added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum - amount of time (in ms) that a message will remain uncompacted. This is only - applicable if the logs are have compaction enabled. - "max_message_bytes": 1048588, # Optional. Default value is 1048588. - The max_messages_bytes specifies the largest record batch size (in bytes) - that can be sent to the server. This is calculated after compression if - compression is enabled. - "message_down_conversion_enable": True, # Optional. Default value is - True. The message_down_conversion_enable specifies whether down-conversion of - message formats is enabled to satisfy consumer requests. When 'false', the - broker will not perform conversion for consumers expecting older message - formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for - consume requests from these older clients. - "message_format_version": "3.0-IV1", # Optional. Default value is - "3.0-IV1". The message_format_version specifies the message format version - used by the broker to append messages to the logs. The value of this setting - is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By - setting a particular message format version, all existing messages on disk - must be smaller or equal to the specified version. Known values are: "0.8.0", - "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", - "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", - "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", - "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", - "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1", - "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default value - is "create_time". The message_timestamp_type specifies whether to use the - message create time or log append time as the timestamp on a message. Known - values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5. - The min_cleanable_dirty_ratio specifies the frequency of log compaction (if - enabled) in relation to duplicates present in the logs. 
For example, at 0.5, - at most 50% of the log could be duplicates before compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. The - min_compaction_lag_ms specifies the minimum time (in ms) that a message will - remain uncompacted in the log. Only relevant if log compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. The - min_insync_replicas specifies the number of replicas that must ACK a write - for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. The - preallocate specifies whether a file should be preallocated on disk when - creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is 604800000. - The retention_ms specifies the maximum amount of time (in ms) to keep a - message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is 209715200. - The segment_bytes specifies the maximum size of a single log file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is 604800000. The - segment_ms specifies the period of time after which the log will be forced to - roll if the segment file isn't full. This ensures that retention can delete - or compact old data. - "unclean_leader_election_enable": False # Optional. Default value is - False. Whether unclean_leader_election_enable specifies whether to allow - replicas that are not insync to be elected as leaders as a last resort. This - may result in data loss since those leaders are not insync. - }, - "name": "str", # Optional. The name of the Kafka topic. - "partition_count": 0, # Optional. The number of partitions available for the - topic. On update, this value can only be increased. - "replication_factor": 0 # Optional. The number of nodes to replicate data - across the cluster. - } - - # response body for status code(s): 201 - response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. 
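A minimal request body for the removed ``create_kafka_topic`` call, using only a few of the config keys documented in the template above; the values are illustrative, not recommendations:

    # `client` and `cluster_uuid` as constructed in the earlier sketch.
    topic_body = {
        "name": "events",                 # topic to create
        "partition_count": 3,             # can only be increased on later updates
        "replication_factor": 2,          # nodes the data is replicated across
        "config": {
            "cleanup_policy": "delete",   # discard old segments at the retention limit
            "retention_ms": 604800000,    # keep messages for seven days
            "min_insync_replicas": 1,     # replicas that must ACK a write
        },
    }
    created = client.databases.create_kafka_topic(cluster_uuid, body=topic_body)
    print(created["topic"]["name"], created["topic"]["state"])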
- "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. 
The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. - }, - "name": "str", # The name of the Kafka topic. Required. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - def create_kafka_topic( - self, - database_cluster_uuid: str, - body: Optional[IO] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> JSON: - """Create Topic for a Kafka Cluster. - - To create a topic attached to a Kafka cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/topics``. - - The result will be a JSON object with a ``topic`` key. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Default value is None. - :type body: IO - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. 
code-block:: python - - # response body for status code(s): 201 - response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. 
Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. - }, - "name": "str", # The name of the Kafka topic. Required. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. 
- } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @distributed_trace - def create_kafka_topic( - self, - database_cluster_uuid: str, - body: Optional[Union[JSON, IO]] = None, - **kwargs: Any - ) -> JSON: - """Create Topic for a Kafka Cluster. - - To create a topic attached to a Kafka cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/topics``. - - The result will be a JSON object with a ``topic`` key. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Is either a model type or a IO type. Default value is None. - :type body: JSON or IO - :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. - Default value is None. - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 201 - response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. 
The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. 
The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. - }, - "name": "str", # The name of the Kafka topic. Required. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. 
- } - """ - error_map = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 401: lambda response: ClientAuthenticationError(response=response), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) # type: Optional[str] - cls = kwargs.pop("cls", None) # type: ClsType[JSON] - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IO, bytes)): - _content = body - else: - if body is not None: - _json = body - else: - _json = None - - request = build_databases_create_kafka_topic_request( - database_cluster_uuid=database_cluster_uuid, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - request.url = self._client.format_url(request.url) # type: ignore - - pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access - request, stream=False, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [201, 404]: - map_error( - status_code=response.status_code, response=response, error_map=error_map - ) - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 201: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) - - return cast(JSON, deserialized) - - @distributed_trace - def get_kafka_topic( - self, database_cluster_uuid: str, topic_name: str, **kwargs: Any - ) -> JSON: - """Get Topic for a Kafka Cluster. - - To retrieve a given topic by name from the set of a Kafka cluster's topics, - send a GET request to ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. - - The result will be a JSON object with a ``topic`` key. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param topic_name: The name used to identify the Kafka topic. Required. - :type topic_name: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 
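The removed implementation accepted either a JSON-serializable mapping or a pre-encoded ``IO``/``bytes`` payload for ``body``, defaulting the content type to ``application/json``; the two calls below would have been equivalent:

    import json

    # `client`, `cluster_uuid` and `topic_body` as in the earlier sketches.
    client.databases.create_kafka_topic(cluster_uuid, body=topic_body)

    raw = json.dumps(topic_body).encode()
    client.databases.create_kafka_topic(
        cluster_uuid, body=raw, content_type="application/json"
    )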
'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". 
- "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. - }, - "name": "str", # The name of the Kafka topic. Required. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. 
Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - error_map = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 401: lambda response: ClientAuthenticationError(response=response), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls = kwargs.pop("cls", None) # type: ClsType[JSON] - - request = build_databases_get_kafka_topic_request( - database_cluster_uuid=database_cluster_uuid, - topic_name=topic_name, - headers=_headers, - params=_params, - ) - request.url = self._client.format_url(request.url) # type: ignore - - pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access - request, stream=False, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - map_error( - status_code=response.status_code, response=response, error_map=error_map - ) - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) - - return cast(JSON, deserialized) - - @overload - def update_kafka_topic( - self, - database_cluster_uuid: str, - topic_name: str, - body: Optional[JSON] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> JSON: - """Update Topic for a Kafka Cluster. - - To update a topic attached to a Kafka cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. - - The result will be a JSON object with a ``topic`` key. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param topic_name: The name used to identify the Kafka topic. Required. - :type topic_name: str - :param body: Default value is None. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 
'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. 
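Fetching and updating a single topic followed the same pattern, with the update body wrapped in a ``topic`` key as in the input template above; a rough sketch, reusing the placeholders from the earlier examples:

    # `client` and `cluster_uuid` as constructed in the earlier sketch.
    detail = client.databases.get_kafka_topic(cluster_uuid, "events")["topic"]
    for part in detail.get("partitions", []):
        print(part.get("id"), part.get("in_sync_replicas"), part.get("size"))

    update_body = {"topic": {"partition_count": 6}}   # partition count can only grow
    updated = client.databases.update_kafka_topic(cluster_uuid, "events", body=update_body)
    print(updated.get("topic", {}).get("state"))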
Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. - }, - "name": "str", # Optional. The name of the Kafka topic. - "partition_count": 0, # Optional. The number of partitions available - for the topic. On update, this value can only be increased. - "replication_factor": 0 # Optional. The number of nodes to replicate - data across the cluster. - } - } - - # response body for status code(s): 200 - response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. 
The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. 
- The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. - }, - "name": "str", # The name of the Kafka topic. Required. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - def update_kafka_topic( - self, - database_cluster_uuid: str, - topic_name: str, - body: Optional[IO] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> JSON: - """Update Topic for a Kafka Cluster. - - To update a topic attached to a Kafka cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. - - The result will be a JSON object with a ``topic`` key. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
- :type database_cluster_uuid: str - :param topic_name: The name used to identify the Kafka topic. Required. - :type topic_name: str - :param body: Default value is None. - :type body: IO - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. 
Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. - }, - "name": "str", # The name of the Kafka topic. Required. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. 
- } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @distributed_trace - def update_kafka_topic( - self, - database_cluster_uuid: str, - topic_name: str, - body: Optional[Union[JSON, IO]] = None, - **kwargs: Any - ) -> JSON: - """Update Topic for a Kafka Cluster. - - To update a topic attached to a Kafka cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. - - The result will be a JSON object with a ``topic`` key. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param topic_name: The name used to identify the Kafka topic. Required. - :type topic_name: str - :param body: Is either a model type or a IO type. Default value is None. - :type body: JSON or IO - :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. - Default value is None. - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. 
- Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. 
Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000, # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - "unclean_leader_election_enable": False # Optional. Default - value is False. Whether unclean_leader_election_enable specifies whether - to allow replicas that are not insync to be elected as leaders as a last - resort. This may result in data loss since those leaders are not insync. - }, - "name": "str", # The name of the Kafka topic. Required. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. 
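The request/response templates above map directly onto a plain client call. Below is a minimal, hedged sketch of driving this update operation through a pydo-style client; it assumes the generated operations are exposed as ``client.databases`` and the token, cluster UUID, and topic name shown are placeholders, not values from this patch.

    from pydo import Client

    # Illustrative only: token and identifiers are placeholders.
    client = Client(token="<DIGITALOCEAN_TOKEN>")

    # A minimal body: only the fields being changed are included here.
    # Per the docstring above, partition_count can only be increased.
    body = {
        "topic": {
            "partition_count": 6,
            "config": {
                "retention_ms": 259200000,
                "cleanup_policy": "compact",
            },
        }
    }

    resp = client.databases.update_kafka_topic(
        database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",  # placeholder
        topic_name="customer-events",  # placeholder
        body=body,
    )

    # A 200 response carries the updated topic under the "topic" key.
    topic = resp["topic"]
    print(topic.get("state"), topic.get("replication_factor"))
    for partition in topic.get("partitions", []):
        print(partition.get("id"), partition.get("size"))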
- } - """ - error_map = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 401: lambda response: ClientAuthenticationError(response=response), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) # type: Optional[str] - cls = kwargs.pop("cls", None) # type: ClsType[JSON] - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IO, bytes)): - _content = body - else: - if body is not None: - _json = body - else: - _json = None - - request = build_databases_update_kafka_topic_request( - database_cluster_uuid=database_cluster_uuid, - topic_name=topic_name, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - request.url = self._client.format_url(request.url) # type: ignore - - pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access - request, stream=False, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - map_error( - status_code=response.status_code, response=response, error_map=error_map - ) - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) - - return cast(JSON, deserialized) - - @distributed_trace - def delete_kafka_topic( - self, database_cluster_uuid: str, topic_name: str, **kwargs: Any - ) -> Optional[JSON]: - """Delete Topic for a Kafka Cluster. - - To delete a single topic within a Kafka cluster, send a DELETE request - to ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. - - A status of 204 will be given. This indicates that the request was - processed successfully, but that no response body is needed. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param topic_name: The name used to identify the Kafka topic. Required. - :type topic_name: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". 
Required.
-                "message": "str",  # A message providing additional information about the
-                  error, including details to help resolve it when possible. Required.
-                "request_id": "str"  # Optional. Optionally, some endpoints may include a
-                  request ID that should be provided when reporting bugs or opening support
-                  tickets to help identify the issue.
-            }
-        """
-        error_map = {
-            404: ResourceNotFoundError,
-            409: ResourceExistsError,
-            401: lambda response: ClientAuthenticationError(response=response),
-            429: HttpResponseError,
-            500: HttpResponseError,
-        }
-        error_map.update(kwargs.pop("error_map", {}) or {})
-
-        _headers = kwargs.pop("headers", {}) or {}
-        _params = kwargs.pop("params", {}) or {}
-
-        cls = kwargs.pop("cls", None)  # type: ClsType[Optional[JSON]]
-
-        request = build_databases_delete_kafka_topic_request(
-            database_cluster_uuid=database_cluster_uuid,
-            topic_name=topic_name,
-            headers=_headers,
-            params=_params,
-        )
-        request.url = self._client.format_url(request.url)  # type: ignore
-
-        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
-            request, stream=False, **kwargs
-        )
-
-        response = pipeline_response.http_response
-
-        if response.status_code not in [204, 404]:
-            map_error(
-                status_code=response.status_code, response=response, error_map=error_map
-            )
-            raise HttpResponseError(response=response)
-
-        deserialized = None
-        response_headers = {}
-        if response.status_code == 204:
-            response_headers["ratelimit-limit"] = self._deserialize(
-                "int", response.headers.get("ratelimit-limit")
-            )
-            response_headers["ratelimit-remaining"] = self._deserialize(
-                "int", response.headers.get("ratelimit-remaining")
-            )
-            response_headers["ratelimit-reset"] = self._deserialize(
-                "int", response.headers.get("ratelimit-reset")
-            )
-
-        if response.status_code == 404:
-            response_headers["ratelimit-limit"] = self._deserialize(
-                "int", response.headers.get("ratelimit-limit")
-            )
-            response_headers["ratelimit-remaining"] = self._deserialize(
-                "int", response.headers.get("ratelimit-remaining")
-            )
-            response_headers["ratelimit-reset"] = self._deserialize(
-                "int", response.headers.get("ratelimit-reset")
-            )
-
-            if response.content:
-                deserialized = response.json()
-            else:
-                deserialized = None
-
-        if cls:
-            return cls(pipeline_response, deserialized, response_headers)
-
-        return deserialized
-
 class DomainsOperations:
     """
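Before moving on to the domains operations, here is a hedged sketch of the delete operation just shown. Per the docstring and implementation above, a 204 surfaces as ``None`` and a 404 is returned (not raised) as the error-shaped JSON with ``id``, ``message``, and an optional ``request_id``; other statuses raise ``HttpResponseError``. As before, the ``client.databases`` attribute path, token, and identifiers are illustrative assumptions rather than values from this patch.

    from azure.core.exceptions import HttpResponseError
    from pydo import Client

    client = Client(token="<DIGITALOCEAN_TOKEN>")  # placeholder token

    try:
        resp = client.databases.delete_kafka_topic(
            database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",  # placeholder
            topic_name="customer-events",  # placeholder
        )
    except HttpResponseError as err:
        # Statuses other than 204/404 (e.g. 401, 429, 500) are raised.
        print(f"delete failed with status {err.status_code}")
    else:
        if resp is None:
            print("topic deleted (204)")
        else:
            # 404 is returned as an error-shaped object rather than raised.
            print(resp["id"], resp["message"])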