Skip to content

Commit

Permalink
[bot] Updated client based on openapi-32be7e9/clientgen
Browse files Browse the repository at this point in the history
  • Loading branch information
API Engineering committed Nov 28, 2023
1 parent 066b577 commit 4a93437
Show file tree
Hide file tree
Showing 3 changed files with 277 additions and 91 deletions.
2 changes: 1 addition & 1 deletion DO_OPENAPI_COMMIT_SHA.txt
Original file line number Diff line number Diff line change
@@ -1 +1 @@
7a717b3
32be7e9
172 changes: 127 additions & 45 deletions src/pydo/aio/operations/_operations.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,7 @@
build_databases_list_backups_request,
build_databases_list_clusters_request,
build_databases_list_connection_pools_request,
build_databases_list_events_logs_request,
build_databases_list_firewall_rules_request,
build_databases_list_kafka_topics_request,
build_databases_list_options_request,
Expand Down Expand Up @@ -72987,6 +72988,10 @@ async def update_online_migration(
body = {
"disable_ssl": bool, # Optional. Enables SSL encryption when connecting to
the source database.
"ignore_dbs": [
"str" # Optional. List of databases that should be ignored during
migration.
],
"source": {
"dbname": "str", # Optional. The name of the default database.
"host": "str", # Optional. The FQDN pointing to the database
Expand Down Expand Up @@ -75106,6 +75111,119 @@ async def create_replica(

return cast(JSON, deserialized)

@distributed_trace_async
async def list_events_logs(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
    """List all Events Logs.

    To list all of the cluster events, send a GET request to
    ``/v2/databases/$DATABASE_ID/events``.

    The result will be a JSON object with an ``events`` key.

    :param database_cluster_uuid: A unique identifier for a database cluster. Required.
    :type database_cluster_uuid: str
    :return: JSON object
    :rtype: JSON
    :raises ~azure.core.exceptions.HttpResponseError:

    Example:
        .. code-block:: python

            # response body for status code(s): 200
            response == {
                "events": [
                    {
                        "cluster_name": "str",  # Optional. The name of cluster.
                        "create_time": "str",  # Optional. The time of the generation
                          of an event.
                        "event_type": "str",  # Optional. Type of the event.
                        "id": "str"  # Optional. ID of the particular event.
                    }
                ]
            }
            # response body for status code(s): 404
            response == {
                "id": "str",  # A short identifier corresponding to the HTTP status code
                  returned. For example, the ID for a response returning a 404 status code would
                  be "not_found.". Required.
                "message": "str",  # A message providing additional information about the
                  error, including details to help resolve it when possible. Required.
                "request_id": "str"  # Optional. Optionally, some endpoints may include a
                  request ID that should be provided when reporting bugs or opening support
                  tickets to help identify the issue.
            }
    """
    # Map non-2xx statuses to the azure-core exception types callers expect;
    # caller-supplied overrides (via the "error_map" kwarg) take precedence.
    error_map = {
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        401: lambda response: ClientAuthenticationError(response=response),
        429: HttpResponseError,
        500: HttpResponseError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = kwargs.pop("params", {}) or {}

    # Optional custom deserialization callback, per azure-core convention.
    cls = kwargs.pop("cls", None)  # type: ClsType[JSON]

    request = build_databases_list_events_logs_request(
        database_cluster_uuid=database_cluster_uuid,
        headers=_headers,
        params=_params,
    )
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    if response.status_code not in [200, 404]:
        map_error(
            status_code=response.status_code, response=response, error_map=error_map
        )
        raise HttpResponseError(response=response)

    # Both documented statuses (200 and 404) carry the same rate-limit headers
    # and a JSON body, so process them once instead of duplicating the logic
    # per status code. This also guarantees ``deserialized`` is always bound.
    response_headers = {}
    for header_name in ("ratelimit-limit", "ratelimit-remaining", "ratelimit-reset"):
        response_headers[header_name] = self._deserialize(
            "int", response.headers.get(header_name)
        )

    deserialized = response.json() if response.content else None

    if cls:
        return cls(pipeline_response, cast(JSON, deserialized), response_headers)

    return cast(JSON, deserialized)

@distributed_trace_async
async def get_replica(
self, database_cluster_uuid: str, replica_name: str, **kwargs: Any
Expand Down Expand Up @@ -79423,14 +79541,10 @@ async def create_kafka_topic(
"segment_jitter_ms": 0, # Optional. Default value is 0. The
segment_jitter_ms specifies the maximum random jitter subtracted from the
scheduled segment roll time to avoid thundering herds of segment rolling.
"segment_ms": 604800000, # Optional. Default value is 604800000. The
"segment_ms": 604800000 # Optional. Default value is 604800000. The
segment_ms specifies the period of time after which the log will be forced to
roll if the segment file isn't full. This ensures that retention can delete
or compact old data.
"unclean_leader_election_enable": False # Optional. Default value is
False. Whether unclean_leader_election_enable specifies whether to allow
replicas that are not insync to be elected as leaders as a last resort. This
may result in data loss since those leaders are not insync.
},
"name": "str", # Optional. The name of the Kafka topic.
"partition_count": 0, # Optional. The number of partitions available for the
Expand Down Expand Up @@ -79530,14 +79644,10 @@ async def create_kafka_topic(
"segment_jitter_ms": 0, # Optional. Default value is 0. The
segment_jitter_ms specifies the maximum random jitter subtracted from the
scheduled segment roll time to avoid thundering herds of segment rolling.
"segment_ms": 604800000, # Optional. Default value is
"segment_ms": 604800000 # Optional. Default value is
604800000. The segment_ms specifies the period of time after which the
log will be forced to roll if the segment file isn't full. This ensures
that retention can delete or compact old data.
"unclean_leader_election_enable": False # Optional. Default
value is False. Whether unclean_leader_election_enable specifies whether
to allow replicas that are not insync to be elected as leaders as a last
resort. This may result in data loss since those leaders are not insync.
},
"name": "str", # Optional. The name of the Kafka topic.
"partitions": [
Expand Down Expand Up @@ -79701,14 +79811,10 @@ async def create_kafka_topic(
"segment_jitter_ms": 0, # Optional. Default value is 0. The
segment_jitter_ms specifies the maximum random jitter subtracted from the
scheduled segment roll time to avoid thundering herds of segment rolling.
"segment_ms": 604800000, # Optional. Default value is
"segment_ms": 604800000 # Optional. Default value is
604800000. The segment_ms specifies the period of time after which the
log will be forced to roll if the segment file isn't full. This ensures
that retention can delete or compact old data.
"unclean_leader_election_enable": False # Optional. Default
value is False. Whether unclean_leader_election_enable specifies whether
to allow replicas that are not insync to be elected as leaders as a last
resort. This may result in data loss since those leaders are not insync.
},
"name": "str", # Optional. The name of the Kafka topic.
"partitions": [
Expand Down Expand Up @@ -79870,14 +79976,10 @@ async def create_kafka_topic(
"segment_jitter_ms": 0, # Optional. Default value is 0. The
segment_jitter_ms specifies the maximum random jitter subtracted from the
scheduled segment roll time to avoid thundering herds of segment rolling.
"segment_ms": 604800000, # Optional. Default value is
"segment_ms": 604800000 # Optional. Default value is
604800000. The segment_ms specifies the period of time after which the
log will be forced to roll if the segment file isn't full. This ensures
that retention can delete or compact old data.
"unclean_leader_election_enable": False # Optional. Default
value is False. Whether unclean_leader_election_enable specifies whether
to allow replicas that are not insync to be elected as leaders as a last
resort. This may result in data loss since those leaders are not insync.
},
"name": "str", # Optional. The name of the Kafka topic.
"partitions": [
Expand Down Expand Up @@ -80120,14 +80222,10 @@ async def get_kafka_topic(
"segment_jitter_ms": 0, # Optional. Default value is 0. The
segment_jitter_ms specifies the maximum random jitter subtracted from the
scheduled segment roll time to avoid thundering herds of segment rolling.
"segment_ms": 604800000, # Optional. Default value is
"segment_ms": 604800000 # Optional. Default value is
604800000. The segment_ms specifies the period of time after which the
log will be forced to roll if the segment file isn't full. This ensures
that retention can delete or compact old data.
"unclean_leader_election_enable": False # Optional. Default
value is False. Whether unclean_leader_election_enable specifies whether
to allow replicas that are not insync to be elected as leaders as a last
resort. This may result in data loss since those leaders are not insync.
},
"name": "str", # Optional. The name of the Kafka topic.
"partitions": [
Expand Down Expand Up @@ -80357,14 +80455,10 @@ async def update_kafka_topic(
"segment_jitter_ms": 0, # Optional. Default value is 0. The
segment_jitter_ms specifies the maximum random jitter subtracted from the
scheduled segment roll time to avoid thundering herds of segment rolling.
"segment_ms": 604800000, # Optional. Default value is 604800000. The
"segment_ms": 604800000 # Optional. Default value is 604800000. The
segment_ms specifies the period of time after which the log will be forced to
roll if the segment file isn't full. This ensures that retention can delete
or compact old data.
"unclean_leader_election_enable": False # Optional. Default value is
False. Whether unclean_leader_election_enable specifies whether to allow
replicas that are not insync to be elected as leaders as a last resort. This
may result in data loss since those leaders are not insync.
},
"partition_count": 0, # Optional. The number of partitions available for the
topic. On update, this value can only be increased.
Expand Down Expand Up @@ -80463,14 +80557,10 @@ async def update_kafka_topic(
"segment_jitter_ms": 0, # Optional. Default value is 0. The
segment_jitter_ms specifies the maximum random jitter subtracted from the
scheduled segment roll time to avoid thundering herds of segment rolling.
"segment_ms": 604800000, # Optional. Default value is
"segment_ms": 604800000 # Optional. Default value is
604800000. The segment_ms specifies the period of time after which the
log will be forced to roll if the segment file isn't full. This ensures
that retention can delete or compact old data.
"unclean_leader_election_enable": False # Optional. Default
value is False. Whether unclean_leader_election_enable specifies whether
to allow replicas that are not insync to be elected as leaders as a last
resort. This may result in data loss since those leaders are not insync.
},
"name": "str", # Optional. The name of the Kafka topic.
"partitions": [
Expand Down Expand Up @@ -80637,14 +80727,10 @@ async def update_kafka_topic(
"segment_jitter_ms": 0, # Optional. Default value is 0. The
segment_jitter_ms specifies the maximum random jitter subtracted from the
scheduled segment roll time to avoid thundering herds of segment rolling.
"segment_ms": 604800000, # Optional. Default value is
"segment_ms": 604800000 # Optional. Default value is
604800000. The segment_ms specifies the period of time after which the
log will be forced to roll if the segment file isn't full. This ensures
that retention can delete or compact old data.
"unclean_leader_election_enable": False # Optional. Default
value is False. Whether unclean_leader_election_enable specifies whether
to allow replicas that are not insync to be elected as leaders as a last
resort. This may result in data loss since those leaders are not insync.
},
"name": "str", # Optional. The name of the Kafka topic.
"partitions": [
Expand Down Expand Up @@ -80809,14 +80895,10 @@ async def update_kafka_topic(
"segment_jitter_ms": 0, # Optional. Default value is 0. The
segment_jitter_ms specifies the maximum random jitter subtracted from the
scheduled segment roll time to avoid thundering herds of segment rolling.
"segment_ms": 604800000, # Optional. Default value is
"segment_ms": 604800000 # Optional. Default value is
604800000. The segment_ms specifies the period of time after which the
log will be forced to roll if the segment file isn't full. This ensures
that retention can delete or compact old data.
"unclean_leader_election_enable": False # Optional. Default
value is False. Whether unclean_leader_election_enable specifies whether
to allow replicas that are not insync to be elected as leaders as a last
resort. This may result in data loss since those leaders are not insync.
},
"name": "str", # Optional. The name of the Kafka topic.
"partitions": [
Expand Down
Loading

0 comments on commit 4a93437

Please sign in to comment.