From 5cb9f1414622782b72f89e5f2b042dc07fb1bb2d Mon Sep 17 00:00:00 2001 From: Artem Goncharov Date: Mon, 10 May 2021 13:22:28 +0200 Subject: [PATCH 01/65] VPC peering fix (#109) VPC peering fix Reviewed-by: None Reviewed-by: None --- plugins/modules/vpc_peering.py | 115 +++++-------- plugins/modules/vpc_peering_mode.py | 155 ++++++++++++++++++ plugins/modules/vpc_route.py | 37 ++--- roles/vpc_peering/README.rst | 37 +++++ roles/vpc_peering/tasks/main.yaml | 6 + roles/vpc_peering/tasks/provision.yaml | 58 +++++++ .../tasks/main.yaml | 0 .../tasks/main.yaml | 19 ++- .../tasks/main.yaml | 0 .../tasks/main.yaml | 8 +- tests/sanity/ignore-2.10.txt | 1 + tests/sanity/ignore-2.9.txt | 1 + 12 files changed, 328 insertions(+), 109 deletions(-) create mode 100644 plugins/modules/vpc_peering_mode.py create mode 100644 roles/vpc_peering/README.rst create mode 100644 roles/vpc_peering/tasks/main.yaml create mode 100644 roles/vpc_peering/tasks/provision.yaml rename tests/integration/targets/{vpc_peering_info => vpc_peering_info_test}/tasks/main.yaml (100%) rename tests/integration/targets/{vpc_peering => vpc_peering_test}/tasks/main.yaml (90%) rename tests/integration/targets/{vpc_route_info => vpc_route_info_test}/tasks/main.yaml (100%) rename tests/integration/targets/{vpc_route => vpc_route_test}/tasks/main.yaml (96%) diff --git a/plugins/modules/vpc_peering.py b/plugins/modules/vpc_peering.py index f002896c..24ca8c93 100644 --- a/plugins/modules/vpc_peering.py +++ b/plugins/modules/vpc_peering.py @@ -24,12 +24,12 @@ options: name: description: - - Name of the vpc peering connection. + - Name or ID of the vpc peering connection. - Mandatory for creating. - Can be updated. type: str id: - description: ID of the vpc peering connection. + description: ID of the vpc peering connection. type: str state: description: Should the resource be present or absent. @@ -39,20 +39,15 @@ local_router: description: Name or ID of the local router. type: str - project_id_local: + local_project: description: Specifies the ID of the project to which a local VPC belongs. type: str - peer_router: - description: Name or ID of the peer router. + remote_router: + description: ID of the remote router. type: str - project_id_peer: + remote_project: description: Specifies the ID of the project to which a peer VPC belongs. type: str - description: - description: - - Provides supplementary information about the VPC peering connection. - - Can be updated. 
- type: str requirements: ["openstacksdk", "otcextensions"] ''' @@ -125,9 +120,9 @@ - opentelekomcloud.cloud.vpc_peering: name: "peering1" local_router: "local-router" - project_id_local: "959db9b6017d4a1fa1c6fd17b6820f55" - peer_router: "peer-router" - project_id_peer: "959db9b6017d4a1fa1c6fd17b6820f55" + local_project_id: "959db9b6017d4a1fa1c6fd17b6820f55" + remote_router: "peer-router" + remote_project_id: "959db9b6017d4a1fa1c6fd17b6820f55" # Change name of the vpc peering - opentelekomcloud.cloud.vpc_peering: @@ -149,44 +144,41 @@ class VPCPeeringModule(OTCModule): id=dict(type='str'), state=dict(default='present', choices=['absent', 'present']), local_router=dict(type='str'), - project_id_local=dict(type='str'), - peer_router=dict(type='str'), - project_id_peer=dict(type='str'), - description=dict(type='str', default="") + local_project=dict(type='str'), + remote_router=dict(type='str'), + remote_project=dict(type='str'), ) module_kwargs = dict( required_if=[ - ('name', 'None', ['id']) + ('name', 'None', ['id']), + ('state', 'present', ['name', 'local_router', 'local_project', + 'remote_router', 'remote_project']), ], supports_check_mode=True ) - def _check_peering(self, local_vpc_id, peer_vpc_id): - - result = True - peerings = [] - - for raw in self.conn.vpc.peerings(): - dt = raw.to_dict() - dt.pop('location') - peerings.append(dt) + def _is_peering_exist(self, local_router_id, peer_router_id): - if peerings: - for peering in peerings: - if (peering['local_vpc_info']['vpc_id'] == local_vpc_id and peering['peer_vpc_info']['vpc_id'] == peer_vpc_id) or \ - (peering['local_vpc_info']['vpc_id'] == peer_vpc_id and peering['peer_vpc_info']['vpc_id'] == local_vpc_id): - result = False + for peering in self.conn.vpc.peerings(): + if ( + ( + peering.local_vpc_info['vpc_id'] == local_router_id + and peering.peer_vpc_info['vpc_id'] == peer_router_id) + or ( + peering.local_vpc_info['vpc_id'] == peer_router_id + and peering.peer_vpc_info['vpc_id'] == local_router_id) + ): + return True - return result + return False def run(self): name = self.params['name'] id = self.params['id'] local_router = self.params['local_router'] - project_id_local = self.params['project_id_local'] - peer_router = self.params['peer_router'] - project_id_peer = self.params['project_id_peer'] - description = self.params['description'] + local_project = self.params['local_project'] + remote_router = self.params['remote_router'] + remote_project = self.params['remote_project'] changed = False vpc_peering = None @@ -204,9 +196,6 @@ def run(self): if self.params['name'] and (self.params['name'] != vpc_peering.name): attrs['name'] = self.params['name'] - if self.params['description'] and (self.params['description'] != vpc_peering.description): - attrs['description'] = self.params['description'] - changed = False if attrs: @@ -232,47 +221,29 @@ def run(self): attrs = {} - if not local_router: - self.fail_json(msg="'local_router' is mandatory for creating") - - if not project_id_local: - self.fail_json(msg="'project_id_local' is mandatory for creating") - - if not peer_router: - self.fail_json(msg="'peer_router' is mandatory for creating") + local_router = self.conn.network.find_router(local_router, ignore_missing=True) - if not project_id_peer: - self.fail_json(msg="'project_id_peer' is mandatory for creating") - - local_vpc = self.conn.network.find_router(local_router, ignore_missing=True) - peer_vpc = self.conn.network.find_router(peer_router, ignore_missing=True) - - local_vpc_id = None - peer_vpc_id = None - - if 
local_vpc: - local_vpc_id = local_vpc['id'] - else: + if not local_router: self.fail_json(msg="Local router not found") - if peer_vpc: - peer_vpc_id = peer_vpc['id'] - else: - self.fail_json(msg="Peer router not found") - attrs['name'] = name - local_vpc = {'vpc_id': local_vpc_id, 'project_id': project_id_local} + local_vpc = {'vpc_id': local_router.id} attrs['local_vpc_info'] = local_vpc - peer_vpc = {'vpc_id': peer_vpc_id, 'project_id': project_id_peer} + peer_vpc = {'vpc_id': remote_router} attrs['peer_vpc_info'] = peer_vpc - - if description: - attrs['description'] = self.params['description'] + if ( + self.conn.current_project_id == local_project + and local_project != remote_project + ): + # Seems to be an API bug that doesn't want to see tenant_id + # if A and B are in same project + local_vpc['tenant_id'] = local_project + peer_vpc['tenant_id'] = remote_project changed = False - if self._check_peering(local_vpc_id, peer_vpc_id): + if not self._is_peering_exist(local_router.id, remote_router): if self.ansible.check_mode: self.exit_json(changed=True) diff --git a/plugins/modules/vpc_peering_mode.py b/plugins/modules/vpc_peering_mode.py new file mode 100644 index 00000000..75cefcce --- /dev/null +++ b/plugins/modules/vpc_peering_mode.py @@ -0,0 +1,155 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +DOCUMENTATION = ''' +--- +module: vpc_peering_mode +short_description: Add/Update/Delete vpc peering connection from OpenTelekomCloud +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.8.2" +author: "Artem Goncharov (@gtema)" +description: + - Accept or Reject VPC peering request. +options: + name: + description: + - Name of the vpc peering connection. + type: str + mode: + description: + - Mode to be used. + type: str + choices: ['accept', 'reject'] +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +vpc_peering: + description: Dictionary describing VPC peering instance. + type: complex + returned: On Success. + contains: + id: + description: Specifies the VPC peering connection ID. + returned: On success when C(state=present) + type: str + sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69" + name: + description: Specifies the VPC peering connection name. + returned: On success when C(state=present) + type: str + sample: "vpc_peering1" + status: + description: Specifies the VPC peering connection status. + returned: On success when C(state=present) + type: str + sample: "accepted" + request_vpc_info: + description: Dictionary describing the local vpc. + returned: On success when C(state=present) + type: complex + contains: + vpc_id: + description: Specifies the ID of a VPC involved in a VPC peering connection. + type: str + sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69" + project_id: + description: Specifies the ID of the project to which a VPC involved in the VPC peering connection belongs. + type: str + sample: "45007a7e-ee4f-4d13-8283-b4da2e037c69" + accept_vpc_info: + description: Dictionary describing the local vpc. 
+ returned: On success when C(state=present) + type: complex + contains: + vpc_id: + description: Specifies the ID of a VPC involved in a VPC peering connection. + type: str + sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69" + project_id: + description: Specifies the ID of the project to which a VPC involved in the VPC peering connection belongs. + type: str + sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69" + description: + description: Provides supplementary information about the VPC peering connection. + returned: On success when C(state=present) + type: str + sample: "" + created_at: + description: Specifies the time (UTC) when the VPC peering connection is created. + returned: On success when C(state=present) + type: str + sample: "2020-09-13T20:38:02" + updated_at: + description: Specifies the time (UTC) when the VPC peering connection is updated. + returned: On success when C(state=present) + type: str + sample: "2020-09-13T20:38:02" +''' + +EXAMPLES = ''' +# Accept vpc peering. +- opentelekomcloud.cloud.vpc_peering_mode: + cloud: "cloud_b" + name: "peering1" + mode: "accept" + +# Reject vpc peering. +- opentelekomcloud.cloud.vpc_peering_mode: + cloud: "cloud_b" + name: "peering1" + mode: "reject" +''' + + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class VPCPeeringModeModule(OTCModule): + argument_spec = dict( + name=dict(type='str'), + mode=dict(type='str', choices=['accept', 'reject']) + ) + module_kwargs = dict( + supports_check_mode=True + ) + + def run(self): + name = self.params['name'] + + vpc_peering = self.conn.vpc.find_peering(name_or_id=name, ignore_missing=True) + + if not vpc_peering: + self.fail_json(msg='Cannot find requested VPC peering') + + if not vpc_peering.status == 'PENDING_ACCEPTANCE': + self.exit_json( + changed=False, + vpc_peering=vpc_peering + ) + if not self.ansible.check_mode: + self.conn.vpc.set_peering(vpc_peering, self.params['mode']) + self.exit_json( + changed=True, + vpc_peering=vpc_peering + ) + + +def main(): + module = VPCPeeringModeModule() + module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/vpc_route.py b/plugins/modules/vpc_route.py index 738b44a7..64a1e0d4 100644 --- a/plugins/modules/vpc_route.py +++ b/plugins/modules/vpc_route.py @@ -117,24 +117,14 @@ class VPCRouteModule(OTCModule): supports_check_mode=True ) - def _check_route(self, destination, router_id): + def _is_route_exist(self, destination, router_id): query = {} - result = True query['destination'] = destination query['vpc_id'] = router_id - data = [] - for raw in self.conn.vpc.routes(**query): - dt = raw.to_dict() - dt.pop('location') - data.append(dt) - - if data: - result = False - - return result + return len(list(self.conn.vpc.routes(**query))) > 0 def run(self): @@ -157,23 +147,20 @@ def run(self): else: self.fail_json(msg="vpc peering connection ('nexthop') not found") - check = self._check_route(attrs['destination'], attrs['vpc_id']) + route_exists = self._is_route_exist(attrs['destination'], attrs['vpc_id']) + + if route_exists: + self.exit_json(changed=False) if self.ansible.check_mode: - self.exit_json(changed=check) + self.exit_json(changed=True) - if check: - vpc_route = self.conn.vpc.add_route(**attrs) - changed = True - self.exit_json( - changed=changed, - vpc_route=vpc_route - ) + vpc_route = self.conn.vpc.add_route(**attrs) - else: - self.fail_json( - msg="Resource with this destination already exists" - ) + self.exit_json( + changed=True, + vpc_route=vpc_route + ) elif 
self.params['state'] == 'absent': diff --git a/roles/vpc_peering/README.rst b/roles/vpc_peering/README.rst new file mode 100644 index 00000000..bed2bc16 --- /dev/null +++ b/roles/vpc_peering/README.rst @@ -0,0 +1,37 @@ +Configure VPC Peering between 2 routers. + +Role is designed to work best looping over the structure of peering +definitions: +.. code-block:: yaml + + cloud_peerings: + - cloud: "cloud_a" + name: "peering_cloud_a_cloud_b" + local_router: "router_a" + local_project: "project_a" + local_cidr: "192.168.1.0/24" + remote_cloud: "cloud_b" + remote_router: "router_b" + remote_project: "project_b" + remote_cidr: "192.168.2.0/24" + +.. code-block:: yaml + + - hosts: localhost + name: "Manage cloud VPC peerings" + tasks: + - name: Manage VPC Peerings + include_role: + name: opentelekomcloud.cloud.vpc_peering + loop: "{{ cloud_peerings }}" + loop_control: + loop_var: vpcp + +**Role Variables** +cloud_a: Connection to cloud A +local_router: Name or ID of the router on side A +local_project: Name or ID of the project of the side A +local_cidr: CIDR for the route +cloud_b: Connection to the cloud B +remote_router: Name or ID of the router on side B +remote_cidr: CIDR for the route diff --git a/roles/vpc_peering/tasks/main.yaml b/roles/vpc_peering/tasks/main.yaml new file mode 100644 index 00000000..a5f29d69 --- /dev/null +++ b/roles/vpc_peering/tasks/main.yaml @@ -0,0 +1,6 @@ +--- +- include: "provision.yaml" + when: "state != 'absent'" + +- include: "destroy.yaml" + when: "state == 'absent'" diff --git a/roles/vpc_peering/tasks/provision.yaml b/roles/vpc_peering/tasks/provision.yaml new file mode 100644 index 00000000..c231ee66 --- /dev/null +++ b/roles/vpc_peering/tasks/provision.yaml @@ -0,0 +1,58 @@ +--- +# Provision VPC Peerings +- name: Find local project + openstack.cloud.project_info: + cloud: "{{ vpcp.cloud }}" + name: "{{ vpcp.local_project }}" + register: local_project + +- name: Find Local router + openstack.cloud.routers_info: + cloud: "{{ vpcp.cloud }}" + name: "{{ vpcp.local_router }}" + register: local_router + +- name: Find Remote project + openstack.cloud.project_info: + cloud: "{{ vpcp.remote_cloud }}" + name: "{{ vpcp.remote_project }}" + register: remote_project + +- name: Find Remote router + openstack.cloud.routers_info: + cloud: "{{ vpcp.remote_cloud }}" + name: "{{ vpcp.remote_router }}" + register: remote_router + +- name: Create VPC Peering - A side + opentelekomcloud.cloud.vpc_peering: + cloud: "{{ vpcp.cloud }}" + name: "{{ vpcp.name }}" + local_router: "{{ local_router.openstack_routers[0].id }}" + local_project: "{{ local_project.openstack_projects[0].id }}" + remote_router: "{{ remote_router.openstack_routers[0].id }}" + remote_project: "{{ remote_project.openstack_projects[0].id }}" + register: peering_a + +- name: Accept Peering - B side + opentelekomcloud.cloud.vpc_peering_mode: + cloud: "{{ vpcp.remote_cloud }}" + name: "{{ vpcp.name }}" + mode: "accept" + register: peering_b + +- name: Create VPC Peering route - A side + opentelekomcloud.cloud.vpc_route: + cloud: "{{ vpcp.cloud }}" + type: "peering" + router: "{{ local_router.openstack_routers[0].id }}" + destination: "{{ vpcp.remote_cidr }}" + nexthop: "{{ peering_a.vpc_peering.id }}" + +- name: Create VPC Peering route - B side + opentelekomcloud.cloud.vpc_route: + cloud: "{{ vpcp.remote_cloud }}" + type: "peering" + router: "{{ remote_router.openstack_routers[0].id }}" + destination: "{{ vpcp.local_cidr }}" + nexthop: "{{ peering_b.vpc_peering.id }}" diff --git 
a/tests/integration/targets/vpc_peering_info/tasks/main.yaml b/tests/integration/targets/vpc_peering_info_test/tasks/main.yaml similarity index 100% rename from tests/integration/targets/vpc_peering_info/tasks/main.yaml rename to tests/integration/targets/vpc_peering_info_test/tasks/main.yaml diff --git a/tests/integration/targets/vpc_peering/tasks/main.yaml b/tests/integration/targets/vpc_peering_test/tasks/main.yaml similarity index 90% rename from tests/integration/targets/vpc_peering/tasks/main.yaml rename to tests/integration/targets/vpc_peering_test/tasks/main.yaml index 0e31be4e..12b53386 100644 --- a/tests/integration/targets/vpc_peering/tasks/main.yaml +++ b/tests/integration/targets/vpc_peering_test/tasks/main.yaml @@ -77,9 +77,9 @@ opentelekomcloud.cloud.vpc_peering: name: "{{ vpc_peering_name }}" local_router: "{{ test_router_1.router.id }}" - project_id_local: "{{ project_id }}" - peer_router: "{{ test_router_2.router.id }}" - project_id_peer: "{{ project_id }}" + local_project: "{{ project_id }}" + remote_router: "{{ test_router_2.router.id }}" + remote_project: "{{ project_id }}" register: vpc_peering_check check_mode: yes @@ -91,10 +91,10 @@ - name: Create vpc peering opentelekomcloud.cloud.vpc_peering: name: "{{ vpc_peering_name }}" - local_router: "{{ test_router_1.router.name }}" - project_id_local: "{{ project_id }}" - peer_router: "{{ test_router_2.router.name }}" - project_id_peer: "{{ project_id }}" + local_router: "{{ test_router_1.router.id }}" + local_project: "{{ project_id }}" + remote_router: "{{ test_router_2.router.id }}" + remote_project: "{{ project_id }}" register: vpc_peering - name: assert result @@ -107,7 +107,10 @@ opentelekomcloud.cloud.vpc_peering: id: "{{ vpc_peering.vpc_peering.id }}" name: "{{ new_peering_name }}" - description: "New description" + local_router: "{{ test_router_1.router.id }}" + local_project: "{{ project_id }}" + remote_router: "{{ test_router_2.router.id }}" + remote_project: "{{ project_id }}" register: updated_vpc_peering - name: assert result diff --git a/tests/integration/targets/vpc_route_info/tasks/main.yaml b/tests/integration/targets/vpc_route_info_test/tasks/main.yaml similarity index 100% rename from tests/integration/targets/vpc_route_info/tasks/main.yaml rename to tests/integration/targets/vpc_route_info_test/tasks/main.yaml diff --git a/tests/integration/targets/vpc_route/tasks/main.yaml b/tests/integration/targets/vpc_route_test/tasks/main.yaml similarity index 96% rename from tests/integration/targets/vpc_route/tasks/main.yaml rename to tests/integration/targets/vpc_route_test/tasks/main.yaml index a1f81f2b..3cfe056d 100644 --- a/tests/integration/targets/vpc_route/tasks/main.yaml +++ b/tests/integration/targets/vpc_route_test/tasks/main.yaml @@ -76,10 +76,10 @@ - name: Create vpc peering opentelekomcloud.cloud.vpc_peering: name: "{{ vpc_peering_name }}" - local_router: "{{ test_router_1.router.name }}" - project_id_local: "{{ project_id }}" - peer_router: "{{ test_router_2.router.name }}" - project_id_peer: "{{ project_id }}" + local_router: "{{ test_router_1.router.id }}" + local_project: "{{ project_id }}" + remote_router: "{{ test_router_2.router.id }}" + remote_project: "{{ project_id }}" register: vpc_peering - name: Create vpc route -check mode diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 68e55825..8b42ce78 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -47,6 +47,7 @@ plugins/modules/volume_backup_info.py 
validate-modules:missing-gplv3-license plugins/modules/volume_snapshot_info.py validate-modules:missing-gplv3-license plugins/modules/vpc_peering.py validate-modules:missing-gplv3-license plugins/modules/vpc_peering_info.py validate-modules:missing-gplv3-license +plugins/modules/vpc_peering_mode.py validate-modules:missing-gplv3-license plugins/modules/vpc_route_info.py validate-modules:missing-gplv3-license plugins/modules/vpc_route.py validate-modules:missing-gplv3-license plugins/modules/vpn_service_info.py validate-modules:missing-gplv3-license diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index f492e821..07097e67 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -48,6 +48,7 @@ plugins/modules/volume_backup_info.py validate-modules:missing-gplv3-license plugins/modules/volume_snapshot_info.py validate-modules:missing-gplv3-license plugins/modules/vpc_peering.py validate-modules:missing-gplv3-license plugins/modules/vpc_peering_info.py validate-modules:missing-gplv3-license +plugins/modules/vpc_peering_mode.py validate-modules:missing-gplv3-license plugins/modules/vpc_route_info.py validate-modules:missing-gplv3-license plugins/modules/vpc_route.py validate-modules:missing-gplv3-license plugins/modules/vpn_service_info.py validate-modules:missing-gplv3-license From 7bc039b066b55ac1db32758a8a0c0b3f6d2884c3 Mon Sep 17 00:00:00 2001 From: Polina Gubina <33940358+Polina-Gubina@users.noreply.github.com> Date: Thu, 27 May 2021 15:19:51 +0300 Subject: [PATCH 02/65] As quota info module (#110) As quota info module As quota info module Reviewed-by: None Reviewed-by: Tino Schr Reviewed-by: Irina Pereiaslavskaia Reviewed-by: Artem Goncharov --- meta/runtime.yml | 1 + plugins/modules/as_quota_info.py | 114 ++++++++++++++++++ .../targets/as_quota_info/tasks/main.yaml | 14 +++ tests/sanity/ignore-2.10.txt | 1 + tests/sanity/ignore-2.9.txt | 1 + 5 files changed, 131 insertions(+) create mode 100644 plugins/modules/as_quota_info.py create mode 100644 tests/integration/targets/as_quota_info/tasks/main.yaml diff --git a/meta/runtime.yml b/meta/runtime.yml index 6aefb034..aaf5dafa 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -9,6 +9,7 @@ action_groups: - as_instance_info - as_policy - as_policy_info + - as_quota_info - availability_zone_info - cce_cluster - cce_cluster_cert_info diff --git a/plugins/modules/as_quota_info.py b/plugins/modules/as_quota_info.py new file mode 100644 index 00000000..7b8a06a7 --- /dev/null +++ b/plugins/modules/as_quota_info.py @@ -0,0 +1,114 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +DOCUMENTATION = ''' +--- +module: as_quota_info +short_description: Get information about auto scaling quotas +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.8.1" +author: "Polina Gubina (@Polina-Gubina)" +description: + - This module is used to query the total quotas and used quotas of AS \ + groups, AS configurations, bandwidth scaling policies, AS policies, and \ + instances for a specified tenant. +options: + scaling_group: + description: Name or id of an auto scaling group. If set, quota for this group will be outputed. + type: str +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +as_quotas: + description: The auto scaling quota object list. + type: complex + returned: On Success. + contains: + resources: + description: Specifies resources. + type: complex + contains: + type: + description: + - Specifies the quota type. + - Can be 'scaling_Group', 'scaling_Config', 'scaling_Policy' \ + 'sсaling_Instance', 'bandwidth_scaling_policy'. + type: str + used: + description: + - Specifies the used amount of the quota. + - When type is set to scaling_Policy or scaling_Instance, \ + this parameter is reserved, and the system returns -1 as the \ + parameter value. You can query the used quota of AS policies \ + and AS instances in a specified AS group. + type: int + quota: + description: + - Specifies the total quota. + type: int + max: + description: + - Specifies the quota upper limit. + type: int +''' + +EXAMPLES = ''' +# Get as quotas. +- opentelekomcloud.cloud.as_quota_info: + register: as_quotas + +# Get as quotas of a specified AS group. +- opentelekomcloud.cloud.as_quota_info: + scaling_group: "test-group" + register: as_quotas +''' + + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class ASQuotaInfoModule(OTCModule): + argument_spec = dict( + scaling_group=dict(required=False) + ) + + def run(self): + data = [] + + scaling_group_id = None + if self.params['scaling_group']: + try: + scaling_group_id = self.conn.auto_scaling.find_group(self.params['scaling_group'], + ignore_missing=False).id + except self.sdk.exceptions.ResourceNotFound: + self.fail_json(msg="Auto scaling group not found") + + for raw in self.conn.auto_scaling.quotas(group=scaling_group_id): + dt = raw.to_dict() + dt.pop('location') + data.append(dt) + + self.exit_json( + changed=False, + as_quotas=data + ) + + +def main(): + module = ASQuotaInfoModule() + module() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/as_quota_info/tasks/main.yaml b/tests/integration/targets/as_quota_info/tasks/main.yaml new file mode 100644 index 00000000..91212e2a --- /dev/null +++ b/tests/integration/targets/as_quota_info/tasks/main.yaml @@ -0,0 +1,14 @@ +--- +- module_defaults: + opentelekomcloud.cloud.as_quota_info: + cloud: "{{ test_cloud }}" + block: + - name: Get as quota info + opentelekomcloud.cloud.as_quota_info: + register: as_quotas + + - name: assert result + assert: + that: + - as_quotas is success + - as_quotas is not changed diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 8b42ce78..8d6ad9be 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -5,6 +5,7 @@ plugins/modules/as_group_info.py validate-modules:missing-gplv3-license plugins/modules/as_instance_info.py validate-modules:missing-gplv3-license plugins/modules/as_policy.py validate-modules:missing-gplv3-license plugins/modules/as_policy_info.py validate-modules:missing-gplv3-license 
+plugins/modules/as_quota_info.py validate-modules:missing-gplv3-license plugins/modules/availability_zone_info.py validate-modules:missing-gplv3-license plugins/modules/cce_cluster.py validate-modules:missing-gplv3-license plugins/modules/cce_cluster_cert_info.py validate-modules:missing-gplv3-license diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 07097e67..9b2e21f9 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -5,6 +5,7 @@ plugins/modules/as_group_info.py validate-modules:missing-gplv3-license plugins/modules/as_instance_info.py validate-modules:missing-gplv3-license plugins/modules/as_policy.py validate-modules:missing-gplv3-license plugins/modules/as_policy_info.py validate-modules:missing-gplv3-license +plugins/modules/as_quota_info.py validate-modules:missing-gplv3-license plugins/modules/availability_zone_info.py validate-modules:missing-gplv3-license plugins/modules/cce_cluster.py validate-modules:missing-gplv3-license plugins/modules/cce_cluster_node.py validate-modules:missing-gplv3-license From 2d83479b04a06d9c6bea7a3087225f17346660d1 Mon Sep 17 00:00:00 2001 From: Artem Goncharov Date: Thu, 10 Jun 2021 15:37:39 +0200 Subject: [PATCH 03/65] Do not pass as.group.delete_volume unless true (#117) Do not pass as.group.delete_volume unless true specifying auto_scaling.create_group(..delete_volume..) causes error on the service side. For now do not pass it unless it is really set to true. Reviewed-by: None Reviewed-by: OpenTelekomCloud Bot --- plugins/modules/as_group.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/modules/as_group.py b/plugins/modules/as_group.py index 0cee18f2..b05f4695 100644 --- a/plugins/modules/as_group.py +++ b/plugins/modules/as_group.py @@ -456,8 +456,8 @@ def run(self): if self.params['delete_volume']: attrs['delete_volume'] = self.params['delete_volume'] - else: - attrs['delete_volume'] = False +# else: +# attrs['delete_volume'] = False if self.params['cool_down_time']: attrs['cool_down_time'] = self.params['cool_down_time'] From e32be07d24d5000dc77fc7b77525783dadab83f1 Mon Sep 17 00:00:00 2001 From: Artem Goncharov Date: Thu, 10 Jun 2021 15:58:02 +0200 Subject: [PATCH 04/65] Allow not setting wait for rds_instance (#113) Allow not waiting for RDS VPC peering fix Allow not setting wait for rds_instance Reviewed-by: None Reviewed-by: OpenTelekomCloud Bot --- plugins/modules/rds_instance.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/modules/rds_instance.py b/plugins/modules/rds_instance.py index fe4a83bd..e6a96d7b 100644 --- a/plugins/modules/rds_instance.py +++ b/plugins/modules/rds_instance.py @@ -209,7 +209,10 @@ def _system_state_change(self, obj): return False def run(self): - self.params['wait_timeout'] = self.params.pop('timeout') + if self.params['wait']: + self.params['wait_timeout'] = self.params.pop('timeout') + else: + self.params.pop('timeout') name = self.params['name'] changed = False From 4b96c433b86c7a68d7222a2582dd00aa273e280b Mon Sep 17 00:00:00 2001 From: SebastianGode <70581801+SebastianGode@users.noreply.github.com> Date: Fri, 11 Jun 2021 12:36:51 +0200 Subject: [PATCH 05/65] DMS Modules (#100) DMS Modules Reviewed-by: Tino Schr Reviewed-by: None Reviewed-by: None --- plugins/modules/dms_instance.py | 366 ++++++++++++++++++ plugins/modules/dms_instance_info.py | 160 ++++++++ plugins/modules/dms_instance_topic.py | 193 +++++++++ plugins/modules/dms_instance_topic_info.py | 129 ++++++ 
plugins/modules/dms_message.py | 197 ++++++++++ plugins/modules/dms_queue.py | 177 +++++++++ plugins/modules/dms_queue_group.py | 155 ++++++++ plugins/modules/dms_queue_group_info.py | 109 ++++++ plugins/modules/dms_queue_info.py | 105 +++++ tests/integration/targets/dms/tasks/main.yaml | 147 +++++++ tests/sanity/ignore-2.10.txt | 9 + tests/sanity/ignore-2.9.txt | 9 + 12 files changed, 1756 insertions(+) create mode 100644 plugins/modules/dms_instance.py create mode 100644 plugins/modules/dms_instance_info.py create mode 100644 plugins/modules/dms_instance_topic.py create mode 100644 plugins/modules/dms_instance_topic_info.py create mode 100644 plugins/modules/dms_message.py create mode 100644 plugins/modules/dms_queue.py create mode 100644 plugins/modules/dms_queue_group.py create mode 100644 plugins/modules/dms_queue_group_info.py create mode 100644 plugins/modules/dms_queue_info.py create mode 100644 tests/integration/targets/dms/tasks/main.yaml diff --git a/plugins/modules/dms_instance.py b/plugins/modules/dms_instance.py new file mode 100644 index 00000000..46fc6092 --- /dev/null +++ b/plugins/modules/dms_instance.py @@ -0,0 +1,366 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +module: dms_instance +short_description: Manage DMS Instances on Open Telekom Cloud +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.1.2" +author: "Sebastian Gode (@SebastianGode)" +description: + - Manage DMS Instances on Open Telekom Cloud +options: + name: + description: + - Name of the Instance. Can also be ID for deletion. + type: str + required: true + description: + description: + - Description. + type: str + engine: + description: + - Indicates a message engine. + - Required for creation. + type: str + default: kafka + engine_version: + description: + - Indicates the version of the message engine. + - Required for creation. + type: str + default: 2.3.0 + storage_space: + description: + - Indicates the message storage space with increments of 100 GB. + - Required for creation + type: int + access_user: + description: + - Indicates a username. + - Required when ssl_enable is true. + type: str + password: + description: + - Indicates the instance password. + - Required when ssl_enable is true. + type: str + vpc_id: + description: + - Indicates VPC ID. + - Required for creation + type: str + security_group_id: + description: + - Indicates Security Group ID. + - Required for creation + type: str + subnet_id: + description: + - Indicates Network ID. + - Required for creation + type: str + available_zones: + description: + - Indicates ID of an AZ. + - Required for creation + type: list + elements: str + product_id: + description: + - Indicates Product ID. + - Required for creation + type: str + maintain_begin: + description: + - Indicates Beginning of mantenance time window. + - Must be set in pairs with maintain_end + type: str + maintain_end: + description: + - Indicates End of maintenance Window. 
+ - Must be set in pairs with maintain_begin + type: str + ssl_enable: + description: + - Indicates whether to enable SSL-encrypted access to the Instance. + type: bool + default: False + enable_publicip: + description: + - Indicates whether to enable ppublic access to the instance. + type: bool + public_bandwidth: + description: + - Indicates the public network bandwidth. + type: str + retention_policy: + description: + - Indicates the action to be taken when the memory usage reaches the disk capacity threshold. + type: str + storage_spec_code: + description: + - Indicates I/O specification of a Kafka instance. + - Required for creation + type: str + state: + choices: [present, absent] + default: present + description: Instance state + type: str +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +instance: + description: Dictionary of Instance + returned: changed + type: dict + sample: { + "instance": { + "availability_zones": [ + "eu-de-03" + ], + "engine_name": "kafka", + "engine_version": "2.3.0", + "id": "12345678-e7c4-4ba1-8aa2-f2c4eb507c43", + "instance_id": "12345678-e7c4-4ba1-8aa2-f2c4eb507c43", + "name": "aed93756fa3c04e4083c5b48ad6ba6258-instance", + "network_id": "12345678-ca80-4b49-bbbb-85ea9b96f8b3", + "product_id": "00300-30308-0--0", + "router_id": "12345678-dc40-4e3a-95b1-5a0756441e12", + "security_group_id": "12345678-9b1f-4af8-9b53-527ff05c5e12", + "storage": 600, + "storage_spec_code": "dms.physical.storage.ultra" + } + } +''' + +EXAMPLES = ''' +# Create Kafka Instance +- opentelekomcloud.cloud.dms_instance: + name: 'test' + storage_space: '600' + vpc_id: '12345678-dc40-4e3a-95b1-5a0756441e12' + security_group_id: '12345678' + subnet_id: '12345678-ca80-4b49-bbbb-85ea9b96f8b3' + available_zones: ['eu-de-03'] + product_id: '00300-30308-0--0' + storage_spec_code: 'dms.physical.storage.ultra' + +# Delete Kafka Instance +- opentelekomcloud.cloud.dms_instance: + name: 'kafka-c76z' + state: absent + +# Update Kafka Instance +- opentelekomcloud.cloud.dms_instance: + name: 'kafka-s1dd' + description: 'Test' +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class DmsInstanceModule(OTCModule): + argument_spec = dict( + name=dict(required=True), + description=dict(required=False), + engine=dict(required=False, default='kafka'), + engine_version=dict(required=False, default='2.3.0'), + storage_space=dict(required=False, type='int'), + access_user=dict(required=False), + password=dict(required=False), + vpc_id=dict(required=False), + security_group_id=dict(required=False), + subnet_id=dict(required=False), + product_id=dict(required=False), + available_zones=dict(required=False, type='list', elements='str'), + maintain_begin=dict(required=False), + maintain_end=dict(required=False), + ssl_enable=dict(required=False, type='bool', default='False'), + enable_publicip=dict(required=False, type='bool'), + public_bandwidth=dict(required=False), + retention_policy=dict(required=False), + storage_spec_code=dict(required=False), + state=dict(type='str', choices=['present', 'absent'], default='present') + ) + module_kwargs = dict( + supports_check_mode=True + ) + + def run(self): + attrs = {} + instance = self.conn.dms.find_instance(name_or_id=self.params['name'], ignore_missing=True) + + attrs['name'] = self.params['name'] + if self.params['description']: + attrs['description'] = self.params['description'] + if self.params['maintain_begin'] and self.params['maintain_end']: + attrs['maintain_begin'] = 
self.params['maintain_begin'] + attrs['maintain_end'] = self.params['maintain_end'] + elif self.params['maintain_end'] or self.params['maintain_begin']: + self.exit( + changed=False, + failed=True, + message=('Both maintain_end and maintain_begin need to be defined.') + ) + + if self.params['state'] == 'present': + + # Instance creation + if not instance: + + if self.params['engine']: + attrs['engine'] = self.params['engine'] + else: + self.exit( + changed=False, + failed=True, + message=('No engine param provided') + ) + if self.params['engine_version']: + attrs['engine_version'] = self.params['engine_version'] + else: + self.exit( + changed=False, + failed=True, + message=('No engine_version param provided') + ) + if self.params['storage_space']: + attrs['storage_space'] = self.params['storage_space'] + else: + self.exit( + changed=False, + failed=True, + message=('No storage_space param provided') + ) + if self.params['access_user'] and self.params['ssl_enable'] is True: + attrs['access_user'] = self.params['access_user'] + elif self.params['access_user'] and self.params['ssl_enable'] is False: + self.exit( + changed=False, + failed=True, + message=('access_user specified but ssl_enable is false') + ) + if self.params['password'] and self.params['ssl_enable'] is True: + attrs['password'] = self.params['password'] + elif self.params['password'] and self.params['ssl_enable'] is False: + self.exit( + changed=False, + failed=True, + message=('Password specified but ssl_enable is false') + ) + if self.params['vpc_id']: + attrs['vpc_id'] = self.params['vpc_id'] + else: + self.exit( + changed=False, + failed=True, + message=('No vpc_id param provided') + ) + if self.params['security_group_id']: + attrs['security_group_id'] = self.params['security_group_id'] + else: + self.exit( + changed=False, + failed=True, + message=('No security_group_id param provided') + ) + if self.params['subnet_id']: + attrs['subnet_id'] = self.params['subnet_id'] + else: + self.exit( + changed=False, + failed=True, + message=('No subnet_id param provided') + ) + if self.params['available_zones']: + attrs['available_zones'] = self.params['available_zones'] + else: + self.exit( + changed=False, + failed=True, + message=('No available_zones param provided') + ) + if self.params['product_id']: + attrs['product_id'] = self.params['product_id'] + else: + self.exit( + changed=False, + failed=True, + message=('No product_id param provided') + ) + if self.params['ssl_enable'] is True and self.params['password']: + attrs['ssl_enable'] = self.params['ssl_enable'] + elif self.params['ssl_enable'] is True and not self.params['password']: + self.exit( + changed=False, + failed=True, + message=('ssl_enable is true, but no password defined') + ) + if self.params['enable_publicip']: + attrs['enable_publicip'] = self.params['enable_publicip'] + if self.params['public_bandwidth']: + attrs['public_bandwidth'] = self.params['public_bandwidth'] + if self.params['retention_policy']: + attrs['retention_policy'] = self.params['retention_policy'] + if self.params['storage_spec_code']: + attrs['storage_spec_code'] = self.params['storage_spec_code'] + else: + self.exit( + changed=False, + failed=True, + message=('No storage_spec_code param provided') + ) + + if self.ansible.check_mode: + self.exit(changed=True) + instance = self.conn.dms.create_instance(**attrs) + self.exit(changed=True, instance=instance.to_dict()) + + # Instance Modification + elif instance: + if self.params['security_group_id']: + attrs['security_group_id'] = 
self.params['security_group_id'] + + if self.ansible.check_mode: + self.exit(changed=True) + instance = self.conn.dms.update_instance(instance, **attrs) + self.exit(changed=True, instance=instance.to_dict()) + + if self.params['state'] == 'absent': + + # Instance Deletion + if instance: + if self.ansible.check_mode: + self.exit(changed=True) + instance = self.conn.dms.delete_instance(instance) + self.exit(changed=True, instance=instance) + + elif not instance: + self.exit( + changed=False, + failed=True, + message=('No Instance with name or ID %s found') % (self.params['name']) + ) + + +def main(): + module = DmsInstanceModule() + module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/dms_instance_info.py b/plugins/modules/dms_instance_info.py new file mode 100644 index 00000000..ad3f0350 --- /dev/null +++ b/plugins/modules/dms_instance_info.py @@ -0,0 +1,160 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +module: dms_instance_info +short_description: Get info about DMS instances +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.1.2" +author: "Sebastian Gode (@SebastianGode)" +description: + - Get info about DMS instances +options: + engine: + description: + - Name of the Engine + type: str + name: + description: + - Name + type: str + id: + description: + - ID of the Instance + type: str + status: + description: + - Instance Status + type: str + includeFailure: + description: + - Indicates whether to return instances that fail to be created. + type: bool + default: true + exactMatchName: + description: + - Indicates whether to search for the instance that precisely matches a specified instance name. 
+ type: bool + default: false +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +dms_queues: + description: Dictionary of Queue Groups + returned: changed + type: list + sample: [ + { + "access_user": null, + "availability_zones": [ + "12345678" + ], + "charging_mode": 1, + "connect_address": "", + "created_at": "1617183440086", + "description": null, + "engine_name": "kafka", + "engine_version": "2.3.0", + "id": "12345678-003f-4d2a-9f6a-8468f832faea", + "instance_id": "12345678-003f-4d2a-9f6a-8468f832faea", + "is_public": false, + "is_ssl": false, + "kafka_public_status": "false", + "maintenance_end": "02:00:00", + "maintenance_start": "22:00:00", + "max_partitions": 300, + "name": "kafka-4ck1", + "network_id": "12345678-99ee-43aa-9448-6fac41614db6", + "password": null, + "port": 9092, + "product_id": "00300-30308-0--0", + "public_bandwidth": 0, + "retention_policy": "produce_reject", + "router_id": "12345678-6d1d-471e-a911-6924b7ec6ea9", + "router_name": "abcdef", + "security_group_id": "12345678-a836-4dc9-ae59-0aea6dcaf0c3", + "security_group_name": "sg", + "service_type": "advanced", + "spec": "100MB", + "spec_code": "dms.instance.kafka.cluster.c3.mini", + "status": "CREATING", + "storage": 600, + "storage_resource_id": "12345678-f432-4409-8c1b-f1a40fba7408", + "storage_spec_code": "dms.physical.storage.high", + "storage_type": "hec", + "total_storage": 600, + "type": "cluster", + "used_storage": 0, + "user_id": "12345678", + "user_name": "test" + } + ] +''' + +EXAMPLES = ''' +# Query all Instances +- opentelekomcloud.cloud.dms_instance_info: +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class DmsInstanceInfoModule(OTCModule): + argument_spec = dict( + engine=dict(required=False), + name=dict(required=False), + id=dict(required=False), + status=dict(required=False), + includeFailure=dict(required=False, type='bool', default='true'), + exactMatchName=dict(required=False, type='bool', default='false'), + ) + module_kwargs = dict( + supports_check_mode=True + ) + + def run(self): + + data = [] + query = {} + + if self.params['engine']: + query['engine'] = self.params['engine'] + if self.params['name']: + query['name'] = self.params['name'] + if self.params['id']: + query['id'] = self.params['id'] + if self.params['status']: + query['status'] = self.params['status'] + if self.params['includeFailure']: + query['includeFailure'] = self.params['includeFailure'] + if self.params['exactMatchName']: + query['exactMatchName'] = self.params['exactMatchName'] + + for raw in self.conn.dms.instances(**query): + dt = raw.to_dict() + dt.pop('location') + data.append(dt) + self.exit( + changed=False, + dms_instances=data + ) + + +def main(): + module = DmsInstanceInfoModule() + module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/dms_instance_topic.py b/plugins/modules/dms_instance_topic.py new file mode 100644 index 00000000..f85cbb31 --- /dev/null +++ b/plugins/modules/dms_instance_topic.py @@ -0,0 +1,193 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +module: dms_instance_topic +short_description: Manage DMS Instance Topics on Open Telekom Cloud +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.1.2" +author: "Sebastian Gode (@SebastianGode)" +description: + - Manage DMS Instance Topics on Open Telekom Cloud +options: + instance: + description: + - Name or ID of the Instance + type: str + required: true + id: + description: + - Name of the Instance topic to be created. Required for creation. + type: str + required: false + partition: + description: + - Indicates the number of topic partitions, which is used to set the number of concurrently consumed messages. + type: int + required: false + default: 3 + replication: + description: + - Indicates the number of replicas, which is configured to ensure data reliability. + type: int + required: false + default: 3 + sync_replication: + description: + - Indicates whether to enable synchronous replication. + type: bool + required: false + default: false + retention_time: + description: + - Indicates the retention period of a message. + type: int + required: false + default: 72 + sync_message_flush: + description: + - Indicates whether to enable synchronous flushing. + type: bool + required: false + default: false + topics: + description: + - Indicates the list of topics to be deleted. Required for deleting. + type: list + required: false + elements: str + state: + choices: [present, absent] + default: present + description: Instance state + type: str +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +instance: + description: Dictionary of Instance Topics + returned: changed + type: dict + sample: { + "topic": { + "id": "test2", + "location": { + "cloud": "otc", + "project": { + "id": "12345678", + "name": "eu-de" + }, + "region_name": "eu-de", + }, + "partition": 3, + "replication": 3, + "retention_time": 72 + } + } +''' + +EXAMPLES = ''' +# Create Instance Topic +- opentelekomcloud.cloud.dms_instance_topic: + instance: 'test' + id: 'test2' + +# Delete Instance Topics +- opentelekomcloud.cloud.dms_instance_topic: + instance: 'test' + topics: + - 'test1' + - 'test2' + state: absent +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class DmsInstanceTopic(OTCModule): + argument_spec = dict( + instance=dict(required=True), + id=dict(required=False), + topics=dict(required=False, type='list', elements='str'), + partition=dict(required=False, type='int', default=3), + replication=dict(required=False, type='int', default=3), + sync_replication=dict(required=False, type='bool', default=False), + retention_time=dict(required=False, type='int', default=72), + sync_message_flush=dict(required=False, type='bool', default=False), + state=dict(type='str', choices=['present', 'absent'], default='present') + ) + module_kwargs = dict( + supports_check_mode=True + ) + + def run(self): + attrs = {} + instance = self.conn.dms.find_instance(name_or_id=self.params['instance'], ignore_missing=True) + if not instance: + self.exit( + changed=False, + failed=True, + message=('No Instance with name or ID %s found') % (self.params['instance']) + ) + + if self.params['state'] == 'present': + + # Topic creation + if self.params['id']: + attrs['id'] = self.params['id'] + else: + self.exit( + changed=False, + failed=True, + message=('No Topic ID specified, but needed for creation!') + ) + if 
self.params['partition']: + attrs['partition'] = self.params['partition'] + if self.params['replication']: + attrs['replication'] = self.params['replication'] + if self.params['sync_replication']: + attrs['sync_replication'] = self.params['sync_replication'] + if self.params['retention_time']: + attrs['retention_time'] = self.params['retention_time'] + if self.params['sync_message_flush']: + attrs['sync_message_flush'] = self.params['sync_message_flush'] + + if self.ansible.check_mode: + self.exit(changed=True) + topic = self.conn.dms.create_topic(instance, **attrs) + self.exit(changed=True, topic=topic.to_dict()) + + if self.params['state'] == 'absent': + + if self.params['topics']: + # Topic Deletion + if self.ansible.check_mode: + self.exit(changed=True) + topic = self.conn.dms.delete_topic(instance, self.params['topics']) + self.exit(changed=True) + else: + self.exit( + changed=False, + failed=True, + message=('No Topics specified, but needed for deletion!') + ) + + +def main(): + module = DmsInstanceTopic() + module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/dms_instance_topic_info.py b/plugins/modules/dms_instance_topic_info.py new file mode 100644 index 00000000..865515d8 --- /dev/null +++ b/plugins/modules/dms_instance_topic_info.py @@ -0,0 +1,129 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +DOCUMENTATION = ''' +module: dms_instance_topic_info +short_description: Get info about DMS instance topics +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.1.2" +author: "Sebastian Gode (@SebastianGode)" +description: + - Get info about DMS instance topics +options: + instance: + description: + - Name or ID of the instance + type: str + required: true +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +dms_queues: + description: Dictionary of Queue Groups + returned: changed + type: list + sample: [ + { + "access_user": null, + "availability_zones": [ + "12345678" + ], + "charging_mode": 1, + "connect_address": "", + "created_at": "1617183440086", + "description": null, + "engine_name": "kafka", + "engine_version": "2.3.0", + "id": "12345678-003f-4d2a-9f6a-8468f832faea", + "instance_id": "12345678-003f-4d2a-9f6a-8468f832faea", + "is_public": false, + "is_ssl": false, + "kafka_public_status": "false", + "maintenance_end": "02:00:00", + "maintenance_start": "22:00:00", + "max_partitions": 300, + "name": "kafka-4ck1", + "network_id": "12345678-99ee-43aa-9448-6fac41614db6", + "password": null, + "port": 9092, + "product_id": "00300-30308-0--0", + "public_bandwidth": 0, + "retention_policy": "produce_reject", + "router_id": "12345678-6d1d-471e-a911-6924b7ec6ea9", + "router_name": "abcdef", + "security_group_id": "12345678-a836-4dc9-ae59-0aea6dcaf0c3", + "security_group_name": "sg", + "service_type": "advanced", + "spec": "100MB", + "spec_code": "dms.instance.kafka.cluster.c3.mini", + "status": "CREATING", + "storage": 600, + "storage_resource_id": "12345678-f432-4409-8c1b-f1a40fba7408", + "storage_spec_code": "dms.physical.storage.high", + "storage_type": "hec", + "total_storage": 600, + "type": "cluster", + "used_storage": 0, + "user_id": "12345678", + "user_name": "test" + } + ] +''' + +EXAMPLES = ''' +# Query all Instance Topics +- opentelekomcloud.cloud.dms_instance_topic_info: + instance: 'test' +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class DmsInstanceTopicInfoModule(OTCModule): + argument_spec = dict( + instance=dict(required=True), + ) + module_kwargs = dict( + supports_check_mode=True + ) + + def run(self): + + data = [] + instance = self.conn.dms.find_instance(name_or_id=self.params['instance'], ignore_missing=True) + + if instance: + for raw in self.conn.dms.topics(instance): + dt = raw.to_dict() + dt.pop('location') + data.append(dt) + self.exit( + changed=False, + dms_instances=data + ) + else: + self.exit( + changed=False, + failed=True, + message=('No Instance with name or ID %s found') % (self.params['instance']) + ) + + +def main(): + module = DmsInstanceTopicInfoModule() + module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/dms_message.py b/plugins/modules/dms_message.py new file mode 100644 index 00000000..f1136255 --- /dev/null +++ b/plugins/modules/dms_message.py @@ -0,0 +1,197 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +module: dms_message +short_description: Manage DMS Messages on Open Telekom Cloud +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.1.2" +author: "Sebastian Gode (@SebastianGode)" +description: + - Manage DMS Messages on Open Telekom Cloud +options: + queue: + description: + - Name of the Queue. Can also be ID. + type: str + required: true + group: + description: + - Name of the Group. Can also be ID. Required when consuming. + type: str + required: false + messages: + description: + - Messages. + type: list + elements: dict + required: false + max_msgs: + description: + - Max messages to consume. + type: int + required: false + default: 10 + time_wait: + description: + - Time to wait for consuming. + type: int + required: false + default: 3 + ack_wait: + description: + - Time to wait for confirmation. + type: int + required: false + default: 30 + ack: + description: + - Whether to try confirming the consumed messages or not. + type: bool + required: false + default: True + task: + choices: [send, consume] + description: Task to do + type: str + required: True +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +message: + description: Dictionary of DMS Queue + returned: changed + type: dict + sample: { + "message": [ + { + "attributes": {}, + "body": "test2", + "id": "eyJ0b3BpYyI6InEtMTZkNTNhODRhMTNiNDk1MjlkMmUyYzM2N....", + }, + { + "attributes": {}, + "body": "test1", + "id": "eyJ0b3BpYyI6InEtMTZkNTNhODRhMTNiNDk1MjlkMmUyYzM2N...", + } + ] + } +''' + +EXAMPLES = ''' +# Send Message +- opentelekomcloud.cloud.dms_message: + queue: 'queue' + messages: + - body: 'test1' + attributes: + attribute1: 'value1' + attribute2: 'value2' + - body: 'test2' + attributes: + attribute1: 'value3' + attribute2: 'value4' + task: send + +# Consume Message +- opentelekomcloud.cloud.dms_message: + queue: 'queue' + group: 'group' + task: consume + ack: false +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class DmsMessageModule(OTCModule): + argument_spec = dict( + queue=dict(required=True), + group=dict(required=False), + messages=dict(required=False, type='list', elements='dict'), + max_msgs=dict(required=False, type='int', default=10), + time_wait=dict(required=False, type='int', default=3), + ack_wait=dict(required=False, type='int', default=30), + ack=dict(required=False, type='bool', default='True'), + task=dict(type='str', choices=['send', 'consume'], required=True) + ) + module_kwargs = dict( + supports_check_mode=True + ) + + def run(self): + attrs = {} + queue = self.conn.dms.find_queue(name_or_id=self.params['queue']) + if not queue: + self.exit( + changed=False, + failed=True, + message=('No Queue with name or ID %s found') % (self.params['queue_name']) + ) + + if self.params['task'] == 'send': + + attrs['queue'] = queue.id + attrs['messages'] = self.params['messages'] + if self.ansible.check_mode: + self.exit(changed=True) + message = self.conn.dms.send_messages(**attrs) + self.exit(changed=True, message=message) + + if self.params['task'] == 'consume': + + if not self.params['group']: + self.exit( + changed=False, + failed=True, + message=('No Group name or ID specified') + ) + queue_group = self.conn.dms.find_group(queue=queue, name_or_id=self.params['group'], ignore_missing=True) + if not queue_group: + self.exit( + changed=False, + failed=True, + message=('No 
Queue-Group with name or ID %s found') % (self.params['group']) + ) + attrs['queue'] = queue.id + attrs['group'] = queue_group.id + attrs['max_msgs'] = self.params['max_msgs'] + attrs['time_wait'] = self.params['time_wait'] + attrs['ack_wait'] = self.params['ack_wait'] + if self.ansible.check_mode: + self.exit(changed=True) + response = [] + for message in self.conn.dms.consume_message(**attrs): + dt = message.to_dict() + response.append(dt) + + if self.params['ack'] is False: + self.exit(changed=True, message=response) + else: + messages = { + 'handler': response[0]['id'], + 'status': 'success' + } + result = self.conn.dms.ack_message(queue=queue, group=queue_group, messages=messages) + response.append(result) + self.exit(changed=True, message=response) + + +def main(): + module = DmsMessageModule() + module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/dms_queue.py b/plugins/modules/dms_queue.py new file mode 100644 index 00000000..31d97366 --- /dev/null +++ b/plugins/modules/dms_queue.py @@ -0,0 +1,177 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +module: dms_queue +short_description: Manage DMS Queues on Open Telekom Cloud +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.1.2" +author: "Sebastian Gode (@SebastianGode)" +description: + - Manage DMS Queues on Open Telekom Cloud +options: + name: + description: + - Name of the Queue. Can also be ID for deletion. + type: str + required: true + queue_mode: + description: + - Indicates the queue type. + type: str + default: NORMAL + description: + description: + - Description. + type: str + redrive_policy: + description: + - This parameter specifies whether to enable dead letter messages. + - Dead letter messages are messages that cannot be normally consumed. + - This parameter is valid only when queue_mode is set to NORMAL or FIFO. + type: str + default: disable + max_consume_count: + description: + - Indicates the maximum number of allowed message consumption failures. + - This parameter is mandatory only when redrive_policy is set to enable. + type: int + retention_hours: + description: + - Indicates the hours of storing messages in the Kafka queue. + - This parameter is valid only when queue_mode is set to KAFKA_HA or KAFKA_HT. 
+ type: int + state: + choices: [present, absent] + default: present + description: Instance state + type: str +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +deh_host: + description: Dictionary of DMS Queue + returned: changed + type: dict + sample: { + "queue": { + "created": null, + "description": null, + "id": "c28ff35a-dbd4-460a-a30d-cf31a6013eb0", + "location": { + "cloud": "otc", + "project": { + "domain_id": null, + "domain_name": null, + "id": "16d53a84a13b49529d2e2c3646691288", + "name": "eu-de" + }, + "region_name": "eu-de", + "zone": null + }, + "max_consume_count": null, + "name": "test-queue", + "queue_mode": "NORMAL", + "redrive_policy": "disable", + "retention_hours": null + } + } +''' + +EXAMPLES = ''' +# Create Queue +- opentelekomcloud.cloud.dms_queue: + name: 'test-queue' + state: present + +# Delete Queue +- opentelekomcloud.cloud.dms_queue: + name: 'test-queue' + state: absent +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class DmsQueueModule(OTCModule): + argument_spec = dict( + name=dict(required=True), + queue_mode=dict(required=False, default='NORMAL'), + description=dict(required=False), + redrive_policy=dict(required=False, default='disable'), + max_consume_count=dict(required=False, type='int'), + retention_hours=dict(required=False, type='int'), + state=dict(type='str', choices=['present', 'absent'], default='present') + ) + module_kwargs = dict( + supports_check_mode=True + ) + + def run(self): + attrs = {} + queue = self.conn.dms.find_queue(name_or_id=self.params['name']) + + if self.params['state'] == 'present': + + # Queue creation + if not queue: + attrs['name'] = self.params['name'] + if self.params['queue_mode']: + attrs['queue_mode'] = self.params['queue_mode'] + if self.params['description']: + attrs['description'] = self.params['description'] + if self.params['redrive_policy']: + attrs['redrive_policy'] = self.params['redrive_policy'] + if self.params['max_consume_count']: + attrs['max_consume_count'] = self.params['max_consume_count'] + if self.params['retention_hours']: + attrs['retention_hours'] = self.params['retention_hours'] + + if self.ansible.check_mode: + self.exit(changed=True) + queue = self.conn.dms.create_queue(**attrs) + self.exit(changed=True, queue=queue.to_dict()) + + # Queue Modification - not possible + elif queue: + self.exit( + changed=False, + failed=True, + message=('A Queue with this name already exists. Aborting') + ) + + if self.params['state'] == 'absent': + + # Queue Deletion + if queue: + if self.ansible.check_mode: + self.exit(changed=True) + queue = self.conn.dms.delete_queue(queue=queue.id) + self.exit(changed=True) + + elif not queue: + self.exit( + changed=False, + failed=True, + message=('No Queue with name or ID %s found') % (self.params['name']) + ) + + +def main(): + module = DmsQueueModule() + module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/dms_queue_group.py b/plugins/modules/dms_queue_group.py new file mode 100644 index 00000000..3e16a385 --- /dev/null +++ b/plugins/modules/dms_queue_group.py @@ -0,0 +1,155 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +module: dms_queue_group +short_description: Manage DMS Queue-Groups on Open Telekom Cloud +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.1.2" +author: "Sebastian Gode (@SebastianGode)" +description: + - Manage DMS Queue-Groups on Open Telekom Cloud +options: + queue_name: + description: + - Name of the Queue. Can also be ID. + type: str + required: true + group_name: + description: + - Name of the Group. + type: str + required: true + state: + choices: [present, absent] + default: present + description: Instance state + type: str +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +deh_host: + description: Dictionary of DMS Queue + returned: changed + type: dict + sample: { + "group": { + "available_deadletters": null, + "available_messages": null, + "consumed_messages": null, + "id": "g-8f271ad2-ec43-4d6f-b9f0-ff060b864f85", + "location": { + "cloud": "otc", + "project": { + "domain_id": null, + "domain_name": null, + "id": "16d53a84a13b49529d2e2c3646691288", + "name": "eu-de" + }, + "region_name": "eu-de", + "zone": null + }, + "name": "group_test", + "produced_deadletters": null, + "produced_messages": null, + "queue_id": "e4508dbd-75ba-4199-970e-b1efdb1f4503" + } + } +''' + +EXAMPLES = ''' +# Create Queue-Group +- opentelekomcloud.cloud.dms_queue_group: + queue_name: 'test-queue' + group_name: 'test-group' + state: present + +# Delete Queue-Group +- opentelekomcloud.cloud.dms_queue_group: + queue_name: 'test-queue' + group_name: 'test-group' + state: absent +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class DmsQueueModule(OTCModule): + argument_spec = dict( + queue_name=dict(required=True), + group_name=dict(required=True), + state=dict(type='str', choices=['present', 'absent'], default='present') + ) + module_kwargs = dict( + supports_check_mode=True + ) + + def run(self): + attrs = {} + queue = self.conn.dms.find_queue(name_or_id=self.params['queue_name']) + if not queue: + self.exit( + changed=False, + failed=True, + message=('No Queue with name or ID %s found') % (self.params['queue_name']) + ) + queue_group = self.conn.dms.find_group(queue=queue, name_or_id=self.params['group_name'], ignore_missing=True) + + if self.params['state'] == 'present': + + # Queue-Group creation + if not queue_group: + attrs['queue'] = queue.id + attrs['name'] = self.params['group_name'] + + if self.ansible.check_mode: + self.exit(changed=True) + group = self.conn.dms.create_group(**attrs) + self.exit(changed=True, group=group) + + # Queue-Group Modification - not possible + elif queue: + self.exit( + changed=False, + failed=True, + message=('A Queue-Group with this name already exists. 
Aborting') + ) + + if self.params['state'] == 'absent': + + # Queue-Group Deletion + if queue_group: + attrs['queue'] = queue.id + attrs['group'] = queue_group.id + + if self.ansible.check_mode: + self.exit(changed=True) + queue = self.conn.dms.delete_group(**attrs) + self.exit(changed=True) + + elif not queue_group: + self.exit( + changed=False, + failed=True, + message=('No Queue-Group with name or ID %s found') % (self.params['name']) + ) + + +def main(): + module = DmsQueueModule() + module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/dms_queue_group_info.py b/plugins/modules/dms_queue_group_info.py new file mode 100644 index 00000000..8241f9fa --- /dev/null +++ b/plugins/modules/dms_queue_group_info.py @@ -0,0 +1,109 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +module: dms_queue_group_info +short_description: Get info about DMS queue groups +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.1.2" +author: "Sebastian Gode (@SebastianGode)" +description: + - Get info about DMS queue groups +options: + queue: + description: + - Name or ID of a target queue. Leave it empty to query all queues. + type: str + required: true + include_deadletter: + description: + - Indicates whether to list dead letter parameters in the response message. 
+ type: bool + required: false + default: false +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +dms_queues: + description: Dictionary of Queue Groups + returned: changed + type: list + sample: [ + { + "available_deadletters": 0, + "available_messages": 0, + "consumed_messages": 0, + "id": "g-12345678-b770-4ace-83c2-28800b7a4ecc", + "name": "group-123456754", + "produced_deadletters": 0, + "produced_messages": 0 + } + ] +''' + +EXAMPLES = ''' +# Query a single DMS Queue Group +- opentelekomcloud.cloud.dms_queue_group_info: + queue: 'test-test' + include_deadletter: true + register: dms-queue +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class DmsQueueInfoModule(OTCModule): + argument_spec = dict( + queue=dict(required=True), + include_deadletter=dict(required=False, type='bool', default='false') + ) + module_kwargs = dict( + supports_check_mode=True + ) + + def run(self): + + data = [] + query = {} + + queue = self.conn.dms.find_queue( + name_or_id=self.params['queue'] + ) + if self.params['include_deadletter']: + query['include_deadletter'] = self.params['include_deadletter'] + if queue: + for raw in self.conn.dms.groups(queue.id, **query): + dt = raw.to_dict() + dt.pop('location') + data.append(dt) + self.exit( + changed=False, + dms_queues=data + ) + else: + self.exit( + changed=False, + failed=True, + message=('No Queue found with ID or Name: %s' % + self.params['queue']) + ) + + +def main(): + module = DmsQueueInfoModule() + module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/dms_queue_info.py b/plugins/modules/dms_queue_info.py new file mode 100644 index 00000000..515c24aa --- /dev/null +++ b/plugins/modules/dms_queue_info.py @@ -0,0 +1,105 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +module: dms_queue_info +short_description: Get info about DMS queues +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.1.2" +author: "Sebastian Gode (@SebastianGode)" +description: + - Get info about DMS queues +options: + queue: + description: + - Name or ID of a target queue. Leave it empty to query all queues. 
+ type: str + required: false +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +dms_queues: + description: Dictionary of Queues + returned: changed + type: list + sample: [ + { + "created": 1517385090349, + "description": "", + "id": "12345678-73e4-449f-a157-53d5d9900e21", + "max_consume_count": null, + "name": "test-test", + "queue_mode": "NORMAL", + "redrive_policy": null, + "retention_hours": null + } + ] +''' + +EXAMPLES = ''' +# Query a single DMS Queue +- opentelekomcloud.cloud.dms_queue_info: + queue: 'test-test' + register: dms-queue +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class DmsQueueInfoModule(OTCModule): + argument_spec = dict( + queue=dict(required=False) + ) + module_kwargs = dict( + supports_check_mode=True + ) + + def run(self): + + data = [] + + if self.params['queue']: + queue = self.conn.dms.find_queue( + name_or_id=self.params['queue'] + ) + if queue: + dt = queue.to_dict() + dt.pop('location') + data.append(dt) + else: + self.exit( + changed=False, + failed=True, + message=('No Queue found with ID or Name: %s' % + self.params['queue']) + ) + else: + for raw in self.conn.dms.queues(): + dt = raw.to_dict() + dt.pop('location') + data.append(dt) + + self.exit( + changed=False, + dms_queues=data + ) + + +def main(): + module = DmsQueueInfoModule() + module() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/dms/tasks/main.yaml b/tests/integration/targets/dms/tasks/main.yaml new file mode 100644 index 00000000..108d99d5 --- /dev/null +++ b/tests/integration/targets/dms/tasks/main.yaml @@ -0,0 +1,147 @@ +- name: Doing Integration test + block: + - name: Set random prefix + set_fact: + prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" + + - name: Set facts + set_fact: + queue_name: "{{ ( 'a' + prefix + '-queue' ) }}" + group_name: "{{ ( 'group_test' ) }}" + instance_name: "{{ ( 'a' + prefix + '-instance' ) }}" + network_name: "{{ ( prefix + '-dmsnetwork' )}}" + subnet_name: "{{ ( prefix + '-dmssubnet' )}}" + router_name: "{{ ( prefix + '-dmsrouter' )}}" + sg_name: "{{ ( prefix + '-dmssg' )}}" + + - name: DMS Queue + opentelekomcloud.cloud.dms_queue: + name: '{{ queue_name }}' + state: present + register: dms_queue + check_mode: false + + - name: DMS Queue Group + opentelekomcloud.cloud.dms_queue_group: + queue_name: '{{ queue_name }}' + group_name: '{{ group_name }}' + state: present + register: dms_queue_group + check_mode: false + + - name: List DMS Queues + opentelekomcloud.cloud.dms_queue_info: + queue: '{{ queue_name }}' + register: zone_net + + - name: List DMS Queue Group Info + opentelekomcloud.cloud.dms_queue_group_info: + queue: '{{ queue_name }}' + include_deadletter: true + register: zone_net + + - name: Send Messages + opentelekomcloud.cloud.dms_message: + queue: '{{ queue_name }}' + messages: + - body: 'test1' + attributes: + attribute1: 'value1' + attribute2: 'value2' + - body: 'test2' + attributes: + attribute1: 'value3' + attribute2: 'value4' + task: send + register: dms_mess_send + + - name: List DMS Queue Group Info + opentelekomcloud.cloud.dms_queue_group_info: + queue: '{{ queue_name }}' + include_deadletter: true + register: zone_net + + - name: Consume Messages + opentelekomcloud.cloud.dms_message: + queue: '{{ queue_name }}' + group: '{{ group_name }}' + task: consume + ack: false + register: dms_mess_cons + + - name: List DMS Instance Info + opentelekomcloud.cloud.dms_instance_info: + register: dms_instance + + - name: Create 
network for DMS Instance + openstack.cloud.network: + name: "{{ network_name }}" + state: present + register: dms_net + + - name: Create subnet for DMS Instance + openstack.cloud.subnet: + name: "{{ subnet_name }}" + state: present + network_name: "{{ dms_net.network.name }}" + cidr: "192.168.110.0/24" + dns_nameservers: "{{ ['100.125.4.25', '8.8.8.8'] }}" + register: dms_subnet + + - name: Create Router for DMS Instance + openstack.cloud.router: + name: "{{ router_name }}" + state: present + network: admin_external_net + enable_snat: false + interfaces: + - net: "{{ dms_net.network.name }}" + subnet: "{{ dms_subnet.subnet.name }}" + register: dms_router + + - name: Create Security Group for DMS Instance + openstack.cloud.security_group: + name: "{{ sg_name }}" + register: dms_sg + + always: + - block: + - name: DMS Delete Queue Group + opentelekomcloud.cloud.dms_queue_group: + queue_name: '{{ queue_name }}' + group_name: '{{ group_name }}' + state: absent + register: dms_queue_group_rm + check_mode: false + + - name: Delete Queue + opentelekomcloud.cloud.dms_queue: + name: '{{ queue_name }}' + state: absent + register: dms_queue_rm + check_mode: false + + - name: Delete Security Group + openstack.cloud.security_group: + name: "{{ sg_name }}" + state: absent + register: dms_sg + + - name: Drop existing Router + openstack.cloud.router: + name: "{{ router_name }}" + state: absent + register: dns_rout_dr + + - name: Drop existing subnet + openstack.cloud.subnet: + name: "{{ subnet_name }}" + state: absent + register: dns_subnet_dr + + - name: Drop existing network + openstack.cloud.network: + name: "{{ network_name }}" + state: absent + register: dns_net_dr + ignore_errors: yes diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 8d6ad9be..867ddcfe 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -20,6 +20,15 @@ plugins/modules/ces_metrics_info.py validate-modules:missing-gplv3-license plugins/modules/ces_quotas_info.py validate-modules:missing-gplv3-license plugins/modules/cce_cluster_node_info.py validate-modules:missing-gplv3-license plugins/modules/cce_cluster_node.py validate-modules:missing-gplv3-license +plugins/modules/dms_instance.py validate-modules:missing-gplv3-license +plugins/modules/dms_instance_info.py validate-modules:missing-gplv3-license +plugins/modules/dms_instance_topic.py validate-modules:missing-gplv3-license +plugins/modules/dms_instance_topic_info.py validate-modules:missing-gplv3-license +plugins/modules/dms_message.py validate-modules:missing-gplv3-license +plugins/modules/dms_queue.py validate-modules:missing-gplv3-license +plugins/modules/dms_queue_group.py validate-modules:missing-gplv3-license +plugins/modules/dms_queue_info.py validate-modules:missing-gplv3-license +plugins/modules/dms_queue_group_info.py validate-modules:missing-gplv3-license plugins/modules/dns_floating_ip.py validate-modules:missing-gplv3-license plugins/modules/dns_recordset.py validate-modules:missing-gplv3-license plugins/modules/dns_zone.py validate-modules:missing-gplv3-license diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 9b2e21f9..9af90d53 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -20,6 +20,15 @@ plugins/modules/ces_metric_data_info.py validate-modules:missing-gplv3-license plugins/modules/ces_metrics_info.py validate-modules:missing-gplv3-license plugins/modules/ces_quotas_info.py validate-modules:missing-gplv3-license plugins/modules/cce_cluster_node_info.py 
validate-modules:missing-gplv3-license +plugins/modules/dms_instance.py validate-modules:missing-gplv3-license +plugins/modules/dms_instance_info.py validate-modules:missing-gplv3-license +plugins/modules/dms_instance_topic.py validate-modules:missing-gplv3-license +plugins/modules/dms_instance_topic_info.py validate-modules:missing-gplv3-license +plugins/modules/dms_message.py validate-modules:missing-gplv3-license +plugins/modules/dms_queue.py validate-modules:missing-gplv3-license +plugins/modules/dms_queue_group.py validate-modules:missing-gplv3-license +plugins/modules/dms_queue_info.py validate-modules:missing-gplv3-license +plugins/modules/dms_queue_group_info.py validate-modules:missing-gplv3-license plugins/modules/dns_floating_ip.py validate-modules:missing-gplv3-license plugins/modules/dns_recordset.py validate-modules:missing-gplv3-license plugins/modules/dns_zone.py validate-modules:missing-gplv3-license From 7f0578ca2839b3a7b2a0f34676f930fab9660bc9 Mon Sep 17 00:00:00 2001 From: Tino Schr Date: Fri, 11 Jun 2021 12:47:59 +0200 Subject: [PATCH 06/65] cce fixes for network and data vols (#115) cce fixes for network and data vols Reviewed-by: kucerakk Reviewed-by: Tino Schr Reviewed-by: Rodion Gyrbu Reviewed-by: None --- plugins/modules/cce_cluster_node.py | 24 ++- plugins/modules/cce_node_pool.py | 10 +- .../targets/cce_cluster_node/aliases | 1 + .../targets/cce_cluster_node/tasks/main.yaml | 162 ++++++++++++++++++ .../targets/cce_lifecycle/tasks/main.yaml | 28 ++- 5 files changed, 206 insertions(+), 19 deletions(-) create mode 100644 tests/integration/targets/cce_cluster_node/aliases create mode 100644 tests/integration/targets/cce_cluster_node/tasks/main.yaml diff --git a/plugins/modules/cce_cluster_node.py b/plugins/modules/cce_cluster_node.py index c23acfd9..8dd3dd02 100644 --- a/plugins/modules/cce_cluster_node.py +++ b/plugins/modules/cce_cluster_node.py @@ -31,6 +31,7 @@ description: - CCE cluster name or id which hosts the cce cluster node type: str + required: true count: description: - Cluster node count which will be created. @@ -61,7 +62,7 @@ k8s_tags: description: Dictionary of Kubernetes tags. type: dict - keypair: + ssh_key: description: Name of the public key to login type: str labels: @@ -78,6 +79,10 @@ - Name of the CCE cluster node. required: true type: str + network: + description: + - Network ID of the CCE cluster node. + type: str node_image_id: description: ID of a custom image used in a baremetall scenario. 
type: str @@ -274,8 +279,12 @@ cluster: "{{ cluster_name_or_id }}" count: 1 data_volumes: - - SATA: 150 - - SAS: 100 + - volumetype: 'SATA' + size: 100 + encrypted: False + cmk_id: '' + - volumetype: 'SAS' + size: 120 flavor: 's2.large.2' k8s_tags: testtag: 'value' @@ -284,6 +293,7 @@ mein: 'label' max_pods: 16 name: "{{ cce_node_name }}" + network: '25d24fc8-d019-4a34-9fff-0a09fde6a123' os: 'CentOS 7.7' root_volume_size: 40 root_volume_type: SATA @@ -309,7 +319,7 @@ class CceClusterNodeModule(OTCModule): argument_spec = dict( annotations=dict(required=False, type='dict'), availability_zone=dict(required=False), - cluster=dict(required=False), + cluster=dict(required=True), count=dict(required=False, type='int', default=1), data_volumes=dict( required=False, @@ -322,11 +332,12 @@ class CceClusterNodeModule(OTCModule): flavor=dict(required=False), floating_ip=dict(required=False), k8s_tags=dict(required=False, type='dict'), - keypair=dict(required=False), + ssh_key=dict(required=False), labels=dict(required=False, type='dict'), lvm_config=dict(required=False), max_pods=dict(required=False, type='int'), name=dict(required=True), + network=dict(required=False), node_image_id=dict(required=False), offload_node=dict(required=False, type='bool'), os=dict(required=False), @@ -345,7 +356,8 @@ class CceClusterNodeModule(OTCModule): module_kwargs = dict( required_if=[ ('state', 'present', - ['availability_zone', 'cluster', 'flavor', 'keypair', 'data_volumes']), + ['availability_zone', 'cluster', 'flavor', 'ssh_key', + 'data_volumes', 'network']), ('state', 'absent', ['cluster', 'name']), ], supports_check_mode=True diff --git a/plugins/modules/cce_node_pool.py b/plugins/modules/cce_node_pool.py index 6716c92f..2b5b06fe 100644 --- a/plugins/modules/cce_node_pool.py +++ b/plugins/modules/cce_node_pool.py @@ -67,7 +67,7 @@ - Name of the CCE Node Pool required: true type: str - network_id: + network: description: - ID of the network to which the CCE node pool belongs to. 
type: str @@ -248,7 +248,7 @@ flavor: s2.large.2 os: 'CentOS 7.7' name: my-nodepool - network_id: '25d24fc8-d019-4a34-9fff-0a09fde6a123' + network: '25d24fc8-d019-4a34-9fff-0a09fde6a123' ssh_key: 'ssh-pub' state: present register: pool @@ -274,7 +274,7 @@ min_node_count: 1 max_node_count: 3 name: test-ansible2 - network_id: '25d24fc8-d019-4a34-9fff-0a09fde6a123' + network: '25d24fc8-d019-4a34-9fff-0a09fde6a123' priority: 2 os: 'CentOS 7.7' scale_down_cooldown_time: 5 @@ -329,7 +329,7 @@ class CceNodePoolModule(OTCModule): max_node_count=dict(required=False, type='int'), max_pods=dict(required=False, type='int'), name=dict(required=True), - network_id=dict(required=False), + network=dict(required=False), node_image_id=dict(required=False), os=dict(required=False), postinstall_script=dict(required=False), @@ -356,7 +356,7 @@ class CceNodePoolModule(OTCModule): 'flavor', 'os', 'name', - 'network_id', + 'network', 'ssh_key' ]), ('state', 'absent', ['cluster', 'name']), diff --git a/tests/integration/targets/cce_cluster_node/aliases b/tests/integration/targets/cce_cluster_node/aliases new file mode 100644 index 00000000..7a68b11d --- /dev/null +++ b/tests/integration/targets/cce_cluster_node/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/cce_cluster_node/tasks/main.yaml b/tests/integration/targets/cce_cluster_node/tasks/main.yaml new file mode 100644 index 00000000..6ad23c3a --- /dev/null +++ b/tests/integration/targets/cce_cluster_node/tasks/main.yaml @@ -0,0 +1,162 @@ +--- +- module_defaults: + opentelekomcloud.cloud.cce_cluster: + cloud: "{{ test_cloud }}" + openstack.cloud.network: + cloud: "{{ test_cloud }}" + openstack.cloud.subnet: + cloud: "{{ test_cloud }}" + openstack.cloud.router: + cloud: "{{ test_cloud }}" + openstack.cloud.keypair: + cloud: "{{ test_cloud }}" + opentelekomcloud.cloud.cce_cluster_node: + cloud: "{{ test_cloud }}" + vars: + prefix: scenario00a- + block: + - name: Set random prefix + set_fact: + prefix: "{{ (prefix + (99999999 | random | to_uuid | hash('md5'))) }}" + + - name: Set initial facts + set_fact: + keypair_name: "{{ ( prefix + '-key') }}" + network_name: "{{ ( prefix + '-test-network') }}" + subnet_name: "{{ ( prefix + '-test-subnet') }}" + router_name: "{{ ( prefix + '-test-router') }}" + cce_cluster_name: "{{ ( 'z-' + prefix + '-acc-test') }}" + cce_flavor: "cce.s1.small" + container_network_mode: "overlay_l2" + cce_node_name: "{{ ( 'z-' + prefix + '-node') }}" + + - name: Create network for test + openstack.cloud.network: + name: "{{ network_name }}" + state: present + register: test_network + + - name: Create subnet for test + openstack.cloud.subnet: + name: "{{ subnet_name }}" + state: present + network_name: "{{ test_network.network.name }}" + cidr: "192.168.0.0/24" + dns_nameservers: "{{ ['100.125.4.25', '8.8.8.8'] }}" + register: test_subnet + + - name: Create router for test + openstack.cloud.router: + name: "{{ router_name }}" + state: present + network: admin_external_net + enable_snat: true + interfaces: + - net: "{{ test_network.network.name }}" + subnet: "{{ test_subnet.subnet.name }}" + register: test_router + + - name: Create CCE Cluster + opentelekomcloud.cloud.cce_cluster: + name: "{{ cce_cluster_name }}" + flavor: "{{ cce_flavor }}" + description: "Ansible collection test" + router: "{{ router_name }}" + network: "{{ network_name }}" + container_network_mode: "{{ container_network_mode }}" + wait: true + register: cluster + + - name: assert result + assert: + that: + - cluster is success + - cluster is changed + + 
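+    # The following task exercises the updated cce_cluster_node interface from
+    # this patch: data_volumes entries are dicts with 'volumetype' and 'size',
+    # the key pair is passed via 'ssh_key' (formerly 'keypair'), and the node
+    # subnet is selected with the new 'network' parameter.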
- name: Keypair creation + openstack.cloud.keypair: + state: present + name: "{{ keypair_name }}" + register: ssh_key + + - name: assert result + assert: + that: + - ssh_key is success + - ssh_key is changed + + - name: Create CCE Cluster Node + opentelekomcloud.cloud.cce_cluster_node: + annotations: + annotation1: 'abc' + availability_zone: 'eu-de-01' + cluster: "{{ cce_cluster_name }}" + count: 1 + data_volumes: + - volumetype: 'SATA' + size: 100 + encrypted: false + cmk_id: '' + - volumetype: 'SAS' + size: 120 + flavor: 's2.large.2' + k8s_tags: + testtag: 'value' + ssh_key: "{{ keypair_name }}" + labels: + mein: 'label' + max_pods: 16 + name: "{{ cce_node_name }}" + network: "{{ test_network.network.name }}" + os: 'CentOS 7.7' + root_volume_size: 40 + root_volume_type: SATA + tags: + - key: 'key1' + value: 'value1' + - key: 'key2' + value: 'value2' + wait: true + state: present + register: node + + - name: assert result + assert: + that: + - node is success + - node is changed + + always: + - block: + # Cleanup + - name: Drop CCE cluster node + opentelekomcloud.cloud.cce_cluster_node: + cluster: "{{ cce_cluster_name }}" + name: "{{ cce_node_name }}" + state: "absent" + + - name: Drop keypair + openstack.cloud.keypair: + name: "{{ keypair_name }}" + state: "absent" + + - name: Drop cluster + opentelekomcloud.cloud.cce_cluster: + name: "{{ cce_cluster_name }}" + state: "absent" + + - name: Drop router + openstack.cloud.router: + name: "{{ router_name }}" + state: absent + + - name: Drop subnet + openstack.cloud.subnet: + name: "{{ subnet_name }}" + state: absent + + - name: Drop network + openstack.cloud.network: + name: "{{ network_name }}" + state: absent + ignore_errors: yes diff --git a/tests/integration/targets/cce_lifecycle/tasks/main.yaml b/tests/integration/targets/cce_lifecycle/tasks/main.yaml index bbb25465..49ec4074 100644 --- a/tests/integration/targets/cce_lifecycle/tasks/main.yaml +++ b/tests/integration/targets/cce_lifecycle/tasks/main.yaml @@ -96,24 +96,29 @@ - keypair is success - keypair.key.private_key is defined - - name: Create CCE cluster node 1, reserved for cluster deletion + - name: Create CCE Cluster Node opentelekomcloud.cloud.cce_cluster_node: annotations: - annotation1: 'annoval1' - availability_zone: 'eu-de-02' + annotation1: 'abc' + availability_zone: 'eu-de-01' cluster: "{{ cce_cluster_name }}" count: 1 data_volumes: - - SATA: 150 - - SAS: 100 + - volumetype: 'SATA' + size: 100 + encrypted: false + cmk_id: '' + - volumetype: 'SAS' + size: 120 flavor: "{{ cce_node_flavor }}" k8s_tags: - testk8stag: 'value1' - keypair: "{{ keypair_name }}" + testtag: 'value' + ssh_key: "{{ keypair_name }}" labels: - my: 'label' + mein: 'label' max_pods: 16 name: "{{ cce_node_name }}" + network: "{{ network_name }}" root_volume_size: 40 root_volume_type: SATA tags: @@ -122,8 +127,15 @@ - key: 'key2' value: 'value2' wait: true + state: present register: node + - name: assert result + assert: + that: + - node is success + - node is changed + - name: assert result assert: that: From 10ce9aa71a213f7857f73234b9d53ad201905525 Mon Sep 17 00:00:00 2001 From: Artem Goncharov Date: Fri, 11 Jun 2021 14:52:50 +0200 Subject: [PATCH 07/65] Add role metadata (#118) Add role metadata Galaxy starts compaining about absence of the role data. Add it to keep it happy. 
Reviewed-by: None Reviewed-by: OpenTelekomCloud Bot --- roles/vpc_peering/README.rst | 42 ++++++++++++++++++++++++++------- roles/vpc_peering/meta/main.yml | 10 ++++++++ 2 files changed, 44 insertions(+), 8 deletions(-) create mode 100644 roles/vpc_peering/meta/main.yml diff --git a/roles/vpc_peering/README.rst b/roles/vpc_peering/README.rst index bed2bc16..c3dab04f 100644 --- a/roles/vpc_peering/README.rst +++ b/roles/vpc_peering/README.rst @@ -1,5 +1,35 @@ +opentelekomcloud.cloud.vpc_peering +================================== + Configure VPC Peering between 2 routers. +Requirements +------------ + +Python packages: + - openstacksdk + - otcextensions + +Ansible collections: + - openstack.cloud + - opentelekomcloud.cloud + +Role Variables +-------------- + +cloud_a: Connection to cloud A +local_router: Name or ID of the router on side A +local_project: Name or ID of the project of the side A +local_cidr: CIDR for the route +cloud_b: Connection to the cloud B +remote_router: Name or ID of the router on side B +remote_cidr: CIDR for the route + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + Role is designed to work best looping over the structure of peering definitions: .. code-block:: yaml @@ -27,11 +57,7 @@ definitions: loop_control: loop_var: vpcp -**Role Variables** -cloud_a: Connection to cloud A -local_router: Name or ID of the router on side A -local_project: Name or ID of the project of the side A -local_cidr: CIDR for the route -cloud_b: Connection to the cloud B -remote_router: Name or ID of the router on side B -remote_cidr: CIDR for the route +License +------- + +Apache-2.0 diff --git a/roles/vpc_peering/meta/main.yml b/roles/vpc_peering/meta/main.yml new file mode 100644 index 00000000..08b7db3c --- /dev/null +++ b/roles/vpc_peering/meta/main.yml @@ -0,0 +1,10 @@ +galaxy_info: + author: Artem Goncharov + description: Manage VPC Peering between 2 VPCs in the Open Telekom Cloud + company: Open Telekom Cloud + + license: Apache-2.0 + + min_ansible_version: 2.10 + +dependencies: [] From f681aaeb8f6a64e0658bb5e996404db2f778a590 Mon Sep 17 00:00:00 2001 From: Artem Goncharov Date: Mon, 14 Jun 2021 17:01:40 +0200 Subject: [PATCH 08/65] move rst to md in role readme (#119) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit move rst to md in role readme Galaxy still complains about role readme file. Try moving it to md. Reviewed-by: None Reviewed-by: OpenTelekomCloud Bot --- roles/vpc_peering/{README.rst => README.md} | 20 +++++++------------- roles/vpc_peering/tasks/destroy.yaml | 1 + 2 files changed, 8 insertions(+), 13 deletions(-) rename roles/vpc_peering/{README.rst => README.md} (85%) create mode 100644 roles/vpc_peering/tasks/destroy.yaml diff --git a/roles/vpc_peering/README.rst b/roles/vpc_peering/README.md similarity index 85% rename from roles/vpc_peering/README.rst rename to roles/vpc_peering/README.md index c3dab04f..b818d01e 100644 --- a/roles/vpc_peering/README.rst +++ b/roles/vpc_peering/README.md @@ -1,10 +1,8 @@ -opentelekomcloud.cloud.vpc_peering -================================== +# opentelekomcloud.cloud.vpc_peering Configure VPC Peering between 2 routers. 
-Requirements ------------- +## Requirements Python packages: - openstacksdk @@ -14,8 +12,7 @@ Ansible collections: - openstack.cloud - opentelekomcloud.cloud -Role Variables --------------- +## Role Variables cloud_a: Connection to cloud A local_router: Name or ID of the router on side A @@ -27,13 +24,12 @@ remote_cidr: CIDR for the route A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. -Example Playbook ----------------- +## Example Playbook Role is designed to work best looping over the structure of peering definitions: -.. code-block:: yaml + # Inventory cloud_peerings: - cloud: "cloud_a" name: "peering_cloud_a_cloud_b" @@ -45,8 +41,7 @@ definitions: remote_project: "project_b" remote_cidr: "192.168.2.0/24" -.. code-block:: yaml - + # playbook - hosts: localhost name: "Manage cloud VPC peerings" tasks: @@ -57,7 +52,6 @@ definitions: loop_control: loop_var: vpcp -License -------- +## License Apache-2.0 diff --git a/roles/vpc_peering/tasks/destroy.yaml b/roles/vpc_peering/tasks/destroy.yaml new file mode 100644 index 00000000..ed97d539 --- /dev/null +++ b/roles/vpc_peering/tasks/destroy.yaml @@ -0,0 +1 @@ +--- From 25c0bb3a2872bb390eb4ad37113d92b42b914284 Mon Sep 17 00:00:00 2001 From: SebastianGode <70581801+SebastianGode@users.noreply.github.com> Date: Tue, 29 Jun 2021 12:52:06 +0200 Subject: [PATCH 09/65] Tox Script + Sphinx for ansible documentation (#116) Tox Script + Sphinx for ansible documentation Introduce documentation building: introduce doc/source/... with the initial skeleton content tox -e docs can be used locally to see how documentation will look like general ansible-collection-docs job is doing the real rendering return galaxy.yml from from being template to allow using antsibull (this is anyway required to allow installation from git directly) Depends-on: opentelekomcloud-infra/otc-zuul-jobs#68 Reviewed-by: None Reviewed-by: None Reviewed-by: Artem Goncharov --- .gitignore | 2 + doc/requirements.txt | 1 + doc/source/antiddos.rst | 9 ++++ doc/source/as.rst | 15 ++++++ doc/source/cce.rst | 13 +++++ doc/source/ces.rst | 13 +++++ doc/source/conf.py | 54 +++++++++++++++++++ doc/source/deh.rst | 10 ++++ doc/source/dns.rst | 10 ++++ doc/source/ecs.rst | 7 +++ doc/source/elb.rst | 19 +++++++ doc/source/index.rst | 32 +++++++++++ doc/source/misc.rst | 10 ++++ doc/source/nat.rst | 13 +++++ doc/source/rds.rst | 12 +++++ doc/source/volume.rst | 10 ++++ doc/source/vpc.rst | 8 +++ doc/source/vpc_peering.rst | 13 +++++ doc/source/waf.rst | 10 ++++ docs/guidelines.rst | 0 galaxy.yml.in => galaxy.yml | 2 +- plugins/modules/dns_zones.py | 1 - test-requirements.txt | 1 - tests/integration/targets/dns/tasks/main.yaml | 24 ++++----- tox.ini | 38 +++++++++---- 25 files changed, 302 insertions(+), 25 deletions(-) create mode 100644 doc/requirements.txt create mode 100644 doc/source/antiddos.rst create mode 100644 doc/source/as.rst create mode 100644 doc/source/cce.rst create mode 100644 doc/source/ces.rst create mode 100644 doc/source/conf.py create mode 100644 doc/source/deh.rst create mode 100644 doc/source/dns.rst create mode 100644 doc/source/ecs.rst create mode 100644 doc/source/elb.rst create mode 100644 doc/source/index.rst create mode 100644 doc/source/misc.rst create mode 100644 doc/source/nat.rst create mode 100644 doc/source/rds.rst create mode 100644 doc/source/volume.rst create mode 100644 doc/source/vpc.rst create mode 100644 
doc/source/vpc_peering.rst create mode 100644 doc/source/waf.rst delete mode 100644 docs/guidelines.rst rename galaxy.yml.in => galaxy.yml (98%) delete mode 120000 plugins/modules/dns_zones.py diff --git a/.gitignore b/.gitignore index ad56d0dd..c3148aaf 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,5 @@ importer_result.json **.swp *.tar.gz +doc/build +tmp diff --git a/doc/requirements.txt b/doc/requirements.txt new file mode 100644 index 00000000..1439b2c2 --- /dev/null +++ b/doc/requirements.txt @@ -0,0 +1 @@ +otcdocstheme # Ansible-2.0 diff --git a/doc/source/antiddos.rst b/doc/source/antiddos.rst new file mode 100644 index 00000000..bee88425 --- /dev/null +++ b/doc/source/antiddos.rst @@ -0,0 +1,9 @@ +Anti-DDoS Modules +================= + +.. toctree:: + :maxdepth: 1 + + anti_ddos_fip_statuses_info + anti_ddos_optional_policies_info + diff --git a/doc/source/as.rst b/doc/source/as.rst new file mode 100644 index 00000000..dedf3ea5 --- /dev/null +++ b/doc/source/as.rst @@ -0,0 +1,15 @@ +Auto Scaling Modules +==================== + +.. toctree:: + :maxdepth: 1 + + as_config + as_config_info + as_group + as_group_info + as_instance_info + as_policy + as_policy_info + as_quota + diff --git a/doc/source/cce.rst b/doc/source/cce.rst new file mode 100644 index 00000000..be5aefc6 --- /dev/null +++ b/doc/source/cce.rst @@ -0,0 +1,13 @@ +CCE Modules +=========== + +.. toctree:: + :maxdepth: 1 + + cce_cluster + cce_cluster_cert_info + cce_cluster_info + cce_cluster_node + cce_cluster_node_info + cce_node_pool + cce_node_pool_info diff --git a/doc/source/ces.rst b/doc/source/ces.rst new file mode 100644 index 00000000..a1bc01c2 --- /dev/null +++ b/doc/source/ces.rst @@ -0,0 +1,13 @@ +Cloud Eye Service Modules +========================= + +.. toctree:: + :maxdepth: 1 + + ces_alarms + ces_alarms_info + ces_event_data_info + ces_metric_data_info + ces_metrics_info + ces_quotas_info + diff --git a/doc/source/conf.py b/doc/source/conf.py new file mode 100644 index 00000000..8e4ee6cd --- /dev/null +++ b/doc/source/conf.py @@ -0,0 +1,54 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + +# -- Project information ----------------------------------------------------- + +project = 'Open Telekom Cloud Ansible Modules Documentation' +copyright = '2021, Open Telekom Cloud' +author = 'Open Telekom Cloud' + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'otcdocstheme', + 'sphinx.ext.autodoc', + 'sphinx.ext.intersphinx', + 'sphinx_antsibull_ext' +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. 
+# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = [] + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'otcdocs' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". diff --git a/doc/source/deh.rst b/doc/source/deh.rst new file mode 100644 index 00000000..21920c6e --- /dev/null +++ b/doc/source/deh.rst @@ -0,0 +1,10 @@ +Dedicated Host Service Modules +============================== + +.. toctree:: + :maxdepth: 1 + + deh_host + deh_host_info + deh_host_type_info + deh_server_info diff --git a/doc/source/dns.rst b/doc/source/dns.rst new file mode 100644 index 00000000..d19a364a --- /dev/null +++ b/doc/source/dns.rst @@ -0,0 +1,10 @@ +Domain Name Service Modules +=========================== + +.. toctree:: + :maxdepth: 1 + + dns_floating_ip + dns_recordset + dns_zone + diff --git a/doc/source/ecs.rst b/doc/source/ecs.rst new file mode 100644 index 00000000..ab794b08 --- /dev/null +++ b/doc/source/ecs.rst @@ -0,0 +1,7 @@ +Compute (ECS) Modules +===================== + +.. toctree:: + :maxdepth: 1 + + server_group_info diff --git a/doc/source/elb.rst b/doc/source/elb.rst new file mode 100644 index 00000000..86202fc9 --- /dev/null +++ b/doc/source/elb.rst @@ -0,0 +1,19 @@ +Elastic Load Balancing Modules +============================== + +.. toctree:: + :maxdepth: 1 + + lb_certificate + lb_certificate_info + lb_healthmonitor + lb_healthmonitor_info + lb_listener + lb_listener_info + lb_member + lb_member_info + lb_pool + lb_pool_info + loadbalancer + loadbalancer_info + diff --git a/doc/source/index.rst b/doc/source/index.rst new file mode 100644 index 00000000..fe365d25 --- /dev/null +++ b/doc/source/index.rst @@ -0,0 +1,32 @@ +Opentelekomcloud.Cloud +====================== + +Collection version 0.9.0 + + +Plugin Index +------------ + +These are the plugins in the opentelekomcloud.cloud collection + + +.. toctree:: + :maxdepth: 1 + + antiddos + as + cce + ces + deh + dns + ecs + elb + misc + nat + rds + volume + vpc + vpc_peering + waf + + diff --git a/doc/source/misc.rst b/doc/source/misc.rst new file mode 100644 index 00000000..fcc4326a --- /dev/null +++ b/doc/source/misc.rst @@ -0,0 +1,10 @@ +Other Modules +============= + +.. toctree:: + :maxdepth: 1 + + availability_zone_info + object_info + tag + diff --git a/doc/source/nat.rst b/doc/source/nat.rst new file mode 100644 index 00000000..1800bc7f --- /dev/null +++ b/doc/source/nat.rst @@ -0,0 +1,13 @@ +NAT Gateway Modules +=================== + +.. toctree:: + :maxdepth: 1 + + nat_dnat_rule + nat_dnat_rule_info + nat_gateway + nat_gateway_info + nat_snat_rule + nat_snat_rule_info + diff --git a/doc/source/rds.rst b/doc/source/rds.rst new file mode 100644 index 00000000..9380f60e --- /dev/null +++ b/doc/source/rds.rst @@ -0,0 +1,12 @@ +Relational Database Service Modules +=================================== + +.. toctree:: + :maxdepth: 1 + + rds_backup + rds_backup_info + rds_datastore_info + rds_flavor_info + rds_instance + rds_instance_info diff --git a/doc/source/volume.rst b/doc/source/volume.rst new file mode 100644 index 00000000..7ea6a94b --- /dev/null +++ b/doc/source/volume.rst @@ -0,0 +1,10 @@ +Volume (EVS) Modules +==================== + +.. 
toctree:: + :maxdepth: 1 + + volume_backup + volume_backup_info + volume_snapshot_info + diff --git a/doc/source/vpc.rst b/doc/source/vpc.rst new file mode 100644 index 00000000..1ea0a4f8 --- /dev/null +++ b/doc/source/vpc.rst @@ -0,0 +1,8 @@ +Networking (VPC) Modules +======================== + +.. toctree:: + :maxdepth: 1 + + floating_ip + security_group_info diff --git a/doc/source/vpc_peering.rst b/doc/source/vpc_peering.rst new file mode 100644 index 00000000..3e0e47ce --- /dev/null +++ b/doc/source/vpc_peering.rst @@ -0,0 +1,13 @@ +VPC Peering Modules +=================== + +.. toctree:: + :maxdepth: 1 + + vpc_peering + vpc_peering_info + vpc_peering_mode + vpc_route + vpc_route_info + vpn_service_info + diff --git a/doc/source/waf.rst b/doc/source/waf.rst new file mode 100644 index 00000000..e3912121 --- /dev/null +++ b/doc/source/waf.rst @@ -0,0 +1,10 @@ +WAF Modules +=========== + +.. toctree:: + :maxdepth: 1 + + waf_certificate + waf_certificate_info + waf_domain + waf_domain_info diff --git a/docs/guidelines.rst b/docs/guidelines.rst deleted file mode 100644 index e69de29b..00000000 diff --git a/galaxy.yml.in b/galaxy.yml similarity index 98% rename from galaxy.yml.in rename to galaxy.yml index a58e3cf7..3ec632ae 100644 --- a/galaxy.yml.in +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: opentelekomcloud name: cloud -version: 0.0.1 +version: 0.9.0 readme: README.md authors: - Artem Goncharov diff --git a/plugins/modules/dns_zones.py b/plugins/modules/dns_zones.py deleted file mode 120000 index f4e4bb7c..00000000 --- a/plugins/modules/dns_zones.py +++ /dev/null @@ -1 +0,0 @@ -../modules/dns_zone.py \ No newline at end of file diff --git a/test-requirements.txt b/test-requirements.txt index af84bba4..f6c4ac5a 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -7,5 +7,4 @@ voluptuous yamllint rstcheck ruamel.yaml -tox otcextensions>=0.10.0 diff --git a/tests/integration/targets/dns/tasks/main.yaml b/tests/integration/targets/dns/tasks/main.yaml index 7f09672d..0f9597e0 100644 --- a/tests/integration/targets/dns/tasks/main.yaml +++ b/tests/integration/targets/dns/tasks/main.yaml @@ -93,7 +93,7 @@ that: - dns_fl_ch is success - dns_fl_ch is changed - + - name: Updating a dns_floating_ip entry opentelekomcloud.cloud.dns_floating_ip: floating_ip: "{{ fl_ip }}" @@ -113,7 +113,7 @@ - dns_fl.ptr.description is defined - name: Creating a public DNS Zone - check mode - opentelekomcloud.cloud.dns_zones: + opentelekomcloud.cloud.dns_zone: name: "{{ zone_public_name }}" state: present check_mode: true @@ -126,7 +126,7 @@ - dns_zo_ch is changed - name: Creating a public DNS Zone - opentelekomcloud.cloud.dns_zones: + opentelekomcloud.cloud.dns_zone: name: "{{ zone_public_name }}" state: present register: dns_zo @@ -142,7 +142,7 @@ - dns_zo.zone is defined - name: Updating a public DNS Zone - check mode - opentelekomcloud.cloud.dns_zones: + opentelekomcloud.cloud.dns_zone: name: "{{ zone_public_name }}" state: present description: "{{ description }}" @@ -156,7 +156,7 @@ - dns_zo_ch is changed - name: Updating a public DNS Zone - opentelekomcloud.cloud.dns_zones: + opentelekomcloud.cloud.dns_zone: name: "{{ zone_public_name }}" state: present description: "{{ description }}" @@ -173,7 +173,7 @@ - dns_zo.zone.description is defined - name: Creating a DNS private Zone - check mode - opentelekomcloud.cloud.dns_zones: + opentelekomcloud.cloud.dns_zone: name: "{{ zone_private_name }}" router: "{{ router_name }}" zone_type: "private" @@ -188,7 +188,7 @@ - dns_zo_pr_ch is changed - name: 
Creating a DNS private Zone - opentelekomcloud.cloud.dns_zones: + opentelekomcloud.cloud.dns_zone: name: "{{ zone_private_name }}" router: "{{ router_name }}" zone_type: "private" @@ -206,7 +206,7 @@ - dns_zo_pr.zone is defined - name: Updating a private DNS Zone - check mode - opentelekomcloud.cloud.dns_zones: + opentelekomcloud.cloud.dns_zone: name: "{{ zone_private_name }}" state: present description: "{{ description }}" @@ -220,7 +220,7 @@ - dns_zo_pr_ch is changed - name: Updating a private DNS Zone - opentelekomcloud.cloud.dns_zones: + opentelekomcloud.cloud.dns_zone: name: "{{ zone_private_name }}" state: present description: "{{ description }}" @@ -332,13 +332,13 @@ register: dns_rs_dr - name: Drop DNS public Zone - opentelekomcloud.cloud.dns_zones: + opentelekomcloud.cloud.dns_zone: name: "{{ zone_public_name }}" state: absent register: dns_zo_pu_dr - name: Drop DNS private Zone - opentelekomcloud.cloud.dns_zones: + opentelekomcloud.cloud.dns_zone: name: "{{ zone_private_name }}" zone_type: "private" state: absent @@ -368,4 +368,4 @@ name: "{{ network_name }}" state: absent register: dns_net_dr - ignore_errors: yes \ No newline at end of file + ignore_errors: yes diff --git a/tox.ini b/tox.ini index 6d1983a6..516e072a 100644 --- a/tox.ini +++ b/tox.ini @@ -1,24 +1,23 @@ [tox] -minversion = 3.1 +minversion = 3.6 envlist = pep8 skipsdist = True ignore_basepython_conflict = True [testenv] +usedevelop = True skip_install = True basepython = python3 +install_command = python -m pip install {opts} {packages} --upgrade pip passenv = OS_* pip: PIP_INSTALL setenv = - VIRTUAL_ENV={envdir} + # VIRTUAL_ENV={envdir} LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=C - OS_LOG_CAPTURE={env:OS_LOG_CAPTURE:true} - OS_STDOUT_CAPTURE={env:OS_STDOUT_CAPTURE:true} - OS_STDERR_CAPTURE={env:OS_STDERR_CAPTURE:true} - pip: PIP_INSTALL={env:PIP_INSTALL:true} + # pip: PIP_INSTALL={env:PIP_INSTALL:true} deps = -r{toxinidir}/test-requirements.txt pip: {toxinidir} @@ -26,6 +25,28 @@ deps = commands = stestr run {posargs} stestr slowest +[testenv:docs] +deps = + -r{toxinidir}/doc/requirements.txt + # requirements below are only required for local execution + ansible-base # ansible is required by antsibull + antsibull>=0.33.0 # antsibull in zuul is installed by the job + sphinx>=4.0.0 # Sphinx in zuul is installed by the job +setenv = + ANSIBLE_COLLECTIONS_PATH={toxinidir}/../../../ +allowlist_externals = + mkdir + cp +commands = + # WARNING OF A DIRTY HACK + mkdir -m 700 -p tmp + antsibull-docs collection --use-current --squash-hierarchy --dest-dir tmp opentelekomcloud.cloud + # copy static local content on top of generated + cp -av doc/source/ tmp + # copy resulting content back to Sphinx location + #cp -av tmp/ doc/source + sphinx-build -W -d doc/build/doctrees --keep-going -b html tmp doc/build/html + [testenv:pep8] commands = flake8 @@ -41,7 +62,6 @@ deps = ansible-base commands = - python {toxinidir}/tools/build.py ansible --version ansible-galaxy collection build --force {toxinidir} --output-path {toxinidir}/build_artifact @@ -64,13 +84,11 @@ deps = [testenv:sanity] passenv = * commands = - python {toxinidir}/tools/build.py {toxinidir}/tests/utils/sanity.sh opentelekomcloud cloud [testenv:units] passenv = * commands = - python {toxinidir}/tools/build.py {toxinidir}/tests/utils/units.sh opentelekomcloud cloud [testenv:functional] @@ -91,7 +109,7 @@ commands = {posargs} # modules unchanged and then clean them in subsequent patches. 
ignore = W503,H4,E501,E402,H301 show-source = True -exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,ansible_collections +exclude=.venv,.git,.tox,dist,doc,tests/output,*lib/python*,*egg,build,ansible_collections [testenv:ansible] # Need to pass some env vars for the Ansible playbooks From 1c745b50532d34960ea8030a91056d7a9997bf1d Mon Sep 17 00:00:00 2001 From: SebastianGode <70581801+SebastianGode@users.noreply.github.com> Date: Tue, 13 Jul 2021 10:39:41 +0200 Subject: [PATCH 10/65] antsibull-changelog - config&directories (#124) antsibull-changelog - config&directories I added changelog directory with the config file. And I added fragments subdirectory. Any correctly created yaml file in the fragments directory will be picked up by antsibull-changelog generate and will be added to changelog.yaml in changelog folder and CHANGELOG.rst in main directory (can be adjusted) Reviewed-by: None Reviewed-by: Artem Goncharov --- changelogs/config.yaml | 29 +++++++++++++++++++++++++++++ changelogs/fragments/.keep | 0 changelogs/fragments/initialize.yml | 2 ++ 3 files changed, 31 insertions(+) create mode 100644 changelogs/config.yaml create mode 100644 changelogs/fragments/.keep create mode 100644 changelogs/fragments/initialize.yml diff --git a/changelogs/config.yaml b/changelogs/config.yaml new file mode 100644 index 00000000..e9f653d4 --- /dev/null +++ b/changelogs/config.yaml @@ -0,0 +1,29 @@ +changelog_filename_template: ../CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +keep_fragments: false +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +sections: +- - major_changes + - Major Changes +- - minor_changes + - Minor Changes +- - breaking_changes + - Breaking Changes +- - deprecated_features + - Deprecated Features +- - removed_features + - Removed Features (previously deprecated) +- - security_fixes + - Security Fixes +- - bugfixes + - Bugfixes +- - known_issues + - Known Issues +title: opentelekomcloud.cloud +trivial_section_name: trivial diff --git a/changelogs/fragments/.keep b/changelogs/fragments/.keep new file mode 100644 index 00000000..e69de29b diff --git a/changelogs/fragments/initialize.yml b/changelogs/fragments/initialize.yml new file mode 100644 index 00000000..6a725045 --- /dev/null +++ b/changelogs/fragments/initialize.yml @@ -0,0 +1,2 @@ +major_changes: +- initializing changelog handling From f63591d6f7c713303d70d5ae0254d3d304403eea Mon Sep 17 00:00:00 2001 From: YustinaKvr <62885041+YustinaKvr@users.noreply.github.com> Date: Thu, 29 Jul 2021 10:29:33 +0300 Subject: [PATCH 11/65] Dds datastore info (#126) Dds datastore info Reviewed-by: Anton Sidelnikov Reviewed-by: None --- plugins/modules/dds_datastore_info.py | 83 +++++++++++++++++++ .../dds_datastore_info/tasks/main.yaml | 16 ++++ tests/sanity/ignore-2.10.txt | 1 + tests/sanity/ignore-2.9.txt | 1 + 4 files changed, 101 insertions(+) create mode 100644 plugins/modules/dds_datastore_info.py create mode 100644 tests/integration/targets/dds_datastore_info/tasks/main.yaml diff --git a/plugins/modules/dds_datastore_info.py b/plugins/modules/dds_datastore_info.py new file mode 100644 index 00000000..f65c19a1 --- /dev/null +++ b/plugins/modules/dds_datastore_info.py @@ -0,0 +1,83 @@ +#!/usr/bin/python +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT 
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions, +# limitations under the License. + +DOCUMENTATION = ''' +module: dds_datastore_info +short_description: Obtain database version information about a specified type of a DB instance. +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.9.0" +author: "Yustina Kvrivishvili (@YustinaKvr)" +description: + - Get datastore info +options: + datastore_name: + description: + - Specifies the database type. DDS Community Edition is supported. + type: str + required: true +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +datastores: + description: Info about datastore. + returned: On Success + type: complex + contains: + storage_engine: + description: Storage engine. + type: str + type: + description: Datastore type. + type: str + version: + description: Datastore version. + type: str +''' + +EXAMPLES = ''' +# Get info about datastore +- opentelekomcloud.cloud.dds_datastore_info: + datastore_name: "test_ds" + register: result +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class DDSDatastoreInfo(OTCModule): + argument_spec = dict( + datastore_name=dict(required=True) + ) + module_kwargs = dict( + supports_check_mode=True + ) + + def run(self): + datastore_name = self.params['datastore_name'] + + data = [] + for raw in self.conn.dds.datastores(datastore_name): + dt = raw.to_dict() + dt.pop('location') + dt.pop('id') + dt.pop('name') + data.append(dt) + + self.exit( + changed=False, + datastores=data + ) + + +def main(): + module = DDSDatastoreInfo() + module() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/dds_datastore_info/tasks/main.yaml b/tests/integration/targets/dds_datastore_info/tasks/main.yaml new file mode 100644 index 00000000..d8d99941 --- /dev/null +++ b/tests/integration/targets/dds_datastore_info/tasks/main.yaml @@ -0,0 +1,16 @@ +--- +- module_defaults: + opentelekomcloud.cloud.dds_datastore_info: + cloud: "{{ test_cloud }}" + block: + - name: Get info about datastore + opentelekomcloud.cloud.dds_datastore_info: + datastore_name: "DDS-Community" + register: result + + - name: assert result + assert: + that: + - result is success + - result is not changed + - result.datastores is defined diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 867ddcfe..eb5852a7 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -36,6 +36,7 @@ plugins/modules/deh_host.py validate-modules:missing-gplv3-license plugins/modules/deh_host_info.py validate-modules:missing-gplv3-license plugins/modules/deh_host_type_info.py validate-modules:missing-gplv3-license plugins/modules/deh_server_info.py validate-modules:missing-gplv3-license +plugins/modules/dds_datastore_info.py validate-modules:missing-gplv3-license plugins/modules/loadbalancer.py validate-modules:missing-gplv3-license plugins/modules/loadbalancer_info.py validate-modules:missing-gplv3-license plugins/modules/nat_gateway.py validate-modules:missing-gplv3-license diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 9af90d53..defa30d4 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -36,6 +36,7 @@ plugins/modules/deh_host.py validate-modules:missing-gplv3-license plugins/modules/deh_host_info.py validate-modules:missing-gplv3-license plugins/modules/deh_host_type_info.py 
validate-modules:missing-gplv3-license plugins/modules/deh_server_info.py validate-modules:missing-gplv3-license +plugins/modules/dds_datastore_info.py validate-modules:missing-gplv3-license plugins/modules/floating_ip.py validate-modules:missing-gplv3-license plugins/modules/loadbalancer.py validate-modules:missing-gplv3-license plugins/modules/loadbalancer_info.py validate-modules:missing-gplv3-license From 6331f905a94f955a1ce8df2abd74b3602e99da8c Mon Sep 17 00:00:00 2001 From: Irina Pereiaslavskaia <63649585+irina-pereiaslavskaia@users.noreply.github.com> Date: Thu, 29 Jul 2021 16:43:32 +0300 Subject: [PATCH 12/65] AS Group refactoring (#127) AS Group refactoring Refactoring AS Group module, adding "force_delete" logic, adding new functional tests. Reviewed-by: Polina Gubina Reviewed-by: None Reviewed-by: Anton Sidelnikov --- plugins/modules/as_group.py | 1308 ++++++++++++----- .../targets/as_group/tasks/main.yaml | 197 ++- .../targets/as_instance_info/tasks/main.yaml | 8 +- .../targets/as_policy/tasks/main.yaml | 8 +- .../targets/as_policy_info/tasks/main.yaml | 8 +- 5 files changed, 1181 insertions(+), 348 deletions(-) diff --git a/plugins/modules/as_group.py b/plugins/modules/as_group.py index b05f4695..1fcd0c53 100644 --- a/plugins/modules/as_group.py +++ b/plugins/modules/as_group.py @@ -14,52 +14,65 @@ DOCUMENTATION = ''' --- module: as_group -short_description: Create/Remove AutoScaling group from the OTC +short_description: Create/Update/Remove AutoScaling group from the OTC extends_documentation_fragment: opentelekomcloud.cloud.otc version_added: "0.2.0" -author: "Polina Gubina (@Polina-Gubina)" +author: + - "Polina Gubina (@Polina-Gubina)" + - "Irina Pereiaslavskaia (@irina-pereiaslavskaia)" description: - - Create/Remove AutoScaling group from the OTC. + - Create/Update/Remove AutoScaling group from the OTC. options: - scaling_group_name: + scaling_group: description: - - Name of the AS group. - - Mandatory for creating autoscaling group. - type: str - scaling_group_id: - description: - - ID the AS group. - type: str + - Name or ID of the AS Group. + required: true + type: dict + suboptions: + id: + description: + - Specifies the AS Group ID. + - Mandatory for updating and deleting AS Group. + type: str + name: + description: + - Specifies the AS Group name. + - Mandatory for creating AS Group. + type: str scaling_configuration: description: - The AS configuration ID or name. type: str desire_instance_number: description: - - Specifies the expected number of instances. The default value is the minimum number of instances. + - Specifies the expected number of instances. + - The default value is the minimum number of instances. type: int min_instance_number: description: - - Specifies the minimum number of instances. The default value is 0. - - Default is 0. + - Specifies the minimum number of instances. + - The default value is 0. type: int max_instance_number: description: - - Specifies the maximum number of instances. The default value is 0. - - Default is 0. + - Specifies the maximum number of instances. + - The default value is 0. type: int cool_down_time: description: - - Specifies the cooldown period (in seconds). The value ranges from 0 to 86400 and is 300 by default. - - After a scaling action is triggered, the system starts the cooldown period. During the cooldown period,\ - scaling actions triggered by alarms will be denied. Scheduled, periodic,\ - and manual scaling actions are not affected. - - Default is 300. + - Specifies the cooldown period (in seconds). 
+ - The value ranges from 0 to 86400 and is 300 by default. + - After a scaling action is triggered, the system starts the cooldown \ + period. During the cooldown period, scaling actions triggered by alarms \ + will be denied. Scheduled, periodic, and manual scaling actions are not \ + affected. type: int + default: 300 lb_listener: description: - - Specifies ID or name of a classic load balancer listener. The system supports the binding of up\ - to six load balancer listeners, the IDs of which are separated using a comma (,). + - Specifies ID or name of a classic load balancer listener. The system \ + supports the binding of up to six load balancer listeners, the IDs of \ + which are separated using a comma (,). - Mandatory when 'lbaas_listeners' is not specified. type: str lbaas_listeners: @@ -72,134 +85,167 @@ pool_id: description: - Specifies the backend ECS group ID. - - Mandatory. type: str required: true protocol_port: description: - - Specifies the backend protocol ID, which is the port on which \ - a backend ECS listens for traffic. The port ID ranges from 1 to 65535. - - Mandatory. + - Specifies the backend protocol ID, which is the port on which a \ + backend ECS listens for traffic. The port ID ranges from 1 to 65535. type: int required: true weight: description: - - Specifies the weight, which determines the portion\ - of requests a backend ECS processes when being compared to other \ - backend ECSs added to the same listener. - - Mandatory. + - Specifies the weight, which determines the portion of requests a \ + backend ECS processes when being compared to other backend ECSs \ + added to the same listener. type: int required: true - available_zones: + availability_zones: description: - - Specifies the AZ information. The ECS associated with a scaling action will be created in a specified AZ.\ - If you do not specify an AZ, the system automatically specifies one. + - Specifies the AZ information. The ECS associated with a scaling \ + action will be created in a specified AZ.If you do not specify an AZ, \ + the system automatically specifies one. type: list elements: str networks: description: - - Specifies network information. The system supports up to five subnets. The first subnet transferred\ - serves as the primary NIC of the ECS by default. - - Mandatory for creation of autoscaling group. + - Specifies network information. The system supports up to five subnets.\ + The first subnet transferred serves as the primary NIC of the ECS by \ + default. + - Mandatory for creation of AS group. type: list elements: dict suboptions: id: description: - Specifies the network ID. - - Mandatory. type: str required: true security_groups: description: - - Specifies the security group. If the security group is specified both in the AS configuration and AS group,\ - the security group specified in the AS configuration prevails. - - If the security group is not specified in either of them, the default security group is used. + - A maximum of one security group can be selected. + - Specifies the security group. If the security group is specified both \ + in the AS configuration and AS group, the security group specified in \ + the AS configuration prevails. + - If the security group is not specified in either of them, the default \ + security group is used. type: list elements: dict suboptions: id: description: - Specifies the security group ID. - - Mandatory. type: str required: true router: description: - The router ID or name. - - Mandatory for creating resource. 
+ - Mandatory for creating AS group. type: str health_periodic_audit_method: description: - - Specifies the health check method for instances in the AS group. When load balancing is configured for \ - an AS group, the default value is ELB_AUDIT. Otherwise, the default value is NOVA_AUDIT. - - ELB_AUDIT indicates the ELB health check, which takes effect in an AS group with a listener. - - NOVA_AUDIT indicates the ECS health check, which is the health check method delivered with AS. - choices: ['elb_audit', 'nova_audit'] + - Specifies the health check method for instances in the AS group.\ + When load balancing is configured for an AS group, the default value \ + is ELB_AUDIT. Otherwise, the default value is NOVA_AUDIT. + - ELB_AUDIT indicates the ELB health check, which takes effect in an \ + AS group with a listener. + - NOVA_AUDIT indicates the ECS health check, which is the health check \ + method delivered with AS. + choices: [elb_audit, nova_audit] type: str health_periodic_audit_time: description: - - Specifies the instance health check period. The value can be 1, 5, 15, 60, or 180 in the unit of minutes. + - Specifies the instance health check period. + - The value can be 1, 5, 15, 60, or 180 in the unit of minutes. - If this parameter is not specified, the default value is 5. - If the value is set to 0, health check is performed every 10 seconds. - - Default is 5. type: int + default: 5 health_periodic_audit_grace_period: description: - - Specifies the grace period for instance health check. The unit is second and value range is 0-86400.\ - The default value is 600. The health check grace period starts after an instance is added\ - to an AS group and is enabled.\ - The AS group will start checking the instance status only after the grace period ends. - - This parameter is valid only when the instance health check method of the AS group is ELB_AUDIT. - - Default is 600. + - Specifies the grace period for instance health check. + - The unit is second and value range is 0-86400. + - The default value is 600. + - The health check grace period starts after an instance is added to an \ + AS group and is enabled.The AS group will start checking the instance \ + status only after the grace period ends. + - This parameter is valid only when the instance health check method \ + of the AS group is ELB_AUDIT. type: int + default: 600 instance_terminate_policy: description: - - Specifies the instance removal policy. - - OLD_CONFIG_OLD_INSTANCE (default). The earlier-created instances based on the earlier-created \ - AS configurations are removed first. - - OLD_CONFIG_NEW_INSTANCE. The later-created instances based on the earlier-created\ - AS configurations are removed first. - - OLD_INSTANCE. The earlier-created instances are removed first. - - NEW_INSTANCE. The later-created instances are removed first. - choices: ['old_config_old_instance', 'old_config_new_instance', 'old_instance', 'new_instance'] + - Specifies the instance removal policy. + - OLD_CONFIG_OLD_INSTANCE (default). The earlier-created instances \ + based on the earlier-created AS configurations are removed first. + - OLD_CONFIG_NEW_INSTANCE. The later-created instances based on the \ + earlier-created AS configurations are removed first. + - OLD_INSTANCE. The earlier-created instances are removed first. + - NEW_INSTANCE. The later-created instances are removed first. 
+ choices: [old_config_old_instance, old_config_new_instance, + old_instance, new_instance] type: str + default: 'old_config_old_instance' notifications: description: - - Specifies the notification mode. + - Specifies the notification mode. type: list elements: str delete_publicip: description: - - Specifies whether to delete the EIP bound to the ECS when deleting the ECS. + - Specifies whether to delete the EIP bound to the ECS when \ + deleting the ECS. + - The default value is false. type: bool + default: 'no' delete_volume: description: - - Specifies whether to delete the data disks attached to the ECS when deleting the ECS. + - Specifies whether to delete the data disks attached to the \ + ECS when deleting the ECS. + - The default value is false. type: bool - enterprise_project_id: + default: 'no' + force_delete: description: - - Specifies the enterprise project ID, which is used to specify the enterprise project\ - to which the AS group belongs. - - If the value is 0 or left blank, the AS group belongs to the default enterprise project. - - If the value is a UUID, the AS group belongs to the enterprise project corresponding to the UUID. - type: str + - Specifies whether to forcibly delete an AS group, remove the ECS \ + instances and release them when the AS group is running instances or \ + performing scaling actions. + type: bool + default: 'no' multi_az_priority_policy: description: - - Specifies the priority policy used to select target AZs when adjusting the number of instances in an AS group. - - EQUILIBRIUM_DISTRIBUTE (default). When adjusting the number of instances, ensure that instances in each AZ in\ - the available_zones list is evenly distributed. If instances cannot be added in the target AZ, select another AZ\ - based on the PICK_FIRST policy. - - PICK_FIRST. When adjusting the number of instances, target AZs are determined in the order\ - in the available_zones list. - choices: ['equilibrium_distribute', 'pick_first'] + - Specifies the priority policy used to select target AZs when \ + adjusting the number of instances in an AS group. + - EQUILIBRIUM_DISTRIBUTE (default). When adjusting the number of \ + instances, ensure that instances in each AZ in the available_zones list \ + is evenly distributed. If instances cannot be added in the target AZ, \ + select another AZ based on the PICK_FIRST policy. + - PICK_FIRST. When adjusting the number of instances, target AZs are \ + determined in the order in the available_zones list. + choices: [equilibrium_distribute, pick_first] + type: str + default: 'equilibrium_distribute' + action: + description: + - Specifies a flag for enabling or disabling an AS group. type: str + choices: [resume, pause] state: description: - Whether resource should be present or absent. - choices: ['present', 'absent'] + choices: [present, absent] type: str default: 'present' + wait: + description: + - If the module should wait for the AS Group to be created or deleted. + type: bool + default: 'yes' + timeout: + description: + - The duration in seconds that module should wait. + default: 200 + type: int requirements: ["openstacksdk", "otcextensions"] ''' @@ -209,308 +255,906 @@ type: complex returned: On Success. contains: - scaling_group_id: + id: description: Specifies the AS group ID. 
type: str sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69" ''' EXAMPLES = ''' -opentelekomcloud.cloud.as_group: - scaling_group_name: "scaling-group-test" - networks: - - id: "39007a7e-ee4f-4d13-8283-b4da2e037c69" - router: "65707a7e-ee4f-4d13-8283-b4da2e037c69" -register: as_group +#Create AS Group + - opentelekomcloud.cloud.as_group: + scaling_group: + name: "as_group_test" + networks: + - id: "a64b4561-af18-4440-9976-b2398ed39ce5" + router: "5d1ac1f4-bec6-4b8c-aae0-7c4345c68f5d" + scaling_configuration: "as_config_test" + desire_instance_number: 1 + max_instance_number: 1 + action: "resume" + state: "present" + wait: yes + timeout: 360 + register: result + +#Delete AS Group + - opentelekomcloud.cloud.as_group: + scaling_group: + name: "as_group_test" + state: "absent" + force_delete: yes + wait: yes + timeout: 360 + register: result + ''' from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule +def is_value_changed(old: list, new: list): + """Compare two lists of parameters. + + This function compares two lists and returns True, if the two lists + contain different elements. + + :param old: The list of initial parameters. + :param new: The list of new parameters. + + :returns: Result of comparison + :rtype: bool + """ + result = [x for x in old + new if x not in old or x not in new] + return True if result else False + + +def new_list_with_dict_ids(old: list): + """Create new list with dicts + + This function aggregate dict elements with only one key "id" in new list. + + :param old: The initial list with dicts. + + :returns: New list with dicts that contain only id. + :rtype: list + """ + new_list = [] + for elem in old: + if isinstance(elem, dict): + new_elem = {"id": elem.get("id")} + new_list.append(new_elem) + return new_list + + class ASGroupModule(OTCModule): argument_spec = dict( - scaling_group_name=dict(required=False), - scaling_group_id=dict(required=False), + scaling_group=dict( + required=True, type='dict', options=dict( + id=dict(type='str'), + name=dict(type='str') + ) + ), scaling_configuration=dict(required=False), - desire_instance_number=dict(required=False, type='int'), - min_instance_number=dict(required=False, type='int'), - max_instance_number=dict(required=False, type='int'), - cool_down_time=dict(required=False, type='int'), - lb_listener=dict(required=False), - lbaas_listeners=dict(required=False, type='list', elements='dict'), - available_zones=dict(required=False, type='list', elements='str'), - networks=dict(required=False, type='list', elements='dict'), - security_groups=dict(required=False, type='list', elements='dict'), - router=dict(required=False), - health_periodic_audit_method=dict(required=False, type='str', choices=['elb_audit', 'nova_audit']), - health_periodic_audit_time=dict(required=False, type='int'), - health_periodic_audit_grace_period=dict(required=False, type='int'), - instance_terminate_policy=dict(required=False, - choices=['old_config_old_instance', 'old_config_new_instance', - 'old_instance', 'new_instance']), + desire_instance_number=dict(required=False, type='int', default=0), + min_instance_number=dict(required=False, type='int', default=0), + max_instance_number=dict(required=False, type='int', default=0), + cool_down_time=dict(required=False, type='int', default=300), + lb_listener=dict(required=False, type='str'), + lbaas_listeners=dict( + required=False, type='list', elements='dict', options=dict( + pool_id=dict(required=True, type='str'), + protocol_port=dict(required=True, type='int'), + 
weight=dict(required=True, type='int') + ) + ), + availability_zones=dict(required=False, type='list', elements='str'), + networks=dict( + required=False, type='list', elements='dict', options=dict( + id=dict(required=True, type='str') + ) + ), + security_groups=dict( + required=False, type='list', elements='dict', options=dict( + id=dict(required=True, type='str') + ) + ), + router=dict(required=False, type='str'), + health_periodic_audit_method=dict( + required=False, type='str', choices=['elb_audit', 'nova_audit'] + ), + health_periodic_audit_time=dict(required=False, type='int', default=5), + health_periodic_audit_grace_period=dict( + required=False, type='int', default=600 + ), + instance_terminate_policy=dict( + required=False, + choices=['old_config_old_instance', 'old_config_new_instance', + 'old_instance', 'new_instance'], + default='old_config_old_instance'), notifications=dict(required=False, type='list', elements='str'), - delete_publicip=dict(required=False, type='bool'), - delete_volume=dict(required=False, type='bool'), - enterprise_project_id=dict(required=False), - multi_az_priority_policy=dict(required=False, choices=['equilibrium_distribute', 'pick_first']), - state=dict(type='str', choices=['present', 'absent'], default='present') + delete_publicip=dict(required=False, type='bool', default=False), + delete_volume=dict(required=False, type='bool', default=False), + force_delete=dict(required=False, type='bool', default=False), + multi_az_priority_policy=dict( + required=False, choices=['equilibrium_distribute', 'pick_first'], + default='equilibrium_distribute' + ), + action=dict(required=False, type='str', choices=['resume', 'pause']), + state=dict( + type='str', choices=['present', 'absent'], default='present' + ), + wait=dict(type='bool', default=True), + timeout=dict(type='int', default=200) + ) module_kwargs = dict( - required_if=[ - ('scaling_group_id', None, ['scaling_group_name']) - ], supports_check_mode=True ) - def _find_id_config(self): - config = self.conn.auto_scaling.find_config(self.params['scaling_configuration'], ignore_missing=True) - config_id = None + def _is_as_config_find(self, as_config): + return self.conn.auto_scaling.find_config(as_config) + + def _attrs_id_config(self, attrs, as_config): + config = self._is_as_config_find(as_config) if config: - config_id = config.id + attrs['scaling_configuration_id'] = config.id + return attrs else: - self.fail_json(msg="Scaling configuration not found") - return config_id - - def _find_id_listener(self): - listener = self.conn.network.find_listener(self.params['scaling_configuration'], ignore_missing=True) - listener_id = None - if listener: - listener_id = listener.id + self.fail( + changed=False, + msg="Scaling configuration {0} not found".format(as_config) + ) + + def _attrs_lb_listeners(self, attrs, lb_listener): + lb_listener_list = lb_listener.split(',') + if 0 < len(lb_listener_list) <= 6: + attrs['lb_listener_id'] = ','.join(lb_listener_list) + return attrs else: - self.fail_json(msg="Listener not found") - return listener_id - - def _find_id_router(self): - router = self.conn.network.find_router(self.params['router'], ignore_missing=True) - router_id = None - if router: - router_id = router.id + self.fail( + changed=False, + msg="More then 6 classical load balancers are specified" + ) + + def _attrs_id_router(self, attrs, router): + rtr = self.conn.network.find_router(router) + if rtr: + attrs['router_id'] = rtr.id + return attrs else: - self.fail_json(msg="Router not found") - return router_id - - 
def run(self): - - as_group = None - - if self.params['scaling_group_id']: - as_group = self.conn.auto_scaling.find_group(self.params['scaling_group_id'], ignore_missing=True) + self.fail( + changed=False, + msg="Router {0} not found".format(router) + ) + + def _attrs_lbaas_listeners(self, attrs, lbaas_listeners): + if 0 < len(lbaas_listeners) <= 6: + lb_listeners = [] + lstnr = {} + for listener in lbaas_listeners: + pool = self.conn.network.find_pool(listener['pool_id']) + if pool: + lstnr['pool_id'] = pool.id + else: + self.fail( + changed=False, + msg="Pool {0} not found".format(listener['pool_id']) + ) + lstnr['protocol_port'] = listener['protocol_port'] + lstnr['weight'] = listener['weight'] + lb_listeners.append(lstnr) + attrs['lbaas_listeners'] = lb_listeners + return attrs else: - as_group = self.conn.auto_scaling.find_group(self.params['scaling_group_name'], ignore_missing=True) - - if self.params['state'] == 'present': - - if as_group: - - attrs = {} - - if self.params['scaling_group_name'] and (as_group.name != self.params['scaling_group_name']): - attrs['scaling_group_name'] = self.params['scaling_group_name'] - - if self.params['scaling_configuration']: - id_config = self._find_id_config() - if as_group.scaling_configuration_id != id_config: - attrs['scaling_configuration_id'] = id_config - - if self.params['desire_instance_number'] and \ - (as_group.desire_instance_number != self.params['desire_instance_number']): - attrs['desire_instance_number'] = self.params['desire_instance_number'] - - if self.params['min_instance_number'] \ - and (as_group.min_instance_number != self.params['min_instance_number']): - attrs['min_instance_number'] = self.params['min_instance_number'] - - if self.params['max_instance_number'] \ - and (as_group.max_instance_number != self.params['max_instance_number']): - attrs['max_instance_number'] = self.params['max_instance_number'] - - if self.params['cool_down_time'] and (as_group.cool_down_time != self.params['cool_down_time']): - attrs['cool_down_time'] = self.params['cool_down_time'] - - if self.params['lb_listener']: - lb_listener_id = self._find_listener_id() - if as_group.lb_listener_id != lb_listener_id: - attrs['lb_listener_id'] = lb_listener_id - - if self.params['available_zones'] and (as_group.available_zones != self.params['available_zones']): - attrs['available_zones'] = self.params['available_zones'] - - if self.params['networks']: - list_ids = [] - list_new_ids = [] - for n in as_group.networks: - list_ids.append(n['id']) - for m in self.params['networks']: - list_new_ids.append(m['id']) - dif = set(list_ids) ^ set(list_new_ids) - if dif: - attrs['networks'] = self.params['networks'] - - if self.params['security_groups'] and (as_group.security_groups != self.params['security_groups']): - attrs['available_zones'] = self.params['available_zones'] - - if self.params['health_periodic_audit_method'] \ - and (as_group.health_periodic_audit_method != self.params['health_periodic_audit_method']): + self.fail( + changed=False, + msg="More then 6 enhanced load balancers are specified" + ) + + def _attrs_networks(self, attrs, networks): + networks = new_list_with_dict_ids(networks) + if 0 < len(networks) <= 5: + netwrks = [] + netwrk = {} + for network in networks: + net = self.conn.network.find_network(network['id']) + if net: + netwrk['id'] = net.id + netwrks.append(netwrk) + else: + self.fail( + changed=False, + msg="Network {0} not found".format(network['id']) + ) + attrs['networks'] = netwrks + return attrs + else: + self.fail( + 
changed=False, + msg="More than 5 networks are specified" + ) + + def _attrs_security_groups(self, attrs, security_groups, as_config=None): + security_groups = new_list_with_dict_ids(security_groups) + if as_config: + config = self._is_as_config_find(as_config) + if config and config.security_groups: + attrs['security_groups'] = config.security_groups + else: + if len(security_groups) == 1: + sec_groups = [] + sec_group = {} + group = self.conn.network.find_security_group( + name_or_id=security_groups.id + ) + if group: + sec_group['id'] = group.id + sec_groups.append(sec_group) + attrs['security_groups'] = sec_groups + return attrs + else: + self.fail( + changed=False, + msg="The number of security groups in the AS group " + "exceeds the upper limit." + ) - if not as_group.lb_listener_id and (self.params['health_periodic_audit_method'] == 'elb_audit'): - self.fail_json(msg="Without LB only 'nova_audit' is available") + def _find_as_group(self, as_group): + if as_group.get('id'): + return self.conn.auto_scaling.find_group( + name_or_id=as_group.get('id') + ) + elif as_group.get('name'): + return self.conn.auto_scaling.find_group( + name_or_id=as_group.get('name') + ) + + def _attrs_for_as_group_create( + self, as_group, as_configuration, desire_instance_number, + min_instance_number, max_instance_number, cool_down_time, + lb_listener, lbaas_listeners, availability_zones, networks, + security_groups, router, hp_audit_method, hp_audit_time, + hp_audit_grace_period, instance_terminate_policy, notifications, + delete_publicip, delete_volume, multi_az_priority_policy + ): + attrs = {} + if as_group.get('name') and not as_group.get('id'): + attrs['scaling_group_name'] = as_group.get('name') + else: + self.fail( + changed=False, + msg="Name is mandatory for creating AS Group." + ) - attrs['health_periodic_audit_method'] = self.params['health_periodic_audit_method'] + if networks: + attrs = self._attrs_networks(attrs, networks) + else: + self.fail( + changed=False, + msg="'networks' is mandatory for creating an AS Group." + ) - if self.params['instance_terminate_policy']\ - and (as_group.instance_terminate_policy != self.params['instance_terminate_policy']): - attrs['instance_terminate_policy'] = self.params['instance_terminate_policy'] + if router: + attrs = self._attrs_id_router(attrs, router) + else: + self.fail( + changed=False, + msg="'router' is mandatory for creating an AS group." 
+ ) - if self.params['notifications'] and (as_group.notifications != self.params['notifications']): - attrs['notifications'] = self.params['notifications'] + if as_configuration: + attrs = self._attrs_id_config(attrs, as_configuration) - if self.params['delete_publicip'] and (as_group.delete_publicip != self.params['delete_publicip']): - attrs['delete_publicip'] = self.params['delete_publicip'] + if desire_instance_number: + attrs['desire_instance_number'] = desire_instance_number - if self.params['delete_volume'] and (as_group.delete_volume != self.params['delete_volume']): - attrs['delete_volume'] = self.params['delete_volume'] + if min_instance_number: + attrs['min_instance_number'] = min_instance_number - if self.params['enterprise_project_id'] \ - and (as_group.enterprise_project_id != self.params['enterprise_project_id']): - attrs['enterprise_project_id'] = self.params['enterprise_project_id'] + if max_instance_number: + attrs['max_instance_number'] = max_instance_number - changed = False + if cool_down_time: + attrs['cool_down_time'] = cool_down_time - if attrs: - changed = True + if lb_listener and lbaas_listeners: + self.fail( + changed=False, + msg="Either 'lb_listener' or 'lbaas_listener' " + "can be specified" + ) - if self.ansible.check_mode: - self.exit(changed=changed, as_group=as_group) - as_group = self.conn.auto_scaling.update_group(as_group, **attrs) + if lb_listener: + attrs = self._attrs_lb_listeners(attrs, lb_listener) - self.exit_json( - changed=changed, - as_group=as_group - ) + if lbaas_listeners: + attrs = self._attrs_lbaas_listeners(attrs, lbaas_listeners) + if not hp_audit_method: + if lb_listener or lbaas_listeners: + attrs['health_periodic_audit_method'] = "elb_audit".upper() else: - - attrs = {} - - if self.params['scaling_group_name']: - attrs['scaling_group_name'] = self.params['scaling_group_name'] + attrs['health_periodic_audit_method'] = "nova_audit".upper() + else: + if not lb_listener and not lbaas_listeners: + if hp_audit_method == 'elb_audit': + self.fail("Without LB only 'nova_audit' is available") else: - self.json(msg="Name is mandatory for creating.") + attrs['health_periodic_audit_method'] = \ + hp_audit_method.upper() + else: + attrs['health_periodic_audit_method'] = \ + hp_audit_method.upper() + + if availability_zones: + attrs['availability_zones'] = availability_zones + + if security_groups: + attrs = self._attrs_security_groups(attrs, security_groups) + + if hp_audit_time: + attrs['health_periodic_audit_time'] = hp_audit_time + + if delete_publicip: + attrs['delete_publicip'] = delete_publicip + + if delete_volume: + attrs['delete_volume'] = delete_volume + + if hp_audit_grace_period: + attrs['health_periodic_audit_grace_period'] = \ + hp_audit_grace_period + + if instance_terminate_policy: + attrs['instance_terminate_policy'] = \ + instance_terminate_policy.upper() + + if notifications: + attrs['notifications'] = notifications + + if multi_az_priority_policy: + attrs['multi_az_priority_policy'] = \ + multi_az_priority_policy.upper() + + return attrs + + def _attrs_for_as_group_update( + self, as_group, as_configuration, desire_instance_number, + min_instance_number, max_instance_number, cool_down_time, + lb_listener, lbaas_listeners, availability_zones, networks, + security_groups, hp_audit_method, hp_audit_time, + hp_audit_grace_period, instance_terminate_policy, notifications, + delete_publicip, delete_volume, multi_az_priority_policy, group + ): + attrs = {} + if (as_group.get('id')) and as_group.get('name'): + if (as_group.get('id') == 
group.id + and group.name != as_group.get('name')): + attrs['scaling_group_name'] = as_group.get('name') + + if (as_configuration + and as_configuration != group.scaling_configuration_id + and as_configuration != group.scaling_configuration_name): + attrs = self._attrs_id_config(attrs, as_configuration) + + if (desire_instance_number + and (group.desire_instance_number != desire_instance_number)): + attrs['desire_instance_number'] = desire_instance_number + + if (min_instance_number + and (group.min_instance_number != min_instance_number)): + attrs['min_instance_number'] = min_instance_number + + if (max_instance_number + and (group.max_instance_number != max_instance_number)): + attrs['max_instance_number'] = max_instance_number + + if cool_down_time and group.cool_down_time != cool_down_time: + attrs['cool_down_time'] = cool_down_time + + if lb_listener and lbaas_listeners: + self.fail( + changed=False, + msg="Either 'lb_listener' or 'lbaas_listener' " + "can be specified" + ) + + if lb_listener and group.lb_listner_id != lb_listener: + attrs = self._attrs_lb_listeners(attrs, lb_listener) + + if (lbaas_listeners + and is_value_changed(group.lbaas_listeners, lbaas_listeners)): + attrs = self._attrs_lbaas_listeners(attrs, lbaas_listeners) + + if (availability_zones + and is_value_changed( + group.availability_zones, availability_zones + )): + attrs['availability_zones'] = availability_zones + + if (networks + and is_value_changed( + new_list_with_dict_ids(group.networks), networks + )): + attrs = self._attrs_networks(attrs, networks) + + if (security_groups + and is_value_changed( + new_list_with_dict_ids(group.security_groups), + security_groups + )): + attrs = self._attrs_security_groups(attrs, security_groups) + + if hp_audit_method\ + and group.health_periodic_audit_method != \ + hp_audit_method.upper(): + + if (not group.lb_listener_id + and not group.lbaas_listeners + and hp_audit_method == 'elb_audit'.upper()): + self.fail_json( + msg="Without LB only 'nova_audit' is available" + ) - if self.params['networks']: - attrs['networks'] = self.params['networks'] - else: - self.fail_json(msg="'networks' is mandatory for creating an AS group.") + attrs['health_periodic_audit_method'] = hp_audit_method.upper() + + if (hp_audit_time + and group.health_periodic_audit_time != hp_audit_time): + attrs['health_periodic_audit_time'] = hp_audit_time + + if hp_audit_grace_period\ + and group.health_periodic_audit_grace_period != \ + hp_audit_grace_period: + attrs['health_periodic_audit_grace_period'] = hp_audit_grace_period + + if instance_terminate_policy\ + and group.instance_terminate_policy != \ + instance_terminate_policy.upper(): + attrs['instance_terminate_policy'] = \ + instance_terminate_policy.upper() + + if notifications and group.notifications != notifications: + attrs['notifications'] = notifications + + if delete_publicip and group.delete_publicip != delete_publicip: + attrs['delete_publicip'] = delete_publicip + + if delete_volume and group.delete_volume != delete_volume: + attrs['delete_volume'] = delete_volume + + if multi_az_priority_policy and group.multi_az_priority_policy != \ + multi_az_priority_policy.upper(): + attrs['multi_az_priority_policy'] = multi_az_priority_policy.upper() + + return attrs + + def _wait_for_instances(self, as_group, timeout, desire_instance_number=0): + for count in self.sdk.utils.iterate_timeout( + timeout=timeout, + message="Timeout waiting for AS Instances" + ): + instances = list(self.conn.auto_scaling.instances( + group=as_group + )) + if 
(len(instances) == desire_instance_number + and [instance.id for instance in instances + if instance.id]): + for instance in instances: + self.conn.auto_scaling.wait_for_instance(instance=instance) + return + + def _resume_group(self, group, wait, timeout, desire_instance_number=0): + result_group = group + self.conn.auto_scaling.resume_group(group=group) + if wait: + try: + if desire_instance_number > 0: + self._wait_for_instances( + as_group=group, + timeout=timeout, + desire_instance_number=desire_instance_number + ) + result_group = self.conn.auto_scaling.wait_for_group( + group=group, + wait=timeout + ) + except self.sdk.exceptions.ResourceTimeout: + self.fail( + msg="Timeout failure waiting for AS Group" + ) + return result_group + + def _pause_group(self, group, wait, timeout): + result_group = group + self.conn.auto_scaling.pause_group(group=group) + if wait: + try: + result_group = self.conn.auto_scaling.wait_for_group( + group=group, + status='PAUSED', + wait=timeout + ) + except self.sdk.exceptions.ResourceTimeout: + self.fail( + msg="Timeout failure waiting for AS Group" + ) + return result_group + + def _action_group( + self, action, group, wait, timeout, desire_instance_number=0 + ): + if action == 'resume': + return self._resume_group(group, wait, timeout, + desire_instance_number) + elif action == 'pause': + return self._pause_group(group, wait, timeout) + + def _needs_update( + self, as_group, as_configuration, desire_instance_number, + min_instance_number, max_instance_number, cool_down_time, + lb_listener, lbaas_listeners, availability_zones, networks, + security_groups, hp_audit_method, hp_audit_time, + hp_audit_grace_period, instance_terminate_policy, notifications, + delete_publicip, delete_volume, multi_az_priority_policy, group + ): + if as_group.get('id') and as_group.get('name'): + if (as_group.get('id') == group.id + and group.name != as_group.get('name')): + return True + + if (as_configuration + and group.scaling_configuration_id != as_configuration + and group.scaling_configuration_name != as_configuration): + return True + + if (desire_instance_number + and group.desire_instance_number != desire_instance_number): + return True + + if (min_instance_number + and group.min_instance_number != min_instance_number): + return True + + if (max_instance_number + and group.max_instance_number != max_instance_number): + return True + + if (cool_down_time + and group.cool_down_time != cool_down_time): + return True + + if (lb_listener + and group.lb_listner_id != lb_listener): + return True + + if (lbaas_listeners + and is_value_changed(group.lbaas_listeners, lbaas_listeners)): + return True + + if (availability_zones + and is_value_changed( + group.availability_zones, availability_zones + )): + return True + + if (networks + and is_value_changed( + new_list_with_dict_ids(group.networks), networks + )): + return True + + if (security_groups and is_value_changed(new_list_with_dict_ids( + group.security_groups), security_groups)): + return True + + if hp_audit_method \ + and group.health_periodic_audit_method != \ + hp_audit_method.upper(): + return True + + if (hp_audit_time + and group.health_periodic_audit_time != hp_audit_time): + return True + + if hp_audit_grace_period \ + and group.health_periodic_audit_grace_period != \ + hp_audit_grace_period: + return True + + if instance_terminate_policy \ + and group.instance_terminate_policy != \ + instance_terminate_policy.upper(): + return True + + if notifications and group.notifications != notifications: + return True + + 
if delete_publicip and group.delete_publicip != delete_publicip: + return True + + if delete_volume and group.delete_volume != delete_volume: + return True + + if multi_az_priority_policy \ + and group.multi_az_priority_policy != \ + multi_az_priority_policy.upper(): + return True + + return False + + def _is_group_can_be_deleted(self, as_group): + as_instances = list(self.conn.auto_scaling.instances(as_group)) + return False if as_instances else True + + def _delete_as_group(self, as_group, force_delete, wait, timeout): + self.conn.auto_scaling.delete_group( + group=as_group, + force_delete=force_delete + ) + if wait: + try: + self.conn.auto_scaling.wait_for_delete_group( + group=as_group, + wait=timeout + ) + except self.sdk.exceptions.ResourceTimeout: + self.fail( + msg="Timeout failure waiting for delete AS Group" + ) - if self.params['router']: - attrs['vpc_id'] = self._find_id_router() - else: - self.fail_json(msg="'router' is mandatory for creating an AS group.") + def _system_state_change( + self, as_group, as_configuration, desire_instance_number, + min_instance_number, max_instance_number, cool_down_time, + lb_listener, lbaas_listeners, availability_zones, networks, + security_groups, hp_audit_method, hp_audit_time, + hp_audit_grace_period, instance_terminate_policy, notifications, + delete_publicip, delete_volume, multi_az_priority_policy, group + ): + state = self.params['state'] + if state == 'present': + if not group: + return True + return self._needs_update( + as_group=as_group, as_configuration=as_configuration, + desire_instance_number=desire_instance_number, + min_instance_number=min_instance_number, + max_instance_number=max_instance_number, + cool_down_time=cool_down_time, + lb_listener=lb_listener, lbaas_listeners=lbaas_listeners, + availability_zones=availability_zones, networks=networks, + security_groups=security_groups, + hp_audit_method=hp_audit_method, + hp_audit_time=hp_audit_time, + hp_audit_grace_period=hp_audit_grace_period, + instance_terminate_policy=instance_terminate_policy, + notifications=notifications, delete_publicip=delete_publicip, + delete_volume=delete_volume, + multi_az_priority_policy=multi_az_priority_policy, group=group + ) + elif state == 'absent' and group: + return True + return False - if self.params['scaling_configuration']: - attrs['scaling_configuration_id'] = self._find_id_config() + def run(self): - if self.params['lb_listener'] and self.params['lbaas_listeners']: - self.fail_json(msg="Either 'lb_listener' or 'lbaas_listener' can be specified") + as_group = self.params['scaling_group'] + as_configuration = self.params['scaling_configuration'] + desire_instance_number = self.params['desire_instance_number'] + min_instance_number = self.params['min_instance_number'] + max_instance_number = self.params['max_instance_number'] + cool_down_time = self.params['cool_down_time'] + lb_listener = self.params['lb_listener'] + lbaas_listeners = self.params['lbaas_listeners'] + availability_zones = self.params['availability_zones'] + networks = self.params['networks'] + security_groups = self.params['security_groups'] + router = self.params['router'] + hp_audit_method = self.params['health_periodic_audit_method'] + hp_audit_time = self.params['health_periodic_audit_time'] + hp_audit_gr_period = self.params['health_periodic_audit_grace_period'] + instance_terminate_policy = self.params['instance_terminate_policy'] + notifications = self.params['notifications'] + delete_publicip = self.params['delete_publicip'] + delete_volume = 
self.params['delete_volume'] + force_delete = self.params['force_delete'] + multi_az_priority_policy = self.params['multi_az_priority_policy'] + action = self.params['action'] + wait = self.params['wait'] + timeout = self.params['timeout'] + state = self.params['state'] + + changed = False + + if as_group: + group = self._find_as_group(as_group) + + if self.ansible.check_mode: + self.exit( + changed=self._system_state_change( + as_group=as_group, + as_configuration=as_configuration, + desire_instance_number=desire_instance_number, + min_instance_number=min_instance_number, + max_instance_number=max_instance_number, + cool_down_time=cool_down_time, + lb_listener=lb_listener, + lbaas_listeners=lbaas_listeners, + availability_zones=availability_zones, + networks=networks, + security_groups=security_groups, + hp_audit_method=hp_audit_method, + hp_audit_time=hp_audit_time, + hp_audit_grace_period=hp_audit_gr_period, + instance_terminate_policy=instance_terminate_policy, + notifications=notifications, + delete_publicip=delete_publicip, + delete_volume=delete_volume, + multi_az_priority_policy=multi_az_priority_policy, + group=group) + ) - if not self.params['health_periodic_audit_method']: - # set default values for 'health_periodic_audit_method' - if self.params['lb_listener'] or self.params['lbaas_listeners']: - attrs['health_periodic_audit_method'] = "ELB_AUDIT" - else: - attrs['health_periodic_audit_method'] = "NOVA_AUDIT" - else: - if not self.params['lb_listener'] and not self.params['lbaas_listeners']: - if self.params['health_periodic_audit_method'] == 'elb_audit': - self.fail_json("Without LB only 'nova_audit' is available") - else: - attrs['health_periodic_audit_method'] = self.params['health_periodic_audit_method'].upper() + if group: + + if state == 'present': + + if self._needs_update( + as_group=as_group, + as_configuration=as_configuration, + desire_instance_number=desire_instance_number, + min_instance_number=min_instance_number, + max_instance_number=max_instance_number, + cool_down_time=cool_down_time, + lb_listener=lb_listener, + lbaas_listeners=lbaas_listeners, + availability_zones=availability_zones, + networks=networks, + security_groups=security_groups, + hp_audit_method=hp_audit_method, + hp_audit_time=hp_audit_time, + hp_audit_grace_period=hp_audit_gr_period, + instance_terminate_policy=instance_terminate_policy, + notifications=notifications, + delete_publicip=delete_publicip, + delete_volume=delete_volume, + multi_az_priority_policy=multi_az_priority_policy, + group=group + ): + attrs = self._attrs_for_as_group_update( + as_group=as_group, + as_configuration=as_configuration, + desire_instance_number=desire_instance_number, + min_instance_number=min_instance_number, + max_instance_number=max_instance_number, + cool_down_time=cool_down_time, + lb_listener=lb_listener, + lbaas_listeners=lbaas_listeners, + availability_zones=availability_zones, + networks=networks, security_groups=security_groups, + hp_audit_method=hp_audit_method, + hp_audit_time=hp_audit_time, + hp_audit_grace_period=hp_audit_gr_period, + instance_terminate_policy=instance_terminate_policy, + notifications=notifications, + delete_publicip=delete_publicip, + delete_volume=delete_volume, + multi_az_priority_policy=multi_az_priority_policy, + group=group + ) + group = self.conn.auto_scaling.update_group( + group=group, **attrs + ) + changed = True + if action: + group = self._action_group( + action=action, + group=group, + wait=wait, + timeout=timeout, + desire_instance_number=desire_instance_number + ) + 
self.exit( + changed=changed, + as_group=group, + msg="AS Group {0} was updated".format(group.id) + ) + elif action: + group = self._action_group( + action=action, + group=group, + wait=wait, + timeout=timeout, + desire_instance_number=desire_instance_number + ) + changed = True + self.exit( + changed=changed, + as_group=group, + msg="Action {0} for AS Group {1} was done".format( + action, group.id + ) + ) else: - attrs['health_periodic_audit_method'] = self.params['health_periodic_audit_method'].upper() - - if self.params['lb_listener']: - attrs['lb_listener_id'] = self._find_id_listener() - - if self.params['lbaas_listeners']: - attrs['lbaas_listeners'] = self.params['lbaas_listeners'] - - if self.params['min_instance_number']: - attrs['min_instance_number'] = self.params['min_instance_number'] - else: - attrs['min_instance_number'] = 0 - - if self.params['max_instance_number']: - attrs['max_instance_number'] = self.params['max_instance_number'] - else: - attrs['max_instance_number'] = 0 - - if self.params['health_periodic_audit_time']: - attrs['health_periodic_audit_time'] = self.params['health_periodic_audit_time'] - else: - attrs['health_periodic_audit_time'] = 5 - - if self.params['delete_publicip']: - attrs['delete_publicip'] = self.params['delete_publicip'] - else: - attrs['delete_publicip'] = False + self.fail( + changed=changed, + msg="AS Group {0} exists".format(group.id) + ) - if self.params['delete_volume']: - attrs['delete_volume'] = self.params['delete_volume'] -# else: -# attrs['delete_volume'] = False - - if self.params['cool_down_time']: - attrs['cool_down_time'] = self.params['cool_down_time'] - else: - attrs['cool_down_time'] = 300 - - if self.params['health_periodic_audit_grace_period']: - attrs['health_periodic_audit_grace_period'] = self.params['health_periodic_audit_grace_period'] else: - attrs['health_periodic_audit_grace_period'] = 600 + if force_delete or self._is_group_can_be_deleted(group): + self._delete_as_group( + as_group=group, + force_delete=force_delete, + wait=wait, + timeout=timeout + ) + changed = True + self.exit( + changed=changed, + msg="AS Group {0} was deleted".format(group.id) + ) + else: + changed = False + self.fail( + changed=changed, + msg="AS Group {0} can not be deleted due to " + "AS Instances presence".format(group.id) + ) - if self.params['desire_instance_number']: - attrs['desire_instance_number'] = self.params['desire_instance_number'] - if self.params['available_zones']: - attrs['available_zones'] = self.params['available_zones'] - if self.params['security_groups']: - attrs['security_groups'] = self.params['security_groups'] + else: - if self.params['instance_terminate_policy']: - attrs['instance_terminate_policy'] = self.params['instance_terminate_policy'].upper() - else: - attrs['instance_terminate_policy'] = 'OLD_CONFIG_OLD_INSTANCE' - - if self.params['notifications']: - attrs['notifications'] = self.params['notifications'] - if self.params['enterprise_project_id']: - attrs['enterprise_project_id'] = self.params['enterprise_project_id'] - if self.params['multi_az_priority_policy']: - attrs['multi_az_priority_policy'] = self.params['multi_az_priority_policy'].upper() + if state == 'present': + attrs = self._attrs_for_as_group_create( + as_group=as_group, + as_configuration=as_configuration, + desire_instance_number=desire_instance_number, + min_instance_number=min_instance_number, + max_instance_number=max_instance_number, + cool_down_time=cool_down_time, + lb_listener=lb_listener, + lbaas_listeners=lbaas_listeners, + 
availability_zones=availability_zones, + networks=networks, security_groups=security_groups, + router=router, hp_audit_method=hp_audit_method, + hp_audit_time=hp_audit_time, + hp_audit_grace_period=hp_audit_gr_period, + instance_terminate_policy=instance_terminate_policy, + notifications=notifications, + delete_publicip=delete_publicip, + delete_volume=delete_volume, + multi_az_priority_policy=multi_az_priority_policy + ) + group = self.conn.auto_scaling.create_group(**attrs) + changed = True + if ( + as_configuration + and self._is_as_config_find(as_configuration) + and action + ): + group = self._action_group( + action=action, + group=group, + wait=wait, + timeout=timeout, + desire_instance_number=desire_instance_number + ) + self.exit( + changed=changed, + as_group=group, + msg="AS Group {0} was created".format(as_group.get( + "name")) + ) else: - attrs['multi_az_priority_policy'] = 'EQUILIBRIUM_DISTRIBUTE' + self.fail( + changed=changed, + msg="AS Group {0} not found".format(as_group.get('id')) + ) - if self.ansible.check_mode: - self.exit(changed=True) - - as_group = self.conn.auto_scaling.create_group(**attrs) - changed = True - - self.exit_json( - changed=changed, - as_group=as_group - ) - - elif self.params['state'] == 'absent': - if as_group: - if self.ansible.check_mode: - self.exit(changed=True) - self.conn.auto_scaling.delete_group(as_group) - self.exit(changed=True, msg="Resource was deleted") - else: - if self.ansible.check_mode: - self.exit(changed=False) - self.fail_json("The group doesn't exist") + else: + self.fail( + changed=changed, + msg="Name or/and ID should be specified" + ) def main(): diff --git a/tests/integration/targets/as_group/tasks/main.yaml b/tests/integration/targets/as_group/tasks/main.yaml index 7286042b..396bf24e 100644 --- a/tests/integration/targets/as_group/tasks/main.yaml +++ b/tests/integration/targets/as_group/tasks/main.yaml @@ -2,6 +2,18 @@ - module_defaults: opentelekomcloud.cloud.as_group: cloud: "{{ test_cloud }}" + opentelekomcloud.cloud.as_instance_info: + cloud: "{{ test_cloud }}" + openstack.cloud.os_keypair: + cloud: "{{ test_cloud }}" + openstack.cloud.os_network: + cloud: "{{ test_cloud }}" + openstack.cloud.os_subnet: + cloud: "{{ test_cloud }}" + openstack.cloud.os_router: + cloud: "{{ test_cloud }}" + opentelekomcloud.cloud.as_config: + cloud: "{{ test_cloud }}" block: - name: Set random prefix set_fact: @@ -12,6 +24,7 @@ as_group_name: "{{ ( prefix + '_as_group') }}" new_name: "{{ ( prefix + 'new_name') }}" key_name: "{{ ( prefix + '_key') }}" + config_name: "{{ ( prefix + '_as_conf' ) }}" network_name: "{{ ( prefix + '_network') }}" subnet_name: "{{ ( prefix + '_subnet') }}" router_name: "{{ ( prefix + '_router') }}" @@ -46,9 +59,22 @@ subnet: "{{ subnet.subnet.name }}" register: router - - name: Create as group - check_mode + - name: Create AS Config + opentelekomcloud.cloud.as_config: + scaling_configuration: "{{ config_name }}" + key_name: "{{ key_name }}" + image: "Standard_Debian_9_latest" + flavor: "c4.2xlarge.2" + disk: + - size: 10 + volume_type: 'SAS' + disk_type: 'SYS' + register: as_config + + - name: Create AS Group - check_mode opentelekomcloud.cloud.as_group: - scaling_group_name: "{{ as_group_name }}" + scaling_group: + name: "{{ as_group_name }}" networks: [{'id': "{{ network.network.id }}"}] router: "{{ router.router.id }}" check_mode: yes @@ -58,10 +84,11 @@ assert: that: - as_group_check is changed - - - - name: Create as group + + - name: Create AS Group opentelekomcloud.cloud.as_group: - scaling_group_name: 
"{{ as_group_name }}" + scaling_group: + name: "{{ as_group_name }}" networks: [{'id': "{{ network.network.id }}"}] router: "{{ router.router.id }}" register: as_group @@ -71,11 +98,44 @@ that: - as_group is success - as_group is changed + - 'as_group.msg == "AS Group {{ as_group_name }} was created"' + + - name: Create AS Group when it already exists + opentelekomcloud.cloud.as_group: + scaling_group: + name: "{{ as_group_name }}" + networks: [{'id': "{{ network.network.id }}"}] + router: "{{ router.router.id }}" + register: as_group_err + ignore_errors: yes + + - name: assert result + assert: + that: + - as_group_err is not success + - as_group_err is not changed + - 'as_group_err.msg == "AS Group {{ as_group.as_group.id }} exists"' + + - name: Update as group - check mode + opentelekomcloud.cloud.as_group: + scaling_group: + id: "{{ as_group.as_group.id }}" + name: "{{ new_name }}" + max_instance_number: 10 + check_mode: yes + register: as_group_check + + - name: assert result + assert: + that: + - as_group_check is success + - as_group_check is changed - name: Update as group opentelekomcloud.cloud.as_group: - scaling_group_id: "{{ as_group.as_group.id }}" - scaling_group_name: "{{ new_name }}" + scaling_group: + id: "{{ as_group.as_group.id }}" + name: "{{ new_name }}" max_instance_number: 10 register: as_group @@ -84,14 +144,118 @@ that: - as_group is success - as_group is changed + - 'as_group.msg == "AS Group {{ as_group.as_group.id }} was updated"' + + - name: Delete AS Group - check_mode + opentelekomcloud.cloud.as_group: + scaling_group: + id: "{{ as_group.as_group.id }}" + state: absent + check_mode: yes + register: as_group_check + + - name: assert result + assert: + that: + - as_group_check is changed + - as_group_check is success + + - name: Delete AS Group + opentelekomcloud.cloud.as_group: + scaling_group: + id: "{{ as_group.as_group.id }}" + state: absent + wait: yes + timeout: 360 + register: as_gr_del + + - name: assert result + assert: + that: + - as_gr_del is success + - as_gr_del is changed + - 'as_gr_del.msg == "AS Group {{ as_group.as_group.id }} was deleted"' + + - name: Delete AS Group that already deleted + opentelekomcloud.cloud.as_group: + scaling_group: + id: "{{ as_group.as_group.id }}" + state: absent + wait: yes + timeout: 360 + register: as_gr_err + ignore_errors: yes + + - name: assert result + assert: + that: + - as_gr_err is not success + - as_gr_err is not changed + - 'as_gr_err.msg == "AS Group {{ as_group.as_group.id }} not found"' + + - name: Create AS Group with instances + opentelekomcloud.cloud.as_group: + scaling_group: + name: "{{ as_group_name }}" + scaling_configuration: "{{ as_config.as_config.id }}" + networks: [{'id': "{{ network.network.id }}"}] + router: "{{ router.router.id }}" + desire_instance_number: 1 + max_instance_number: 1 + action: "resume" + state: "present" + wait: yes + timeout: 360 + register: as_group + + - name: assert result + assert: + that: + - as_group is success + - as_group is changed + + - name: Get list of AS instances using as group id + opentelekomcloud.cloud.as_instance_info: + scaling_group: "{{ as_group.as_group.id }}" + register: as_instances + + - name: assert result + assert: + that: + - as_instances is success + - as_instances is not changed + - as_instances['scaling_instances']|length == 1 + + + - name: Delete AS Group with instances without force_delete + opentelekomcloud.cloud.as_group: + scaling_group: + id: "{{ as_group.as_group.id }}" + state: absent + force_delete: no + wait: yes + timeout: 360 + 
register: as_group_err + ignore_errors: yes + + - name: assert result + assert: + that: + - as_group_err is not success + - as_group_err is not changed + always: - block: # Cleanup - name: Delete as group opentelekomcloud.cloud.as_group: - scaling_group_name: "{{ new_name }}" + scaling_group: + id: "{{ as_group.as_group.id }}" state: absent + force_delete: yes + wait: yes + timeout: 360 register: dropped_as_group - name: assert result @@ -100,10 +264,23 @@ - dropped_as_group is success - dropped_as_group is changed + - name: Delete as config + opentelekomcloud.cloud.as_config: + scaling_configuration: "{{ config_name }}" + state: absent + register: dropped_as_config + ignore_errors: true + + - name: assert result + assert: + that: + - dropped_as_config is success + - dropped_as_config is changed + - name: Delete keypair openstack.cloud.os_keypair: - name: "{{ key_name }}" - state: absent + name: "{{ key_name }}" + state: absent - name: Drop existing router openstack.cloud.os_router: diff --git a/tests/integration/targets/as_instance_info/tasks/main.yaml b/tests/integration/targets/as_instance_info/tasks/main.yaml index 52ce890b..b5e37f5c 100644 --- a/tests/integration/targets/as_instance_info/tasks/main.yaml +++ b/tests/integration/targets/as_instance_info/tasks/main.yaml @@ -50,7 +50,8 @@ - name: Create as group opentelekomcloud.cloud.as_group: - scaling_group_name: "{{ as_group_name }}" + scaling_group: + name: "{{ as_group_name }}" networks: [{'id': "{{ network.network.id }}"}] router: "{{ router.router.id }}" register: as_group @@ -108,8 +109,11 @@ # Cleanup - name: Delete as group opentelekomcloud.cloud.as_group: - scaling_group_name: "{{ as_group_name }}" + scaling_group: + name: "{{ as_group_name }}" state: absent + force_delete: yes + wait: yes register: dropped_as_group - name: Drop existing router diff --git a/tests/integration/targets/as_policy/tasks/main.yaml b/tests/integration/targets/as_policy/tasks/main.yaml index a7b5e0f6..a4587ede 100644 --- a/tests/integration/targets/as_policy/tasks/main.yaml +++ b/tests/integration/targets/as_policy/tasks/main.yaml @@ -89,7 +89,8 @@ - name: Create AS Group opentelekomcloud.cloud.as_group: - scaling_group_name: "{{ as_group_name }}" + scaling_group: + name: "{{ as_group_name }}" networks: [{'id': "{{ network.network.id }}"}] router: "{{ router.router.id }}" register: as_group @@ -230,8 +231,11 @@ # Cleanup - name: Delete AS group opentelekomcloud.cloud.as_group: - scaling_group_name: "{{ as_group_name }}" + scaling_group: + name: "{{ as_group_name }}" state: absent + force_delete: yes + wait: yes register: dropped_as_group - name: Drop alarm diff --git a/tests/integration/targets/as_policy_info/tasks/main.yaml b/tests/integration/targets/as_policy_info/tasks/main.yaml index 15ff9955..98287207 100644 --- a/tests/integration/targets/as_policy_info/tasks/main.yaml +++ b/tests/integration/targets/as_policy_info/tasks/main.yaml @@ -57,7 +57,8 @@ - name: Create as group opentelekomcloud.cloud.as_group: - scaling_group_name: "{{ as_group_name }}" + scaling_group: + name: "{{ as_group_name }}" networks: [{'id': "{{ network.network.id }}"}] router: "{{ router.router.id }}" register: as_group @@ -116,8 +117,11 @@ # Cleanup - name: Delete as group opentelekomcloud.cloud.as_group: - scaling_group_name: "{{ as_group_name }}" + scaling_group: + name: "{{ as_group_name }}" state: absent + force_delete: yes + wait: yes register: dropped_as_group - name: Delete keypair From fd871c291febbe0738ce0f9f438039637d29671d Mon Sep 17 00:00:00 2001 From: 
YustinaKvr <62885041+YustinaKvr@users.noreply.github.com> Date: Mon, 2 Aug 2021 14:31:01 +0300 Subject: [PATCH 13/65] DDS flavor info module (#129) DDS flavor info module docs code integration tests ignore files Reviewed-by: None Reviewed-by: Anton Sidelnikov Reviewed-by: Anton Kachurin --- meta/runtime.yml | 1 + plugins/modules/dds_flavor_info.py | 104 ++++++++++++++++++ .../targets/dds_flavor_info/tasks/main.yaml | 16 +++ tests/sanity/ignore-2.10.txt | 1 + tests/sanity/ignore-2.9.txt | 1 + 5 files changed, 123 insertions(+) create mode 100644 plugins/modules/dds_flavor_info.py create mode 100644 tests/integration/targets/dds_flavor_info/tasks/main.yaml diff --git a/meta/runtime.yml b/meta/runtime.yml index aaf5dafa..90a84cca 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -22,6 +22,7 @@ action_groups: - deh_host_info - deh_host_type_info - deh_server_info + - dds_flavor_info - floating_ip - loadbalancer - loadbalancer_info diff --git a/plugins/modules/dds_flavor_info.py b/plugins/modules/dds_flavor_info.py new file mode 100644 index 00000000..b8618cd3 --- /dev/null +++ b/plugins/modules/dds_flavor_info.py @@ -0,0 +1,104 @@ +#!/usr/bin/python +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions, +# limitations under the License. + +DOCUMENTATION = ''' +module: dds_flavor_info +short_description: Obtain flavor type information about a specified region and DB type. +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.9.0" +author: "Yustina Kvrivishvili (@YustinaKvr)" +description: + - Get DDS flavor info +options: + region: + description: + - Specifies the region where the DB instance exists. + type: str + required: true + engine_name: + description: + - Specifies the database type. The value is DDS-Community. + type: str + required: false + default: 'DDS-Community' +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +flavors: + description: Info about flavor. + returned: On Success + type: complex + contains: + az_status: + description: Indicates the status of specifications in an AZ. + type: list + engine_name: + description: Indicates the engine name. + type: str + id: + description: Datastore version. + type: str + name: + description: Name of the datastore. + type: str + ram: + description: Indicates the memory size in gigabyte (GB). + type: str + spec_code: + description: Indicates the resource specifications code. + type: str + type: + description: Indicates the node type. + type: str + vcpus: + description: Number of vCPUs. 
+ type: str +''' + +EXAMPLES = ''' +# Get info about flavor +- opentelekomcloud.cloud.dds_flavor_info: + region: "eu-de" + register: result +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class DDSFlavorInfo(OTCModule): + argument_spec = dict( + region=dict(required=True), + engine_name=dict(default='DDS-Community'), + ) + module_kwargs = dict( + supports_check_mode=True + ) + + def run(self): + region = self.params['region'] + engine_name = self.params['engine_name'] + + data = [] + for raw in self.conn.dds.flavors(region=region, engine_name=engine_name): + dt = raw.to_dict() + dt.pop('location') + data.append(dt) + + self.exit( + changed=False, + flavors=data + ) + + +def main(): + module = DDSFlavorInfo() + module() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/dds_flavor_info/tasks/main.yaml b/tests/integration/targets/dds_flavor_info/tasks/main.yaml new file mode 100644 index 00000000..474b13c1 --- /dev/null +++ b/tests/integration/targets/dds_flavor_info/tasks/main.yaml @@ -0,0 +1,16 @@ +--- +- module_defaults: + opentelekomcloud.cloud.dds_flavor_info: + cloud: "{{ test_cloud }}" + block: + - name: Get info about flavor + opentelekomcloud.cloud.dds_flavor_info: + region: "eu-de" + register: result + + - name: assert result + assert: + that: + - result is success + - result is not changed + - result.flavors is defined diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index eb5852a7..b7067f45 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -37,6 +37,7 @@ plugins/modules/deh_host_info.py validate-modules:missing-gplv3-license plugins/modules/deh_host_type_info.py validate-modules:missing-gplv3-license plugins/modules/deh_server_info.py validate-modules:missing-gplv3-license plugins/modules/dds_datastore_info.py validate-modules:missing-gplv3-license +plugins/modules/dds_flavor_info.py validate-modules:missing-gplv3-license plugins/modules/loadbalancer.py validate-modules:missing-gplv3-license plugins/modules/loadbalancer_info.py validate-modules:missing-gplv3-license plugins/modules/nat_gateway.py validate-modules:missing-gplv3-license diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index defa30d4..3abdde05 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -37,6 +37,7 @@ plugins/modules/deh_host_info.py validate-modules:missing-gplv3-license plugins/modules/deh_host_type_info.py validate-modules:missing-gplv3-license plugins/modules/deh_server_info.py validate-modules:missing-gplv3-license plugins/modules/dds_datastore_info.py validate-modules:missing-gplv3-license +plugins/modules/dds_flavor_info.py validate-modules:missing-gplv3-license plugins/modules/floating_ip.py validate-modules:missing-gplv3-license plugins/modules/loadbalancer.py validate-modules:missing-gplv3-license plugins/modules/loadbalancer_info.py validate-modules:missing-gplv3-license From 624c16a098d65368582261ddaa421714a43c41f3 Mon Sep 17 00:00:00 2001 From: Irina Pereiaslavskaia <63649585+irina-pereiaslavskaia@users.noreply.github.com> Date: Fri, 6 Aug 2021 11:44:44 +0300 Subject: [PATCH 14/65] New AS Instance module (#130) New AS Instance module Add new module that helps to create/modify/delete AS Instances in AS Group. 
Reviewed-by: Anton Kachurin Reviewed-by: Anton Sidelnikov Reviewed-by: None Reviewed-by: Irina Pereiaslavskaia Reviewed-by: Polina Gubina --- meta/runtime.yml | 1 + plugins/modules/as_group.py | 14 +- plugins/modules/as_instance.py | 497 +++++++++++++++++ .../targets/as_instance/tasks/main.yaml | 508 ++++++++++++++++++ tests/sanity/ignore-2.10.txt | 1 + tests/sanity/ignore-2.9.txt | 1 + 6 files changed, 1015 insertions(+), 7 deletions(-) create mode 100644 plugins/modules/as_instance.py create mode 100644 tests/integration/targets/as_instance/tasks/main.yaml diff --git a/meta/runtime.yml b/meta/runtime.yml index 90a84cca..19e7d0c3 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -6,6 +6,7 @@ action_groups: - as_config_info - as_group - as_group_info + - as_instance - as_instance_info - as_policy - as_policy_info diff --git a/plugins/modules/as_group.py b/plugins/modules/as_group.py index 1fcd0c53..10aef992 100644 --- a/plugins/modules/as_group.py +++ b/plugins/modules/as_group.py @@ -485,11 +485,11 @@ def _attrs_security_groups(self, attrs, security_groups, as_config=None): if len(security_groups) == 1: sec_groups = [] sec_group = {} - group = self.conn.network.find_security_group( - name_or_id=security_groups.id + security_group = self.conn.network.find_security_group( + name_or_id=security_groups[0]["id"] ) - if group: - sec_group['id'] = group.id + if security_group: + sec_group['id'] = security_group.id sec_groups.append(sec_group) attrs['security_groups'] = sec_groups return attrs @@ -737,9 +737,9 @@ def _wait_for_instances(self, as_group, timeout, desire_instance_number=0): instances = list(self.conn.auto_scaling.instances( group=as_group )) - if (len(instances) == desire_instance_number - and [instance.id for instance in instances - if instance.id]): + instances_with_id = [instance.id for instance in instances + if instance.id] + if (len(instances) == len(instances_with_id) == desire_instance_number): for instance in instances: self.conn.auto_scaling.wait_for_instance(instance=instance) return diff --git a/plugins/modules/as_instance.py b/plugins/modules/as_instance.py new file mode 100644 index 00000000..9b21a16b --- /dev/null +++ b/plugins/modules/as_instance.py @@ -0,0 +1,497 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +--- +module: as_instance +short_description: Managing Instances in an AS Group. +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.9.0" +author: "Irina Pereiaslavskaia (@irina-pereiaslavskaia)" +description: + - This interface is used to manage Instances in an AS Group. +options: + scaling_group: + description: + - Specifies the auto-scaling group name or ID. + type: str + required: true + scaling_instances: + description: + - Specifies the instance names or IDs. + type: list + elements: str + required: true + instance_delete: + description: + - Specifies whether an instance is deleted when it is + removed from the AS group. 
+ type: bool + default: 'no' + action: + description: + - Specifies an action to be performed on instances in batches. + choices: [add, remove, protect, unprotect] + type: str + state: + description: + - Whether resource should be present or absent. + choices: [present, absent] + type: str + default: "present" + wait: + description: + - If the module should wait for the RDS backup to be created or deleted. + type: bool + default: 'yes' + timeout: + description: + - The duration in seconds that module should wait. + default: 200 + type: int +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +# This module does not return anything. +''' + +EXAMPLES = ''' +# Add AS instances +- opentelekomcloud.cloud.as_instance: + scaling_group: "89af599d-a8ab-4c29-a063-0b719125468" + scaling_instances: ["inst_name_1", "inst_name_2", "inst_name_3"] + action: "add" + state: present + register: as_instances + +# Protect AS instances +- opentelekomcloud.cloud.as_instance: + scaling_group: "as_group_id" + scaling_instances: ["89af599d-a8ab-4c29-a063-0b719ed77e8e", \ + "89af599d-a8ab-4c29-a063-0b719ed77555"] + action: "protect" + state: present + register: as_instances + +# Unprotect AS instances +- opentelekomcloud.cloud.as_instance: + scaling_group: "89af599d-a8ab-4c29-a063-0b719ed88888" + scaling_instances: ["89af599d-a8ab-4c29-a063-0b719ed77e8e", \ + "89af599d-a8ab-4c29-a063-0b719ed77555"] + action: "protect" + state: present + register: as_instances + +# Remove Instance in an AS Group +- opentelekomcloud.cloud.as_instance: + scaling_group: "test_group" + scaling_instance: "89af599d-a8ab-4c29-a063-0b719ed77e8e" + state: "absent" + register: as_instance + +# Remove AS instances +- opentelekomcloud.cloud.as_instance: + scaling_group: "as_group_id" + scaling_instances: ["inst_name_1", "inst_name_2", "inst_name_3"] + action: "remove" + state: absent + register: as_instances +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class ASInstanceModule(OTCModule): + argument_spec = dict( + scaling_group=dict(type='str', required=True), + scaling_instances=dict(type='list', elements='str', required=True), + instance_delete=dict(type='bool', default=False), + action=dict(type='str', + choices=['add', 'remove', 'protect', 'unprotect']), + state=dict(type='str', + choices=['present', 'absent'], default='present'), + wait=dict(type='bool', default=True), + timeout=dict(type='int', default=200) + ) + module_kwargs = dict( + supports_check_mode=True + ) + + def _system_state_change(self, instances, as_instances, state, action): + if state == 'present': + if action is None: + return False + elif action == 'remove': + return False + elif action == 'add': + if instances: + return True + else: + return False + else: + if instances: + return True + else: + return False + else: + if instances: + if action is None: + if len(as_instances) == 1: + if len(instances) == 1: + return True + else: + return False + else: + return False + elif action == 'remove': + return True + else: + return False + + def _is_group_in_inservice_state(self, group): + return True if group.status.lower() == 'inservice' else False + + def _is_instance_in_inservice_state(self, instance): + inst_inservice_state = instance.lifecycle_state.lower() == 'inservice' + return True if inst_inservice_state else False + + def _max_number_of_instances_for_adding(self, group): + return group.max_instance_number - group.current_instance_number + + def _max_number_of_instances_for_removing(self, group): + 
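+        # The group cannot shrink below its configured minimum, so at most
+        # (current_instance_number - min_instance_number) instances can be removed.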
return group.current_instance_number - group.min_instance_number + + def _max_number_of_instances_for_protecting(self, group): + return group.current_instance_number + + def _slice_list(self, init_list, part_size): + return [init_list[i:i + part_size] + for i in range(0, len(init_list), part_size)] + + def _join_lists(self, init_list): + result = [] + for element in init_list: + result.extend(element) + return result + + def _get_instances_id_for_adding(self, group, as_instances): + instances = [] + max_instances = self._max_number_of_instances_for_adding(group) + for as_instance in as_instances: + instance_ecs = self.conn.compute.find_server( + name_or_id=as_instance + ) + instance_as_group = self.conn.auto_scaling.find_instance( + group=group, + name_or_id=as_instance + ) + if (instance_ecs + and instance_ecs.availability_zone in group.availability_zones + and not instance_as_group): + instances.append(instance_ecs.id) + if len(instances) <= max_instances: + instances = self._slice_list(instances, 10) + return instances + + def _get_instances_id_for_removing(self, group, as_instances): + instances = [] + max_instances = self._max_number_of_instances_for_removing(group) + for as_instance in as_instances: + instance = self.conn.auto_scaling.find_instance( + group=group, + name_or_id=as_instance + ) + if instance and self._is_instance_in_inservice_state(instance): + instances.append(instance.id) + if len(instances) <= max_instances: + instances = self._slice_list(instances, 10) + return instances + + def _get_instances_id_for_protection(self, group, as_instances): + instances = [] + max_instances = self._max_number_of_instances_for_protecting(group) + for as_instance in as_instances: + instance = self.conn.auto_scaling.find_instance( + group=group, + name_or_id=as_instance + ) + if instance and self._is_instance_in_inservice_state(instance): + instances.append(instance.id) + if len(instances) <= max_instances: + instances = self._slice_list(instances, 10) + return instances + + def _get_instances_id(self, as_group, as_instances, state, action): + instances = [] + if state == 'present': + if action == 'add': + instances = self._get_instances_id_for_adding( + group=as_group, + as_instances=as_instances + ) + elif action == 'protect' or action == 'unprotect': + instances = self._get_instances_id_for_protection( + group=as_group, + as_instances=as_instances + ) + else: + if action == 'remove' or action is None: + instances = self._get_instances_id_for_removing( + group=as_group, + as_instances=as_instances + ) + return instances + + def _batch_instances_action( + self, instances, group, timeout, action, instance_delete=False + ): + for instance_group in instances: + self.conn.auto_scaling.batch_instance_action( + group=self._wait_for_group_inservice_status( + as_group=group, + timeout=timeout + ), + instances=instance_group, + action=action, + delete_instance=instance_delete + ) + + def _delete_single_instance(self, instance, delete_instance=False): + if isinstance(instance, list): + instance = self._join_lists(instance).pop() + return self.conn.auto_scaling.remove_instance( + instance=instance, delete_instance=delete_instance + ) + + def _wait_for_group_inservice_status(self, as_group, timeout, interval=2): + return self.conn.auto_scaling.wait_for_group( + group=as_group, interval=interval, wait=timeout + ) + + def _wait_for_instances_inservice_status( + self, timeout, interval, group, instances_id + ): + inst_ids = self._join_lists(instances_id) + for count in self.sdk.utils.iterate_timeout( 
+ timeout=timeout, + wait=interval, + message="Timeout waiting for instance to be in inservice state" + ): + all_instances = list(self.conn.auto_scaling.instances(group=group)) + instances = [instance for instance in all_instances + if instance.id in inst_ids] + for instance in instances: + self.conn.auto_scaling.wait_for_instance(instance) + return + + def _wait_for_delete_instances(self, group, instances_id, timeout, + interval=5): + inst_ids = self._join_lists(instances_id) + for count in self.sdk.utils.iterate_timeout( + timeout=timeout, + wait=interval, + message="Timeout waiting for instance to be deleted" + ): + all_instances = list( + self.conn.auto_scaling.instances(group=group) + ) + if all_instances: + instances = [instance for instance in all_instances + if instance.id in inst_ids] + if not instances: + return + else: + for instance in instances: + self.conn.auto_scaling.wait_for_delete_instance(instance) + return + + def run(self): + as_group = self.params['scaling_group'] + as_instances = self.params['scaling_instances'] + instance_delete = self.params['instance_delete'] + action = self.params['action'] + state = self.params['state'] + wait = self.params['wait'] + timeout = self.params['timeout'] + + try: + group = self.conn.auto_scaling.find_group( + name_or_id=as_group, + ignore_missing=False + ) + + except self.sdk.exceptions.ResourceNotFound: + self.fail( + changed=False, + msg='Scaling group {0} not found'.format(as_group) + ) + + max_adding = self._max_number_of_instances_for_adding(group) + max_removing = self._max_number_of_instances_for_removing(group) + max_protecting = self._max_number_of_instances_for_protecting(group) + + if as_instances: + instances_id = self._get_instances_id( + as_group=group, + as_instances=as_instances, + state=state, + action=action + ) + + if self.ansible.check_mode: + self.exit( + changed=self._system_state_change( + instances_id, as_instances, state, action + ) + ) + + if state == 'present': + + if not action: + self.exit( + changed=False, + msg='Instances not changed' + ) + elif action == 'remove': + self.fail( + changed=False, + msg='Action is incompatible with this state' + ) + elif action == 'add': + if not instances_id: + msg = 'Instances not found or not in INSERVICE ' \ + 'state or Number of instances is ' \ + 'greater than maximum. Only {0} instances can ' \ + 'be added'.format(max_adding) + self.fail( + changed=False, + msg=msg + ) + else: + self._batch_instances_action( + instances=instances_id, + group=group, + timeout=timeout, + action=action + ) + if wait: + self._wait_for_instances_inservice_status( + timeout=timeout, + interval=5, + group=group, + instances_id=instances_id + ) + self.exit( + changed=True, + msg='Action {0} was done'.format(action.upper()) + ) + else: + if not instances_id: + msg = 'Instances not found or not in INSERVICE ' \ + 'state or Number of instances is ' \ + 'greater then current. ' \ + 'Only {0} instances can be protect ' \ + 'or unprotect'.format(max_protecting) + self.fail( + changed=False, + msg=msg + ) + else: + self._batch_instances_action( + instances=instances_id, + group=group, + timeout=timeout, + action=action + ) + if wait: + self._wait_for_instances_inservice_status( + timeout=timeout, + interval=5, + group=group, + instances_id=instances_id + ) + self.exit( + changed=True, + msg='Action {0} was done'.format(action.upper()) + ) + + else: + + if not instances_id: + msg = 'Instances not found or not in INSERVICE ' \ + 'state or Number of instances is ' \ + 'less than minimum. 
Only {0} instances can ' \ + 'be removed'.format(max_removing) + self.fail( + changed=False, + msg=msg + ) + else: + if not action and len(instances_id[0]) == 1: + self._delete_single_instance( + instance=instances_id, + delete_instance=instance_delete + ) + if wait: + self._wait_for_delete_instances( + group=group, + instances_id=instances_id, + timeout=timeout + ) + msg = 'Instance {0} was removed'.format( + as_instances[0] + ) + self.exit( + changed=True, + msg=msg + ) + elif action == 'remove': + self._batch_instances_action( + instances=instances_id, + group=group, + timeout=timeout, + action=action, + instance_delete=instance_delete + ) + if wait: + self._wait_for_delete_instances( + timeout=timeout, + interval=15, + group=group, + instances_id=instances_id + ) + self.exit( + changed=True, + msg='Action {0} was done'.format(action.upper()) + ) + else: + self.fail( + changed=False, + msg='Action is incompatible with this state' + ) + + else: + self.fail( + changed=False, + msg='AS instances are empty' + ) + + +def main(): + module = ASInstanceModule() + module() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/as_instance/tasks/main.yaml b/tests/integration/targets/as_instance/tasks/main.yaml new file mode 100644 index 00000000..07c0f089 --- /dev/null +++ b/tests/integration/targets/as_instance/tasks/main.yaml @@ -0,0 +1,508 @@ +--- +- module_defaults: + opentelekomcloud.cloud.as_instance: + cloud: "{{ test_cloud }}" + opentelekomcloud.cloud.as_instance_info: + cloud: "{{ test_cloud }}" + opentelekomcloud.cloud.as_group: + cloud: "{{ test_cloud }}" + opentelekomcloud.cloud.as_config: + cloud: "{{ test_cloud }}" + openstack.cloud.security_group: + cloud: "{{ test_cloud }}" + openstack.cloud.os_network: + cloud: "{{ test_cloud }}" + openstack.cloud.os_subnet: + cloud: "{{ test_cloud }}" + openstack.cloud.os_router: + cloud: "{{ test_cloud }}" + opentelekomcloud.cloud.floating_ip: + cloud: "{{ test_cloud }}" + openstack.cloud.server: + cloud: "{{ test_cloud }}" + openstack.cloud.os_keypair: + cloud: "{{ test_cloud }}" + block: + - name: Set random prefix + set_fact: + prefix: "{{ 999999 | random | to_uuid | hash('md5') }}" + + - name: Set initial facts + set_fact: + as_instance_name: "{{ ( prefix + '_as_inst') }}" + min_instance_number: 0 + desire_instance_number: 1 + max_instance_number: 3 + as_group_name: "{{ ( prefix + '_as_group') }}" + as_config_name: "{{ ( prefix + 'as_config') }}" + network_name: "{{ ( prefix + '_network') }}" + subnet_name: "{{ ( prefix + '_subnet') }}" + router_name: "{{ ( prefix + '_router') }}" + secgroup_name: "{{ ( prefix + '_secgroup') }}" + kp_name: "{{ ( prefix + '_kp') }}" + server_name: "{{ ( prefix + '_ecs') }}" + server_flavor: "s3.medium.1" + image_name: Standard_Ubuntu_18.04_latest + volume_type: "SATA" + disk_type: "SYS" + disk_size: 4 + az1_name: "eu-de-01" + az2_name: "eu-de-03" + + - name: Create keypair + openstack.cloud.os_keypair: + name: "{{ kp_name }}" + register: kp + + - name: Create security group + openstack.cloud.security_group: + name: "{{ secgroup_name }}" + state: present + register: secgroup + + - name: Create network + openstack.cloud.os_network: + name: "{{ network_name }}" + state: present + register: network + + - name: Create subnet + openstack.cloud.os_subnet: + name: "{{ subnet_name }}" + state: present + network_name: "{{ network.network.name }}" + cidr: "192.168.110.0/24" + dns_nameservers: "{{ ['100.125.4.25', '8.8.8.8'] }}" + register: subnet + + - name: Create router + 
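+      # The router attaches the test subnet to the external network with SNAT,
+      # giving the instances scaled into the AS group outbound connectivity.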
openstack.cloud.os_router: + name: "{{ router_name }}" + state: present + network: admin_external_net + enable_snat: True + interfaces: + - net: "{{ network.network.name }}" + subnet: "{{ subnet.subnet.name }}" + register: router + + - name: Create AS Config + opentelekomcloud.cloud.as_config: + scaling_configuration: "{{ as_config_name }}" + key_name: "{{ kp_name }}" + image: "{{ image_name }}" + flavor: "{{ server_flavor }}" + disk: + - size: "{{ disk_size }}" + volume_type: "{{ volume_type }}" + disk_type: "{{ disk_type }}" + public_ip: + eip: + ip_type: "5_bgp" + bandwidth: + size: 10 + share_type: "PER" + charging_mode: "traffic" + security_groups: [{'id': "{{ secgroup.secgroup.id }}"}] + register: as_config + + - name: Create AS group + opentelekomcloud.cloud.as_group: + scaling_group: + name: "{{ as_group_name }}" + scaling_configuration: "{{ as_config_name }}" + min_instance_number: "{{ min_instance_number }}" + desire_instance_number: "{{ desire_instance_number }}" + max_instance_number: "{{ max_instance_number }}" + availability_zones: ["{{ az2_name }}"] + networks: [{'id': "{{ network.network.id }}"}] + security_groups: [{'id': "{{ secgroup.secgroup.id }}"}] + router: "{{ router.router.id }}" + delete_publicip: True + delete_volume: True + action: "resume" + state: "present" + wait: yes + timeout: 400 + register: as_group + + - name: Create ECS1 instance + openstack.cloud.server: + name: "{{ (as_instance_name + '_1') }}" + image: "{{ image_name }}" + network: "{{ network_name }}" + flavor: "{{ server_flavor }}" + availability_zone: "{{ az2_name }}" + delete_fip: True + register: ecs1 + + - name: Create ECS2 instance + openstack.cloud.server: + name: "{{ (as_instance_name + '_2') }}" + image: "{{ image_name }}" + network: "{{ network_name }}" + flavor: "{{ server_flavor }}" + availability_zone: "{{ az2_name }}" + delete_fip: True + register: ecs2 + + - name: Get list of AS Instances + opentelekomcloud.cloud.as_instance_info: + scaling_group: "{{ as_group.as_group.id }}" + register: as_inst_list + + - name: Get init list of ID of instances + set_fact: + init_id_list: "{{ as_inst_list.scaling_instances | map(attribute='id') | list }}" + + - name: Get init list of Names of instances + set_fact: + init_name_list: "{{ as_inst_list.scaling_instances | map(attribute='name') | list }}" + + - name: assert result + assert: + that: + - init_id_list|length == desire_instance_number + - init_name_list|length == desire_instance_number + + - name: Add AS instances - check mode + opentelekomcloud.cloud.as_instance: + scaling_group: "{{ as_group.as_group.name }}" + scaling_instances: ["{{ ecs1.server.id }}", "{{ ecs2.server.id }}"] + action: "add" + state: present + check_mode: yes + register: as_instances + + - name: assert result + assert: + that: + - as_instances is success + - as_instances is changed + + - name: Get list of AS Instances + opentelekomcloud.cloud.as_instance_info: + scaling_group: "{{ as_group.as_group.id }}" + register: as_inst_list + + - name: Get init list of ID of instances + set_fact: + init_id_list: "{{ as_inst_list.scaling_instances | map(attribute='id') | list }}" + + - name: Get init list of Names of instances + set_fact: + init_name_list: "{{ as_inst_list.scaling_instances | map(attribute='name') | list }}" + + - name: assert result + assert: + that: + - init_id_list|length == desire_instance_number + - init_name_list|length == desire_instance_number + + - name: Get list of AS Instances + opentelekomcloud.cloud.as_instance_info: + scaling_group: "{{ 
as_group.as_group.id }}" + register: as_inst_list + + - name: Get init list of ID of instances + set_fact: + id_list: "{{ as_inst_list.scaling_instances | map(attribute='id') | list }}" + + - name: Get init list of Names of instances + set_fact: + name_list: "{{ as_inst_list.scaling_instances | map(attribute='name') | list }}" + + - name: assert result + assert: + that: + - id_list|length == init_id_list|length + - name_list|length == init_name_list|length + + - name: Add AS instances + opentelekomcloud.cloud.as_instance: + scaling_group: "{{ as_group.as_group.id }}" + scaling_instances: ["{{ ecs1.server.id }}", "{{ ecs2.server.id }}"] + action: "add" + state: present + register: as_instances + + - name: assert result + assert: + that: + - as_instances is success + - as_instances is changed + + - name: Get list of AS Instances after adding new instances + opentelekomcloud.cloud.as_instance_info: + scaling_group: "{{ as_group.as_group.id }}" + register: as_inst_list + + - name: Get list of ID of instances + set_fact: + id_list: "{{ as_inst_list.scaling_instances | map(attribute='id') | list }}" + + - name: Get list of Names of instances + set_fact: + name_list: "{{ as_inst_list.scaling_instances | map(attribute='name') | list }}" + + - name: assert result + assert: + that: + - id_list|length == init_id_list|length + 2 + - name_list|length == init_name_list|length + 2 + + - name: Protect AS instances - check mode + opentelekomcloud.cloud.as_instance: + scaling_group: "{{ as_group.as_group.id }}" + scaling_instances: "{{ id_list }}" + action: "protect" + state: present + check_mode: yes + register: as_instances + + - name: assert result + assert: + that: + - as_instances is success + - as_instances is changed + + - name: Protect AS instances + opentelekomcloud.cloud.as_instance: + scaling_group: "{{ as_group.as_group.name }}" + scaling_instances: "{{ name_list }}" + action: "protect" + state: present + register: as_instances + + - name: assert result + assert: + that: + - as_instances is success + - as_instances is changed + + - name: Unprotect AS instances - check mode + opentelekomcloud.cloud.as_instance: + scaling_group: "{{ as_group.as_group.id }}" + scaling_instances: "{{ name_list }}" + action: "unprotect" + state: present + check_mode: yes + register: as_instances + + - name: assert result + assert: + that: + - as_instances is success + - as_instances is changed + + - name: Unprotect AS instances + opentelekomcloud.cloud.as_instance: + scaling_group: "{{ as_group.as_group.name }}" + scaling_instances: "{{ name_list }}" + action: "unprotect" + state: present + register: as_instances + + - name: assert result + assert: + that: + - as_instances is success + - as_instances is changed + + - name: Remove single AS instance - check mode + opentelekomcloud.cloud.as_instance: + scaling_group: "{{ as_group.as_group.id }}" + scaling_instances: ["{{ id_list[0] }}"] + instance_delete: yes + state: absent + check_mode: yes + register: as_instances + + - name: assert result + assert: + that: + - as_instances is success + - as_instances is changed + + - name: Get list of AS Instances + opentelekomcloud.cloud.as_instance_info: + scaling_group: "{{ as_group.as_group.id }}" + register: as_inst_list + + - name: Get list of ID of instances + set_fact: + id_list_2: "{{ as_inst_list.scaling_instances | map(attribute='id') | list }}" + + - name: Get list of Names of instances + set_fact: + name_list_2: "{{ as_inst_list.scaling_instances | map(attribute='name') | list }}" + + - name: assert result + assert: + 
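+        # check_mode removal must not change anything, so the counts still match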
that: + - id_list_2|length == id_list|length + - name_list_2|length == name_list|length + + - name: Remove single AS instance + opentelekomcloud.cloud.as_instance: + scaling_group: "{{ as_group.as_group.name }}" + scaling_instances: ["{{ name_list[0] }}"] + instance_delete: yes + state: absent + wait: true + timeout: 360 + register: as_instances + + - name: assert result + assert: + that: + - as_instances is success + - as_instances is changed + + - name: Get list of AS Instances after removing single instance + opentelekomcloud.cloud.as_instance_info: + scaling_group: "{{ as_group.as_group.id }}" + register: as_inst_list + + - name: Get list of ID of instances + set_fact: + id_list_after_remove: "{{ as_inst_list.scaling_instances | map(attribute='id') | list }}" + + - name: Get list of Names of instances + set_fact: + name_list_after_remove: "{{ as_inst_list.scaling_instances | map(attribute='name') | list }}" + + - name: assert result + assert: + that: + - id_list_after_remove|length == id_list|length - 1 + - name_list_after_remove|length == name_list|length - 1 + + - name: Remove group of AS instances - check mode + opentelekomcloud.cloud.as_instance: + scaling_group: "{{ as_group.as_group.name }}" + scaling_instances: "{{ name_list_after_remove }}" + instance_delete: yes + action: "remove" + state: absent + check_mode: yes + register: result + + - name: assert result + assert: + that: + - result is success + - result is changed + + - name: Get list of AS Instances + opentelekomcloud.cloud.as_instance_info: + scaling_group: "{{ as_group.as_group.id }}" + register: as_inst_list + + - name: Get list of ID of instances + set_fact: + id_list_after_remove: "{{ as_inst_list.scaling_instances | map(attribute='id') | list }}" + + - name: Get list of Names of instances + set_fact: + name_list_after_remove: "{{ as_inst_list.scaling_instances | map(attribute='name') | list }}" + + - name: assert result + assert: + that: + - id_list_after_remove|length == id_list|length - 1 + - name_list_after_remove|length == name_list|length - 1 + + - name: Remove group of AS instances + opentelekomcloud.cloud.as_instance: + scaling_group: "{{ as_group.as_group.name }}" + scaling_instances: "{{ name_list }}" + instance_delete: yes + action: "remove" + state: absent + wait: yes + timeout: 360 + register: result + + - name: assert result + assert: + that: + - result is success + - result is changed + + - name: Get list of AS instances + opentelekomcloud.cloud.as_instance_info: + scaling_group: "{{ as_group.as_group.id }}" + register: result + + - name: assert result + assert: + that: + - result.scaling_instances|length == {{ min_instance_number }} + + always: + - block: + # Cleanup + - name: Delete ECS1 + openstack.cloud.server: + name: "{{ (as_instance_name + '_1') }}" + state: absent + + - name: Delete ECS2 + openstack.cloud.server: + name: "{{ (as_instance_name + '_2') }}" + state: absent + + - name: Delete as group + opentelekomcloud.cloud.as_group: + scaling_group: + name: "{{ as_group_name }}" + state: absent + force_delete: yes + wait: yes + timeout: 360 + register: dropped_as_group + + - name: assert result + assert: + that: + - dropped_as_group is success + - dropped_as_group is changed + + - name: Delete as config + opentelekomcloud.cloud.as_config: + scaling_configuration: "{{ as_config_name }}" + state: absent + register: dropped_as_config + + - name: assert result + assert: + that: + - dropped_as_config is success + - dropped_as_config is changed + + - name: Delete existing router + 
openstack.cloud.os_router: + name: "{{ router.router.name }}" + state: absent + + - name: Delete existing subnet + openstack.cloud.os_subnet: + name: "{{ subnet.subnet.name }}" + state: absent + + - name: Delete existing network + openstack.cloud.os_network: + name: "{{ network.network.name }}" + state: absent + + - name: Delete security group + openstack.cloud.security_group: + name: "{{ secgroup_name }}" + state: absent + + - name: Delete keypair + openstack.cloud.os_keypair: + name: "{{ kp_name }}" + state: absent + ignore_errors: true diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index b7067f45..c66461e1 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -2,6 +2,7 @@ plugins/modules/as_config.py validate-modules:missing-gplv3-license plugins/modules/as_config_info.py validate-modules:missing-gplv3-license plugins/modules/as_group.py validate-modules:missing-gplv3-license plugins/modules/as_group_info.py validate-modules:missing-gplv3-license +plugins/modules/as_instance.py validate-modules:missing-gplv3-license plugins/modules/as_instance_info.py validate-modules:missing-gplv3-license plugins/modules/as_policy.py validate-modules:missing-gplv3-license plugins/modules/as_policy_info.py validate-modules:missing-gplv3-license diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 3abdde05..669b8620 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -2,6 +2,7 @@ plugins/modules/as_config.py validate-modules:missing-gplv3-license plugins/modules/as_config_info.py validate-modules:missing-gplv3-license plugins/modules/as_group.py validate-modules:missing-gplv3-license plugins/modules/as_group_info.py validate-modules:missing-gplv3-license +plugins/modules/as_instance.py validate-modules:missing-gplv-license plugins/modules/as_instance_info.py validate-modules:missing-gplv3-license plugins/modules/as_policy.py validate-modules:missing-gplv3-license plugins/modules/as_policy_info.py validate-modules:missing-gplv3-license From 87cd94dc3c206b06f7d8e71bf09617a71a135e25 Mon Sep 17 00:00:00 2001 From: YustinaKvr <62885041+YustinaKvr@users.noreply.github.com> Date: Mon, 30 Aug 2021 11:38:48 +0300 Subject: [PATCH 15/65] DDS Instance info module (#131) DDS Instance info module Docs Code Integration tests Ignore files Reviewed-by: Anton Sidelnikov Reviewed-by: None Reviewed-by: Rodion Gyrbu Reviewed-by: None --- meta/runtime.yml | 1 + plugins/modules/dds_instance_info.py | 314 ++++++++++++++++++ .../targets/dds_instance_info/tasks/main.yaml | 15 + tests/sanity/ignore-2.10.txt | 1 + tests/sanity/ignore-2.9.txt | 1 + 5 files changed, 332 insertions(+) create mode 100644 plugins/modules/dds_instance_info.py create mode 100644 tests/integration/targets/dds_instance_info/tasks/main.yaml diff --git a/meta/runtime.yml b/meta/runtime.yml index 19e7d0c3..695c7f8e 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -24,6 +24,7 @@ action_groups: - deh_host_type_info - deh_server_info - dds_flavor_info + - dds_instance_info - floating_ip - loadbalancer - loadbalancer_info diff --git a/plugins/modules/dds_instance_info.py b/plugins/modules/dds_instance_info.py new file mode 100644 index 00000000..c7ca29db --- /dev/null +++ b/plugins/modules/dds_instance_info.py @@ -0,0 +1,314 @@ +#!/usr/bin/python +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied. +# See the License for the specific language governing permissions, +# limitations under the License. + +DOCUMENTATION = ''' +module: dds_instance_info +short_description: Obtain information about a specified DB instance. +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.9.0" +author: "Yustina Kvrivishvili (@YustinaKvr)" +description: + - Get info about instances. +options: + instance: + description: + - Specifies the DB instance ID or name. + type: str + mode: + description: + - Specifies the instance type. + choices: [sharding, replicaset] + type: str + datastore_type: + description: + - Specifies the database type. The value is DDS-Community. + type: str + default: 'DDS-Community' + vpc_id: + description: + - Specifies the VPC ID. + type: str + subnet_id: + description: + - Specifies the network ID of the subnet. + type: str +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +instance: + description: + - Info about a specified DB instance. + - If ID or name is not specified, info about all instances inside one project. + returned: On Success + type: complex + contains: + actions: + description: Indicates the operation that is executed on the DB instance. + type: list + sample: "CREATE" + availability_zone: + description: Indicates the AZ. + type: str + sample: null + backup_strategy: + description: Indicates the backup policy. + type: complex + contains: + keep_days: + description: Indicates the number of days to retain the generated backup files. + type: int + sample: 7 + start_time: + description: + - Indicates the backup time window. + - Automated backups will be triggered during the backup time window. + - The current time is the UTC time. + type: str + sample: "22:00-23:00" + created: + description: Indicates the DB instance creation time. + type: str + sample: "2021-08-12T13:58:08" + datastore: + description: Specifies the domain name associated with the server certificate. + type: complex + contains: + storage_engine: + description: Storage engine. + type: str + sample: null + type: + description: Indicates the DB engine. + type: str + sample: "DDS-Community" + version: + description: Indicates the database version. + type: str + sample: "3.4" + datastore_type: + description: Specifies the database type. + type: str + sample: null + disk_encryption_id: + description: Indicates the disk encryption key ID. + type: str + sample: null + engine: + description: Indicates the storage engine. + type: str + sample: "wiredTiger" + flavor: + description: Indicates the DB instance flavor. + type: str + sample: null + groups: + description: Indicates group information. + type: complex + contains: + id: + description: + - Indicates the group ID. + - This parameter is valid only when the node type is shard or config. + type: str + sample: null + name: + description: Indicates the group name. + type: str + sample: null + nodes: + description: Indicates the AZ. + type: complex + contains: + availability_zone: + description: Indicates the AZ. + type: str + sample: null + id: + description: Indicates the node ID. + type: str + sample: "254c36d7e72a40d0b1667983a8a2fc09no02" + name: + description: Indicates the node name. + type: str + sample: "test_dds_replica_node_3" + private_ip: + description: + - Indicates the private IP address of a node. + - Valid only for mongos and replica set instances. + - The value exists only after ECSs are created successfully. 
+ type: str + sample: "192.168.115.80" + public_ip: + description: + - Indicates the EIP that has been bound. + - Valid only for mongos nodes of cluster instances. + type: str + sample: "" + role: + description: Indicates the node role. + type: str + sample: "Primary" + spec_code: + description: Indicates the resource specifications code. + type: str + sample: "dds.mongodb.s2.medium.4.repset" + status: + description: Indicates the node status. + type: str + sample: "normal" + status: + description: + - Indicates the group status. + - This parameter is valid only when the node type is shard or config. + type: str + sample: null + type: + description: Indicates the node type. + type: str + sample: null + volume: + description: Indicates the volume information. + type: complex + contains: + size: + description: Indicates the disk size. Unit GB + type: str + sample: "10" + used: + description: Indicates the disk usage. Unit GB + type: str + sample: "0.333129882812" + id: + description: Indicates the DB instance ID. + type: str + sample: "7ddf3c02aea54610bb6ba324e653484din02" + maintenance_window: + description: Indicates the maintenance time window. + type: str + sample: "02:00-06:00" + mode: + description: Indicates the instance type, which is the same as the request parameter. + type: str + sample: "CREATE" + name: + description: Indicates the operation that is executed on the DB instance. + type: str + sample: "ReplicaSet" + pay_mode: + description: Indicates the billing mode. 0 indicates the pay-per-use billing mode. + type: str + sample: "0" + port: + description: Indicates the database port number. The port range is 2100 to 9500. + type: int + sample: 8635 + region: + description: Indicates the region where the DB instance is deployed. + type: str + sample: "eu-de" + security_group_id: + description: Indicates the security group ID. + type: str + sample: "120888d9-65be-4899-b07d-aa151c2895d4" + ssl: + description: Indicates that SSL is enabled or not. + type: bool + sample: 0 + status: + description: Indicates the DB instance status. + type: str + sample: "normal" + subnet_id: + description: Indicates the subnet ID. + type: str + sample: "c2fdde80-9a24-4a84-99fe-d07e942274b1" + time_zone: + description: Indicates the time zone. + type: str + sample: "" + updated: + description: Indicates the time when a DB instance is updated. + type: str + sample: "2021-08-12T13:58:03" + vpc_id: + description: Indicates the VPC ID. 
+ type: str + sample: "199dcd34-9c6f-49d5-b12a-5fa96351acf1" +''' + +EXAMPLES = ''' +# Get info about instances +- opentelekomcloud.cloud.dds_instance_info: + register: result +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class DDSInstanceInfo(OTCModule): + argument_spec = dict( + instance=dict(), + mode=dict(choices=['sharding', 'replicaset']), + datastore_type=dict(default='DDS-Community'), + vpc_id=dict(), + subnet_id=dict() + ) + module_kwargs = dict( + supports_check_mode=True + ) + + def run(self): + + data = [] + query = {} + + instance = self.params['instance'] + mode = self.params['mode'] + datastore_type = self.params['datastore_type'] + vpc_id = self.params['vpc_id'] + subnet_id = self.params['subnet_id'] + + if instance: + db_instance = self.conn.dds.find_instance(name_or_id=instance) + if db_instance: + query['id'] = db_instance.id + query['name'] = db_instance.name + if mode: + query['mode'] = mode + + if datastore_type: + query['datastore_type'] = datastore_type + + if vpc_id: + vpc = self.conn.network.find_router(name_or_id=vpc_id) + if vpc: + query['vpc_id'] = vpc.id + + if subnet_id: + subnet = self.conn.network.find_subnet(name_or_id=subnet_id) + if subnet: + query['subnet_id'] = subnet.id + + for raw in self.conn.dds.instances(**query): + dt = raw.to_dict() + dt.pop('location') + data.append(dt) + + self.exit( + changed=False, + instances=data + ) + + +def main(): + module = DDSInstanceInfo() + module() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/dds_instance_info/tasks/main.yaml b/tests/integration/targets/dds_instance_info/tasks/main.yaml new file mode 100644 index 00000000..dcc7c25f --- /dev/null +++ b/tests/integration/targets/dds_instance_info/tasks/main.yaml @@ -0,0 +1,15 @@ +--- +- module_defaults: + opentelekomcloud.cloud.dds_instance_info: + cloud: "{{ test_cloud }}" + block: + - name: Get info about instances + opentelekomcloud.cloud.dds_instance_info: + register: result + + - name: assert result + assert: + that: + - result is success + - result is not changed + - result.instances is defined diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index c66461e1..ffed71ed 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -39,6 +39,7 @@ plugins/modules/deh_host_type_info.py validate-modules:missing-gplv3-license plugins/modules/deh_server_info.py validate-modules:missing-gplv3-license plugins/modules/dds_datastore_info.py validate-modules:missing-gplv3-license plugins/modules/dds_flavor_info.py validate-modules:missing-gplv3-license +plugins/modules/dds_instance_info.py validate-modules:missing-gplv3-license plugins/modules/loadbalancer.py validate-modules:missing-gplv3-license plugins/modules/loadbalancer_info.py validate-modules:missing-gplv3-license plugins/modules/nat_gateway.py validate-modules:missing-gplv3-license diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 669b8620..f82b3646 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -39,6 +39,7 @@ plugins/modules/deh_host_type_info.py validate-modules:missing-gplv3-license plugins/modules/deh_server_info.py validate-modules:missing-gplv3-license plugins/modules/dds_datastore_info.py validate-modules:missing-gplv3-license plugins/modules/dds_flavor_info.py validate-modules:missing-gplv3-license +plugins/modules/dds_instance_info.py validate-modules:missing-gplv3-license plugins/modules/floating_ip.py 
validate-modules:missing-gplv3-license plugins/modules/loadbalancer.py validate-modules:missing-gplv3-license plugins/modules/loadbalancer_info.py validate-modules:missing-gplv3-license From ad5c4bd60abd5b51d039fd70ebe9269d6609594e Mon Sep 17 00:00:00 2001 From: YustinaKvr <62885041+YustinaKvr@users.noreply.github.com> Date: Thu, 23 Sep 2021 13:44:56 +0300 Subject: [PATCH 16/65] DDS instance module (#132) DDS instance module Docs First version of working code Reviewed-by: Anton Sidelnikov Reviewed-by: None --- meta/runtime.yml | 1 + plugins/modules/dds_instance.py | 278 ++++++++++++++++++ .../integration/targets/dds_instance/aliases | 1 + .../targets/dds_instance/tasks/main.yaml | 113 +++++++ tests/sanity/ignore-2.10.txt | 1 + tests/sanity/ignore-2.9.txt | 1 + 6 files changed, 395 insertions(+) create mode 100644 plugins/modules/dds_instance.py create mode 100644 tests/integration/targets/dds_instance/aliases create mode 100644 tests/integration/targets/dds_instance/tasks/main.yaml diff --git a/meta/runtime.yml b/meta/runtime.yml index 695c7f8e..c9b5f286 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -25,6 +25,7 @@ action_groups: - deh_server_info - dds_flavor_info - dds_instance_info + - dds_instance - floating_ip - loadbalancer - loadbalancer_info diff --git a/plugins/modules/dds_instance.py b/plugins/modules/dds_instance.py new file mode 100644 index 00000000..de10678e --- /dev/null +++ b/plugins/modules/dds_instance.py @@ -0,0 +1,278 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +--- +module: dds_instance +short_description: Manage DDS instance +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.0.2" +author: "Yustina Kvrivishvili (@YustinaKvr)" +description: + - Manage DDS instances. +options: + name: + description: + - Specifies the DB instance name. + - The DB instance name of the same DB engine is unique for the same tenant. + - The value must be 4 to 64 characters in length and start with a letter. + - It is case-sensitive and can contain only letters, digits, hyphens, and underscores. + type: str + datastore_version: + description: Specifies the database version. + choices: [3.2, 3.4] + type: str + default: '3.4' + region: + description: + - Specifies the region ID. + - The value cannot be empty. + type: str + availability_zone: + description: + - Specifies the AZ ID. + - The value cannot be empty. + type: str + router: + description: + - Specifies the VPC ID. The value cannot be empty. + - The string length and whether the string complying with UUID regex rules are verified. + type: str + network: + description: Specifies the subnet ID. + type: str + security_group: + description: Specifies the ID of the security group where a specified DB instance belongs to. + type: str + state: + choices: [present, absent] + default: present + description: Instance state + type: str + password: + description: + - Specifies the database password. 
The value must be 8 to 32 characters in length, + - contain uppercase and lowercase letters, digits and special characters. + type: str + disk_encryption: + description: + - Specifies the key ID used for disk encryption. + - The string must comply with UUID regular expression rules. + - If this parameter is not transferred, disk encryption is not performed. + type: str + mode: + description: + - Specifies the instance type. Cluster, replica set instances are supported. + choices: [Sharding, ReplicaSet] + type: str + flavors: + description: + - Specifies the instance specifications. + type: list + elements: dict + suboptions: + type: + description: + - Specifies the node type. For a replica set instance, the value is replica. + - For a cluster instance, the value can be mongos, shard, or config + choices: [mongos, shard, config, replica] + type: str + num: + description: Specifies node quantity. + type: int + storage: + description: + - Specifies the disk type. This parameter is optional for all nodes except mongos. + - This parameter is invalid for the mongos nodes. + type: str + default: 'ULTRAHIGH' + spec_code: + description: Specifies the resource specification code. + type: str + size: + description: + - Specifies the disk size. This parameter is mandatory for all nodes except mongos. + - This parameter is invalid for the mongos nodes. + - For a cluster instance, the storage space of a shard node can be 10 to 1000 GB, + - and the config storage space is 20 GB. This parameter is invalid for mongos nodes. + - Therefore, you do not need to specify the storage space for mongos nodes. + - For a replica set instance, the value ranges from 10 to 2000. + type: int + backup_timeframe: + description: + - Specifies the backup time window. + - Automated backups will be triggered during the backup time window. Value cannot be empty. + type: str + backup_keepdays: + description: + - Specifies the number of days to retain the generated backup files. + - The value range is from 0 to 732. + type: int + ssl_option: + description: + - Specifies whether to enable SSL. The value 0 indicates that SSL is disabled, 1 - enabled. + - If this parameter is not transferred, SSL is enabled by default. + type: int + + +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +dds_instance: + description: List of dictionaries describing DDS Instance. + type: complex + returned: On Success. + contains: + id: + description: Unique UUID. + type: str + sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69" + name: + description: Name (version) of the instance. 
+ type: str + sample: "test" +''' + +EXAMPLES = ''' +- name: Create DDS Instance + opentelekomcloud.cloud.dds_instance: + name: "{{ instance_name }}" + datastore_version: "3.4" + region: "eu-de" + availability_zone: "eu-de-01" + router: "{{ router_name }}" + mode: "ReplicaSet" + network: "{{ network_name }}" + security_group: "default" + password: "Test@123" + flavors: + - type: "replica" + num: 1 + storage: "ULTRAHIGH" + size: 10 + spec_code: "dds.mongodb.s2.medium.4.repset" + backup_timeframe: "00:00-01:00" + backup_keepdays: 7 + ssl_option: 1 + state: present +''' + + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class DdsInstanceModule(OTCModule): + argument_spec = dict( + name=dict(type='str'), + datastore_version=dict(type='str', choices=['3.2', '3.4'], default='3.4'), + region=dict(type='str'), + availability_zone=dict(type='str'), + router=dict(type='str'), + network=dict(type='str'), + security_group=dict(type='str'), + password=dict(type='str', no_log=True), + disk_encryption=dict(type='str'), + mode=dict(type='str', choices=['Sharding', 'ReplicaSet']), + flavors=dict(type='list', elements='dict'), + backup_timeframe=dict(type='str'), + backup_keepdays=dict(type='int'), + ssl_option=dict(type='int'), + state=dict(type='str', choices=['present', 'absent'], default='present') + ) + module_kwargs = dict( + required_if=[ + ('backup_keepdays', not None, ['backup_timeframe']), + ('backup_timeframe', not None, ['backup_keepdays']), + ], + supports_check_mode=True + ) + + otce_min_version = '0.11.0' + + def _system_state_change(self, obj): + state = self.params['state'] + if state == 'present': + if not obj: + return True + elif state == 'absent' and obj: + return True + return False + + def run(self): + attrs = {} + if self.params['disk_encryption']: + attrs['disk_encryption_id'] = self.params.pop('disk_encryption') + if self.params['name']: + attrs['name'] = self.params['name'] + if self.params['datastore_version']: + attrs['datastore_version'] = self.params['datastore_version'] + if self.params['region']: + attrs['region'] = self.params['region'] + if self.params['availability_zone']: + attrs['availability_zone'] = self.params['availability_zone'] + if self.params['router']: + attrs['router'] = self.params['router'] + if self.params['mode']: + attrs['mode'] = self.params['mode'] + if self.params['network']: + attrs['network'] = self.params['network'] + if self.params['security_group']: + attrs['security_group'] = self.params['security_group'] + if self.params['password']: + attrs['password'] = self.params['password'] + if self.params['backup_timeframe']: + attrs['backup_timeframe'] = self.params['backup_timeframe'] + if self.params['backup_keepdays']: + attrs['backup_keepdays'] = self.params['backup_keepdays'] + if self.params['ssl_option']: + attrs['ssl_option'] = self.params['ssl_option'] + if self.params['flavors']: + attrs['flavors'] = self.params['flavors'] + + changed = False + + instance = self.conn.dds.find_instance( + name_or_id=attrs['name']) + + if self.ansible.check_mode: + self.exit(changed=self._system_state_change(instance)) + + if self.params['state'] == 'absent': + changed = False + + if instance: + attrs = { + 'instance': instance.id + } + if self.params['wait']: + attrs['wait'] = True + + self.conn.delete_dds_instance(**attrs) + changed = True + + elif self.params['state'] == 'present': + if not instance: + instance = self.conn.create_dds_instance(**attrs) + self.exit(changed=True, instance=instance.to_dict()) + + 
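+      # A DDS instance is created inside an existing VPC and subnet, so the
+      # network, subnet and router are provisioned before the instance itself.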
self.exit(changed=changed) + + +def main(): + module = DdsInstanceModule() + module() + + +if __name__ == "__main__": + main() diff --git a/tests/integration/targets/dds_instance/aliases b/tests/integration/targets/dds_instance/aliases new file mode 100644 index 00000000..7a68b11d --- /dev/null +++ b/tests/integration/targets/dds_instance/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/dds_instance/tasks/main.yaml b/tests/integration/targets/dds_instance/tasks/main.yaml new file mode 100644 index 00000000..8b8864a7 --- /dev/null +++ b/tests/integration/targets/dds_instance/tasks/main.yaml @@ -0,0 +1,113 @@ +--- +- module_defaults: + opentelekomcloud.cloud.dds_instance: + cloud: "{{ test_cloud }}" + + block: + - name: Set random prefix + set_fact: + prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" + + - name: Set initial facts + set_fact: + network_name: "{{ ( prefix + 'dds_test-network') }}" + subnet_name: "{{ ( prefix + 'dds_test-subnet') }}" + router_name: "{{ ( prefix + 'dds_test-router') }}" + instance_name: "{{ ( 'z-' + prefix + 'dds_test-instance') }}" + dds_flavor: "dds.mongodb.s2.medium.4.repset" + + - name: Delete missing instance + dds_instance: + state: absent + name: "definitely_missing_instance" + register: dds + + - name: assert result + assert: + that: + - dds is success + - dds is not changed + + - name: Create network for test + openstack.cloud.network: + cloud: "{{ test_cloud }}" + name: "{{ network_name }}" + state: present + register: test_network + + - name: Create subnet for test + openstack.cloud.subnet: + cloud: "{{ test_cloud }}" + name: "{{ subnet_name }}" + state: present + network_name: "{{ test_network.network.name }}" + cidr: "192.168.0.0/24" + dns_nameservers: "{{ ['100.125.4.25', '8.8.8.8'] }}" + register: test_subnet + + - name: Create router for test + openstack.cloud.router: + cloud: "{{ test_cloud }}" + name: "{{ router_name }}" + state: present + network: admin_external_net + enable_snat: True + interfaces: + - net: "{{ test_network.network.name }}" + subnet: "{{ test_subnet.subnet.name }}" + register: test_router + + - name: Create DDS Instance + opentelekomcloud.cloud.dds_instance: + name: "{{ instance_name }}" + datastore_version: "3.4" + region: "eu-de" + availability_zone: "eu-de-01" + router: "{{ test_router }}" + mode: "replicaset" + network: "{{ test_network }}" + security_group: "default" + password: "Test@123" + flavors: + - flavor_type: "replica" + flavor_num: 2 + flavor_storage: "ULTRAHIGH" + flavor_size: 10 + code: "{{ dds_flavor }}" + backup_timeframe: "00:00-01:00" + backup_keepdays: 7 + ssl_option: 1 + state: present + + - name: assert result + assert: + that: + - obj is success + - obj is changed + + always: + - block: + # Cleanup + - name: Drop instance + opentelekomcloud.cloud.dds_instance: + name: "{{ instance_name }}" + state: "absent" + + - name: Drop router + openstack.cloud.router: + cloud: "{{ test_cloud }}" + name: "{{ router_name }}" + state: absent + + - name: Drop subnet + openstack.cloud.subnet: + cloud: "{{ test_cloud }}" + name: "{{ subnet_name }}" + state: absent + + - name: Drop network + openstack.cloud.network: + cloud: "{{ test_cloud }}" + name: "{{ network_name }}" + state: absent + ignore_errors: yes diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index ffed71ed..47204e76 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -40,6 +40,7 @@ plugins/modules/deh_server_info.py validate-modules:missing-gplv3-license 
plugins/modules/dds_datastore_info.py validate-modules:missing-gplv3-license plugins/modules/dds_flavor_info.py validate-modules:missing-gplv3-license plugins/modules/dds_instance_info.py validate-modules:missing-gplv3-license +plugins/modules/dds_instance.py validate-modules:missing-gplv3-license plugins/modules/loadbalancer.py validate-modules:missing-gplv3-license plugins/modules/loadbalancer_info.py validate-modules:missing-gplv3-license plugins/modules/nat_gateway.py validate-modules:missing-gplv3-license diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index f82b3646..fe5ea08f 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -39,6 +39,7 @@ plugins/modules/deh_host_type_info.py validate-modules:missing-gplv3-license plugins/modules/deh_server_info.py validate-modules:missing-gplv3-license plugins/modules/dds_datastore_info.py validate-modules:missing-gplv3-license plugins/modules/dds_flavor_info.py validate-modules:missing-gplv3-license +plugins/modules/dds_instance.py validate-modules:missing-gplv3-license plugins/modules/dds_instance_info.py validate-modules:missing-gplv3-license plugins/modules/floating_ip.py validate-modules:missing-gplv3-license plugins/modules/loadbalancer.py validate-modules:missing-gplv3-license From 60b90d5a96af04a5abcd55636e4397fb2cb438bc Mon Sep 17 00:00:00 2001 From: YustinaKvr <62885041+YustinaKvr@users.noreply.github.com> Date: Fri, 1 Oct 2021 16:20:58 +0300 Subject: [PATCH 17/65] Dns recordset info module (#112) Dns recordset info module Dns recordset info module Reviewed-by: Polina Gubina Reviewed-by: None Reviewed-by: Anton Sidelnikov Reviewed-by: Artem Goncharov Reviewed-by: None Reviewed-by: Irina Pereiaslavskaia --- meta/runtime.yml | 1 + plugins/modules/dns_recordset_info.py | 165 ++++++++++++++++++ .../dns_recordset_info/tasks/main.yaml | 76 ++++++++ tests/sanity/ignore-2.10.txt | 1 + tests/sanity/ignore-2.9.txt | 1 + 5 files changed, 244 insertions(+) create mode 100644 plugins/modules/dns_recordset_info.py create mode 100644 tests/integration/targets/dns_recordset_info/tasks/main.yaml diff --git a/meta/runtime.yml b/meta/runtime.yml index c9b5f286..3bd07a06 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -23,6 +23,7 @@ action_groups: - deh_host_info - deh_host_type_info - deh_server_info + - dns_recordset_info - dds_flavor_info - dds_instance_info - dds_instance diff --git a/plugins/modules/dns_recordset_info.py b/plugins/modules/dns_recordset_info.py new file mode 100644 index 00000000..f39ac79a --- /dev/null +++ b/plugins/modules/dns_recordset_info.py @@ -0,0 +1,165 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +module: dns_recordset_info +short_description: Get info about DNS recordsets. +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.8.1" +author: "Yustina Kvrivishvili (@YustinaKvr)" +description: + - Get DNS recordset info from the OTC. +options: + zone: + description: + - ID or name of the required zone. 
If a name is provided, only a public zone can be
+        returned. If a private zone is required, its ID must be passed.
+    type: str
+  name:
+    description:
+      - ID or name of the existing record set. If zone is set, record sets are searched within this
+        zone, otherwise all record sets are listed and filtered by name.
+    type: str
+  tags:
+    description:
+      - Resource tag.
+    type: str
+  status:
+    description:
+      - Status of the recordsets to be queried.
+    choices: [active, error, disable, freeze, pending_create, pending_update, pending_delete]
+    type: str
+  type:
+    description:
+      - Type of the recordsets to be queried.
+    choices: [a, aaaa, mx, cname, txt, ns, srv, caa, ptr]
+    type: str
+requirements: ["openstacksdk", "otcextensions"]
+'''
+
+RETURN = '''
+recordset:
+    description: List of dictionaries describing the record sets and their metadata.
+    type: complex
+    returned: On Success.
+    contains:
+        created_at:
+            description: Timestamp when the record set was created.
+            type: str
+            sample: "2021-05-24T15:27:19.335"
+        description:
+            description: Description of the recordset.
+            type: str
+            sample: "null"
+        id:
+            description: ID of the record set.
+            type: str
+            sample: "ff80808275f5fb9c01799efcd1307062"
+        is_default:
+            description: Whether the record set is created by default.
+            type: bool
+            sample: false
+        name:
+            description: Name of the recordset.
+            type: str
+            sample: "recordset.test.zone."
+        project_id:
+            description: Project ID of the record set.
+            type: str
+            sample: "5dd3c0b24cdc4d31952c49589182a89d"
+        records:
+            description: Record set value.
+            type: list
+            sample: ["2.2.2.2", "1.1.1.1"]
+'''
+
+EXAMPLES = '''
+# Get info about the chosen DNS record set.
+- opentelekomcloud.cloud.dns_recordset_info:
+    zone: "test.zone."
+  register: recordsets
+'''
+
+from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule
+
+
+class DNSRecordsetInfoModule(OTCModule):
+
+    argument_spec = dict(
+        zone=dict(required=False),
+        name=dict(required=False),
+        tags=dict(required=False),
+        status=dict(required=False, choices=['active', 'error', 'disable', 'freeze',
+                                             'pending_create', 'pending_update', 'pending_delete']),
+        type=dict(required=False, choices=['a', 'aaaa', 'mx', 'cname', 'txt', 'ns', 'srv', 'caa',
+                                           'ptr'])
+    )
+    module_kwargs = dict(
+        supports_check_mode=True,
+        required_if=[('name', not None, ['zone'])]
+    )
+
+    def run(self):
+
+        data = []
+        query = {}
+        recordset = None
+
+        if self.params['zone']:
+            try:
+                query['zone'] = self.conn.dns.find_zone(
+                    name_or_id=self.params['zone'], ignore_missing=False).id
+            except self.sdk.exceptions.ResourceNotFound:
+                self.fail_json(msg="Zone not found")
+        if self.params['name']:
+            try:
+                recordset = self.conn.dns.find_recordset(
+                    zone=query['zone'], name_or_id=self.params['name'],
+                    ignore_missing=False)
+                dt = recordset.to_dict()
+                dt.pop('location')
+                data.append(dt)
+
+                self.exit(
+                    changed=False,
+                    recordset=data
+                )
+            except self.sdk.exceptions.ResourceNotFound:
+                self.fail_json(msg="Recordset not found")
+        if self.params['name']:
+            query['name'] = self.params['name']
+        if self.params['tags']:
+            query['tags'] = self.params['tags']
+        if self.params['status']:
+            query['status'] = self.params['status'].upper()
+        if self.params['type']:
+            query['type'] = self.params['type'].upper()
+
+        for raw in self.conn.dns.recordsets(**query):
+            dt = raw.to_dict()
+            dt.pop('location')
+            data.append(dt)
+
+        self.exit(
+            changed=False,
+            recordset=data
+        )
+
+
+def main():
+    module = DNSRecordsetInfoModule()
+    module()
+
+
+if __name__ ==
'__main__': + main() diff --git a/tests/integration/targets/dns_recordset_info/tasks/main.yaml b/tests/integration/targets/dns_recordset_info/tasks/main.yaml new file mode 100644 index 00000000..e4d2c81e --- /dev/null +++ b/tests/integration/targets/dns_recordset_info/tasks/main.yaml @@ -0,0 +1,76 @@ +--- +- module_defaults: + opentelekomcloud.cloud.dns_recordset: + cloud: "{{ test_cloud }}" + opentelekomcloud.cloud.dns_recordset_info: + cloud: "{{ test_cloud }}" + opentelekomcloud.cloud.dns_zone: + cloud: "{{ test_cloud }}" + + block: + - name: Set random prefix + set_fact: + prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" + + - name: Set initial facts + set_fact: + zone_name: "{{ ( prefix + 'test.zone') }}" + recordset_random_name: "{{ ( prefix + 'recordset.' + prefix + 'test.zone') }}" + + - name: Creating a public DNS Zone + opentelekomcloud.cloud.dns_zone: + name: "{{ zone_name }}" + state: present + register: dns_zo + + - name: Creating a DNS Recordset + opentelekomcloud.cloud.dns_recordset: + zone_id: "{{ dns_zo.zone.id }}" + recordset_name: "{{ recordset_random_name }}" + type: A + records: + - "1.1.1.1" + - "2.2.2.2" + state: present + register: dns_rs + + - name: Getting info about recordset in created zone + opentelekomcloud.cloud.dns_recordset_info: + zone: "{{ dns_zo.zone.id }}" + register: recordsets + + - name: assert result + assert: + that: + - recordsets is success + - recordsets is not changed + - recordsets | length > 0 + + - name: Get info about created recordset + opentelekomcloud.cloud.dns_recordset_info: + zone: "{{ dns_zo.zone.id }}" + name: "{{ dns_rs.recordset.name }}" + register: rs + + - name: assert result + assert: + that: + - rs is success + - rs is not changed + - rs | length > 0 + + + always: + - block: + # Cleanup + - name: Drop created recordset + opentelekomcloud.cloud.dns_recordset: + recordset_name: "{{ recordset_random_name }}" + state: absent + zone_id: "{{ dns_zo.zone.id }}" + + - name: Drop created DNS zone + opentelekomcloud.cloud.dns_zone: + name: "{{ dns_zo.zone.name }}" + state: absent + ignore_errors: yes diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 47204e76..0118bc7d 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -33,6 +33,7 @@ plugins/modules/dms_queue_group_info.py validate-modules:missing-gplv3-license plugins/modules/dns_floating_ip.py validate-modules:missing-gplv3-license plugins/modules/dns_recordset.py validate-modules:missing-gplv3-license plugins/modules/dns_zone.py validate-modules:missing-gplv3-license +plugins/modules/dns_recordset_info.py validate-modules:missing-gplv3-license plugins/modules/deh_host.py validate-modules:missing-gplv3-license plugins/modules/deh_host_info.py validate-modules:missing-gplv3-license plugins/modules/deh_host_type_info.py validate-modules:missing-gplv3-license diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index fe5ea08f..47d95b1a 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -32,6 +32,7 @@ plugins/modules/dms_queue_info.py validate-modules:missing-gplv3-license plugins/modules/dms_queue_group_info.py validate-modules:missing-gplv3-license plugins/modules/dns_floating_ip.py validate-modules:missing-gplv3-license plugins/modules/dns_recordset.py validate-modules:missing-gplv3-license +plugins/modules/dns_recordset_info.py validate-modules:missing-gplv3-license plugins/modules/dns_zone.py validate-modules:missing-gplv3-license plugins/modules/deh_host.py 
validate-modules:missing-gplv3-license plugins/modules/deh_host_info.py validate-modules:missing-gplv3-license From d94a332a63bc184092c94f5fc5ce4d59318f6c28 Mon Sep 17 00:00:00 2001 From: Vladimir Vshivkov <32225815+enrrou@users.noreply.github.com> Date: Wed, 27 Oct 2021 21:14:02 +0400 Subject: [PATCH 18/65] fixed wait false is still waiting (#141) fixed wait:false is still waiting Fixes #139 Reviewed-by: Anton Sidelnikov Reviewed-by: Irina Pereiaslavskaia Reviewed-by: None --- plugins/modules/rds_instance.py | 5 ++--- tests/unit/modules/rds_instance/test_rds_instance.py | 4 +++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/plugins/modules/rds_instance.py b/plugins/modules/rds_instance.py index e6a96d7b..ed6d6a8e 100644 --- a/plugins/modules/rds_instance.py +++ b/plugins/modules/rds_instance.py @@ -228,10 +228,9 @@ def run(self): if instance: attrs = { - 'instance': instance.id + 'instance': instance.id, + 'wait': self.params['wait'] } - if self.params['wait']: - attrs['wait'] = True self.conn.delete_rds_instance(**attrs) changed = True diff --git a/tests/unit/modules/rds_instance/test_rds_instance.py b/tests/unit/modules/rds_instance/test_rds_instance.py index 66654495..de403792 100644 --- a/tests/unit/modules/rds_instance/test_rds_instance.py +++ b/tests/unit/modules/rds_instance/test_rds_instance.py @@ -162,7 +162,9 @@ def test_ensure_deleted(self): self.conn.delete_rds_instance.return_value = None self.module().run() self.conn.delete_rds_instance.assert_called_with( - instance=self.conn.instance.id) + instance=self.conn.instance.id, + wait=False + ) self.assertTrue(result.exception.args[0]['changed']) def test_ensure_not_deleted(self): From af665a3e76e82b9248342a26e35e71f033570d08 Mon Sep 17 00:00:00 2001 From: YustinaKvr <62885041+YustinaKvr@users.noreply.github.com> Date: Thu, 28 Oct 2021 11:47:59 +0300 Subject: [PATCH 19/65] CSS cluster info (#140) CSS cluster info 1st version of the code some docs Reviewed-by: Anton Sidelnikov Reviewed-by: None Reviewed-by: Vladimir Vshivkov Reviewed-by: Rodion Gyrbu --- meta/runtime.yml | 1 + plugins/modules/css_cluster_info.py | 232 ++++++++++++++++++ .../targets/css_cluster_info/tasks/main.yaml | 15 ++ tests/sanity/ignore-2.10.txt | 1 + tests/sanity/ignore-2.9.txt | 1 + 5 files changed, 250 insertions(+) create mode 100644 plugins/modules/css_cluster_info.py create mode 100644 tests/integration/targets/css_cluster_info/tasks/main.yaml diff --git a/meta/runtime.yml b/meta/runtime.yml index 3bd07a06..dad3fee4 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -19,6 +19,7 @@ action_groups: - cce_cluster_node - cce_node_pool_info - cce_node_pool + - css_cluster_info - deh_host - deh_host_info - deh_host_type_info diff --git a/plugins/modules/css_cluster_info.py b/plugins/modules/css_cluster_info.py new file mode 100644 index 00000000..60ff30c4 --- /dev/null +++ b/plugins/modules/css_cluster_info.py @@ -0,0 +1,232 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+DOCUMENTATION = '''
+module: css_cluster_info
+short_description: Get info about CSS clusters.
+extends_documentation_fragment: opentelekomcloud.cloud.otc
+version_added: "0.9.0"
+author: "Yustina Kvrivishvili (@YustinaKvr)"
+description:
+  - Get CSS cluster info from the OTC.
+options:
+  name:
+    description:
+      - Name or ID of the cluster to be queried.
+    type: str
+  start:
+    description:
+      - Start value of the query.
+      - The default value is 1, indicating that the query starts from the first cluster.
+    type: int
+    default: 1
+  limit:
+    description:
+      - Number of clusters to be queried.
+      - The default value is 10, indicating that 10 clusters are queried at a time.
+    type: int
+requirements: ["openstacksdk", "otcextensions"]
+'''
+
+RETURN = '''
+cluster:
+    description:
+        - Info about the specified CSS cluster.
+    returned: On Success
+    type: complex
+    contains:
+        actions:
+            description: Indicates the operation that is executed on the cluster.
+            type: list
+            sample: "REBOOTING"
+        cmk_id:
+            description: Key ID used for disk encryption.
+            type: str
+            sample: "null"
+        created_at:
+            description: Time when a cluster is created. The format is ISO8601.
+            type: str
+            sample: "2021-10-05T15:55:06"
+        datastore:
+            description: Type of the data search engine.
+            type: complex
+            contains:
+                type:
+                    description: The supported type is elasticsearch.
+                    type: str
+                    sample: "elasticsearch"
+                version:
+                    description: Engine version number. The current version is 6.2.3, 7.1.1, or 7.6.2.
+                    type: str
+                    sample: "7.6.2"
+        disk_encryption:
+            description: Whether disks are encrypted.
+            type: str
+            sample: "null"
+        endpoint:
+            description: Indicates the IP addresses and port numbers used to access the cluster inside the VPC.
+            type: str
+            sample: "10.0.0.169:9200,10.0.0.191:9200,10.0.0.112:9200"
+        error:
+            description:
+                - Error codes.
+                - CSS.6000 indicates that a cluster fails to be created.
+                - CSS.6001 indicates that capacity expansion of a cluster fails.
+                - CSS.6002 indicates that a cluster fails to be restarted.
+                - CSS.6004 indicates that a node fails to be created in a cluster.
+                - CSS.6005 indicates that the service fails to be initialized.
+            type: str
+            sample: "null"
+        id:
+            description: Cluster ID.
+            type: str
+            sample: "a4edb35e-bded-4a44-ba9c-6b5d1f585f3d"
+        is_disk_encrypted:
+            description: Whether disks are encrypted.
+            type: bool
+            sample: false
+        is_https_enabled:
+            description: Communication encryption status.
+            type: bool
+            sample: false
+        name:
+            description: Cluster name.
+            type: str
+            sample: "css-test"
+        nodes:
+            description: Info about the nodes included in the cluster.
+            type: complex
+            contains:
+                azCode:
+                    description: AZ to which a node belongs.
+                    type: str
+                    sample: "eu-de-01"
+                id:
+                    description: Node ID.
+                    type: str
+                    sample: "7575d430-c918-4a80-9dba-8baa9ab49862"
+                name:
+                    description: Node name.
+                    type: str
+                    sample: "css-test-iustina-ess-esn-3-1"
+                specCode:
+                    description: Node specifications.
+                    type: str
+                    sample: "css.medium.8"
+                status:
+                    description:
+                        - Node status.
+                        - 100 indicates that an operation, such as node creation, is in progress.
+                        - 200 indicates that the instance is available.
+                        - 303 indicates that the instance is unavailable.
+                    type: str
+                    sample: "200"
+                type:
+                    description: The supported type is ess (indicating an Elasticsearch node).
+                    type: str
+                    sample: "ess"
+        progress:
+            description: Cluster operation progress, which indicates the progress of cluster creation.
+            type: complex
+            contains:
+                CREATING:
+                    description: Indicates the cluster creation progress as a percentage.
+ type: str + sample: "2%" + router_id: + description: Indicates the VPC ID. + type: str + sample: "7ea09482-793a-4aed-b4ce-447113d10d69" + security_group_id: + description: Security group ID. + type: str + sample: "120888d9-65be-4899-b07d-aa151c2895d4" + status: + description: + - Return value. + - 400 BadRequest. Invalid request. + - 404 NotFound. The requested resource cannot be found. + - 200 OK. The request is processed successfully. + type: str + sample: "200" + subnet_id: + description: Subnet ID. + type: str + sample: "8d9bd4e8-3c88-4991-8df3-d5e3cfd9a835" + updated_at: + description: + - Last modification time of a cluster. + - The format is ISO8601. + type: str + sample: "2021-10-13T10:35:56" +''' + +EXAMPLES = ''' +# Get info about clusters +- opentelekomcloud.cloud.css_cluster_info: + register: result +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class CSSClusterInfoModule(OTCModule): + + argument_spec = dict( + name=dict(required=False), + start=dict(required=False, type='int', default=1), + limit=dict(required=False, type='int') + ) + module_kwargs = dict( + supports_check_mode=True + ) + + otce_min_version = '0.24.1' + + def run(self): + + data = [] + query = {} + + if self.params['start']: + query['start'] = self.params['start'] + if self.params['limit']: + query['limit'] = self.params['limit'] + + if self.params['name']: + raw = self.conn.css.find_cluster( + name_or_id=self.params['name'], + ignore_missing=True) + if raw: + dt = raw.to_dict() + dt.pop('location') + data.append(dt) + else: + for raw in self.conn.css.clusters(**query): + dt = raw.to_dict() + dt.pop('location') + data.append(dt) + + self.exit( + changed=False, + clusters=data + ) + + +def main(): + module = CSSClusterInfoModule() + module() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/css_cluster_info/tasks/main.yaml b/tests/integration/targets/css_cluster_info/tasks/main.yaml new file mode 100644 index 00000000..0e16fd64 --- /dev/null +++ b/tests/integration/targets/css_cluster_info/tasks/main.yaml @@ -0,0 +1,15 @@ +--- +- module_defaults: + opentelekomcloud.cloud.css_cluster_info: + cloud: "{{ test_cloud }}" + block: + - name: Get info about clusters. 
+ opentelekomcloud.cloud.css_cluster_info: + register: result + + - name: assert result + assert: + that: + - result is success + - result is not changed + - result.clusters is defined diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 0118bc7d..446689ce 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -21,6 +21,7 @@ plugins/modules/ces_metrics_info.py validate-modules:missing-gplv3-license plugins/modules/ces_quotas_info.py validate-modules:missing-gplv3-license plugins/modules/cce_cluster_node_info.py validate-modules:missing-gplv3-license plugins/modules/cce_cluster_node.py validate-modules:missing-gplv3-license +plugins/modules/css_cluster_info.py validate-modules:missing-gplv3-license plugins/modules/dms_instance.py validate-modules:missing-gplv3-license plugins/modules/dms_instance_info.py validate-modules:missing-gplv3-license plugins/modules/dms_instance_topic.py validate-modules:missing-gplv3-license diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 47d95b1a..f17b06b8 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -21,6 +21,7 @@ plugins/modules/ces_metric_data_info.py validate-modules:missing-gplv3-license plugins/modules/ces_metrics_info.py validate-modules:missing-gplv3-license plugins/modules/ces_quotas_info.py validate-modules:missing-gplv3-license plugins/modules/cce_cluster_node_info.py validate-modules:missing-gplv3-license +plugins/modules/css_cluster_info.py validate-modules:missing-gplv3-license plugins/modules/dms_instance.py validate-modules:missing-gplv3-license plugins/modules/dms_instance_info.py validate-modules:missing-gplv3-license plugins/modules/dms_instance_topic.py validate-modules:missing-gplv3-license From 19f1dac961295f66bf4681d61c489bdab246f96e Mon Sep 17 00:00:00 2001 From: Vladimir Vshivkov <32225815+enrrou@users.noreply.github.com> Date: Tue, 23 Nov 2021 20:14:04 +0400 Subject: [PATCH 20/65] changed linter to ansible-linter (#150) changed linter to ansible-linter resolves #149 Depends-On: opentelekomcloud-infra/otc-zuul-jobs#103 Reviewed-by: None Reviewed-by: Artem Goncharov Reviewed-by: Rodion Gyrbu Reviewed-by: Vladimir Vshivkov --- .ansible-lint | 10 + changelogs/config.yaml | 32 ++-- changelogs/fragments/initialize.yml | 2 +- galaxy.yml | 6 +- plugins/modules/as_group.py | 6 +- plugins/modules/loadbalancer.py | 2 +- plugins/modules/rds_backup.py | 2 +- test-requirements-2.9.txt | 1 + test-requirements.txt | 1 + .../tasks/main.yaml | 2 +- .../targets/as_config/tasks/main.yaml | 10 +- .../targets/as_group/tasks/main.yaml | 125 ++++++------ .../targets/as_instance/tasks/main.yaml | 158 +++++++-------- .../targets/as_instance_info/tasks/main.yaml | 54 +++--- .../targets/as_policy/tasks/main.yaml | 86 ++++----- .../targets/as_policy_info/tasks/main.yaml | 54 +++--- .../availability_zone_info/tasks/main.yaml | 2 +- .../targets/cce_cluster/tasks/main.yaml | 42 ++-- .../targets/cce_cluster_node/tasks/main.yaml | 2 +- .../targets/cce_lifecycle/tasks/main.yaml | 74 +++---- .../targets/cce_node_pool/tasks/main.yaml | 4 +- tests/integration/targets/ces/tasks/main.yaml | 38 ++-- .../targets/dds_instance/tasks/main.yaml | 4 +- .../targets/deh_host/tasks/main.yaml | 42 ++-- tests/integration/targets/dms/tasks/main.yaml | 100 +++++----- tests/integration/targets/dns/tasks/main.yaml | 108 +++++------ .../dns_recordset_info/tasks/main.yaml | 24 +-- .../targets/lb_certificate/tasks/main.yaml | 22 +-- .../lb_listener_certificates/tasks/main.yaml | 94 
++++----- .../targets/loadbalancer/tasks/main.yaml | 58 +++--- tests/integration/targets/nat/tasks/main.yaml | 180 +++++++++--------- .../targets/prepare_tests/tasks/main.yaml | 2 +- .../targets/rds_backup/tasks/main.yaml | 2 +- .../targets/rds_backup_info/tasks/main.yaml | 4 +- .../targets/rds_flavor_info/tasks/main.yaml | 2 +- .../targets/rds_instance/tasks/main.yaml | 42 ++-- .../targets/vpc_peering_test/tasks/main.yaml | 92 ++++----- .../targets/vpc_route_test/tasks/main.yaml | 100 +++++----- .../targets/vpn_service_info/tasks/main.yaml | 10 +- .../targets/waf_domain/tasks/main.yaml | 30 +-- tox.ini | 4 +- 41 files changed, 821 insertions(+), 812 deletions(-) create mode 100644 .ansible-lint diff --git a/.ansible-lint b/.ansible-lint new file mode 100644 index 00000000..ffee53f1 --- /dev/null +++ b/.ansible-lint @@ -0,0 +1,10 @@ +--- +parseable: true +exclude_paths: + - ci/playbooks + - tests/integration/targets/deh_host/tasks/main.yaml +skip_list: + - '106' # Role name does not match ``^[a-z][a-z0-9_]+$`` pattern + - '204' # Lines should be no longer than 160 chars + - '301' # Commands should not change things if nothing needs doing + - '701' # No 'galaxy_info' found diff --git a/changelogs/config.yaml b/changelogs/config.yaml index e9f653d4..c97db3e6 100644 --- a/changelogs/config.yaml +++ b/changelogs/config.yaml @@ -9,21 +9,21 @@ notesdir: fragments prelude_section_name: release_summary prelude_section_title: Release Summary sections: -- - major_changes - - Major Changes -- - minor_changes - - Minor Changes -- - breaking_changes - - Breaking Changes -- - deprecated_features - - Deprecated Features -- - removed_features - - Removed Features (previously deprecated) -- - security_fixes - - Security Fixes -- - bugfixes - - Bugfixes -- - known_issues - - Known Issues + - - major_changes + - Major Changes + - - minor_changes + - Minor Changes + - - breaking_changes + - Breaking Changes + - - deprecated_features + - Deprecated Features + - - removed_features + - Removed Features (previously deprecated) + - - security_fixes + - Security Fixes + - - bugfixes + - Bugfixes + - - known_issues + - Known Issues title: opentelekomcloud.cloud trivial_section_name: trivial diff --git a/changelogs/fragments/initialize.yml b/changelogs/fragments/initialize.yml index 6a725045..1284d215 100644 --- a/changelogs/fragments/initialize.yml +++ b/changelogs/fragments/initialize.yml @@ -1,2 +1,2 @@ major_changes: -- initializing changelog handling + - initializing changelog handling diff --git a/galaxy.yml b/galaxy.yml index 3ec632ae..fd9b605c 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -3,12 +3,12 @@ name: cloud version: 0.9.0 readme: README.md authors: -- Artem Goncharov + - Artem Goncharov description: Modules for working with OpenTelekomCloud (base on OpenStack modules, adding proprietary services) license: -- Apache-2.0 -tags: + - Apache-2.0 +tags: - cloud - openstack - opentelekomcloud diff --git a/plugins/modules/as_group.py b/plugins/modules/as_group.py index 10aef992..e0872ddb 100644 --- a/plugins/modules/as_group.py +++ b/plugins/modules/as_group.py @@ -274,7 +274,7 @@ max_instance_number: 1 action: "resume" state: "present" - wait: yes + wait: true timeout: 360 register: result @@ -283,8 +283,8 @@ scaling_group: name: "as_group_test" state: "absent" - force_delete: yes - wait: yes + force_delete: true + wait: true timeout: 360 register: result diff --git a/plugins/modules/loadbalancer.py b/plugins/modules/loadbalancer.py index 0a5638fa..8b983c56 100644 --- a/plugins/modules/loadbalancer.py +++ 
b/plugins/modules/loadbalancer.py @@ -148,7 +148,7 @@ state: present vip_subnet: default_subnet auto_public_ip: yes - wait: yes + wait: true timeout: 600 # Delete a load balancer(and all its related resources) diff --git a/plugins/modules/rds_backup.py b/plugins/modules/rds_backup.py index ac552be2..0eb561dc 100644 --- a/plugins/modules/rds_backup.py +++ b/plugins/modules/rds_backup.py @@ -146,7 +146,7 @@ name: "test_ansible_module" description: "This is a description" state: present - wait: yes + wait: true timeout: 200 register: rds_backup diff --git a/test-requirements-2.9.txt b/test-requirements-2.9.txt index ff73523c..9a3197f6 100644 --- a/test-requirements-2.9.txt +++ b/test-requirements-2.9.txt @@ -1,4 +1,5 @@ ansible +ansible-lint openstacksdk pycodestyle==2.6.0 flake8==3.8.4 diff --git a/test-requirements.txt b/test-requirements.txt index f6c4ac5a..38ca38d2 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,5 +1,6 @@ openstacksdk ansible-base +ansible-lint pycodestyle==2.6.0 flake8==3.8.4 pylint diff --git a/tests/integration/targets/anti_ddos_fip_statuses_info/tasks/main.yaml b/tests/integration/targets/anti_ddos_fip_statuses_info/tasks/main.yaml index 8fcca2b9..6987bb71 100644 --- a/tests/integration/targets/anti_ddos_fip_statuses_info/tasks/main.yaml +++ b/tests/integration/targets/anti_ddos_fip_statuses_info/tasks/main.yaml @@ -28,7 +28,7 @@ opentelekomcloud.cloud.anti_ddos_fip_statuses_info: status: "wrongStatus" register: anti_ddos - ignore_errors: yes + ignore_errors: true - name: assert result assert: diff --git a/tests/integration/targets/as_config/tasks/main.yaml b/tests/integration/targets/as_config/tasks/main.yaml index f6e6fa9d..6c403b99 100644 --- a/tests/integration/targets/as_config/tasks/main.yaml +++ b/tests/integration/targets/as_config/tasks/main.yaml @@ -14,7 +14,7 @@ - name: Create keypair openstack.cloud.os_keypair: - name: "{{ key_name }}" + name: "{{ key_name }}" - name: Create as config - check_mode opentelekomcloud.cloud.as_config: @@ -27,7 +27,7 @@ volume_type: 'SAS' disk_type: 'SYS' register: as_config_check - check_mode: yes + check_mode: true - name: assert result assert: @@ -71,6 +71,6 @@ - name: Delete keypair openstack.cloud.os_keypair: - name: "{{ key_name }}" - state: absent - ignore_errors: true + name: "{{ key_name }}" + state: absent + ignore_errors: true diff --git a/tests/integration/targets/as_group/tasks/main.yaml b/tests/integration/targets/as_group/tasks/main.yaml index 396bf24e..04f11fec 100644 --- a/tests/integration/targets/as_group/tasks/main.yaml +++ b/tests/integration/targets/as_group/tasks/main.yaml @@ -27,11 +27,11 @@ config_name: "{{ ( prefix + '_as_conf' ) }}" network_name: "{{ ( prefix + '_network') }}" subnet_name: "{{ ( prefix + '_subnet') }}" - router_name: "{{ ( prefix + '_router') }}" + router_name: "{{ ( prefix + '_router') }}" - name: Create keypair openstack.cloud.os_keypair: - name: "{{ key_name }}" + name: "{{ key_name }}" - name: Create network openstack.cloud.os_network: @@ -53,7 +53,7 @@ name: "{{ router_name }}" state: present network: admin_external_net - enable_snat: True + enable_snat: true interfaces: - net: "{{ network.network.name }}" subnet: "{{ subnet.subnet.name }}" @@ -77,7 +77,7 @@ name: "{{ as_group_name }}" networks: [{'id': "{{ network.network.id }}"}] router: "{{ router.router.id }}" - check_mode: yes + check_mode: true register: as_group_check - name: assert result @@ -107,7 +107,7 @@ networks: [{'id': "{{ network.network.id }}"}] router: "{{ router.router.id }}" register: 
as_group_err - ignore_errors: yes + ignore_errors: true - name: assert result assert: @@ -122,7 +122,7 @@ id: "{{ as_group.as_group.id }}" name: "{{ new_name }}" max_instance_number: 10 - check_mode: yes + check_mode: true register: as_group_check - name: assert result @@ -151,7 +151,7 @@ scaling_group: id: "{{ as_group.as_group.id }}" state: absent - check_mode: yes + check_mode: true register: as_group_check - name: assert result @@ -165,7 +165,7 @@ scaling_group: id: "{{ as_group.as_group.id }}" state: absent - wait: yes + wait: true timeout: 360 register: as_gr_del @@ -181,10 +181,10 @@ scaling_group: id: "{{ as_group.as_group.id }}" state: absent - wait: yes + wait: true timeout: 360 register: as_gr_err - ignore_errors: yes + ignore_errors: true - name: assert result assert: @@ -204,7 +204,7 @@ max_instance_number: 1 action: "resume" state: "present" - wait: yes + wait: true timeout: 360 register: as_group @@ -232,11 +232,11 @@ scaling_group: id: "{{ as_group.as_group.id }}" state: absent - force_delete: no - wait: yes + force_delete: false + wait: true timeout: 360 register: as_group_err - ignore_errors: yes + ignore_errors: true - name: assert result assert: @@ -247,53 +247,52 @@ always: - block: - # Cleanup - - name: Delete as group - opentelekomcloud.cloud.as_group: - scaling_group: - id: "{{ as_group.as_group.id }}" - state: absent - force_delete: yes - wait: yes - timeout: 360 - register: dropped_as_group - - - name: assert result - assert: - that: - - dropped_as_group is success - - dropped_as_group is changed - - - name: Delete as config - opentelekomcloud.cloud.as_config: - scaling_configuration: "{{ config_name }}" - state: absent - register: dropped_as_config - ignore_errors: true - - - name: assert result - assert: - that: - - dropped_as_config is success - - dropped_as_config is changed - - - name: Delete keypair - openstack.cloud.os_keypair: - name: "{{ key_name }}" - state: absent - - - name: Drop existing router - openstack.cloud.os_router: - name: "{{ router.router.name }}" - state: absent - - - name: Drop existing subnet - openstack.cloud.os_subnet: - name: "{{ subnet.subnet.name }}" - state: absent - - - name: Drop existing network - openstack.cloud.os_network: - name: "{{ network.network.name }}" - state: absent + - name: Delete as group + opentelekomcloud.cloud.as_group: + scaling_group: + id: "{{ as_group.as_group.id }}" + state: absent + force_delete: true + wait: true + timeout: 360 + register: dropped_as_group + + - name: assert result + assert: + that: + - dropped_as_group is success + - dropped_as_group is changed + + - name: Delete as config + opentelekomcloud.cloud.as_config: + scaling_configuration: "{{ config_name }}" + state: absent + register: dropped_as_config + ignore_errors: true + + - name: assert result + assert: + that: + - dropped_as_config is success + - dropped_as_config is changed + + - name: Delete keypair + openstack.cloud.os_keypair: + name: "{{ key_name }}" + state: absent + + - name: Drop existing router + openstack.cloud.os_router: + name: "{{ router.router.name }}" + state: absent + + - name: Drop existing subnet + openstack.cloud.os_subnet: + name: "{{ subnet.subnet.name }}" + state: absent + + - name: Drop existing network + openstack.cloud.os_network: + name: "{{ network.network.name }}" + state: absent ignore_errors: true diff --git a/tests/integration/targets/as_instance/tasks/main.yaml b/tests/integration/targets/as_instance/tasks/main.yaml index 07c0f089..8ec922fa 100644 --- 
a/tests/integration/targets/as_instance/tasks/main.yaml +++ b/tests/integration/targets/as_instance/tasks/main.yaml @@ -37,7 +37,7 @@ as_config_name: "{{ ( prefix + 'as_config') }}" network_name: "{{ ( prefix + '_network') }}" subnet_name: "{{ ( prefix + '_subnet') }}" - router_name: "{{ ( prefix + '_router') }}" + router_name: "{{ ( prefix + '_router') }}" secgroup_name: "{{ ( prefix + '_secgroup') }}" kp_name: "{{ ( prefix + '_kp') }}" server_name: "{{ ( prefix + '_ecs') }}" @@ -80,7 +80,7 @@ name: "{{ router_name }}" state: present network: admin_external_net - enable_snat: True + enable_snat: true interfaces: - net: "{{ network.network.name }}" subnet: "{{ subnet.subnet.name }}" @@ -118,11 +118,11 @@ networks: [{'id': "{{ network.network.id }}"}] security_groups: [{'id': "{{ secgroup.secgroup.id }}"}] router: "{{ router.router.id }}" - delete_publicip: True - delete_volume: True + delete_publicip: true + delete_volume: true action: "resume" state: "present" - wait: yes + wait: true timeout: 400 register: as_group @@ -133,7 +133,7 @@ network: "{{ network_name }}" flavor: "{{ server_flavor }}" availability_zone: "{{ az2_name }}" - delete_fip: True + delete_fip: true register: ecs1 - name: Create ECS2 instance @@ -143,7 +143,7 @@ network: "{{ network_name }}" flavor: "{{ server_flavor }}" availability_zone: "{{ az2_name }}" - delete_fip: True + delete_fip: true register: ecs2 - name: Get list of AS Instances @@ -171,7 +171,7 @@ scaling_instances: ["{{ ecs1.server.id }}", "{{ ecs2.server.id }}"] action: "add" state: present - check_mode: yes + check_mode: true register: as_instances - name: assert result @@ -257,7 +257,7 @@ scaling_instances: "{{ id_list }}" action: "protect" state: present - check_mode: yes + check_mode: true register: as_instances - name: assert result @@ -286,7 +286,7 @@ scaling_instances: "{{ name_list }}" action: "unprotect" state: present - check_mode: yes + check_mode: true register: as_instances - name: assert result @@ -313,9 +313,9 @@ opentelekomcloud.cloud.as_instance: scaling_group: "{{ as_group.as_group.id }}" scaling_instances: ["{{ id_list[0] }}"] - instance_delete: yes + instance_delete: true state: absent - check_mode: yes + check_mode: true register: as_instances - name: assert result @@ -347,7 +347,7 @@ opentelekomcloud.cloud.as_instance: scaling_group: "{{ as_group.as_group.name }}" scaling_instances: ["{{ name_list[0] }}"] - instance_delete: yes + instance_delete: true state: absent wait: true timeout: 360 @@ -382,10 +382,10 @@ opentelekomcloud.cloud.as_instance: scaling_group: "{{ as_group.as_group.name }}" scaling_instances: "{{ name_list_after_remove }}" - instance_delete: yes + instance_delete: true action: "remove" state: absent - check_mode: yes + check_mode: true register: result - name: assert result @@ -417,10 +417,10 @@ opentelekomcloud.cloud.as_instance: scaling_group: "{{ as_group.as_group.name }}" scaling_instances: "{{ name_list }}" - instance_delete: yes + instance_delete: true action: "remove" state: absent - wait: yes + wait: true timeout: 360 register: result @@ -443,66 +443,66 @@ always: - block: # Cleanup - - name: Delete ECS1 - openstack.cloud.server: - name: "{{ (as_instance_name + '_1') }}" - state: absent - - - name: Delete ECS2 - openstack.cloud.server: - name: "{{ (as_instance_name + '_2') }}" - state: absent - - - name: Delete as group - opentelekomcloud.cloud.as_group: - scaling_group: - name: "{{ as_group_name }}" - state: absent - force_delete: yes - wait: yes - timeout: 360 - register: dropped_as_group - - - name: assert 
result - assert: - that: - - dropped_as_group is success - - dropped_as_group is changed - - - name: Delete as config - opentelekomcloud.cloud.as_config: - scaling_configuration: "{{ as_config_name }}" - state: absent - register: dropped_as_config - - - name: assert result - assert: - that: - - dropped_as_config is success - - dropped_as_config is changed - - - name: Delete existing router - openstack.cloud.os_router: - name: "{{ router.router.name }}" - state: absent - - - name: Delete existing subnet - openstack.cloud.os_subnet: - name: "{{ subnet.subnet.name }}" - state: absent - - - name: Delete existing network - openstack.cloud.os_network: - name: "{{ network.network.name }}" - state: absent - - - name: Delete security group - openstack.cloud.security_group: - name: "{{ secgroup_name }}" - state: absent - - - name: Delete keypair - openstack.cloud.os_keypair: - name: "{{ kp_name }}" - state: absent + - name: Delete ECS1 + openstack.cloud.server: + name: "{{ (as_instance_name + '_1') }}" + state: absent + + - name: Delete ECS2 + openstack.cloud.server: + name: "{{ (as_instance_name + '_2') }}" + state: absent + + - name: Delete as group + opentelekomcloud.cloud.as_group: + scaling_group: + name: "{{ as_group_name }}" + state: absent + force_delete: true + wait: true + timeout: 360 + register: dropped_as_group + + - name: assert result + assert: + that: + - dropped_as_group is success + - dropped_as_group is changed + + - name: Delete as config + opentelekomcloud.cloud.as_config: + scaling_configuration: "{{ as_config_name }}" + state: absent + register: dropped_as_config + + - name: assert result + assert: + that: + - dropped_as_config is success + - dropped_as_config is changed + + - name: Delete existing router + openstack.cloud.os_router: + name: "{{ router.router.name }}" + state: absent + + - name: Delete existing subnet + openstack.cloud.os_subnet: + name: "{{ subnet.subnet.name }}" + state: absent + + - name: Delete existing network + openstack.cloud.os_network: + name: "{{ network.network.name }}" + state: absent + + - name: Delete security group + openstack.cloud.security_group: + name: "{{ secgroup_name }}" + state: absent + + - name: Delete keypair + openstack.cloud.os_keypair: + name: "{{ kp_name }}" + state: absent ignore_errors: true diff --git a/tests/integration/targets/as_instance_info/tasks/main.yaml b/tests/integration/targets/as_instance_info/tasks/main.yaml index b5e37f5c..cadbada3 100644 --- a/tests/integration/targets/as_instance_info/tasks/main.yaml +++ b/tests/integration/targets/as_instance_info/tasks/main.yaml @@ -20,7 +20,7 @@ as_group_name: "{{ ( prefix + '_as_group') }}" network_name: "{{ ( prefix + '_network') }}" subnet_name: "{{ ( prefix + '_subnet') }}" - router_name: "{{ ( prefix + '_router') }}" + router_name: "{{ ( prefix + '_router') }}" - name: Create network openstack.cloud.os_network: @@ -42,7 +42,7 @@ name: "{{ router_name }}" state: present network: admin_external_net - enable_snat: True + enable_snat: true interfaces: - net: "{{ network.network.name }}" subnet: "{{ subnet.subnet.name }}" @@ -83,7 +83,7 @@ - name: Get error message that required parameter is missing opentelekomcloud.cloud.as_instance_info: register: as_instances - ignore_errors: yes + ignore_errors: true - name: assert result assert: @@ -96,7 +96,7 @@ opentelekomcloud.cloud.as_instance_info: scaling_group: register: as_instances - ignore_errors: yes + ignore_errors: true - name: assert result assert: @@ -107,27 +107,27 @@ always: - block: # Cleanup - - name: Delete as group - 
opentelekomcloud.cloud.as_group: - scaling_group: - name: "{{ as_group_name }}" - state: absent - force_delete: yes - wait: yes - register: dropped_as_group - - - name: Drop existing router - openstack.cloud.os_router: - name: "{{ router.router.name }}" - state: absent - - - name: Drop existing subnet - openstack.cloud.os_subnet: - name: "{{ subnet.subnet.name }}" - state: absent - - - name: Drop existing network - openstack.cloud.os_network: - name: "{{ network.network.name }}" - state: absent + - name: Delete as group + opentelekomcloud.cloud.as_group: + scaling_group: + name: "{{ as_group_name }}" + state: absent + force_delete: true + wait: true + register: dropped_as_group + + - name: Drop existing router + openstack.cloud.os_router: + name: "{{ router.router.name }}" + state: absent + + - name: Drop existing subnet + openstack.cloud.os_subnet: + name: "{{ subnet.subnet.name }}" + state: absent + + - name: Drop existing network + openstack.cloud.os_network: + name: "{{ network.network.name }}" + state: absent ignore_errors: true diff --git a/tests/integration/targets/as_policy/tasks/main.yaml b/tests/integration/targets/as_policy/tasks/main.yaml index a4587ede..791c4d6d 100644 --- a/tests/integration/targets/as_policy/tasks/main.yaml +++ b/tests/integration/targets/as_policy/tasks/main.yaml @@ -28,7 +28,7 @@ alarm_name: "{{ prefix + '_alarm' }}" network_name: "{{ ( prefix + '_network') }}" subnet_name: "{{ ( prefix + '_subnet') }}" - router_name: "{{ ( prefix + '_router') }}" + router_name: "{{ ( prefix + '_router') }}" - name: Create network openstack.cloud.os_network: @@ -63,8 +63,8 @@ value: 6 unit: "B" count: 1 - alarm_enabled: True - alarm_action_enabled: False + alarm_enabled: true + alarm_action_enabled: false register: alarm - name: Create subnet @@ -81,7 +81,7 @@ name: "{{ router_name }}" state: present network: admin_external_net - enable_snat: True + enable_snat: true interfaces: - net: "{{ network.network.name }}" subnet: "{{ subnet.subnet.name }}" @@ -102,7 +102,7 @@ scaling_policy_type: "alarm" alarm: "{{ alarm_name }}" state: "present" - check_mode: yes + check_mode: true register: as_policy - name: assert result @@ -134,7 +134,7 @@ alarm: "{{ alarm_name }}" state: "present" register: as_policy - ignore_errors: yes + ignore_errors: true - name: assert result assert: @@ -175,11 +175,11 @@ - name: Delete AS policy -check mode opentelekomcloud.cloud.as_policy: - scaling_group: "{{ as_group_name}}" + scaling_group: "{{ as_group_name }}" scaling_policy: "{{ as_policy_name }}" state: "absent" register: as_policy - check_mode: yes + check_mode: true - name: assert result assert: @@ -217,7 +217,7 @@ scaling_policy: "{{ as_policy_name }}" state: "absent" register: as_policy - ignore_errors: yes + ignore_errors: true - name: assert result assert: @@ -229,38 +229,38 @@ always: - block: # Cleanup - - name: Delete AS group - opentelekomcloud.cloud.as_group: - scaling_group: - name: "{{ as_group_name }}" - state: absent - force_delete: yes - wait: yes - register: dropped_as_group - - - name: Drop alarm - opentelekomcloud.cloud.ces_alarms: - alarm_name: "{{ alarm_name }}" - state: absent - - - name: Drop floating ip - opentelekomcloud.cloud.floating_ip: - floating_ip_address: "{{ fl_ip }}" - state: absent - purge: true - - - name: Drop existing router - openstack.cloud.os_router: - name: "{{ router.router.name }}" - state: absent - - - name: Drop existing subnet - openstack.cloud.os_subnet: - name: "{{ subnet.subnet.name }}" - state: absent - - - name: Drop existing network - 
openstack.cloud.os_network: - name: "{{ network.network.name }}" - state: absent + - name: Delete AS group + opentelekomcloud.cloud.as_group: + scaling_group: + name: "{{ as_group_name }}" + state: absent + force_delete: true + wait: true + register: dropped_as_group + + - name: Drop alarm + opentelekomcloud.cloud.ces_alarms: + alarm_name: "{{ alarm_name }}" + state: absent + + - name: Drop floating ip + opentelekomcloud.cloud.floating_ip: + floating_ip_address: "{{ fl_ip }}" + state: absent + purge: true + + - name: Drop existing router + openstack.cloud.os_router: + name: "{{ router.router.name }}" + state: absent + + - name: Drop existing subnet + openstack.cloud.os_subnet: + name: "{{ subnet.subnet.name }}" + state: absent + + - name: Drop existing network + openstack.cloud.os_network: + name: "{{ network.network.name }}" + state: absent ignore_errors: true diff --git a/tests/integration/targets/as_policy_info/tasks/main.yaml b/tests/integration/targets/as_policy_info/tasks/main.yaml index 98287207..573720f7 100644 --- a/tests/integration/targets/as_policy_info/tasks/main.yaml +++ b/tests/integration/targets/as_policy_info/tasks/main.yaml @@ -23,7 +23,7 @@ key_name: "{{ ( prefix + '_key') }}" network_name: "{{ ( prefix + '_network') }}" subnet_name: "{{ ( prefix + '_subnet') }}" - router_name: "{{ ( prefix + '_router') }}" + router_name: "{{ ( prefix + '_router') }}" - name: Create keypair openstack.cloud.os_keypair: @@ -49,7 +49,7 @@ name: "{{ router_name }}" state: present network: admin_external_net - enable_snat: True + enable_snat: true interfaces: - net: "{{ network.network.name }}" subnet: "{{ subnet.subnet.name }}" @@ -90,7 +90,7 @@ - name: Get error message that required parameter is missing opentelekomcloud.cloud.as_policy_info: register: as_policies - ignore_errors: yes + ignore_errors: true - name: assert result assert: @@ -103,7 +103,7 @@ opentelekomcloud.cloud.as_policy_info: scaling_group: register: as_policies - ignore_errors: yes + ignore_errors: true - name: assert result assert: @@ -115,32 +115,32 @@ always: - block: # Cleanup - - name: Delete as group - opentelekomcloud.cloud.as_group: - scaling_group: - name: "{{ as_group_name }}" - state: absent - force_delete: yes - wait: yes - register: dropped_as_group - - - name: Delete keypair - openstack.cloud.os_keypair: + - name: Delete as group + opentelekomcloud.cloud.as_group: + scaling_group: + name: "{{ as_group_name }}" + state: absent + force_delete: true + wait: true + register: dropped_as_group + + - name: Delete keypair + openstack.cloud.os_keypair: name: "{{ key_name }}" state: absent - - name: Drop existing router - openstack.cloud.os_router: - name: "{{ router.router.name }}" - state: absent + - name: Drop existing router + openstack.cloud.os_router: + name: "{{ router.router.name }}" + state: absent - - name: Drop existing subnet - openstack.cloud.os_subnet: - name: "{{ subnet.subnet.name }}" - state: absent + - name: Drop existing subnet + openstack.cloud.os_subnet: + name: "{{ subnet.subnet.name }}" + state: absent - - name: Drop existing network - openstack.cloud.os_network: - name: "{{ network.network.name }}" - state: absent + - name: Drop existing network + openstack.cloud.os_network: + name: "{{ network.network.name }}" + state: absent ignore_errors: true diff --git a/tests/integration/targets/availability_zone_info/tasks/main.yaml b/tests/integration/targets/availability_zone_info/tasks/main.yaml index 005eb2e6..5e3422a3 100644 --- a/tests/integration/targets/availability_zone_info/tasks/main.yaml +++ 
b/tests/integration/targets/availability_zone_info/tasks/main.yaml @@ -5,7 +5,7 @@ block: - name: Get AZ info (check mode) opentelekomcloud.cloud.availability_zone_info: - check_mode: yes + check_mode: true register: az - name: Get AZ info diff --git a/tests/integration/targets/cce_cluster/tasks/main.yaml b/tests/integration/targets/cce_cluster/tasks/main.yaml index 63f819c6..a3ff5b98 100644 --- a/tests/integration/targets/cce_cluster/tasks/main.yaml +++ b/tests/integration/targets/cce_cluster/tasks/main.yaml @@ -41,7 +41,7 @@ name: "{{ router_name }}" state: present network: admin_external_net - enable_snat: True + enable_snat: true interfaces: - net: "{{ test_network.network.name }}" subnet: "{{ test_subnet.subnet.name }}" @@ -66,26 +66,26 @@ always: - block: # Cleanup - - name: Drop cluster - opentelekomcloud.cloud.cce_cluster: - name: "{{ cce_cluster_name }}" - state: "absent" + - name: Drop cluster + opentelekomcloud.cloud.cce_cluster: + name: "{{ cce_cluster_name }}" + state: "absent" - - name: Drop router - openstack.cloud.router: - cloud: "{{ test_cloud }}" - name: "{{ router_name }}" - state: absent + - name: Drop router + openstack.cloud.router: + cloud: "{{ test_cloud }}" + name: "{{ router_name }}" + state: absent - - name: Drop subnet - openstack.cloud.subnet: - cloud: "{{ test_cloud }}" - name: "{{ subnet_name }}" - state: absent + - name: Drop subnet + openstack.cloud.subnet: + cloud: "{{ test_cloud }}" + name: "{{ subnet_name }}" + state: absent - - name: Drop network - openstack.cloud.network: - cloud: "{{ test_cloud }}" - name: "{{ network_name }}" - state: absent - ignore_errors: yes + - name: Drop network + openstack.cloud.network: + cloud: "{{ test_cloud }}" + name: "{{ network_name }}" + state: absent + ignore_errors: true diff --git a/tests/integration/targets/cce_cluster_node/tasks/main.yaml b/tests/integration/targets/cce_cluster_node/tasks/main.yaml index 6ad23c3a..ade645fc 100644 --- a/tests/integration/targets/cce_cluster_node/tasks/main.yaml +++ b/tests/integration/targets/cce_cluster_node/tasks/main.yaml @@ -159,4 +159,4 @@ openstack.cloud.network: name: "{{ network_name }}" state: absent - ignore_errors: yes + ignore_errors: true diff --git a/tests/integration/targets/cce_lifecycle/tasks/main.yaml b/tests/integration/targets/cce_lifecycle/tasks/main.yaml index 49ec4074..bfdd26c2 100644 --- a/tests/integration/targets/cce_lifecycle/tasks/main.yaml +++ b/tests/integration/targets/cce_lifecycle/tasks/main.yaml @@ -177,40 +177,40 @@ always: - block: # Cleanup - - name: Drop node 2 - opentelekomcloud.cloud.cce_cluster_node: - cluster: "{{ cce_cluster_name }}" - name: "{{ cce_node_name2 }}" - state: absent - wait: true - register: node2 - - - name: Drop cluster - opentelekomcloud.cloud.cce_cluster: - name: "{{ cce_cluster_name }}" - timeout: 3000 - state: "absent" - - - name: Drop Keypair - openstack.cloud.keypair: - state: "absent" - name: "{{ keypair_name }}" - - - name: Drop router - openstack.cloud.router: - cloud: "{{ test_cloud }}" - name: "{{ router_name }}" - state: absent - - - name: Drop subnet - openstack.cloud.subnet: - cloud: "{{ test_cloud }}" - name: "{{ subnet_name }}" - state: absent - - - name: Drop network - openstack.cloud.network: - cloud: "{{ test_cloud }}" - name: "{{ network_name }}" - state: absent - ignore_errors: yes + - name: Drop node 2 + opentelekomcloud.cloud.cce_cluster_node: + cluster: "{{ cce_cluster_name }}" + name: "{{ cce_node_name2 }}" + state: absent + wait: true + register: node2 + + - name: Drop cluster + 
opentelekomcloud.cloud.cce_cluster: + name: "{{ cce_cluster_name }}" + timeout: 3000 + state: "absent" + + - name: Drop Keypair + openstack.cloud.keypair: + state: "absent" + name: "{{ keypair_name }}" + + - name: Drop router + openstack.cloud.router: + cloud: "{{ test_cloud }}" + name: "{{ router_name }}" + state: absent + + - name: Drop subnet + openstack.cloud.subnet: + cloud: "{{ test_cloud }}" + name: "{{ subnet_name }}" + state: absent + + - name: Drop network + openstack.cloud.network: + cloud: "{{ test_cloud }}" + name: "{{ network_name }}" + state: absent + ignore_errors: true diff --git a/tests/integration/targets/cce_node_pool/tasks/main.yaml b/tests/integration/targets/cce_node_pool/tasks/main.yaml index 032f6be9..d6f4ad6e 100644 --- a/tests/integration/targets/cce_node_pool/tasks/main.yaml +++ b/tests/integration/targets/cce_node_pool/tasks/main.yaml @@ -4,14 +4,14 @@ cce_node_pool: cloud: "{{ test_cloud }}" availability_zone: "random" - autoscaling_enabled: True + autoscaling_enabled: true cluster: test-cluster data_volumes: - volumetype: SSD size: 120 - volumetype: SATA size: 100 - encrypted: False + encrypted: false cmk_id: '' flavor: s2.large.2 initial_node_count: 0 diff --git a/tests/integration/targets/ces/tasks/main.yaml b/tests/integration/targets/ces/tasks/main.yaml index 302dc4cf..c285f0dc 100644 --- a/tests/integration/targets/ces/tasks/main.yaml +++ b/tests/integration/targets/ces/tasks/main.yaml @@ -37,11 +37,11 @@ value: 6 unit: "B" count: 1 - alarm_enabled: True - alarm_action_enabled: False - check_mode: True + alarm_enabled: true + alarm_action_enabled: false + check_mode: true register: ces_al_ch - + - name: assert result assert: that: @@ -65,8 +65,8 @@ value: 6 unit: "B" count: 1 - alarm_enabled: True - alarm_action_enabled: False + alarm_enabled: true + alarm_action_enabled: false register: ces_al - name: debug @@ -165,16 +165,16 @@ always: - block: - - name: Drop dns_floating_ip entry - opentelekomcloud.cloud.floating_ip: - floating_ip_address: "{{ fl_ip }}" - state: absent - purge: true - register: dns_fl_dr - - - name: Drop Alarm - opentelekomcloud.cloud.ces_alarms: - alarm_name: "{{ alarm_name }}" - state: absent - register: dns_rs_dr - ignore_errors: yes \ No newline at end of file + - name: Drop dns_floating_ip entry + opentelekomcloud.cloud.floating_ip: + floating_ip_address: "{{ fl_ip }}" + state: absent + purge: true + register: dns_fl_dr + + - name: Drop Alarm + opentelekomcloud.cloud.ces_alarms: + alarm_name: "{{ alarm_name }}" + state: absent + register: dns_rs_dr + ignore_errors: true diff --git a/tests/integration/targets/dds_instance/tasks/main.yaml b/tests/integration/targets/dds_instance/tasks/main.yaml index 8b8864a7..1f766577 100644 --- a/tests/integration/targets/dds_instance/tasks/main.yaml +++ b/tests/integration/targets/dds_instance/tasks/main.yaml @@ -51,7 +51,7 @@ name: "{{ router_name }}" state: present network: admin_external_net - enable_snat: True + enable_snat: true interfaces: - net: "{{ test_network.network.name }}" subnet: "{{ test_subnet.subnet.name }}" @@ -110,4 +110,4 @@ cloud: "{{ test_cloud }}" name: "{{ network_name }}" state: absent - ignore_errors: yes + ignore_errors: true diff --git a/tests/integration/targets/deh_host/tasks/main.yaml b/tests/integration/targets/deh_host/tasks/main.yaml index 96ba1840..ad9bed53 100644 --- a/tests/integration/targets/deh_host/tasks/main.yaml +++ b/tests/integration/targets/deh_host/tasks/main.yaml @@ -10,7 +10,7 @@ - name: Set random prefix set_fact: prefix: "{{ 99999999 | random | 
to_uuid | hash('md5') }}" - + - name: Set initial facts set_fact: deh_host_name: "{{ ( prefix + '_deh-host') }}" @@ -22,14 +22,14 @@ name: "{{ deh_host_name }}" state: present quantity: 1 - tags: + tags: - key: key1 value: value1 - key: key2 value: value2 check_mode: true register: deh_ch - + - name: assert result assert: that: @@ -43,53 +43,53 @@ name: "{{ deh_host_name }}" state: present quantity: 1 - tags: + tags: - key: key1 value: value1 - key: key2 value: value2 register: deh - + - name: assert result assert: that: - deh is success - deh.deh_host.dedicated_host_ids[0] is defined - + - name: Modify DeH host - check mode deh_host: id: "{{ deh.deh_host.dedicated_host_ids[0] }}" auto_placement: off check_mode: true - when: + when: - deh is defined register: deh_ch - + - name: assert result assert: that: - deh_ch is success - + - name: Modify DeH host deh_host: id: "{{ deh.deh_host.dedicated_host_ids[0] }}" auto_placement: off - when: + when: - deh is defined register: deh - + - name: assert result assert: that: - deh is success - + - name: Query not existing ECS on dedicated host deh_server_info: dedicated_host: "{{ deh.deh_host.id }}" - when: + when: - deh is defined register: server - + - name: assert result assert: that: @@ -98,10 +98,10 @@ always: - block: - # Cleanup - - name: Drop existing DeH host - deh_host: - name: "{{ deh.deh_host.name }}" - state: absent - register: deh - ignore_errors: yes + # Cleanup + - name: Drop existing DeH host + deh_host: + name: "{{ deh.deh_host.name }}" + state: absent + register: deh + ignore_errors: true diff --git a/tests/integration/targets/dms/tasks/main.yaml b/tests/integration/targets/dms/tasks/main.yaml index 108d99d5..da07de43 100644 --- a/tests/integration/targets/dms/tasks/main.yaml +++ b/tests/integration/targets/dms/tasks/main.yaml @@ -9,11 +9,11 @@ queue_name: "{{ ( 'a' + prefix + '-queue' ) }}" group_name: "{{ ( 'group_test' ) }}" instance_name: "{{ ( 'a' + prefix + '-instance' ) }}" - network_name: "{{ ( prefix + '-dmsnetwork' )}}" - subnet_name: "{{ ( prefix + '-dmssubnet' )}}" - router_name: "{{ ( prefix + '-dmsrouter' )}}" - sg_name: "{{ ( prefix + '-dmssg' )}}" - + network_name: "{{ ( prefix + '-dmsnetwork' ) }}" + subnet_name: "{{ ( prefix + '-dmssubnet' ) }}" + router_name: "{{ ( prefix + '-dmsrouter' ) }}" + sg_name: "{{ ( prefix + '-dmssg' ) }}" + - name: DMS Queue opentelekomcloud.cloud.dms_queue: name: '{{ queue_name }}' @@ -39,7 +39,7 @@ queue: '{{ queue_name }}' include_deadletter: true register: zone_net - + - name: Send Messages opentelekomcloud.cloud.dms_message: queue: '{{ queue_name }}' @@ -47,14 +47,14 @@ - body: 'test1' attributes: attribute1: 'value1' - attribute2: 'value2' + attribute2: 'value2' - body: 'test2' attributes: attribute1: 'value3' - attribute2: 'value4' + attribute2: 'value4' task: send register: dms_mess_send - + - name: List DMS Queue Group Info opentelekomcloud.cloud.dms_queue_group_info: queue: '{{ queue_name }}' @@ -72,7 +72,7 @@ - name: List DMS Instance Info opentelekomcloud.cloud.dms_instance_info: register: dms_instance - + - name: Create network for DMS Instance openstack.cloud.network: name: "{{ network_name }}" @@ -98,7 +98,7 @@ - net: "{{ dms_net.network.name }}" subnet: "{{ dms_subnet.subnet.name }}" register: dms_router - + - name: Create Security Group for DMS Instance openstack.cloud.security_group: name: "{{ sg_name }}" @@ -106,42 +106,42 @@ always: - block: - - name: DMS Delete Queue Group - opentelekomcloud.cloud.dms_queue_group: - queue_name: '{{ queue_name }}' - group_name: '{{ 
group_name }}' - state: absent - register: dms_queue_group_rm - check_mode: false - - - name: Delete Queue - opentelekomcloud.cloud.dms_queue: - name: '{{ queue_name }}' - state: absent - register: dms_queue_rm - check_mode: false - - - name: Delete Security Group - openstack.cloud.security_group: - name: "{{ sg_name }}" - state: absent - register: dms_sg - - - name: Drop existing Router - openstack.cloud.router: - name: "{{ router_name }}" - state: absent - register: dns_rout_dr - - - name: Drop existing subnet - openstack.cloud.subnet: - name: "{{ subnet_name }}" - state: absent - register: dns_subnet_dr - - - name: Drop existing network - openstack.cloud.network: - name: "{{ network_name }}" - state: absent - register: dns_net_dr - ignore_errors: yes + - name: DMS Delete Queue Group + opentelekomcloud.cloud.dms_queue_group: + queue_name: '{{ queue_name }}' + group_name: '{{ group_name }}' + state: absent + register: dms_queue_group_rm + check_mode: false + + - name: Delete Queue + opentelekomcloud.cloud.dms_queue: + name: '{{ queue_name }}' + state: absent + register: dms_queue_rm + check_mode: false + + - name: Delete Security Group + openstack.cloud.security_group: + name: "{{ sg_name }}" + state: absent + register: dms_sg + + - name: Drop existing Router + openstack.cloud.router: + name: "{{ router_name }}" + state: absent + register: dns_rout_dr + + - name: Drop existing subnet + openstack.cloud.subnet: + name: "{{ subnet_name }}" + state: absent + register: dns_subnet_dr + + - name: Drop existing network + openstack.cloud.network: + name: "{{ network_name }}" + state: absent + register: dns_net_dr + ignore_errors: true diff --git a/tests/integration/targets/dns/tasks/main.yaml b/tests/integration/targets/dns/tasks/main.yaml index 0f9597e0..d1a77724 100644 --- a/tests/integration/targets/dns/tasks/main.yaml +++ b/tests/integration/targets/dns/tasks/main.yaml @@ -18,9 +18,9 @@ zone_public_name: "{{ ( prefix + '-dnszone.com.' ) }}" zone_private_name: "{{ ( prefix + '-dnszone.com.' ) }}" rs_name: "{{ ( prefix + '-rs.' + prefix + '-dnszone.com.' 
) }}" - network_name: "{{ ( prefix + '-dnsnetwork' )}}" - subnet_name: "{{ ( prefix + '-dnssubnet' )}}" - router_name: "{{ ( prefix + '-dnsrouter' )}}" + network_name: "{{ ( prefix + '-dnsnetwork' ) }}" + subnet_name: "{{ ( prefix + '-dnssubnet' ) }}" + router_name: "{{ ( prefix + '-dnsrouter' ) }}" - name: Create network for DNS private Zone openstack.cloud.network: @@ -318,54 +318,54 @@ always: - block: - - name: Drop dns_floating_ip entry - opentelekomcloud.cloud.dns_floating_ip: - floating_ip: "{{ fl_ip }}" - state: absent - register: dns_fl_dr - - - name: Dropping DNS Recordset - opentelekomcloud.cloud.dns_recordset: - zone_id: "{{ dns_zo.zone.id }}" - recordset_name: "{{ rs_name }}" - state: absent - register: dns_rs_dr - - - name: Drop DNS public Zone - opentelekomcloud.cloud.dns_zone: - name: "{{ zone_public_name }}" - state: absent - register: dns_zo_pu_dr - - - name: Drop DNS private Zone - opentelekomcloud.cloud.dns_zone: - name: "{{ zone_private_name }}" - zone_type: "private" - state: absent - register: dns_zo_pr_dr - - - name: Drop Floating IP - opentelekomcloud.cloud.floating_ip: - floating_ip_address: "{{ fl_ip }}" - state: absent - purge: true - register: fl_dr - - - name: Drop existing Router - openstack.cloud.router: - name: "{{ router_name }}" - state: absent - register: dns_rout_dr - - - name: Drop existing subnet - openstack.cloud.subnet: - name: "{{ subnet_name }}" - state: absent - register: dns_subnet_dr - - - name: Drop existing network - openstack.cloud.network: - name: "{{ network_name }}" - state: absent - register: dns_net_dr - ignore_errors: yes + - name: Drop dns_floating_ip entry + opentelekomcloud.cloud.dns_floating_ip: + floating_ip: "{{ fl_ip }}" + state: absent + register: dns_fl_dr + + - name: Dropping DNS Recordset + opentelekomcloud.cloud.dns_recordset: + zone_id: "{{ dns_zo.zone.id }}" + recordset_name: "{{ rs_name }}" + state: absent + register: dns_rs_dr + + - name: Drop DNS public Zone + opentelekomcloud.cloud.dns_zone: + name: "{{ zone_public_name }}" + state: absent + register: dns_zo_pu_dr + + - name: Drop DNS private Zone + opentelekomcloud.cloud.dns_zone: + name: "{{ zone_private_name }}" + zone_type: "private" + state: absent + register: dns_zo_pr_dr + + - name: Drop Floating IP + opentelekomcloud.cloud.floating_ip: + floating_ip_address: "{{ fl_ip }}" + state: absent + purge: true + register: fl_dr + + - name: Drop existing Router + openstack.cloud.router: + name: "{{ router_name }}" + state: absent + register: dns_rout_dr + + - name: Drop existing subnet + openstack.cloud.subnet: + name: "{{ subnet_name }}" + state: absent + register: dns_subnet_dr + + - name: Drop existing network + openstack.cloud.network: + name: "{{ network_name }}" + state: absent + register: dns_net_dr + ignore_errors: true diff --git a/tests/integration/targets/dns_recordset_info/tasks/main.yaml b/tests/integration/targets/dns_recordset_info/tasks/main.yaml index e4d2c81e..197aaf89 100644 --- a/tests/integration/targets/dns_recordset_info/tasks/main.yaml +++ b/tests/integration/targets/dns_recordset_info/tasks/main.yaml @@ -6,7 +6,7 @@ cloud: "{{ test_cloud }}" opentelekomcloud.cloud.dns_zone: cloud: "{{ test_cloud }}" - + block: - name: Set random prefix set_fact: @@ -62,15 +62,15 @@ always: - block: - # Cleanup - - name: Drop created recordset - opentelekomcloud.cloud.dns_recordset: - recordset_name: "{{ recordset_random_name }}" - state: absent - zone_id: "{{ dns_zo.zone.id }}" + # Cleanup + - name: Drop created recordset + opentelekomcloud.cloud.dns_recordset: 
+ recordset_name: "{{ recordset_random_name }}" + state: absent + zone_id: "{{ dns_zo.zone.id }}" - - name: Drop created DNS zone - opentelekomcloud.cloud.dns_zone: - name: "{{ dns_zo.zone.name }}" - state: absent - ignore_errors: yes + - name: Drop created DNS zone + opentelekomcloud.cloud.dns_zone: + name: "{{ dns_zo.zone.name }}" + state: absent + ignore_errors: true diff --git a/tests/integration/targets/lb_certificate/tasks/main.yaml b/tests/integration/targets/lb_certificate/tasks/main.yaml index b247dae9..c81b7ec9 100644 --- a/tests/integration/targets/lb_certificate/tasks/main.yaml +++ b/tests/integration/targets/lb_certificate/tasks/main.yaml @@ -230,15 +230,15 @@ always: - block: # Cleanup - - name: Drop perhaps existing CA cert - opentelekomcloud.cloud.lb_certificate: - name: "{{ cert_name_ca }}" - state: absent - register: drop - - - name: Drop perhaps existing Server cert - opentelekomcloud.cloud.lb_certificate: - name: "{{ cert_name_srv }}" - state: absent - register: drop + - name: Drop perhaps existing CA cert + opentelekomcloud.cloud.lb_certificate: + name: "{{ cert_name_ca }}" + state: absent + register: drop + + - name: Drop perhaps existing Server cert + opentelekomcloud.cloud.lb_certificate: + name: "{{ cert_name_srv }}" + state: absent + register: drop ignore_errors: true diff --git a/tests/integration/targets/lb_listener_certificates/tasks/main.yaml b/tests/integration/targets/lb_listener_certificates/tasks/main.yaml index 3b1091ec..edbb3b68 100644 --- a/tests/integration/targets/lb_listener_certificates/tasks/main.yaml +++ b/tests/integration/targets/lb_listener_certificates/tasks/main.yaml @@ -137,7 +137,7 @@ name: "{{ router_name }}" state: present network: admin_external_net - enable_snat: True + enable_snat: true interfaces: - net: "{{ lb_net.network.name }}" subnet: "{{ lb_net_subnet.subnet.name }}" @@ -231,7 +231,7 @@ loadbalancer: "{{ lb.loadbalancer.id }}" name: "{{ listener_name }}" default_tls_container_ref: "{{ cert_1_info.elb_certificates[0].id }}" - sni_container_refs: [ "{{ cert_1.elb_certificate.id }}" ] + sni_container_refs: ["{{ cert_1.elb_certificate.id }}"] register: listener - name: assert result @@ -248,7 +248,7 @@ loadbalancer: "{{ lb.loadbalancer.id }}" name: "{{ listener_name }}" default_tls_container_ref: "{{ cert_1_info.elb_certificates[0].id }}" - sni_container_refs: [ "{{ cert_1.elb_certificate.id }}" ] + sni_container_refs: ["{{ cert_1.elb_certificate.id }}"] register: listener - name: assert result @@ -265,7 +265,7 @@ loadbalancer: "{{ lb.loadbalancer.id }}" name: "{{ listener_name }}" default_tls_container_ref: "{{ cert_1_info.elb_certificates[0].id }}" - sni_container_refs: [ "{{ cert_2.elb_certificate.id }}" ] + sni_container_refs: ["{{ cert_2.elb_certificate.id }}"] register: listener - name: assert result @@ -290,7 +290,7 @@ opentelekomcloud.cloud.loadbalancer: name: "{{ lb.loadbalancer.id }}" state: absent - delete_public_ip: yes + delete_public_ip: true register: loadbalancer_drop - name: assert result @@ -325,46 +325,46 @@ always: - block: # Cleanup - - name: Drop perhaps existing Server cert 1 - opentelekomcloud.cloud.lb_certificate: - name: "{{ cert_name_srv_1 }}" - state: absent - register: drop - - - name: Drop perhaps existing Server cert 2 - opentelekomcloud.cloud.lb_certificate: - name: "{{ cert_name_srv_2 }}" - state: absent - register: drop - - - name: Drop perhaps existing listener - opentelekomcloud.cloud.lb_listener: - state: absent - name: "{{ listener_name }}" - register: drop - - - name: Drop perhaps existing 
loadbalancer - opentelekomcloud.cloud.loadbalancer: - name: "{{ lb.loadbalancer.id }}" - state: absent - delete_public_ip: yes - register: drop - - - name: Drop existing Router - openstack.cloud.os_router: - name: "{{ router_name }}" - state: absent - register: drop - - - name: Drop existing subnet - openstack.cloud.os_subnet: - name: "{{ subnet_name }}" - state: absent - register: drop - - - name: Drop existing network - openstack.cloud.os_network: - name: "{{ network_name }}" - state: absent - register: drop + - name: Drop perhaps existing Server cert 1 + opentelekomcloud.cloud.lb_certificate: + name: "{{ cert_name_srv_1 }}" + state: absent + register: drop + + - name: Drop perhaps existing Server cert 2 + opentelekomcloud.cloud.lb_certificate: + name: "{{ cert_name_srv_2 }}" + state: absent + register: drop + + - name: Drop perhaps existing listener + opentelekomcloud.cloud.lb_listener: + state: absent + name: "{{ listener_name }}" + register: drop + + - name: Drop perhaps existing loadbalancer + opentelekomcloud.cloud.loadbalancer: + name: "{{ lb.loadbalancer.id }}" + state: absent + delete_public_ip: true + register: drop + + - name: Drop existing Router + openstack.cloud.os_router: + name: "{{ router_name }}" + state: absent + register: drop + + - name: Drop existing subnet + openstack.cloud.os_subnet: + name: "{{ subnet_name }}" + state: absent + register: drop + + - name: Drop existing network + openstack.cloud.os_network: + name: "{{ network_name }}" + state: absent + register: drop ignore_errors: true diff --git a/tests/integration/targets/loadbalancer/tasks/main.yaml b/tests/integration/targets/loadbalancer/tasks/main.yaml index 491608a6..138dcc23 100644 --- a/tests/integration/targets/loadbalancer/tasks/main.yaml +++ b/tests/integration/targets/loadbalancer/tasks/main.yaml @@ -36,7 +36,7 @@ name: "{{ router_name }}" state: present network: admin_external_net - enable_snat: True + enable_snat: true interfaces: - net: "{{ lb_net.network.name }}" subnet: "{{ lb_net_subnet.subnet.name }}" @@ -49,7 +49,7 @@ vip_subnet: "{{ subnet_name }}" timeout: 150 register: loadbalancer_check - check_mode: yes + check_mode: true - name: assert result assert: @@ -75,8 +75,8 @@ name: "{{ loadbalancer_name }}" state: present vip_subnet: "{{ subnet_name }}" - auto_public_ip: yes - wait: yes + auto_public_ip: true + wait: true timeout: 600 - name: Get loadbalancer by Name @@ -204,7 +204,7 @@ # pass ID as name name: "{{ lb.loadbalancer.id }}" state: absent - delete_public_ip: yes + delete_public_ip: true register: dropped - name: assert result @@ -229,28 +229,28 @@ always: - block: # Cleanup - - name: Drop perhaps existing loadbalancer - opentelekomcloud.cloud.loadbalancer: - name: "{{ lb.loadbalancer.id }}" - state: absent - delete_public_ip: yes - register: lb - - - name: Drop existing Router - openstack.cloud.os_router: - name: "{{ router_name }}" - state: absent - register: lb_net_router - - - name: Drop existing subnet - openstack.cloud.os_subnet: - name: "{{ subnet_name }}" - state: absent - register: lb_net_subnet - - - name: Drop existing network - openstack.cloud.os_network: - name: "{{ network_name }}" - state: absent - register: lb_net + - name: Drop perhaps existing loadbalancer + opentelekomcloud.cloud.loadbalancer: + name: "{{ lb.loadbalancer.id }}" + state: absent + delete_public_ip: true + register: lb + + - name: Drop existing Router + openstack.cloud.os_router: + name: "{{ router_name }}" + state: absent + register: lb_net_router + + - name: Drop existing subnet + 
openstack.cloud.os_subnet: + name: "{{ subnet_name }}" + state: absent + register: lb_net_subnet + + - name: Drop existing network + openstack.cloud.os_network: + name: "{{ network_name }}" + state: absent + register: lb_net ignore_errors: true diff --git a/tests/integration/targets/nat/tasks/main.yaml b/tests/integration/targets/nat/tasks/main.yaml index 69ea6b7a..eda57f11 100644 --- a/tests/integration/targets/nat/tasks/main.yaml +++ b/tests/integration/targets/nat/tasks/main.yaml @@ -21,7 +21,7 @@ nat_gateway_name: "{{ ( prefix + '_nat-gateway') }}" server_name: "{{ ( prefix + '_nat-server') }}" server_flavor: "s2.medium.2" - image_name: Standard_CentOS_8_latest + image_name: Standard_CentOS_8_latest - name: Create network for NAT openstack.cloud.network: @@ -43,7 +43,7 @@ name: "{{ router_name }}" state: present network: admin_external_net - enable_snat: False + enable_snat: false interfaces: - net: "{{ nat_net.network.name }}" subnet: "{{ nat_subnet.subnet.name }}" @@ -56,7 +56,7 @@ router: "{{ router_name }}" check_mode: true register: nat_gw_ch - + - name: assert result assert: that: @@ -69,13 +69,13 @@ internal_network: "{{ network_name }}" router: "{{ router_name }}" register: nat_gw - + - name: assert result assert: that: - nat_gw is success - nat_gw.gateway.id is defined - + - name: Add NAT gateway description - check mode nat_gateway: name: "{{ nat_gw.gateway.name }}" @@ -88,7 +88,7 @@ that: - nat_gw_ch is success - nat_gw_ch is changed - + - name: Add NAT gateway description nat_gateway: name: "{{ nat_gw.gateway.name }}" @@ -100,12 +100,12 @@ that: - nat_gw is success - nat_gw.gateway.description is defined - + - name: Allocate EIP for SNAT rule opentelekomcloud.cloud.floating_ip: network: admin_external_net register: fip - + - name: assert result assert: that: @@ -158,7 +158,7 @@ opentelekomcloud.cloud.floating_ip: network: admin_external_net register: fip2 - + - name: assert result assert: that: @@ -200,84 +200,84 @@ always: - block: - # Cleanup - - name: List SNAT rules of gateway - nat_snat_rule_info: - gateway: "{{ nat_gateway_name }}" - when: - - nat_gw.gateway.id is defined - register: snat_rules - - - name: Drop SNAT rules - nat_snat_rule: - id: "{{ item.id }}" - state: absent - loop: "{{ snat_rules.snat_rules }}" - when: - - nat_gw.gateway.id is defined - - snat_rules.snat_rules is defined - register: snat - - - name: Drop EIP for snat rule - opentelekomcloud.cloud.floating_ip: - floating_ip_address: "{{ fip.floating_ip.floating_ip_address }}" - purge: true - state: absent - when: fip is defined - register: fip - - - name: List DNAT rules of gateway - nat_dnat_rule_info: - gateway: "{{ nat_gateway_name }}" - when: - - nat_gw.gateway.id is defined - register: dnat_rules - - - name: Drop DNAT rules - nat_dnat_rule: - id: "{{ item.id }}" - state: absent - loop: "{{ dnat_rules.dnat_rules }}" - when: - - nat_gw.gateway.id is defined - - dnat_rules.dnat_rules is defined - register: dnat - - - name: Drop EIP for dnat rule - opentelekomcloud.cloud.floating_ip: - floating_ip_address: "{{ fip2.floating_ip.floating_ip_address }}" - purge: true - state: absent - when: fip2 is defined - register: fip2 - - - name: Drop server instance - openstack.cloud.server: - name: "{{ server_name }}" - state: absent - register: server - - - name: Drop existing NAT gateway - nat_gateway: - name: "{{ nat_gateway_name }}" - state: absent - register: nat_gw - - - name: Drop existing Router - openstack.cloud.router: - name: "{{ router_name }}" - state: absent - register: nat_router - - - name: Drop 
existing subnet - openstack.cloud.subnet: - name: "{{ subnet_name }}" - state: absent - register: nat_subnet - - - name: Drop existing network - openstack.cloud.network: - name: "{{ network_name }}" - state: absent - register: nat_net - ignore_errors: yes \ No newline at end of file + # Cleanup + - name: List SNAT rules of gateway + nat_snat_rule_info: + gateway: "{{ nat_gateway_name }}" + when: + - nat_gw.gateway.id is defined + register: snat_rules + + - name: Drop SNAT rules + nat_snat_rule: + id: "{{ item.id }}" + state: absent + loop: "{{ snat_rules.snat_rules }}" + when: + - nat_gw.gateway.id is defined + - snat_rules.snat_rules is defined + register: snat + + - name: Drop EIP for snat rule + opentelekomcloud.cloud.floating_ip: + floating_ip_address: "{{ fip.floating_ip.floating_ip_address }}" + purge: true + state: absent + when: fip is defined + register: fip + + - name: List DNAT rules of gateway + nat_dnat_rule_info: + gateway: "{{ nat_gateway_name }}" + when: + - nat_gw.gateway.id is defined + register: dnat_rules + + - name: Drop DNAT rules + nat_dnat_rule: + id: "{{ item.id }}" + state: absent + loop: "{{ dnat_rules.dnat_rules }}" + when: + - nat_gw.gateway.id is defined + - dnat_rules.dnat_rules is defined + register: dnat + + - name: Drop EIP for dnat rule + opentelekomcloud.cloud.floating_ip: + floating_ip_address: "{{ fip2.floating_ip.floating_ip_address }}" + purge: true + state: absent + when: fip2 is defined + register: fip2 + + - name: Drop server instance + openstack.cloud.server: + name: "{{ server_name }}" + state: absent + register: server + + - name: Drop existing NAT gateway + nat_gateway: + name: "{{ nat_gateway_name }}" + state: absent + register: nat_gw + + - name: Drop existing Router + openstack.cloud.router: + name: "{{ router_name }}" + state: absent + register: nat_router + + - name: Drop existing subnet + openstack.cloud.subnet: + name: "{{ subnet_name }}" + state: absent + register: nat_subnet + + - name: Drop existing network + openstack.cloud.network: + name: "{{ network_name }}" + state: absent + register: nat_net + ignore_errors: true diff --git a/tests/integration/targets/prepare_tests/tasks/main.yaml b/tests/integration/targets/prepare_tests/tasks/main.yaml index 5c800ecb..534f008f 100644 --- a/tests/integration/targets/prepare_tests/tasks/main.yaml +++ b/tests/integration/targets/prepare_tests/tasks/main.yaml @@ -2,4 +2,4 @@ - name: set fact set_fact: # otc_cloud: "{{ lookup('env', 'OS_CLOUD') | default('otc') }}" - #otc_cloud: otc + # otc_cloud: otc diff --git a/tests/integration/targets/rds_backup/tasks/main.yaml b/tests/integration/targets/rds_backup/tasks/main.yaml index cf4abf2b..78e98358 100644 --- a/tests/integration/targets/rds_backup/tasks/main.yaml +++ b/tests/integration/targets/rds_backup/tasks/main.yaml @@ -9,7 +9,7 @@ name: "test_ansible_module" register: rds_backup check_mode: true - ignore_errors: yes + ignore_errors: true - name: assert result assert: diff --git a/tests/integration/targets/rds_backup_info/tasks/main.yaml b/tests/integration/targets/rds_backup_info/tasks/main.yaml index 85974b53..e022fa27 100644 --- a/tests/integration/targets/rds_backup_info/tasks/main.yaml +++ b/tests/integration/targets/rds_backup_info/tasks/main.yaml @@ -7,7 +7,7 @@ opentelekomcloud.cloud.rds_backup_info: instance: register: rds_backup_info - ignore_errors: yes + ignore_errors: true - name: assert result assert: @@ -19,7 +19,7 @@ - name: Get error message that required parameter is missing opentelekomcloud.cloud.rds_backup_info: register: 
rds_backup_info - ignore_errors: yes + ignore_errors: true - name: assert result assert: diff --git a/tests/integration/targets/rds_flavor_info/tasks/main.yaml b/tests/integration/targets/rds_flavor_info/tasks/main.yaml index e1a92d4e..ce91fc9b 100644 --- a/tests/integration/targets/rds_flavor_info/tasks/main.yaml +++ b/tests/integration/targets/rds_flavor_info/tasks/main.yaml @@ -30,7 +30,7 @@ version: "10" instance_mode: "single" register: rds - ignore_errors: yes + ignore_errors: true - name: assert result assert: diff --git a/tests/integration/targets/rds_instance/tasks/main.yaml b/tests/integration/targets/rds_instance/tasks/main.yaml index 562f0994..fcf210a9 100644 --- a/tests/integration/targets/rds_instance/tasks/main.yaml +++ b/tests/integration/targets/rds_instance/tasks/main.yaml @@ -50,7 +50,7 @@ name: "{{ router_name }}" state: present network: admin_external_net - enable_snat: True + enable_snat: true interfaces: - net: "{{ test_network.network.name }}" subnet: "{{ test_subnet.subnet.name }}" @@ -82,26 +82,26 @@ always: - block: # Cleanup - - name: Drop instance - opentelekomcloud.cloud.rds_instance: - name: "{{ instance_name }}" - state: "absent" + - name: Drop instance + opentelekomcloud.cloud.rds_instance: + name: "{{ instance_name }}" + state: "absent" - - name: Drop router - openstack.cloud.router: - cloud: "{{ test_cloud }}" - name: "{{ router_name }}" - state: absent + - name: Drop router + openstack.cloud.router: + cloud: "{{ test_cloud }}" + name: "{{ router_name }}" + state: absent - - name: Drop subnet - openstack.cloud.subnet: - cloud: "{{ test_cloud }}" - name: "{{ subnet_name }}" - state: absent + - name: Drop subnet + openstack.cloud.subnet: + cloud: "{{ test_cloud }}" + name: "{{ subnet_name }}" + state: absent - - name: Drop network - openstack.cloud.network: - cloud: "{{ test_cloud }}" - name: "{{ network_name }}" - state: absent - ignore_errors: yes + - name: Drop network + openstack.cloud.network: + cloud: "{{ test_cloud }}" + name: "{{ network_name }}" + state: absent + ignore_errors: true diff --git a/tests/integration/targets/vpc_peering_test/tasks/main.yaml b/tests/integration/targets/vpc_peering_test/tasks/main.yaml index 12b53386..0cc7f1f4 100644 --- a/tests/integration/targets/vpc_peering_test/tasks/main.yaml +++ b/tests/integration/targets/vpc_peering_test/tasks/main.yaml @@ -41,7 +41,7 @@ name: "{{ router_name_1 }}" state: present network: admin_external_net - enable_snat: True + enable_snat: true interfaces: - net: "{{ test_network_1.network.name }}" subnet: "{{ test_subnet_1.subnet.name }}" @@ -67,7 +67,7 @@ name: "{{ router_name_2 }}" state: present network: admin_external_net - enable_snat: True + enable_snat: true interfaces: - net: "{{ test_network_2.network.name }}" subnet: "{{ test_subnet_2.subnet.name }}" @@ -81,7 +81,7 @@ remote_router: "{{ test_router_2.router.id }}" remote_project: "{{ project_id }}" register: vpc_peering_check - check_mode: yes + check_mode: true - name: assert result assert: @@ -123,46 +123,46 @@ - block: # Cleanup - - name: Drop existing vpc peering - opentelekomcloud.cloud.vpc_peering: - # pass ID as name - name: "{{ updated_vpc_peering.vpc_peering.name }}" - state: absent - register: dropped - - - name: assert result - assert: - that: - - dropped is success - - dropped is changed - - - name: Drop existing first router - openstack.cloud.router: - name: "{{ router_name_1 }}" - state: absent - - - name: Drop existing first subnet - openstack.cloud.subnet: - name: "{{ subnet_name_1 }}" - state: absent - - - name: Drop 
existing first network - openstack.cloud.network: - name: "{{ network_name_1 }}" - state: absent - - - name: Drop existing second router - openstack.cloud.router: - name: "{{ router_name_2 }}" - state: absent - - - name: Drop existing second subnet - openstack.cloud.subnet: - name: "{{ subnet_name_2 }}" - state: absent - - - name: Drop existing second network - openstack.cloud.network: - name: "{{ network_name_2 }}" - state: absent - ignore_errors: yes + - name: Drop existing vpc peering + opentelekomcloud.cloud.vpc_peering: + # pass ID as name + name: "{{ updated_vpc_peering.vpc_peering.name }}" + state: absent + register: dropped + + - name: assert result + assert: + that: + - dropped is success + - dropped is changed + + - name: Drop existing first router + openstack.cloud.router: + name: "{{ router_name_1 }}" + state: absent + + - name: Drop existing first subnet + openstack.cloud.subnet: + name: "{{ subnet_name_1 }}" + state: absent + + - name: Drop existing first network + openstack.cloud.network: + name: "{{ network_name_1 }}" + state: absent + + - name: Drop existing second router + openstack.cloud.router: + name: "{{ router_name_2 }}" + state: absent + + - name: Drop existing second subnet + openstack.cloud.subnet: + name: "{{ subnet_name_2 }}" + state: absent + + - name: Drop existing second network + openstack.cloud.network: + name: "{{ network_name_2 }}" + state: absent + ignore_errors: true diff --git a/tests/integration/targets/vpc_route_test/tasks/main.yaml b/tests/integration/targets/vpc_route_test/tasks/main.yaml index 3cfe056d..d4adc36c 100644 --- a/tests/integration/targets/vpc_route_test/tasks/main.yaml +++ b/tests/integration/targets/vpc_route_test/tasks/main.yaml @@ -41,7 +41,7 @@ name: "{{ router_name_1 }}" state: present network: admin_external_net - enable_snat: True + enable_snat: true interfaces: - net: "{{ test_network_1.network.name }}" subnet: "{{ test_subnet_1.subnet.name }}" @@ -67,7 +67,7 @@ name: "{{ router_name_2 }}" state: present network: admin_external_net - enable_snat: True + enable_snat: true interfaces: - net: "{{ test_network_2.network.name }}" subnet: "{{ test_subnet_2.subnet.name }}" @@ -112,51 +112,51 @@ - block: # Cleanup - - name: Drop existing vpc route - opentelekomcloud.cloud.vpc_route: - # pass ID as name - route_id: "{{ vpc_route.vpc_route.id }}" - state: absent - register: dropped - - - name: assert result - assert: - that: - - dropped is success - - dropped is changed - - - name: Drop vpc peering - opentelekomcloud.cloud.vpc_peering: - name: "{{ vpc_peering_name }}" - state: absent - - - name: Drop existing first router - openstack.cloud.router: - name: "{{ router_name_1 }}" - state: absent - - - name: Drop existing first subnet - openstack.cloud.subnet: - name: "{{ subnet_name_1 }}" - state: absent - - - name: Drop existing first network - openstack.cloud.network: - name: "{{ network_name_1 }}" - state: absent - - - name: Drop existing second router - openstack.cloud.router: - name: "{{ router_name_2 }}" - state: absent - - - name: Drop existing second subnet - openstack.cloud.subnet: - name: "{{ subnet_name_2 }}" - state: absent - - - name: Drop existing second network - openstack.cloud.network: - name: "{{ network_name_2 }}" - state: absent - ignore_errors: yes + - name: Drop existing vpc route + opentelekomcloud.cloud.vpc_route: + # pass ID as name + route_id: "{{ vpc_route.vpc_route.id }}" + state: absent + register: dropped + + - name: assert result + assert: + that: + - dropped is success + - dropped is changed + + - name: Drop 
vpc peering + opentelekomcloud.cloud.vpc_peering: + name: "{{ vpc_peering_name }}" + state: absent + + - name: Drop existing first router + openstack.cloud.router: + name: "{{ router_name_1 }}" + state: absent + + - name: Drop existing first subnet + openstack.cloud.subnet: + name: "{{ subnet_name_1 }}" + state: absent + + - name: Drop existing first network + openstack.cloud.network: + name: "{{ network_name_1 }}" + state: absent + + - name: Drop existing second router + openstack.cloud.router: + name: "{{ router_name_2 }}" + state: absent + + - name: Drop existing second subnet + openstack.cloud.subnet: + name: "{{ subnet_name_2 }}" + state: absent + + - name: Drop existing second network + openstack.cloud.network: + name: "{{ network_name_2 }}" + state: absent + ignore_errors: true diff --git a/tests/integration/targets/vpn_service_info/tasks/main.yaml b/tests/integration/targets/vpn_service_info/tasks/main.yaml index 6a772032..5596eee1 100644 --- a/tests/integration/targets/vpn_service_info/tasks/main.yaml +++ b/tests/integration/targets/vpn_service_info/tasks/main.yaml @@ -6,7 +6,7 @@ - name: Get list of vpn services opentelekomcloud.cloud.vpn_service_info: register: vpn_service_info - ignore_errors: yes + ignore_errors: true - name: assert result assert: @@ -18,7 +18,7 @@ opentelekomcloud.cloud.vpn_service_info: vpn_service: "fake_vpn" register: vpn_service_info - ignore_errors: yes + ignore_errors: true - name: assert result assert: @@ -32,7 +32,7 @@ opentelekomcloud.cloud.vpn_service_info: subnet: "fake_subnet" register: vpn_service_info - ignore_errors: yes + ignore_errors: true - name: assert result assert: @@ -46,7 +46,7 @@ opentelekomcloud.cloud.vpn_service_info: router: "fake_router" register: vpn_service_info - ignore_errors: yes + ignore_errors: true - name: assert result assert: @@ -62,7 +62,7 @@ subnet: "fake_subnet" router: "fake_router" register: vpn_service_info - ignore_errors: yes + ignore_errors: true - name: assert result assert: diff --git a/tests/integration/targets/waf_domain/tasks/main.yaml b/tests/integration/targets/waf_domain/tasks/main.yaml index 0c23a655..ae365c67 100644 --- a/tests/integration/targets/waf_domain/tasks/main.yaml +++ b/tests/integration/targets/waf_domain/tasks/main.yaml @@ -94,7 +94,7 @@ sip_header_list: - X-Forwarded-For register: domain_check - check_mode: yes + check_mode: true - name: assert result assert: @@ -208,18 +208,18 @@ always: - block: # Cleanup - - name: Drop perhaps existing domain - waf_domain: - name: "{{ domain_name }}" - state: absent - - - name: Drop certificate - waf_certificate: - name: "{{ cert_name }}" - state: absent - - - name: Drop 2nd certificate - waf_certificate: - name: "{{ cert2_name }}" - state: absent + - name: Drop perhaps existing domain + waf_domain: + name: "{{ domain_name }}" + state: absent + + - name: Drop certificate + waf_certificate: + name: "{{ cert_name }}" + state: absent + + - name: Drop 2nd certificate + waf_certificate: + name: "{{ cert2_name }}" + state: absent ignore_errors: true diff --git a/tox.ini b/tox.ini index 516e072a..2d38f709 100644 --- a/tox.ini +++ b/tox.ini @@ -70,9 +70,7 @@ passenv = * deps = {[testenv]deps} commands = - {[testenv:build]commands} - {[testenv:pep8]commands} -# {[testenv:yamllint]commands} + ansible-lint -vvv ansible --version [testenv:linters-29] From fd6c36e1d2e1cc8fa62bfde1fd2daf7474523eae Mon Sep 17 00:00:00 2001 From: Rodion Gyrbu Date: Wed, 24 Nov 2021 12:59:30 +0300 Subject: [PATCH 21/65] Fix remaining orphans after deletion in `as_instance` (#152) Fix 
remaining orphans after deletion in `as_instance` Reviewed-by: Anton Kachurin Reviewed-by: Vladimir Vshivkov Reviewed-by: Artem Goncharov Reviewed-by: None --- .../targets/as_instance/tasks/main.yaml | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/integration/targets/as_instance/tasks/main.yaml b/tests/integration/targets/as_instance/tasks/main.yaml index 8ec922fa..088f1269 100644 --- a/tests/integration/targets/as_instance/tasks/main.yaml +++ b/tests/integration/targets/as_instance/tasks/main.yaml @@ -103,7 +103,7 @@ size: 10 share_type: "PER" charging_mode: "traffic" - security_groups: [{'id': "{{ secgroup.secgroup.id }}"}] + security_groups: [{"id": "{{ secgroup.secgroup.id }}"}] register: as_config - name: Create AS group @@ -115,8 +115,8 @@ desire_instance_number: "{{ desire_instance_number }}" max_instance_number: "{{ max_instance_number }}" availability_zones: ["{{ az2_name }}"] - networks: [{'id': "{{ network.network.id }}"}] - security_groups: [{'id': "{{ secgroup.secgroup.id }}"}] + networks: [{"id": "{{ network.network.id }}"}] + security_groups: [{"id": "{{ secgroup.secgroup.id }}"}] router: "{{ router.router.id }}" delete_publicip: true delete_volume: true @@ -133,7 +133,6 @@ network: "{{ network_name }}" flavor: "{{ server_flavor }}" availability_zone: "{{ az2_name }}" - delete_fip: true register: ecs1 - name: Create ECS2 instance @@ -143,7 +142,6 @@ network: "{{ network_name }}" flavor: "{{ server_flavor }}" availability_zone: "{{ az2_name }}" - delete_fip: true register: ecs2 - name: Get list of AS Instances @@ -442,15 +440,17 @@ always: - block: - # Cleanup + # Cleanup - name: Delete ECS1 openstack.cloud.server: name: "{{ (as_instance_name + '_1') }}" + delete_fip: true state: absent - name: Delete ECS2 openstack.cloud.server: name: "{{ (as_instance_name + '_2') }}" + delete_fip: true state: absent - name: Delete as group @@ -481,6 +481,11 @@ - dropped_as_config is success - dropped_as_config is changed + - name: Delete security group + openstack.cloud.security_group: + name: "{{ secgroup_name }}" + state: absent + - name: Delete existing router openstack.cloud.os_router: name: "{{ router.router.name }}" @@ -496,11 +501,6 @@ name: "{{ network.network.name }}" state: absent - - name: Delete security group - openstack.cloud.security_group: - name: "{{ secgroup_name }}" - state: absent - - name: Delete keypair openstack.cloud.os_keypair: name: "{{ kp_name }}" From e14c672350346223312604261cc1e07eec12d5e6 Mon Sep 17 00:00:00 2001 From: Vladimir Vshivkov <32225815+enrrou@users.noreply.github.com> Date: Wed, 24 Nov 2021 15:02:10 +0400 Subject: [PATCH 22/65] added css_snapshot_info module (#144) added css_snapshot_info module Resolves #142 Reviewed-by: None Reviewed-by: Polina Gubina Reviewed-by: Rodion Gyrbu Reviewed-by: Anton Kachurin Reviewed-by: Irina Pereiaslavskaia --- meta/runtime.yml | 1 + plugins/modules/css_snapshot_info.py | 153 ++++++++++++++++++ .../targets/css_snapshot_info/tasks/main.yaml | 17 ++ tests/sanity/ignore-2.10.txt | 1 + tests/sanity/ignore-2.9.txt | 1 + 5 files changed, 173 insertions(+) create mode 100644 plugins/modules/css_snapshot_info.py create mode 100644 tests/integration/targets/css_snapshot_info/tasks/main.yaml diff --git a/meta/runtime.yml b/meta/runtime.yml index dad3fee4..8e394fc1 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -20,6 +20,7 @@ action_groups: - cce_node_pool_info - cce_node_pool - css_cluster_info + - css_snapshot_info - deh_host - deh_host_info - deh_host_type_info diff 
--git a/plugins/modules/css_snapshot_info.py b/plugins/modules/css_snapshot_info.py new file mode 100644 index 00000000..439023bf --- /dev/null +++ b/plugins/modules/css_snapshot_info.py @@ -0,0 +1,153 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +--- +module: css_snapshot_info +short_description: Get CSS snapshot info +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.9.0" +author: "Vladimir Vshivkov (@enrrou)" +description: + - Get Cloud Search Service snapshot info +options: + cluster: + description: Name of the cluster, to which the snapshot to be queried belongs. + type: str +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +snapshots: + description: Dictionary of CSS snapshot info + returned: changed + type: list + sample: [ + { + "backups": [ + { + "created": "2021-11-22T13:00:00", + "datastore": { + "type": "elasticsearch", + "version": "7.6.2" + }, + "description": "", + "id": "e29d99c1-3d19-4ea4-ae8d-f252df76cbe9", + "clusterId": "37cb1075-c38e-4cd8-81df-442d52df3786", + "clusterName": "Es-xfx", + "name": "snapshot-002", + "status": "COMPLETED", + "updated": "2021-11-22T13:00:00", + "backupType": "1", + "backupMethod": "manual", + "backupExpectedStartTime": null, + "backupKeepDay": null, + "backupPeriod": null, + "indices": ".kibana,website2", + "totalShards": 6, + "failedShards": 0, + "version": "6.2.3", + "restoreStatus": "success", + "startTime": 1520408087099, + "endTime": 1520408412219, + "bucketName": "obs-b8ed" + }, + { + "created": "2021-11-22T13:00:00", + "datastore": { + "type": "elasticsearch", + "version": "7.6.2" + }, + "description": "", + "id": "29a2254e-947f-4463-b65a-5f0b17515fae", + "clusterId": "37cb1075-c38e-4cd8-81df-442d52df3786", + "clusterName": "Es-xfx", + "name": "snapshot-001", + "status": "COMPLETED", + "updated": "2021-11-22T13:00:00", + "backupType": "1", + "backupMethod": "manual", + "backupExpectedStartTime": null, + "backupKeepDay": null, + "backupPeriod": null, + "indices": ".kibana", + "totalShards": 1, + "failedShards": 0, + "version": "7.6.2", + "restoreStatus": "none", + "startTime": 1520350957275, + "endTime": 1520351284357, + "bucketName": "obs-b8ed" + } + ] + } +] +''' + +EXAMPLES = ''' +#Query CSS Snapshots +--- +- hosts: localhost + tasks: + - name: Get CSS Snapshots + opentelekomcloud.cloud.css_snapshot_info: + cluster: test + register: result +''' + + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class CssSnapshotInfoModule(OTCModule): + + argument_spec = dict( + cluster=dict(required=False) + ) + + module_kwargs = dict( + supports_check_mode=True + ) + + def run(self): + data = [] + cluster_name = self.params['cluster'] + + # search cluster by name or id + if self.params['cluster']: + cluster = self.conn.css.find_cluster(name_or_id=cluster_name) + else: + self.fail(changed=False, + msg='CSS cluster is missing') + + # if exists list snapshots + if cluster: + snapshots = 
self.conn.css.snapshots(cluster['id']) + for snapshot in snapshots: + dt = snapshot.to_dict() + dt.pop('location') + data.append(dt) + + self.exit_json( + changed=False, + snapshot_list=data + ) + + +def main(): + module = CssSnapshotInfoModule() + module() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/css_snapshot_info/tasks/main.yaml b/tests/integration/targets/css_snapshot_info/tasks/main.yaml new file mode 100644 index 00000000..5f2bea17 --- /dev/null +++ b/tests/integration/targets/css_snapshot_info/tasks/main.yaml @@ -0,0 +1,17 @@ +--- +- module_defaults: + opentelekomcloud.cloud.css_snapshot_info: + cloud: "{{ test_cloud }}" + block: + - name: Get info about snapshots. + opentelekomcloud.cloud.css_snapshot_info: + cluster: + register: result + ignore_errors: true + + - name: assert result + assert: + that: + - result is not success + - result is not changed + - 'result.msg == "CSS cluster is missing"' diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 446689ce..fca6c949 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -22,6 +22,7 @@ plugins/modules/ces_quotas_info.py validate-modules:missing-gplv3-license plugins/modules/cce_cluster_node_info.py validate-modules:missing-gplv3-license plugins/modules/cce_cluster_node.py validate-modules:missing-gplv3-license plugins/modules/css_cluster_info.py validate-modules:missing-gplv3-license +plugins/modules/css_snapshot_info.py validate-modules:missing-gplv3-license plugins/modules/dms_instance.py validate-modules:missing-gplv3-license plugins/modules/dms_instance_info.py validate-modules:missing-gplv3-license plugins/modules/dms_instance_topic.py validate-modules:missing-gplv3-license diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index f17b06b8..45883eff 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -22,6 +22,7 @@ plugins/modules/ces_metrics_info.py validate-modules:missing-gplv3-license plugins/modules/ces_quotas_info.py validate-modules:missing-gplv3-license plugins/modules/cce_cluster_node_info.py validate-modules:missing-gplv3-license plugins/modules/css_cluster_info.py validate-modules:missing-gplv3-license +plugins/modules/css_snapshot_info.py validate-modules:missing-gplv3-license plugins/modules/dms_instance.py validate-modules:missing-gplv3-license plugins/modules/dms_instance_info.py validate-modules:missing-gplv3-license plugins/modules/dms_instance_topic.py validate-modules:missing-gplv3-license From bf94b1d89fdc5c84fef70752446346d36153b377 Mon Sep 17 00:00:00 2001 From: YustinaKvr <62885041+YustinaKvr@users.noreply.github.com> Date: Fri, 26 Nov 2021 12:21:06 +0300 Subject: [PATCH 23/65] CSS cluster info fix (#153) CSS cluster info fix add 2 parameters into the tests Reviewed-by: None Reviewed-by: Vladimir Vshivkov --- tests/integration/targets/css_cluster_info/tasks/main.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/targets/css_cluster_info/tasks/main.yaml b/tests/integration/targets/css_cluster_info/tasks/main.yaml index 0e16fd64..74d506f5 100644 --- a/tests/integration/targets/css_cluster_info/tasks/main.yaml +++ b/tests/integration/targets/css_cluster_info/tasks/main.yaml @@ -5,6 +5,8 @@ block: - name: Get info about clusters. 
opentelekomcloud.cloud.css_cluster_info: + limit: 1 + start: 0 register: result - name: assert result From a4e9852c4baa7d5ad618f87b56de4aca6e1a19f6 Mon Sep 17 00:00:00 2001 From: Vladimir Vshivkov <32225815+enrrou@users.noreply.github.com> Date: Fri, 26 Nov 2021 13:41:07 +0400 Subject: [PATCH 24/65] set on/off to bool (#154) rework deh_host to use proper bool instead "on/off" value resolves #151 Reviewed-by: None Reviewed-by: Rodion Gyrbu Reviewed-by: Artem Goncharov Reviewed-by: Vladimir Vshivkov --- .ansible-lint | 1 - plugins/modules/deh_host.py | 24 +++++++++---------- .../targets/deh_host/tasks/main.yaml | 4 ++-- 3 files changed, 14 insertions(+), 15 deletions(-) diff --git a/.ansible-lint b/.ansible-lint index ffee53f1..ab70b73d 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -2,7 +2,6 @@ parseable: true exclude_paths: - ci/playbooks - - tests/integration/targets/deh_host/tasks/main.yaml skip_list: - '106' # Role name does not match ``^[a-z][a-z0-9_]+$`` pattern - '204' # Lines should be no longer than 160 chars diff --git a/plugins/modules/deh_host.py b/plugins/modules/deh_host.py index edb3a501..f494c7c5 100644 --- a/plugins/modules/deh_host.py +++ b/plugins/modules/deh_host.py @@ -24,9 +24,8 @@ description: - Specifies whether to allow an ECS to be placed on any available DeH if - its DeH ID is not specified during its creation. - type: str - default: 'on' - choices: ['on', 'off'] + type: bool + default: true availability_zone: description: - Specifies the Availability zone to which the Dedicated host belongs. @@ -73,7 +72,7 @@ sample: { deh_host: { "allocated_at": null, - "auto_placement": "on", + "auto_placement": "true", "availability_zone": "eu-de-01", "available_memory": null, "available_vcpus": null, @@ -124,7 +123,7 @@ - opentelekomcloud.cloud.deh_host: cloud: otc id: "{{ deh.deh_host.dedicated_host_ids[0] }}" - auto_placement: off + auto_placement: false when: - deh is defined register: deh @@ -135,9 +134,7 @@ class DehHostModule(OTCModule): argument_spec = dict( - auto_placement=dict(required=False, - default='on', - choices=['on', 'off']), + auto_placement=dict(required=False, type='bool', default=True), availability_zone=dict(required=False), host_type=dict(required=False), id=dict(required=False), @@ -196,12 +193,17 @@ def run(self): changed = False attrs = {} + if self.params['auto_placement']: + attrs['auto_placement'] = 'on' + else: + attrs['auto_placement'] = 'off' + if host: # DeH host modification if self.ansible.check_mode: self.exit(changed=True) - if self.params['auto_placement'] and (self.params['auto_placement'] != host.auto_placement): - attrs['auto_placement'] = self.params['auto_placement'] + if attrs['auto_placement'] == host.auto_placement: + attrs.pop('auto_placement') if self.params['name'] and (self.params['name'] != host.name): attrs['name'] = self.params['name'] if attrs: @@ -222,8 +224,6 @@ def run(self): if self.ansible.check_mode: self.exit(changed=True) attrs['name'] = self.params['name'] - if self.params['auto_placement']: - attrs['auto_placement'] = self.params['auto_placement'] attrs['availability_zone'] = self.params['availability_zone'] attrs['host_type'] = self.params['host_type'] if self.params['quantity']: diff --git a/tests/integration/targets/deh_host/tasks/main.yaml b/tests/integration/targets/deh_host/tasks/main.yaml index ad9bed53..622b130d 100644 --- a/tests/integration/targets/deh_host/tasks/main.yaml +++ b/tests/integration/targets/deh_host/tasks/main.yaml @@ -59,7 +59,7 @@ - name: Modify DeH host - check mode deh_host: id: 
"{{ deh.deh_host.dedicated_host_ids[0] }}" - auto_placement: off + auto_placement: false check_mode: true when: - deh is defined @@ -73,7 +73,7 @@ - name: Modify DeH host deh_host: id: "{{ deh.deh_host.dedicated_host_ids[0] }}" - auto_placement: off + auto_placement: false when: - deh is defined register: deh From 84745ae8b1eab2491f5c2c1210f80c6609e0f4ba Mon Sep 17 00:00:00 2001 From: Vladimir Vshivkov <32225815+enrrou@users.noreply.github.com> Date: Mon, 13 Dec 2021 13:01:55 +0400 Subject: [PATCH 25/65] security_group module rework (#156) security_group module rework Resolves #148 We need to add support for passing rules (as list of dict) into the security_group module. In addition to that there must be additional parameter "exclusive" (default False) that will ensure only passed rules are existing on the group (module should delete whatever is currently present but not passed in the list of rules) Reviewed-by: None Reviewed-by: Anton Sidelnikov --- meta/runtime.yml | 1 + plugins/modules/security_group.py | 221 ++++++++++++++++++ .../targets/security_group/tasks/main.yaml | 45 ++++ tests/sanity/ignore-2.10.txt | 1 + tests/sanity/ignore-2.9.txt | 1 + 5 files changed, 269 insertions(+) create mode 100644 plugins/modules/security_group.py create mode 100644 tests/integration/targets/security_group/tasks/main.yaml diff --git a/meta/runtime.yml b/meta/runtime.yml index 8e394fc1..cb52ba8f 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -43,6 +43,7 @@ action_groups: - rds_instance - rds_backup - rds_backup_info + - security_group - security_group_info - server_group_info - tag diff --git a/plugins/modules/security_group.py b/plugins/modules/security_group.py new file mode 100644 index 00000000..03d24c3a --- /dev/null +++ b/plugins/modules/security_group.py @@ -0,0 +1,221 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +--- +module: security_group +short_description: Add/Delete security groups from an OpenTelekomCloud cloud. +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.9.0" +author: "Vladimir Vshivkov (@enrrou)" +description: + - Add or Remove security groups from an OpenTelekomCloud cloud. +options: + name: + description: + - Name that has to be given to the security group. This module + requires that security group names be unique. + required: true + type: str + description: + description: + - Long description of the purpose of the security group + type: str + state: + description: + - Should the resource be present or absent. + choices: [present, absent] + default: present + type: str + project: + description: + - Unique name or ID of the project. 
+ required: false + type: str + security_group_rules: + type: list + elements: dict + description: + - list of security group rules + exclusive: + type: bool + default: false + description: + - Deletes existing rules if true +requirements: + - "python >= 3.6" + - "openstacksdk" + - "otcextensions" +''' + +EXAMPLES = ''' +# Create a security group +- opentelekomcloud.cloud.security_group: + cloud: otc + state: present + name: foo + description: security group for foo servers + exclusive: true + +# Update the existing 'foo' security group description +- opentelekomcloud.cloud.security_group: + cloud: otc + state: present + name: foo + description: updated description for the foo security group + +# Create a security group for a given project +- opentelekomcloud.cloud.security_group: + cloud: otc + state: present + name: foo + project: myproj + +#Create a security groups with exclusive and with rules +- opentelekomcloud.cloud.security_group: + cloud: otc + state: present + name: foo + description: security group for foo servers + exclusive: True + security_group_rules: + - "direction": "egress" + "ethertype": "IPv4" + "port_range_min": "1" + "port_range_max": "50000" + "protocol": "tcp" + - "direction": "egress" + "ethertype": "IPv6" + - "direction": "ingress" + "ethertype": "IPv4" + "protocol": "icmp" +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class SecurityGroupModule(OTCModule): + + argument_spec = dict( + name=dict(required=True), + description=dict(default=''), + state=dict(default='present', choices=['absent', 'present']), + project=dict(default=None), + security_group_rules=dict(type='list', elements='dict'), + exclusive=dict(type='bool', default=False) + ) + + def _needs_update(self, secgroup): + """Check for differences in the updatable values. + + NOTE: We don't currently allow name updates. 
+ """ + if secgroup['description'] != self.params['description']: + return True + return False + + def _system_state_change(self, secgroup): + state = self.params['state'] + if state == 'present': + if not secgroup: + return True + return self._needs_update(secgroup) + if state == 'absent' and secgroup: + return True + return False + + def run(self): + + name = self.params['name'] + state = self.params['state'] + description = self.params['description'] + project = self.params['project'] + security_group_rules = self.params['security_group_rules'] + exclusive = self.params['exclusive'] + + data = [] + + if project is not None: + proj = self.conn.get_project(project) + if proj is None: + self.fail_json(msg='Project %s could not be found' % project) + project_id = proj['id'] + else: + project_id = self.conn.current_project_id + + if project_id: + filters = {'tenant_id': project_id} + else: + filters = None + + secgroup = self.conn.get_security_group(name, filters=filters) + sg_rules = None + + if self.ansible.check_mode: + self.exit(changed=self._system_state_change(secgroup)) + + changed = False + if state == 'present': + if not secgroup: + kwargs = {} + if project_id: + kwargs['project_id'] = project_id + secgroup = self.conn.create_security_group(name, description, + **kwargs) + changed = True + else: + if self._needs_update(secgroup): + secgroup = self.conn.update_security_group( + secgroup['id'], description=description) + changed = True + + if exclusive: + # delete security group rules if any exists + sg_rules = self.conn.network.security_group_rules( + security_group_id=secgroup.id) + if sg_rules: + for rule in sg_rules: + self.conn.network.delete_security_group_rule( + security_group_rule=rule.id) + + if security_group_rules is not None: + # create rules + for rule in security_group_rules: + self.conn.create_security_group_rule(name, **rule) + sg_rules = self.conn.network.security_group_rules( + security_group_id=secgroup.id) + # prepare sg rules data + for raw in sg_rules: + dt = raw.to_dict() + data.append(dt) + changed = True + + self.exit( + changed=changed, id=secgroup['id'], + secgroup=secgroup, + secgroup_rules=data) + + if state == 'absent': + if secgroup: + self.conn.delete_security_group(secgroup['id']) + changed = True + self.exit(changed=changed) + + +def main(): + module = SecurityGroupModule() + module() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/security_group/tasks/main.yaml b/tests/integration/targets/security_group/tasks/main.yaml new file mode 100644 index 00000000..f790044c --- /dev/null +++ b/tests/integration/targets/security_group/tasks/main.yaml @@ -0,0 +1,45 @@ +--- +- module_defaults: + opentelekomcloud.cloud.security_group: + cloud: "{{ test_cloud }}" + block: + - name: Set random prefix + set_fact: + prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" + + - name: Set initial facts + set_fact: + security_group_name: "{{ ( prefix + 'security_group') }}" + + - name: Create security group + opentelekomcloud.cloud.security_group: + state: present + name: "{{ security_group_name }}" + description: security group for foo servers + exclusive: true + security_group_rules: + - "direction": "egress" + "ethertype": "IPv4" + "port_range_min": "1" + "port_range_max": "50000" + "protocol": "tcp" + - "direction": "egress" + "ethertype": "IPv6" + - "direction": "ingress" + "ethertype": "IPv4" + "protocol": "icmp" + register: sg + + - name: assert result + assert: + that: + - sg is success + - sg is changed + + always: + - block: + 
# Cleanup + - name: Drop security group + opentelekomcloud.cloud.security_group: + name: "{{ security_group_name }}" + state: "absent" diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index fca6c949..fbe888c6 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -56,6 +56,7 @@ plugins/modules/rds_datastore_info.py validate-modules:missing-gplv3-license plugins/modules/rds_flavor_info.py validate-modules:missing-gplv3-license plugins/modules/rds_instance.py validate-modules:missing-gplv3-license plugins/modules/rds_instance_info.py validate-modules:missing-gplv3-license +plugins/modules/security_group.py validate-modules:missing-gplv3-license plugins/modules/security_group_info.py validate-modules:missing-gplv3-license plugins/modules/tag.py validate-modules:missing-gplv3-license plugins/modules/volume_backup.py validate-modules:missing-gplv3-license diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 45883eff..2e4abebb 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -57,6 +57,7 @@ plugins/modules/rds_datastore_info.py validate-modules:missing-gplv3-license plugins/modules/rds_flavor_info.py validate-modules:missing-gplv3-license plugins/modules/rds_instance.py validate-modules:missing-gplv3-license plugins/modules/rds_instance_info.py validate-modules:missing-gplv3-license +plugins/modules/security_group.py validate-modules:missing-gplv3-license plugins/modules/security_group_info.py validate-modules:missing-gplv3-license plugins/modules/tag.py validate-modules:missing-gplv3-license plugins/modules/volume_backup.py validate-modules:missing-gplv3-license From 4c2ff5376a1814bc172a5cf4c859db740fb17117 Mon Sep 17 00:00:00 2001 From: Polina Gubina <33940358+Polina-Gubina@users.noreply.github.com> Date: Thu, 16 Dec 2021 17:23:00 +0300 Subject: [PATCH 26/65] Vpc module (#157) Vpc module Reviewed-by: None Reviewed-by: Vladimir Vshivkov Reviewed-by: Anton Kachurin Reviewed-by: Anton Sidelnikov --- meta/runtime.yml | 1 + plugins/modules/vpc.py | 196 ++++++++++++++++++ tests/integration/targets/vpc/tasks/main.yaml | 65 ++++++ tests/sanity/ignore-2.10.txt | 1 + tests/sanity/ignore-2.9.txt | 1 + 5 files changed, 264 insertions(+) create mode 100644 plugins/modules/vpc.py create mode 100644 tests/integration/targets/vpc/tasks/main.yaml diff --git a/meta/runtime.yml b/meta/runtime.yml index cb52ba8f..9414c135 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -50,6 +50,7 @@ action_groups: - volume_backup - waf_certificate - waf_certificate_info + - vpc - volume_backup_info - volume_snapshot_info - vpc_peering diff --git a/plugins/modules/vpc.py b/plugins/modules/vpc.py new file mode 100644 index 00000000..c18ab5c8 --- /dev/null +++ b/plugins/modules/vpc.py @@ -0,0 +1,196 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +DOCUMENTATION = ''' +--- +module: vpc +short_description: Create or delete vpc from Open Telekom Cloud +extends_documentation_fragment: opentelekomcloud.cloud.otc +author: "Polina Gubina (@polina-gubina)" +description: + - Create or Delete vpc from OpenStack. +options: + state: + description: Indicate desired state of the resource + choices: ['present', 'absent'] + default: present + type: str + name: + description: Name to be give to the router. + required: false + type: str + description: + description: Provides supplementary information about the VPC. + required: false + type: str + cidr: + description: + - Specifies the available IP address ranges for subnets in the VPC. + - If cidr is not specified, the default value is left blank. + - The value must be in CIDR format, for example, 192.168.0.0/16. + required: false + type: str + routes: + description: Specifies the route list. + required: false + type: list + elements: dict + suboptions: + destination: + description: + - Specifies the destination network segment of a route. + - The value must be in the CIDR format. Currently, only the value + 0.0.0.0/0 is supported. + type: str + required: false + nexthop: + description: + - Specifies the next hop of a route. + - The value must be an IP address and must belong to the subnet + in the VPC. Otherwise, this value does not take effect. + type: str + required: false + enabled_shared_snat: + description: Specifies whether the shared SNAT function is enabled. + required: false + type: bool +requirements: ["openstacksdk", "otcextensions"] +''' + +EXAMPLES = ''' +- name: Create vpc + opentelekomcloud.cloud.vpc: + name: "vpc-test" + cidr: "192.168.0.0/24" + state: present + +- name: Update vpc + opentelekomcloud.cloud.vpc: + name: "vpc-test" + description: "New description" + +- name: Delete vpc + opentelekomcloud.cloud.vpc: + name: "vpc-test" + state: absent +''' + +RETURN = ''' +router: + description: Dictionary describing the router. + returned: On success when I(state) is 'present' + type: complex + contains: + id: + description: Router ID. + type: str + sample: "474acfe5-be34-494c-b339-50f06aa143e4" + name: + description: Router name. + type: str + sample: "router1" + admin_state_up: + description: Administrative state of the router. + type: bool + sample: true + status: + description: The router status. + type: str + sample: "ACTIVE" + tenant_id: + description: The tenant ID. + type: str + sample: "861174b82b43463c9edc5202aadc60ef" + external_gateway_info: + description: The external gateway parameters. + type: dict + sample: { + "enable_snat": true, + "external_fixed_ips": [ + { + "ip_address": "10.6.6.99", + "subnet_id": "4272cb52-a456-4c20-8f3c-c26024ecfa81" + } + ] + } + routes: + description: The extra routes configuration for L3 router. 
+ type: list +''' + + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class VpcModule(OTCModule): + argument_spec = dict( + state=dict(default='present', choices=['absent', 'present']), + name=dict(required=False), + description=dict(required=False), + cidr=dict(required=False), + routes=dict(type='list', elements='dict', required=False), + enabled_shared_snat=dict(type='bool', required=False) + ) + + def run(self): + + query = {} + state = self.params['state'] + name = self.params['name'] + description = self.params['description'] + cidr = self.params['cidr'] + routes = self.params['routes'] + enabled_shared_snat = self.params['enabled_shared_snat'] + + if name: + query['name'] = name + if description: + query['description'] = description + if cidr: + query['cidr'] = cidr + + vpc = None + if name: + vpc = self.conn.vpc.find_vpc(name, ignore_missing=True) + + if state == 'present': + if self.ansible.check_mode: + self.exit(changed=True) + + if not vpc: + new_vpc = self.conn.vpc.create_vpc(**query) + self.exit(changed=True, vpc=new_vpc) + else: + if routes: + query['routes'] = routes + if enabled_shared_snat: + query['enabled_shared_snat'] = enabled_shared_snat + updated_vpc = self.conn.vpc.update_vpc(vpc=vpc, **query) + self.exit(changed=True, vpc=updated_vpc) + else: + if vpc: + if self.ansible.check_mode: + self.exit(changed=True) + self.conn.network.delete_router(vpc.id) + self.exit(changed=True) + else: + self.exit(changed=False) + + +def main(): + module = VpcModule() + module() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/vpc/tasks/main.yaml b/tests/integration/targets/vpc/tasks/main.yaml new file mode 100644 index 00000000..456d3229 --- /dev/null +++ b/tests/integration/targets/vpc/tasks/main.yaml @@ -0,0 +1,65 @@ +--- +- module_defaults: + opentelekomcloud.cloud.vpc: + cloud: "{{ test_cloud }}" + + block: + - name: Set random prefix + set_fact: + prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" + + - name: Set initial facts + set_fact: + vpc_name: "{{ ( prefix + '_vpc') }}" + cidr: "192.168.0.0/24" + + - name: Create VPC + opentelekomcloud.cloud.vpc: + name: "{{ vpc_name }}" + cidr: "{{ cidr }}" + state: present + check_mode: true + register: vpc_check_mode + + - name: assert result + assert: + that: + - vpc_check_mode is success + - vpc_check_mode is not changed + + - name: Create VPC + opentelekomcloud.cloud.vpc: + name: "{{ vpc_name }}" + cidr: "{{ cidr }}" + state: present + register: vpc + + - name: assert result + assert: + that: + - vpc is success + - vpc is changed + + - name: Update VPC + opentelekomcloud.cloud.vpc: + name: "{{ vpc_name }}" + description: "Test vpc" + register: updated_vpc + + - name: assert result + assert: + that: + - updated_vpc is success + - updated_vpc is defined + + - name: Delete VPC + opentelekomcloud.cloud.vpc: + name: "{{ vpc_name }}" + state: "absent" + register: deleted_vpc + + - name: assert result + assert: + that: + - deleted_vpc is success + - deleted_vpc is changed diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index fbe888c6..8288fd30 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -59,6 +59,7 @@ plugins/modules/rds_instance_info.py validate-modules:missing-gplv3-license plugins/modules/security_group.py validate-modules:missing-gplv3-license plugins/modules/security_group_info.py validate-modules:missing-gplv3-license plugins/modules/tag.py validate-modules:missing-gplv3-license 
+plugins/modules/vpc.py validate-modules:missing-gplv3-license plugins/modules/volume_backup.py validate-modules:missing-gplv3-license plugins/modules/waf_certificate.py validate-modules:missing-gplv3-license plugins/modules/waf_certificate_info.py validate-modules:missing-gplv3-license diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 2e4abebb..8141cecf 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -65,6 +65,7 @@ plugins/modules/waf_certificate.py validate-modules:missing-gplv3-license plugins/modules/waf_certificate_info.py validate-modules:missing-gplv3-license plugins/modules/volume_backup_info.py validate-modules:missing-gplv3-license plugins/modules/volume_snapshot_info.py validate-modules:missing-gplv3-license +plugins/modules/vpc.py validate-modules:missing-gplv3-license plugins/modules/vpc_peering.py validate-modules:missing-gplv3-license plugins/modules/vpc_peering_info.py validate-modules:missing-gplv3-license plugins/modules/vpc_peering_mode.py validate-modules:missing-gplv3-license From 249c16ec4062485ba267914886032fdbcd3ddeb1 Mon Sep 17 00:00:00 2001 From: Vladimir Vshivkov <32225815+enrrou@users.noreply.github.com> Date: Wed, 22 Dec 2021 04:00:57 -0500 Subject: [PATCH 27/65] css cluster module (#147) css cluster module resolves #146 Reviewed-by: Anton Kachurin Reviewed-by: Vladimir Vshivkov Reviewed-by: None Reviewed-by: Anton Sidelnikov --- meta/runtime.yml | 1 + plugins/modules/css_cluster.py | 347 ++++++++++++++++++ tests/integration/targets/as_instance/aliases | 1 + tests/integration/targets/css_cluster/aliases | 1 + .../targets/css_cluster/tasks/main.yaml | 120 ++++++ .../targets/css_cluster_info/aliases | 1 + tests/sanity/ignore-2.10.txt | 1 + tests/sanity/ignore-2.9.txt | 1 + 8 files changed, 473 insertions(+) create mode 100644 plugins/modules/css_cluster.py create mode 100644 tests/integration/targets/as_instance/aliases create mode 100644 tests/integration/targets/css_cluster/aliases create mode 100644 tests/integration/targets/css_cluster/tasks/main.yaml create mode 100644 tests/integration/targets/css_cluster_info/aliases diff --git a/meta/runtime.yml b/meta/runtime.yml index 9414c135..6b91e8e6 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -19,6 +19,7 @@ action_groups: - cce_cluster_node - cce_node_pool_info - cce_node_pool + - css_cluster - css_cluster_info - css_snapshot_info - deh_host diff --git a/plugins/modules/css_cluster.py b/plugins/modules/css_cluster.py new file mode 100644 index 00000000..15abb650 --- /dev/null +++ b/plugins/modules/css_cluster.py @@ -0,0 +1,347 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +module: css_cluster +short_description: Manage CSS clusters +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: 0.9.0 +author: Vladimir Vshivkov (@enrrou) +description: + - Manage CSS clusters +options: + name: + description: + - Cluster name. + - It contains 4 to 32 characters. 
Only letters, digits, hyphens (-), and + underscores (_) are allowed. + - The value must start with a letter. + required: true + type: str + datastore_version: + description: + - Engine version. The value can be 6.2.3, 7.1.1 or 7.6.2. + - The default value is 7.6.2. + type: str + choices: [6.2.3, 7.1.1, 7.6.2] + default: 7.6.2 + datastore_type: + description: + - Engine type. + - The default value is elasticsearch. Currently, the value can only be + elasticsearch. + type: str + default: elasticsearch + instance_num: + description: + - Number of clusters. + - The value range is 1 to 32. + type: int + flavor: + description: Instance flavor name. + type: str + volume_type: + description: + - Information about the volume. + - COMMON Common I/O + - HIGH High I/O + - ULTRAHIGH Ultra-high I/O + type: str + choices: + - common + - high + - ultrahigh + volume_size: + description: + - 'Volume size, which must be a multiple of 4 and 10.' + - Unit GB + type: int + system_encrypted: + description: + - Value 1 indicates encryption is performed + - Value 0 indicates encryption is not performed. + choices: + - '0' + - '1' + type: int + system_cmkid: + description: + - Key ID. + - The Default Master Keys cannot be used to create grants. Specifically, + you cannot use Default Master Keys whose aliases end with /default in + KMS to create clusters. + - After a cluster is created, do not delete the key used by the cluster. + Otherwise, the cluster will become unavailable. + type: str + https_enable: + type: bool + description: + - Whether communication is encrypted on the cluster. + - Available values include true and false. By default, communication is + encrypted. + - Value true indicates that communication is encrypted on the cluster. + - Value false indicates that communication is not encrypted on the + cluster. + authority_enable: + type: bool + description: + - Whether to enable authentication. + - Available values include true and false. + - Authentication is disabled by default. + - 'When authentication is enabled, httpsEnable must be set to true.' + admin_pwd: + description: + - Password of the cluster user admin in security mode. + - This parameter is mandatory only when authority_enable is set to true. + - The password can contain 8 to 32 characters. + - Passwords must contain at least 3 of the following character types + uppercase letters, lowercase letters, numbers, and special characters + (~!@#$%^&*()-_=+\\|[{}];:,<.>/?). + type: str + router: + description: 'VPC ID, which is used for configuring cluster network.' + type: str + net: + description: + - Subnet ID. All instances in a cluster must have the same subnets and + security groups. + type: str + security_group: + description: + - Security group ID. All instances in a cluster must have the same subnets + and security groups. + type: str + tag_key: + description: + - Tag key. The value can contain 1 to 36 characters. Only digits, letters, + hyphens (-) and underscores (_) are allowed. + type: str + tag_value: + description: + - Tag value. The value can contain 0 to 43 characters. Only digits, + letters, hyphens (-) and underscores (_) are allowed. + type: str + backup_period: + description: + - Time when a snapshot is created every day. Snapshots can only be created + on the hour. The time format is the time followed by the time zone, + specifically, HH:mm z. In the format, HH:mm refers to the hour time and + z refers to the time zone, for example, 00:00 GMT+08:00 and 01:00 + GMT+08:00. 
+ type: str + backup_prefix: + description: Prefix of the name of the snapshot that is automatically created. + type: str + backup_keepday: + description: + - Number of days for which automatically created snapshots are reserved. + - Value range is 1 to 90 + type: int + state: + description: Instance state + type: str + choices: + - present + - absent + default: present +''' + +RETURN = ''' +cluster: + description: Dictionary of CSS cluster + returned: changed + type: list + sample: [ + { + "cluster": { + "id": "ef683016-871e-48bc-bf93-74a29d60d214", + "name": "ES-Test" + } + } + ] +''' + +EXAMPLES = ''' +#Create CSS Cluster +--- +- hosts: localhost + tasks: + - name: Create CSS cluster + opentelekomcloud.cloud.css_cluster: + name: ES-Test + state: present + instance_num: 3 + volume_size: 40 + authority_enable: false + volume_type: common + router: '{{ router_id }}' + net: '{{ net_id }}' + security_group: '{{ security_group_id }}' + flavor: 'css.xlarge.2' + https_enable: false + system_encrypted: 0 + +#Delete CSS Cluster +- hosts: localhost + tasks: + - name: Create CSS cluster + opentelekomcloud.cloud.css_cluster: + name: ES-Test + state: absent +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class CssClusterModule(OTCModule): + argument_spec = dict( + name=dict(type='str', required=True), + datastore_version=dict(type='str', choices=['6.2.3', '7.1.1', '7.6.2'], default='7.6.2'), + datastore_type=dict(type='str', default='elasticsearch'), + instance_num=dict(type='int'), + flavor=dict(type='str'), + volume_type=dict(type='str', choices=['common', 'high', 'ultrahigh']), + volume_size=dict(type='int'), + system_encrypted=dict(type='int', choices=[0, 1]), + system_cmkid=dict(type='str'), + https_enable=dict(type='bool'), + authority_enable=dict(type='bool'), + admin_pwd=dict(type='str'), + router=dict(type='str'), + net=dict(type='str'), + security_group=dict(type='str'), + tag_key=dict(type='str'), + tag_value=dict(type='str'), + backup_period=dict(type='str'), + backup_prefix=dict(type='str'), + backup_keepday=dict(type='int'), + state=dict(type='str', + choices=['present', 'absent'], + default='present') + ) + module_kwargs = dict( + required_if=[ + ('state', 'present', + ['flavor', 'router', 'net', + 'security_group', 'instance_num']), + ('backup_period', not None, ['backup_keepday']), + ('backup_keepday', not None, ['backup_period']), + ('authority_enable', 'true', + ['admin_pwd']), + ('system_encrypted', '1', + ['system_cmkid']) + ], + supports_check_mode=True + ) + + def _system_state_change(self, cluster): + state = self.params['state'] + if state == 'present': + if not cluster: + return True + elif state == 'absent' and cluster: + return True + return False + + def run(self): + attrs = {} + + cluster = None + changed = False + + cluster = self.conn.css.find_cluster( + name_or_id=self.params['name'], + ignore_missing=True + ) + + if self.ansible.check_mode: + self.exit_json(changed=self._system_state_change(cluster)) + + # Delete cluster + if self.params['state'] == 'absent': + if cluster: + changed = True + self.conn.css.delete_cluster(cluster=cluster.id, + ignore_missing=True) + self.exit_json(changed=changed) + + # Create cluster + elif self.params['state'] == 'present': + if cluster: + self.exit(changed=changed) + + if not cluster: + changed = True + + volume_type = self.params['volume_type'] + + attrs = { + 'name': self.params['name'], + 'datastore': { + 'type': self.params['datastore_type'], + 'version': 
self.params['datastore_version'] + }, + 'instance': { + "flavorRef": self.params['flavor'], + 'nics': { + 'netId': self.params['net'], + 'vpcId': self.params['router'], + 'securityGroupId': self.params['security_group'] + }, + 'volume': { + 'volume_type': volume_type.upper(), + 'size': self.params['volume_size'] + } + }, + 'diskEncryption': { + 'systemEncrypted': self.params['system_encrypted'] + } + } + + if self.params['system_cmkid']: + attrs['diskEncryption']['systemCmkid'] = self.params['system_cmkid'] + if self.params['instance_num']: + attrs['instanceNum'] = self.params['instance_num'] + if self.params['https_enable']: + attrs['httpsEnable'] = self.params['https_enable'] + if self.params['authority_enable']: + attrs['authorityEnable'] = self.params['authority_enable'] + if self.params['admin_pwd']: + attrs['adminPwd'] = self.params['admin_pwd'] + if self.params['tag_key']: + attrs['tags']['key'] = self.params['tag_key'] + if self.params['tag_value']: + attrs['tags']['value'] = self.params['tag_value'] + if self.params['backup_period']: + attrs['backupStrategy']['period'] = self.params['backup_period'] + if self.params['backup_prefix']: + attrs['backupStrategy']['prefix'] = self.params['backup_prefix'] + if self.params['backup_keepday']: + attrs['backupStrategy']['keepday'] = self.params['backup_keepday'] + + cluster = self.conn.css.create_cluster(**attrs) + + self.exit_json( + changed=changed, + css_cluster=cluster.to_dict(), + id=cluster.id + ) + + +def main(): + module = CssClusterModule() + module() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/as_instance/aliases b/tests/integration/targets/as_instance/aliases new file mode 100644 index 00000000..7a68b11d --- /dev/null +++ b/tests/integration/targets/as_instance/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/css_cluster/aliases b/tests/integration/targets/css_cluster/aliases new file mode 100644 index 00000000..7a68b11d --- /dev/null +++ b/tests/integration/targets/css_cluster/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/css_cluster/tasks/main.yaml b/tests/integration/targets/css_cluster/tasks/main.yaml new file mode 100644 index 00000000..48e78517 --- /dev/null +++ b/tests/integration/targets/css_cluster/tasks/main.yaml @@ -0,0 +1,120 @@ +--- +- module_defaults: + opentelekomcloud.cloud.css_cluster: + cloud: "{{ test_cloud }}" + vars: + prefix: test-a- + block: + - name: Set random prefix + set_fact: + prefix: "{{ (prefix + (99999999 | random | to_uuid | hash('md5'))) }}" + short_prefix: "{{ (prefix + (99999999999 | random | to_uuid | hash('md5') | truncate(12,end='') )) }}" + + - name: Set initial facts + set_fact: + network_name: "{{ ( prefix + '-test-network') }}" + subnet_name: "{{ ( prefix + '-test-subnet') }}" + router_name: "{{ ( prefix + '-test-router') }}" + security_group_name: "{{ ( prefix + '-security_group') }}" + css_cluster_name: "{{ ( short_prefix + 'css-test') }}" + css_flavor: "css.xlarge.2" + cidr: "192.168.0.0/24" + + - name: Create network for test + openstack.cloud.network: + cloud: "{{ test_cloud }}" + name: "{{ network_name }}" + state: present + register: test_network + + - name: Create subnet for test + openstack.cloud.subnet: + cloud: "{{ test_cloud }}" + name: "{{ subnet_name }}" + state: present + network_name: "{{ test_network.network.name }}" + cidr: "{{ cidr }}" + dns_nameservers: "{{ ['100.125.4.25', '8.8.8.8'] }}" + register: test_subnet + + - name: Create router for test + openstack.cloud.router: + cloud: "{{ 
test_cloud }}" + name: "{{ router_name }}" + state: present + network: admin_external_net + enable_snat: true + interfaces: + - net: "{{ test_network.network.name }}" + subnet: "{{ test_subnet.subnet.name }}" + register: test_router + + - name: Add cidr for vpc + opentelekomcloud.cloud.vpc: + name: "{{ router_name }}" + state: present + cidr: "{{ cidr }}" + + - name: Create security group for test + openstack.cloud.security_group: + cloud: "{{ test_cloud }}" + state: present + name: "{{ security_group_name }}" + description: security group for test + register: test_security_group + + - name: Create CSS Cluster + opentelekomcloud.cloud.css_cluster: + name: "{{ css_cluster_name }}" + state: present + instance_num: 3 + volume_size: 40 + authority_enable: false + datastore_version: 7.6.2 + volume_type: common + router: "{{ test_router.router.id }}" + net: "{{ test_network.id }}" + security_group: "{{ test_security_group.id }}" + flavor: "{{ css_flavor }}" + https_enable: false + system_encrypted: 0 + register: cluster + + - name: assert result + assert: + that: + - cluster is success + - cluster is changed + + always: + - block: + # Cleanup + - name: Drop cluster + opentelekomcloud.cloud.css_cluster: + name: "{{ css_cluster_name }}" + state: "absent" + + - name: Drop router + openstack.cloud.router: + cloud: "{{ test_cloud }}" + name: "{{ router_name }}" + state: absent + + - name: Drop subnet + openstack.cloud.subnet: + cloud: "{{ test_cloud }}" + name: "{{ subnet_name }}" + state: absent + + - name: Drop network + openstack.cloud.network: + cloud: "{{ test_cloud }}" + name: "{{ network_name }}" + state: absent + + - name: Drop security group + openstack.cloud.security_group: + cloud: "{{ test_cloud }}" + state: absent + name: "{{ security_group_name }}" + ignore_errors: true diff --git a/tests/integration/targets/css_cluster_info/aliases b/tests/integration/targets/css_cluster_info/aliases new file mode 100644 index 00000000..7a68b11d --- /dev/null +++ b/tests/integration/targets/css_cluster_info/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 8288fd30..27a122d3 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -21,6 +21,7 @@ plugins/modules/ces_metrics_info.py validate-modules:missing-gplv3-license plugins/modules/ces_quotas_info.py validate-modules:missing-gplv3-license plugins/modules/cce_cluster_node_info.py validate-modules:missing-gplv3-license plugins/modules/cce_cluster_node.py validate-modules:missing-gplv3-license +plugins/modules/css_cluster.py validate-modules:missing-gplv3-license plugins/modules/css_cluster_info.py validate-modules:missing-gplv3-license plugins/modules/css_snapshot_info.py validate-modules:missing-gplv3-license plugins/modules/dms_instance.py validate-modules:missing-gplv3-license diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 8141cecf..c602f5b3 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -21,6 +21,7 @@ plugins/modules/ces_metric_data_info.py validate-modules:missing-gplv3-license plugins/modules/ces_metrics_info.py validate-modules:missing-gplv3-license plugins/modules/ces_quotas_info.py validate-modules:missing-gplv3-license plugins/modules/cce_cluster_node_info.py validate-modules:missing-gplv3-license +plugins/modules/css_cluster.py validate-modules:missing-gplv3-license plugins/modules/css_cluster_info.py validate-modules:missing-gplv3-license plugins/modules/css_snapshot_info.py 
validate-modules:missing-gplv3-license plugins/modules/dms_instance.py validate-modules:missing-gplv3-license From 8d763070a11c60b59c587f5d3e4041f4198f7c59 Mon Sep 17 00:00:00 2001 From: Vladimir Vshivkov <32225815+enrrou@users.noreply.github.com> Date: Tue, 28 Dec 2021 06:25:35 -0500 Subject: [PATCH 28/65] Css snapshot module (#145) Css snapshot module Resolves #143 Reviewed-by: None Reviewed-by: Rodion Gyrbu Reviewed-by: Anton Sidelnikov --- .gitignore | 5 + meta/runtime.yml | 1 + plugins/modules/css_snapshot.py | 161 ++++++++++++++++ .../integration/targets/css_snapshot/aliases | 1 + .../targets/css_snapshot/tasks/main.yml | 182 ++++++++++++++++++ tests/sanity/ignore-2.10.txt | 1 + tests/sanity/ignore-2.9.txt | 1 + 7 files changed, 352 insertions(+) create mode 100644 plugins/modules/css_snapshot.py create mode 100644 tests/integration/targets/css_snapshot/aliases create mode 100644 tests/integration/targets/css_snapshot/tasks/main.yml diff --git a/.gitignore b/.gitignore index c3148aaf..3b333a3a 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,8 @@ importer_result.json *.tar.gz doc/build tmp + +#idea +.idea/** +*/.idea/** + diff --git a/meta/runtime.yml b/meta/runtime.yml index 6b91e8e6..da9fb436 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -21,6 +21,7 @@ action_groups: - cce_node_pool - css_cluster - css_cluster_info + - css_snapshot - css_snapshot_info - deh_host - deh_host_info diff --git a/plugins/modules/css_snapshot.py b/plugins/modules/css_snapshot.py new file mode 100644 index 00000000..d416602d --- /dev/null +++ b/plugins/modules/css_snapshot.py @@ -0,0 +1,161 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +module: css_snapshot +short_description: Manage CSS snapshots +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.10.0" +author: "Vladimir Vshivkov (@enrrou)" +description: + - Manage CSS snapshots +options: + cluster: + description: + - Name or ID of CSS cluster. + type: str + name: + description: + - Name of CSS snapshot name must be start with letter. + - Name must be 4 to 64 characters in length. + - The backup name must be unique. + type: str + description: + description: + - Description of a snapshot. + - The value contains 0 to 256 characters, and angle brackets (<) and (>) are not allowed. + type: str + indices: + description: + - Name of the index to be backed up. + - Multiple index names are separated by commas (,). + - By default, data of all indices is backed up. + type: str + state: + description: Whether css snapshot should be present or absent. + choices: [present, absent] + default: present + type: str +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +snapshots: + description: Specifies the CSS snapshot. + returned: changed + type: complex + contains: + id: + description: ID of the snapshot. + returned: On success when C(state=present) + type: str + sample: "4dae5bac-0925-4d5b-add8-cb6667b8" + name: + description: Snapshot name. 
+ returned: On success when C(state=present) + type: str + sample: "snapshot_101" +''' + +EXAMPLES = ''' +# Create css snapshot +- opentelekomcloud.cloud.css_snapshot: + cluster: "test-css" + name: "snapshot_01" + register: css_snapshot + +# Delete css snapshot +- opentelekomcloud.cloud.css_snapshot: + cluster: "test-css" + name: "snapshot_01" + state: absent +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class CssSnapshotModule(OTCModule): + argument_spec = dict( + name=dict(), + cluster=dict(), + description=dict(), + indices=dict(), + state=dict(choices=['present', 'absent'], + default='present') + ) + module_kwargs = dict( + supports_check_mode=True + ) + + def run(self): + attrs = {} + name = self.params['name'] + snapshot_description = self.params['description'] + + if self.params['description']: + attrs['description'] = self.params['description'] + if self.params['indices']: + attrs['indices'] = self.params['indices'] + + if self.params['name']: + if self.params['cluster']: + cluster = self.conn.css.find_cluster(name_or_id=attrs['cluster']) + + if cluster: + changed = False + + if self.ansible.check_mode: + self.exit(changed=self._system_state_change(name)) + + if self.params['state'] == 'present': + attrs['name'] = name + + if snapshot_description: + attrs['description'] = snapshot_description + + snapshot = self.conn.css.create_snapshot(cluster, **attrs) + changed = True + + self.exit(changed=changed, + snapshot=snapshot.to_dict(), + id=snapshot.id, + msg='CSS snapshot with name %s was created' % name) + + else: + changed = False + self.fail(changed=changed, + msg='CSS snapshot with name %s ' + 'already exists' % name) + + elif self.params['state'] == 'absent': + self.conn.css.delete_snapshot(name, cluster) + changed = True + + self.exit(changed=changed, + msg='CSS snapshot with name %s was deleted' % name) + + else: + changed = False + self.fail(changed=changed, + msg='CSS snapshot with name %s does not exist' % name) + else: + self.fail(msg='CSS snapshot %s does not exist' % self.params['cluster']) + + +def main(): + module = CssSnapshotModule() + module() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/css_snapshot/aliases b/tests/integration/targets/css_snapshot/aliases new file mode 100644 index 00000000..7a68b11d --- /dev/null +++ b/tests/integration/targets/css_snapshot/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/css_snapshot/tasks/main.yml b/tests/integration/targets/css_snapshot/tasks/main.yml new file mode 100644 index 00000000..a8c16f1a --- /dev/null +++ b/tests/integration/targets/css_snapshot/tasks/main.yml @@ -0,0 +1,182 @@ +--- +- module_defaults: + css_snapshot: + cloud: "{{ test_cloud }}" + vars: + prefix: css_cluster_snapshot_test + block: + - name: Set random prefix + set_fact: + prefix: "{{ (prefix + (99999999 | random | to_uuid | hash('md5'))) }}" + short_prefix: "{{ (prefix + (99999999999 | random | to_uuid | hash('md5') | truncate(12,end='') )) }}" + + - name: Set initial facts + set_fact: + network_name: "{{ ( prefix + '-test-network') }}" + subnet_name: "{{ ( prefix + '-test-subnet') }}" + router_name: "{{ ( prefix + '-test-router') }}" + security_group_name: "{{ ( prefix + '-security_group') }}" + css_cluster_name: "{{ ( short_prefix + 'css-test') }}" + css_flavor: "css.xlarge.2" + cidr: "192.168.0.0/24" + snapshot_name: "{{ ( prefix + '-snapshot') }}" + + - name: Create network for test + openstack.cloud.network: + cloud: "{{ test_cloud }}" 
+ name: "{{ network_name }}" + state: present + register: test_network + + - name: Create subnet for test + openstack.cloud.subnet: + cloud: "{{ test_cloud }}" + name: "{{ subnet_name }}" + state: present + network_name: "{{ test_network.network.name }}" + cidr: "{{ cidr }}" + dns_nameservers: "{{ ['100.125.4.25', '8.8.8.8'] }}" + register: test_subnet + + - name: Create router for test + openstack.cloud.router: + cloud: "{{ test_cloud }}" + name: "{{ router_name }}" + state: present + network: admin_external_net + enable_snat: true + interfaces: + - net: "{{ test_network.network.name }}" + subnet: "{{ test_subnet.subnet.name }}" + register: test_router + + - name: Add cidr for vpc + opentelekomcloud.cloud.vpc: + name: "{{ router_name }}" + state: present + cidr: "{{ cidr }}" + + - name: Create security group for test + openstack.cloud.security_group: + cloud: "{{ test_cloud }}" + state: present + name: "{{ security_group_name }}" + description: security group for test + register: test_security_group + + - name: Create CSS Cluster + opentelekomcloud.cloud.css_cluster: + name: "{{ css_cluster_name }}" + state: present + instance_num: 3 + volume_size: 40 + authority_enable: false + datastore_version: 7.6.2 + volume_type: common + router: "{{ test_router.router.id }}" + net: "{{ test_network.id }}" + security_group: "{{ test_security_group.id }}" + flavor: "{{ css_flavor }}" + https_enable: false + system_encrypted: 0 + register: cluster + + - name: Create CSS snapshot + opentelekomcloud.cloud.css_snapshot: + cluster: "{{ css_cluster_name }}" + name: "{{ snapshot_name }}" + register: snapshot + + - name: assert result + assert: + that: + - snapshot is success + - snapshot is not changed + - snapshot.snapshots is defined + + - name: Create CSS snapshot - check mode + opentelekomcloud.cloud.css_snapshot: + cluster: "{{ css_cluster_name }}" + name: "{{ snapshot_name }}" + register: created_snapshot + check_mode: true + + - name: assert result + assert: + that: + - created_snapshot is success + - created_snapshot is not changed + + - name: Create CSS snapshot + opentelekomcloud.cloud.css_snapshot: + cluster: "{{ css_cluster_name }}" + name: "{{ snapshot_name }}" + register: created_snapshot + + - name: assert result + assert: + that: + - created_snapshot is success + - created_snapshot is changed + - created_snapshot.snapshots is defined + + - name: Delete css snapshot - check mode + opentelekomcloud.cloud.css_snapshot: + cluster: "{{ css_cluster_name }}" + name: "{{ snapshot_name }}" + state: "absent" + register: deleted_snapshot + check_mode: true + + - name: assert result + assert: + that: + - deleted_snapshot is not changed + - deleted_snapshot is success + + - name: Delete css snapshot + opentelekomcloud.cloud.css_snapshot: + cluster: "{{ css_cluster_name }}" + name: "{{ snapshot_name }}" + state: "absent" + register: deleted_snapshot + + - name: assert result + assert: + that: + - deleted_snapshot is changed + - deleted_snapshot is success + + always: + - block: + # Cleanup + - name: Drop cluster + opentelekomcloud.cloud.css_cluster: + cloud: "{{ test_cloud }}" + name: "{{ css_cluster_name }}" + state: absent + + - name: Drop router + openstack.cloud.router: + cloud: "{{ test_cloud }}" + name: "{{ router_name }}" + state: absent + + - name: Drop subnet + openstack.cloud.subnet: + cloud: "{{ test_cloud }}" + name: "{{ subnet_name }}" + state: absent + + - name: Drop network + openstack.cloud.network: + cloud: "{{ test_cloud }}" + name: "{{ network_name }}" + state: absent + + - name: Drop 
security group + openstack.cloud.security_group: + cloud: "{{ test_cloud }}" + state: absent + name: "{{ security_group_name }}" + ignore_errors: true diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 27a122d3..7fb018c4 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -24,6 +24,7 @@ plugins/modules/cce_cluster_node.py validate-modules:missing-gplv3-license plugins/modules/css_cluster.py validate-modules:missing-gplv3-license plugins/modules/css_cluster_info.py validate-modules:missing-gplv3-license plugins/modules/css_snapshot_info.py validate-modules:missing-gplv3-license +plugins/modules/css_snapshot.py validate-modules:missing-gplv3-license plugins/modules/dms_instance.py validate-modules:missing-gplv3-license plugins/modules/dms_instance_info.py validate-modules:missing-gplv3-license plugins/modules/dms_instance_topic.py validate-modules:missing-gplv3-license diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index c602f5b3..1135b51e 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -23,6 +23,7 @@ plugins/modules/ces_quotas_info.py validate-modules:missing-gplv3-license plugins/modules/cce_cluster_node_info.py validate-modules:missing-gplv3-license plugins/modules/css_cluster.py validate-modules:missing-gplv3-license plugins/modules/css_cluster_info.py validate-modules:missing-gplv3-license +plugins/modules/css_snapshot.py validate-modules:missing-gplv3-license plugins/modules/css_snapshot_info.py validate-modules:missing-gplv3-license plugins/modules/dms_instance.py validate-modules:missing-gplv3-license plugins/modules/dms_instance_info.py validate-modules:missing-gplv3-license From 0839e7a41305a567139e9338314ccfa511f9684f Mon Sep 17 00:00:00 2001 From: Anton Kachurin Date: Tue, 28 Dec 2021 15:59:52 +0300 Subject: [PATCH 29/65] Add `subnet` module (#159) Add `subnet` module Resolve #160 Reviewed-by: None Reviewed-by: Anton Sidelnikov --- meta/runtime.yml | 1 + plugins/modules/subnet.py | 348 ++++++++++++++++++ .../targets/subnet/tasks/main.yaml | 112 ++++++ tests/sanity/ignore-2.10.txt | 1 + tests/sanity/ignore-2.9.txt | 1 + 5 files changed, 463 insertions(+) create mode 100644 plugins/modules/subnet.py create mode 100644 tests/integration/targets/subnet/tasks/main.yaml diff --git a/meta/runtime.yml b/meta/runtime.yml index da9fb436..cf169dec 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -48,6 +48,7 @@ action_groups: - security_group - security_group_info - server_group_info + - subnet - tag - volume_backup - waf_certificate diff --git a/plugins/modules/subnet.py b/plugins/modules/subnet.py new file mode 100644 index 00000000..b9b1d8d5 --- /dev/null +++ b/plugins/modules/subnet.py @@ -0,0 +1,348 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
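# A minimal sketch of the `extra_dhcp_opts` option that the DOCUMENTATION below
# defines but the EXAMPLES section does not demonstrate; the subnet and VPC
# references are illustrative, and the NTP addresses are taken from the RETURN
# sample further down:
#
#   - name: Configure NTP servers on an existing subnet (illustrative values)
#     opentelekomcloud.cloud.subnet:
#       name: "test-subnet"
#       vpc_id: "{{ vpc.vpc.id }}"
#       extra_dhcp_opts:
#         - opt_name: "ntp"
#           opt_value: "10.100.0.33,10.100.0.34"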
+ +DOCUMENTATION = ''' +--- +module: subnet +short_description: Manage VPC subnet +extends_documentation_fragment: opentelekomcloud.cloud.otc +author: "Anton Kachurin (@outcatcher)" +description: + - Manage (create, update or delete) Open Telekom Cloud VPC subnet. +options: + state: + description: Indicate desired state of the resource. + choices: ['present', 'absent'] + default: present + type: str + name: + description: + - Specifies the subnet name or ID. + - When creating a new subnet, the value can contain 1 to 64 characters, + including letters, digits, underscores (_), hyphens (-), and periods (.). + type: str + required: true + description: + description: + - Provides supplementary information about the subnet. + - The value can contain no more than 255 characters and cannot contain angle brackets (< or >). + type: str + cidr: + description: + - Specifies the subnet CIDR block. + - The value must be within the VPC CIDR block. + - The value must be in CIDR format. The subnet mask cannot be greater than 28. + type: str + gateway_ip: + description: + - Specifies the gateway of the subnet. + - The value must be an IP address in the subnet. + - The value must be a valid IP address. + type: str + dhcp_enable: + description: + - Specifies whether DHCP is enabled for the subnet. + - The value can be true (enabled) or false (disabled). + - If this parameter is left blank, the system automatically sets it to true by default. + If this parameter is set to false, newly created ECSs cannot obtain IP addresses, and + usernames and passwords cannot be injected using Cloud-init. + type: bool + primary_dns: + type: str + description: + - Specifies the IP address of DNS server 1 on the subnet. + - The value must be an IP address. + secondary_dns: + type: str + description: + - Specifies the IP address of DNS server 2 on the subnet. + - The value must be an IP address. + dns_list: + description: + - Specifies the DNS server address list of a subnet. + - This field is required if use more than two DNS servers. + - This parameter value is the superset of both I(primary_dns) and I(secondary_dns). + type: list + elements: str + aliases: ['dnsList'] + availability_zone: + description: Specifies the AZ to which the subnet belongs. + type: str + vpc_id: + description: Specifies the ID of the VPC to which the subnet belongs. + required: true + type: str + extra_dhcp_opts: + description: Specifies the NTP server address configured for the subnet. + type: list + elements: dict + suboptions: + opt_value: + description: + - Specifies the NTP server address configured for the subnet. + - The option ntp for opt_name indicates the NTP server configured for the subnet. + Currently, only IPv4 addresses are supported. A maximum of four IP addresses can be + configured, and each address must be unique. Multiple IP addresses must be separated + using commas (,). The option null for opt_name indicates that no NTP server is configured + for the subnet. The parameter value cannot be an empty string. + type: str + opt_name: + description: + - Specifies the NTP server address name configured for the subnet. 
+ type: str + required: true + choices: ['ntp'] +requirements: ['openstacksdk', 'otcextensions>=0.24.5'] +''' + +EXAMPLES = ''' +- name: Create VPC + opentelekomcloud.cloud.vpc: + name: "vpc-test" + cidr: "192.168.0.0/16" + register: vpc + +- name: Create subnet + opentelekomcloud.cloud.subnet: + name: "test-subnet" + vpc_id: "{{ vpc.vpc.id }}" + cidr: "192.168.0.0/16" + gateway_ip: "192.168.0.1" + dns_list: + - "100.125.4.25" + - "100.125.129.199" + +- name: Update subnet + opentelekomcloud.cloud.subnet: + name: "test-subnet" + vpc_id: "{{ vpc.vpc.id }}" + dns_list: + - "100.125.4.25" + - "1.1.1.1" + dhcp_enable: false + +- name: Delete subnet + opentelekomcloud.cloud.subnet: + name: "test-subnet" + vpc_id: "{{ vpc.vpc.id }}" + state: absent +''' + +RETURN = ''' +subnet: + description: Created subnet resource. + returned: On success when I(state=present) + type: complex + contains: + id: + description: Specifies the resource identifier in the form of UUID. + type: str + sample: "0f21367c-022d-433e-8ddb-1c31a65a05b8" + name: + description: Specifies the subnet name. + type: str + sample: "test-subnet" + description: + description: Provides supplementary information about the subnet. + type: str + cidr: + description: Specifies the subnet CIDR block. + type: str + gateway_ip: + description: Specifies the gateway of the subnet. + type: str + dhcp_enable: + description: Specifies whether the DHCP function is enabled for the subnet. + type: bool + primary_dns: + description: Specifies the IP address of DNS server 1 on the subnet. + type: str + secondary_dns: + description: Specifies the IP address of DNS server 2 on the subnet. + type: str + dns_list: + description: Specifies the DNS server address list of a subnet. + type: list + elements: str + availability_zone: + description: Specifies the AZ to which the subnet belongs, which can be + obtained from endpoints. + type: str + vpc_id: + description: Specifies the ID of the VPC to which the subnet belongs. + type: str + neutron_network_id: + description: Specifies the ID of the corresponding network (OpenStack Neutron API). + type: str + sample: "0f21367c-022d-433e-8ddb-1c31a65a05b8" + neutron_subnet_id: + description: Specifies the ID of the corresponding subnet (OpenStack Neutron API). + type: str + sample: "235f5393-a5e0-4b7a-9655-70eb3c13e2fe" + extra_dhcp_opts: + description: Specifies the NTP server address configured for the subnet. 
+ type: list + elements: dict + sample: [ + { + "opt_value": "10.100.0.33,10.100.0.34", + "opt_name": "ntp" + } + ] +''' + +import copy + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class SubnetModule(OTCModule): + argument_spec = dict( + state=dict(default='present', choices=['absent', 'present']), + name=dict(type='str', required=True), + vpc_id=dict(type='str', required=True), + description=dict(type='str'), + cidr=dict(type='str'), + gateway_ip=dict(type='str'), + dhcp_enable=dict(type='bool'), + primary_dns=dict(type='str'), + secondary_dns=dict(type='str'), + dns_list=dict(type='list', elements='str', aliases=['dnsList']), + availability_zone=dict(type='str'), + extra_dhcp_opts=dict(type='list', elements='dict', options=dict( + opt_value=dict(type='str'), + opt_name=dict(type='str', required=True, choices=['ntp']) + )) + ) + + module_kwargs = dict( + supports_check_mode=True + ) + + _update_fields = {'dns_list', 'primary_dns', 'secondary_dns', 'extra_dhcp_opts'} + _update_forbidden = {'cidr', 'gateway_ip', 'description'} + + def run(self): + data = copy.deepcopy(self.params) + state = data.pop('state') + + subnet = self.find_vpc_subnet() + + has_changes = self._changed(subnet, data) + if self.ansible.check_mode: + self.exit(changed=has_changes, subnet=subnet) + + if state == 'present': + if subnet is None: + vpc = self.conn.vpc.get_vpc(data['vpc_id']) + self.sdk.resource.wait_for_status( + self.conn.vpc, + vpc, 'OK', + None, 1, 5 + ) + subnet = self.conn.vpc.create_subnet(**data) + elif has_changes: + err_fields = {} + for field in self._update_forbidden: + val = data.get(field, None) + if val is not None: + err_fields[field] = val + if err_fields: + self.fail('updating subnet fields {} is not supported (subnet: {})' + .format(err_fields, subnet)) + update_data = {} + for field in self._update_fields: + if data[field] is not None: + update_data[field] = data[field] + subnet = self.conn.vpc.update_subnet( + subnet, + name=subnet.name, + **update_data, + ) + subnet = self.sdk.resource.wait_for_status( + self.conn.vpc, + subnet, 'ACTIVE', + None, 2, 20 + ) + self.exit(changed=has_changes, subnet=subnet) + elif state == 'absent': + if subnet: + self.conn.vpc.delete_subnet(subnet, ignore_missing=True) + self.sdk.resource.wait_for_delete(self.conn.vpc, subnet, 2, 60) + self.exit(changed=has_changes) + + def _changed(self, state, expected): + expected_removed = self.params['state'] == 'absent' + actual_missing = state is None + if expected_removed: + return not actual_missing + elif actual_missing: + return True + + if _total_dns_list(state) != _total_dns_list(expected): + return True + + for field in self.argument_spec: # check only against possible arguments + if field not in expected: + continue + if expected[field] is None: # ignore not set fields + continue + if field in ['dns_list', 'primary_dns', 'secondary_dns']: + continue + if state.get(field, None) != expected[field]: + self.log('There is a difference in field {}. Expected {}, got {}' + .format(field, expected[field], state[field])) + return True + return False + + def find_vpc_subnet(self): + name = self.params['name'] + vpc_id = self.params['vpc_id'] + + try: + # first, try to find subnet by ID + return self.conn.vpc.get_subnet(name) + except (self.sdk.exceptions.ResourceNotFound, + # in case id is not a UUID (e.g. 
us subnet name): + self.sdk.exceptions.BadRequestException): + subnets = self.conn.vpc.subnets(vpc_id=vpc_id) + subnets = [s for s in subnets if s.name == name and s.vpc_id == vpc_id] + if len(subnets) == 0: + return None + if len(subnets) > 1: + self.fail( + msg='More than one subnet with name {} is found in vpc {}.' + 'Please use ID instead.'.format(name, vpc_id) + ) + return subnets[0] + + +def _total_dns_list(obj: dict) -> set: + if obj is None: + return set() + + dns_list = obj.get('dns_list', []) or [] + dns_set = set(dns_list) + dns_set.add(obj['primary_dns']) + dns_set.add(obj['secondary_dns']) + dns_set.discard(None) + return dns_set + + +def main(): + module = SubnetModule() + module() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/subnet/tasks/main.yaml b/tests/integration/targets/subnet/tasks/main.yaml new file mode 100644 index 00000000..c04d55ce --- /dev/null +++ b/tests/integration/targets/subnet/tasks/main.yaml @@ -0,0 +1,112 @@ +--- +- module_defaults: + opentelekomcloud.cloud.vpc: + cloud: "{{ test_cloud }}" + opentelekomcloud.cloud.subnet: + cloud: "{{ test_cloud }}" + + block: + - name: Set random prefix + set_fact: + prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" + + - name: Set initial facts + set_fact: + vpc_name: "{{ ( prefix + '_vpc') }}" + subnet_name: "test-subnet" + cidr: "192.168.0.0/24" + gateway: "192.168.0.1" + + - name: Create VPC + opentelekomcloud.cloud.vpc: + name: "{{ vpc_name }}" + cidr: "{{ cidr }}" + register: vpc + + - name: Create subnet + opentelekomcloud.cloud.subnet: + name: "{{ subnet_name }}" + description: "Subnet example" + vpc_id: "{{ vpc.vpc.id }}" + cidr: "{{ cidr }}" + gateway_ip: "{{ gateway }}" + dns_list: + - "100.125.4.25" + - "100.125.129.199" + register: subnet + + - name: Assert result + assert: + that: + - subnet is success + - subnet is changed + - subnet.subnet is defined + + - name: Check created subnet + opentelekomcloud.cloud.subnet: + name: "{{ subnet_name }}" + description: "Subnet example" + vpc_id: "{{ vpc.vpc.id }}" + cidr: "{{ cidr }}" + gateway_ip: "{{ gateway }}" + dns_list: + - "100.125.4.25" + - "100.125.129.199" + check_mode: true + register: subnet_check_mode + + - name: Assert check result + assert: + that: + - subnet_check_mode is success + - subnet_check_mode is not changed + - subnet_check_mode.subnet is defined + + - name: Update subnet + opentelekomcloud.cloud.subnet: + name: "{{ subnet_name }}" + vpc_id: "{{ vpc.vpc.id }}" + dns_list: + - "100.125.4.25" + - "100.125.129.199" + dhcp_enable: false + register: updated_subnet + + - name: Assert result + assert: + that: + - updated_subnet is success + - updated_subnet is changed + - updated_subnet.subnet is defined + + - name: Delete subnet + opentelekomcloud.cloud.subnet: + name: "{{ subnet_name }}" + vpc_id: "{{ vpc.vpc.id }}" + state: absent + register: deleted_subnet + + - name: Assert result + assert: + that: + - deleted_subnet is success + - deleted_subnet is changed + + - name: Check deleted subnet + opentelekomcloud.cloud.subnet: + name: "{{ subnet_name }}" + vpc_id: "{{ vpc.vpc.id }}" + state: absent + register: deleted_subnet_check + check_mode: true + + - name: Assert result + assert: + that: + - deleted_subnet_check is success + - deleted_subnet_check is not changed + + - name: Delete VPC + opentelekomcloud.cloud.vpc: + name: "{{ vpc_name }}" + state: absent diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 7fb018c4..a00b9da8 100644 --- a/tests/sanity/ignore-2.10.txt +++ 
b/tests/sanity/ignore-2.10.txt @@ -62,6 +62,7 @@ plugins/modules/security_group.py validate-modules:missing-gplv3-license plugins/modules/security_group_info.py validate-modules:missing-gplv3-license plugins/modules/tag.py validate-modules:missing-gplv3-license plugins/modules/vpc.py validate-modules:missing-gplv3-license +plugins/modules/subnet.py validate-modules:missing-gplv3-license plugins/modules/volume_backup.py validate-modules:missing-gplv3-license plugins/modules/waf_certificate.py validate-modules:missing-gplv3-license plugins/modules/waf_certificate_info.py validate-modules:missing-gplv3-license diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 1135b51e..e015a319 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -68,6 +68,7 @@ plugins/modules/waf_certificate_info.py validate-modules:missing-gplv3-license plugins/modules/volume_backup_info.py validate-modules:missing-gplv3-license plugins/modules/volume_snapshot_info.py validate-modules:missing-gplv3-license plugins/modules/vpc.py validate-modules:missing-gplv3-license +plugins/modules/subnet.py validate-modules:missing-gplv3-license plugins/modules/vpc_peering.py validate-modules:missing-gplv3-license plugins/modules/vpc_peering_info.py validate-modules:missing-gplv3-license plugins/modules/vpc_peering_mode.py validate-modules:missing-gplv3-license From 5a6116ca552f6da547a9707e0ef0d3fea700278a Mon Sep 17 00:00:00 2001 From: Polina Gubina <33940358+Polina-Gubina@users.noreply.github.com> Date: Tue, 28 Dec 2021 16:26:56 +0300 Subject: [PATCH 30/65] Fix doc for vpc module (#161) Fix doc for vpc module Fix doc for vpc module Reviewed-by: None Reviewed-by: Anton Sidelnikov Reviewed-by: Anton Kachurin Reviewed-by: Vladimir Vshivkov --- plugins/modules/vpc.py | 82 +++++++++++-------- tests/integration/targets/vpc/tasks/main.yaml | 2 + 2 files changed, 49 insertions(+), 35 deletions(-) diff --git a/plugins/modules/vpc.py b/plugins/modules/vpc.py index c18ab5c8..c19cfad3 100644 --- a/plugins/modules/vpc.py +++ b/plugins/modules/vpc.py @@ -60,7 +60,7 @@ in the VPC. Otherwise, this value does not take effect. type: str required: false - enabled_shared_snat: + enable_shared_snat: description: Specifies whether the shared SNAT function is enabled. required: false type: bool @@ -86,46 +86,51 @@ ''' RETURN = ''' -router: - description: Dictionary describing the router. +vpc: + description: Dictionary describing the vpc. returned: On success when I(state) is 'present' type: complex contains: id: - description: Router ID. + description: Vpc ID. type: str sample: "474acfe5-be34-494c-b339-50f06aa143e4" name: - description: Router name. + description: Vpc name. type: str - sample: "router1" - admin_state_up: - description: Administrative state of the router. - type: bool - sample: true + sample: "vpc-test" + description: + description: Provides supplementary information about the VPC. + type: str + sample: "" status: - description: The router status. + description: The vpc status. Can be 'CREATING' or 'OK'. type: str - sample: "ACTIVE" - tenant_id: - description: The tenant ID. + sample: "OK" + cidr: + description: + - Specifies the available IP address ranges for subnets in the VPC. + - Possible values are 10.0.0.0/8~24, 172.16.0.0/12~24, 192.168.0.0/16~24. + - Must be in CIDR format. type: str - sample: "861174b82b43463c9edc5202aadc60ef" - external_gateway_info: - description: The external gateway parameters. 
- type: dict - sample: { - "enable_snat": true, - "external_fixed_ips": [ - { - "ip_address": "10.6.6.99", - "subnet_id": "4272cb52-a456-4c20-8f3c-c26024ecfa81" - } - ] - } + sample: "192.168.0.0/24" routes: - description: The extra routes configuration for L3 router. + description: Specifies the route information. type: list + elements: dict + contains: + destination: + description: + - Specifies the destination network segment of a route. + - The value must be in the CIDR format. Currently, only the value \ + 0.0.0.0/0 is supported. + type: str + nexthop: + description: + - Specifies the next hop of a route. + - The value must be an IP address and must belong to the subnet in the VPC. + Otherwise, this value does not take effect. + type: str ''' @@ -139,7 +144,7 @@ class VpcModule(OTCModule): description=dict(required=False), cidr=dict(required=False), routes=dict(type='list', elements='dict', required=False), - enabled_shared_snat=dict(type='bool', required=False) + enable_shared_snat=dict(type='bool', required=False) ) def run(self): @@ -150,7 +155,7 @@ def run(self): description = self.params['description'] cidr = self.params['cidr'] routes = self.params['routes'] - enabled_shared_snat = self.params['enabled_shared_snat'] + enable_shared_snat = self.params['enable_shared_snat'] if name: query['name'] = name @@ -169,19 +174,26 @@ def run(self): if not vpc: new_vpc = self.conn.vpc.create_vpc(**query) + if routes or enable_shared_snat is not None: + query_update = {} + if routes: + query_update['routes'] = routes + if enable_shared_snat is not None: + query_update['enable_shared_snat'] = enable_shared_snat + new_vpc = self.conn.vpc.update_vpc(vpc=new_vpc, **query_update) self.exit(changed=True, vpc=new_vpc) + else: if routes: query['routes'] = routes - if enabled_shared_snat: - query['enabled_shared_snat'] = enabled_shared_snat + if enable_shared_snat is not None: + query['enable_shared_snat'] = enable_shared_snat updated_vpc = self.conn.vpc.update_vpc(vpc=vpc, **query) self.exit(changed=True, vpc=updated_vpc) else: if vpc: - if self.ansible.check_mode: - self.exit(changed=True) - self.conn.network.delete_router(vpc.id) + if not self.ansible.check_mode: + self.conn.vpc.delete_vpc(vpc.id) self.exit(changed=True) else: self.exit(changed=False) diff --git a/tests/integration/targets/vpc/tasks/main.yaml b/tests/integration/targets/vpc/tasks/main.yaml index 456d3229..1745b720 100644 --- a/tests/integration/targets/vpc/tasks/main.yaml +++ b/tests/integration/targets/vpc/tasks/main.yaml @@ -31,6 +31,7 @@ opentelekomcloud.cloud.vpc: name: "{{ vpc_name }}" cidr: "{{ cidr }}" + enable_shared_snat: true state: present register: vpc @@ -39,6 +40,7 @@ that: - vpc is success - vpc is changed + - vpc.vpc.enable_shared_snat is true - name: Update VPC opentelekomcloud.cloud.vpc: From e86c96cc213102c32124e65afe7030540e92e5bf Mon Sep 17 00:00:00 2001 From: Anton Kachurin Date: Tue, 28 Dec 2021 17:18:13 +0300 Subject: [PATCH 31/65] Add `version_added` to `subnet` and `vpc` (#162) Add `version_added` to `subnet` and `vpc` Add version_added to subnet and vpc modules documentation Reviewed-by: Rodion Gyrbu Reviewed-by: None --- plugins/modules/subnet.py | 1 + plugins/modules/vpc.py | 1 + 2 files changed, 2 insertions(+) diff --git a/plugins/modules/subnet.py b/plugins/modules/subnet.py index b9b1d8d5..498fceab 100644 --- a/plugins/modules/subnet.py +++ b/plugins/modules/subnet.py @@ -17,6 +17,7 @@ short_description: Manage VPC subnet extends_documentation_fragment: opentelekomcloud.cloud.otc author: "Anton 
Kachurin (@outcatcher)" +version_added: "0.11.0" description: - Manage (create, update or delete) Open Telekom Cloud VPC subnet. options: diff --git a/plugins/modules/vpc.py b/plugins/modules/vpc.py index c19cfad3..a91b01d8 100644 --- a/plugins/modules/vpc.py +++ b/plugins/modules/vpc.py @@ -17,6 +17,7 @@ short_description: Create or delete vpc from Open Telekom Cloud extends_documentation_fragment: opentelekomcloud.cloud.otc author: "Polina Gubina (@polina-gubina)" +version_added: "0.10.0" description: - Create or Delete vpc from OpenStack. options: From facea056b39f4c943ddbf905638102ceb68276f7 Mon Sep 17 00:00:00 2001 From: Anton Kachurin Date: Tue, 28 Dec 2021 17:42:47 +0300 Subject: [PATCH 32/65] Update collection version (#163) Update collection version Set collection version to 0.11.0 Reviewed-by: Anton Sidelnikov Reviewed-by: Rodion Gyrbu Reviewed-by: None --- doc/source/index.rst | 2 +- galaxy.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index fe365d25..a4cbfdf4 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1,7 +1,7 @@ Opentelekomcloud.Cloud ====================== -Collection version 0.9.0 +Collection version 0.11.0 Plugin Index diff --git a/galaxy.yml b/galaxy.yml index fd9b605c..6ea48335 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: opentelekomcloud name: cloud -version: 0.9.0 +version: 0.11.0 readme: README.md authors: - Artem Goncharov From da693166c638434a53953321d0574f31b24e2725 Mon Sep 17 00:00:00 2001 From: Anton Kachurin Date: Wed, 29 Dec 2021 15:26:54 +0300 Subject: [PATCH 33/65] Support passing vpc by name to `subnet` (#167) Support passing vpc by name to `subnet` Allow passing VPC to subnet module either by name or by ID Resolve #165 Reviewed-by: Rodion Gyrbu Reviewed-by: Anton Sidelnikov Reviewed-by: None Reviewed-by: Polina Gubina --- plugins/modules/subnet.py | 19 ++++++++++++------- .../targets/subnet/tasks/main.yaml | 10 +++++----- 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/plugins/modules/subnet.py b/plugins/modules/subnet.py index 498fceab..1cd19583 100644 --- a/plugins/modules/subnet.py +++ b/plugins/modules/subnet.py @@ -79,10 +79,11 @@ availability_zone: description: Specifies the AZ to which the subnet belongs. type: str - vpc_id: - description: Specifies the ID of the VPC to which the subnet belongs. + vpc: + description: Specifies the name or ID of the VPC to which the subnet belongs. required: true type: str + aliases: ['vpc_id'] extra_dhcp_opts: description: Specifies the NTP server address configured for the subnet. 
type: list @@ -211,7 +212,7 @@ class SubnetModule(OTCModule): argument_spec = dict( state=dict(default='present', choices=['absent', 'present']), name=dict(type='str', required=True), - vpc_id=dict(type='str', required=True), + vpc=dict(type='str', required=True, aliases=['vpc_id']), description=dict(type='str'), cidr=dict(type='str'), gateway_ip=dict(type='str'), @@ -234,18 +235,20 @@ class SubnetModule(OTCModule): _update_forbidden = {'cidr', 'gateway_ip', 'description'} def run(self): + vpc = self.conn.vpc.find_vpc(self.params['vpc']) + self.params['vpc'] = vpc.id + subnet = self.find_vpc_subnet() + data = copy.deepcopy(self.params) + data['vpc_id'] = data.pop('vpc') state = data.pop('state') - subnet = self.find_vpc_subnet() - has_changes = self._changed(subnet, data) if self.ansible.check_mode: self.exit(changed=has_changes, subnet=subnet) if state == 'present': if subnet is None: - vpc = self.conn.vpc.get_vpc(data['vpc_id']) self.sdk.resource.wait_for_status( self.conn.vpc, vpc, 'OK', @@ -300,6 +303,8 @@ def _changed(self, state, expected): continue if field in ['dns_list', 'primary_dns', 'secondary_dns']: continue + if field in ['vpc', 'vpc_id']: + field = 'vpc_id' # as `vpc` should be an ID too at this place if state.get(field, None) != expected[field]: self.log('There is a difference in field {}. Expected {}, got {}' .format(field, expected[field], state[field])) @@ -308,7 +313,7 @@ def _changed(self, state, expected): def find_vpc_subnet(self): name = self.params['name'] - vpc_id = self.params['vpc_id'] + vpc_id = self.params['vpc'] try: # first, try to find subnet by ID diff --git a/tests/integration/targets/subnet/tasks/main.yaml b/tests/integration/targets/subnet/tasks/main.yaml index c04d55ce..0efbc370 100644 --- a/tests/integration/targets/subnet/tasks/main.yaml +++ b/tests/integration/targets/subnet/tasks/main.yaml @@ -27,7 +27,7 @@ opentelekomcloud.cloud.subnet: name: "{{ subnet_name }}" description: "Subnet example" - vpc_id: "{{ vpc.vpc.id }}" + vpc: "{{ vpc_name }}" cidr: "{{ cidr }}" gateway_ip: "{{ gateway }}" dns_list: @@ -46,7 +46,7 @@ opentelekomcloud.cloud.subnet: name: "{{ subnet_name }}" description: "Subnet example" - vpc_id: "{{ vpc.vpc.id }}" + vpc: "{{ vpc_name }}" cidr: "{{ cidr }}" gateway_ip: "{{ gateway }}" dns_list: @@ -65,7 +65,7 @@ - name: Update subnet opentelekomcloud.cloud.subnet: name: "{{ subnet_name }}" - vpc_id: "{{ vpc.vpc.id }}" + vpc: "{{ vpc_name }}" dns_list: - "100.125.4.25" - "100.125.129.199" @@ -82,7 +82,7 @@ - name: Delete subnet opentelekomcloud.cloud.subnet: name: "{{ subnet_name }}" - vpc_id: "{{ vpc.vpc.id }}" + vpc: "{{ vpc_name }}" state: absent register: deleted_subnet @@ -95,7 +95,7 @@ - name: Check deleted subnet opentelekomcloud.cloud.subnet: name: "{{ subnet_name }}" - vpc_id: "{{ vpc.vpc.id }}" + vpc: "{{ vpc_name }}" state: absent register: deleted_subnet_check check_mode: true From 65eeb1fdadbc5a270635a9fa02b1ea4456b7ba8f Mon Sep 17 00:00:00 2001 From: Polina Gubina <33940358+Polina-Gubina@users.noreply.github.com> Date: Thu, 30 Dec 2021 14:50:58 +0300 Subject: [PATCH 34/65] vpc info module (#168) vpc info module close #164 Reviewed-by: None Reviewed-by: Anton Sidelnikov Reviewed-by: Vladimir Vshivkov Reviewed-by: Rodion Gyrbu Reviewed-by: Anton Kachurin --- meta/runtime.yml | 1 + plugins/modules/vpc_info.py | 106 ++++++++++++++++++ .../targets/vpc_info/tasks/main.yaml | 53 +++++++++ tests/sanity/ignore-2.10.txt | 1 + tests/sanity/ignore-2.9.txt | 1 + 5 files changed, 162 insertions(+) create mode 100644 
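A minimal playbook sketch of the reworked vpc parameter (the VPC/subnet names, CIDR and gateway below are placeholders, not values taken from these patches); either the VPC name or its ID may be supplied:

- name: Create a subnet, referencing the VPC by name
  opentelekomcloud.cloud.subnet:
    name: "example-subnet"
    vpc: "example-vpc"        # a VPC ID is accepted here as well (alias: vpc_id)
    cidr: "192.168.0.0/24"
    gateway_ip: "192.168.0.1"
    state: present
  register: subnet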
plugins/modules/vpc_info.py create mode 100644 tests/integration/targets/vpc_info/tasks/main.yaml diff --git a/meta/runtime.yml b/meta/runtime.yml index cf169dec..6f163558 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -54,6 +54,7 @@ action_groups: - waf_certificate - waf_certificate_info - vpc + - vpc_info - volume_backup_info - volume_snapshot_info - vpc_peering diff --git a/plugins/modules/vpc_info.py b/plugins/modules/vpc_info.py new file mode 100644 index 00000000..11ac49da --- /dev/null +++ b/plugins/modules/vpc_info.py @@ -0,0 +1,106 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +--- +module: vpc_info +short_description: Get vpc info from OpenTelekomCloud +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.11.1" +author: "Polina Gubina(@polina-gubina)" +description: + - Get vpc from the OTC. +options: + name_or_id: + description: + - Name or id of the vpc. + type: str +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +vpcs: + description: Dictionary describing vpcs. + type: complex + returned: On Success. + contains: + id: + description: Specifies the ID of the vpc. + type: str + sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69" + name: + description: Specifies the vpc name. + type: str + sample: "vpc-test" + description: + description: Provides supplementary information about the vpc. + type: str + sample: "vpc for testing" + cidr: + description: Specifies the available IP address ranges for subnets in the VPC. + type: str + sample: "10.0.0.0/8" + status: + description: Specifies the VPC status. + type: str + sample: "CREATING" + routes: + description: Specifies the route information. + type: list + enable_shared_snat: + description: Specifies whether the shared SNAT function is enabled. The value true\ + indicates that the function is enabled, and the value false indicates that the function is not enabled. 
+ type: bool +''' + +EXAMPLES = ''' +# Get all vpcs +- opentelekomcloud.cloud.vpc_info: + register: vpc_info +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class VpcInfoModule(OTCModule): + argument_spec = dict( + name_or_id=dict(required=False) + ) + + def run(self): + data = [] + + if self.params['name_or_id']: + raw = self.conn.vpc.find_vpc(name_or_id=self.params['name_or_id']) + dt = raw.to_dict() + dt.pop('location') + data.append(dt) + else: + for raw in self.conn.vpc.vpcs(): + dt = raw.to_dict() + dt.pop('location') + data.append(dt) + + self.exit_json( + changed=False, + vpcs=data + ) + + +def main(): + module = VpcInfoModule() + module() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/vpc_info/tasks/main.yaml b/tests/integration/targets/vpc_info/tasks/main.yaml new file mode 100644 index 00000000..fcab9ce4 --- /dev/null +++ b/tests/integration/targets/vpc_info/tasks/main.yaml @@ -0,0 +1,53 @@ +--- +- module_defaults: + opentelekomcloud.cloud.vpc: + cloud: "{{ test_cloud }}" + opentelekomcloud.cloud.vpc_info: + cloud: "{{ test_cloud }}" + + block: + - name: Set random prefix + set_fact: + prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" + + - name: Set initial facts + set_fact: + vpc_name: "{{ ( prefix + 'vpc') }}" + + - name: Creating a vpc + opentelekomcloud.cloud.vpc: + name: "{{ vpc_name }}" + state: present + register: vpc + + - name: Getting info about all vpcs + opentelekomcloud.cloud.vpc_info: + register: all_vpcs + + - name: assert result + assert: + that: + - all_vpcs is success + - all_vpcs is not changed + - all_vpcs | length > 0 + + - name: Getting info about new vpc + opentelekomcloud.cloud.vpc_info: + name_or_id: "{{ vpc_name }}" + register: new_vpc + + - name: assert result + assert: + that: + - new_vpc is success + - new_vpc is not changed + - new_vpc | length > 0 + + always: + - block: + # Cleanup + - name: Drop created vpc + opentelekomcloud.cloud.vpc: + name: "{{ vpc_name }}" + state: absent + ignore_errors: true diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index a00b9da8..7e1f4d2e 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -62,6 +62,7 @@ plugins/modules/security_group.py validate-modules:missing-gplv3-license plugins/modules/security_group_info.py validate-modules:missing-gplv3-license plugins/modules/tag.py validate-modules:missing-gplv3-license plugins/modules/vpc.py validate-modules:missing-gplv3-license +plugins/modules/vpc_info.py validate-modules:missing-gplv3-license plugins/modules/subnet.py validate-modules:missing-gplv3-license plugins/modules/volume_backup.py validate-modules:missing-gplv3-license plugins/modules/waf_certificate.py validate-modules:missing-gplv3-license diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index e015a319..ec9736e4 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -68,6 +68,7 @@ plugins/modules/waf_certificate_info.py validate-modules:missing-gplv3-license plugins/modules/volume_backup_info.py validate-modules:missing-gplv3-license plugins/modules/volume_snapshot_info.py validate-modules:missing-gplv3-license plugins/modules/vpc.py validate-modules:missing-gplv3-license +plugins/modules/vpc_info.py validate-modules:missing-gplv3-license plugins/modules/subnet.py validate-modules:missing-gplv3-license plugins/modules/vpc_peering.py validate-modules:missing-gplv3-license plugins/modules/vpc_peering_info.py 
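A short usage sketch for the new module (the VPC name is a placeholder); the registered result exposes the list under vpcs, so a single lookup can feed later tasks:

- name: Look up one VPC by name or ID
  opentelekomcloud.cloud.vpc_info:
    name_or_id: "example-vpc"
  register: vpc_info

- name: Print its CIDR
  ansible.builtin.debug:
    msg: "{{ vpc_info.vpcs[0].cidr }}"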
validate-modules:missing-gplv3-license From c6c252b225a36a2d9a1e6dbf562529d4ed2cb178 Mon Sep 17 00:00:00 2001 From: Polina Gubina <33940358+Polina-Gubina@users.noreply.github.com> Date: Fri, 14 Jan 2022 11:52:10 +0300 Subject: [PATCH 35/65] Subnet info module (#169) Subnet info module Subnet info module Reviewed-by: None Reviewed-by: Anton Sidelnikov Reviewed-by: Rodion Gyrbu --- meta/runtime.yml | 1 + plugins/modules/subnet_info.py | 141 ++++++++++++++++++ .../targets/subnet_info/tasks/main.yaml | 79 ++++++++++ tests/sanity/ignore-2.10.txt | 1 + tests/sanity/ignore-2.9.txt | 1 + 5 files changed, 223 insertions(+) create mode 100644 plugins/modules/subnet_info.py create mode 100644 tests/integration/targets/subnet_info/tasks/main.yaml diff --git a/meta/runtime.yml b/meta/runtime.yml index 6f163558..20476b24 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -49,6 +49,7 @@ action_groups: - security_group_info - server_group_info - subnet + - subnet_info - tag - volume_backup - waf_certificate diff --git a/plugins/modules/subnet_info.py b/plugins/modules/subnet_info.py new file mode 100644 index 00000000..574fd40e --- /dev/null +++ b/plugins/modules/subnet_info.py @@ -0,0 +1,141 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +--- +module: subnet_info +short_description: Get subnet info from OpenTelekomCloud +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.11.1" +author: "Polina Gubina(@polina-gubina)" +description: + - Get subnet from the OTC. +options: + name_or_id: + description: + - Name or id of the subnet. + type: str + vpc: + description: + - Name or id of the vpc subnets should be listed within. + type: str +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +subnets: + description: Dictionary describing subnets. + type: complex + returned: On Success. + contains: + id: + description: Specifies the ID of the subnet. + type: str + name: + description: Specifies the subnets name. + type: str + description: + description: Provides supplementary information about the subnet. + type: str + cidr: + description: Specifies the subnet CIDR block. + type: str + gateway_ip: + description: Specifies the subnet gateway address. + type: str + dhcp_enable: + description: Specifies whether the DHCP function is enabled for the subnet. + type: bool + primary_dns: + description: Specifies the IP address of DNS server 1 on the subnet. + type: str + secondary_dns: + description: Specifies the IP address of DNS server 2 on the subnet. + type: str + dnsList: + description: Specifies the IP address list of DNS servers on the subnet. + type: list + availability_zone: + description: Identifies the AZ to which the subnet belongs. + type: str + vpc_id: + description: Specifies the ID of the VPC to which the subnet belongs. + type: str + status: + description: Specifies the status of the subnet. + type: str + neutron_network_id: + description: Specifies the ID of the corresponding network (OpenStack Neutron API). 
+ type: str + neutron_subnet_id: + description: Specifies the ID of the corresponding subnet (OpenStack Neutron API). + type: str + extra_dhcp_opts: + description: Specifies the NTP server address configured for the subnet. + type: list + elements: dict + contains: + opt_value: + description: Specifies the NTP server address configured for the subnet. + type: str + opt_name: + description: Specifies the NTP server address name configured for the subnet. + type: str +''' + +EXAMPLES = ''' +# Get all subnets +- opentelekomcloud.cloud.subnet_info: + register: subnet_info +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class SubnetInfoModule(OTCModule): + argument_spec = dict( + name_or_id=dict(required=False), + vpc=dict(required=False) + ) + + def run(self): + data = [] + + if self.params['name_or_id']: + raw = self.conn.vpc.find_subnet(name_or_id=self.params['name_or_id']) + dt = raw.to_dict() + dt.pop('location') + data.append(dt) + else: + query = {} + if self.params['vpc']: + vpc = self.conn.vpc.find_vpc(name_or_id=self.params['vpc']) + query['vpc_id'] = vpc.id + for raw in self.conn.vpc.subnets(**query): + dt = raw.to_dict() + dt.pop('location') + data.append(dt) + + self.exit_json( + changed=False, + subnets=data + ) + + +def main(): + module = SubnetInfoModule() + module() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/subnet_info/tasks/main.yaml b/tests/integration/targets/subnet_info/tasks/main.yaml new file mode 100644 index 00000000..6b5617c2 --- /dev/null +++ b/tests/integration/targets/subnet_info/tasks/main.yaml @@ -0,0 +1,79 @@ +--- +- module_defaults: + opentelekomcloud.cloud.vpc: + cloud: "{{ test_cloud }}" + opentelekomcloud.cloud.vpc_info: + cloud: "{{ test_cloud }}" + opentelekomcloud.cloud.subnet: + cloud: "{{ test_cloud }}" + opentelekomcloud.cloud.subnet_info: + cloud: "{{ test_cloud }}" + + block: + - name: Set random prefix + set_fact: + prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" + + - name: Set initial facts + set_fact: + vpc_name: "{{ ( prefix + 'vpc') }}" + subnet_name: "{{ ( prefix + 'subnet') }}" + cidr: "192.168.0.0/24" + gateway: "192.168.0.1" + + - name: Creating a vpc + opentelekomcloud.cloud.vpc: + name: "{{ vpc_name }}" + state: present + register: vpc + + - name: Creating a subnet + opentelekomcloud.cloud.subnet: + name: "{{ subnet_name }}" + description: "Subnet example" + vpc: "{{ vpc_name }}" + cidr: "{{ cidr }}" + gateway_ip: "{{ gateway }}" + dns_list: + - "100.125.4.25" + - "100.125.129.199" + register: vpc + + - name: Getting info about all subnets + opentelekomcloud.cloud.subnet_info: + vpc: "{{ vpc_name }}" + register: all_subnets + + - name: assert result + assert: + that: + - all_subnets is success + - all_subnets is not changed + - all_subnets | length > 0 + + - name: Getting info about new subnet + opentelekomcloud.cloud.subnet_info: + name_or_id: "{{ subnet_name }}" + register: new_subnet + + - name: assert result + assert: + that: + - new_subnet is success + - new_subnet is not changed + - new_subnet | length > 0 + + always: + - block: + # Cleanup + - name: Drop created subnet + opentelekomcloud.cloud.subnet: + name: "{{ subnet_name }}" + vpc: "{{ vpc_name }}" + state: absent + + - name: Drop created vpc + opentelekomcloud.cloud.vpc: + name: "{{ vpc_name }}" + state: absent + ignore_errors: true diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 7e1f4d2e..a009d592 100644 --- a/tests/sanity/ignore-2.10.txt +++ 
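An analogous sketch for the subnet variant (names are placeholders): looking a subnet up by name and reusing one of its returned fields in a follow-up task:

- name: Look up one subnet by name or ID
  opentelekomcloud.cloud.subnet_info:
    name_or_id: "example-subnet"
  register: subnet_info

- name: Print its gateway address
  ansible.builtin.debug:
    msg: "{{ subnet_info.subnets[0].gateway_ip }}"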
b/tests/sanity/ignore-2.10.txt @@ -58,6 +58,7 @@ plugins/modules/rds_datastore_info.py validate-modules:missing-gplv3-license plugins/modules/rds_flavor_info.py validate-modules:missing-gplv3-license plugins/modules/rds_instance.py validate-modules:missing-gplv3-license plugins/modules/rds_instance_info.py validate-modules:missing-gplv3-license +plugins/modules/subnet_info.py validate-modules:missing-gplv3-license plugins/modules/security_group.py validate-modules:missing-gplv3-license plugins/modules/security_group_info.py validate-modules:missing-gplv3-license plugins/modules/tag.py validate-modules:missing-gplv3-license diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index ec9736e4..00d13fe1 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -70,6 +70,7 @@ plugins/modules/volume_snapshot_info.py validate-modules:missing-gplv3-license plugins/modules/vpc.py validate-modules:missing-gplv3-license plugins/modules/vpc_info.py validate-modules:missing-gplv3-license plugins/modules/subnet.py validate-modules:missing-gplv3-license +plugins/modules/subnet_info.py validate-modules:missing-gplv3-license plugins/modules/vpc_peering.py validate-modules:missing-gplv3-license plugins/modules/vpc_peering_info.py validate-modules:missing-gplv3-license plugins/modules/vpc_peering_mode.py validate-modules:missing-gplv3-license From 15f85395fc0d14b731984dc424aa4aad8c367c48 Mon Sep 17 00:00:00 2001 From: Rodion Gyrbu Date: Tue, 1 Feb 2022 13:15:43 +0300 Subject: [PATCH 36/65] Change image-name in nat test (#171) Change image-name in nat test Reviewed-by: Anton Sidelnikov Reviewed-by: None --- tests/integration/targets/nat/tasks/main.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/targets/nat/tasks/main.yaml b/tests/integration/targets/nat/tasks/main.yaml index eda57f11..0b5a430c 100644 --- a/tests/integration/targets/nat/tasks/main.yaml +++ b/tests/integration/targets/nat/tasks/main.yaml @@ -21,7 +21,7 @@ nat_gateway_name: "{{ ( prefix + '_nat-gateway') }}" server_name: "{{ ( prefix + '_nat-server') }}" server_flavor: "s2.medium.2" - image_name: Standard_CentOS_8_latest + image_name: Standard_Debian_11_latest - name: Create network for NAT openstack.cloud.network: @@ -200,7 +200,7 @@ always: - block: - # Cleanup + # Cleanup - name: List SNAT rules of gateway nat_snat_rule_info: gateway: "{{ nat_gateway_name }}" From 354c220a5b9a63170f050bb14d66176377811ba0 Mon Sep 17 00:00:00 2001 From: YustinaKvr <62885041+YustinaKvr@users.noreply.github.com> Date: Tue, 1 Feb 2022 18:56:14 +0530 Subject: [PATCH 37/65] Add new doc pages (#170) Add new doc pages Fix tox.ini Add vpn.rst into the source Reviewed-by: None Reviewed-by: Rodion Gyrbu --- doc/source/dds.rst | 10 ++++++++++ doc/source/index.rst | 2 ++ doc/source/vpn.rst | 7 +++++++ tox.ini | 2 +- 4 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 doc/source/dds.rst create mode 100644 doc/source/vpn.rst diff --git a/doc/source/dds.rst b/doc/source/dds.rst new file mode 100644 index 00000000..b1ecad20 --- /dev/null +++ b/doc/source/dds.rst @@ -0,0 +1,10 @@ +DDS Modules +=========== + +.. 
toctree:: + :maxdepth: 1 + + DDS datastore info + DDS flavor info + DDS instance info + DDS instance \ No newline at end of file diff --git a/doc/source/index.rst b/doc/source/index.rst index a4cbfdf4..3ba80fcf 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -17,6 +17,7 @@ These are the plugins in the opentelekomcloud.cloud collection as cce ces + dds deh dns ecs @@ -27,6 +28,7 @@ These are the plugins in the opentelekomcloud.cloud collection volume vpc vpc_peering + vpn waf diff --git a/doc/source/vpn.rst b/doc/source/vpn.rst new file mode 100644 index 00000000..a898a2f5 --- /dev/null +++ b/doc/source/vpn.rst @@ -0,0 +1,7 @@ +VPN Modules +=========== + +.. toctree:: + :maxdepth: 1 + + VPN service info \ No newline at end of file diff --git a/tox.ini b/tox.ini index 2d38f709..f2aec3d2 100644 --- a/tox.ini +++ b/tox.ini @@ -45,7 +45,7 @@ commands = cp -av doc/source/ tmp # copy resulting content back to Sphinx location #cp -av tmp/ doc/source - sphinx-build -W -d doc/build/doctrees --keep-going -b html tmp doc/build/html + sphinx-build -W -d doc/build/doctrees --keep-going -b html tmp/source doc/build/html [testenv:pep8] commands = From 3a6d969a420e4397578d7c96b4d4f789af73a3ad Mon Sep 17 00:00:00 2001 From: YustinaKvr <62885041+YustinaKvr@users.noreply.github.com> Date: Thu, 3 Feb 2022 14:28:42 +0530 Subject: [PATCH 38/65] add and fix pages (#172) add and fix pages Add some new pages Fix some existing pages Reviewed-by: None Reviewed-by: Rodion Gyrbu --- doc/source/as.rst | 4 ++-- doc/source/cce.rst | 4 ++-- doc/source/ces.rst | 4 ++-- doc/source/dds.rst | 12 ++++++------ doc/source/deh.rst | 4 ++-- doc/source/dms.rst | 15 +++++++++++++++ doc/source/dns.rst | 4 ++-- doc/source/ecs.rst | 4 ++-- doc/source/elb.rst | 4 ++-- doc/source/index.rst | 2 +- doc/source/misc.rst | 1 + doc/source/rds.rst | 4 ++-- doc/source/volume.rst | 4 ++-- doc/source/vpc.rst | 13 +++++++++++-- doc/source/vpc_peering.rst | 13 ------------- doc/source/vpn.rst | 6 +++--- doc/source/waf.rst | 4 ++-- 17 files changed, 57 insertions(+), 45 deletions(-) create mode 100644 doc/source/dms.rst delete mode 100644 doc/source/vpc_peering.rst diff --git a/doc/source/as.rst b/doc/source/as.rst index dedf3ea5..227bc922 100644 --- a/doc/source/as.rst +++ b/doc/source/as.rst @@ -1,5 +1,5 @@ -Auto Scaling Modules -==================== +Auto Scaling (AS) Modules +========================= .. toctree:: :maxdepth: 1 diff --git a/doc/source/cce.rst b/doc/source/cce.rst index be5aefc6..349c8902 100644 --- a/doc/source/cce.rst +++ b/doc/source/cce.rst @@ -1,5 +1,5 @@ -CCE Modules -=========== +Cloud Container Engine (CCE) Modules +==================================== .. toctree:: :maxdepth: 1 diff --git a/doc/source/ces.rst b/doc/source/ces.rst index a1bc01c2..b735a177 100644 --- a/doc/source/ces.rst +++ b/doc/source/ces.rst @@ -1,5 +1,5 @@ -Cloud Eye Service Modules -========================= +Cloud Eye Service (CES) Modules +=============================== .. toctree:: :maxdepth: 1 diff --git a/doc/source/dds.rst b/doc/source/dds.rst index b1ecad20..5e014e77 100644 --- a/doc/source/dds.rst +++ b/doc/source/dds.rst @@ -1,10 +1,10 @@ -DDS Modules -=========== +Document Database Service (DDS) Modules +======================================= .. 
toctree:: :maxdepth: 1 - DDS datastore info - DDS flavor info - DDS instance info - DDS instance \ No newline at end of file + dds_instance + dds_datastore_info + dds_flavor_info + dds_instance_info \ No newline at end of file diff --git a/doc/source/deh.rst b/doc/source/deh.rst index 21920c6e..57bd0f98 100644 --- a/doc/source/deh.rst +++ b/doc/source/deh.rst @@ -1,5 +1,5 @@ -Dedicated Host Service Modules -============================== +Dedicated Host Service (DEH) Modules +==================================== .. toctree:: :maxdepth: 1 diff --git a/doc/source/dms.rst b/doc/source/dms.rst new file mode 100644 index 00000000..e625891e --- /dev/null +++ b/doc/source/dms.rst @@ -0,0 +1,15 @@ +Distributed Message Service (DMS) Modules +========================================= + +.. toctree:: + :maxdepth: 1 + + dms_instance + dms_instance_info + dms_instance_topic + dms_instance_topic_info + dms_message + dms_queue + dms_queue_info + dms_queue_group + dms_queue_group_info \ No newline at end of file diff --git a/doc/source/dns.rst b/doc/source/dns.rst index d19a364a..5463229c 100644 --- a/doc/source/dns.rst +++ b/doc/source/dns.rst @@ -1,5 +1,5 @@ -Domain Name Service Modules -=========================== +Domain Name Service (DNS) Modules +================================= .. toctree:: :maxdepth: 1 diff --git a/doc/source/ecs.rst b/doc/source/ecs.rst index ab794b08..090588b1 100644 --- a/doc/source/ecs.rst +++ b/doc/source/ecs.rst @@ -1,5 +1,5 @@ -Compute (ECS) Modules -===================== +Elastic Cloud Server (ECS) Modules +================================== .. toctree:: :maxdepth: 1 diff --git a/doc/source/elb.rst b/doc/source/elb.rst index 86202fc9..c7899197 100644 --- a/doc/source/elb.rst +++ b/doc/source/elb.rst @@ -1,5 +1,5 @@ -Elastic Load Balancing Modules -============================== +Elastic Load Balancing (ELB) Modules +==================================== .. toctree:: :maxdepth: 1 diff --git a/doc/source/index.rst b/doc/source/index.rst index 3ba80fcf..2d819669 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -19,6 +19,7 @@ These are the plugins in the opentelekomcloud.cloud collection ces dds deh + dms dns ecs elb @@ -27,7 +28,6 @@ These are the plugins in the opentelekomcloud.cloud collection rds volume vpc - vpc_peering vpn waf diff --git a/doc/source/misc.rst b/doc/source/misc.rst index fcc4326a..823295cb 100644 --- a/doc/source/misc.rst +++ b/doc/source/misc.rst @@ -6,5 +6,6 @@ Other Modules availability_zone_info object_info + server_group_info tag diff --git a/doc/source/rds.rst b/doc/source/rds.rst index 9380f60e..c76354c6 100644 --- a/doc/source/rds.rst +++ b/doc/source/rds.rst @@ -1,5 +1,5 @@ -Relational Database Service Modules -=================================== +Relational Database Service (RDS) Modules +========================================= .. toctree:: :maxdepth: 1 diff --git a/doc/source/volume.rst b/doc/source/volume.rst index 7ea6a94b..f097c668 100644 --- a/doc/source/volume.rst +++ b/doc/source/volume.rst @@ -1,5 +1,5 @@ -Volume (EVS) Modules -==================== +Elastic Volume Service (EVS) Modules +==================================== .. toctree:: :maxdepth: 1 diff --git a/doc/source/vpc.rst b/doc/source/vpc.rst index 1ea0a4f8..36cf1c01 100644 --- a/doc/source/vpc.rst +++ b/doc/source/vpc.rst @@ -1,8 +1,17 @@ -Networking (VPC) Modules -======================== +Virtual Private Cloud (VPC) Modules +=================================== .. 
toctree:: :maxdepth: 1 floating_ip + security_group security_group_info + subnet + subnet_info + vpc + vpc_peering + vpc_peering_info + vpc_peering_mode + vpc_route + vpc_route_info diff --git a/doc/source/vpc_peering.rst b/doc/source/vpc_peering.rst deleted file mode 100644 index 3e0e47ce..00000000 --- a/doc/source/vpc_peering.rst +++ /dev/null @@ -1,13 +0,0 @@ -VPC Peering Modules -=================== - -.. toctree:: - :maxdepth: 1 - - vpc_peering - vpc_peering_info - vpc_peering_mode - vpc_route - vpc_route_info - vpn_service_info - diff --git a/doc/source/vpn.rst b/doc/source/vpn.rst index a898a2f5..0c760ad8 100644 --- a/doc/source/vpn.rst +++ b/doc/source/vpn.rst @@ -1,7 +1,7 @@ -VPN Modules -=========== +Virtual Private Network (VPN) Modules +===================================== .. toctree:: :maxdepth: 1 - VPN service info \ No newline at end of file + vpn_service_info \ No newline at end of file diff --git a/doc/source/waf.rst b/doc/source/waf.rst index e3912121..04e3382e 100644 --- a/doc/source/waf.rst +++ b/doc/source/waf.rst @@ -1,5 +1,5 @@ -WAF Modules -=========== +Web Application Firewall (WAF) Modules +====================================== .. toctree:: :maxdepth: 1 From 174166038b8cbeaeb64dce7faecb6a11ae2a00fe Mon Sep 17 00:00:00 2001 From: YustinaKvr <62885041+YustinaKvr@users.noreply.github.com> Date: Thu, 10 Feb 2022 21:34:12 +0530 Subject: [PATCH 39/65] Examples for new docs portal (#174) Examples for new docs portal added some examples minor code additions Reviewed-by: Anton Sidelnikov Reviewed-by: None Reviewed-by: Irina Pereiaslavskaia --- plugins/modules/availability_zone_info.py | 1 + plugins/modules/css_cluster_info.py | 3 +++ plugins/modules/css_snapshot_info.py | 11 ++++------- plugins/modules/dds_instance_info.py | 5 +++++ plugins/modules/dms_instance.py | 7 ++++++- plugins/modules/dms_instance_info.py | 9 +++++++-- plugins/modules/dms_queue.py | 15 ++++++++++----- plugins/modules/dns_recordset_info.py | 3 ++- plugins/modules/lb_healthmonitor_info.py | 6 ++++++ plugins/modules/nat_dnat_rule_info.py | 1 + plugins/modules/rds_instance_info.py | 2 ++ plugins/modules/volume_backup.py | 9 +++++++++ plugins/modules/waf_certificate_info.py | 1 + plugins/modules/waf_domain_info.py | 1 + 14 files changed, 58 insertions(+), 16 deletions(-) diff --git a/plugins/modules/availability_zone_info.py b/plugins/modules/availability_zone_info.py index c1326ea4..fc30be15 100644 --- a/plugins/modules/availability_zone_info.py +++ b/plugins/modules/availability_zone_info.py @@ -47,6 +47,7 @@ EXAMPLES = ''' # Get AZ. 
- opentelekomcloud.cloud.availability_zone_info: + name: 'eu-de' register: az ''' diff --git a/plugins/modules/css_cluster_info.py b/plugins/modules/css_cluster_info.py index 60ff30c4..c8bdef5f 100644 --- a/plugins/modules/css_cluster_info.py +++ b/plugins/modules/css_cluster_info.py @@ -174,6 +174,9 @@ EXAMPLES = ''' # Get info about clusters - opentelekomcloud.cloud.css_cluster_info: + name: "css-ea59" + start: 1 + limit: 5 register: result ''' diff --git a/plugins/modules/css_snapshot_info.py b/plugins/modules/css_snapshot_info.py index 439023bf..60812a8b 100644 --- a/plugins/modules/css_snapshot_info.py +++ b/plugins/modules/css_snapshot_info.py @@ -96,13 +96,10 @@ EXAMPLES = ''' #Query CSS Snapshots ---- -- hosts: localhost - tasks: - - name: Get CSS Snapshots - opentelekomcloud.cloud.css_snapshot_info: - cluster: test - register: result + +- opentelekomcloud.cloud.css_snapshot_info: + cluster: 'test' + register: result ''' diff --git a/plugins/modules/dds_instance_info.py b/plugins/modules/dds_instance_info.py index c7ca29db..ec7fb0ea 100644 --- a/plugins/modules/dds_instance_info.py +++ b/plugins/modules/dds_instance_info.py @@ -244,6 +244,11 @@ EXAMPLES = ''' # Get info about instances - opentelekomcloud.cloud.dds_instance_info: + vpc_id: "7ea09482-793a-4aed-b4ce-447113d10d96" + register: result + +- opentelekomcloud.cloud.dds_instance_info: + mode: "replicaset" register: result ''' diff --git a/plugins/modules/dms_instance.py b/plugins/modules/dms_instance.py index 46fc6092..f23c0044 100644 --- a/plugins/modules/dms_instance.py +++ b/plugins/modules/dms_instance.py @@ -112,7 +112,11 @@ storage_spec_code: description: - Indicates I/O specification of a Kafka instance. + - When specification is 100MB or 300MB, the storage I/O is + - dms.physical.storage.high or dms.physical.storage.ultra + - When specification is 600MB or 1200MB, the storage I/O is dms.physical.storage.ultra. 
- Required for creation + choices: [dms.physical.storage.high, dms.physical.storage.ultra] type: str state: choices: [present, absent] @@ -193,7 +197,8 @@ class DmsInstanceModule(OTCModule): enable_publicip=dict(required=False, type='bool'), public_bandwidth=dict(required=False), retention_policy=dict(required=False), - storage_spec_code=dict(required=False), + storage_spec_code=dict(required=False, choices=['dms.physical.storage.high', + 'dms.physical.storage.ultra']), state=dict(type='str', choices=['present', 'absent'], default='present') ) module_kwargs = dict( diff --git a/plugins/modules/dms_instance_info.py b/plugins/modules/dms_instance_info.py index ad3f0350..6e537191 100644 --- a/plugins/modules/dms_instance_info.py +++ b/plugins/modules/dms_instance_info.py @@ -35,6 +35,7 @@ status: description: - Instance Status + choices: [creating, running, error, starting, closing, frozen, createfailed] type: str includeFailure: description: @@ -105,6 +106,9 @@ EXAMPLES = ''' # Query all Instances - opentelekomcloud.cloud.dms_instance_info: + +- opentelekomcloud.cloud.dms_instance_info: + status: createfailed ''' from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule @@ -115,7 +119,8 @@ class DmsInstanceInfoModule(OTCModule): engine=dict(required=False), name=dict(required=False), id=dict(required=False), - status=dict(required=False), + status=dict(required=False, choices=['creating', 'running', 'error', 'starting', 'closing', + 'frozen', 'createfailed']), includeFailure=dict(required=False, type='bool', default='true'), exactMatchName=dict(required=False, type='bool', default='false'), ) @@ -135,7 +140,7 @@ def run(self): if self.params['id']: query['id'] = self.params['id'] if self.params['status']: - query['status'] = self.params['status'] + query['status'] = self.params['status'].upper() if self.params['includeFailure']: query['includeFailure'] = self.params['includeFailure'] if self.params['exactMatchName']: diff --git a/plugins/modules/dms_queue.py b/plugins/modules/dms_queue.py index 31d97366..7cd6035c 100644 --- a/plugins/modules/dms_queue.py +++ b/plugins/modules/dms_queue.py @@ -28,8 +28,9 @@ queue_mode: description: - Indicates the queue type. + choices: [normal, fifo, kafka_ha, kafka_ht] type: str - default: NORMAL + default: normal description: description: - Description. 
@@ -92,12 +93,15 @@ EXAMPLES = ''' # Create Queue - opentelekomcloud.cloud.dms_queue: - name: 'test-queue' + name: "test_dms_queue" + queue_mode: "fifo" + redrive_policy: "enable" + max_consume_count: "9" state: present # Delete Queue - opentelekomcloud.cloud.dms_queue: - name: 'test-queue' + name: 'test_dms_queue' state: absent ''' @@ -107,7 +111,8 @@ class DmsQueueModule(OTCModule): argument_spec = dict( name=dict(required=True), - queue_mode=dict(required=False, default='NORMAL'), + queue_mode=dict(required=False, choices=['normal', 'fifo', 'kafka_ha', 'kafka_ht'], + default='normal'), description=dict(required=False), redrive_policy=dict(required=False, default='disable'), max_consume_count=dict(required=False, type='int'), @@ -128,7 +133,7 @@ def run(self): if not queue: attrs['name'] = self.params['name'] if self.params['queue_mode']: - attrs['queue_mode'] = self.params['queue_mode'] + attrs['queue_mode'] = self.params['queue_mode'].upper() if self.params['description']: attrs['description'] = self.params['description'] if self.params['redrive_policy']: diff --git a/plugins/modules/dns_recordset_info.py b/plugins/modules/dns_recordset_info.py index f39ac79a..cf0b02a5 100644 --- a/plugins/modules/dns_recordset_info.py +++ b/plugins/modules/dns_recordset_info.py @@ -86,7 +86,8 @@ EXAMPLES = ''' #Get info about choosen DNS recordset. - opentelekomcloud.cloud.dns_recordset_info: - zone: "test.zone." + zone: "ff80808275f5fc0f017e886898315ee9" + name: "ff80808275f5fc0f017e886898315ee2" register: recordsets ''' diff --git a/plugins/modules/lb_healthmonitor_info.py b/plugins/modules/lb_healthmonitor_info.py index 3a9406cd..84f70b3f 100644 --- a/plugins/modules/lb_healthmonitor_info.py +++ b/plugins/modules/lb_healthmonitor_info.py @@ -132,6 +132,12 @@ - lb_healthmonitor_info: name: hm-test register: healthmonitor + +- opentelekomcloud.cloud.lb_healthmonitor_info: + type: "http" + admin_state_up: False + expected_codes: "200,202,401" + register: healthmonitor ''' from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule diff --git a/plugins/modules/nat_dnat_rule_info.py b/plugins/modules/nat_dnat_rule_info.py index 50b1e2ad..ed5d3e7e 100644 --- a/plugins/modules/nat_dnat_rule_info.py +++ b/plugins/modules/nat_dnat_rule_info.py @@ -136,6 +136,7 @@ EXAMPLES = ''' # Get configs versions. - nat_dnat_rule_info: + floating_ip: "123.1.2.3" register: dn - nat_dnat_rule_info: diff --git a/plugins/modules/rds_instance_info.py b/plugins/modules/rds_instance_info.py index 7f36ec12..88aedf72 100644 --- a/plugins/modules/rds_instance_info.py +++ b/plugins/modules/rds_instance_info.py @@ -58,6 +58,8 @@ EXAMPLES = ''' # Get Instances. 
- rds_instance_info: + datastore_type: "mysql" + router: "7ea09482-793a-4aed-b4ce-447113d10d69" register: rds - rds_instance_info: diff --git a/plugins/modules/volume_backup.py b/plugins/modules/volume_backup.py index a6f8e3b5..3634ab04 100644 --- a/plugins/modules/volume_backup.py +++ b/plugins/modules/volume_backup.py @@ -94,6 +94,15 @@ ''' EXAMPLES = ''' + +# Add volume backup +- opentelekomcloud.cloud.volume_backup: + name: "test_vbs_backup" + description: "my test backup" + state: present + volume: ecs-7b0 + force: True + incremental: True ''' from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule diff --git a/plugins/modules/waf_certificate_info.py b/plugins/modules/waf_certificate_info.py index 8e1cb099..16db9f89 100644 --- a/plugins/modules/waf_certificate_info.py +++ b/plugins/modules/waf_certificate_info.py @@ -52,6 +52,7 @@ EXAMPLES = ''' # Get Certificates. - waf_certificate_info: + name: SDK-Daccde21b17b7d4617bb7d4617b register: cert ''' diff --git a/plugins/modules/waf_domain_info.py b/plugins/modules/waf_domain_info.py index 767ef09e..c982be1a 100644 --- a/plugins/modules/waf_domain_info.py +++ b/plugins/modules/waf_domain_info.py @@ -72,6 +72,7 @@ EXAMPLES = ''' # Get Domain. - waf_domain_info: + name: "{{ domain_id }}" register: domain ''' From 0bd04845a6e68d0e591f1d07fe38ab46cdeefdb5 Mon Sep 17 00:00:00 2001 From: Artem Goncharov Date: Fri, 18 Feb 2022 14:27:30 +0100 Subject: [PATCH 40/65] Add override router (#175) Add override router Due to another incompatibility we need to override router module and drop external_fixed_ips from it. Reviewed-by: Anton Sidelnikov Reviewed-by: kucerakk --- meta/runtime.yml | 1 + plugins/modules/router.py | 471 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 472 insertions(+) create mode 100644 plugins/modules/router.py diff --git a/meta/runtime.yml b/meta/runtime.yml index 20476b24..e891c7bc 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -45,6 +45,7 @@ action_groups: - rds_instance - rds_backup - rds_backup_info + - router - security_group - security_group_info - server_group_info diff --git a/plugins/modules/router.py b/plugins/modules/router.py new file mode 100644 index 00000000..734c9155 --- /dev/null +++ b/plugins/modules/router.py @@ -0,0 +1,471 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = ''' +--- +module: router +short_description: Create or delete routers from OpenStack +author: OpenStack Ansible SIG +description: + - Create or Delete routers from OpenStack. Although Neutron allows + routers to share the same name, this module enforces name uniqueness + to be more user friendly. +options: + state: + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present + type: str + name: + description: + - Name to be give to the router + required: true + type: str + admin_state_up: + description: + - Desired admin state of the created or existing router. + type: bool + default: 'yes' + enable_snat: + description: + - Enable Source NAT (SNAT) attribute. + type: bool + network: + description: + - Unique name or ID of the external gateway network. + - required I(interfaces) or I(enable_snat) are provided. + type: str + project: + description: + - Unique name or ID of the project. + type: str + interfaces: + description: + - List of subnets to attach to the router internal interface. 
Default + gateway associated with the subnet will be automatically attached + with the router's internal interface. + In order to provide an ip address different from the default + gateway,parameters are passed as dictionary with keys as network + name or ID (I(net)), subnet name or ID (I(subnet)) and the IP of + port (I(portip)) from the network. + User defined portip is often required when a multiple router need + to be connected to a single subnet for which the default gateway has + been already used. + type: list + elements: raw +requirements: + - "python >= 3.6" + - "openstacksdk" + +extends_documentation_fragment: +- opentelekomcloud.cloud.otc +''' + +EXAMPLES = ''' +# Create a simple router, not attached to a gateway or subnets. +- opentelekomcloud.cloud.router: + cloud: mycloud + state: present + name: simple_router + +# Create a simple router, not attached to a gateway or subnets for a given project. +- opentelekomcloud.cloud.router: + cloud: mycloud + state: present + name: simple_router + project: myproj + +# Creates a router attached to ext_network1 on an IPv4 subnet and one +# internal subnet interface. +- opentelekomcloud.cloud.router: + cloud: mycloud + state: present + name: router1 + network: ext_network1 + external_fixed_ips: + - subnet: public-subnet + ip: 172.24.4.2 + interfaces: + - private-subnet + +# Create another router with two internal subnet interfaces.One with user defined port +# ip and another with default gateway. +- opentelekomcloud.cloud.router: + cloud: mycloud + state: present + name: router2 + network: ext_network1 + interfaces: + - net: private-net + subnet: private-subnet + portip: 10.1.1.10 + - project-subnet + +# Create another router with two internal subnet interface.One with user defined port +# ip and and another with default gateway. +- opentelekomcloud.cloud.router: + cloud: mycloud + state: present + name: router2 + network: ext_network1 + interfaces: + - net: private-net + subnet: private-subnet + portip: 10.1.1.10 + - project-subnet + +# Create another router with two internal subnet interface. one with user defined port +# ip and and another with default gateway. +- opentelekomcloud.cloud.router: + cloud: mycloud + state: present + name: router2 + network: ext_network1 + interfaces: + - net: private-net + subnet: private-subnet + portip: 10.1.1.10 + - project-subnet + +# Update existing router1 external gateway to include the IPv6 subnet. +# Note that since 'interfaces' is not provided, any existing internal +# interfaces on an existing router will be left intact. +- opentelekomcloud.cloud.router: + cloud: mycloud + state: present + name: router1 + network: ext_network1 + external_fixed_ips: + - subnet: public-subnet + ip: 172.24.4.2 + - subnet: ipv6-public-subnet + ip: 2001:db8::3 + +# Delete router1 +- opentelekomcloud.cloud.router: + cloud: mycloud + state: absent + name: router1 +''' + +RETURN = ''' +router: + description: Dictionary describing the router. + returned: On success when I(state) is 'present' + type: complex + contains: + id: + description: Router ID. + type: str + sample: "474acfe5-be34-494c-b339-50f06aa143e4" + name: + description: Router name. + type: str + sample: "router1" + admin_state_up: + description: Administrative state of the router. + type: bool + sample: true + status: + description: The router status. + type: str + sample: "ACTIVE" + tenant_id: + description: The tenant ID. + type: str + sample: "861174b82b43463c9edc5202aadc60ef" + external_gateway_info: + description: The external gateway parameters. 
+ type: dict + sample: { + "enable_snat": true, + "external_fixed_ips": [ + { + "ip_address": "10.6.6.99", + "subnet_id": "4272cb52-a456-4c20-8f3c-c26024ecfa81" + } + ] + } + routes: + description: The extra routes configuration for L3 router. + type: list +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +ROUTER_INTERFACE_OWNERS = set([ + 'network:router_interface', + 'network:router_interface_distributed', + 'network:ha_router_replicated_interface' +]) + + +class RouterModule(OTCModule): + argument_spec = dict( + state=dict(default='present', choices=['absent', 'present']), + name=dict(required=True), + admin_state_up=dict(type='bool', default=True), + enable_snat=dict(type='bool'), + network=dict(default=None), + interfaces=dict(type='list', default=None, elements='raw'), + # external_fixed_ips=dict(type='list', default=None, elements='dict'), + project=dict(default=None) + ) + + def _router_internal_interfaces(self, router): + for port in self.conn.list_router_interfaces(router, 'internal'): + if port['device_owner'] in ROUTER_INTERFACE_OWNERS: + yield port + + def _needs_update(self, router, network, internal_subnet_ids, internal_port_ids, filters=None): + """Decide if the given router needs an update. + """ + if router['admin_state_up'] != self.params['admin_state_up']: + return True + if router['external_gateway_info']: + # check if enable_snat is set in module params + if self.params['enable_snat'] is not None: + if router['external_gateway_info'].get('enable_snat', True) != self.params['enable_snat']: + return True + if network: + if not router['external_gateway_info']: + return True + elif router['external_gateway_info']['network_id'] != network['id']: + return True + + # OTC does not support external_fixed_ips in the response. + # Comment it out for now. 
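+        # Summary of the update triggers checked in this method: a differing
+        # admin_state_up, an explicit enable_snat that no longer matches,
+        # a changed external gateway network, and (further below) a change in
+        # the set of subnet IDs behind the internal interfaces.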
+ + # check external interfaces + # if self.params['external_fixed_ips']: + # for new_iface in self.params['external_fixed_ips']: + # subnet = self.conn.get_subnet(new_iface['subnet'], filters) + # exists = False + + # # compare the requested interface with existing, looking for an existing match + # for existing_iface in router['external_gateway_info']['external_fixed_ips']: + # if existing_iface['subnet_id'] == subnet['id']: + # if 'ip' in new_iface: + # if existing_iface['ip_address'] == new_iface['ip']: + # # both subnet id and ip address match + # exists = True + # break + # else: + # # only the subnet was given, so ip doesn't matter + # exists = True + # break + + # # this interface isn't present on the existing router + # if not exists: + # return True + + # check internal interfaces + if self.params['interfaces']: + existing_subnet_ids = [] + for port in self._router_internal_interfaces(router): + if 'fixed_ips' in port: + for fixed_ip in port['fixed_ips']: + existing_subnet_ids.append(fixed_ip['subnet_id']) + + for iface in self.params['interfaces']: + if isinstance(iface, dict): + for p_id in internal_port_ids: + p = self.conn.get_port(name_or_id=p_id) + if 'fixed_ips' in p: + for fip in p['fixed_ips']: + internal_subnet_ids.append(fip['subnet_id']) + + if set(internal_subnet_ids) != set(existing_subnet_ids): + return True + + return False + + def _system_state_change(self, router, network, internal_ids, internal_portids, filters=None): + """Check if the system state would be changed.""" + state = self.params['state'] + if state == 'absent' and router: + return True + if state == 'present': + if not router: + return True + return self._needs_update(router, network, internal_ids, internal_portids, filters) + return False + + def _build_kwargs(self, router, network): + kwargs = { + 'admin_state_up': self.params['admin_state_up'], + } + + if router: + kwargs['name_or_id'] = router['id'] + else: + kwargs['name'] = self.params['name'] + + if network: + kwargs['ext_gateway_net_id'] = network['id'] + # can't send enable_snat unless we have a network + if self.params.get('enable_snat') is not None: + kwargs['enable_snat'] = self.params['enable_snat'] + + # if self.params['external_fixed_ips']: + # kwargs['ext_fixed_ips'] = [] + # for iface in self.params['external_fixed_ips']: + # subnet = self.conn.get_subnet(iface['subnet']) + # d = {'subnet_id': subnet['id']} + # if 'ip' in iface: + # d['ip_address'] = iface['ip'] + # kwargs['ext_fixed_ips'].append(d) + + return kwargs + + def _validate_subnets(self, filters=None): + external_subnet_ids = [] + internal_subnet_ids = [] + internal_port_ids = [] + existing_port_ips = [] + # if self.params['external_fixed_ips']: + # for iface in self.params['external_fixed_ips']: + # subnet = self.conn.get_subnet(iface['subnet']) + # if not subnet: + # self.fail_json(msg='subnet %s not found' % iface['subnet']) + # external_subnet_ids.append(subnet['id']) + + if self.params['interfaces']: + for iface in self.params['interfaces']: + if isinstance(iface, str): + subnet = self.conn.get_subnet(iface, filters) + if not subnet: + self.fail(msg='subnet %s not found' % iface) + internal_subnet_ids.append(subnet['id']) + elif isinstance(iface, dict): + subnet = self.conn.get_subnet(iface['subnet'], filters) + if not subnet: + self.fail(msg='subnet %s not found' % iface['subnet']) + net = self.conn.get_network(iface['net']) + if not net: + self.fail(msg='net %s not found' % iface['net']) + if "portip" not in iface: + internal_subnet_ids.append(subnet['id']) + elif 
not iface['portip']: + self.fail(msg='put an ip in portip or remove it from list to assign default port to router') + else: + for existing_port in self.conn.list_ports(filters={'network_id': net.id}): + for fixed_ip in existing_port['fixed_ips']: + if iface['portip'] == fixed_ip['ip_address']: + internal_port_ids.append(existing_port.id) + existing_port_ips.append(fixed_ip['ip_address']) + if iface['portip'] not in existing_port_ips: + p = self.conn.create_port(network_id=net.id, fixed_ips=[ + { + 'ip_address': iface['portip'], + 'subnet_id': subnet.id + } + ]) + if p: + internal_port_ids.append(p.id) + + return external_subnet_ids, internal_subnet_ids, internal_port_ids + + def run(self): + + state = self.params['state'] + name = self.params['name'] + network = self.params['network'] + project = self.params['project'] + + # if self.params['external_fixed_ips'] and not network: + # self.fail_json(msg='network is required when supplying external_fixed_ips') + + if project is not None: + proj = self.conn.get_project(project) + if proj is None: + self.fail(msg='Project %s could not be found' % project) + project_id = proj['id'] + filters = {'tenant_id': project_id} + else: + project_id = None + filters = None + + router = self.conn.get_router(name, filters=filters) + net = None + if network: + net = self.conn.get_network(network) + if not net: + self.fail(msg='network %s not found' % network) + + # Validate and cache the subnet IDs so we can avoid duplicate checks + # and expensive API calls. + external_ids, subnet_internal_ids, internal_portids = self._validate_subnets(filters) + if self.ansible.check_mode: + self.exit_json( + changed=self._system_state_change(router, net, subnet_internal_ids, internal_portids, filters) + ) + + if state == 'present': + changed = False + + if not router: + kwargs = self._build_kwargs(router, net) + if project_id: + kwargs['project_id'] = project_id + router = self.conn.create_router(**kwargs) + for int_s_id in subnet_internal_ids: + self.conn.add_router_interface(router, subnet_id=int_s_id) + # add interface by port id as well + for int_p_id in internal_portids: + self.conn.add_router_interface(router, port_id=int_p_id) + changed = True + else: + if self._needs_update(router, net, subnet_internal_ids, internal_portids, filters): + kwargs = self._build_kwargs(router, net) + updated_router = self.conn.update_router(**kwargs) + + # Protect against update_router() not actually + # updating the router. + if not updated_router: + changed = False + + # On a router update, if any internal interfaces were supplied, + # just detach all existing internal interfaces and attach the new. + if internal_portids or subnet_internal_ids: + router = updated_router + ports = self._router_internal_interfaces(router) + for port in ports: + self.conn.remove_router_interface(router, port_id=port['id']) + if internal_portids: + external_ids, subnet_internal_ids, internal_portids = self._validate_subnets(filters) + for int_p_id in internal_portids: + self.conn.add_router_interface(router, port_id=int_p_id) + changed = True + if subnet_internal_ids: + for s_id in subnet_internal_ids: + self.conn.add_router_interface(router, subnet_id=s_id) + changed = True + + self.exit(changed=changed, router=router, id=router['id']) + + elif state == 'absent': + if not router: + self.exit(changed=False) + else: + # We need to detach all internal interfaces on a router before + # we will be allowed to delete it. 
+ ports = self._router_internal_interfaces(router) + router_id = router['id'] + for port in ports: + self.conn.remove_router_interface(router, port_id=port['id']) + self.conn.delete_router(router_id) + self.exit_json(changed=True) + + +def main(): + module = RouterModule() + module() + + +if __name__ == '__main__': + main() From 3ef074c6157af7ca0174191b4b5e5992875ae69f Mon Sep 17 00:00:00 2001 From: Vladimir Vshivkov <32225815+enrrou@users.noreply.github.com> Date: Fri, 25 Feb 2022 16:41:25 +0400 Subject: [PATCH 41/65] vpc and dns_zone fixes (#177) vpc and dns_zone fixes May close issue (#176) Reviewed-by: Anton Sidelnikov Reviewed-by: Irina Pereiaslavskaia Reviewed-by: Anton Kachurin Reviewed-by: Rodion Gyrbu --- plugins/modules/dns_zone.py | 49 ++++++++++++++++++------------------- plugins/modules/vpc.py | 26 ++++++++++---------- 2 files changed, 37 insertions(+), 38 deletions(-) diff --git a/plugins/modules/dns_zone.py b/plugins/modules/dns_zone.py index 3f1edd0a..e3ec924f 100644 --- a/plugins/modules/dns_zone.py +++ b/plugins/modules/dns_zone.py @@ -133,21 +133,21 @@ class DNSZonesModule(OTCModule): def run(self): changed = False - query = {} - - if self.params['zone_type'] == 'private': - query['type'] = self.params['zone_type'] - query['name_or_id'] = self.params['name'] - query['ignore_missing'] = True - zo = self.conn.dns.find_zone(**query) - if zo: - zone_id = zo.id - zone_desc = zo.description - zone_ttl = zo.ttl - zone_email = zo.email - zone_check = True + attrs = {} + query = { + 'type': self.params['zone_type'], + 'name_or_id': self.params['name'] + } + + zone = self.conn.dns.find_zone(**query) + if zone: + zone_id = zone.id + zone_desc = zone.description + zone_ttl = zone.ttl + zone_email = zone.email + needs_update = True else: - zone_check = False + needs_update = False if self.params['state'] == 'absent': self.exit( changed=False, @@ -166,16 +166,15 @@ def run(self): changed = True if self.params['state'] == 'present': - attrs = {} if self.ansible.check_mode: self.exit_json(changed=True) - if zone_check is False: + if not needs_update: # Check if VPC exists if self.params['zone_type'] == 'private': if not self.params['router']: self.exit( changed=False, - message=('No Router specified, but needed for creation') + message='No Router specified, but needed for creation' ) ro = self.conn.network.find_router( name_or_id=self.params['router'], @@ -193,31 +192,31 @@ def run(self): message=('No Router found with name or id: %s' % self.params['router']) ) - attrs['zone_type'] = self.params['zone_type'] + if self.params['zone_type']: + attrs['zone_type'] = self.params['zone_type'] if self.params['description']: attrs['description'] = self.params['description'] if self.params['email']: attrs['email'] = self.params['email'] if self.params['ttl']: attrs['ttl'] = self.params['ttl'] - attrs['name'] = self.params['name'] - + if self.params['name']: + attrs['name'] = self.params['name'] zone = self.conn.dns.create_zone(**attrs) self.exit(changed=True, zone=zone.to_dict()) - if zone_check is True: + if needs_update: changed = False - if self.params['description'] and self.params['description'] != zone_desc: + if self.params['description'] != zone_desc: attrs['description'] = self.params['description'] changed = True - if self.params['email'] and self.params['email'] != zone_email: + if self.params['email'] != zone_email: attrs['email'] = self.params['email'] changed = True - if self.params['ttl'] and self.params['ttl'] != zone_ttl: + if self.params['ttl'] != zone_ttl: attrs['ttl'] = 
self.params['ttl'] changed = True attrs['zone'] = zone_id - zone = self.conn.dns.update_zone(**attrs) self.exit(changed=changed, zone=zone.to_dict()) diff --git a/plugins/modules/vpc.py b/plugins/modules/vpc.py index a91b01d8..278f8a21 100644 --- a/plugins/modules/vpc.py +++ b/plugins/modules/vpc.py @@ -151,6 +151,7 @@ class VpcModule(OTCModule): def run(self): query = {} + attrs = {} state = self.params['state'] name = self.params['name'] description = self.params['description'] @@ -165,14 +166,9 @@ def run(self): if cidr: query['cidr'] = cidr - vpc = None - if name: - vpc = self.conn.vpc.find_vpc(name, ignore_missing=True) + vpc = self.conn.vpc.find_vpc(name, ignore_missing=True) if state == 'present': - if self.ansible.check_mode: - self.exit(changed=True) - if not vpc: new_vpc = self.conn.vpc.create_vpc(**query) if routes or enable_shared_snat is not None: @@ -183,14 +179,18 @@ def run(self): query_update['enable_shared_snat'] = enable_shared_snat new_vpc = self.conn.vpc.update_vpc(vpc=new_vpc, **query_update) self.exit(changed=True, vpc=new_vpc) - else: - if routes: - query['routes'] = routes - if enable_shared_snat is not None: - query['enable_shared_snat'] = enable_shared_snat - updated_vpc = self.conn.vpc.update_vpc(vpc=vpc, **query) - self.exit(changed=True, vpc=updated_vpc) + if cidr != vpc['cidr']: + attrs['cidr'] = cidr + if name != vpc['name']: + attrs['name'] = name + if enable_shared_snat != vpc['enable_shared_snat']: + attrs['enable_shared_snat'] = enable_shared_snat + if attrs: + updated_vpc = self.conn.vpc.update_vpc(vpc=vpc.id, **attrs) + self.exit_json(changed=True, vpc=updated_vpc) + else: + self.exit(changed=False, vpc=vpc) else: if vpc: if not self.ansible.check_mode: From b1cebc3eba21feb03ba0f25faf0f827d482d3e27 Mon Sep 17 00:00:00 2001 From: Vladimir Vshivkov <32225815+enrrou@users.noreply.github.com> Date: Fri, 25 Feb 2022 18:33:00 +0400 Subject: [PATCH 42/65] Update collection version (#179) Update collection version to 0.12.0 Reviewed-by: Polina Gubina Reviewed-by: Rodion Gyrbu Reviewed-by: Anton Kachurin Reviewed-by: Anton Sidelnikov --- doc/source/index.rst | 2 +- galaxy.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index 2d819669..ba6e23c7 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1,7 +1,7 @@ Opentelekomcloud.Cloud ====================== -Collection version 0.11.0 +Collection version 0.12.0 Plugin Index diff --git a/galaxy.yml b/galaxy.yml index 6ea48335..abbbfe5d 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: opentelekomcloud name: cloud -version: 0.11.0 +version: 0.12.0 readme: README.md authors: - Artem Goncharov From 61c5f393b2c7f118738ddf008e31b3ef79541cc5 Mon Sep 17 00:00:00 2001 From: Vladimir Vshivkov <32225815+enrrou@users.noreply.github.com> Date: Tue, 1 Mar 2022 16:47:58 +0400 Subject: [PATCH 43/65] Subnet fix (#181) Subnet fix Fixes issue #178 Reviewed-by: Rodion Gyrbu Reviewed-by: Anton Kachurin Reviewed-by: Anton Sidelnikov --- doc/source/index.rst | 3 ++- galaxy.yml | 2 +- plugins/modules/subnet.py | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index ba6e23c7..5ea9feca 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1,7 +1,8 @@ Opentelekomcloud.Cloud ====================== -Collection version 0.12.0 +Collection version 0.12.1 + Plugin Index diff --git a/galaxy.yml b/galaxy.yml index abbbfe5d..91a63929 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 
+1,6 @@ namespace: opentelekomcloud name: cloud -version: 0.12.0 +version: 0.12.1 readme: README.md authors: - Artem Goncharov diff --git a/plugins/modules/subnet.py b/plugins/modules/subnet.py index 1cd19583..67507510 100644 --- a/plugins/modules/subnet.py +++ b/plugins/modules/subnet.py @@ -232,7 +232,7 @@ class SubnetModule(OTCModule): ) _update_fields = {'dns_list', 'primary_dns', 'secondary_dns', 'extra_dhcp_opts'} - _update_forbidden = {'cidr', 'gateway_ip', 'description'} + _update_forbidden = {'cidr', 'gateway_ip'} def run(self): vpc = self.conn.vpc.find_vpc(self.params['vpc']) From e2f9a04ddb0376aa5c9d3cb42709fea171b9a00d Mon Sep 17 00:00:00 2001 From: Vladimir Vshivkov <32225815+enrrou@users.noreply.github.com> Date: Wed, 2 Mar 2022 15:18:47 +0400 Subject: [PATCH 44/65] fix rds region (#182) fix RDS region hardcode Resolves issue for other regions (#180) Reviewed-by: Artem Goncharov Reviewed-by: Vladimir Vshivkov Reviewed-by: Polina Gubina Reviewed-by: Rodion Gyrbu Reviewed-by: Anton Kachurin --- plugins/modules/rds_instance.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/plugins/modules/rds_instance.py b/plugins/modules/rds_instance.py index ed6d6a8e..299156ba 100644 --- a/plugins/modules/rds_instance.py +++ b/plugins/modules/rds_instance.py @@ -74,10 +74,9 @@ description: Database port type: int region: - choices: [eu-de] - default: eu-de description: Database region type: str + default: eu-de replica_of: description: Instance ID to create the replica of type: str @@ -174,7 +173,7 @@ class RdsInstanceModule(OTCModule): network=dict(type='str'), password=dict(type='str', no_log=True), port=dict(type='int'), - region=dict(type='str', choices=['eu-de'], default='eu-de'), + region=dict(type='str', default='eu-de'), replica_of=dict(type='str'), router=dict(type='str'), security_group=dict(type='str'), From 229479861a1ea9ddd6986ab9832d86c75bf453a3 Mon Sep 17 00:00:00 2001 From: Vladimir Vshivkov <32225815+enrrou@users.noreply.github.com> Date: Wed, 2 Mar 2022 16:30:43 +0400 Subject: [PATCH 45/65] version 0.12.2 (#183) version 0.12.2 Reviewed-by: Rodion Gyrbu Reviewed-by: Anton Kachurin --- doc/source/index.rst | 2 +- galaxy.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index 5ea9feca..e61ea0f4 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1,7 +1,7 @@ Opentelekomcloud.Cloud ====================== -Collection version 0.12.1 +Collection version 0.12.2 diff --git a/galaxy.yml b/galaxy.yml index 91a63929..4edb7cb3 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: opentelekomcloud name: cloud -version: 0.12.1 +version: 0.12.2 readme: README.md authors: - Artem Goncharov From e1132e0b7075ea3eb5c1fc76115f51b5973ab781 Mon Sep 17 00:00:00 2001 From: Vladimir Vshivkov <32225815+vladimirvshivkov@users.noreply.github.com> Date: Wed, 30 Mar 2022 15:33:35 +0400 Subject: [PATCH 46/65] freeze ansible-lint version (#189) freeze ansible-lint version Reviewed-by: Anton Sidelnikov Reviewed-by: Anton Kachurin --- .ansible-lint | 3 ++- test-requirements.txt | 2 +- tests/integration/targets/as_config/tasks/main.yaml | 2 +- tests/integration/targets/as_group/tasks/main.yaml | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.ansible-lint b/.ansible-lint index ab70b73d..85006fac 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -6,4 +6,5 @@ skip_list: - '106' # Role name does not match ``^[a-z][a-z0-9_]+$`` pattern - '204' # Lines should be no longer than 160 
chars - '301' # Commands should not change things if nothing needs doing - - '701' # No 'galaxy_info' found + - '701' # No 'galaxy_info' found\ + - fqcn-builtins diff --git a/test-requirements.txt b/test-requirements.txt index 38ca38d2..39949628 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,6 +1,6 @@ openstacksdk ansible-base -ansible-lint +ansible-lint==5.4.0 pycodestyle==2.6.0 flake8==3.8.4 pylint diff --git a/tests/integration/targets/as_config/tasks/main.yaml b/tests/integration/targets/as_config/tasks/main.yaml index 6c403b99..a5208122 100644 --- a/tests/integration/targets/as_config/tasks/main.yaml +++ b/tests/integration/targets/as_config/tasks/main.yaml @@ -38,7 +38,7 @@ opentelekomcloud.cloud.as_config: scaling_configuration: "{{ config_name }}" key_name: "{{ key_name }}" - image: "Standard_Debian_9_latest" + image: "Standard_Debian_10_latest" flavor: "c4.2xlarge.2" disk: - size: 10 diff --git a/tests/integration/targets/as_group/tasks/main.yaml b/tests/integration/targets/as_group/tasks/main.yaml index 04f11fec..a6616039 100644 --- a/tests/integration/targets/as_group/tasks/main.yaml +++ b/tests/integration/targets/as_group/tasks/main.yaml @@ -63,7 +63,7 @@ opentelekomcloud.cloud.as_config: scaling_configuration: "{{ config_name }}" key_name: "{{ key_name }}" - image: "Standard_Debian_9_latest" + image: "Standard_Debian_10_latest" flavor: "c4.2xlarge.2" disk: - size: 10 From aee4cfddedc9070759ba91b2d360e07abc2ef063 Mon Sep 17 00:00:00 2001 From: Vladimir Vshivkov <32225815+vladimirvshivkov@users.noreply.github.com> Date: Wed, 30 Mar 2022 16:23:10 +0400 Subject: [PATCH 47/65] [CSS] cluster tags fixes (#187) [CSS] cluster tags fixes Closes #184 Reviewed-by: Anton Sidelnikov Reviewed-by: Rodion Gyrbu Reviewed-by: Anton Kachurin --- plugins/modules/css_cluster.py | 38 +++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/plugins/modules/css_cluster.py b/plugins/modules/css_cluster.py index 15abb650..95ae74ea 100644 --- a/plugins/modules/css_cluster.py +++ b/plugins/modules/css_cluster.py @@ -121,16 +121,20 @@ - Security group ID. All instances in a cluster must have the same subnets and security groups. type: str - tag_key: + tags: description: - - Tag key. The value can contain 1 to 36 characters. Only digits, letters, - hyphens (-) and underscores (_) are allowed. - type: str - tag_value: - description: - - Tag value. The value can contain 0 to 43 characters. Only digits, - letters, hyphens (-) and underscores (_) are allowed. - type: str + - Tags in a cluster. + type: list + elements: dict + suboptions: + key: + description: + - Tag key. The value can contain 1 to 36 characters. + Only digits, letters, hyphens (-) and underscores (_) are allowed. + value: + description: + - Tag value. The value can contain 0 to 43 characters. + Only digits, letters, hyphens (-) and underscores (_) are allowed. backup_period: description: - Time when a snapshot is created every day. 
Snapshots can only be created @@ -190,6 +194,11 @@ flavor: 'css.xlarge.2' https_enable: false system_encrypted: 0 + tags: + - 'key': "key0" + 'value': "value0" + - 'key': "key1" + 'value': "value1" #Delete CSS Cluster - hosts: localhost @@ -220,8 +229,7 @@ class CssClusterModule(OTCModule): router=dict(type='str'), net=dict(type='str'), security_group=dict(type='str'), - tag_key=dict(type='str'), - tag_value=dict(type='str'), + tags=dict(required=False, type='list', elements='dict'), backup_period=dict(type='str'), backup_prefix=dict(type='str'), backup_keepday=dict(type='int'), @@ -282,7 +290,6 @@ def run(self): if not cluster: changed = True - volume_type = self.params['volume_type'] attrs = { @@ -318,17 +325,14 @@ def run(self): attrs['authorityEnable'] = self.params['authority_enable'] if self.params['admin_pwd']: attrs['adminPwd'] = self.params['admin_pwd'] - if self.params['tag_key']: - attrs['tags']['key'] = self.params['tag_key'] - if self.params['tag_value']: - attrs['tags']['value'] = self.params['tag_value'] + if self.params['tags']: + attrs['tags'] = self.params['tags'] if self.params['backup_period']: attrs['backupStrategy']['period'] = self.params['backup_period'] if self.params['backup_prefix']: attrs['backupStrategy']['prefix'] = self.params['backup_prefix'] if self.params['backup_keepday']: attrs['backupStrategy']['keepday'] = self.params['backup_keepday'] - cluster = self.conn.css.create_cluster(**attrs) self.exit_json( From b8a87402b53094bd81ed7e46a39053269a37eff0 Mon Sep 17 00:00:00 2001 From: Vladimir Vshivkov <32225815+vladimirvshivkov@users.noreply.github.com> Date: Wed, 30 Mar 2022 17:42:30 +0400 Subject: [PATCH 48/65] version 0.12.3 (#190) version 0.12.3 Reviewed-by: Rodion Gyrbu Reviewed-by: Anton Kachurin Reviewed-by: Anton Sidelnikov --- doc/source/index.rst | 2 +- galaxy.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index e61ea0f4..ec9ef97c 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1,7 +1,7 @@ Opentelekomcloud.Cloud ====================== -Collection version 0.12.2 +Collection version 0.12.3 diff --git a/galaxy.yml b/galaxy.yml index 4edb7cb3..869245f6 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: opentelekomcloud name: cloud -version: 0.12.2 +version: 0.12.3 readme: README.md authors: - Artem Goncharov From be37690866fee06d35f70cc3e83a99d0403465b4 Mon Sep 17 00:00:00 2001 From: YustinaKvr <62885041+YustinaKvr@users.noreply.github.com> Date: Wed, 30 Mar 2022 16:48:52 +0300 Subject: [PATCH 49/65] fix check-mode option (#186) fix check-mode option Add 'ansible' into check_mode option Reviewed-by: Rodion Gyrbu Reviewed-by: Irina Pereiaslavskaia Reviewed-by: Vladimir Vshivkov --- plugins/modules/volume_backup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/modules/volume_backup.py b/plugins/modules/volume_backup.py index 3634ab04..e3830891 100644 --- a/plugins/modules/volume_backup.py +++ b/plugins/modules/volume_backup.py @@ -209,7 +209,7 @@ def run(self): # so search this was backup = self.find_backup(name) - if self.check_mode: + if self.ansible.check_mode: self.exit_json(changed=self._system_state_change(backup)) if self.params['state'] == 'present': From ca7289e9a2e68d2f8832d37602e494716cadc1c5 Mon Sep 17 00:00:00 2001 From: YustinaKvr <62885041+YustinaKvr@users.noreply.github.com> Date: Wed, 20 Apr 2022 12:56:34 +0300 Subject: [PATCH 50/65] Fix snapshot section to create snapshot in case of initial backup (#191) Fix 
snapshot section to create snapshot in case of initial backup Add instructions to make an snapshot in case of first backing up Reviewed-by: Irina Pereiaslavskaia Reviewed-by: Anton Sidelnikov Reviewed-by: Anton Kachurin --- plugins/modules/volume_backup.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/plugins/modules/volume_backup.py b/plugins/modules/volume_backup.py index e3830891..85908383 100644 --- a/plugins/modules/volume_backup.py +++ b/plugins/modules/volume_backup.py @@ -215,7 +215,6 @@ def run(self): if self.params['state'] == 'present': if not backup: cloud_volume = self.find_volume(volume) - cloud_snapshot_id = None attrs = { 'name': name, @@ -225,9 +224,19 @@ def run(self): } if snapshot: - cloud_snapshot_id = self.find_snapshot(snapshot, - ignore_missing=False).id - attrs['snapshot_id'] = cloud_snapshot_id + snapshot = self.conn.get_volume_snapshot( + self.params['display_name'], filters={'volume_id': volume.id}) + attrs['snapshot_id'] = snapshot.id + else: + snapshot = self.conn.create_volume_snapshot( + volume.id, + force=self.params['force'], + wait=self.params['wait'], + timeout=self.params['timeout'], + name=self.params['display_name'] + '_snapshot', + description=self.params.get('display_description') + ) + attrs['snapshot_id'] = snapshot.id if metadata: attrs['metadata'] = metadata From e556717b976c6f844d6b2a7863eb311016951ca4 Mon Sep 17 00:00:00 2001 From: Anton Sidelnikov <53078276+anton-sidelnikov@users.noreply.github.com> Date: Wed, 20 Apr 2022 15:29:46 +0300 Subject: [PATCH 51/65] Nameserver info module (#192) Nameserver info module Reviewed-by: Vladimir Vshivkov Reviewed-by: Anton Kachurin Reviewed-by: Irina Pereiaslavskaia Reviewed-by: Rodion Gyrbu --- doc/source/dns.rst | 3 +- doc/source/index.rst | 2 +- galaxy.yml | 2 +- meta/runtime.yml | 1 + plugins/modules/dns_nameserver_info.py | 101 ++++++++++++++++++ tests/integration/targets/dns/tasks/main.yaml | 22 ++++ tests/sanity/ignore-2.10.txt | 1 + tests/sanity/ignore-2.9.txt | 1 + 8 files changed, 130 insertions(+), 3 deletions(-) create mode 100644 plugins/modules/dns_nameserver_info.py diff --git a/doc/source/dns.rst b/doc/source/dns.rst index 5463229c..9a0d8b57 100644 --- a/doc/source/dns.rst +++ b/doc/source/dns.rst @@ -6,5 +6,6 @@ Domain Name Service (DNS) Modules dns_floating_ip dns_recordset + dns_recordset_info dns_zone - + dns_nameserver_info diff --git a/doc/source/index.rst b/doc/source/index.rst index ec9ef97c..f74f222c 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1,7 +1,7 @@ Opentelekomcloud.Cloud ====================== -Collection version 0.12.3 +Collection version 0.12.4 diff --git a/galaxy.yml b/galaxy.yml index 869245f6..c6fbfbcc 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: opentelekomcloud name: cloud -version: 0.12.3 +version: 0.12.4 readme: README.md authors: - Artem Goncharov diff --git a/meta/runtime.yml b/meta/runtime.yml index e891c7bc..fb052324 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -79,3 +79,4 @@ action_groups: - anti_ddos_fip_statuses_info.py - anti_ddos_optional_policies_info.py - object_info.py + - dns_nameserver_info.py diff --git a/plugins/modules/dns_nameserver_info.py b/plugins/modules/dns_nameserver_info.py new file mode 100644 index 00000000..801cf1a9 --- /dev/null +++ b/plugins/modules/dns_nameserver_info.py @@ -0,0 +1,101 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +module: dns_nameserver_info +short_description: Get info about DNS nameservers. +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.12.2" +author: "Anton Sidelnikov (@anton-sidelnikov)" +description: + - Get DNS nameservers info from the OTC. +options: + zone: + description: + - ID or name of the required zone. If name had been provided, only public zone could be\ + returned. If private zone is required, only ID should be passed. + type: str + required: true +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +nameservers: + description: List of dictionaries describing nameservers. + type: complex + returned: On Success. + contains: + hostname: + description: Host name of a name server. + type: str + sample: "ns1.example.com." + address: + description: IP address of a DNS server (Private Zone only). + type: str + sample: "100.125.0.81" + priority: + description: Priority of a name server. + type: int +''' + +EXAMPLES = ''' +#Get info about choosen DNS recordset. +- opentelekomcloud.cloud.dns_nameserver_info: + zone: "ff80808275f5fc0f017e886898315ee9" + register: nameservers +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class DNSNameserverInfoModule(OTCModule): + + argument_spec = dict( + zone=dict(required=True), + ) + module_kwargs = dict( + supports_check_mode=True, + ) + + def run(self): + + data = [] + query = {} + + if self.params['zone']: + try: + query['zone'] = self.conn.dns.find_zone( + name_or_id=self.params['zone'], ignore_missing=False).id + except self.sdk.exceptions.ResourceNotFound: + self.fail_json(msg="Zone not found") + + for raw in self.conn.dns.nameservers(**query): + dt = raw.to_dict() + dt.pop('location') + dt.pop('name') + dt.pop('id') + data.append(dt) + + self.exit( + changed=False, + nameservers=data + ) + + +def main(): + module = DNSNameserverInfoModule() + module() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/dns/tasks/main.yaml b/tests/integration/targets/dns/tasks/main.yaml index d1a77724..e492cc99 100644 --- a/tests/integration/targets/dns/tasks/main.yaml +++ b/tests/integration/targets/dns/tasks/main.yaml @@ -236,6 +236,28 @@ - dns_zo_pr is success - dns_zo_pr.zone.description is defined + - name: Get a DNS Nameservers info for public zone + opentelekomcloud.cloud.dns_nameserver_info: + zone: "{{ zone_public_name }}" + register: dns_ns + + - name: assert result + assert: + that: + - dns_ns is success + - dns_ns.nameservers[0].hostname is defined + + - name: Get a DNS Nameservers info for private zone + opentelekomcloud.cloud.dns_nameserver_info: + zone: "{{ zone_private_name }}" + register: dns_ns + + - name: assert result + assert: + that: + - dns_ns is success + - dns_ns.nameservers[0].address is defined + - name: Creating a DNS Recordset - check mode opentelekomcloud.cloud.dns_recordset: zone_id: "{{ dns_zo.zone.id }}" diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index a009d592..50a22a2e 100644 --- a/tests/sanity/ignore-2.10.txt +++ 
b/tests/sanity/ignore-2.10.txt @@ -94,3 +94,4 @@ plugins/modules/anti_ddos_fip_statuses_info.py validate-modules:missing-gplv3-li plugins/modules/anti_ddos_optional_policies_info.py validate-modules:missing-gplv3-license plugins/modules/server_group_info.py validate-modules:missing-gplv3-license plugins/modules/object_info.py validate-modules:missing-gplv3-license +plugins/modules/dns_nameserver_info.py validate-modules:missing-gplv3-license
diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 00d13fe1..e281c7ab 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -95,3 +95,4 @@ plugins/modules/anti_ddos_fip_statuses_info.py validate-modules:missing-gplv3-li plugins/modules/anti_ddos_optional_policies_info.py validate-modules:missing-gplv3-license plugins/modules/server_group_info.py validate-modules:missing-gplv3-license plugins/modules/object_info.py validate-modules:missing-gplv3-license +plugins/modules/dns_nameserver_info.py validate-modules:missing-gplv3-license
From aff4b434d37188302da1ce37e4a2964e85d3aea3 Mon Sep 17 00:00:00 2001 From: YustinaKvr <62885041+YustinaKvr@users.noreply.github.com> Date: Tue, 26 Apr 2022 19:32:10 +0300 Subject: [PATCH 52/65] Volume snapshot info fixes (#195) Volume snapshot info fixes Delete details_filter Add .id parameter into querying Reviewed-by: Anton Sidelnikov --- plugins/modules/volume_backup.py | 1 + plugins/modules/volume_snapshot_info.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/plugins/modules/volume_backup.py b/plugins/modules/volume_backup.py index 85908383..0374989b 100644 --- a/plugins/modules/volume_backup.py +++ b/plugins/modules/volume_backup.py @@ -223,6 +223,7 @@ def run(self): 'is_incremental': is_incremental } + volume = self.conn.get_volume(self.params['volume']) if snapshot: snapshot = self.conn.get_volume_snapshot( self.params['display_name'], filters={'volume_id': volume.id})
diff --git a/plugins/modules/volume_snapshot_info.py b/plugins/modules/volume_snapshot_info.py index d4482739..caf785c0 100644 --- a/plugins/modules/volume_snapshot_info.py +++ b/plugins/modules/volume_snapshot_info.py @@ -114,6 +114,8 @@ def run(self): data = [] query = {} + if details_filter: + query['details'] = details_filter if name_filter: query['name'] = name_filter if volume_filter: @@ -121,7 +123,7 @@ def run(self): if status_filter: query['status'] = status_filter.lower() - for raw in self.conn.block_storage.snapshots(details_filter, **query): + for raw in self.conn.block_storage.snapshots(**query): dt = raw.to_dict() dt.pop('location') data.append(dt)
From 7d673ce3b3d82d1049046743b68c4e7d114ef0b0 Mon Sep 17 00:00:00 2001 From: YustinaKvr <62885041+YustinaKvr@users.noreply.github.com> Date: Tue, 26 Apr 2022 20:47:58 +0300 Subject: [PATCH 53/65] fix error id (#193) Fix error with "object has no attribute id" Add 'volume' object Reviewed-by: Anton Sidelnikov
From bad014e049f19b90bb8f65ba0c06013e5f0da8b5 Mon Sep 17 00:00:00 2001 From: YustinaKvr <62885041+YustinaKvr@users.noreply.github.com> Date: Wed, 27 Apr 2022 12:38:54 +0300 Subject: [PATCH 54/65] Volume backup integration tests (#196) Volume backup integration tests Add integration tests for volume_backup module Reviewed-by: Anton Sidelnikov --- plugins/modules/volume_snapshot_info.py | 2 +- .../targets/volume_backup/tasks/aliases | 1 + .../targets/volume_backup/tasks/main.yaml | 107 ++++++++++++++++++ 3 files changed, 109 insertions(+), 1 deletion(-) create mode 100644 
tests/integration/targets/volume_backup/tasks/aliases create mode 100644 tests/integration/targets/volume_backup/tasks/main.yaml diff --git a/plugins/modules/volume_snapshot_info.py b/plugins/modules/volume_snapshot_info.py index caf785c0..1ad3e08b 100644 --- a/plugins/modules/volume_snapshot_info.py +++ b/plugins/modules/volume_snapshot_info.py @@ -119,7 +119,7 @@ def run(self): if name_filter: query['name'] = name_filter if volume_filter: - query['volume_id'] = self.conn.block_storage.find_volume(volume_filter) + query['volume_id'] = self.conn.block_storage.find_volume(volume_filter).id if status_filter: query['status'] = status_filter.lower() diff --git a/tests/integration/targets/volume_backup/tasks/aliases b/tests/integration/targets/volume_backup/tasks/aliases new file mode 100644 index 00000000..7a68b11d --- /dev/null +++ b/tests/integration/targets/volume_backup/tasks/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/volume_backup/tasks/main.yaml b/tests/integration/targets/volume_backup/tasks/main.yaml new file mode 100644 index 00000000..cf97e01f --- /dev/null +++ b/tests/integration/targets/volume_backup/tasks/main.yaml @@ -0,0 +1,107 @@ +--- +- module_defaults: + openstack.cloud.volume: + cloud: "{{ test_cloud }}" + opentelekomcloud.cloud.volume_backup: + cloud: "{{ test_cloud }}" + opentelekomcloud.cloud.volume_snapshot_info: + cloud: "{{ test_cloud }}" + openstack.cloud.volume_snapshot: + cloud: "{{ test_cloud }}" + + block: + - name: Set random prefix + set_fact: + prefix: "{{ 999999 | random | to_uuid | hash('md5') }}" + + - name: Set initial facts + set_fact: + display_name: "{{ ( prefix + '-test-volume') }}" + backup_name: "{{ ( prefix + '-test-backup') }}" + availability_zone: "eu-de-01" + + - name: 1 create volume + openstack.cloud.volume: + size: 1 + availability_zone: "{{ availability_zone }}" + display_name: "{{ display_name }}" + state: present + register: vol + + - name: 2 assert result of volume creating + assert: + that: + - vol is success + - vol is changed + - vol.volume is defined + + - name: 3 create backup of the volume + opentelekomcloud.cloud.volume_backup: + name: "{{ backup_name }}" + volume: "{{ display_name }}" + state: present + register: bckp + + - name: 4 assert result + assert: + that: + - bckp is success + - bckp is changed + - bckp.volume_backup is defined + + - name: 5 check whether snapshot had been created + opentelekomcloud.cloud.volume_snapshot_info: + volume: "{{ vol['id'] }}" + status: available + register: snapshot + + - name: 6 assert result for snapshot + assert: + that: + - snapshot is success + - snapshot.volume_snapshots is defined + - snapshot.volume_snapshots | length > 0 + + - name: 7 set fact to found proper snapshot + set_fact: + snapshot_name: "{{ snapshot['volume_snapshots'][0]['name'] }}" + + always: + - block: + - name: 8 delete snapshot + openstack.cloud.volume_snapshot: + volume: "{{ display_name }}" + name: "{{ snapshot_name }}" + state: absent + register: delsh + + - name: 9 assert result of deliting snapshot + assert: + that: + - delsh is success + - delsh is changed + + - name: 10 delete backup + opentelekomcloud.cloud.volume_backup: + name: "{{ backup_name }}" + state: absent + register: delbckp + + - name: 11 assert result of deliting + assert: + that: + - delbckp is success + - delbckp is not changed + + - name: 12 delete volume + openstack.cloud.volume: + name: "{{ display_name }}" + state: absent + register: delvol + + - name: 13 assert result of deleting volume + assert: + that: + - delvol is 
success + - delvol is changed + ignore_errors: true From c1e7e26445af60862f5a3c356a5b71f28a5701f2 Mon Sep 17 00:00:00 2001 From: Artem Goncharov Date: Mon, 2 May 2022 09:12:37 +0200 Subject: [PATCH 55/65] Fix recordset not uniqueness check (#197) Fix recordset not uniqueness check When we have few recordsets with same name we must also respect recordset type. Reviewed-by: kucerakk --- plugins/modules/dns_recordset.py | 6 +++++- plugins/modules/dns_recordset_info.py | 7 +++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/plugins/modules/dns_recordset.py b/plugins/modules/dns_recordset.py index 58e6998b..3d446b82 100644 --- a/plugins/modules/dns_recordset.py +++ b/plugins/modules/dns_recordset.py @@ -148,11 +148,15 @@ def run(self): self.params['zone_id']) ) - rs = self.conn.dns.find_recordset( + find_args = dict( name_or_id=self.params['recordset_name'], zone=zo.id, ignore_missing=True ) + if self.params['type']: + find_args['type'] = self.params['type'] + + rs = self.conn.dns.find_recordset(**find_args) # recordset deletion if self.params['state'] == 'absent': diff --git a/plugins/modules/dns_recordset_info.py b/plugins/modules/dns_recordset_info.py index cf0b02a5..9535f1fc 100644 --- a/plugins/modules/dns_recordset_info.py +++ b/plugins/modules/dns_recordset_info.py @@ -124,9 +124,12 @@ def run(self): self.fail_json(msg="Zone not found") if self.params['name']: try: + query['name_or_id'] = self.params['name'] + if self.params['type']: + query['type'] = self.params['type'] + recordset = self.conn.dns.find_recordset( - zone=query['zone'], name_or_id=self.params['name'], - ignore_missing=False) + ignore_missing=False, **query) dt = recordset.to_dict() dt.pop('location') data.append(dt) From 797c322d69a5403f9e1d041bab5e8effed045633 Mon Sep 17 00:00:00 2001 From: Artem Goncharov Date: Tue, 7 Jun 2022 14:08:52 +0200 Subject: [PATCH 56/65] update test requirements (#200) update test requirements Reviewed-by: Vladimir Vshivkov Reviewed-by: Anton Sidelnikov --- .ansible-lint | 1 - plugins/module_utils/otc.py | 12 +- plugins/modules/as_config.py | 2 +- plugins/modules/as_instance_info.py | 4 + plugins/modules/as_quota_info.py | 4 + plugins/modules/cce_cluster_node.py | 2 +- plugins/modules/cce_node_pool.py | 2 +- plugins/modules/dds_instance.py | 2 +- plugins/modules/dms_instance.py | 2 +- plugins/modules/nat_gateway.py | 2 +- plugins/modules/object_info.py | 4 + plugins/modules/subnet.py | 16 ++- plugins/modules/subnet_info.py | 4 + plugins/modules/vpc_info.py | 4 + plugins/modules/vpn_service_info.py | 3 +- roles/vpc_peering/meta/main.yml | 3 +- test-requirements.txt | 7 +- .../tasks/main.yaml | 9 +- .../tasks/main.yaml | 5 +- .../targets/as_config/tasks/main.yaml | 19 ++- .../targets/as_config_info/tasks/main.yaml | 9 +- .../targets/as_group/tasks/main.yaml | 74 ++++------- .../targets/as_instance/tasks/main.yaml | 88 +++++++------ .../targets/as_instance_info/tasks/main.yaml | 37 +++--- .../targets/as_policy/tasks/main.yaml | 52 ++++---- .../targets/as_policy_info/tasks/main.yaml | 39 +++--- .../targets/as_quota_info/tasks/main.yaml | 5 +- .../availability_zone_info/tasks/main.yaml | 5 +- .../targets/cce_cluster/tasks/main.yaml | 18 +-- .../targets/cce_cluster_node/tasks/main.yaml | 24 ++-- .../targets/cce_lifecycle/tasks/main.yaml | 32 +++-- .../targets/cce_node_pool/tasks/main.yaml | 7 +- .../cce_node_pool_info/tasks/main.yaml | 7 +- tests/integration/targets/ces/tasks/main.yaml | 40 +++--- .../targets/css_cluster/tasks/main.yaml | 19 +-- 
.../targets/css_cluster_info/tasks/main.yaml | 5 +- .../targets/css_snapshot/tasks/main.yml | 25 ++-- .../targets/css_snapshot_info/tasks/main.yaml | 5 +- .../dds_datastore_info/tasks/main.yaml | 5 +- .../targets/dds_flavor_info/tasks/main.yaml | 5 +- .../targets/dds_instance/tasks/main.yaml | 20 +-- .../targets/dds_instance_info/tasks/main.yaml | 5 +- .../targets/deh_host/tasks/main.yaml | 39 +++--- .../targets/deh_host_info/tasks/main.yaml | 9 +- .../deh_host_type_info/tasks/main.yaml | 9 +- tests/integration/targets/dms/tasks/main.yaml | 16 ++- tests/integration/targets/dns/tasks/main.yaml | 70 ++++++----- .../dns_recordset_info/tasks/main.yaml | 19 +-- .../targets/floating_ip/tasks/main.yaml | 11 +- .../targets/lb_certificate/tasks/main.yaml | 34 ++--- .../lb_certificate_info/tasks/main.yaml | 13 +- .../lb_healtmonitor_info/tasks/main.yaml | 11 +- .../lb_listener_certificates/tasks/main.yaml | 43 ++++--- .../targets/loadbalancer/tasks/main.yaml | 44 ++++--- tests/integration/targets/nat/tasks/main.yaml | 117 ++++++++++-------- .../nat_dnat_rule_info/tasks/main.yaml | 9 +- .../targets/nat_gateway_info/tasks/main.yaml | 9 +- .../nat_snat_rule_info/tasks/main.yaml | 9 +- .../targets/object_info/tasks/main.yaml | 5 +- .../targets/prepare_tests/tasks/main.yaml | 8 +- .../targets/rds_backup/tasks/main.yaml | 5 +- .../targets/rds_backup_info/tasks/main.yaml | 21 +--- .../rds_datastore_info/tasks/main.yaml | 6 +- .../targets/rds_flavor_info/tasks/main.yaml | 24 ++-- .../targets/rds_instance/tasks/main.yaml | 22 ++-- .../targets/rds_instance_info/tasks/main.yaml | 27 ++-- .../targets/security_group/tasks/main.yaml | 13 +- .../security_group_info/tasks/main.yaml | 7 +- .../targets/server_group_info/tasks/main.yaml | 7 +- .../targets/subnet/tasks/main.yaml | 17 +-- .../targets/subnet_info/tasks/main.yaml | 18 +-- .../targets/volume_backup/{tasks => }/aliases | 0 .../targets/volume_backup/tasks/main.yaml | 32 +++-- .../volume_backup_info/tasks/main.yaml | 9 +- .../volume_snapshot_info/tasks/main.yaml | 9 +- tests/integration/targets/vpc/tasks/main.yaml | 15 +-- .../targets/vpc_info/tasks/main.yaml | 17 +-- .../vpc_peering_info_test/tasks/main.yaml | 9 +- .../targets/vpc_peering_test/tasks/main.yaml | 28 +++-- .../vpc_route_info_test/tasks/main.yaml | 9 +- .../targets/vpc_route_test/tasks/main.yaml | 27 ++-- .../targets/vpn_service_info/tasks/main.yaml | 23 ++-- .../targets/waf_certificate/tasks/main.yaml | 27 ++-- .../waf_certificate_info/tasks/main.yaml | 13 +- .../targets/waf_domain/tasks/main.yaml | 61 ++++----- .../targets/waf_domain_info/tasks/main.yaml | 13 +- tests/requirements.yml | 2 - tests/sanity/ignore-2.13.txt | 1 + 88 files changed, 826 insertions(+), 744 deletions(-) rename tests/integration/targets/volume_backup/{tasks => }/aliases (100%) delete mode 100644 tests/requirements.yml create mode 120000 tests/sanity/ignore-2.13.txt diff --git a/.ansible-lint b/.ansible-lint index 85006fac..f789d228 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -7,4 +7,3 @@ skip_list: - '204' # Lines should be no longer than 160 chars - '301' # Commands should not change things if nothing needs doing - '701' # No 'galaxy_info' found\ - - fqcn-builtins diff --git a/plugins/module_utils/otc.py b/plugins/module_utils/otc.py index 7d442f6a..9f63df80 100644 --- a/plugins/module_utils/otc.py +++ b/plugins/module_utils/otc.py @@ -16,12 +16,12 @@ import openstack as sdk import otcextensions from otcextensions import sdk as otc_sdk + from pkg_resources import parse_version as V HAS_LIBRARIES = True except 
ImportError: HAS_LIBRARIES = False from ansible.module_utils.basic import AnsibleModule -from distutils.version import StrictVersion def openstack_full_argument_spec(**kwargs): @@ -122,11 +122,11 @@ def openstack_cloud_from_module(self, min_version='0.6.9'): self.fail_json(msg='openstacksdk and otcextensions are required for this self') if min_version: - min_version = max(StrictVersion('0.6.9'), StrictVersion(min_version)) + min_version = max(V('0.6.9'), V(min_version)) else: - min_version = StrictVersion('0.6.9') + min_version = V('0.6.9') - if StrictVersion(otcextensions.__version__) < min_version: + if V(otcextensions.__version__) < min_version: self.fail_json( msg="To utilize this self, the installed version of " "the otcextensions library MUST be >={min_version}".format( @@ -181,10 +181,10 @@ def check_versioned(self, **kwargs): versioned_result = {} for var_name in kwargs: if ('min_ver' in self.argument_spec[var_name] - and StrictVersion(self.sdk_version) < self.argument_spec[var_name]['min_ver']): + and V(self.sdk_version) < self.argument_spec[var_name]['min_ver']): continue if ('max_ver' in self.argument_spec[var_name] - and StrictVersion(self.sdk_version) > self.argument_spec[var_name]['max_ver']): + and V(self.sdk_version) > self.argument_spec[var_name]['max_ver']): continue versioned_result.update({var_name: kwargs[var_name]}) return versioned_result diff --git a/plugins/modules/as_config.py b/plugins/modules/as_config.py index 776445bc..03d8edcd 100644 --- a/plugins/modules/as_config.py +++ b/plugins/modules/as_config.py @@ -94,7 +94,7 @@ or 1 (encryption enabled). type: str choices: [ '0', '1' ] - default: 0 + default: '0' __system__cmkid: description: - Specifies the CMK ID, which indicates encryption in metadata.\ diff --git a/plugins/modules/as_instance_info.py b/plugins/modules/as_instance_info.py index 1a26b03f..a453836e 100644 --- a/plugins/modules/as_instance_info.py +++ b/plugins/modules/as_instance_info.py @@ -183,6 +183,10 @@ class ASInstanceInfoModule(OTCModule): limit=dict(type='int', required=False, default=20) ) + module_kwargs = dict( + supports_check_mode=True + ) + def run(self): as_group = self.params['scaling_group'] lifecycle_state = self.params['lifecycle_state'] diff --git a/plugins/modules/as_quota_info.py b/plugins/modules/as_quota_info.py index 7b8a06a7..b4e0f44e 100644 --- a/plugins/modules/as_quota_info.py +++ b/plugins/modules/as_quota_info.py @@ -83,6 +83,10 @@ class ASQuotaInfoModule(OTCModule): scaling_group=dict(required=False) ) + module_kwargs = dict( + supports_check_mode=True + ) + def run(self): data = [] diff --git a/plugins/modules/cce_cluster_node.py b/plugins/modules/cce_cluster_node.py index 8dd3dd02..e8810a9f 100644 --- a/plugins/modules/cce_cluster_node.py +++ b/plugins/modules/cce_cluster_node.py @@ -332,7 +332,7 @@ class CceClusterNodeModule(OTCModule): flavor=dict(required=False), floating_ip=dict(required=False), k8s_tags=dict(required=False, type='dict'), - ssh_key=dict(required=False), + ssh_key=dict(required=False, no_log=False), labels=dict(required=False, type='dict'), lvm_config=dict(required=False), max_pods=dict(required=False, type='int'), diff --git a/plugins/modules/cce_node_pool.py b/plugins/modules/cce_node_pool.py index 2b5b06fe..534db56c 100644 --- a/plugins/modules/cce_node_pool.py +++ b/plugins/modules/cce_node_pool.py @@ -342,7 +342,7 @@ class CceNodePoolModule(OTCModule): choices=['SATA', 'SAS', 'SSD'], default='SATA'), scale_down_cooldown_time=dict(required=False, type='int'), - ssh_key=dict(required=False), + 
ssh_key=dict(required=False, no_log=False), state=dict(default='present', choices=['absent', 'present']), tags=dict(required=False, type='list', elements='dict'), taints=dict(required=False, type='list', elements='dict'), diff --git a/plugins/modules/dds_instance.py b/plugins/modules/dds_instance.py index de10678e..de85154a 100644 --- a/plugins/modules/dds_instance.py +++ b/plugins/modules/dds_instance.py @@ -30,7 +30,7 @@ type: str datastore_version: description: Specifies the database version. - choices: [3.2, 3.4] + choices: ['3.2', '3.4'] type: str default: '3.4' region: diff --git a/plugins/modules/dms_instance.py b/plugins/modules/dms_instance.py index f23c0044..d90cf0c8 100644 --- a/plugins/modules/dms_instance.py +++ b/plugins/modules/dms_instance.py @@ -185,7 +185,7 @@ class DmsInstanceModule(OTCModule): engine_version=dict(required=False, default='2.3.0'), storage_space=dict(required=False, type='int'), access_user=dict(required=False), - password=dict(required=False), + password=dict(required=False, no_log=True), vpc_id=dict(required=False), security_group_id=dict(required=False), subnet_id=dict(required=False), diff --git a/plugins/modules/nat_gateway.py b/plugins/modules/nat_gateway.py index 6390ed3e..9bf24184 100644 --- a/plugins/modules/nat_gateway.py +++ b/plugins/modules/nat_gateway.py @@ -53,7 +53,7 @@ 2 (medium 50.000 connections), 3 (large 200.000 connections), 4 (extra-large 1.000.000 connections) type: str - default: 1 + default: "1" choices: ["1", "2", "3", "4"] state: choices: [present, absent] diff --git a/plugins/modules/object_info.py b/plugins/modules/object_info.py index 01f0734e..1f7031b6 100644 --- a/plugins/modules/object_info.py +++ b/plugins/modules/object_info.py @@ -157,6 +157,10 @@ class SwiftInfoModule(OTCModule): object_name=dict(type='str', required=False), ) + module_kwargs = dict( + supports_check_mode=True + ) + def run(self): container = self.params['container'] object_name = self.params['object_name'] diff --git a/plugins/modules/subnet.py b/plugins/modules/subnet.py index 67507510..f63cb820 100644 --- a/plugins/modules/subnet.py +++ b/plugins/modules/subnet.py @@ -262,8 +262,9 @@ def run(self): if val is not None: err_fields[field] = val if err_fields: - self.fail('updating subnet fields {} is not supported (subnet: {})' - .format(err_fields, subnet)) + self.fail( + f'updating subnet fields {err_fields} is not ' + f'supported (subnet: {subnet})') update_data = {} for field in self._update_fields: if data[field] is not None: @@ -306,8 +307,9 @@ def _changed(self, state, expected): if field in ['vpc', 'vpc_id']: field = 'vpc_id' # as `vpc` should be an ID too at this place if state.get(field, None) != expected[field]: - self.log('There is a difference in field {}. Expected {}, got {}' - .format(field, expected[field], state[field])) + self.log( + f'There is a difference in field {field}. Expected ' + f'{expected[field]}, got {state[field]}') return True return False @@ -327,8 +329,10 @@ def find_vpc_subnet(self): return None if len(subnets) > 1: self.fail( - msg='More than one subnet with name {} is found in vpc {}.' - 'Please use ID instead.'.format(name, vpc_id) + msg=( + f'More than one subnet with name {name} is found ' + f'in vpc {vpc_id}. Please use ID instead.' 
+ ) ) return subnets[0] diff --git a/plugins/modules/subnet_info.py b/plugins/modules/subnet_info.py index 574fd40e..ecd39a7b 100644 --- a/plugins/modules/subnet_info.py +++ b/plugins/modules/subnet_info.py @@ -108,6 +108,10 @@ class SubnetInfoModule(OTCModule): vpc=dict(required=False) ) + module_kwargs = dict( + supports_check_mode=True + ) + def run(self): data = [] diff --git a/plugins/modules/vpc_info.py b/plugins/modules/vpc_info.py index 11ac49da..01af9a70 100644 --- a/plugins/modules/vpc_info.py +++ b/plugins/modules/vpc_info.py @@ -77,6 +77,10 @@ class VpcInfoModule(OTCModule): name_or_id=dict(required=False) ) + module_kwargs = dict( + supports_check_mode=True + ) + def run(self): data = [] diff --git a/plugins/modules/vpn_service_info.py b/plugins/modules/vpn_service_info.py index ccd576c2..61356bf5 100644 --- a/plugins/modules/vpn_service_info.py +++ b/plugins/modules/vpn_service_info.py @@ -125,7 +125,7 @@ class VpnServicesInfoModule(OTCModule): description=dict(type='str', required=False), external_v4_ip=dict(type='str', required=False), external_v6_ip=dict(type='str', required=False), - router=dict(type='str', requiered=False), + router=dict(type='str', required=False), status=dict(required=False, choices=["active", "down", "build", "error", "pending_create", @@ -134,6 +134,7 @@ class VpnServicesInfoModule(OTCModule): project_id=dict(type='str', required=False), vpn_service=dict(type='str', required=False) ) + module_kwargs = dict( supports_check_mode=True ) diff --git a/roles/vpc_peering/meta/main.yml b/roles/vpc_peering/meta/main.yml index 08b7db3c..40b7a4d6 100644 --- a/roles/vpc_peering/meta/main.yml +++ b/roles/vpc_peering/meta/main.yml @@ -5,6 +5,7 @@ galaxy_info: license: Apache-2.0 - min_ansible_version: 2.10 + min_ansible_version: '2.10' + platforms: [] dependencies: [] diff --git a/test-requirements.txt b/test-requirements.txt index 39949628..f0d7c207 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,8 +1,7 @@ openstacksdk -ansible-base -ansible-lint==5.4.0 -pycodestyle==2.6.0 -flake8==3.8.4 +ansible-lint>=6.0.0 +pycodestyle==2.8.0 +flake8>=4.0.0 pylint voluptuous yamllint diff --git a/tests/integration/targets/anti_ddos_fip_statuses_info/tasks/main.yaml b/tests/integration/targets/anti_ddos_fip_statuses_info/tasks/main.yaml index 6987bb71..12a80345 100644 --- a/tests/integration/targets/anti_ddos_fip_statuses_info/tasks/main.yaml +++ b/tests/integration/targets/anti_ddos_fip_statuses_info/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: AntiDDoS FIP Statuses Info tests + module_defaults: opentelekomcloud.cloud.anti_ddos_fip_statuses_info: cloud: "{{ test_cloud }}" block: @@ -8,7 +9,7 @@ register: anti_ddos - name: assert result - assert: + ansible.builtin.assert: that: - anti_ddos is success - anti_ddos is not changed @@ -19,7 +20,7 @@ register: anti_ddos - name: assert result - assert: + ansible.builtin.assert: that: - anti_ddos is success - anti_ddos is not changed @@ -31,7 +32,7 @@ ignore_errors: true - name: assert result - assert: + ansible.builtin.assert: that: - anti_ddos is not success - anti_ddos is not changed diff --git a/tests/integration/targets/anti_ddos_optional_policies_info/tasks/main.yaml b/tests/integration/targets/anti_ddos_optional_policies_info/tasks/main.yaml index 2acab8ae..d94d90f2 100644 --- a/tests/integration/targets/anti_ddos_optional_policies_info/tasks/main.yaml +++ b/tests/integration/targets/anti_ddos_optional_policies_info/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: AntiDDoS 
Optional Policies Info tests + module_defaults: opentelekomcloud.cloud.anti_ddos_optional_policies_info: cloud: "{{ test_cloud }}" block: @@ -8,7 +9,7 @@ register: anti_ddos_optional_policies_info - name: assert result - assert: + ansible.builtin.assert: that: - anti_ddos_optional_policies_info is success - anti_ddos_optional_policies_info is not changed diff --git a/tests/integration/targets/as_config/tasks/main.yaml b/tests/integration/targets/as_config/tasks/main.yaml index a5208122..02eb2c54 100644 --- a/tests/integration/targets/as_config/tasks/main.yaml +++ b/tests/integration/targets/as_config/tasks/main.yaml @@ -1,14 +1,15 @@ --- -- module_defaults: +- name: AS Config tests + module_defaults: opentelekomcloud.cloud.loadbalancer: cloud: "{{ test_cloud }}" block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: config_name: "{{ ( prefix + '_config') }}" key_name: "{{ ( prefix + '_key') }}" @@ -30,7 +31,7 @@ check_mode: true - name: assert result - assert: + ansible.builtin.assert: that: - as_config_check is changed @@ -47,15 +48,14 @@ register: as_config - name: assert result - assert: + ansible.builtin.assert: that: - as_config is changed - as_config is success always: - - block: - # Cleanup - + - name: Cleanup + block: - name: Delete as config opentelekomcloud.cloud.as_config: scaling_configuration: "{{ config_name }}" @@ -64,7 +64,7 @@ ignore_errors: true - name: assert result - assert: + ansible.builtin.assert: that: - dropped_as_config is success - dropped_as_config is changed @@ -73,4 +73,3 @@ openstack.cloud.os_keypair: name: "{{ key_name }}" state: absent - ignore_errors: true diff --git a/tests/integration/targets/as_config_info/tasks/main.yaml b/tests/integration/targets/as_config_info/tasks/main.yaml index 909b15ea..3a24577e 100644 --- a/tests/integration/targets/as_config_info/tasks/main.yaml +++ b/tests/integration/targets/as_config_info/tasks/main.yaml @@ -1,16 +1,17 @@ --- -- block: +- name: AS Config Info tests + block: - name: Get AS config info - as_config_info: + opentelekomcloud.cloud.as_config_info: cloud: "{{ test_cloud }}" register: auto_scale - name: debug configs - debug: + ansible.builtin.debug: var: auto_scale.as_configs - name: assert result - assert: + ansible.builtin.assert: that: - auto_scale is success - auto_scale is not changed diff --git a/tests/integration/targets/as_group/tasks/main.yaml b/tests/integration/targets/as_group/tasks/main.yaml index a6616039..bbe3f81c 100644 --- a/tests/integration/targets/as_group/tasks/main.yaml +++ b/tests/integration/targets/as_group/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: AS Group tests + module_defaults: opentelekomcloud.cloud.as_group: cloud: "{{ test_cloud }}" opentelekomcloud.cloud.as_instance_info: @@ -16,11 +17,11 @@ cloud: "{{ test_cloud }}" block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: as_group_name: "{{ ( prefix + '_as_group') }}" new_name: "{{ ( prefix + 'new_name') }}" key_name: "{{ ( prefix + '_key') }}" @@ -81,7 +82,7 @@ register: as_group_check - name: assert result - assert: + ansible.builtin.assert: that: - as_group_check is changed @@ -94,7 +95,7 @@ register: as_group - name: assert result - assert: + ansible.builtin.assert: that: - as_group is success - as_group is changed @@ -107,14 
+108,7 @@ networks: [{'id': "{{ network.network.id }}"}] router: "{{ router.router.id }}" register: as_group_err - ignore_errors: true - - - name: assert result - assert: - that: - - as_group_err is not success - - as_group_err is not changed - - 'as_group_err.msg == "AS Group {{ as_group.as_group.id }} exists"' + failed_when: 'as_group_err is changed or as_group_err.msg != "AS Group {{ as_group.as_group.id }} exists"' - name: Update as group - check mode opentelekomcloud.cloud.as_group: @@ -126,7 +120,7 @@ register: as_group_check - name: assert result - assert: + ansible.builtin.assert: that: - as_group_check is success - as_group_check is changed @@ -140,7 +134,7 @@ register: as_group - name: assert result - assert: + ansible.builtin.assert: that: - as_group is success - as_group is changed @@ -155,7 +149,7 @@ register: as_group_check - name: assert result - assert: + ansible.builtin.assert: that: - as_group_check is changed - as_group_check is success @@ -170,7 +164,7 @@ register: as_gr_del - name: assert result - assert: + ansible.builtin.assert: that: - as_gr_del is success - as_gr_del is changed @@ -184,14 +178,7 @@ wait: true timeout: 360 register: as_gr_err - ignore_errors: true - - - name: assert result - assert: - that: - - as_gr_err is not success - - as_gr_err is not changed - - 'as_gr_err.msg == "AS Group {{ as_group.as_group.id }} not found"' + failed_when: 'as_gr_err.msg != "AS Group {{ as_group.as_group.id }} not found"' - name: Create AS Group with instances opentelekomcloud.cloud.as_group: @@ -209,7 +196,7 @@ register: as_group - name: assert result - assert: + ansible.builtin.assert: that: - as_group is success - as_group is changed @@ -220,13 +207,12 @@ register: as_instances - name: assert result - assert: + ansible.builtin.assert: that: - as_instances is success - as_instances is not changed - as_instances['scaling_instances']|length == 1 - - name: Delete AS Group with instances without force_delete opentelekomcloud.cloud.as_group: scaling_group: @@ -236,17 +222,11 @@ wait: true timeout: 360 register: as_group_err - ignore_errors: true - - - name: assert result - assert: - that: - - as_group_err is not success - - as_group_err is not changed - + failed_when: "as_group_err is changed" always: - - block: + - name: Cleanup + block: - name: Delete as group opentelekomcloud.cloud.as_group: scaling_group: @@ -256,43 +236,35 @@ wait: true timeout: 360 register: dropped_as_group - - - name: assert result - assert: - that: - - dropped_as_group is success - - dropped_as_group is changed + failed_when: "dropped_as_group is not changed" - name: Delete as config opentelekomcloud.cloud.as_config: scaling_configuration: "{{ config_name }}" state: absent register: dropped_as_config - ignore_errors: true - - - name: assert result - assert: - that: - - dropped_as_config is success - - dropped_as_config is changed + failed_when: "dropped_as_config is not changed" - name: Delete keypair openstack.cloud.os_keypair: name: "{{ key_name }}" state: absent + failed_when: false - name: Drop existing router openstack.cloud.os_router: name: "{{ router.router.name }}" state: absent + failed_when: false - name: Drop existing subnet openstack.cloud.os_subnet: name: "{{ subnet.subnet.name }}" state: absent + failed_when: false - name: Drop existing network openstack.cloud.os_network: name: "{{ network.network.name }}" state: absent - ignore_errors: true + ignore_errors: false diff --git a/tests/integration/targets/as_instance/tasks/main.yaml b/tests/integration/targets/as_instance/tasks/main.yaml 
index 088f1269..b976a5e8 100644 --- a/tests/integration/targets/as_instance/tasks/main.yaml +++ b/tests/integration/targets/as_instance/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: AS Instance tests + module_defaults: opentelekomcloud.cloud.as_instance: cloud: "{{ test_cloud }}" opentelekomcloud.cloud.as_instance_info: @@ -24,11 +25,11 @@ cloud: "{{ test_cloud }}" block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 999999 | random | to_uuid | hash('md5') }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: as_instance_name: "{{ ( prefix + '_as_inst') }}" min_instance_number: 0 desire_instance_number: 1 @@ -150,15 +151,15 @@ register: as_inst_list - name: Get init list of ID of instances - set_fact: + ansible.builtin.set_fact: init_id_list: "{{ as_inst_list.scaling_instances | map(attribute='id') | list }}" - name: Get init list of Names of instances - set_fact: + ansible.builtin.set_fact: init_name_list: "{{ as_inst_list.scaling_instances | map(attribute='name') | list }}" - name: assert result - assert: + ansible.builtin.assert: that: - init_id_list|length == desire_instance_number - init_name_list|length == desire_instance_number @@ -173,7 +174,7 @@ register: as_instances - name: assert result - assert: + ansible.builtin.assert: that: - as_instances is success - as_instances is changed @@ -184,15 +185,15 @@ register: as_inst_list - name: Get init list of ID of instances - set_fact: + ansible.builtin.set_fact: init_id_list: "{{ as_inst_list.scaling_instances | map(attribute='id') | list }}" - name: Get init list of Names of instances - set_fact: + ansible.builtin.set_fact: init_name_list: "{{ as_inst_list.scaling_instances | map(attribute='name') | list }}" - name: assert result - assert: + ansible.builtin.assert: that: - init_id_list|length == desire_instance_number - init_name_list|length == desire_instance_number @@ -203,15 +204,15 @@ register: as_inst_list - name: Get init list of ID of instances - set_fact: + ansible.builtin.set_fact: id_list: "{{ as_inst_list.scaling_instances | map(attribute='id') | list }}" - name: Get init list of Names of instances - set_fact: + ansible.builtin.set_fact: name_list: "{{ as_inst_list.scaling_instances | map(attribute='name') | list }}" - name: assert result - assert: + ansible.builtin.assert: that: - id_list|length == init_id_list|length - name_list|length == init_name_list|length @@ -225,7 +226,7 @@ register: as_instances - name: assert result - assert: + ansible.builtin.assert: that: - as_instances is success - as_instances is changed @@ -236,15 +237,15 @@ register: as_inst_list - name: Get list of ID of instances - set_fact: + ansible.builtin.set_fact: id_list: "{{ as_inst_list.scaling_instances | map(attribute='id') | list }}" - name: Get list of Names of instances - set_fact: + ansible.builtin.set_fact: name_list: "{{ as_inst_list.scaling_instances | map(attribute='name') | list }}" - name: assert result - assert: + ansible.builtin.assert: that: - id_list|length == init_id_list|length + 2 - name_list|length == init_name_list|length + 2 @@ -259,7 +260,7 @@ register: as_instances - name: assert result - assert: + ansible.builtin.assert: that: - as_instances is success - as_instances is changed @@ -273,7 +274,7 @@ register: as_instances - name: assert result - assert: + ansible.builtin.assert: that: - as_instances is success - as_instances is changed @@ -288,7 +289,7 @@ register: as_instances - name: assert result - assert: + ansible.builtin.assert: that: - as_instances is 
success - as_instances is changed @@ -302,7 +303,7 @@ register: as_instances - name: assert result - assert: + ansible.builtin.assert: that: - as_instances is success - as_instances is changed @@ -317,7 +318,7 @@ register: as_instances - name: assert result - assert: + ansible.builtin.assert: that: - as_instances is success - as_instances is changed @@ -328,15 +329,15 @@ register: as_inst_list - name: Get list of ID of instances - set_fact: + ansible.builtin.set_fact: id_list_2: "{{ as_inst_list.scaling_instances | map(attribute='id') | list }}" - name: Get list of Names of instances - set_fact: + ansible.builtin.set_fact: name_list_2: "{{ as_inst_list.scaling_instances | map(attribute='name') | list }}" - name: assert result - assert: + ansible.builtin.assert: that: - id_list_2|length == id_list|length - name_list_2|length == name_list|length @@ -352,7 +353,7 @@ register: as_instances - name: assert result - assert: + ansible.builtin.assert: that: - as_instances is success - as_instances is changed @@ -363,15 +364,15 @@ register: as_inst_list - name: Get list of ID of instances - set_fact: + ansible.builtin.set_fact: id_list_after_remove: "{{ as_inst_list.scaling_instances | map(attribute='id') | list }}" - name: Get list of Names of instances - set_fact: + ansible.builtin.set_fact: name_list_after_remove: "{{ as_inst_list.scaling_instances | map(attribute='name') | list }}" - name: assert result - assert: + ansible.builtin.assert: that: - id_list_after_remove|length == id_list|length - 1 - name_list_after_remove|length == name_list|length - 1 @@ -387,7 +388,7 @@ register: result - name: assert result - assert: + ansible.builtin.assert: that: - result is success - result is changed @@ -398,15 +399,15 @@ register: as_inst_list - name: Get list of ID of instances - set_fact: + ansible.builtin.set_fact: id_list_after_remove: "{{ as_inst_list.scaling_instances | map(attribute='id') | list }}" - name: Get list of Names of instances - set_fact: + ansible.builtin.set_fact: name_list_after_remove: "{{ as_inst_list.scaling_instances | map(attribute='name') | list }}" - name: assert result - assert: + ansible.builtin.assert: that: - id_list_after_remove|length == id_list|length - 1 - name_list_after_remove|length == name_list|length - 1 @@ -423,7 +424,7 @@ register: result - name: assert result - assert: + ansible.builtin.assert: that: - result is success - result is changed @@ -434,18 +435,19 @@ register: result - name: assert result - assert: + ansible.builtin.assert: that: - result.scaling_instances|length == {{ min_instance_number }} always: - - block: - # Cleanup + - name: Cleanup + block: - name: Delete ECS1 openstack.cloud.server: name: "{{ (as_instance_name + '_1') }}" delete_fip: true state: absent + failed_when: false - name: Delete ECS2 openstack.cloud.server: @@ -462,9 +464,10 @@ wait: true timeout: 360 register: dropped_as_group + failed_when: false - name: assert result - assert: + ansible.builtin.assert: that: - dropped_as_group is success - dropped_as_group is changed @@ -474,9 +477,10 @@ scaling_configuration: "{{ as_config_name }}" state: absent register: dropped_as_config + failed_when: false - name: assert result - assert: + ansible.builtin.assert: that: - dropped_as_config is success - dropped_as_config is changed @@ -485,24 +489,28 @@ openstack.cloud.security_group: name: "{{ secgroup_name }}" state: absent + failed_when: false - name: Delete existing router openstack.cloud.os_router: name: "{{ router.router.name }}" state: absent + failed_when: false - name: Delete existing 
subnet openstack.cloud.os_subnet: name: "{{ subnet.subnet.name }}" state: absent + failed_when: false - name: Delete existing network openstack.cloud.os_network: name: "{{ network.network.name }}" state: absent + failed_when: false - name: Delete keypair openstack.cloud.os_keypair: name: "{{ kp_name }}" state: absent - ignore_errors: true + failed_when: false diff --git a/tests/integration/targets/as_instance_info/tasks/main.yaml b/tests/integration/targets/as_instance_info/tasks/main.yaml index cadbada3..926f7d95 100644 --- a/tests/integration/targets/as_instance_info/tasks/main.yaml +++ b/tests/integration/targets/as_instance_info/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: AS Instance Info tests + module_defaults: opentelekomcloud.cloud.as_instance_info: cloud: "{{ test_cloud }}" opentelekomcloud.cloud.as_group: @@ -12,11 +13,11 @@ cloud: "{{ test_cloud }}" block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 999999 | random | to_uuid | hash('md5') }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: as_group_name: "{{ ( prefix + '_as_group') }}" network_name: "{{ ( prefix + '_network') }}" subnet_name: "{{ ( prefix + '_subnet') }}" @@ -62,7 +63,7 @@ register: as_instances - name: assert result - assert: + ansible.builtin.assert: that: - as_instances is success - as_instances is not changed @@ -74,7 +75,7 @@ register: as_instances - name: assert result - assert: + ansible.builtin.assert: that: - as_instances is success - as_instances is not changed @@ -83,30 +84,17 @@ - name: Get error message that required parameter is missing opentelekomcloud.cloud.as_instance_info: register: as_instances - ignore_errors: true - - - name: assert result - assert: - that: - - as_instances is not success - - as_instances is not changed - - 'as_instances.msg == "missing required arguments: scaling_group"' + failed_when: 'as_instances.msg != "missing required arguments: scaling_group"' - name: Get error when AS group id is missing opentelekomcloud.cloud.as_instance_info: scaling_group: register: as_instances - ignore_errors: true - - - name: assert result - assert: - that: - - as_instances is not success - - as_instances is not changed + failed_when: "as_instances is changed" always: - - block: - # Cleanup + - name: Cleanup + block: - name: Delete as group opentelekomcloud.cloud.as_group: scaling_group: @@ -115,19 +103,22 @@ force_delete: true wait: true register: dropped_as_group + failed_when: false - name: Drop existing router openstack.cloud.os_router: name: "{{ router.router.name }}" state: absent + failed_when: false - name: Drop existing subnet openstack.cloud.os_subnet: name: "{{ subnet.subnet.name }}" state: absent + failed_when: false - name: Drop existing network openstack.cloud.os_network: name: "{{ network.network.name }}" state: absent - ignore_errors: true + failed_when: false diff --git a/tests/integration/targets/as_policy/tasks/main.yaml b/tests/integration/targets/as_policy/tasks/main.yaml index 791c4d6d..171a5472 100644 --- a/tests/integration/targets/as_policy/tasks/main.yaml +++ b/tests/integration/targets/as_policy/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: AS Policy tests + module_defaults: opentelekomcloud.cloud.as_policy: cloud: "{{ test_cloud }}" opentelekomcloud.cloud.as_policy_info: @@ -18,11 +19,11 @@ cloud: "{{ test_cloud }}" block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 999999 | random | to_uuid | hash('md5') }}" - name: Set initial facts - 
set_fact: + ansible.builtin.set_fact: as_group_name: "{{ ( prefix + '_as_group') }}" as_policy_name: "{{ prefix + '_as_policy' }}" alarm_name: "{{ prefix + '_alarm' }}" @@ -42,7 +43,7 @@ register: fl - name: Set additional facts - set_fact: + ansible.builtin.set_fact: fl_ip: "{{ fl.floating_ip.floating_ip_address }}" fl_ip_id: "{{ fl.floating_ip.id }}" @@ -106,7 +107,7 @@ register: as_policy - name: assert result - assert: + ansible.builtin.assert: that: - as_policy is changed @@ -120,7 +121,7 @@ register: as_policy - name: assert result - assert: + ansible.builtin.assert: that: - as_policy is success - as_policy is changed @@ -134,14 +135,7 @@ alarm: "{{ alarm_name }}" state: "present" register: as_policy - ignore_errors: true - - - name: assert result - assert: - that: - - as_policy is not success - - as_policy is not changed - - 'as_policy.msg == "Scaling policy {{ as_policy_name }} exists"' + failed_when: 'as_policy.msg != "Scaling policy {{ as_policy_name }} exists"' - name: Get list of AS Policies opentelekomcloud.cloud.as_policy_info: @@ -149,7 +143,7 @@ register: as_policies - name: assert result - assert: + ansible.builtin.assert: that: - as_policies is success - as_policies['scaling_policies']|length == 1 @@ -167,7 +161,7 @@ register: as_policy - name: assert result - assert: + ansible.builtin.assert: that: - as_policy is success - as_policy is changed @@ -182,7 +176,7 @@ check_mode: true - name: assert result - assert: + ansible.builtin.assert: that: - as_policy is changed @@ -194,7 +188,7 @@ register: as_policy - name: assert result - assert: + ansible.builtin.assert: that: - as_policy is success - as_policy is changed @@ -206,7 +200,7 @@ register: as_policies - name: assert result - assert: + ansible.builtin.assert: that: - as_policies is success - as_policies['scaling_policies']|length == 0 @@ -217,18 +211,11 @@ scaling_policy: "{{ as_policy_name }}" state: "absent" register: as_policy - ignore_errors: true - - - name: assert result - assert: - that: - - as_policy is not success - - as_policy is not changed - - 'as_policy.msg == "Scaling policy {{ as_policy_name }} not found"' + failed_when: 'as_policy.msg != "Scaling policy {{ as_policy_name }} not found"' always: - - block: - # Cleanup + - name: Cleanup + block: - name: Delete AS group opentelekomcloud.cloud.as_group: scaling_group: @@ -237,30 +224,35 @@ force_delete: true wait: true register: dropped_as_group + failed_when: false - name: Drop alarm opentelekomcloud.cloud.ces_alarms: alarm_name: "{{ alarm_name }}" state: absent + failed_when: false - name: Drop floating ip opentelekomcloud.cloud.floating_ip: floating_ip_address: "{{ fl_ip }}" state: absent purge: true + failed_when: false - name: Drop existing router openstack.cloud.os_router: name: "{{ router.router.name }}" state: absent + failed_when: false - name: Drop existing subnet openstack.cloud.os_subnet: name: "{{ subnet.subnet.name }}" state: absent + failed_when: false - name: Drop existing network openstack.cloud.os_network: name: "{{ network.network.name }}" state: absent - ignore_errors: true + failed_when: false diff --git a/tests/integration/targets/as_policy_info/tasks/main.yaml b/tests/integration/targets/as_policy_info/tasks/main.yaml index 573720f7..41c8fca6 100644 --- a/tests/integration/targets/as_policy_info/tasks/main.yaml +++ b/tests/integration/targets/as_policy_info/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: AS Policy Info tests + module_defaults: opentelekomcloud.cloud.as_policy_info: cloud: "{{ test_cloud }}" 
opentelekomcloud.cloud.as_group: @@ -14,11 +15,11 @@ cloud: "{{ test_cloud }}" block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 999999 | random | to_uuid | hash('md5') }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: as_group_name: "{{ ( prefix + '_as_group') }}" key_name: "{{ ( prefix + '_key') }}" network_name: "{{ ( prefix + '_network') }}" @@ -69,7 +70,7 @@ register: as_policies - name: assert result - assert: + ansible.builtin.assert: that: - as_policies is success - as_policies is not changed @@ -81,7 +82,7 @@ register: as_policies - name: assert result - assert: + ansible.builtin.assert: that: - as_policies is success - as_policies is not changed @@ -90,31 +91,17 @@ - name: Get error message that required parameter is missing opentelekomcloud.cloud.as_policy_info: register: as_policies - ignore_errors: true - - - name: assert result - assert: - that: - - as_policies is not success - - as_policies is not changed - - 'as_policies.msg == "missing required arguments: scaling_group"' + failed_when: 'as_policies.msg != "missing required arguments: scaling_group"' - name: Get error when AS group id is missing opentelekomcloud.cloud.as_policy_info: scaling_group: register: as_policies - ignore_errors: true - - - name: assert result - assert: - that: - - as_policies is not success - - as_policies is not changed - - 'as_policies.msg == "Scaling group is missing"' + failed_when: 'as_policies.msg != "Scaling group is missing"' always: - - block: - # Cleanup + - name: Cleanup + block: - name: Delete as group opentelekomcloud.cloud.as_group: scaling_group: @@ -123,24 +110,28 @@ force_delete: true wait: true register: dropped_as_group + failed_when: false - name: Delete keypair openstack.cloud.os_keypair: name: "{{ key_name }}" state: absent + failed_when: false - name: Drop existing router openstack.cloud.os_router: name: "{{ router.router.name }}" state: absent + failed_when: false - name: Drop existing subnet openstack.cloud.os_subnet: name: "{{ subnet.subnet.name }}" state: absent + failed_when: false - name: Drop existing network openstack.cloud.os_network: name: "{{ network.network.name }}" state: absent - ignore_errors: true + failed_when: false diff --git a/tests/integration/targets/as_quota_info/tasks/main.yaml b/tests/integration/targets/as_quota_info/tasks/main.yaml index 91212e2a..cca8f9d9 100644 --- a/tests/integration/targets/as_quota_info/tasks/main.yaml +++ b/tests/integration/targets/as_quota_info/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: AS Quota Info tests + module_defaults: opentelekomcloud.cloud.as_quota_info: cloud: "{{ test_cloud }}" block: @@ -8,7 +9,7 @@ register: as_quotas - name: assert result - assert: + ansible.builtin.assert: that: - as_quotas is success - as_quotas is not changed diff --git a/tests/integration/targets/availability_zone_info/tasks/main.yaml b/tests/integration/targets/availability_zone_info/tasks/main.yaml index 5e3422a3..03841ca2 100644 --- a/tests/integration/targets/availability_zone_info/tasks/main.yaml +++ b/tests/integration/targets/availability_zone_info/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: Availability Zone Info tests + module_defaults: opentelekomcloud.cloud.availability_zone_info: cloud: "{{ test_cloud }}" block: @@ -13,7 +14,7 @@ register: az - name: assert result - assert: + ansible.builtin.assert: that: - az is success - az is not changed diff --git a/tests/integration/targets/cce_cluster/tasks/main.yaml 
b/tests/integration/targets/cce_cluster/tasks/main.yaml index a3ff5b98..b5f2145e 100644 --- a/tests/integration/targets/cce_cluster/tasks/main.yaml +++ b/tests/integration/targets/cce_cluster/tasks/main.yaml @@ -1,16 +1,17 @@ --- -- module_defaults: +- name: CCE Cluster tests + module_defaults: opentelekomcloud.cloud.cce_cluster: cloud: "{{ test_cloud }}" vars: prefix: scenario00a- block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ (prefix + (99999999 | random | to_uuid | hash('md5'))) }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: network_name: "{{ ( prefix + '-test-network') }}" subnet_name: "{{ ( prefix + '-test-subnet') }}" router_name: "{{ ( prefix + '-test-router') }}" @@ -58,34 +59,37 @@ register: cluster - name: assert result - assert: + ansible.builtin.assert: that: - cluster is success - cluster is changed always: - - block: - # Cleanup + - name: Cleanup + block: - name: Drop cluster opentelekomcloud.cloud.cce_cluster: name: "{{ cce_cluster_name }}" state: "absent" + failed_when: false - name: Drop router openstack.cloud.router: cloud: "{{ test_cloud }}" name: "{{ router_name }}" state: absent + failed_when: false - name: Drop subnet openstack.cloud.subnet: cloud: "{{ test_cloud }}" name: "{{ subnet_name }}" state: absent + failed_when: false - name: Drop network openstack.cloud.network: cloud: "{{ test_cloud }}" name: "{{ network_name }}" state: absent - ignore_errors: true + failed_when: false diff --git a/tests/integration/targets/cce_cluster_node/tasks/main.yaml b/tests/integration/targets/cce_cluster_node/tasks/main.yaml index ade645fc..7feb91fd 100644 --- a/tests/integration/targets/cce_cluster_node/tasks/main.yaml +++ b/tests/integration/targets/cce_cluster_node/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: CCE Cluster Node tests + module_defaults: opentelekomcloud.cloud.cce_cluster: cloud: "{{ test_cloud }}" openstack.cloud.network: @@ -16,11 +17,11 @@ prefix: scenario00a- block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ (prefix + (99999999 | random | to_uuid | hash('md5'))) }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: keypair_name: "{{ ( prefix + '-key') }}" network_name: "{{ ( prefix + '-test-network') }}" subnet_name: "{{ ( prefix + '-test-subnet') }}" @@ -68,7 +69,7 @@ register: cluster - name: assert result - assert: + ansible.builtin.assert: that: - cluster is success - cluster is changed @@ -80,7 +81,7 @@ register: ssh_key - name: assert result - assert: + ansible.builtin.assert: that: - ssh_key is success - ssh_key is changed @@ -121,42 +122,47 @@ register: node - name: assert result - assert: + ansible.builtin.assert: that: - node is success - node is changed always: - - block: - # Cleanup + - name: Cleanup + block: - name: Drop CCE cluster node opentelekomcloud.cloud.cce_cluster_node: cluster: "{{ cce_cluster_name }}" name: "{{ cce_node_name }}" state: "absent" + failed_when: false - name: Drop keypair openstack.cloud.keypair: name: "{{ keypair_name }}" state: "absent" + failed_when: false - name: Drop cluster opentelekomcloud.cloud.cce_cluster: name: "{{ cce_cluster_name }}" state: "absent" + failed_when: false - name: Drop router openstack.cloud.router: name: "{{ router_name }}" state: absent + failed_when: false - name: Drop subnet openstack.cloud.subnet: name: "{{ subnet_name }}" state: absent + failed_when: false - name: Drop network openstack.cloud.network: name: "{{ network_name }}" state: absent - ignore_errors: true + 
failed_when: false diff --git a/tests/integration/targets/cce_lifecycle/tasks/main.yaml b/tests/integration/targets/cce_lifecycle/tasks/main.yaml index bfdd26c2..0d90cf39 100644 --- a/tests/integration/targets/cce_lifecycle/tasks/main.yaml +++ b/tests/integration/targets/cce_lifecycle/tasks/main.yaml @@ -1,6 +1,7 @@ --- # author: Tino Schreiber (@tischrei) -- module_defaults: +- name: CCE Lifecycle tests + module_defaults: opentelekomcloud.cloud.cce_cluster: cloud: "{{ test_cloud }}" opentelekomcloud.cloud.cce_cluster_node: @@ -12,11 +13,11 @@ block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ (99999999 | random | to_uuid | hash('md5')) }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: network_name: "{{ ( prefix + '-cce-network') }}" subnet_name: "{{ ( prefix + '-ccc-subnet') }}" router_name: "{{ ( prefix + '-cce-router') }}" @@ -68,7 +69,7 @@ register: cluster - name: assert result - assert: + ansible.builtin.assert: that: - cluster is success - cluster is changed @@ -79,7 +80,7 @@ register: cluster_info - name: assert result - assert: + ansible.builtin.assert: that: - cluster_info is success - cluster_info.cce_clusters is defined @@ -91,7 +92,7 @@ register: keypair - name: assert result - assert: + ansible.builtin.assert: that: - keypair is success - keypair.key.private_key is defined @@ -131,13 +132,13 @@ register: node - name: assert result - assert: + ansible.builtin.assert: that: - node is success - node is changed - name: assert result - assert: + ansible.builtin.assert: that: - node is success - node.cce_cluster_node.id is defined @@ -158,7 +159,7 @@ register: node2 - name: assert result - assert: + ansible.builtin.assert: that: - node2 is success - node2.cce_cluster_node.id is defined @@ -169,14 +170,14 @@ register: node_info - name: assert result - assert: + ansible.builtin.assert: that: - node_info is success - node_info.cce_cluster_nodes is defined always: - - block: - # Cleanup + - name: Cleanup + block: - name: Drop node 2 opentelekomcloud.cloud.cce_cluster_node: cluster: "{{ cce_cluster_name }}" @@ -184,33 +185,38 @@ state: absent wait: true register: node2 + failed_when: false - name: Drop cluster opentelekomcloud.cloud.cce_cluster: name: "{{ cce_cluster_name }}" timeout: 3000 state: "absent" + failed_when: false - name: Drop Keypair openstack.cloud.keypair: state: "absent" name: "{{ keypair_name }}" + failed_when: false - name: Drop router openstack.cloud.router: cloud: "{{ test_cloud }}" name: "{{ router_name }}" state: absent + failed_when: false - name: Drop subnet openstack.cloud.subnet: cloud: "{{ test_cloud }}" name: "{{ subnet_name }}" state: absent + failed_when: false - name: Drop network openstack.cloud.network: cloud: "{{ test_cloud }}" name: "{{ network_name }}" state: absent - ignore_errors: true + failed_when: false diff --git a/tests/integration/targets/cce_node_pool/tasks/main.yaml b/tests/integration/targets/cce_node_pool/tasks/main.yaml index d6f4ad6e..f6dc610c 100644 --- a/tests/integration/targets/cce_node_pool/tasks/main.yaml +++ b/tests/integration/targets/cce_node_pool/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- block: +- name: CCE Node Pool tests + block: - name: Create Node Pool cce_node_pool: cloud: "{{ test_cloud }}" @@ -42,10 +43,10 @@ register: pool - name: debug pool - debug: + ansible.builtin.debug: var: pool.cce_node_pool - name: assert result - assert: + ansible.builtin.assert: that: - pool.cce_node_pool is defined diff --git a/tests/integration/targets/cce_node_pool_info/tasks/main.yaml 
b/tests/integration/targets/cce_node_pool_info/tasks/main.yaml index 2fc55c29..e4a00131 100644 --- a/tests/integration/targets/cce_node_pool_info/tasks/main.yaml +++ b/tests/integration/targets/cce_node_pool_info/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- block: +- name: CCE Node Pool Info tests + block: - name: Get all Node Pools of a cluster cce_node_pool_info: cloud: "{{ test_cloud }}" @@ -7,11 +8,11 @@ register: pools - name: debug pools - debug: + ansible.builtin.debug: var: pools.cce_node_pools - name: assert result - assert: + ansible.builtin.assert: that: - pools is success - pools is not changed diff --git a/tests/integration/targets/ces/tasks/main.yaml b/tests/integration/targets/ces/tasks/main.yaml index c285f0dc..71a9380f 100644 --- a/tests/integration/targets/ces/tasks/main.yaml +++ b/tests/integration/targets/ces/tasks/main.yaml @@ -1,8 +1,8 @@ --- -- name: Doing Integration test +- name: CES tests block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" - name: Assigning Floating IP @@ -11,11 +11,11 @@ register: fl - name: debug - debug: + ansible.builtin.debug: var: fl.floating_ip - name: Set facts - set_fact: + ansible.builtin.set_fact: alarm_name: "{{ ( prefix + '-alarm-ces' ) }}" fl_ip: "{{ fl.floating_ip.floating_ip_address }}" fl_ip_id: "{{ fl.floating_ip.id }}" @@ -43,7 +43,7 @@ register: ces_al_ch - name: assert result - assert: + ansible.builtin.assert: that: - ces_al_ch is success - ces_al_ch is changed @@ -70,11 +70,11 @@ register: ces_al - name: debug - debug: + ansible.builtin.debug: var: ces_al.alarms - name: assert result - assert: + ansible.builtin.assert: that: - ces_al is success - ces_al.alarms is defined @@ -85,11 +85,11 @@ register: ces_al_info - name: debug - debug: + ansible.builtin.debug: var: ces_al_info.alarms - name: assert result - assert: + ansible.builtin.assert: that: - ces_al_info is success - ces_al_info.alarms is defined @@ -104,11 +104,11 @@ register: ces_ed_info - name: debug - debug: + ansible.builtin.debug: var: ces_ed_info.events - name: assert result - assert: + ansible.builtin.assert: that: - ces_ed_info is success - ces_ed_info.events is defined @@ -125,11 +125,11 @@ register: ces_md_info - name: debug - debug: + ansible.builtin.debug: var: ces_md_info.metricdata - name: assert result - assert: + ansible.builtin.assert: that: - ces_al_info is success @@ -140,11 +140,11 @@ register: ces_me_info - name: debug - debug: + ansible.builtin.debug: var: ces_me_info.metrics - name: assert result - assert: + ansible.builtin.assert: that: - ces_me_info is success - ces_me_info.metrics is defined @@ -154,27 +154,29 @@ register: ces_qu_info - name: debug - debug: + ansible.builtin.debug: var: ces_qu_info.quotas - name: assert result - assert: + ansible.builtin.assert: that: - ces_qu_info is success - ces_qu_info.quotas is defined always: - - block: + - name: Cleanup + block: - name: Drop dns_floating_ip entry opentelekomcloud.cloud.floating_ip: floating_ip_address: "{{ fl_ip }}" state: absent purge: true register: dns_fl_dr + failed_when: false - name: Drop Alarm opentelekomcloud.cloud.ces_alarms: alarm_name: "{{ alarm_name }}" state: absent register: dns_rs_dr - ignore_errors: true + failed_when: false diff --git a/tests/integration/targets/css_cluster/tasks/main.yaml b/tests/integration/targets/css_cluster/tasks/main.yaml index 48e78517..07659786 100644 --- a/tests/integration/targets/css_cluster/tasks/main.yaml +++ b/tests/integration/targets/css_cluster/tasks/main.yaml @@ 
-1,17 +1,18 @@ --- -- module_defaults: +- name: CSS Cluster tests + module_defaults: opentelekomcloud.cloud.css_cluster: cloud: "{{ test_cloud }}" vars: prefix: test-a- block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ (prefix + (99999999 | random | to_uuid | hash('md5'))) }}" short_prefix: "{{ (prefix + (99999999999 | random | to_uuid | hash('md5') | truncate(12,end='') )) }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: network_name: "{{ ( prefix + '-test-network') }}" subnet_name: "{{ ( prefix + '-test-subnet') }}" router_name: "{{ ( prefix + '-test-router') }}" @@ -81,40 +82,44 @@ register: cluster - name: assert result - assert: + ansible.builtin.assert: that: - cluster is success - cluster is changed always: - - block: - # Cleanup + - name: Cleanup + block: - name: Drop cluster opentelekomcloud.cloud.css_cluster: name: "{{ css_cluster_name }}" state: "absent" + failed_when: false - name: Drop router openstack.cloud.router: cloud: "{{ test_cloud }}" name: "{{ router_name }}" state: absent + failed_when: false - name: Drop subnet openstack.cloud.subnet: cloud: "{{ test_cloud }}" name: "{{ subnet_name }}" state: absent + failed_when: false - name: Drop network openstack.cloud.network: cloud: "{{ test_cloud }}" name: "{{ network_name }}" state: absent + failed_when: false - name: Drop security group openstack.cloud.security_group: cloud: "{{ test_cloud }}" state: absent name: "{{ security_group_name }}" - ignore_errors: true + failed_when: false diff --git a/tests/integration/targets/css_cluster_info/tasks/main.yaml b/tests/integration/targets/css_cluster_info/tasks/main.yaml index 74d506f5..98b98047 100644 --- a/tests/integration/targets/css_cluster_info/tasks/main.yaml +++ b/tests/integration/targets/css_cluster_info/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: CSS Cluster Info tests + module_defaults: opentelekomcloud.cloud.css_cluster_info: cloud: "{{ test_cloud }}" block: @@ -10,7 +11,7 @@ register: result - name: assert result - assert: + ansible.builtin.assert: that: - result is success - result is not changed diff --git a/tests/integration/targets/css_snapshot/tasks/main.yml b/tests/integration/targets/css_snapshot/tasks/main.yml index a8c16f1a..01a04439 100644 --- a/tests/integration/targets/css_snapshot/tasks/main.yml +++ b/tests/integration/targets/css_snapshot/tasks/main.yml @@ -1,17 +1,18 @@ --- -- module_defaults: - css_snapshot: +- name: CSS Snapshot tests + module_defaults: + opentelekomcloud.cloud.css_snapshot: cloud: "{{ test_cloud }}" vars: prefix: css_cluster_snapshot_test block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ (prefix + (99999999 | random | to_uuid | hash('md5'))) }}" short_prefix: "{{ (prefix + (99999999999 | random | to_uuid | hash('md5') | truncate(12,end='') )) }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: network_name: "{{ ( prefix + '-test-network') }}" subnet_name: "{{ ( prefix + '-test-subnet') }}" router_name: "{{ ( prefix + '-test-router') }}" @@ -88,7 +89,7 @@ register: snapshot - name: assert result - assert: + ansible.builtin.assert: that: - snapshot is success - snapshot is not changed @@ -102,7 +103,7 @@ check_mode: true - name: assert result - assert: + ansible.builtin.assert: that: - created_snapshot is success - created_snapshot is not changed @@ -114,7 +115,7 @@ register: created_snapshot - name: assert result - assert: + ansible.builtin.assert: that: - created_snapshot is success - created_snapshot is 
changed @@ -129,7 +130,7 @@ check_mode: true - name: assert result - assert: + ansible.builtin.assert: that: - deleted_snapshot is not changed - deleted_snapshot is success @@ -142,14 +143,14 @@ register: deleted_snapshot - name: assert result - assert: + ansible.builtin.assert: that: - deleted_snapshot is changed - deleted_snapshot is success always: - - block: - # Cleanup + - name: Cleanup + block: - name: Drop cluster opentelekomcloud.cloud.css_cluster: cloud: "{{ test_cloud }}" @@ -179,4 +180,4 @@ cloud: "{{ test_cloud }}" state: absent name: "{{ security_group_name }}" - ignore_errors: true + failed_when: false
diff --git a/tests/integration/targets/css_snapshot_info/tasks/main.yaml b/tests/integration/targets/css_snapshot_info/tasks/main.yaml index 5f2bea17..24d26d98 100644 --- a/tests/integration/targets/css_snapshot_info/tasks/main.yaml +++ b/tests/integration/targets/css_snapshot_info/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: CSS Snapshot Info tests + module_defaults: opentelekomcloud.cloud.css_snapshot_info: cloud: "{{ test_cloud }}" block: @@ -10,7 +11,7 @@ ignore_errors: true - name: assert result - assert: + ansible.builtin.assert: that: - result is not success - result is not changed
diff --git a/tests/integration/targets/dds_datastore_info/tasks/main.yaml b/tests/integration/targets/dds_datastore_info/tasks/main.yaml index d8d99941..69ed3f99 100644 --- a/tests/integration/targets/dds_datastore_info/tasks/main.yaml +++ b/tests/integration/targets/dds_datastore_info/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: DDS Datastore Info tests + module_defaults: opentelekomcloud.cloud.dds_datastore_info: cloud: "{{ test_cloud }}" block: @@ -9,7 +10,7 @@ register: result - name: assert result - assert: + ansible.builtin.assert: that: - result is success - result is not changed
diff --git a/tests/integration/targets/dds_flavor_info/tasks/main.yaml b/tests/integration/targets/dds_flavor_info/tasks/main.yaml index 474b13c1..b970b430 100644 --- a/tests/integration/targets/dds_flavor_info/tasks/main.yaml +++ b/tests/integration/targets/dds_flavor_info/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: DDS Flavor Info tests + module_defaults: opentelekomcloud.cloud.dds_flavor_info: cloud: "{{ test_cloud }}" block: @@ -9,7 +10,7 @@ register: result - name: assert result - assert: + ansible.builtin.assert: that: - result is success - result is not changed
diff --git a/tests/integration/targets/dds_instance/tasks/main.yaml b/tests/integration/targets/dds_instance/tasks/main.yaml index 1f766577..28f68801 100644 --- a/tests/integration/targets/dds_instance/tasks/main.yaml +++ b/tests/integration/targets/dds_instance/tasks/main.yaml @@ -1,15 +1,16 @@ --- -- module_defaults: +- name: DDS Instance tests + module_defaults: opentelekomcloud.cloud.dds_instance: cloud: "{{ test_cloud }}" block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: network_name: "{{ ( prefix + 'dds_test-network') }}" subnet_name: "{{ ( prefix + 'dds_test-subnet') }}" router_name: "{{ ( prefix + 'dds_test-router') }}" @@ -23,7 +24,7 @@ register: dds - name: assert result - assert: + ansible.builtin.assert: that: - dds is success - dds is not changed @@ -80,34 +81,37 @@ state: present - name: assert result - assert: + ansible.builtin.assert: that: - obj is success - obj is changed always: - - block: - # Cleanup + - name: Cleanup + 
block: - name: Drop instance opentelekomcloud.cloud.dds_instance: name: "{{ instance_name }}" state: "absent" + failed_when: false - name: Drop router openstack.cloud.router: cloud: "{{ test_cloud }}" name: "{{ router_name }}" state: absent + failed_when: false - name: Drop subnet openstack.cloud.subnet: cloud: "{{ test_cloud }}" name: "{{ subnet_name }}" state: absent + failed_when: false - name: Drop network openstack.cloud.network: cloud: "{{ test_cloud }}" name: "{{ network_name }}" state: absent - ignore_errors: true + failed_when: false diff --git a/tests/integration/targets/dds_instance_info/tasks/main.yaml b/tests/integration/targets/dds_instance_info/tasks/main.yaml index dcc7c25f..afc46573 100644 --- a/tests/integration/targets/dds_instance_info/tasks/main.yaml +++ b/tests/integration/targets/dds_instance_info/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: DDS Instance Info tests + module_defaults: opentelekomcloud.cloud.dds_instance_info: cloud: "{{ test_cloud }}" block: @@ -8,7 +9,7 @@ register: result - name: assert result - assert: + ansible.builtin.assert: that: - result is success - result is not changed diff --git a/tests/integration/targets/deh_host/tasks/main.yaml b/tests/integration/targets/deh_host/tasks/main.yaml index 622b130d..c213e587 100644 --- a/tests/integration/targets/deh_host/tasks/main.yaml +++ b/tests/integration/targets/deh_host/tasks/main.yaml @@ -1,22 +1,23 @@ --- # author: @tischrei -- module_defaults: - openstack.cloud.deh_host: +- name: DeH Host tests + module_defaults: + opentelekomcloud.cloud.deh_host: cloud: "{{ test_cloud }}" - openstack.cloud.deh_host_info: + opentelekomcloud.cloud.deh_host_info: cloud: "{{ test_cloud }}" block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: deh_host_name: "{{ ( prefix + '_deh-host') }}" - name: Allocate DeH host - check_mode - deh_host: + opentelekomcloud.cloud.deh_host: availability_zone: eu-de-01 host_type: s2-medium name: "{{ deh_host_name }}" @@ -31,13 +32,13 @@ register: deh_ch - name: assert result - assert: + ansible.builtin.assert: that: - deh_ch is success - deh_ch is changed - name: Allocate DeH host - deh_host: + opentelekomcloud.cloud.deh_host: availability_zone: eu-de-01 host_type: s2-medium name: "{{ deh_host_name }}" @@ -51,13 +52,13 @@ register: deh - name: assert result - assert: + ansible.builtin.assert: that: - deh is success - deh.deh_host.dedicated_host_ids[0] is defined - name: Modify DeH host - check mode - deh_host: + opentelekomcloud.cloud.deh_host: id: "{{ deh.deh_host.dedicated_host_ids[0] }}" auto_placement: false check_mode: true @@ -66,12 +67,12 @@ register: deh_ch - name: assert result - assert: + ansible.builtin.assert: that: - deh_ch is success - name: Modify DeH host - deh_host: + opentelekomcloud.cloud.deh_host: id: "{{ deh.deh_host.dedicated_host_ids[0] }}" auto_placement: false when: @@ -79,29 +80,29 @@ register: deh - name: assert result - assert: + ansible.builtin.assert: that: - deh is success - name: Query not existing ECS on dedicated host - deh_server_info: + opentelekomcloud.cloud.deh_server_info: dedicated_host: "{{ deh.deh_host.id }}" when: - deh is defined register: server - name: assert result - assert: + ansible.builtin.assert: that: - server is success - server.deh_servers is defined always: - - block: - # Cleanup + - name: Cleanup + block: - name: Drop existing DeH host - deh_host: + 
opentelekomcloud.cloud.deh_host: name: "{{ deh.deh_host.name }}" state: absent register: deh - ignore_errors: true + failed_when: false diff --git a/tests/integration/targets/deh_host_info/tasks/main.yaml b/tests/integration/targets/deh_host_info/tasks/main.yaml index feba136f..86f22ba8 100644 --- a/tests/integration/targets/deh_host_info/tasks/main.yaml +++ b/tests/integration/targets/deh_host_info/tasks/main.yaml @@ -1,16 +1,17 @@ --- -- block: +- name: DeH Host Info tests + block: - name: Get Dedicated host info - deh_host_info: + opentelekomcloud.cloud.deh_host_info: cloud: "{{ test_cloud }}" register: deh - name: debug deh - debug: + ansible.builtin.debug: var: deh.deh_hosts - name: assert result - assert: + ansible.builtin.assert: that: - deh is success - deh is not changed diff --git a/tests/integration/targets/deh_host_type_info/tasks/main.yaml b/tests/integration/targets/deh_host_type_info/tasks/main.yaml index 52486951..0deac38d 100644 --- a/tests/integration/targets/deh_host_type_info/tasks/main.yaml +++ b/tests/integration/targets/deh_host_type_info/tasks/main.yaml @@ -1,17 +1,18 @@ --- -- block: +- name: DeH Host Type Info tests + block: - name: Get DeH host types in AZ - deh_host_type_info: + opentelekomcloud.cloud.deh_host_type_info: cloud: "{{ test_cloud }}" az: eu-de-01 register: deh - name: debug deh - debug: + ansible.builtin.debug: var: deh.deh_host_types - name: assert result - assert: + ansible.builtin.assert: that: - deh is success - deh is not changed diff --git a/tests/integration/targets/dms/tasks/main.yaml b/tests/integration/targets/dms/tasks/main.yaml index da07de43..a02fee7d 100644 --- a/tests/integration/targets/dms/tasks/main.yaml +++ b/tests/integration/targets/dms/tasks/main.yaml @@ -1,11 +1,11 @@ -- name: Doing Integration test +- name: DMS tests block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" - name: Set facts - set_fact: + ansible.builtin.set_fact: queue_name: "{{ ( 'a' + prefix + '-queue' ) }}" group_name: "{{ ( 'group_test' ) }}" instance_name: "{{ ( 'a' + prefix + '-instance' ) }}" @@ -105,7 +105,8 @@ register: dms_sg always: - - block: + - name: Cleanup + block: - name: DMS Delete Queue Group opentelekomcloud.cloud.dms_queue_group: queue_name: '{{ queue_name }}' @@ -113,6 +114,7 @@ state: absent register: dms_queue_group_rm check_mode: false + failed_when: false - name: Delete Queue opentelekomcloud.cloud.dms_queue: @@ -120,28 +122,32 @@ state: absent register: dms_queue_rm check_mode: false + failed_when: false - name: Delete Security Group openstack.cloud.security_group: name: "{{ sg_name }}" state: absent register: dms_sg + failed_when: false - name: Drop existing Router openstack.cloud.router: name: "{{ router_name }}" state: absent register: dns_rout_dr + failed_when: false - name: Drop existing subnet openstack.cloud.subnet: name: "{{ subnet_name }}" state: absent register: dns_subnet_dr + failed_when: false - name: Drop existing network openstack.cloud.network: name: "{{ network_name }}" state: absent register: dns_net_dr - ignore_errors: true + failed_when: false diff --git a/tests/integration/targets/dns/tasks/main.yaml b/tests/integration/targets/dns/tasks/main.yaml index e492cc99..dab0bac5 100644 --- a/tests/integration/targets/dns/tasks/main.yaml +++ b/tests/integration/targets/dns/tasks/main.yaml @@ -1,8 +1,8 @@ --- -- name: Doing Integration test +- name: DNS tests block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 
99999999 | random | to_uuid | hash('md5') }}" - name: Assigning Floating IP @@ -11,7 +11,7 @@ register: fl - name: Set facts - set_fact: + ansible.builtin.set_fact: fl_ip: "{{ fl.floating_ip.floating_ip_address }}" ptrdname: "{{ ( prefix + 'dns.com.' ) }}" description: "{{ ( prefix + 'description-dns' ) }}" @@ -57,7 +57,7 @@ register: dns_fl_ch - name: assert result - assert: + ansible.builtin.assert: that: - dns_fl_ch is success - dns_fl_ch is changed @@ -70,11 +70,11 @@ register: dns_fl - name: debug - debug: + ansible.builtin.debug: var: dns_fl.ptr - name: assert result - assert: + ansible.builtin.assert: that: - dns_fl is success - dns_fl.ptr is defined @@ -89,7 +89,7 @@ register: dns_fl_ch - name: assert result - assert: + ansible.builtin.assert: that: - dns_fl_ch is success - dns_fl_ch is changed @@ -103,11 +103,11 @@ register: dns_fl - name: debug - debug: + ansible.builtin.debug: var: dns_fl.ptr - name: assert result - assert: + ansible.builtin.assert: that: - dns_fl is success - dns_fl.ptr.description is defined @@ -120,7 +120,7 @@ register: dns_zo_ch - name: assert result - assert: + ansible.builtin.assert: that: - dns_zo_ch is success - dns_zo_ch is changed @@ -132,11 +132,11 @@ register: dns_zo - name: debug - debug: + ansible.builtin.debug: var: dns_zo.zone - name: assert result - assert: + ansible.builtin.assert: that: - dns_zo is success - dns_zo.zone is defined @@ -150,7 +150,7 @@ register: dns_zo_ch - name: assert result - assert: + ansible.builtin.assert: that: - dns_zo_ch is success - dns_zo_ch is changed @@ -163,11 +163,11 @@ register: dns_zo - name: debug - debug: + ansible.builtin.debug: var: dns_zo.zone - name: assert result - assert: + ansible.builtin.assert: that: - dns_zo is success - dns_zo.zone.description is defined @@ -182,7 +182,7 @@ register: dns_zo_pr_ch - name: assert result - assert: + ansible.builtin.assert: that: - dns_zo_pr_ch is success - dns_zo_pr_ch is changed @@ -196,11 +196,11 @@ register: dns_zo_pr - name: debug - debug: + ansible.builtin.debug: var: dns_zo_pr.zone - name: assert result - assert: + ansible.builtin.assert: that: - dns_zo_pr is success - dns_zo_pr.zone is defined @@ -214,7 +214,7 @@ register: dns_zo_pr_ch - name: assert result - assert: + ansible.builtin.assert: that: - dns_zo_pr_ch is success - dns_zo_pr_ch is changed @@ -227,11 +227,11 @@ register: dns_zo_pr - name: debug - debug: + ansible.builtin.debug: var: dns_zo_pr.zone - name: assert result - assert: + ansible.builtin.assert: that: - dns_zo_pr is success - dns_zo_pr.zone.description is defined @@ -242,7 +242,7 @@ register: dns_ns - name: assert result - assert: + ansible.builtin.assert: that: - dns_ns is success - dns_ns.nameservers[0].hostname is defined @@ -253,7 +253,7 @@ register: dns_ns - name: assert result - assert: + ansible.builtin.assert: that: - dns_ns is success - dns_ns.nameservers[0].address is defined @@ -271,7 +271,7 @@ register: dns_rs_ch - name: assert result - assert: + ansible.builtin.assert: that: - dns_rs_ch is success - dns_rs_ch is changed @@ -288,11 +288,11 @@ register: dns_rs - name: debug - debug: + ansible.builtin.debug: var: dns_rs.recordset - name: assert result - assert: + ansible.builtin.assert: that: - dns_rs is success - dns_rs.recordset is defined @@ -311,7 +311,7 @@ register: dns_rs_ch - name: assert result - assert: + ansible.builtin.assert: that: - dns_rs_ch is success - dns_rs_ch is changed @@ -329,22 +329,24 @@ register: dns_rs - name: debug - debug: + ansible.builtin.debug: var: dns_rs.recordset - name: assert result - assert: + 
ansible.builtin.assert: that: - dns_rs is success - dns_rs.recordset.description is defined always: - - block: + - name: Cleanup + block: - name: Drop dns_floating_ip entry opentelekomcloud.cloud.dns_floating_ip: floating_ip: "{{ fl_ip }}" state: absent register: dns_fl_dr + failed_when: false - name: Dropping DNS Recordset opentelekomcloud.cloud.dns_recordset: @@ -352,12 +354,14 @@ recordset_name: "{{ rs_name }}" state: absent register: dns_rs_dr + failed_when: false - name: Drop DNS public Zone opentelekomcloud.cloud.dns_zone: name: "{{ zone_public_name }}" state: absent register: dns_zo_pu_dr + failed_when: false - name: Drop DNS private Zone opentelekomcloud.cloud.dns_zone: @@ -365,6 +369,7 @@ zone_type: "private" state: absent register: dns_zo_pr_dr + failed_when: false - name: Drop Floating IP opentelekomcloud.cloud.floating_ip: @@ -372,22 +377,25 @@ state: absent purge: true register: fl_dr + failed_when: false - name: Drop existing Router openstack.cloud.router: name: "{{ router_name }}" state: absent register: dns_rout_dr + failed_when: false - name: Drop existing subnet openstack.cloud.subnet: name: "{{ subnet_name }}" state: absent register: dns_subnet_dr + failed_when: false - name: Drop existing network openstack.cloud.network: name: "{{ network_name }}" state: absent register: dns_net_dr - ignore_errors: true + failed_when: false diff --git a/tests/integration/targets/dns_recordset_info/tasks/main.yaml b/tests/integration/targets/dns_recordset_info/tasks/main.yaml index 197aaf89..e2ba36f2 100644 --- a/tests/integration/targets/dns_recordset_info/tasks/main.yaml +++ b/tests/integration/targets/dns_recordset_info/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: DNS Recordset Info tests + module_defaults: opentelekomcloud.cloud.dns_recordset: cloud: "{{ test_cloud }}" opentelekomcloud.cloud.dns_recordset_info: @@ -9,11 +10,11 @@ block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: zone_name: "{{ ( prefix + 'test.zone') }}" recordset_random_name: "{{ ( prefix + 'recordset.' 
+ prefix + 'test.zone') }}" @@ -40,7 +41,7 @@ register: recordsets - name: assert result - assert: + ansible.builtin.assert: that: - recordsets is success - recordsets is not changed @@ -53,24 +54,24 @@ register: rs - name: assert result - assert: + ansible.builtin.assert: that: - rs is success - rs is not changed - rs | length > 0 - always: - - block: - # Cleanup + - name: Cleanup + block: - name: Drop created recordset opentelekomcloud.cloud.dns_recordset: recordset_name: "{{ recordset_random_name }}" state: absent zone_id: "{{ dns_zo.zone.id }}" + failed_when: false - name: Drop created DNS zone opentelekomcloud.cloud.dns_zone: name: "{{ dns_zo.zone.name }}" state: absent - ignore_errors: true + failed_when: false diff --git a/tests/integration/targets/floating_ip/tasks/main.yaml b/tests/integration/targets/floating_ip/tasks/main.yaml index b327080f..5b14583b 100644 --- a/tests/integration/targets/floating_ip/tasks/main.yaml +++ b/tests/integration/targets/floating_ip/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- block: +- name: Floating IP tests + block: - name: Allocate floating ip opentelekomcloud.cloud.floating_ip: cloud: "{{ test_cloud }}" @@ -7,15 +8,15 @@ register: fip - name: assert allocation - assert: + ansible.builtin.assert: that: - fip is changed - fip is success - fip.floating_ip.floating_ip_address is defined always: - - block: - # Cleanup + - name: Cleanup + block: - name: Drop fip opentelekomcloud.cloud.floating_ip: cloud: "{{ test_cloud }}" @@ -25,7 +26,7 @@ register: drop - name: assert check - assert: + ansible.builtin.assert: that: - drop is changed - drop is success diff --git a/tests/integration/targets/lb_certificate/tasks/main.yaml b/tests/integration/targets/lb_certificate/tasks/main.yaml index c81b7ec9..fc75bdf0 100644 --- a/tests/integration/targets/lb_certificate/tasks/main.yaml +++ b/tests/integration/targets/lb_certificate/tasks/main.yaml @@ -1,16 +1,17 @@ --- -- module_defaults: - lb_certificate: +- name: LoadBalancer Certificate tests + module_defaults: + opentelekomcloud.cloud.lb_certificate: cloud: "{{ test_cloud }}" - lb_certificate_info: + opentelekomcloud.cloud.lb_certificate_info: cloud: "{{ test_cloud }}" block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: cert_name_srv: "{{ ( prefix + '_acc-srv-lb-cert') }}" cert_name_ca: "{{ ( prefix + '_acc-ca-lb-cert') }}" cert_description: "{{ ( prefix + '_acc-lb-cert-description') }}" @@ -107,7 +108,7 @@ register: result - name: assert result - assert: + ansible.builtin.assert: that: - result is success @@ -118,7 +119,7 @@ register: result - name: assert result - assert: + ansible.builtin.assert: that: - result is success @@ -135,7 +136,7 @@ register: result_info - name: assert result - assert: + ansible.builtin.assert: that: - result is success - result is changed @@ -155,7 +156,7 @@ register: result_info - name: assert result - assert: + ansible.builtin.assert: that: - result is success - result is changed @@ -175,7 +176,7 @@ register: result_info - name: assert result - assert: + ansible.builtin.assert: that: - result is success - result is changed @@ -189,7 +190,7 @@ register: result - name: assert result - assert: + ansible.builtin.assert: that: - result is success - result is changed @@ -208,7 +209,7 @@ register: result_info - name: assert result - assert: + ansible.builtin.assert: that: - result is success - result is changed @@ -222,23 +223,24 @@ register: result - 
name: assert result - assert: + ansible.builtin.assert: that: - result is success - result is changed always: - - block: - # Cleanup + - name: Cleanup + block: - name: Drop perhaps existing CA cert opentelekomcloud.cloud.lb_certificate: name: "{{ cert_name_ca }}" state: absent register: drop + failed_when: false - name: Drop perhaps existing Server cert opentelekomcloud.cloud.lb_certificate: name: "{{ cert_name_srv }}" state: absent register: drop - ignore_errors: true + failed_when: false diff --git a/tests/integration/targets/lb_certificate_info/tasks/main.yaml b/tests/integration/targets/lb_certificate_info/tasks/main.yaml index 37ea9689..9055c02a 100644 --- a/tests/integration/targets/lb_certificate_info/tasks/main.yaml +++ b/tests/integration/targets/lb_certificate_info/tasks/main.yaml @@ -1,26 +1,27 @@ --- -- module_defaults: - lb_certificate_info: +- name: LoadBalancer Certificate Info tests + module_defaults: + opentelekomcloud.cloud.lb_certificate_info: cloud: "{{ test_cloud }}" block: - name: Get ELB Cert info - lb_certificate_info: + opentelekomcloud.cloud.lb_certificate_info: register: result - name: assert result - assert: + ansible.builtin.assert: that: - result is success - result is not changed - result.elb_certificates is defined - name: Get ELB Cert with name filter info - lb_certificate_info: + opentelekomcloud.cloud.lb_certificate_info: name: some_fake register: result - name: assert result - assert: + ansible.builtin.assert: that: - result is success - result is not changed diff --git a/tests/integration/targets/lb_healtmonitor_info/tasks/main.yaml b/tests/integration/targets/lb_healtmonitor_info/tasks/main.yaml index 6a6d95e5..335174b4 100644 --- a/tests/integration/targets/lb_healtmonitor_info/tasks/main.yaml +++ b/tests/integration/targets/lb_healtmonitor_info/tasks/main.yaml @@ -1,18 +1,19 @@ --- -- module_defaults: - lb_healthmonitor_info: +- name: LoadBalancer HealthMonitor Info tests + module_defaults: + opentelekomcloud.cloud.lb_healthmonitor_info: cloud: "{{ test_cloud }}" block: - name: Get health checks info - lb_healthmonitor_info: + opentelekomcloud.cloud.lb_healthmonitor_info: register: hm - name: debug configs - debug: + ansible.builtin.debug: var: hm.healthmonitors - name: assert result - assert: + ansible.builtin.assert: that: - hm is success - hm is not changed diff --git a/tests/integration/targets/lb_listener_certificates/tasks/main.yaml b/tests/integration/targets/lb_listener_certificates/tasks/main.yaml index edbb3b68..3f338ba3 100644 --- a/tests/integration/targets/lb_listener_certificates/tasks/main.yaml +++ b/tests/integration/targets/lb_listener_certificates/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: LoadBalancer Certificates tests + module_defaults: opentelekomcloud.cloud.loadbalancer: cloud: "{{ test_cloud }}" opentelekomcloud.cloud.lb_listener: @@ -16,11 +17,11 @@ cloud: "{{ test_cloud }}" block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: network_name: "{{ ( prefix + '_acc-lb-network') }}" subnet_name: "{{ ( prefix + '_acc-lb-subnet') }}" router_name: "{{ ( prefix + '_acc-lb-router') }}" @@ -150,7 +151,7 @@ register: result - name: assert result - assert: + ansible.builtin.assert: that: - result is success @@ -161,7 +162,7 @@ register: result - name: assert result - assert: + ansible.builtin.assert: that: - result is success @@ -179,7 +180,7 @@ register: cert_1_info - name: 
assert result - assert: + ansible.builtin.assert: that: - cert_1 is success - cert_1 is changed @@ -201,7 +202,7 @@ register: cert_2_info - name: assert result - assert: + ansible.builtin.assert: that: - cert_2 is success - cert_2 is changed @@ -218,7 +219,7 @@ register: lb - name: assert result - assert: + ansible.builtin.assert: that: - lb is success - lb is changed @@ -235,7 +236,7 @@ register: listener - name: assert result - assert: + ansible.builtin.assert: that: - listener is success - listener is changed @@ -252,7 +253,7 @@ register: listener - name: assert result - assert: + ansible.builtin.assert: that: - listener is success - listener is not changed @@ -269,7 +270,7 @@ register: listener - name: assert result - assert: + ansible.builtin.assert: that: - listener is success - listener is changed @@ -281,7 +282,7 @@ register: listener_drop - name: assert result - assert: + ansible.builtin.assert: that: - listener_drop is success - listener_drop is changed @@ -294,7 +295,7 @@ register: loadbalancer_drop - name: assert result - assert: + ansible.builtin.assert: that: - loadbalancer_drop is success - loadbalancer_drop is changed @@ -306,7 +307,7 @@ register: cert_drop_1 - name: assert result - assert: + ansible.builtin.assert: that: - cert_drop_1 is success - cert_drop_1 is changed @@ -318,30 +319,33 @@ register: cert_drop_2 - name: assert result - assert: + ansible.builtin.assert: that: - cert_drop_2 is success - cert_drop_2 is changed always: - - block: - # Cleanup + - name: Cleanup + block: - name: Drop perhaps existing Server cert 1 opentelekomcloud.cloud.lb_certificate: name: "{{ cert_name_srv_1 }}" state: absent register: drop + failed_when: false - name: Drop perhaps existing Server cert 2 opentelekomcloud.cloud.lb_certificate: name: "{{ cert_name_srv_2 }}" state: absent register: drop + failed_when: false - name: Drop perhaps existing listener opentelekomcloud.cloud.lb_listener: state: absent name: "{{ listener_name }}" register: drop + failed_when: false - name: Drop perhaps existing loadbalancer opentelekomcloud.cloud.loadbalancer: @@ -349,22 +353,25 @@ state: absent delete_public_ip: true register: drop + failed_when: false - name: Drop existing Router openstack.cloud.os_router: name: "{{ router_name }}" state: absent register: drop + failed_when: false - name: Drop existing subnet openstack.cloud.os_subnet: name: "{{ subnet_name }}" state: absent register: drop + failed_when: false - name: Drop existing network openstack.cloud.os_network: name: "{{ network_name }}" state: absent register: drop - ignore_errors: true + failed_when: false diff --git a/tests/integration/targets/loadbalancer/tasks/main.yaml b/tests/integration/targets/loadbalancer/tasks/main.yaml index 138dcc23..63c1c8b3 100644 --- a/tests/integration/targets/loadbalancer/tasks/main.yaml +++ b/tests/integration/targets/loadbalancer/tasks/main.yaml @@ -1,14 +1,15 @@ --- -- module_defaults: - loadbalancer: +- name: LoadBalancer tests + module_defaults: + opentelekomcloud.cloud.loadbalancer: cloud: "{{ test_cloud }}" block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: network_name: "{{ ( prefix + '_acc-lb-network') }}" subnet_name: "{{ ( prefix + '_acc-lb-subnet') }}" router_name: "{{ ( prefix + '_acc-lb-router') }}" @@ -52,7 +53,7 @@ check_mode: true - name: assert result - assert: + ansible.builtin.assert: that: - loadbalancer_check is changed @@ -65,7 +66,7 @@ 
register: lb - name: assert result - assert: + ansible.builtin.assert: that: - lb is success - lb is changed @@ -85,7 +86,7 @@ register: res - name: assert result - assert: + ansible.builtin.assert: that: - res is success - res.loadbalancers is defined @@ -102,7 +103,7 @@ register: lstnr - name: assert result - assert: + ansible.builtin.assert: that: - lstnr is success - lstnr is changed @@ -115,7 +116,7 @@ register: lstnr - name: assert result - assert: + ansible.builtin.assert: that: - lstnr is success - lstnr is changed @@ -126,7 +127,7 @@ register: res - name: assert result - assert: + ansible.builtin.assert: that: - res is success - res.listeners is defined @@ -143,7 +144,7 @@ register: pool - name: assert result - assert: + ansible.builtin.assert: that: - pool is success - pool is changed @@ -158,7 +159,7 @@ register: pool - name: assert result - assert: + ansible.builtin.assert: that: - pool is success - pool is changed @@ -169,7 +170,7 @@ register: res - name: assert result - assert: + ansible.builtin.assert: that: - res is success - res.server_groups is defined @@ -182,7 +183,7 @@ register: dropped - name: assert result - assert: + ansible.builtin.assert: that: - dropped is success - dropped is changed @@ -194,7 +195,7 @@ register: dropped - name: assert result - assert: + ansible.builtin.assert: that: - dropped is success - dropped is changed @@ -208,7 +209,7 @@ register: dropped - name: assert result - assert: + ansible.builtin.assert: that: - dropped is success - dropped is changed @@ -221,36 +222,39 @@ register: dropped - name: assert result - assert: + ansible.builtin.assert: that: - dropped is success - dropped is not changed always: - - block: - # Cleanup + - name: Cleanup + block: - name: Drop perhaps existing loadbalancer opentelekomcloud.cloud.loadbalancer: name: "{{ lb.loadbalancer.id }}" state: absent delete_public_ip: true register: lb + failed_when: false - name: Drop existing Router openstack.cloud.os_router: name: "{{ router_name }}" state: absent register: lb_net_router + failed_when: false - name: Drop existing subnet openstack.cloud.os_subnet: name: "{{ subnet_name }}" state: absent register: lb_net_subnet + failed_when: false - name: Drop existing network openstack.cloud.os_network: name: "{{ network_name }}" state: absent register: lb_net - ignore_errors: true + failed_when: false diff --git a/tests/integration/targets/nat/tasks/main.yaml b/tests/integration/targets/nat/tasks/main.yaml index 0b5a430c..6cb0fe08 100644 --- a/tests/integration/targets/nat/tasks/main.yaml +++ b/tests/integration/targets/nat/tasks/main.yaml @@ -1,7 +1,8 @@ --- # author: @tischrei -- module_defaults: - nat_gateway: +- name: NAT Gateway tests + module_defaults: + opentelekomcloud.cloud.nat_gateway: cloud: "{{ test_cloud }}" opentelekomcloud.cloud.floating_ip: cloud: "{{ test_cloud }}" @@ -10,11 +11,11 @@ block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: network_name: "{{ ( prefix + '_nat-network') }}" subnet_name: "{{ ( prefix + '_nat-subnet') }}" router_name: "{{ ( prefix + '_nat-router') }}" @@ -50,7 +51,7 @@ register: nat_router - name: Create NAT gateway - check mode - nat_gateway: + opentelekomcloud.cloud.nat_gateway: name: "{{ nat_gateway_name }}" internal_network: "{{ network_name }}" router: "{{ router_name }}" @@ -58,45 +59,45 @@ register: nat_gw_ch - name: assert result - assert: + ansible.builtin.assert: that: - nat_gw_ch is 
success - nat_gw_ch is changed - name: Create NAT gateway - nat_gateway: + opentelekomcloud.cloud.nat_gateway: name: "{{ nat_gateway_name }}" internal_network: "{{ network_name }}" router: "{{ router_name }}" register: nat_gw - name: assert result - assert: + ansible.builtin.assert: that: - nat_gw is success - nat_gw.gateway.id is defined - name: Add NAT gateway description - check mode - nat_gateway: + opentelekomcloud.cloud.nat_gateway: name: "{{ nat_gw.gateway.name }}" description: test-description check_mode: true register: nat_gw_ch - name: assert result - assert: + ansible.builtin.assert: that: - nat_gw_ch is success - nat_gw_ch is changed - name: Add NAT gateway description - nat_gateway: + opentelekomcloud.cloud.nat_gateway: name: "{{ nat_gw.gateway.name }}" description: test-description register: nat_gw - name: assert result - assert: + ansible.builtin.assert: that: - nat_gw is success - nat_gw.gateway.description is defined @@ -107,13 +108,13 @@ register: fip - name: assert result - assert: + ansible.builtin.assert: that: - fip is success - fip.floating_ip.floating_ip_address is defined - name: Add SNAT rule - check mode - nat_snat_rule: + opentelekomcloud.cloud.nat_snat_rule: nat_gateway: "{{ nat_gw.gateway.name }}" network: "{{ network_name }}" floating_ip: "{{ fip.floating_ip.floating_ip_address }}" @@ -121,38 +122,38 @@ register: snat_ch - name: assert result - assert: + ansible.builtin.assert: that: - snat_ch is success - snat_ch is changed - name: Add SNAT rule - nat_snat_rule: + opentelekomcloud.cloud.nat_snat_rule: nat_gateway: "{{ nat_gw.gateway.name }}" network: "{{ network_name }}" floating_ip: "{{ fip.floating_ip.floating_ip_address }}" register: snat - name: assert result - assert: + ansible.builtin.assert: that: - snat is success - snat.snat_rule.id is defined - - name: Launch a server instance - openstack.cloud.server: - name: "{{ server_name }}" - image: "{{ image_name }}" - network: "{{ network_name }}" - flavor: "{{ server_flavor }}" - auto_ip: false - register: server - - - name: assert result - assert: - that: - - server is success - - server.server.id is defined + # - name: Launch a server instance + # openstack.cloud.server: + # name: "{{ server_name }}" + # image: "{{ image_name }}" + # network: "{{ network_name }}" + # flavor: "{{ server_flavor }}" + # auto_ip: false + # register: server + + # - name: assert result + # ansible.builtin.assert: + # that: + # - server is success + # - server.server.id is defined - name: Allocate EIP for DNAT rule opentelekomcloud.cloud.floating_ip: @@ -160,56 +161,61 @@ register: fip2 - name: assert result - assert: + ansible.builtin.assert: that: - fip2 is success - fip2.floating_ip.floating_ip_address is defined - name: Add DNAT rule - check mode - nat_dnat_rule: + opentelekomcloud.cloud.nat_dnat_rule: nat_gateway: "{{ nat_gw.gateway.name }}" floating_ip: "{{ fip2.floating_ip.floating_ip_address }}" internal_service_port: 80 external_service_port: 80 protocol: tcp - private_ip: "{{ server.server.private_v4 }}" + # private_ip: "{{ server.server.private_v4 }}" + # Temporarily switch to random IP address until we provision server + private_ip: "192.168.0.2" check_mode: true register: dnat_ch - name: assert result - assert: + ansible.builtin.assert: that: - dnat_ch is success - dnat_ch is changed - name: Add DNAT rule - nat_dnat_rule: + opentelekomcloud.cloud.nat_dnat_rule: nat_gateway: "{{ nat_gw.gateway.name }}" floating_ip: "{{ fip2.floating_ip.floating_ip_address }}" internal_service_port: 80 external_service_port: 80 
protocol: tcp - private_ip: "{{ server.server.private_v4 }}" + # private_ip: "{{ server.server.private_v4 }}" + # Temporarily switch to random IP address until we provision server + private_ip: "192.168.0.2" register: dnat - name: assert result - assert: + ansible.builtin.assert: that: - dnat is success - dnat.dnat_rule.id is defined always: - - block: - # Cleanup + - name: Cleanup + block: - name: List SNAT rules of gateway - nat_snat_rule_info: + opentelekomcloud.cloud.nat_snat_rule_info: gateway: "{{ nat_gateway_name }}" when: - nat_gw.gateway.id is defined register: snat_rules + failed_when: false - name: Drop SNAT rules - nat_snat_rule: + opentelekomcloud.cloud.nat_snat_rule: id: "{{ item.id }}" state: absent loop: "{{ snat_rules.snat_rules }}" @@ -217,6 +223,7 @@ - nat_gw.gateway.id is defined - snat_rules.snat_rules is defined register: snat + failed_when: false - name: Drop EIP for snat rule opentelekomcloud.cloud.floating_ip: @@ -225,16 +232,18 @@ state: absent when: fip is defined register: fip + failed_when: false - name: List DNAT rules of gateway - nat_dnat_rule_info: + opentelekomcloud.cloud.nat_dnat_rule_info: gateway: "{{ nat_gateway_name }}" when: - nat_gw.gateway.id is defined register: dnat_rules + failed_when: false - name: Drop DNAT rules - nat_dnat_rule: + opentelekomcloud.cloud.nat_dnat_rule: id: "{{ item.id }}" state: absent loop: "{{ dnat_rules.dnat_rules }}" @@ -242,6 +251,7 @@ - nat_gw.gateway.id is defined - dnat_rules.dnat_rules is defined register: dnat + failed_when: false - name: Drop EIP for dnat rule opentelekomcloud.cloud.floating_ip: @@ -250,34 +260,39 @@ state: absent when: fip2 is defined register: fip2 + failed_when: false - - name: Drop server instance - openstack.cloud.server: - name: "{{ server_name }}" - state: absent - register: server + # - name: Drop server instance + # openstack.cloud.server: + # name: "{{ server_name }}" + # state: absent + # register: server + # failed_when: false - name: Drop existing NAT gateway - nat_gateway: + opentelekomcloud.cloud.nat_gateway: name: "{{ nat_gateway_name }}" state: absent register: nat_gw + failed_when: false - name: Drop existing Router openstack.cloud.router: name: "{{ router_name }}" state: absent register: nat_router + failed_when: false - name: Drop existing subnet openstack.cloud.subnet: name: "{{ subnet_name }}" state: absent register: nat_subnet + failed_when: false - name: Drop existing network openstack.cloud.network: name: "{{ network_name }}" state: absent register: nat_net - ignore_errors: true + failed_when: false diff --git a/tests/integration/targets/nat_dnat_rule_info/tasks/main.yaml b/tests/integration/targets/nat_dnat_rule_info/tasks/main.yaml index 03bd7b9b..e40bdf17 100644 --- a/tests/integration/targets/nat_dnat_rule_info/tasks/main.yaml +++ b/tests/integration/targets/nat_dnat_rule_info/tasks/main.yaml @@ -1,16 +1,17 @@ --- -- block: +- name: Nat DNAT Rule Info tests + block: - name: Get DNAT rule info - nat_dnat_rule_info: + opentelekomcloud.cloud.nat_dnat_rule_info: cloud: "{{ test_cloud }}" register: dn - name: debug configs - debug: + ansible.builtin.debug: var: dn.dnat_rules - name: assert result - assert: + ansible.builtin.assert: that: - dn is success - dn is not changed diff --git a/tests/integration/targets/nat_gateway_info/tasks/main.yaml b/tests/integration/targets/nat_gateway_info/tasks/main.yaml index c7529684..da2db89d 100644 --- a/tests/integration/targets/nat_gateway_info/tasks/main.yaml +++ b/tests/integration/targets/nat_gateway_info/tasks/main.yaml @@ -1,16 
+1,17 @@ --- -- block: +- name: Nat Gateway Info tests + block: - name: Get NAT gateway info - nat_gateway_info: + opentelekomcloud.cloud.nat_gateway_info: cloud: "{{ test_cloud }}" register: gw - name: debug configs - debug: + ansible.builtin.debug: var: gw.nat_gateways - name: assert result - assert: + ansible.builtin.assert: that: - gw is success - gw is not changed diff --git a/tests/integration/targets/nat_snat_rule_info/tasks/main.yaml b/tests/integration/targets/nat_snat_rule_info/tasks/main.yaml index a2a84918..fb279723 100644 --- a/tests/integration/targets/nat_snat_rule_info/tasks/main.yaml +++ b/tests/integration/targets/nat_snat_rule_info/tasks/main.yaml @@ -1,16 +1,17 @@ --- -- block: +- name: Nat SNAT Rule Info tests + block: - name: Get SNAT rule info - nat_snat_rule_info: + opentelekomcloud.cloud.nat_snat_rule_info: cloud: "{{ test_cloud }}" register: sn - name: debug configs - debug: + ansible.builtin.debug: var: sn.snat_rules - name: assert result - assert: + ansible.builtin.assert: that: - sn is success - sn is not changed diff --git a/tests/integration/targets/object_info/tasks/main.yaml b/tests/integration/targets/object_info/tasks/main.yaml index 807fae86..6684b15b 100644 --- a/tests/integration/targets/object_info/tasks/main.yaml +++ b/tests/integration/targets/object_info/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: Object Info tests + module_defaults: opentelekomcloud.cloud.object_info: cloud: "{{ test_cloud }}" block: @@ -8,7 +9,7 @@ register: containers - name: assert result - assert: + ansible.builtin.assert: that: - containers is success - containers.swift.containers is defined diff --git a/tests/integration/targets/prepare_tests/tasks/main.yaml b/tests/integration/targets/prepare_tests/tasks/main.yaml index 534f008f..56a0774a 100644 --- a/tests/integration/targets/prepare_tests/tasks/main.yaml +++ b/tests/integration/targets/prepare_tests/tasks/main.yaml @@ -1,5 +1,5 @@ --- -- name: set fact - set_fact: - # otc_cloud: "{{ lookup('env', 'OS_CLOUD') | default('otc') }}" - # otc_cloud: otc +# - name: set fact +# ansible.builtin.set_fact: +# otc_cloud: "{{ lookup('env', 'OS_CLOUD') | default('otc') }}" +# otc_cloud: otc diff --git a/tests/integration/targets/rds_backup/tasks/main.yaml b/tests/integration/targets/rds_backup/tasks/main.yaml index 78e98358..f594b7c8 100644 --- a/tests/integration/targets/rds_backup/tasks/main.yaml +++ b/tests/integration/targets/rds_backup/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: RDS Backup tests + module_defaults: opentelekomcloud.cloud.rds_backup: cloud: "{{ test_cloud }}" block: @@ -12,7 +13,7 @@ ignore_errors: true - name: assert result - assert: + ansible.builtin.assert: that: - rds_backup is not success - rds_backup is not changed diff --git a/tests/integration/targets/rds_backup_info/tasks/main.yaml b/tests/integration/targets/rds_backup_info/tasks/main.yaml index e022fa27..b0b89440 100644 --- a/tests/integration/targets/rds_backup_info/tasks/main.yaml +++ b/tests/integration/targets/rds_backup_info/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: RDS Backup Info tests + module_defaults: opentelekomcloud.cloud.rds_backup_info: cloud: "{{ test_cloud }}" block: @@ -7,23 +8,9 @@ opentelekomcloud.cloud.rds_backup_info: instance: register: rds_backup_info - ignore_errors: true - - - name: assert result - assert: - that: - - rds_backup_info is not success - - rds_backup_info is not changed - - 'rds_backup_info.msg == "RDS instance is missing"' + failed_when: 
'rds_backup_info.msg != "RDS instance is missing"' - name: Get error message that required parameter is missing opentelekomcloud.cloud.rds_backup_info: register: rds_backup_info - ignore_errors: true - - - name: assert result - assert: - that: - - rds_backup_info is not success - - rds_backup_info is not changed - - 'rds_backup_info.msg == "missing required arguments: instance"' + failed_when: 'rds_backup_info.msg != "missing required arguments: instance"' diff --git a/tests/integration/targets/rds_datastore_info/tasks/main.yaml b/tests/integration/targets/rds_datastore_info/tasks/main.yaml index 1cc806ce..6bb5c3c1 100644 --- a/tests/integration/targets/rds_datastore_info/tasks/main.yaml +++ b/tests/integration/targets/rds_datastore_info/tasks/main.yaml @@ -1,14 +1,14 @@ --- -- module_defaults: +- name: RDS Datastore Info tests block: - name: Get datastore info - rds_datastore_info: + opentelekomcloud.cloud.rds_datastore_info: cloud: "{{ test_cloud }}" datastore: "postgresql" register: rds - name: assert result - assert: + ansible.builtin.assert: that: - rds is success - rds is not changed diff --git a/tests/integration/targets/rds_flavor_info/tasks/main.yaml b/tests/integration/targets/rds_flavor_info/tasks/main.yaml index ce91fc9b..476b0949 100644 --- a/tests/integration/targets/rds_flavor_info/tasks/main.yaml +++ b/tests/integration/targets/rds_flavor_info/tasks/main.yaml @@ -1,18 +1,18 @@ --- -- module_defaults: - group/otc: - cloud: otc +- name: RDS Flavor Info tests + module_defaults: + opentelekomcloud.cloud.rds_flavor_info: + cloud: "{{ test_cloud }}" block: - name: Get flavor info - rds_flavor_info: - cloud: "{{ test_cloud }}" + opentelekomcloud.cloud.rds_flavor_info: datastore: "postgresql" version: "10" instance_mode: "single" register: rds - name: assert result - assert: + ansible.builtin.assert: that: - rds is success - rds is not changed @@ -24,17 +24,9 @@ - rds.rds_flavors[0]['vcpus'] is defined - name: Get flavor info for wrong ds - rds_flavor_info: - cloud: "{{ test_cloud }}" + opentelekomcloud.cloud.rds_flavor_info: datastore: "postgresql1" version: "10" instance_mode: "single" register: rds - ignore_errors: true - - - name: assert result - assert: - that: - - rds is not success - - rds is not changed - - 'rds.msg == "value of datastore must be one of: mysql, postgresql, sqlserver, got: postgresql1"' + failed_when: 'rds.msg != "value of datastore must be one of: mysql, postgresql, sqlserver, got: postgresql1"' diff --git a/tests/integration/targets/rds_instance/tasks/main.yaml b/tests/integration/targets/rds_instance/tasks/main.yaml index fcf210a9..0e095026 100644 --- a/tests/integration/targets/rds_instance/tasks/main.yaml +++ b/tests/integration/targets/rds_instance/tasks/main.yaml @@ -1,14 +1,15 @@ --- -- module_defaults: +- name: RDS Instance tests + module_defaults: opentelekomcloud.cloud.rds_instance: cloud: "{{ test_cloud }}" block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: network_name: "{{ ( prefix + 'rds_test-network') }}" subnet_name: "{{ ( prefix + 'rds_test-subnet') }}" router_name: "{{ ( prefix + 'rds_test-router') }}" @@ -16,13 +17,13 @@ rds_flavor: "rds.mysql.s1.medium" - name: Delete missing instance - rds_instance: + opentelekomcloud.cloud.rds_instance: state: absent name: "definitely_missing_instance" register: rds - name: assert result - assert: + ansible.builtin.assert: that: - rds is success - rds is not 
changed @@ -74,34 +75,37 @@ register: obj - name: assert result - assert: + ansible.builtin.assert: that: - obj is success - obj is changed always: - - block: - # Cleanup + - name: Cleanup + block: - name: Drop instance opentelekomcloud.cloud.rds_instance: name: "{{ instance_name }}" state: "absent" + failed_when: false - name: Drop router openstack.cloud.router: cloud: "{{ test_cloud }}" name: "{{ router_name }}" state: absent + failed_when: false - name: Drop subnet openstack.cloud.subnet: cloud: "{{ test_cloud }}" name: "{{ subnet_name }}" state: absent + failed_when: false - name: Drop network openstack.cloud.network: cloud: "{{ test_cloud }}" name: "{{ network_name }}" state: absent - ignore_errors: true + failed_when: false diff --git a/tests/integration/targets/rds_instance_info/tasks/main.yaml b/tests/integration/targets/rds_instance_info/tasks/main.yaml index c9e35d98..d8d308ae 100644 --- a/tests/integration/targets/rds_instance_info/tasks/main.yaml +++ b/tests/integration/targets/rds_instance_info/tasks/main.yaml @@ -1,54 +1,55 @@ --- -- module_defaults: - rds_instance_info: +- name: RDS Instance Info tests + module_defaults: + opentelekomcloud.cloud.rds_instance_info: cloud: "{{ test_cloud }}" block: - name: List instances without queries - rds_instance_info: + opentelekomcloud.cloud.rds_instance_info: register: rds - name: assert result - assert: + ansible.builtin.assert: that: - rds is success - rds is not changed - name: List pg instances - rds_instance_info: + opentelekomcloud.cloud.rds_instance_info: datastore_type: postgresql - name: List mysql instances - rds_instance_info: + opentelekomcloud.cloud.rds_instance_info: datastore_type: mysql - name: List sqlserver instances - rds_instance_info: + opentelekomcloud.cloud.rds_instance_info: datastore_type: sqlserver - name: List instances with name filter - rds_instance_info: + opentelekomcloud.cloud.rds_instance_info: name: some_name - name: List instances with single type filter - rds_instance_info: + opentelekomcloud.cloud.rds_instance_info: instance_type: single - name: List instances with ha type filter - rds_instance_info: + opentelekomcloud.cloud.rds_instance_info: instance_type: ha - name: List instances with replica type filter - rds_instance_info: + opentelekomcloud.cloud.rds_instance_info: instance_type: replica - name: List instances with net filters - rds_instance_info: + opentelekomcloud.cloud.rds_instance_info: network: admin_external_net router: some_fake_router register: rds - name: assert result - assert: + ansible.builtin.assert: that: - rds is success - rds is not changed diff --git a/tests/integration/targets/security_group/tasks/main.yaml b/tests/integration/targets/security_group/tasks/main.yaml index f790044c..04882059 100644 --- a/tests/integration/targets/security_group/tasks/main.yaml +++ b/tests/integration/targets/security_group/tasks/main.yaml @@ -1,14 +1,15 @@ --- -- module_defaults: +- name: Security Group tests + module_defaults: opentelekomcloud.cloud.security_group: cloud: "{{ test_cloud }}" block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: security_group_name: "{{ ( prefix + 'security_group') }}" - name: Create security group @@ -31,14 +32,14 @@ register: sg - name: assert result - assert: + ansible.builtin.assert: that: - sg is success - sg is changed always: - - block: - # Cleanup + - name: Cleanup + block: - name: Drop security group 
opentelekomcloud.cloud.security_group: name: "{{ security_group_name }}" diff --git a/tests/integration/targets/security_group_info/tasks/main.yaml b/tests/integration/targets/security_group_info/tasks/main.yaml index 0d7819ce..ff7851fd 100644 --- a/tests/integration/targets/security_group_info/tasks/main.yaml +++ b/tests/integration/targets/security_group_info/tasks/main.yaml @@ -1,15 +1,16 @@ --- # author: @tischrei -- module_defaults: +- name: Security Group Info tests + module_defaults: opentelekomcloud.cloud.security_group_info: cloud: "{{ test_cloud }}" block: - name: List security groups - security_group_info: + opentelekomcloud.cloud.security_group_info: register: sg - name: assert result - assert: + ansible.builtin.assert: that: - sg is success - sg.security_groups is defined diff --git a/tests/integration/targets/server_group_info/tasks/main.yaml b/tests/integration/targets/server_group_info/tasks/main.yaml index f80d233a..cc55e916 100644 --- a/tests/integration/targets/server_group_info/tasks/main.yaml +++ b/tests/integration/targets/server_group_info/tasks/main.yaml @@ -1,15 +1,16 @@ --- # author: @tischrei -- module_defaults: +- name: Server Group Info tests + module_defaults: opentelekomcloud.cloud.server_group_info: cloud: "{{ test_cloud }}" block: - name: List server groups - server_group_info: + opentelekomcloud.cloud.server_group_info: register: sg - name: assert result - assert: + ansible.builtin.assert: that: - sg is success - sg.server_groups is defined diff --git a/tests/integration/targets/subnet/tasks/main.yaml b/tests/integration/targets/subnet/tasks/main.yaml index 0efbc370..82625b40 100644 --- a/tests/integration/targets/subnet/tasks/main.yaml +++ b/tests/integration/targets/subnet/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: Subnet tests + module_defaults: opentelekomcloud.cloud.vpc: cloud: "{{ test_cloud }}" opentelekomcloud.cloud.subnet: @@ -7,11 +8,11 @@ block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: vpc_name: "{{ ( prefix + '_vpc') }}" subnet_name: "test-subnet" cidr: "192.168.0.0/24" @@ -36,7 +37,7 @@ register: subnet - name: Assert result - assert: + ansible.builtin.assert: that: - subnet is success - subnet is changed @@ -56,7 +57,7 @@ register: subnet_check_mode - name: Assert check result - assert: + ansible.builtin.assert: that: - subnet_check_mode is success - subnet_check_mode is not changed @@ -73,7 +74,7 @@ register: updated_subnet - name: Assert result - assert: + ansible.builtin.assert: that: - updated_subnet is success - updated_subnet is changed @@ -87,7 +88,7 @@ register: deleted_subnet - name: Assert result - assert: + ansible.builtin.assert: that: - deleted_subnet is success - deleted_subnet is changed @@ -101,7 +102,7 @@ check_mode: true - name: Assert result - assert: + ansible.builtin.assert: that: - deleted_subnet_check is success - deleted_subnet_check is not changed diff --git a/tests/integration/targets/subnet_info/tasks/main.yaml b/tests/integration/targets/subnet_info/tasks/main.yaml index 6b5617c2..03d94cf0 100644 --- a/tests/integration/targets/subnet_info/tasks/main.yaml +++ b/tests/integration/targets/subnet_info/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: Subnet Info tests + module_defaults: opentelekomcloud.cloud.vpc: cloud: "{{ test_cloud }}" opentelekomcloud.cloud.vpc_info: @@ -11,11 +12,11 @@ block: - name: Set random prefix - set_fact: 
+ ansible.builtin.set_fact: prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: vpc_name: "{{ ( prefix + 'vpc') }}" subnet_name: "{{ ( prefix + 'subnet') }}" cidr: "192.168.0.0/24" @@ -45,7 +46,7 @@ register: all_subnets - name: assert result - assert: + ansible.builtin.assert: that: - all_subnets is success - all_subnets is not changed @@ -57,23 +58,24 @@ register: new_subnet - name: assert result - assert: + ansible.builtin.assert: that: - new_subnet is success - new_subnet is not changed - new_subnet | length > 0 always: - - block: - # Cleanup + - name: Cleanup + block: - name: Drop created subnet opentelekomcloud.cloud.subnet: name: "{{ subnet_name }}" vpc: "{{ vpc_name }}" state: absent + failed_when: false - name: Drop created vpc opentelekomcloud.cloud.vpc: name: "{{ vpc_name }}" state: absent - ignore_errors: true + failed_when: false diff --git a/tests/integration/targets/volume_backup/tasks/aliases b/tests/integration/targets/volume_backup/aliases similarity index 100% rename from tests/integration/targets/volume_backup/tasks/aliases rename to tests/integration/targets/volume_backup/aliases diff --git a/tests/integration/targets/volume_backup/tasks/main.yaml b/tests/integration/targets/volume_backup/tasks/main.yaml index cf97e01f..2f83696c 100644 --- a/tests/integration/targets/volume_backup/tasks/main.yaml +++ b/tests/integration/targets/volume_backup/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: Volume Backup tests + module_defaults: openstack.cloud.volume: cloud: "{{ test_cloud }}" opentelekomcloud.cloud.volume_backup: @@ -8,14 +9,13 @@ cloud: "{{ test_cloud }}" openstack.cloud.volume_snapshot: cloud: "{{ test_cloud }}" - block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 999999 | random | to_uuid | hash('md5') }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: display_name: "{{ ( prefix + '-test-volume') }}" backup_name: "{{ ( prefix + '-test-backup') }}" availability_zone: "eu-de-01" @@ -29,7 +29,7 @@ register: vol - name: 2 assert result of volume creating - assert: + ansible.builtin.assert: that: - vol is success - vol is changed @@ -43,7 +43,7 @@ register: bckp - name: 4 assert result - assert: + ansible.builtin.assert: that: - bckp is success - bckp is changed @@ -56,39 +56,36 @@ register: snapshot - name: 6 assert result for snapshot - assert: + ansible.builtin.assert: that: - snapshot is success - snapshot.volume_snapshots is defined - snapshot.volume_snapshots | length > 0 - name: 7 set fact to found proper snapshot - set_fact: + ansible.builtin.set_fact: snapshot_name: "{{ snapshot['volume_snapshots'][0]['name'] }}" always: - - block: + - name: Cleanup + block: - name: 8 delete snapshot openstack.cloud.volume_snapshot: volume: "{{ display_name }}" name: "{{ snapshot_name }}" state: absent register: delsh - - - name: 9 assert result of deliting snapshot - assert: - that: - - delsh is success - - delsh is changed + when: "snapshot_name is defined" - name: 10 delete backup opentelekomcloud.cloud.volume_backup: name: "{{ backup_name }}" state: absent register: delbckp + when: "backup_name is defined" - name: 11 assert result of deliting - assert: + ansible.builtin.assert: that: - delbckp is success - delbckp is not changed @@ -100,8 +97,7 @@ register: delvol - name: 13 assert result of deleting volume - assert: + ansible.builtin.assert: that: - delvol is success - delvol is changed - ignore_errors: true diff --git 
a/tests/integration/targets/volume_backup_info/tasks/main.yaml b/tests/integration/targets/volume_backup_info/tasks/main.yaml index 433fd9a9..c4e73494 100644 --- a/tests/integration/targets/volume_backup_info/tasks/main.yaml +++ b/tests/integration/targets/volume_backup_info/tasks/main.yaml @@ -1,15 +1,16 @@ --- -- block: +- name: Volume Backup Info tests + block: - name: Get backup info - volume_backup_info: + opentelekomcloud.cloud.volume_backup_info: register: backup - name: Debug backup - debug: + ansible.builtin.debug: var: backup - name: assert result - assert: + ansible.builtin.assert: that: - backup is success - backup is not changed diff --git a/tests/integration/targets/volume_snapshot_info/tasks/main.yaml b/tests/integration/targets/volume_snapshot_info/tasks/main.yaml index 7aba24cf..63273aa7 100644 --- a/tests/integration/targets/volume_snapshot_info/tasks/main.yaml +++ b/tests/integration/targets/volume_snapshot_info/tasks/main.yaml @@ -1,15 +1,16 @@ --- -- block: +- name: Volume Snapshot Info tests + block: - name: Get volume snapshot info - volume_snapshot_info: + opentelekomcloud.cloud.volume_snapshot_info: register: snapshot - name: Debug snapshots - debug: + ansible.builtin.debug: var: snapshot - name: assert result - assert: + ansible.builtin.assert: that: - snapshot is success - snapshot is not changed diff --git a/tests/integration/targets/vpc/tasks/main.yaml b/tests/integration/targets/vpc/tasks/main.yaml index 1745b720..6f0e48f2 100644 --- a/tests/integration/targets/vpc/tasks/main.yaml +++ b/tests/integration/targets/vpc/tasks/main.yaml @@ -1,15 +1,16 @@ --- -- module_defaults: +- name: VPC Tests + module_defaults: opentelekomcloud.cloud.vpc: cloud: "{{ test_cloud }}" block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: vpc_name: "{{ ( prefix + '_vpc') }}" cidr: "192.168.0.0/24" @@ -22,7 +23,7 @@ register: vpc_check_mode - name: assert result - assert: + ansible.builtin.assert: that: - vpc_check_mode is success - vpc_check_mode is not changed @@ -36,7 +37,7 @@ register: vpc - name: assert result - assert: + ansible.builtin.assert: that: - vpc is success - vpc is changed @@ -49,7 +50,7 @@ register: updated_vpc - name: assert result - assert: + ansible.builtin.assert: that: - updated_vpc is success - updated_vpc is defined @@ -61,7 +62,7 @@ register: deleted_vpc - name: assert result - assert: + ansible.builtin.assert: that: - deleted_vpc is success - deleted_vpc is changed diff --git a/tests/integration/targets/vpc_info/tasks/main.yaml b/tests/integration/targets/vpc_info/tasks/main.yaml index fcab9ce4..4b352fd7 100644 --- a/tests/integration/targets/vpc_info/tasks/main.yaml +++ b/tests/integration/targets/vpc_info/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- module_defaults: +- name: VPC Info tests + module_defaults: opentelekomcloud.cloud.vpc: cloud: "{{ test_cloud }}" opentelekomcloud.cloud.vpc_info: @@ -7,11 +8,11 @@ block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: vpc_name: "{{ ( prefix + 'vpc') }}" - name: Creating a vpc @@ -25,7 +26,7 @@ register: all_vpcs - name: assert result - assert: + ansible.builtin.assert: that: - all_vpcs is success - all_vpcs is not changed @@ -37,17 +38,17 @@ register: new_vpc - name: assert result - assert: + ansible.builtin.assert: that: - new_vpc is 
success - new_vpc is not changed - new_vpc | length > 0 always: - - block: - # Cleanup + - name: Cleanup + block: - name: Drop created vpc opentelekomcloud.cloud.vpc: name: "{{ vpc_name }}" state: absent - ignore_errors: true + failed_when: false diff --git a/tests/integration/targets/vpc_peering_info_test/tasks/main.yaml b/tests/integration/targets/vpc_peering_info_test/tasks/main.yaml index b3680845..98bea441 100644 --- a/tests/integration/targets/vpc_peering_info_test/tasks/main.yaml +++ b/tests/integration/targets/vpc_peering_info_test/tasks/main.yaml @@ -1,15 +1,16 @@ --- -- block: +- name: VPC Peering Info tests + block: - name: Get vpc peerings info - vpc_peering_info: + opentelekomcloud.cloud.vpc_peering_info: register: vpc_peerings - name: Debug vpc peerings - debug: + ansible.builtin.debug: var: vpc_peerings - name: assert result - assert: + ansible.builtin.assert: that: - vpc_peerings is success - vpc_peerings is not changed diff --git a/tests/integration/targets/vpc_peering_test/tasks/main.yaml b/tests/integration/targets/vpc_peering_test/tasks/main.yaml index 0cc7f1f4..a0d63329 100644 --- a/tests/integration/targets/vpc_peering_test/tasks/main.yaml +++ b/tests/integration/targets/vpc_peering_test/tasks/main.yaml @@ -1,14 +1,15 @@ --- -- module_defaults: +- name: VPC Peering tests + module_defaults: opentelekomcloud.cloud.vpc_peering: cloud: "{{ test_cloud }}" block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: network_name_1: "{{ ( prefix + 'vpc_peering_test-network_1') }}" subnet_name_1: "{{ ( prefix + 'vpc_peering_test-subnet_1') }}" router_name_1: "{{ ( prefix + 'vpc_peering_test-router_1') }}" @@ -84,7 +85,7 @@ check_mode: true - name: assert result - assert: + ansible.builtin.assert: that: - vpc_peering_check is changed @@ -98,7 +99,7 @@ register: vpc_peering - name: assert result - assert: + ansible.builtin.assert: that: - vpc_peering is success - vpc_peering is changed @@ -114,24 +115,24 @@ register: updated_vpc_peering - name: assert result - assert: + ansible.builtin.assert: that: - updated_vpc_peering is success - updated_vpc_peering is changed always: - - block: - # Cleanup - + - name: Cleanup + block: - name: Drop existing vpc peering opentelekomcloud.cloud.vpc_peering: # pass ID as name name: "{{ updated_vpc_peering.vpc_peering.name }}" state: absent register: dropped + failed_when: false - name: assert result - assert: + ansible.builtin.assert: that: - dropped is success - dropped is changed @@ -140,29 +141,34 @@ openstack.cloud.router: name: "{{ router_name_1 }}" state: absent + failed_when: false - name: Drop existing first subnet openstack.cloud.subnet: name: "{{ subnet_name_1 }}" state: absent + failed_when: false - name: Drop existing first network openstack.cloud.network: name: "{{ network_name_1 }}" state: absent + failed_when: false - name: Drop existing second router openstack.cloud.router: name: "{{ router_name_2 }}" state: absent + failed_when: false - name: Drop existing second subnet openstack.cloud.subnet: name: "{{ subnet_name_2 }}" state: absent + failed_when: false - name: Drop existing second network openstack.cloud.network: name: "{{ network_name_2 }}" state: absent - ignore_errors: true + failed_when: false diff --git a/tests/integration/targets/vpc_route_info_test/tasks/main.yaml b/tests/integration/targets/vpc_route_info_test/tasks/main.yaml index 44f95e4b..c4b39e44 100644 --- 
a/tests/integration/targets/vpc_route_info_test/tasks/main.yaml
+++ b/tests/integration/targets/vpc_route_info_test/tasks/main.yaml
@@ -1,15 +1,16 @@
 ---
-- block:
+- name: VPC Route Info tests
+  block:
     - name: Get vpc routes info
-      vpc_route_info:
+      opentelekomcloud.cloud.vpc_route_info:
       register: vpc_routes
 
     - name: Debug vpc route
-      debug:
+      ansible.builtin.debug:
         var: vpc_routes
 
     - name: assert result
-      assert:
+      ansible.builtin.assert:
         that:
           - vpc_routes is success
           - vpc_routes is not changed
diff --git a/tests/integration/targets/vpc_route_test/tasks/main.yaml b/tests/integration/targets/vpc_route_test/tasks/main.yaml
index d4adc36c..667e2d5a 100644
--- a/tests/integration/targets/vpc_route_test/tasks/main.yaml
+++ b/tests/integration/targets/vpc_route_test/tasks/main.yaml
@@ -1,14 +1,15 @@
 ---
-- module_defaults:
+- name: VPC Route tests
+  module_defaults:
     opentelekomcloud.cloud.vpc_route:
       cloud: "{{ test_cloud }}"
   block:
     - name: Set random prefix
-      set_fact:
+      ansible.builtin.set_fact:
         prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}"
 
     - name: Set initial facts
-      set_fact:
+      ansible.builtin.set_fact:
         network_name_1: "{{ ( prefix + 'vpc_route_test-network_1') }}"
         subnet_name_1: "{{ ( prefix + 'vpc_route_test-subnet_1') }}"
         router_name_1: "{{ ( prefix + 'vpc_route_test-router_1') }}"
@@ -91,7 +92,7 @@
       check_mode: true
 
     - name: assert result
-      assert:
+      ansible.builtin.assert:
         that:
           - vpc_route_check is changed
 
@@ -103,24 +104,24 @@
       register: vpc_route
 
     - name: assert result
-      assert:
+      ansible.builtin.assert:
         that:
           - vpc_route is success
           - vpc_route is changed
 
   always:
-    - block:
-      # Cleanup
-
+    - name: Cleanup
+      block:
         - name: Drop existing vpc route
           opentelekomcloud.cloud.vpc_route:
             # pass ID as name
             route_id: "{{ vpc_route.vpc_route.id }}"
             state: absent
           register: dropped
+          failed_when: false
 
         - name: assert result
-          assert:
+          ansible.builtin.assert:
             that:
               - dropped is success
               - dropped is changed
@@ -129,34 +130,40 @@
           opentelekomcloud.cloud.vpc_peering:
             name: "{{ vpc_peering_name }}"
             state: absent
+          failed_when: false
 
         - name: Drop existing first router
           openstack.cloud.router:
             name: "{{ router_name_1 }}"
             state: absent
+          failed_when: false
 
         - name: Drop existing first subnet
           openstack.cloud.subnet:
             name: "{{ subnet_name_1 }}"
             state: absent
+          failed_when: false
 
         - name: Drop existing first network
           openstack.cloud.network:
             name: "{{ network_name_1 }}"
             state: absent
+          failed_when: false
 
         - name: Drop existing second router
           openstack.cloud.router:
             name: "{{ router_name_2 }}"
             state: absent
+          failed_when: false
 
         - name: Drop existing second subnet
           openstack.cloud.subnet:
             name: "{{ subnet_name_2 }}"
             state: absent
+          failed_when: false
 
         - name: Drop existing second network
           openstack.cloud.network:
             name: "{{ network_name_2 }}"
             state: absent
-          ignore_errors: true
+          failed_when: false
diff --git a/tests/integration/targets/vpn_service_info/tasks/main.yaml b/tests/integration/targets/vpn_service_info/tasks/main.yaml
index 5596eee1..81b0ddea 100644
--- a/tests/integration/targets/vpn_service_info/tasks/main.yaml
+++ b/tests/integration/targets/vpn_service_info/tasks/main.yaml
@@ -1,15 +1,16 @@
 ---
-- module_defaults:
+- name: VPN Service Info tests
+  module_defaults:
     opentelekomcloud.cloud.vpn_service_info:
       cloud: "{{ test_cloud }}"
   block:
     - name: Get list of vpn services
       opentelekomcloud.cloud.vpn_service_info:
       register: vpn_service_info
-      ignore_errors: true
+      failed_when: false
 
     - name: assert result
-      assert:
+      ansible.builtin.assert:
         that:
           - vpn_service_info is success
           - vpn_service_info is not 
changed @@ -18,10 +19,10 @@ opentelekomcloud.cloud.vpn_service_info: vpn_service: "fake_vpn" register: vpn_service_info - ignore_errors: true + failed_when: false - name: assert result - assert: + ansible.builtin.assert: that: - vpn_service_info is success - vpn_service_info is not changed @@ -32,10 +33,10 @@ opentelekomcloud.cloud.vpn_service_info: subnet: "fake_subnet" register: vpn_service_info - ignore_errors: true + failed_when: false - name: assert result - assert: + ansible.builtin.assert: that: - vpn_service_info is success - vpn_service_info is not changed @@ -46,10 +47,10 @@ opentelekomcloud.cloud.vpn_service_info: router: "fake_router" register: vpn_service_info - ignore_errors: true + failed_when: false - name: assert result - assert: + ansible.builtin.assert: that: - vpn_service_info is success - vpn_service_info is not changed @@ -62,10 +63,10 @@ subnet: "fake_subnet" router: "fake_router" register: vpn_service_info - ignore_errors: true + failed_when: false - name: assert result - assert: + ansible.builtin.assert: that: - vpn_service_info is success - vpn_service_info is not changed diff --git a/tests/integration/targets/waf_certificate/tasks/main.yaml b/tests/integration/targets/waf_certificate/tasks/main.yaml index e610146a..842906ec 100644 --- a/tests/integration/targets/waf_certificate/tasks/main.yaml +++ b/tests/integration/targets/waf_certificate/tasks/main.yaml @@ -1,12 +1,13 @@ --- -- module_defaults: - waf_certificate: +- name: WAF Certificate tests + module_defaults: + opentelekomcloud.cloud.waf_certificate: cloud: "{{ test_cloud }}" - waf_certificate_info: + opentelekomcloud.cloud.waf_certificate_info: cloud: "{{ test_cloud }}" block: - name: Set initial facts - set_fact: + ansible.builtin.set_fact: cert_name: test_cert_ansible_collection content: | -----BEGIN CERTIFICATE----- @@ -59,30 +60,30 @@ -----END PRIVATE KEY----- - name: Ensure cert does not exists - waf_certificate: + opentelekomcloud.cloud.waf_certificate: name: "{{ cert_name }}" state: absent register: result - name: assert result - assert: + ansible.builtin.assert: that: - result is success - name: Create cert - waf_certificate: + opentelekomcloud.cloud.waf_certificate: name: "{{ cert_name }}" content: "{{ content }}" private_key: "{{ key }}" register: result - name: Get cert info - waf_certificate_info: + opentelekomcloud.cloud.waf_certificate_info: name: "{{ cert_name }}" register: result_info - name: assert result - assert: + ansible.builtin.assert: that: - result is success - result is changed @@ -90,24 +91,24 @@ - result_info.waf_certificates[0].name == cert_name - name: Drop cert - waf_certificate: + opentelekomcloud.cloud.waf_certificate: name: "{{ cert_name }}" state: absent register: result - name: assert result - assert: + ansible.builtin.assert: that: - result is success - result is changed - name: Get cert info - waf_certificate_info: + opentelekomcloud.cloud.waf_certificate_info: name: "{{ cert_name }}" register: result - name: assert result - assert: + ansible.builtin.assert: that: - result is success - result is not changed diff --git a/tests/integration/targets/waf_certificate_info/tasks/main.yaml b/tests/integration/targets/waf_certificate_info/tasks/main.yaml index ffe5faaa..7d96330a 100644 --- a/tests/integration/targets/waf_certificate_info/tasks/main.yaml +++ b/tests/integration/targets/waf_certificate_info/tasks/main.yaml @@ -1,26 +1,27 @@ --- -- module_defaults: - waf_certificate_info: +- name: WAF Certificate tests + module_defaults: + 
opentelekomcloud.cloud.waf_certificate_info: cloud: "{{ test_cloud }}" block: - name: Get WAF Cert info - waf_certificate_info: + opentelekomcloud.cloud.waf_certificate_info: register: result - name: assert result - assert: + ansible.builtin.assert: that: - result is success - result is not changed - result.waf_certificates is defined - name: Get WAF Cert with name filter info - waf_certificate_info: + opentelekomcloud.cloud.waf_certificate_info: name: some_fake register: result - name: assert result - assert: + ansible.builtin.assert: that: - result is success - result is not changed diff --git a/tests/integration/targets/waf_domain/tasks/main.yaml b/tests/integration/targets/waf_domain/tasks/main.yaml index ae365c67..6ddbb7ae 100644 --- a/tests/integration/targets/waf_domain/tasks/main.yaml +++ b/tests/integration/targets/waf_domain/tasks/main.yaml @@ -1,18 +1,19 @@ --- -- module_defaults: - waf_domain: +- name: WAF Domain tests + module_defaults: + opentelekomcloud.cloud.waf_domain: cloud: "{{ test_cloud }}" - waf_domain_info: + opentelekomcloud.cloud.waf_domain_info: cloud: "{{ test_cloud }}" - waf_certificate: + opentelekomcloud.cloud.waf_certificate: cloud: "{{ test_cloud }}" block: - name: Set random prefix - set_fact: + ansible.builtin.set_fact: prefix: "{{ 99999999 | random | to_uuid | hash('md5') }}" - name: Set initial facts - set_fact: + ansible.builtin.set_fact: cert_name: "{{ ( prefix + '-test_cert_ansible_collection') }}" cert2_name: "{{ ( prefix + '-test_cert_ansible_collection_2') }}" domain_name: "{{ ( prefix + '.example.com') }}" @@ -67,21 +68,21 @@ -----END PRIVATE KEY----- - name: Create cert - waf_certificate: + opentelekomcloud.cloud.waf_certificate: name: "{{ cert_name }}" content: "{{ content }}" private_key: "{{ key }}" register: cert - name: Create 2nd cert - waf_certificate: + opentelekomcloud.cloud.waf_certificate: name: "{{ cert2_name }}" content: "{{ content }}" private_key: "{{ key }}" register: cert2 - name: Create domain - check mode - waf_domain: + opentelekomcloud.cloud.waf_domain: state: present name: "{{ domain_name }}" server: @@ -97,12 +98,12 @@ check_mode: true - name: assert result - assert: + ansible.builtin.assert: that: - domain_check is changed - name: Create domain - waf_domain: + opentelekomcloud.cloud.waf_domain: state: present name: "{{ domain_name }}" # here we pass cert id explicitly @@ -119,7 +120,7 @@ register: domain - name: assert result - assert: + ansible.builtin.assert: that: - domain is success - domain is changed @@ -127,7 +128,7 @@ - domain.waf_domain.id is defined - name: Update domain server list - waf_domain: + opentelekomcloud.cloud.waf_domain: state: present name: "{{ domain_name }}" server: @@ -142,7 +143,7 @@ register: domain_modified - name: assert result - assert: + ansible.builtin.assert: that: - domain_modified is success - domain_modified is changed @@ -150,18 +151,18 @@ - domain.waf_domain.id is defined - name: Update certificate reference in the domain - waf_domain: + opentelekomcloud.cloud.waf_domain: name: "{{ domain_name }}" # we want to support cert_id and name certificate: "{{ cert2_name }}" - name: Get WAF Domain with name filter info - waf_domain_info: + opentelekomcloud.cloud.waf_domain_info: name: "{{ domain_name }}" register: res - name: assert result - assert: + ansible.builtin.assert: that: - res is success - res.waf_domains is defined @@ -169,57 +170,59 @@ - res.waf_domains[0].certificate_id == cert2.waf_certificate.id - name: Get WAF Domain by ID - waf_domain_info: + 
opentelekomcloud.cloud.waf_domain_info: name: "{{ domain.waf_domain.id }}" register: res - name: assert result - assert: + ansible.builtin.assert: that: - res is success - res.waf_domains is defined - res.waf_domains[0].id is defined - name: Drop existing domain - waf_domain: + opentelekomcloud.cloud.waf_domain: # pass ID as name name: "{{ domain.waf_domain.id }}" state: absent register: domain - name: assert result - assert: + ansible.builtin.assert: that: - domain is success - domain is changed - name: Drop not more existing domain - waf_domain: + opentelekomcloud.cloud.waf_domain: name: "{{ domain_name }}" state: absent register: domain - name: assert result - assert: + ansible.builtin.assert: that: - domain is success - domain is not changed always: - - block: - # Cleanup + - name: Cleanup + block: - name: Drop perhaps existing domain - waf_domain: + opentelekomcloud.cloud.waf_domain: name: "{{ domain_name }}" state: absent + failed_when: false - name: Drop certificate - waf_certificate: + opentelekomcloud.cloud.waf_certificate: name: "{{ cert_name }}" state: absent + failed_when: false - name: Drop 2nd certificate - waf_certificate: + opentelekomcloud.cloud.waf_certificate: name: "{{ cert2_name }}" state: absent - ignore_errors: true + failed_when: false diff --git a/tests/integration/targets/waf_domain_info/tasks/main.yaml b/tests/integration/targets/waf_domain_info/tasks/main.yaml index d03819f9..e6eac03c 100644 --- a/tests/integration/targets/waf_domain_info/tasks/main.yaml +++ b/tests/integration/targets/waf_domain_info/tasks/main.yaml @@ -1,26 +1,27 @@ --- -- module_defaults: - waf_domain_info: +- name: WAF Domain Info tests + module_defaults: + opentelekomcloud.cloud.waf_domain_info: cloud: "{{ test_cloud }}" block: - name: Get WAF Domains info - waf_domain_info: + opentelekomcloud.cloud.waf_domain_info: register: domain - name: assert result - assert: + ansible.builtin.assert: that: - domain is success - domain is not changed - domain.waf_domains is defined - name: Get WAF Domain with name filter info - waf_domain_info: + opentelekomcloud.cloud.waf_domain_info: name: www.fakedomain.otc register: domain - name: assert result - assert: + ansible.builtin.assert: that: - domain is success - domain is not changed diff --git a/tests/requirements.yml b/tests/requirements.yml deleted file mode 100644 index 9a582ab3..00000000 --- a/tests/requirements.yml +++ /dev/null @@ -1,2 +0,0 @@ -sanity_tests_dependencies: [] -unit_tests_dependencies: [] diff --git a/tests/sanity/ignore-2.13.txt b/tests/sanity/ignore-2.13.txt new file mode 120000 index 00000000..0c39ad8e --- /dev/null +++ b/tests/sanity/ignore-2.13.txt @@ -0,0 +1 @@ +ignore-2.10.txt \ No newline at end of file From 90410d3f079491da3f4dda18a6cfa0cd64782019 Mon Sep 17 00:00:00 2001 From: Kristian Kucerak Date: Tue, 28 Jun 2022 11:30:31 +0200 Subject: [PATCH 57/65] fix to work properly with cloud dict (#203) fix to work properly with cloud dict Reviewed-by: Artem Goncharov --- .ansible-lint | 1 + plugins/doc_fragments/otc.py | 1 - plugins/module_utils/otc.py | 8 ++++---- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.ansible-lint b/.ansible-lint index f789d228..814001ab 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -7,3 +7,4 @@ skip_list: - '204' # Lines should be no longer than 160 chars - '301' # Commands should not change things if nothing needs doing - '701' # No 'galaxy_info' found\ + - 'var-spacing' # Jinja2 variables and filters should have spaces before and after. 
diff --git a/plugins/doc_fragments/otc.py b/plugins/doc_fragments/otc.py index 9c60fd11..a8eee287 100644 --- a/plugins/doc_fragments/otc.py +++ b/plugins/doc_fragments/otc.py @@ -67,7 +67,6 @@ class ModuleDocFragment(object): - Whether or not SSL API requests should be verified. - Before Ansible 2.3 this defaulted to C(yes). type: bool - default: false aliases: [ verify ] ca_cert: description: diff --git a/plugins/module_utils/otc.py b/plugins/module_utils/otc.py index 9f63df80..9431c267 100644 --- a/plugins/module_utils/otc.py +++ b/plugins/module_utils/otc.py @@ -30,7 +30,7 @@ def openstack_full_argument_spec(**kwargs): auth_type=dict(default=None), auth=dict(default=None, type='dict', no_log=True), region_name=dict(default=None), - validate_certs=dict(default=False, type='bool', aliases=['verify']), + validate_certs=dict(default=None, type='bool', aliases=['verify']), ca_cert=dict(default=None, aliases=['cacert']), client_cert=dict(default=None, aliases=['cert']), client_key=dict(default=None, no_log=True, aliases=['key']), @@ -148,9 +148,9 @@ def openstack_cloud_from_module(self, min_version='0.6.9'): # For 'interface' parameter, fail if we receive a non-default value if self.params['interface'] != 'public': self.fail_json(msg=fail_message.format(param='interface')) - conn = sdk.connect(**cloud_config) - otc_sdk.load(conn) - return sdk, conn + conn = sdk.connect(**cloud_config) + otc_sdk.load(conn) + return sdk, conn else: conn = sdk.connect( cloud=cloud_config, From 8649cdcc33d0b8691e83b1861fd251d4cec21708 Mon Sep 17 00:00:00 2001 From: Polina Gubina <33940358+Polina-Gubina@users.noreply.github.com> Date: Mon, 18 Jul 2022 12:36:44 +0300 Subject: [PATCH 58/65] vault module (#204) vault module Reviewed-by: Vladimir Vshivkov Reviewed-by: Anton Sidelnikov --- plugins/modules/cbr_vault.py | 553 ++++++++++++++++++ tests/integration/targets/cbr/aliases | 1 + tests/integration/targets/cbr/tasks/main.yaml | 102 ++++ tests/sanity/ignore-2.10.txt | 1 + tests/sanity/ignore-2.9.txt | 1 + .../modules/rds_instance/test_rds_instance.py | 2 +- 6 files changed, 659 insertions(+), 1 deletion(-) create mode 100644 plugins/modules/cbr_vault.py create mode 100644 tests/integration/targets/cbr/aliases create mode 100644 tests/integration/targets/cbr/tasks/main.yaml diff --git a/plugins/modules/cbr_vault.py b/plugins/modules/cbr_vault.py new file mode 100644 index 00000000..32272bfb --- /dev/null +++ b/plugins/modules/cbr_vault.py @@ -0,0 +1,553 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +module: cbr_vault +short_description: Manage CBR Vault +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.1.2" +author: "Gubina Polina (@Polina-Gubina)" +description: + - Manage cbr vault from the OTC. +options: + name: + description: + - Vault name or id. + type: str + required: true + policy: + description: + - Backup policy name or id. If the value of this parameter is null,\ + automatic backup is not performed. 
Can be set during creation and for\ + binding/unbinding policy to the vault. + type: str + billing: + description: + - Parameter information for creation. Mandatory for creation.\ + Only size can be updated. + type: dict + suboptions: + cloud_type: + description: + - Cloud type, which is public. + type: str + consistent_level: + description: + - Specification, which is crash_consistent\ + by default (crash consistent backup). + type: str + default: "crash_consistent" + object_type: + description: + - Object type, which can be server or disk. + type: str + required: true + choices: ['server', 'disk'] + protect_type: + description: + - Protection type, which is backup. + type: str + required: true + size: + description: + - Capacity, in GB. Minimum 1, maximum 10485760. + type: int + required: true + charging_mode: + description: + - Billing mode, which is post_paid. + default: "post_paid" + type: str + is_auto_renew: + description: + - Whether to automatically renew the subscription after expiration.\ + By default, it is not renewed. + default: False + type: bool + is_auto_pay: + description: + - Whether the fee is automatically deducted from the\ + customer's account balance after an order is submitted.\ + The non-automatic payment mode is used by default. + type: bool + default: False + console_url: + description: + - Redirection URL. Minimum 1, maximum 255. + type: str + description: + description: + - User-defined vault description. Minimum 0, maximum 64. + type: str + resources: + description: + - Associated resources. Set this parameter to [] if no\ + resources are associated when creating a vault. Mandatory for creation\ + and associating resources. + type: list + elements: dict + suboptions: + id: + description: + - ID of the resource to be backed up. + type: str + required: true + type: + description: + - Type of the resource to be backed up. + type: str + required: true + choices: ['OS::Nova::Server', 'OS::Cinder::Volume'] + name: + description: + - Resource name. Minimum 0, maximum 255. + type: str + required: false + tags: + description: + - Tag list. This list cannot be an empty list. The list can contain\ + up to 10 keys. Keys in this list must be unique. + type: list + elements: dict + suboptions: + key: + description: + - Key. It can contain a maximum of 36 characters. + type: str + required: true + value: + description: + - Value. It is mandatory when a tag is added and optional when\ + a tag is deleted. + type: str + auto_bind: + description: + - Whether automatic association is supported. + type: bool + bind_rules: + description: + - Rules for automatic association. Filters automatically associated\ + resources by tag. + type: list + elements: dict + suboptions: + key: + description: + - Key. It can contain a maximum of 36 characters. + type: str + required: true + value: + description: + - Value. It is mandatory when a tag is added and optional when\ + a tag is deleted. + type: str + resource_ids: + description: + - List of resource IDs to be removed. Used for dissociating resources. + type: list + elements: str + auto_expand: + description: + - Whether to enable auto capacity expansion for the vault.\ + Can be set in update. + type: bool + smn_notify: + description: + - Exception notification function. + type: bool + default: true + threshold: + description: + - Vault capacity threshold. If the vault capacity usage exceeds this\ + threshold and smn_notify is true, an exception notification is sent.\ + Can be set only in update. 
+ type: int + default: 80 + state: + description: + - Whether resource should be present or absent. + choices: ['present', 'absent'] + type: str + default: 'present' + action: + description: + - What needs to be done. + choices: ['associate_resources', 'dissociate_resources', 'bind_policy',\ + 'unbind_policy'] + type: str +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +vault: + description: Vault object. + type: complex + returned: On Success. + contains: + billing: + description: Operation info. + type: complex + contains: + allocated: + description: + - Allocated capacity, in MB. + type: int + charging_mode: + description: + - Billing mode. + type: str + cloud_type: + description: + - Cloud type. + type: str + consistent_level: + description: + - Specification. + type: str + object_type: + description: + - Object type, which can be server or disk. + type: str + order_id: + description: + - Order id. + type: str + product_id: + description: + - Product id. + type: str + protect_type: + description: + - Protection type, which is backup. + type: str + size: + description: Capacity, in GB. + type: int + spec_code: + description: Specification code. + type: str + status: + description: Vault status. + type: str + storage_unit: + description: Name of the bucket for the vault. + type: str + used: + description: + - Used capacity, in MB. + type: int + frozen_scene: + description: Scenario when an account is frozen. + type: str + description: + description: User-defined vault description. + type: str + id: + description: Vault id. + type: str + name: + description: Vault name. + type: str + project_id: + description: Project ID. + type: str + provider_id: + description: Vault name. + type: list + resources: + description: Vault resources. + type: list + tags: + description: Vault tags. + type: list + auto_bind: + description: Indicates whether automatic association is enabled. + type: bool + bind_rules: + description: Association rule. + type: list + elements: dict + contains: + key: + description: Key.. + type: str + value: + description: Value. + type: str + user_id: + description: User id. + type: str + created_at: + description: Creation time. + type: str + auto_expand: + description: Whether to enable auto capacity expansion for the vault. 
+ type: bool +''' + +EXAMPLES = ''' +- name: Create vault + opentelekomcloud.cloud.cbr_vault: + name: "vault-namenew" + resources: + - id: '9f1e2203-f222-490d-8c78-23c01ca4f4b9' + type: "OS::Cinder::Volume" + billing: + consistent_level: "crash_consistent" + object_type: "disk" + protect_type: "backup" + size: 40 + register: vault + +- name: Associate resources CBR vault + opentelekomcloud.cloud.cbr_vault: + name: "new-vault" + resources: + - id: '9f1e2203-f222-490d-8c78-23c01ca4f4b9' + type: "OS::Cinder::Volume" + action: "associate_resources" + register: vault + +- name: Dissociate resources CBR vault + opentelekomcloud.cloud.cbr_vault: + name: "new-vault" + resource_ids: + - '9f1e2203-f222-490d-8c78-23c01ca4f4b9' + action: "dissociate_resources" + register: vault + +- name: Delete CBR vault + opentelekomcloud.cloud.cbr_vault: + name: "new-vault" + state: absent + register: vault +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class CBRVaultModule(OTCModule): + argument_spec = dict( + name=dict(required=True, type='str'), + policy=dict(required=False), + billing=dict(required=False, type='dict', options=dict( + cloud_type=dict(required=False, type='str'), + consistent_level=dict(required=False, type='str', + default='crash_consistent'), + object_type=dict(required=True, type='str', + choices=['server', 'disk']), + protect_type=dict(required=True, type='str'), + size=dict(required=True, type='int'), + charging_mode=dict(required=False, type='str', default='post_paid'), + is_auto_renew=dict(required=False, type='bool', default=False), + is_auto_pay=dict(required=False, type='bool', default=False), + console_url=dict(required=False, type='str'),)), + description=dict(required=False, type='str'), + resources=dict(type='list', elements='dict', options=dict( + id=dict(required=True, type='str'), + type=dict(required=True, type='str', + choices=['OS::Nova::Server', 'OS::Cinder::Volume']), + name=dict(required=False, type='str'))), + resource_ids=dict(required=False, type='list', elements='str'), + tags=dict(required=False, type='list', elements='dict', + options=dict(key=dict(required=True, type='str', + no_log=False), + value=dict(required=False, type='str'))), + auto_bind=dict(type='bool', required=False), + bind_rules=dict(type='list', required=False, elements='dict', + options=dict(key=dict(required=True, + type='str', no_log=False), + value=dict(required=False, type='str'))), + auto_expand=dict(type='bool', required=False), + smn_notify=dict(type='bool', default=True, required=False), + threshold=dict(type='int', default=80, required=False), + state=dict(type='str', required=False, choices=['present', 'absent'], + default='present'), + action=dict(type='str', required=False, choices=['associate_resources', + 'dissociate_resources', 'bind_policy', 'unbind_policy']) + ) + module_kwargs = dict( + required_if=[ + ('action', 'associate_resources', ['name', 'resources']), + ('action', 'dissociate_resources', ['name', 'resource_ids']), + ('action', 'bind_policy', ['name', 'policy']), + ('action', 'unbind_policy', ['name', 'policy']) + ], + supports_check_mode=True + ) + + def _parse_resources(self): + resources = self.params['resources'] + parsed_resources = [] + for resource in resources: + parsed_resource = {} + parsed_resource['id'] = resource.get('id')\ + if resource.get('id') else self.fail_json(msg="'id' is required for 'resource'") + parsed_resource['type'] = resource.get('type')\ + if resource.get('type') else 
self.fail_json(msg="'type' is required for 'resource'") + if resource.get('name'): + parsed_resource['name'] = resource.get('name') + parsed_resources.append(parsed_resource) + return parsed_resources + + def _parse_tags(self): + tags = self.params['tags'] + parsed_tags = [] + for tag in tags: + parsed_tag = {} + parsed_tag['key'] = tag.get('key')\ + if tag.get('key') else self.fail_json(msg="'key' is required for 'tag'") + if tag.get('value'): + parsed_tag['value'] = tag.get('value') + else: + parsed_tag['value'] = None + parsed_tags.append(parsed_tag) + return parsed_tags + + def _parse_billing(self): + billing = self.params['billing'] + parsed_billing = {} + if billing.get('cloud_type'): + parsed_billing['cloud_type'] = billing['cloud_type'] + parsed_billing['consistent_level'] = billing['consistent_level']\ + if billing.get('consistent_level') else self.fail_json(msg="'consistent_level' is required for 'billing'") + parsed_billing['object_type'] = billing['object_type'] \ + if billing.get('object_type') else self.fail_json(msg="'object_type' is required for 'billing'") + parsed_billing['protect_type'] = billing['protect_type'] \ + if billing.get('protect_type') else self.fail_json(msg="'protect_type' is required for 'billing'") + parsed_billing['size'] = billing['size'] \ + if billing.get('size') else self.fail_json(msg="'size' is required for 'billing'") + if billing.get('charging_mode'): + parsed_billing['charging_mode'] = billing['charging_mode'] + if billing.get('is_auto_renew'): + parsed_billing['is_auto_renew'] = billing['is_auto_renew'] + if billing.get('is_auto_pay'): + parsed_billing['is_auto_pay'] = billing['is_auto_pay'] + if billing.get('console_url'): + parsed_billing['console_url'] = billing['console_url'] + return parsed_billing + + def _system_state_change(self, vault): + state = self.params['state'] + if state == 'present' and not vault: + return True + if state == 'absent' and vault: + return True + return False + + def _require_update(self, vault): + require_update = False + if vault: + for param_key in ['name', 'auto_bind', 'bind_rules', + 'auto_expand', 'smn_notify', 'threshold']: + if self.params[param_key] != vault[param_key]: + require_update = True + break + return require_update + + def run(self): + attrs = {} + action = None + policy = None + state = self.params['state'] + if self.params['description']: + attrs['description'] = self.params['description'] + if self.params['auto_bind']: + attrs['auto_bind'] = self.params['auto_bind'] + if self.params['auto_expand']: + attrs['auto_expand'] = self.params['auto_expand'] + if self.params['smn_notify']: + attrs['smn_notify'] = self.params['smn_notify'] + if self.params['threshold']: + attrs['threshold'] = self.params['threshold'] + if self.params['policy']: + policy = self.conn.cbr.find_policy(name_or_id=self.params['policy']) + if policy: + attrs['policy_id'] = policy.id + else: + self.fail_json("'policy' not found") + if self.params['billing']: + attrs['billing'] = self._parse_billing() + if self.params['tags']: + attrs['tags'] = self._parse_tags() + if self.params['action']: + action = self.params['action'] + + vault = self.conn.cbr.find_vault(name_or_id=self.params['name'], + ignore_missing=True) + + require_update = self._require_update(vault) + if self.ansible.check_mode: + if self._system_state_change(vault) or require_update: + self.exit_json(changed=True) + self.exit_json(changed=False) + + if vault: + if action == 'associate_resources': + resources = self._parse_resources() + 
self.conn.cbr.associate_resources( + vault=vault.id, resources=resources) + self.exit(changed=True) + + if action == 'dissociate_resources': + resource_ids = self.params['resource_ids'] + self.conn.cbr.dissociate_resources( + vault=vault, resources=resource_ids) + self.exit(changed=True) + + if action == 'bind_policy': + self.conn.cbr.bind_policy(vault=vault, policy=policy) + self.exit(changed=True) + + if action == 'unbind_policy': + self.conn.cbr.unbind_policy(vault=vault, policy=policy) + self.exit(changed=True) + + if state == 'present': + self.conn.cbr.update_vault(vault=vault, **attrs) + self.exit(changed=True) + + if state == 'absent': + self.conn.cbr.delete_vault(vault=vault) + self.exit(changed=True) + + if state == 'absent': + if self.ansible.check_mode: + self.exit_json(changed=False, + msg="vault {0} not found".format(vault.id)) + + if action in ('associate_resources', 'dissociate_resources', + 'bind_policy', 'unbind_policy') or state == 'absent': + if self.ansible.check_mode: + self.exit_json(changed=False) + self.fail_json( + changed=False, msg="vault {0} not found".format(vault.id)) + + if self.params['resources']: + attrs['resources'] = self._parse_resources() + else: + attrs['resources'] = [] + attrs['name'] = self.params['name'] + if self.ansible.check_mode: + self.exit_json(changed=True) + created_vault = self.conn.cbr.create_vault(**attrs) + self.exit(changed=True, vault=created_vault) + + +def main(): + module = CBRVaultModule() + module() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/cbr/aliases b/tests/integration/targets/cbr/aliases new file mode 100644 index 00000000..7a68b11d --- /dev/null +++ b/tests/integration/targets/cbr/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/cbr/tasks/main.yaml b/tests/integration/targets/cbr/tasks/main.yaml new file mode 100644 index 00000000..79614628 --- /dev/null +++ b/tests/integration/targets/cbr/tasks/main.yaml @@ -0,0 +1,102 @@ +--- +- name: Cbr vault tests + module_defaults: + openstack.cloud.volume: + cloud: "{{ test_cloud }}" + opentelekomcloud.cloud.cbr_vault: + cloud: "{{ test_cloud }}" + block: + - name: Set random prefix + ansible.builtin.set_fact: + prefix: "{{ 999999 | random | to_uuid | hash('md5') }}" + + - name: Set initial facts + ansible.builtin.set_fact: + volume_name1: "{{ (prefix + '_volume1') }}" + volume_name2: "{{ (prefix + '_volume2') }}" + az_name: "eu-de-01" + vault_name: "{{ (prefix + '_vault') }}" + + - name: Create first volume + openstack.cloud.volume: + size: 1 + availability_zone: "{{ az_name }}" + display_name: "{{ volume_name1 }}" + state: present + register: vol1 + + - name: Create second volume + openstack.cloud.volume: + size: 1 + availability_zone: "{{ az_name }}" + display_name: "{{ volume_name2 }}" + state: present + register: vol2 + + - name: Create CBR vault + opentelekomcloud.cloud.cbr_vault: + name: "{{ (vault_name) }}" + billing: + consistent_level: "crash_consistent" + object_type: "disk" + protect_type: "backup" + size: 40 + resources: [{"id": '{{ vol1.volume.id }}', "type":"OS::Cinder::Volume"}] + register: created_vault + + - name: assert result + ansible.builtin.assert: + that: + - created_vault is success + - created_vault is changed + + - name: Associate resources CBR vault + opentelekomcloud.cloud.cbr_vault: + name: "{{ (vault_name) }}" + resources: [{"id": '{{ vol2.volume.id }}', "type":"OS::Cinder::Volume"}] + action: "associate_resources" + register: vault + + - name: assert result + ansible.builtin.assert: + that: + 
- vault is success
+          - vault is changed
+
+    - name: Dissociate resources CBR vault
+      opentelekomcloud.cloud.cbr_vault:
+        name: "{{ (vault_name) }}"
+        resource_ids: ['{{ vol2.volume.id }}']
+        action: "dissociate_resources"
+      register: vault
+
+    - name: assert result
+      ansible.builtin.assert:
+        that:
+          - vault is success
+          - vault is changed
+
+  always:
+    - name: Cleanup
+      block:
+        - name: Delete CBR vault
+          opentelekomcloud.cloud.cbr_vault:
+            name: "{{ (vault_name) }}"
+            state: absent
+          register: vault
+
+        - name: assert result
+          ansible.builtin.assert:
+            that:
+              - vault is success
+              - vault is changed
+
+        - name: Delete first volume
+          openstack.cloud.volume:
+            display_name: "{{ volume_name1 }}"
+            state: absent
+
+        - name: Delete second volume
+          openstack.cloud.volume:
+            display_name: "{{ volume_name2 }}"
+            state: absent
diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt
index 50a22a2e..baae0efa 100644
--- a/tests/sanity/ignore-2.10.txt
+++ b/tests/sanity/ignore-2.10.txt
@@ -8,6 +8,7 @@ plugins/modules/as_policy.py validate-modules:missing-gplv3-license
 plugins/modules/as_policy_info.py validate-modules:missing-gplv3-license
 plugins/modules/as_quota_info.py validate-modules:missing-gplv3-license
 plugins/modules/availability_zone_info.py validate-modules:missing-gplv3-license
+plugins/modules/cbr_vault.py validate-modules:missing-gplv3-license
 plugins/modules/cce_cluster.py validate-modules:missing-gplv3-license
 plugins/modules/cce_cluster_cert_info.py validate-modules:missing-gplv3-license
 plugins/modules/cce_cluster_info.py validate-modules:missing-gplv3-license
diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt
index e281c7ab..5d8a4e04 100644
--- a/tests/sanity/ignore-2.9.txt
+++ b/tests/sanity/ignore-2.9.txt
@@ -8,6 +8,7 @@ plugins/modules/as_policy.py validate-modules:missing-gplv3-license
 plugins/modules/as_policy_info.py validate-modules:missing-gplv3-license
 plugins/modules/as_quota_info.py validate-modules:missing-gplv3-license
 plugins/modules/availability_zone_info.py validate-modules:missing-gplv3-license
+plugins/modules/cbr_vault.py validate-modules:missing-gplv3-license
 plugins/modules/cce_cluster.py validate-modules:missing-gplv3-license
 plugins/modules/cce_cluster_node.py validate-modules:missing-gplv3-license
 plugins/modules/cce_cluster_cert_info.py validate-modules:missing-gplv3-license
diff --git a/tests/unit/modules/rds_instance/test_rds_instance.py b/tests/unit/modules/rds_instance/test_rds_instance.py
index de403792..221ce259 100644
--- a/tests/unit/modules/rds_instance/test_rds_instance.py
+++ b/tests/unit/modules/rds_instance/test_rds_instance.py
@@ -131,7 +131,7 @@ def test_ensure_created(self):
             router=None,
             security_group=None,
             state='present',
-            validate_certs=False,
+            validate_certs=None,
             volume_size=None,
             volume_type=None,
             wait=True,
From 40d2e2e2e33b190bd971362e374c9a81f2095343 Mon Sep 17 00:00:00 2001
From: Polina Gubina <33940358+Polina-Gubina@users.noreply.github.com>
Date: Wed, 27 Jul 2022 10:21:02 +0300
Subject: [PATCH 59/65] cbr backup modules (#201)

cbr backup modules

Reviewed-by: Vladimir Vshivkov
Reviewed-by: Anton Sidelnikov
Reviewed-by: Polina Gubina
---
 meta/runtime.yml                              |   4 +
 plugins/modules/cbr_backup.py                 | 238 ++++++++++++
 plugins/modules/cbr_backup_info.py            | 359 ++++++++++++++++++
 plugins/modules/cbr_restore_point.py          | 234 ++++++++++++
 plugins/modules/cbr_vault.py                  |   2 +-
 tests/integration/targets/cbr/tasks/main.yaml |  59 +++
 tests/sanity/ignore-2.10.txt                  |   3 +
 tests/sanity/ignore-2.9.txt                   |   3 +
 8 files
changed, 901 insertions(+), 1 deletion(-) create mode 100644 plugins/modules/cbr_backup.py create mode 100644 plugins/modules/cbr_backup_info.py create mode 100644 plugins/modules/cbr_restore_point.py diff --git a/meta/runtime.yml b/meta/runtime.yml index fb052324..71e50eec 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -23,6 +23,10 @@ action_groups: - css_cluster_info - css_snapshot - css_snapshot_info + - cbr_backup + - cbr_backup_info + - cbr_restore_point + - cbr_vault - deh_host - deh_host_info - deh_host_type_info diff --git a/plugins/modules/cbr_backup.py b/plugins/modules/cbr_backup.py new file mode 100644 index 00000000..d8aa8729 --- /dev/null +++ b/plugins/modules/cbr_backup.py @@ -0,0 +1,238 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +module: cbr_backup +short_description: Manage CBR Backup Resource +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.12.4" +author: "Gubina Polina (@Polina-Gubina)" +description: Manage CBR backup resource from the OTC. +options: + name: + description: Backup name of id. + type: str + required: true + mappings: + description: + - Restored mapping relationship. This parameter is mandatory for VM restoration and optional for disk restoration. + type: list + elements: dict + suboptions: + backup_id: + description: + - backup_id + type: str + required: true + volume_id: + description: + - ID of the disk to which data is restored. + type: str + required: true + power_on: + description: + - Whether the server is powered on after restoration.\ + By default it is powered on after restoration. + type: bool + default: True + server_id: + description: + - ID of the target VM to be restored.\ + This parameter is mandatory for VM restoration. + type: str + volume_id: + description: + - ID of the target disk to be restored.\ + This parameter is mandatory for disk restoration. + type: str + state: + description: + - Whether resource should be present or absent. + choices: [present, absent] + type: str + default: present +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +backup: + description: CBR backups list. + type: complex + returned: On Success. + contains: + checkpoint_id: + description: Restore point ID. + type: str + created_at: + description: Creation time. + type: str + description: + description: Backup description. + type: str + expired_at: + description: Expiration time. + type: str + extend_info: + description: Extended information. + type: complex + contains: + allocated: + description: + - Allocated capacity, in MB. + type: int + charging_mode: + description: + - Billing mode. + type: str + id: + description: Backup id. + type: str + image_type: + description: Backup type. + type: str + name: + description: Backup name. + type: str + parent_id: + description: Parent backup ID. + type: str + project_id: + description: Project ID. + type: str + protected_at: + description: Backup time. 
+ type: str + resource_az: + description: Resource availability zone. + type: str + resource_id: + description: Resource ID. + type: str + resource_name: + description: Resource name. + type: str + resource_size: + description: Resource size, in GB. + type: str + resource_type: + description: Resource type. + type: str + status: + description: Backup status. + type: str + updated_at: + description: Update time. + type: str + vault_id: + description: Vault id. + type: str + provider_id: + description: Backup provider ID, which is used to distinguish\ + backup objects. The value can be as follows:. + type: str +''' + +EXAMPLES = ''' +# Restore backup: +- name: + opentelekomcloud.cloud.cbr_backup: + name: "backup-name-or-id" + volume_id: "volume-id" + +# Delete backup: +- name: + opentelekomcloud.cloud.cbr_backup: + name: "backup-name-or-id" +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class CBRBackupModule(OTCModule): + argument_spec = dict( + name=dict(required=True), + mappings=dict(type='list', required=False, elements='dict', + options=dict(backup_id=dict(type='str', required=True), + volume_id=dict(type='str', required=True))), + power_on=dict(type='bool', default=True, required=False), + server_id=dict(type='str', required=False), + volume_id=dict(type='str', required=False), + state=dict(type='str', + choices=['present', 'absent'], default='present') + ) + module_kwargs = dict( + required_if=[ + ('state', 'present', ['name']) + ], + supports_check_mode=True + ) + + def _parse_mappings(self): + mappings = self.params['mappings'] + parsed_mappings = [] + for m in mappings: + mapping = {} + mapping['backup_id'] = m.get('backup_id')\ + if m.get('backup_id') else self.fail_json(msg="'backup_id' is required for 'mappings'") + mapping['volume_id'] = m.get('volume_id')\ + if m.get('volume_id') else self.fail_json(msg="'volume_id' is required for 'mappings'") + parsed_mappings.append(mapping) + return parsed_mappings + + def _system_state_change(self, backup): + state = self.params['state'] + if state == 'present': + if not backup: + return True + elif state == 'absent' and backup: + return True + return False + + def run(self): + query = {} + + if self.params['mappings']: + query['mappings'] = self._parse_mappings() + if self.params['power_on']: + query['power_on'] = self.params['power_on'] + if self.params['server_id']: + query['server_id'] = self.params['server_id'] + if self.params['volume_id']: + query['volume_id'] = self.params['volume_id'] + + backup = self.conn.cbr.find_backup(name_or_id=self.params['name']) + query['backup'] = backup.id + + if self.ansible.check_mode: + self.exit_json(changed=self._system_state_change(backup)) + + if backup: + if self.params['state'] == 'present': + self.conn.cbr.restore_data(**query) + else: + self.conn.cbr.delete_backup(backup=backup.id) + self.exit( + changed=True + ) + self.exit( + changed=False + ) + + +def main(): + module = CBRBackupModule() + module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cbr_backup_info.py b/plugins/modules/cbr_backup_info.py new file mode 100644 index 00000000..27d8f7c8 --- /dev/null +++ b/plugins/modules/cbr_backup_info.py @@ -0,0 +1,359 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +module: cbr_backup_info +short_description: Get cbr backup resource list +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.12.4" +author: "Gubina Polina (@Polina-Gubina)" +description: + - Get cbr backup resource list. +options: + name: + description: + - Backup name or id. + type: str + checkpoint_id: + description: + - Restore point ID. + type: str + dec: + description: + - Dedicated cloud. + type: bool + end_time: + description: + - Time when the backup ends, in %YYYY-%mm-%ddT%HH:%MM:%SSZ format. + type: str + image_type: + description: + - Backup type. + choices: ['backup', 'replication'] + type: str + limit: + description: + - Number of records displayed per page.\ + The value must be a positive integer. + type: int + marker: + description: + - ID of the last record displayed on the previous page. + type: str + member_status: + description: + - Backup sharing status. + choices: ['pending', 'accept', 'reject'] + type: str + offset: + description: + - Offset value. The value must be a positive integer. + type: int + own_type: + description: + - Owning type of a backup. private backups are queried by default. + type: str + default: 'private' + choices: ['all_granted', 'private', 'shared'] + parent_id: + description: + - Parent backup id. + type: str + resource_az: + description: + - AZ-based filtering is supported. + type: str + resource_id: + description: + - Resource id. + type: str + resource_name: + description: + - Resource name. + type: str + resource_type: + description: + - Resource type. + choices: ['OS::Cinder::Volume', 'OS::Nova::Server'] + type: str + sort: + description: + - A group of properties separated by commas (,) and sorting directions. + type: str + start_time: + description: + - Time when the backup starts, in %YYYY-%mm-%ddT%HH:%MM:%SSZ format.\ + For example, 2018-02-01T12:00:00Z. + type: str + status: + description: + - Status. When the API is called, multiple statuses can be transferred\ + for filtering, for example, status=available&status=error. + type: str + choices: ['available', 'protecting', 'deleting', 'restoring', 'error',\ + 'waiting_protect', 'waiting_delete', 'waiting_restore'] + used_percent: + description: + - Backups are filtered based on the occupied vault capacity. The value\ + ranges from 1 to 100. For example, if used_percent is set to 80,\ + all backups who occupied 80% or more of the vault capacity are displayed. + type: str + vault: + description: + - Vault id or name. + type: str +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +backups: + description: CBR backups list. + type: complex + returned: On Success. + contains: + checkpoint_id: + description: Restore point ID. + type: str + created_at: + description: Creation time. + type: str + description: + description: Backup description. + type: str + expired_at: + description: Expiration time. + type: str + extend_info: + description: Extended information. + type: complex + contains: + auto_trigger: + description: + - Whether the backup is automatically generated. 
+ type: bool + bootable: + description: + - Whether the backup is a system disk backup. + type: bool + incremental: + description: + - Whether the backup is an incremental backup. + type: bool + snapshot_id: + description: + - Snapshot ID of the disk backup. + type: str + support_lld: + description: + - Whether to allow lazyloading for fast restoration. + type: bool + supported_restore_mode: + description: + - Restoration mode. Possible values are na,\ + snapshot, and backup. snapshot indicates the backup\ + can be used to create a full-server image. backup\ + indicates the data is restored from backups of the EVS\ + disks of the server. na indicates the backup cannot be\ + used for restoration. + type: str + os_images_data: + description: + - ID list of images created using backups. + type: list + elements: dict + contains: + image_id: + description: + - Image ID. + type: str + contain_system_disk: + description: + - Whether the VM backup data contains system disk data. + type: bool + encrypted: + description: + - Whether the backup is encrypted. + type: bool + system_disk: + description: + - Whether the disk is a system disk. + type: bool + id: + description: Backup id. + type: str + image_type: + description: Backup type. + type: str + name: + description: Backup name. + type: str + parent_id: + description: Parent backup ID. + type: str + project_id: + description: Project ID. + type: str + protected_at: + description: Backup time. + type: str + resource_az: + description: Resource availability zone. + type: str + resource_id: + description: Resource ID. + type: str + resource_name: + description: Resource name. + type: str + resource_size: + description: Resource size, in GB. + type: str + resource_type: + description: Resource type. + type: str + status: + description: Backup status. + type: str + updated_at: + description: Update time. + type: str + vault_id: + description: Vault id. + type: str + provider_id: + description: Backup provider ID, which is used to distinguish\ + backup objects. The value can be as follows:. 
+ type: str +''' + +EXAMPLES = ''' +# Getting one cbr backup: +- opentelekomcloud.cloud.cbr_backup_info: + name: "name-or-id" + +# Getting cbr backups list for vault: +- opentelekomcloud.cloud.cbr_backup_info: + vault: "name-or-id-vault" +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class CBRBackupsModule(OTCModule): + argument_spec = dict( + name=dict(required=False), + checkpoint_id=dict(required=False), + dec=dict(required=False, type='bool'), + end_time=dict(required=False), + image_type=dict(type='str', choices=['backup', 'replication']), + limit=dict(required=False, type='int'), + marker=dict(type='str'), + member_status=dict(required=False, + type='str', choices=['pending', 'accept', 'reject']), + offset=dict(required=False, type='int'), + own_type=dict(required=False, type='str', + choices=['all_granted', 'private', 'shared'], + default='private'), + parent_id=dict(required=False, type='str'), + resource_az=dict(required=False, type='str'), + resource_id=dict(required=False, type='str'), + resource_name=dict(required=False, type='str'), + resource_type=dict(required=False, type='str', + choices=['OS::Cinder::Volume', 'OS::Nova::Server']), + sort=dict(required=False, type='str'), + start_time=dict(required=False, type='str'), + status=dict(required=False, type='str', + choices=['available', 'protecting', 'deleting', + 'restoring', 'error', 'waiting_protect', + 'waiting_delete', 'waiting_restore']), + used_percent=dict(required=False, type='str'), + vault=dict(required=False, type='str') + ) + module_kwargs = dict( + supports_check_mode=True + ) + + def run(self): + data = [] + query = {} + backup = None + + if self.params['name']: + backup = self.conn.cbr.find_backup( + name_or_id=self.params['name']) + self.exit( + changed=False, + backup=backup + ) + if self.params['checkpoint_id']: + query['checkpoint_id'] = self.params['checkpoint_id'] + if self.params['dec']: + query['dec'] = self.params['dec'] + if self.params['end_time']: + query['end_time'] = self.params['end_time'] + if self.params['image_type']: + query['image_type'] = self.params['image_type'] + if self.params['limit']: + query['limit'] = self.params['limit'] + if self.params['marker']: + query['marker'] = self.params['marker'] + if self.params['member_status']: + query['member_status'] = self.params['member_status'] + if self.params['name']: + query['name'] = self.params['name'] + if self.params['offset']: + query['offset'] = self.params['offset'] + if self.params['own_type']: + query['own_type'] = self.params['own_type'] + if self.params['parent_id']: + query['parent_id'] = self.params['parent_id'] + if self.params['resource_az']: + query['resource_az'] = self.params['resource_az'] + if self.params['resource_id']: + query['resource_id'] = self.params['resource_id'] + if self.params['resource_name']: + query['resource_name'] = self.params['resource_name'] + if self.params['resource_type']: + query['resource_type'] = self.params['resource_type'] + if self.params['sort']: + query['sort'] = self.params['sort'] + if self.params['start_time']: + query['start_time'] = self.params['start_time'] + if self.params['status']: + query['status'] = self.params['status'] + if self.params['used_percent']: + query['used_percent'] = self.params['used_percent'] + if self.params['vault']: + vault = self.conn.cbr.find_vault(name_or_id=self.params['vault']) + if not vault: + self.fail_json(msg="Vault not found") + query['vault_id'] = vault.id + + for raw in 
self.conn.cbr.backups(**query): + dt = raw.to_dict() + dt.pop('location') + data.append(dt) + + self.exit( + changed=False, + backups=data + ) + + +def main(): + module = CBRBackupsModule() + module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cbr_restore_point.py b/plugins/modules/cbr_restore_point.py new file mode 100644 index 00000000..4ba47ed3 --- /dev/null +++ b/plugins/modules/cbr_restore_point.py @@ -0,0 +1,234 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +module: cbr_restore_point +short_description: Manage CBR Restore Point +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.12.4" +author: "Gubina Polina (@Polina-Gubina)" +description: + - Manage cbr restore point from the OTC. +options: + vault: + description: + - Vault name or id. + type: str + required: true + auto_trigger: + description: + - Whether automatic triggering is enabled + type: bool + default: False + description: + description: + - Backup description. + type: str + incremental: + description: + - Whether the backup is an incremental backup. + type: bool + name: + description: + - Backup name. + type: str + resources: + description: + - UUID list of resources to be backed up. + type: list + elements: str + resource_details: + description: + - Resource details. + type: list + elements: dict + suboptions: + id: + description: Cloud type, which is public. + type: str + required: true + name: + description: + - Name of the resource to be backed up.\ + The value consists of 0 to 255 characters.. + type: str + type: + description: Cloud type, which is public. + type: str + required: true + choices: ['OS::Nova::Server', 'OS::Cinder::Volume'] +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +checkpoint: + description: Restore point object. + type: complex + returned: On Success. + contains: + created_at: + description: Creation time. + type: str + id: + description: Restore point ID. + type: str + project_id: + description: Project id. + type: str + status: + description: Status. + type: str + vault: + description: Project ID. + type: complex + contains: + id: + description: Vault ID. + type: str + name: + description: Vault name. + type: str + resources: + description: Backup name. + type: dict + contains: + extra_info: + description: Extra information of the resource. + type: str + id: + description: ID of the resource to be backed up. + type: str + name: + description: Name of the resource to be backed up. + type: str + protect_status: + description: Protected status. + type: str + resource_size: + description: Allocated capacity for the associated resource,\ + in GB. + type: str + type: + description: Type of the resource to be backed up. + type: str + backup_size: + description: Backup size. + type: str + backup_count: + description: Number of backups. + type: str + skipped_resources: + description: Backup name. + type: str + contains: + id: + description: Resource ID. 
+ type: str + type: + description: Resource type. + type: str + name: + description: Resource name. + type: str + code: + description: Error code. + type: str + reason: + description: Reason for the skipping. For example,\ + the resource is being backed up. + type: str + extra_info: + description: Vault name. + type: dict + contains: + name: + description: Backup name. + type: str + description: + description: Backup description. + type: str + retention_duration: + description: Number of days that backups can be retained. + type: int +''' + +EXAMPLES = ''' +# Create a restore point +opentelekomcloud.cloud.cbr_restore_point: + vault: "vault-name-or-id" +''' + +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + + +class CBRRestorePointModule(OTCModule): + argument_spec = dict( + vault=dict(required=True, type='str'), + auto_trigger=dict(required=False, type='bool', default=False), + description=dict(required=False, type='str'), + incremental=dict(required=False, type='bool'), + name=dict(type='str', required=False), + resources=dict(type='list', elements='str', required=False), + resource_details=dict(type='list', elements='dict', required=False, + options=dict( + id=dict(required=True, type='str'), + name=dict(required=False, type='str'), + type=dict(required=True, type='str', + choices=['OS::Nova::Server', + 'OS::Cinder::Volume']) + ) + ) + ) + + def _parse_resource_details(self): + resource_details = self.params['resource_details'] + parsed_resource_details = [] + for rd in resource_details: + parsed_rd = {} + parsed_rd['id'] = rd.get('id')\ + if rd.get('id') else self.fail_json(msg="'id' is required for 'resource_details'") + parsed_rd['type'] = rd.get('type')\ + if rd.get('type') else self.fail_json(msg="'type' is required for 'resource_details'") + if rd.get('name'): + parsed_rd['name'] = rd.get('name') + parsed_resource_details.append(parsed_rd) + return parsed_resource_details + + def run(self): + attrs = {} + vault_id = self.conn.cbr.find_vault(name_or_id=self.params['vault']).id + attrs['vault_id'] = vault_id + if self.params['auto_trigger']: + attrs['auto_trigger'] = self.params['auto_trigger'] + if self.params['description']: + attrs['description'] = self.params['description'] + if self.params['incremental']: + attrs['incremental'] = self.params['incremental'] + if self.params['name']: + attrs['name'] = self.params['name'] + if self.params['resources']: + attrs['resources'] = self.params['resources'] + if self.params['resource_details']: + attrs['resource_details'] = self._parse_resource_details() + + checkpoint = self.conn.cbr.create_checkpoint(**attrs) + self.exit(changed=True, checkpoint=checkpoint) + + +def main(): + module = CBRRestorePointModule() + module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cbr_vault.py b/plugins/modules/cbr_vault.py index 32272bfb..284c3f1e 100644 --- a/plugins/modules/cbr_vault.py +++ b/plugins/modules/cbr_vault.py @@ -279,7 +279,7 @@ elements: dict contains: key: - description: Key.. + description: Key. type: str value: description: Value. 
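In addition to the resource association and deletion shown in the vault examples above, the cbr_vault argument spec declares bind_policy and unbind_policy actions, each requiring name and policy. A minimal, hedged sketch of that usage follows; the policy name "my-backup-policy" is a hypothetical placeholder and is not taken from the patch itself:

- name: Bind an existing CBR policy to the vault
  opentelekomcloud.cloud.cbr_vault:
    name: "new-vault"
    policy: "my-backup-policy"  # hypothetical policy name, assumed to exist
    action: "bind_policy"
  register: vault

- name: Unbind the CBR policy from the vault
  opentelekomcloud.cloud.cbr_vault:
    name: "new-vault"
    policy: "my-backup-policy"  # hypothetical policy name
    action: "unbind_policy"
  register: vault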
diff --git a/tests/integration/targets/cbr/tasks/main.yaml b/tests/integration/targets/cbr/tasks/main.yaml index 79614628..fe2491fc 100644 --- a/tests/integration/targets/cbr/tasks/main.yaml +++ b/tests/integration/targets/cbr/tasks/main.yaml @@ -76,6 +76,65 @@ - vault is success - vault is changed + - name: Create a restore point + opentelekomcloud.cloud.cbr_restore_point: + vault: "{{ vault.vault.id }}" + register: checkpoint + + - name: assert result + ansible.builtin.assert: + that: + - checkpoint is success + - checkpoint is changed + + - name: Getting backup list + opentelekomcloud.cloud.cbr_backup_info: + vault: "{{ vault.vault.id }}" + register: backups_list + + - name: assert result + ansible.builtin.assert: + that: + - backups_list is success + - backups_list['backups']|length == 1 + + - name: Get backup + opentelekomcloud.cloud.cbr_backup_info: + name: "{{ backups_list.backups[0].id }}" + register: backup + + - name: assert result + ansible.builtin.assert: + that: + - backup is success + - backup.backup.id is defined + + - name: Restore backup + opentelekomcloud.cloud.cbr_backup: + name: "{{ backup.backup.id }}" + volume_id: "{{ vol1.volume.id }}" + register: restore_backup + + - name: assert result + ansible.builtin.assert: + that: + - restore_backup is success + - restore_backup is changed + + - name: Delete backup + opentelekomcloud.cloud.cbr_backup: + name: "{{ backup.backup.id }}" + state: absent + wait: true + timeout: 600 + register: delete_backup + + - name: assert result + ansible.builtin.assert: + that: + - delete_backup is success + - delete_backup is changed + always: - name: Cleanup block: diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index baae0efa..c6bf52f4 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -8,6 +8,9 @@ plugins/modules/as_policy.py validate-modules:missing-gplv3-license plugins/modules/as_policy_info.py validate-modules:missing-gplv3-license plugins/modules/as_quota_info.py validate-modules:missing-gplv3-license plugins/modules/availability_zone_info.py validate-modules:missing-gplv3-license +plugins/modules/cbr_backup.py validate-modules:missing-gplv3-license +plugins/modules/cbr_backup_info.py validate-modules:missing-gplv3-license +plugins/modules/cbr_restore_point.py validate-modules:missing-gplv3-license plugins/modules/cbr_vault.py validate-modules:missing-gplv3-license plugins/modules/cce_cluster.py validate-modules:missing-gplv3-license plugins/modules/cce_cluster_cert_info.py validate-modules:missing-gplv3-license diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 5d8a4e04..5d82fd88 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -8,6 +8,9 @@ plugins/modules/as_policy.py validate-modules:missing-gplv3-license plugins/modules/as_policy_info.py validate-modules:missing-gplv3-license plugins/modules/as_quota_info.py validate-modules:missing-gplv3-license plugins/modules/availability_zone_info.py validate-modules:missing-gplv3-license +plugins/modules/cbr_backup.py validate-modules:missing-gplv3-license +plugins/modules/cbr_backup_info.py validate-modules:missing-gplv3-license +plugins/modules/cbr_restore_point.py validate-modules:missing-gplv3-license plugins/modules/cbr_vault.py validate-modules:missing-gplv3-license plugins/modules/cce_cluster.py validate-modules:missing-gplv3-license plugins/modules/cce_cluster_node.py validate-modules:missing-gplv3-license From 64d5cdbd1779f2cbc82170a9edef2584d7ed1ab2 Mon Sep 17 00:00:00 2001 
From: YustinaKvr <62885041+YustinaKvr@users.noreply.github.com> Date: Fri, 29 Jul 2022 10:02:46 +0300 Subject: [PATCH 60/65] set attr version to optional and delete defaul value for datastore (#212) Fix rds_flavor_info module This pr based on issue #210: "Version" attr set to optional For "datastore" attr default value has been deleted Response field has been completed Reviewed-by: Anton Sidelnikov Reviewed-by: Vladimir Vshivkov --- plugins/modules/rds_flavor_info.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/plugins/modules/rds_flavor_info.py b/plugins/modules/rds_flavor_info.py index 251cada3..36341bc8 100644 --- a/plugins/modules/rds_flavor_info.py +++ b/plugins/modules/rds_flavor_info.py @@ -28,12 +28,10 @@ description: - Name of the database (datastore type). choices: [mysql, postgresql, sqlserver] - default: postgresql type: str version: description: - Datastore version - required: true type: str instance_mode: description: @@ -43,7 +41,6 @@ requirements: ["openstacksdk", "otcextensions"] ''' -# TODO: describe proper output RETURN = ''' rds_flavors: description: List of dictionaries describing RDS flavors @@ -58,6 +55,18 @@ description: Name (version) of the datastore. type: str sample: "10" + ram: + description: Quantity of RAM Gigabytes + type: int + sample: 128 + spec_code: + description: Name of the flavor specification + type: str + sample: "rds.mysql.c3.15xlarge.2.ha" + vcpus: + description: Quantity of available virtual CPUs + type: str + sample: "60" ''' EXAMPLES = ''' @@ -75,9 +84,8 @@ class RdsFlavorModule(OTCModule): argument_spec = dict( name=dict(required=False), - datastore=dict(choices=['mysql', 'postgresql', 'sqlserver'], - default='postgresql'), - version=dict(required=True), + datastore=dict(choices=['mysql', 'postgresql', 'sqlserver']), + version=dict(required=False), instance_mode=dict(choices=['single', 'replica', 'ha']) ) module_kwargs = dict( From c44a06c6470bd8b093e56fd7b45a2a235f0c62ad Mon Sep 17 00:00:00 2001 From: YustinaKvr <62885041+YustinaKvr@users.noreply.github.com> Date: Mon, 1 Aug 2022 11:44:15 +0300 Subject: [PATCH 61/65] Modules examples (#198) Modules examples Base operations with some infrastructure entities with short explanation and tips Reviewed-by: Anton Sidelnikov Reviewed-by: None Reviewed-by: Vladimir Vshivkov --- doc/examples/10_relation_database.yaml | 68 +++++++++++ doc/examples/1_initial_infra.yaml | 111 ++++++++++++++++++ .../2_volume_backup_and_maintenance.yaml | 30 +++++ doc/examples/3_autoscaling.yaml | 85 ++++++++++++++ doc/examples/4_as_policy.yaml | 64 ++++++++++ doc/examples/5_cce_cluster.yaml | 111 ++++++++++++++++++ doc/examples/6_loadbalancer.yaml | 77 ++++++++++++ doc/examples/7_loadbalancer_info.yaml | 31 +++++ doc/examples/8_monitoring.yaml | 110 +++++++++++++++++ doc/examples/9_cloud_search.yaml | 47 ++++++++ 10 files changed, 734 insertions(+) create mode 100644 doc/examples/10_relation_database.yaml create mode 100644 doc/examples/1_initial_infra.yaml create mode 100644 doc/examples/2_volume_backup_and_maintenance.yaml create mode 100644 doc/examples/3_autoscaling.yaml create mode 100644 doc/examples/4_as_policy.yaml create mode 100644 doc/examples/5_cce_cluster.yaml create mode 100644 doc/examples/6_loadbalancer.yaml create mode 100644 doc/examples/7_loadbalancer_info.yaml create mode 100644 doc/examples/8_monitoring.yaml create mode 100644 doc/examples/9_cloud_search.yaml diff --git a/doc/examples/10_relation_database.yaml b/doc/examples/10_relation_database.yaml new file 
mode 100644
index 00000000..0e2fd95b
--- /dev/null
+++ b/doc/examples/10_relation_database.yaml
@@ -0,0 +1,68 @@
+---
+
+# First of all, let's choose the type and version of DB for an RDS instance. For example, we want it
+# to be MySQL (besides that, you can choose postgresql or sqlserver from Microsoft) in HA (or single or replica) mode.
+- name: Get info about chosen type of DB
+  opentelekomcloud.cloud.rds_flavor_info:
+    datastore: "mysql"
+    instance_mode: "ha"
+  register: rds_flavors
+
+# In this debug output you can see all the flavors of the chosen DB type, and now you can decide which
+# flavor exactly fits your needs.
+- name: debug
+  ansible.builtin.debug:
+    msg: "{{ rds_flavors.rds_flavors[0].name }}"
+
+# Now let's create an RDS instance. You can locate it in two or more availability zones.
+# The password you pass to the module is handled in secure mode: this means that it won't be shown in
+# the module's output. Please pay attention that the automatic backup strategy is also set here.
+# The 'cmk_id' attribute, needed for system encryption, has been created beforehand.
+- name: Create RDS instance
+  opentelekomcloud.cloud.rds_instance:
+    name: "{{ rds_instance_name }}"
+    state: present
+    region: "eu-de"
+    availability_zone: "eu-de-01,eu-de-02"
+    datastore_type: "mysql"
+    datastore_version: "8.0"
+    flavor: "{{ rds_flavors.rds_flavors[0].name }}"
+    ha_mode: "semisync"
+    router: "{{ router }}"
+    network: "{{ network_id }}"
+    port: 8080
+    security_group: "{{ secgroup_id }}"
+    password: "{{ password }}"
+    volume_type: "ultrahigh"
+    volume_size: 40
+    disk_encryption: "{{ cmk_id }}"
+    backup_keepdays: 1
+    backup_timeframe: "02:00-03:00"
+    wait: True
+    timeout: 777
+  register: rds

+# With this info module you can get info about your instance.
+- name: Let's get info about the whole RDS instance
+  opentelekomcloud.cloud.rds_instance_info:
+    name: "{{ rds.instance.name }}"
+
+- name: Let's get info about the datastore
+  opentelekomcloud.cloud.rds_datastore_info:
+    name: "{{ rds.instance.id }}"
+
+- name: Now create a backup of the created instance
+  opentelekomcloud.cloud.rds_backup:
+    instance: "{{ rds.instance.id }}"
+    name: "{{ rds_backup_name }}"
+    state: present
+    description: "Backup of the RDS instance"
+    wait: True
+  register: rds_bckp
+
+# Querying RDS backup info. You can use any of the specified attributes, together or separately.
+- name: Get RDS backup info
+  opentelekomcloud.cloud.rds_backup_info:
+    instance: "{{ rds.instance.id }}"
+    backup: "{{ rds_bckp.backup.id }}"
+    backup_type: "{{ rds_bckp.backup.type }}"
diff --git a/doc/examples/1_initial_infra.yaml b/doc/examples/1_initial_infra.yaml
new file mode 100644
index 00000000..e201b9f5
--- /dev/null
+++ b/doc/examples/1_initial_infra.yaml
@@ -0,0 +1,111 @@
+---
+
+# First, we need to create the ecosystem for the further infrastructure. It includes network entities
+# such as a VPC and subnet, a security group and a couple of ECSs. The templated names used below are
+# assumed to be defined beforehand; a hypothetical defaults sketch follows right after this comment.
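+# NOTE: the templated names used throughout these examples ({{ vpc_name }}, {{ vpc_subnet_name }},
+# {{ security_group_name }}, {{ ecs1_name }}, {{ ecs2_name }}, {{ ecs_image }} and so on) are assumed
+# to be defined by the user beforehand. The values below are purely illustrative placeholders, not
+# part of the original scenario; adjust them to your environment or supply them via your own vars file.
+- name: Define example names (hypothetical defaults)
+  ansible.builtin.set_fact:
+    vpc_name: "example_vpc"
+    vpc_subnet_name: "example_subnet"
+    security_group_name: "example_secgroup"
+    ecs1_name: "example_ecs1"
+    ecs2_name: "example_ecs2"
+    ecs_image: "Standard_CentOS_7_latest"
+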
+- name: Create VPC
+  opentelekomcloud.cloud.vpc:
+    name: "{{ vpc_name }}"
+    cidr: "10.10.0.0/24"
+    state: present
+  register: newvpc
+  tags:
+    - vpc
+
+# Please pay attention to the CIDR block: in case of an insufficient number of available hosts there
+# could be errors in the autoscaling groups' behavior.
+- name: Create subnet for VPC
+  opentelekomcloud.cloud.subnet:
+    name: "{{ vpc_subnet_name }}"
+    vpc: "{{ vpc_name }}"
+    cidr: "10.10.0.0/27"
+    gateway_ip: "10.10.0.1"
+    dns_list:
+      - "100.125.4.25"
+      - "100.125.129.199"
+  register: sn
+  tags:
+    - subnet
+
+# There are a few mismatches in resource logic and naming between native OpenStack and
+# Open Telekom Cloud. To make it clear, we placed examples using native OpenStack resources.
+#
+# - name: Create network. In Open Telekom Cloud infrastructure this entity is hidden inside the
+#      Subnet summary, and isn't created separately, but only queried from the existing Subnet.
+#   openstack.cloud.os_network:
+#     name: "{{ network_name }}"
+#     state: present
+#   register: network
+#
+# - name: Create subnet. OpenStack's Subnet is equal to the Open Telekom Cloud Subnet.
+#   openstack.cloud.os_subnet:
+#     name: "{{ subnet_name }}"
+#     state: present
+#     network_name: "{{ network.network.name }}"
+#     cidr: "192.168.110.0/24"
+#     dns_nameservers: "{{ ['100.125.4.25', '8.8.8.8'] }}"
+#   register: subnet
+#
+# - name: Create router. In Open Telekom Cloud terms it's a VPC. Please pay attention that the
+#      Network argument here is not the network created in the previous step, but a constant for OTC.
+#   openstack.cloud.os_router:
+#     name: "{{ router_name }}"
+#     state: present
+#     network: admin_external_net
+#     enable_snat: true
+#     interfaces:
+#       - net: "{{ network.network.name }}"
+#         subnet: "{{ subnet.subnet.name }}"
+#   register: router

+# Exclusive mode guarantees that only explicitly passed rules will take effect, and all of the rules
+# existing before will be deleted. To disable this behavior, set the exclusive option to False.
+- name: Create new security group
+  opentelekomcloud.cloud.security_group:
+    state: present
+    name: "{{ security_group_name }}"
+    description: "Security group for testing purposes"
+    security_group_rules:
+      - direction: "egress"
+        ethertype: "IPv4"
+        protocol: "tcp"
+      - direction: "egress"
+        ethertype: "IPv6"
+      - direction: "ingress"
+        ethertype: "IPv4"
+        protocol: "tcp"
+        port_range_max: 22
+        port_range_min: 22
+    exclusive: True
+  register: secgroup
+  tags:
+    - security_group
+
+- name: Create first ECS and attach it to the resources
+  openstack.cloud.server:
+    name: "{{ ecs1_name }}"
+    image: "{{ ecs_image }}"
+    network: "{{ newvpc.vpc.id }}"
+    flavor: "s3.medium.1"
+    availability_zone: "eu-de-01"
+    volume_size: 6
+    security_groups: "{{ security_group_name }}"
+    auto_ip: no
+    state: present
+  register: ecs1
+  tags:
+    - server1
+
+- name: Create second ECS and attach it to the resources
+  openstack.cloud.server:
+    name: "{{ ecs2_name }}"
+    image: "{{ ecs_image }}"
+    network: "{{ newvpc.vpc.id }}"
+    flavor: "s3.medium.1"
+    availability_zone: "eu-de-01"
+    volume_size: 6
+    security_groups: "{{ security_group_name }}"
+    auto_ip: no
+    state: present
+  register: ecs2
+  tags:
+    - server2
diff --git a/doc/examples/2_volume_backup_and_maintenance.yaml b/doc/examples/2_volume_backup_and_maintenance.yaml
new file mode 100644
index 00000000..db705a43
--- /dev/null
+++ b/doc/examples/2_volume_backup_and_maintenance.yaml
@@ -0,0 +1,30 @@
+---
+
+# You're able to back up both types of disks: the system disk and additionally attached ones.
+# Cloud Server Backups will be covered in the next examples.
+- name: Create a backup of the system volume
+  opentelekomcloud.cloud.volume_backup:
+    display_name: "{{ backup_name }}"
+    display_description: "Full backup of the test instance"
+    state: present
+    volume: "{{ ecs_1_vol }}"
+    force: True
+    wait: yes
+    timeout: 123
+  register: bckp
+  tags:
+    - volume_backup
+
+- name: Let's check whether we have a backup of the ECS volume
+  opentelekomcloud.cloud.volume_backup_info:
+    volume: "{{ ecs_1_vol }}"
+  tags: backup_info
+
+# A snapshot is mandatory for any kind of backup, both full and incremental. If no backups were
+# created before, and the current backup is the first one for this volume, a snapshot will be
+# created automatically.
+- name: Check if we have a snapshot
+  opentelekomcloud.cloud.volume_snapshot_info:
+    name: "yet_another**"
+  tags:
+    - snapshot_info
diff --git a/doc/examples/3_autoscaling.yaml b/doc/examples/3_autoscaling.yaml
new file mode 100644
index 00000000..7dc2334a
--- /dev/null
+++ b/doc/examples/3_autoscaling.yaml
@@ -0,0 +1,85 @@
+---
+
+# A keypair is a mandatory condition for creating and modifying AS configurations and groups. Avoid
+# accidentally deleting this entity, because in this case you'll lose control of your AS
+# entities.
+- name: Create new keypair for accessing AS config
+  openstack.cloud.keypair:
+    name: "{{ keypair_name }}"
+  register: kp
+  tags:
+    - create_keypair
+
+# You're able to create a new AS config based on an existing ECS, using it as a template. For this,
+# pass the ECS's id as a parameter. Here is an example of a new AS config, created from scratch.
+- name: Create new AS config
+  opentelekomcloud.cloud.as_config:
+    scaling_configuration: "{{ as_new_config_name }}"
+    key_name: "{{ keypair_name }}"
+    image: "Standard_CentOS_7_latest"
+    flavor: "s3.medium.1"
+    disk:
+      - size: 10
+        volume_type: 'SAS'
+        disk_type: 'SYS'
+  register: as_config_new
+  tags:
+    - create_as_config
+
+# Please pay attention to the desired number of instances. It should fall within the range given by
+# the CIDR block of the attached subnet. The router parameter points to the VPC ID.
+- name: Create AS Group
+  opentelekomcloud.cloud.as_group:
+    scaling_group:
+      name: "{{ as_group_name }}"
+    scaling_configuration: "{{ as_config_new.as_config.name }}"
+    min_instance_number: 0
+    desire_instance_number: 2
+    max_instance_number: 4
+    availability_zones: ["eu-de-01"]
+    networks: [{"id": "{{ network_id }}"}]
+    security_groups: [{"id": "{{ secgroup_id }}"}]
+    router: "{{ router }}"
+    delete_publicip: true
+    delete_volume: true
+    action: "resume"
+    state: "present"
+    wait: true
+    timeout: 400
+  register: as_group
+  tags:
+    - create_as_group
+
+- name: Rename AS group
+  opentelekomcloud.cloud.as_group:
+    scaling_group:
+      id: "{{ as_group.as_group.id }}"
+      name: "{{ new_as_group_name }}"
+    max_instance_number: 4
+  register: as_group_new
+
+- name: Get list of AS instances using AS group id
+  opentelekomcloud.cloud.as_instance_info:
+    scaling_group: "{{ as_group_new.as_group.id }}"
+  register: as_inst_list
+  tags:
+    - get_list
+
+# Besides creating instances directly from the AS group module, you can add already existing ECSs to the
+# AS group. Please pay attention that instances to be added must be in the same AZ as the AS group.
+- name: Add AS instances to the AS group + opentelekomcloud.cloud.as_instance: + scaling_group: "{{ as_group_new.as_group.id }}" + scaling_instances: + - "{{ ecs1.server.id }}" + - "{{ ecs2.server.id }}" + action: "add" + state: present + register: as_instances + tags: + - add_instances + +- name: Get list of AS Instances after adding new instances + opentelekomcloud.cloud.as_instance_info: + scaling_group: "{{ as_group.as_group.id }}" + register: as_inst_list_af diff --git a/doc/examples/4_as_policy.yaml b/doc/examples/4_as_policy.yaml new file mode 100644 index 00000000..04b8a06b --- /dev/null +++ b/doc/examples/4_as_policy.yaml @@ -0,0 +1,64 @@ +--- + +# There are many services interconnected with Cloud eye. All the services are logically united +# into groups named Namespaces. Every Namespace supports plenty of metrics, and each of them can +# be monitored. Besides particular metric you want to check up, you need to know Dimension - +# this entity pecifies the metric dimension of the selected resource type. In this +# example we want to monitor inbound bandwidth of public IP connected to our VPC. So first of +# all we will assign a new public IP for further monitoring. +- name: Assign Floating IP + opentelekomcloud.cloud.floating_ip: + network: admin_external_net + register: fl + +# First we need to create an Alarm, which will be included in AS Policy. +- name: Create Cloud Eye Alarm + opentelekomcloud.cloud.ces_alarms: + alarm_name: "{{ alarm_name }}" + state: present + metric: + namespace: "SYS.VPC" + dimensions: + - name: "publicip_id" + value: "{{ fl.floating_ip.id }}" + metric_name: "down_stream" + condition: + period: 300 + filter: average + comparison_operator: ">=" + value: 6 + unit: "B" + count: 1 + alarm_enabled: true + alarm_action_enabled: false + register: alarm + +- name: Create AS policy + opentelekomcloud.cloud.as_policy: + scaling_group: "{{ as_group_name }}" + scaling_policy: "{{ as_policy_name }}" + scaling_policy_type: "alarm" + alarm: "{{ alarm_name }}" + state: "present" + register: as_policy + +- name: Get list of AS Policies + opentelekomcloud.cloud.as_policy_info: + scaling_group: "{{ as_group_name }}" + register: as_policies + +- name: Update AS policy (add scaling_policy_action) + opentelekomcloud.cloud.as_policy: + scaling_group: "{{ as_group_name }}" + scaling_policy: "{{ as_policy_name }}" + scaling_policy_type: "alarm" + alarm: "{{ alarm_name }}" + state: "present" + scaling_policy_action: + operation: "add" + instance_number: 1 + register: as_policy + +- name: Check AS group quotas + opentelekomcloud.cloud.as_quota_info: + scaling_group_id: "{{ scaling_group_id }}" diff --git a/doc/examples/5_cce_cluster.yaml b/doc/examples/5_cce_cluster.yaml new file mode 100644 index 00000000..ce234350 --- /dev/null +++ b/doc/examples/5_cce_cluster.yaml @@ -0,0 +1,111 @@ +--- +# Let's create a Cloud Container Engine cluster and attach it to the previously deployed +# infrastructure. 
+- name: Create CCE Cluster + opentelekomcloud.cloud.cce_cluster: + name: "{{ cce_cluster_name }}" + description: "Cloud Container Engine test cluster" + type: "virtualmachine" + version: "v1.21" + flavor: "{{ cce_flavor }}" + authentication_mode: "rbac" + kube_proxy_mode: "iptables" + router: "{{ router }}" + network: "{{ network_id }}" + container_network_mode: "{{ container_network_mode }}" + container_network_cidr: "10.0.0.0/16" + availability_zone: "multi_az" + state: present + register: cluster + +- name: Get info about cluster certificate + opentelekomcloud.cloud.cce_cluster_cert_info: + cluster: "{{ cluster.cce_cluster.id }}" + +# After cluster creating is finished, you should create a node pool, which will contain some number +# of working nodes. +- name: Create node pool + opentelekomcloud.cloud.cce_node_pool: + name: "{{ node_pool_name }}" + availability_zone: "eu-de-01" + autoscaling_enabled: False + cluster: "{{ cluster.cce_cluster.id }}" + data_volumes: + - volumetype: "SSD" + size: 120 + - volumetype: "SATA" + size: 100 + encrypted: False + flavor: "{{ node_flavor }}" + initial_node_count: 0 + k8s_tags: + mytag: "myvalue" + mysecondtag: "mysecondvalue" + min_node_count: 1 + max_node_count: 3 + network: "{{ network_id }}" + priority: 2 + os: "{{ os_cluster_name }}" + ssh_key: "{{ keypair_name }}" + tags: + - key: "my_first_key" + value: "my_first_value" + - key: "my_second_key" + value: "my_secound_value" + taints: + - key: "first_taint_key" + value: "first_taint_value" + effect: "NoSchedule" + - key: "second_taint_key" + value: "second_taint_value" + effect: "NoExecute" + state: present + register: pool + +# Now you can add to the node pool nodes. Please pay attention that AZ of these nodes must be +# equal to node pool's AZ. +- name: Create CCE Cluster Node + opentelekomcloud.cloud.cce_cluster_node: + annotations: + annotation1: "Test cluster nodes" + availability_zone: "eu-de-01" + cluster: "{{ cce_cluster_name }}" + count: 1 + data_volumes: + - volumetype: "SATA" + size: 100 + encrypted: false + - volumetype: "SAS" + size: 120 + flavor: "{{ node_flavor }}" + k8s_tags: + testtag: "value" + ssh_key: "{{ keypair_name }}" + labels: + mein: "label" + max_pods: 16 + name: "{{ cce_node_name }}" + network: "{{ network_id }}" + os: "{{ os_cluster_name }}" + root_volume_size: 40 + root_volume_type: SATA + tags: + - key: "key1" + value: "value1" + - key: "key2" + value: "value2" + wait: true + state: present + register: node + +- name: Get info about cluster + opentelekomcloud.cloud.cce_cluster_info: + name: "{{ cluster.cce_cluster.id }}" + +- name: Get info about node pool + opentelekomcloud.cloud.cce_node_pool_info: + cce_cluster: "{{ cluster.cce_cluster.id }}" + +- name: Get info about cluster nodes + opentelekomcloud.cloud.cce_cluster_node_info: + cce_cluster: "{{ cluster.cce_cluster.id }}" diff --git a/doc/examples/6_loadbalancer.yaml b/doc/examples/6_loadbalancer.yaml new file mode 100644 index 00000000..f3545568 --- /dev/null +++ b/doc/examples/6_loadbalancer.yaml @@ -0,0 +1,77 @@ +--- +# This loadbalancer would be attached to the couple of ECSs united into one backend server group. +- name: Create loadbalancer for cluster + opentelekomcloud.cloud.loadbalancer: + state: present + auto_public_ip: True + name: "{{ lb_name }}" + vip_subnet: "{{ vpc_subnet_name }}" + register: lb + +# This module just integrate public and private keys into certificate for listener. 
Both public
+# and private keys should be obtained beforehand from third-party resources, for instance Letsencrypt,
+# and put in a place available to the Ansible engine.
+- name: Create certificate for HTTPS connections
+  opentelekomcloud.cloud.lb_certificate:
+    name: "elb_https_cert"
+    type: "server"
+    content: "/home/user/files/rootCA.pem"
+    private_key: "/home/user/files/rootCA.key"
+  register: elb_cert
+
+# For every type of protocol you can create its own listener. In case of an HTTP listener, please pay
+# attention to your subnet address pool: it must be sufficient for all the instances, including the
+# listener itself.
+- name: Create listener for HTTPS traffic
+  opentelekomcloud.cloud.lb_listener:
+    name: "{{ listener_https_name }}"
+    protocol: terminated_https
+    protocol_port: 443
+    loadbalancer: "{{ lb.loadbalancer.id }}"
+    default_tls_container_ref: "{{ elb_cert.elb_certificate.id }}"
+  register: listener_https
+
+# This backend server group will contain multiple ECSs. Here we use the round robin algorithm, as we
+# use the HTTP protocol, but you can choose source_ip or least_connection instead.
+- name: Create backend server group
+  opentelekomcloud.cloud.lb_pool:
+    state: present
+    name: "{{ backend_server_name }}"
+    protocol: http
+    lb_algorithm: round_robin
+    listener: "{{ listener_https }}"
+    loadbalancer: "{{ lb.loadbalancer.id }}"
+  register: backend
+
+- name: Add first server to the backend server group
+  opentelekomcloud.cloud.lb_member:
+    name: "{{ ecs1_name }}"
+    address: "10.10.0.18"
+    protocol_port: 443
+    subnet: "{{ vpc_subnet_name }}"
+    pool: "{{ backend.server_group.id }}"
+  register: bcknd_1
+
+- name: Add second server to the backend server group
+  opentelekomcloud.cloud.lb_member:
+    name: "{{ ecs2_name }}"
+    address: "10.10.0.23"
+    protocol_port: 443
+    subnet: "{{ vpc_subnet_name }}"
+    pool: "{{ backend_group_id }}"
+  register: bcknd_2
+
+# After setting up a backend server group, it's highly recommended that you attach health check
+# monitoring to it.
+- name: Add HTTPS health check for the backend server group
+  opentelekomcloud.cloud.lb_healthmonitor:
+    name: "{{ health_https_name }}"
+    state: present
+    delay: 9
+    max_retries: 3
+    pool: "{{ backend_group_id }}"
+    monitor_timeout: 5
+    type: http
+    monitor_port: 443
+    expected_codes: 200
+    http_method: get
+  register: https_health
diff --git a/doc/examples/7_loadbalancer_info.yaml b/doc/examples/7_loadbalancer_info.yaml
new file mode 100644
index 00000000..50761864
--- /dev/null
+++ b/doc/examples/7_loadbalancer_info.yaml
@@ -0,0 +1,31 @@
+---
+# This playbook contains examples of info modules related to the load balancer infrastructure.
+- name: Get info about specified load balancer + opentelekomcloud.cloud.loadbalancer_info: + name: "{{ lb_name }}" + register: lb_info + +- name: Get info about specified certificate + opentelekomcloud.cloud.lb_certificate: + name: "elb_https_cert" + register: elb_cert_info + +- name: Get info about specified litener + opentelekomcloud.cloud.lb_listener_info: + name: "{{ listener_https_name }}" + register: listener_https_info + +- name: Get info about specified backend server group + opentelekomcloud.cloud.lb_pool: + name: "{{ backend_server_name }}" + register: backend_group_info + +- name: Get info about specified pool members + opentelekomcloud.cloud.lb_member_info: + pool: "{{ backend_server_name }}" + register: bcknd_members_info + +- name: Get info about health checks for HTTP protocol + opentelekomcloud.cloud.lb_healthmonitor_info: + type: http + register: https_health_info diff --git a/doc/examples/8_monitoring.yaml b/doc/examples/8_monitoring.yaml new file mode 100644 index 00000000..e0d8d9f9 --- /dev/null +++ b/doc/examples/8_monitoring.yaml @@ -0,0 +1,110 @@ +--- + +# Now we'll create several alarms to watch our infrastructure. Mind that 'alarm_name' is given by +# user, and 'dimensions' name and 'metric_name' are embedded and constant for each kind of resource, and can be +# taken from user's guide on docs portal here https://docs.otc.t-systems.com/usermanual/ces/en-us_topic_0202622212.html +# SMN topic here has been created beforehand. +- name: Create alarm for ECS CPU utilization + opentelekomcloud.cloud.ces_alarms: + alarm_name: "ecs1_cpu_load" + state: present + metric: + namespace: "SYS.ECS" + dimensions: + - name: "instance_id" + value: "{{ ecs_1_id }}" + metric_name: "CPU_usage" + condition: + period: 300 + filter: average + comparison_operator: ">=" + value: 50 + unit: "Percent" + count: 1 + alarm_enabled: true + alarm_action_enabled: False + alarm_actions: + - type: "notification" + notificationList: "urn:smn:eu-de:5dd3c0b24cdc4d31952c49589182a89d:yet_another_topic" + register: ecs_cpu_alarm + +# As we're watching ECS, 'namespace' attribute is the same, but 'metric_name' is different. +- name: Create alarm for ECS CPU and memory usage + opentelekomcloud.cloud.ces_alarms: + alarm_name: "ecs1_mem_util" + state: present + metric: + namespace: "SYS.ECS" + dimensions: + - name: "instance_id" + value: "{{ ecs_1_id }}" + metric_name: "Memory_usage" + condition: + period: 300 + filter: average + comparison_operator: ">=" + value: 50 + unit: "Percent" + count: 1 + alarm_enabled: true + alarm_action_enabled: True + alarm_actions: + - type: "notification" + notificationList: "urn:smn:eu-de:5dd3c0b24cdc4d31952c49589182a89d:yet_another_topic" + register: ecs_mem_alarm + +# Let's set up alarm for upstream bandwidth for ELB. +- name: Create watchdog alarm for Load Balancer + opentelekomcloud.cloud.ces_alarms: + alarm_name: "lb_watchdog" + state: present + metric: + namespace: "SYS.ELB" + dimensions: + - name: "lbaas_instance_id" + value: "{{ elb_id }}" + metric_name: "m16_l7_upstream_5xx" + condition: + period: 300 + filter: average + comparison_operator: ">=" + value: 5 + unit: "Count/s" + count: 1 + alarm_enabled: true + alarm_action_enabled: True + alarm_actions: + - type: "notification" + notificationList: "urn:smn:eu-de:5dd3c0b24cdc4d31952c49589182a89d:yet_another_topic" + register: elb_5xx_alarm + +# Here type of 'alarm_actions' has been switched to 'autoscaling'. In this case you should set +# field 'notificationList' to empty list. 
+- name: Create load alarm for Auto Scaling Group to adjust number of instances + opentelekomcloud.cloud.ces_alarms: + alarm_name: "as_load" + state: present + metric: + namespace: "SYS.AS" + dimensions: + - name: "AutoScalingGroup" + value: "{{ as_group_name }}" + metric_name: "mem_util" + condition: + period: 300 + filter: average + comparison_operator: ">=" + value: 50 + unit: "Percent" + count: 2 + alarm_enabled: true + alarm_action_enabled: True + alarm_actions: + - type: "autoscaling" + notificationList: [] + register: as_mem_alarm + +- name: Get Alarm Infos + opentelekomcloud.cloud.ces_alarms_info: + name: "{{ alarm_name }}" + register: ces_al_info diff --git a/doc/examples/9_cloud_search.yaml b/doc/examples/9_cloud_search.yaml new file mode 100644 index 00000000..fec761b1 --- /dev/null +++ b/doc/examples/9_cloud_search.yaml @@ -0,0 +1,47 @@ +--- + +# Here we'll create Cloud Search cluster contained 1 node. Attribute 'cmk_id' is the Master +# Key, which encrypts system. This attribute has been created beforehand. Please pay attention that +# backup strategy is setting up also in this module. +- name: Create Cloud Search cluster + opentelekomcloud.cloud.css_cluster: + name: "{{ css_cluster_name }}" + state: present + flavor: "{{ css_flavour }}" + instance_num: 1 + datastore_version: "7.6.2" + datastore_type: "elasticsearch" + volume_type: "common" + volume_size: 40 + system_encrypted: 1 + system_cmkid: "{{ cmk_id }}" + https_enable: False + authority_enable: False + admin_pwd: "{{ password }}" + router: "{{ router }}" + net: "{{ network_id }}" + security_group: "{{ secgroup_id }}" + backup_period: "00:00 GMT+03:00" + backup_prefix: "yetanother" + backup_keepday: 1 + register: css_cluster + +- name: Get info about created cluster + opentelekomcloud.cloud.css_cluster_info: + name: "{{ css_cluster.id }}" + register: css_info + +# By default, data of all indices is backed up. You can use the asterisk (*) to back up data of +# certain indices. 
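+# In other words: leaving 'indices' out snapshots every index, while a pattern such as "log*" (an
+# illustrative value) would limit the snapshot to indices whose names start with "log", just like the
+# "yetanother*" pattern used below.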
+- name: Create snapshot of the cluster + opentelekomcloud.cloud.css_snapshot: + cluster: "{{ css_cluster.id }}" + name: "{{ css_snapshot_name }}" + description: "Example snapshot of the CSS cluster" + state: present + indices: "yetanother*" + register: css_snapshot + +- name: Get info about CSS snapshot + opentelekomcloud.cloud.css_snapshot_info: + cluster: "{{ css_cluster.id }}" From 4bffedabe0c5377423ab0a4f0f273f1c951ad393 Mon Sep 17 00:00:00 2001 From: Anton Sidelnikov <53078276+anton-sidelnikov@users.noreply.github.com> Date: Tue, 2 Aug 2022 11:23:58 +0300 Subject: [PATCH 62/65] kms info module (#217) kms info module Simple kms info module Reviewed-by: Vladimir Vshivkov Reviewed-by: Artem Lifshits Reviewed-by: None --- doc/examples/10_relation_database.yaml | 4 +- doc/examples/1_initial_infra.yaml | 6 +- .../2_volume_backup_and_maintenance.yaml | 4 +- doc/examples/3_autoscaling.yaml | 4 +- doc/examples/5_cce_cluster.yaml | 4 +- doc/examples/6_loadbalancer.yaml | 5 +- doc/examples/8_monitoring.yaml | 56 +++---- doc/examples/9_cloud_search.yaml | 4 +- meta/runtime.yml | 1 + plugins/modules/kms_info.py | 148 ++++++++++++++++++ .../targets/kms_info/tasks/main.yaml | 13 ++ tests/sanity/ignore-2.10.txt | 1 + tests/sanity/ignore-2.9.txt | 1 + 13 files changed, 208 insertions(+), 43 deletions(-) create mode 100644 plugins/modules/kms_info.py create mode 100644 tests/integration/targets/kms_info/tasks/main.yaml diff --git a/doc/examples/10_relation_database.yaml b/doc/examples/10_relation_database.yaml index 0e2fd95b..dca6a731 100644 --- a/doc/examples/10_relation_database.yaml +++ b/doc/examples/10_relation_database.yaml @@ -38,7 +38,7 @@ disk_encryption: "{{ cmk_id }}" backup_keepdays: 1 backup_timeframe: "02:00-03:00" - wait: True + wait: true timeout: 777 register: rds @@ -57,7 +57,7 @@ name: "{{ rds_backup_name }}" state: present description: "Backup of the RDS instance" - wait: True + wait: true register: rds_bckp # Queirying RDS backup info. You can use any of specified attributes, together or separetely. 
diff --git a/doc/examples/1_initial_infra.yaml b/doc/examples/1_initial_infra.yaml index e201b9f5..6dd36935 100644 --- a/doc/examples/1_initial_infra.yaml +++ b/doc/examples/1_initial_infra.yaml @@ -75,7 +75,7 @@ protocol: "tcp" port_range_max: 22 port_range_min: 22 - exclusive: True + exclusive: true register: secgroup tags: - security_group @@ -89,7 +89,7 @@ availability_zone: "eu-de-01" volume_size: 6 security_groups: "{{ security_group_name }}" - auto_ip: no + auto_ip: false state: present register: ecs1 tags: @@ -104,7 +104,7 @@ availability_zone: "eu-de-01" volume_size: 6 security_groups: "{{ security_group_name }}" - auto_ip: no + auto_ip: false state: present register: ecs2 tags: diff --git a/doc/examples/2_volume_backup_and_maintenance.yaml b/doc/examples/2_volume_backup_and_maintenance.yaml index db705a43..ce1a915b 100644 --- a/doc/examples/2_volume_backup_and_maintenance.yaml +++ b/doc/examples/2_volume_backup_and_maintenance.yaml @@ -8,8 +8,8 @@ display_description: "Full backup of the test instance" state: absent volume: "{{ ecs_1_vol }}" - force: True - wait: yes + force: true + wait: true timeout: 123 register: bckp tags: diff --git a/doc/examples/3_autoscaling.yaml b/doc/examples/3_autoscaling.yaml index 7dc2334a..262d952f 100644 --- a/doc/examples/3_autoscaling.yaml +++ b/doc/examples/3_autoscaling.yaml @@ -71,8 +71,8 @@ opentelekomcloud.cloud.as_instance: scaling_group: "{{ as_group_new.as_group.id }}" scaling_instances: - - "{{ ecs1.server.id }}" - - "{{ ecs2.server.id }}" + - "{{ ecs1.server.id }}" + - "{{ ecs2.server.id }}" action: "add" state: present register: as_instances diff --git a/doc/examples/5_cce_cluster.yaml b/doc/examples/5_cce_cluster.yaml index ce234350..c07d2d73 100644 --- a/doc/examples/5_cce_cluster.yaml +++ b/doc/examples/5_cce_cluster.yaml @@ -28,14 +28,14 @@ opentelekomcloud.cloud.cce_node_pool: name: "{{ node_pool_name }}" availability_zone: "eu-de-01" - autoscaling_enabled: False + autoscaling_enabled: false cluster: "{{ cluster.cce_cluster.id }}" data_volumes: - volumetype: "SSD" size: 120 - volumetype: "SATA" size: 100 - encrypted: False + encrypted: false flavor: "{{ node_flavor }}" initial_node_count: 0 k8s_tags: diff --git a/doc/examples/6_loadbalancer.yaml b/doc/examples/6_loadbalancer.yaml index f3545568..6d3de676 100644 --- a/doc/examples/6_loadbalancer.yaml +++ b/doc/examples/6_loadbalancer.yaml @@ -3,7 +3,7 @@ - name: Create loadbalancer for cluster opentelekomcloud.cloud.loadbalancer: state: present - auto_public_ip: True + auto_public_ip: true name: "{{ lb_name }}" vip_subnet: "{{ vpc_subnet_name }}" register: lb @@ -31,7 +31,8 @@ default_tls_container_ref: "{{ elb_cert.elb_certificate.id }}" register: listener_https -# This backend server group will contain multiple ECSs. Here we use roundrobin algorithm, as we use http protocol, but you can choose source_ip or least_connection +# This backend server group will contain multiple ECSs. +# Here we use roundrobin algorithm, as we use http protocol, but you can choose source_ip or least_connection. 
- name: Create backend server group opentelekomcloud.cloud.lb_pool: state: present diff --git a/doc/examples/8_monitoring.yaml b/doc/examples/8_monitoring.yaml index e0d8d9f9..296c81f6 100644 --- a/doc/examples/8_monitoring.yaml +++ b/doc/examples/8_monitoring.yaml @@ -15,14 +15,14 @@ value: "{{ ecs_1_id }}" metric_name: "CPU_usage" condition: - period: 300 - filter: average - comparison_operator: ">=" - value: 50 - unit: "Percent" - count: 1 + period: 300 + filter: average + comparison_operator: ">=" + value: 50 + unit: "Percent" + count: 1 alarm_enabled: true - alarm_action_enabled: False + alarm_action_enabled: false alarm_actions: - type: "notification" notificationList: "urn:smn:eu-de:5dd3c0b24cdc4d31952c49589182a89d:yet_another_topic" @@ -40,14 +40,14 @@ value: "{{ ecs_1_id }}" metric_name: "Memory_usage" condition: - period: 300 - filter: average - comparison_operator: ">=" - value: 50 - unit: "Percent" - count: 1 + period: 300 + filter: average + comparison_operator: ">=" + value: 50 + unit: "Percent" + count: 1 alarm_enabled: true - alarm_action_enabled: True + alarm_action_enabled: true alarm_actions: - type: "notification" notificationList: "urn:smn:eu-de:5dd3c0b24cdc4d31952c49589182a89d:yet_another_topic" @@ -65,14 +65,14 @@ value: "{{ elb_id }}" metric_name: "m16_l7_upstream_5xx" condition: - period: 300 - filter: average - comparison_operator: ">=" - value: 5 - unit: "Count/s" - count: 1 + period: 300 + filter: average + comparison_operator: ">=" + value: 5 + unit: "Count/s" + count: 1 alarm_enabled: true - alarm_action_enabled: True + alarm_action_enabled: true alarm_actions: - type: "notification" notificationList: "urn:smn:eu-de:5dd3c0b24cdc4d31952c49589182a89d:yet_another_topic" @@ -91,14 +91,14 @@ value: "{{ as_group_name }}" metric_name: "mem_util" condition: - period: 300 - filter: average - comparison_operator: ">=" - value: 50 - unit: "Percent" - count: 2 + period: 300 + filter: average + comparison_operator: ">=" + value: 50 + unit: "Percent" + count: 2 alarm_enabled: true - alarm_action_enabled: True + alarm_action_enabled: true alarm_actions: - type: "autoscaling" notificationList: [] diff --git a/doc/examples/9_cloud_search.yaml b/doc/examples/9_cloud_search.yaml index fec761b1..bbcd9db9 100644 --- a/doc/examples/9_cloud_search.yaml +++ b/doc/examples/9_cloud_search.yaml @@ -15,8 +15,8 @@ volume_size: 40 system_encrypted: 1 system_cmkid: "{{ cmk_id }}" - https_enable: False - authority_enable: False + https_enable: false + authority_enable: false admin_pwd: "{{ password }}" router: "{{ router }}" net: "{{ network_id }}" diff --git a/meta/runtime.yml b/meta/runtime.yml index 71e50eec..c55bcbb1 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -84,3 +84,4 @@ action_groups: - anti_ddos_optional_policies_info.py - object_info.py - dns_nameserver_info.py + - kms_info.py diff --git a/plugins/modules/kms_info.py b/plugins/modules/kms_info.py new file mode 100644 index 00000000..b9e06697 --- /dev/null +++ b/plugins/modules/kms_info.py @@ -0,0 +1,148 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = ''' +module: kms_info +short_description: Get info about KMS keys. +extends_documentation_fragment: opentelekomcloud.cloud.otc +version_added: "0.12.5" +author: "Anton Sidelnikov (@anton-sidelnikov)" +description: + - Get KMS key info from the OTC. +options: + name: + description: + - name or ID of the CMK to be queried. + type: str + key_state: + description: + - State of a CMK, values from 1 to 5. + type: str +requirements: ["openstacksdk", "otcextensions"] +''' + +RETURN = ''' +keys: + description: + - Info about about a CMK. + returned: On Success + type: complex + contains: + key_id: + description: CMK ID. + type: str + sample: "0d0466b0-e727-4d9c-b35d-f84bb474a37f" + creation_date: + description: Time when a key is created. + type: str + sample: "1638806642000" + default_key_flag: + description: Identification of a Master Key. + type: str + sample: "0" + domain_id: + description: User domain ID. + type: str + sample: "b168fe00ff56492495a7d22974df2d0b" + key_alias: + description: Alias of a CMK. + type: str + sample: "do-not-delete-pls" + key_description: + description: Description of a CMK. + type: str + sample: "" + key_state: + description: State of a CMK. + type: str + sample: "2" + key_type: + description: Type of a CMK. + type: str + sample: "1" + realm: + description: Region where a CMK resides. + type: str + sample: "eu-de" + scheduled_deletion_date: + description: Time when a key will be deleted as scheduled. + type: str + sample: "" +''' + +EXAMPLES = ''' +# Get info about KMS keys +- opentelekomcloud.cloud.kms_info: + name: "kms-test-123" + register: result +''' + +import re +from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule + +UUID_PATTERN = re.compile(r'^[\da-f]{8}-([\da-f]{4}-){3}[\da-f]{12}$', re.IGNORECASE) + + +class KMSInfoModule(OTCModule): + argument_spec = dict( + name=dict(required=False), + key_state=dict(required=False, no_log=False), + ) + module_kwargs = dict( + supports_check_mode=True + ) + + otce_min_version = '0.26.0' + + def run(self): + + data = [] + query = {} + + if self.params['key_state']: + query['key_state'] = self.params['key_state'] + + if self.params['name']: + if UUID_PATTERN.match(self.params['name']): + raw = self.conn.kms.get_key(self.params['name']) + if raw: + dt = raw.to_dict() + dt.pop('location') + data.append(dt) + else: + raw = self.conn.kms.find_key( + alias=self.params['name'], + ignore_missing=True) + if raw: + dt = raw.to_dict() + dt.pop('location') + data.append(dt) + else: + for raw in self.conn.kms.keys(**query): + dt = raw.to_dict() + dt.pop('location') + data.append(dt) + + self.exit( + changed=False, + keys=data + ) + + +def main(): + module = KMSInfoModule() + module() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/kms_info/tasks/main.yaml b/tests/integration/targets/kms_info/tasks/main.yaml new file mode 100644 index 00000000..2b848571 --- /dev/null +++ b/tests/integration/targets/kms_info/tasks/main.yaml @@ -0,0 +1,13 @@ +--- +- name: KMS Info test + block: + - name: Get AS config info + opentelekomcloud.cloud.kms_info: + register: key + + - name: assert result + ansible.builtin.assert: + that: + - key is success + - key is not changed + - key is defined diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index c6bf52f4..6d8a3418 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt 
@@ -99,3 +99,4 @@ plugins/modules/anti_ddos_optional_policies_info.py validate-modules:missing-gpl plugins/modules/server_group_info.py validate-modules:missing-gplv3-license plugins/modules/object_info.py validate-modules:missing-gplv3-license plugins/modules/dns_nameserver_info.py validate-modules:missing-gplv3-license +plugins/modules/kms_info.py validate-modules:missing-gplv3-license diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 5d82fd88..fcdb0ea4 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -100,3 +100,4 @@ plugins/modules/anti_ddos_optional_policies_info.py validate-modules:missing-gpl plugins/modules/server_group_info.py validate-modules:missing-gplv3-license plugins/modules/object_info.py validate-modules:missing-gplv3-license plugins/modules/dns_nameserver_info.py validate-modules:missing-gplv3-license +plugins/modules/kms_info.py validate-modules:missing-gplv3-license From be031fb91625bef0ef5c4249d6e1478b43124f02 Mon Sep 17 00:00:00 2001 From: Vladimir Vshivkov <32225815+vladimirvshivkov@users.noreply.github.com> Date: Fri, 12 Aug 2022 13:44:53 +0400 Subject: [PATCH 63/65] [CSS] cluster create backup strategy fix (#218) [CSS] cluster create backup strategy fix Reviewed-by: None Reviewed-by: Anton Sidelnikov Reviewed-by: Vladimir Vshivkov Reviewed-by: Polina Gubina --- plugins/modules/css_cluster.py | 114 ++++++++++++++++++-------- tests/integration/targets/dms/aliases | 1 + 2 files changed, 80 insertions(+), 35 deletions(-) create mode 100644 tests/integration/targets/dms/aliases diff --git a/plugins/modules/css_cluster.py b/plugins/modules/css_cluster.py index 95ae74ea..cf6f237c 100644 --- a/plugins/modules/css_cluster.py +++ b/plugins/modules/css_cluster.py @@ -33,7 +33,7 @@ - Engine version. The value can be 6.2.3, 7.1.1 or 7.6.2. - The default value is 7.6.2. type: str - choices: [6.2.3, 7.1.1, 7.6.2] + choices: [7.6.2, 7.9.3] default: 7.6.2 datastore_type: description: @@ -135,22 +135,42 @@ description: - Tag value. The value can contain 0 to 43 characters. Only digits, letters, hyphens (-) and underscores (_) are allowed. - backup_period: + backup_strategy: description: - - Time when a snapshot is created every day. Snapshots can only be created - on the hour. The time format is the time followed by the time zone, - specifically, HH:mm z. In the format, HH:mm refers to the hour time and - z refers to the time zone, for example, 00:00 GMT+08:00 and 01:00 - GMT+08:00. - type: str - backup_prefix: - description: Prefix of the name of the snapshot that is automatically created. - type: str - backup_keepday: - description: - - Number of days for which automatically created snapshots are reserved. - - Value range is 1 to 90 - type: int + - Automatic snapshot creation. This function is disabled by default. + type: dict + suboptions: + period: + description: + - Time when a snapshot is created every day. Snapshots can only be created + on the hour. The time format is the time followed by the time zone, + specifically, HH:mm z. In the format, HH:mm refers to the hour time and + z refers to the time zone, for example, 00:00 GMT+08:00 and 01:00 + GMT+08:00. + type: str + prefix: + description: + - Prefix of the name of the snapshot that is automatically created. + type: str + keepday: + description: + - Number of days for which automatically created snapshots are reserved. + Value range is 1 to 90 + type: int + bucket: + description: + - OBS bucket used for storing backup. 
+ If there is snapshot data in an OBS bucket, + only the OBS bucket will be used for backup storage and cannot be changed. + type: str + basepath: + description: + - Storage path of the snapshot in the OBS bucket. + type: str + agency: + description: + - IAM agency used to access OBS. + type: str state: description: Instance state type: str @@ -199,6 +219,13 @@ 'value': "value0" - 'key': "key1" 'value': "value1" + backup_strategy: + period: "00:00 GMT+03:00" + prefix: "yetanother" + keepday: 1 + agency: "css-agency" + bucket: "css-bucket" + basepath: "css-test" #Delete CSS Cluster - hosts: localhost @@ -215,7 +242,7 @@ class CssClusterModule(OTCModule): argument_spec = dict( name=dict(type='str', required=True), - datastore_version=dict(type='str', choices=['6.2.3', '7.1.1', '7.6.2'], default='7.6.2'), + datastore_version=dict(type='str', choices=['7.6.2', '7.9.3'], default='7.6.2'), datastore_type=dict(type='str', default='elasticsearch'), instance_num=dict(type='int'), flavor=dict(type='str'), @@ -225,14 +252,20 @@ class CssClusterModule(OTCModule): system_cmkid=dict(type='str'), https_enable=dict(type='bool'), authority_enable=dict(type='bool'), - admin_pwd=dict(type='str'), + admin_pwd=dict(type='str', no_log=True), router=dict(type='str'), net=dict(type='str'), security_group=dict(type='str'), tags=dict(required=False, type='list', elements='dict'), - backup_period=dict(type='str'), - backup_prefix=dict(type='str'), - backup_keepday=dict(type='int'), + backup_strategy=dict(type='dict', options=dict( + period=dict(type='str'), + prefix=dict(type='str'), + keepday=dict(type='int'), + bucket=dict(type='str'), + basepath=dict(type='str'), + agency=dict(type='str'), + )), + state=dict(type='str', choices=['present', 'absent'], default='present') @@ -242,8 +275,6 @@ class CssClusterModule(OTCModule): ('state', 'present', ['flavor', 'router', 'net', 'security_group', 'instance_num']), - ('backup_period', not None, ['backup_keepday']), - ('backup_keepday', not None, ['backup_period']), ('authority_enable', 'true', ['admin_pwd']), ('system_encrypted', '1', @@ -262,9 +293,6 @@ def _system_state_change(self, cluster): return False def run(self): - attrs = {} - - cluster = None changed = False cluster = self.conn.css.find_cluster( @@ -293,7 +321,6 @@ def run(self): volume_type = self.params['volume_type'] attrs = { - 'name': self.params['name'], 'datastore': { 'type': self.params['datastore_type'], 'version': self.params['datastore_version'] @@ -312,7 +339,9 @@ def run(self): }, 'diskEncryption': { 'systemEncrypted': self.params['system_encrypted'] - } + }, + 'backupStrategy': {}, + 'name': self.params['name'] } if self.params['system_cmkid']: @@ -327,13 +356,28 @@ def run(self): attrs['adminPwd'] = self.params['admin_pwd'] if self.params['tags']: attrs['tags'] = self.params['tags'] - if self.params['backup_period']: - attrs['backupStrategy']['period'] = self.params['backup_period'] - if self.params['backup_prefix']: - attrs['backupStrategy']['prefix'] = self.params['backup_prefix'] - if self.params['backup_keepday']: - attrs['backupStrategy']['keepday'] = self.params['backup_keepday'] - cluster = self.conn.css.create_cluster(**attrs) + + if self.params['backup_strategy']: + if self.params['backup_strategy']['period']: + attrs['backupStrategy']['period'] = self.params['backup_strategy']['period'] + if self.params['backup_strategy']['prefix']: + attrs['backupStrategy']['prefix'] = self.params['backup_strategy']['prefix'] + if self.params['backup_strategy']['bucket']: + 
attrs['backupStrategy']['bucket'] = self.params['backup_strategy']['bucket'] + if self.params['backup_strategy']['basepath']: + attrs['backupStrategy']['basePath'] = self.params['backup_strategy']['basepath'] + if self.params['backup_strategy']['agency']: + attrs['backupStrategy']['agency'] = self.params['backup_strategy']['agency'] + if self.params['backup_strategy']['keepday'] in range(1, 90): + attrs['backupStrategy']['keepDay'] = self.params['backup_strategy']['keepday'] + else: + self.exit( + changed=False, + failed=True, + message='backup strategy keepday must be in range from 1 to 90' + ) + + cluster = self.conn.css.create_cluster(**self.params) self.exit_json( changed=changed, diff --git a/tests/integration/targets/dms/aliases b/tests/integration/targets/dms/aliases new file mode 100644 index 00000000..7a68b11d --- /dev/null +++ b/tests/integration/targets/dms/aliases @@ -0,0 +1 @@ +disabled From 112a9cc70c301f3088fc0f015890fe542784c66c Mon Sep 17 00:00:00 2001 From: YustinaKvr <62885041+YustinaKvr@users.noreply.github.com> Date: Wed, 24 Aug 2022 10:58:18 +0300 Subject: [PATCH 64/65] Usecases examples (#222) Usecases examples two more examples move examples dir to the repo root add link in README.md Reviewed-by: Vladimir Vshivkov Reviewed-by: Artem Lifshits --- README.md | 3 + examples/10_relation_database.yaml | 68 +++++++++++ examples/11_document_database.yaml | 96 +++++++++++++++ examples/12_dedicated_host.yaml | 49 ++++++++ examples/1_initial_infra.yaml | 111 ++++++++++++++++++ examples/2_volume_backup_and_maintenance.yaml | 30 +++++ examples/3_autoscaling.yaml | 85 ++++++++++++++ examples/4_as_policy.yaml | 64 ++++++++++ examples/5_cce_cluster.yaml | 111 ++++++++++++++++++ examples/6_loadbalancer.yaml | 78 ++++++++++++ examples/7_loadbalancer_info.yaml | 31 +++++ examples/8_monitoring.yaml | 110 +++++++++++++++++ examples/9_cloud_search.yaml | 47 ++++++++ 13 files changed, 883 insertions(+) create mode 100644 examples/10_relation_database.yaml create mode 100644 examples/11_document_database.yaml create mode 100644 examples/12_dedicated_host.yaml create mode 100644 examples/1_initial_infra.yaml create mode 100644 examples/2_volume_backup_and_maintenance.yaml create mode 100644 examples/3_autoscaling.yaml create mode 100644 examples/4_as_policy.yaml create mode 100644 examples/5_cce_cluster.yaml create mode 100644 examples/6_loadbalancer.yaml create mode 100644 examples/7_loadbalancer_info.yaml create mode 100644 examples/8_monitoring.yaml create mode 100644 examples/9_cloud_search.yaml diff --git a/README.md b/README.md index 0383fe2c..89f6ce2b 100644 --- a/README.md +++ b/README.md @@ -116,6 +116,9 @@ all dependencies: debug: var: gw.nat_gateways ``` +[Here](https://github.com/opentelekomcloud/ansible-collection-cloud/tree/master/examples) you can +find some [examples](https://github.com/opentelekomcloud/ansible-collection-cloud/tree/master/examples) of using OTC collection. All +the examples are based on real usecases, and contains some tips and tricks. Run the playbook to verify the functionality: diff --git a/examples/10_relation_database.yaml b/examples/10_relation_database.yaml new file mode 100644 index 00000000..dca6a731 --- /dev/null +++ b/examples/10_relation_database.yaml @@ -0,0 +1,68 @@ +--- + +# First of all, let's choose type and version of DB of an RDS instance. 
For example, we want it +# to be a MySQL (besides that you can choose postgresql or sqlserver on Microsoft) in HA (or single or replica) mode +- name: Get info about choosen type of DB + opentelekomcloud.cloud.rds_flavor_info: + datastore: "mysql" + instance_mode: "ha" + register: rds_flavors + +# In this debug you can see all the flavors of the chosen DB type, and now you can decide what +# flavor exactly fits your needs +- name: debug + ansible.builtin.debug: + msg: "{{ rds_flavors.rds_flavors[0].name }}" + +# Now let's create RDS instance. You can locate it in two or more availability zones. +# Password you pass to the module handles in secure mode: this means that it won't be shown in +# module's output. Please pay attention that automatic backup strategy is setting here, too. +# Attribute 'cmk_id' needed for system encryption, has been created beforehand. +- name: Create RDS instance + opentelekomcloud.cloud.rds_instance: + name: "{{ rds_instance_name }}" + state: present + region: "eu-de" + availability_zone: "eu-de-01,eu-de-02" + datastore_type: "mysql" + datastore_version: "8.0" + flavor: "{{ rds_flavors.rds_flavors[0].name }}" + ha_mode: "semisync" + router: "{{ router }}" + network: "{{ network_id }}" + port: 8080 + security_group: "{{ secgroup_id }}" + password: "{{ password }}" + volume_type: "ultrahigh" + volume_size: 40 + disk_encryption: "{{ cmk_id }}" + backup_keepdays: 1 + backup_timeframe: "02:00-03:00" + wait: true + timeout: 777 + register: rds + +# With this info module you can get info about your instance +- name: Let's get info about whole RDS instance + opentelekomcloud.cloud.rds_instance_info: + name: "{{ rds.instance.name }}" + +- name: Let's get info about datastore + opentelekomcloud.cloud.rds_datastore_info: + name: "{{ rds.instance.id }}" + +- name: Now create backup of the created instance + opentelekomcloud.cloud.rds_backup: + instance: "{{ rds.instance.id }}" + name: "{{ rds_backup_name }}" + state: present + description: "Backup of the RDS instance" + wait: true + register: rds_bckp + +# Queirying RDS backup info. You can use any of specified attributes, together or separetely. +- name: Get RDS backup info + opentelekomcloud.cloud.rds_backup_info: + instance: "{{ rds.instance.id }}" + backup: "{{ rds_bckp.backup.id }}" + backup_type: "{{ rds_bckp.backup.type }}" diff --git a/examples/11_document_database.yaml b/examples/11_document_database.yaml new file mode 100644 index 00000000..ce7b4741 --- /dev/null +++ b/examples/11_document_database.yaml @@ -0,0 +1,96 @@ +--- +# Here we'll allocate Document Database cluster. First, check what are flavors are available in +# chosen region. +- name: Query info about flavors in region + opentelekomcloud.cloud.dds_flavor_info: + region: "eu-de" + register: dds_flavor + +# Also query supporting datastore versions. Datastore name is the constant: DDS-Community. +- name: Query database version + opentelekomcloud.cloud.dds_datastore_info: + datastore_name: "DDS-Community" + register: dds_ds + +# Now we're ready to create DDS cluster. Each cluster consists of mongos, config and shard nodes. +# First config node should be allocated. For this kind of node storage size is fixed and equals 20 GB. +# Spec code you can find in dds_flavor_info module's output. Please mind that you cannot connect +# to config node. 
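+# The 'flavors' list is what distinguishes the node kinds in the tasks below: every entry names the
+# node 'type' (config, shard or mongos), how many nodes to create ('num'), a 'spec_code' taken from
+# the dds_flavor_info output and - for config and shard nodes - a storage 'size'.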
+- name: Create config node for DDS cluster + opentelekomcloud.cloud.dds_instance: + name: "{{ dds_instance_name }}" + state: present + region: "eu-de" + availability_zone: "eu-de-01" + datastore_version: "{{ dds_ds.datastores[1].version }}" + router: "{{ router }}" + network: "{{ network_id }}" + security_group: "{{ secgroup_id }}" + password: "{{ password }}" + disk_encryption: "{{ cmk_id }}" + mode: "Sharding" + flavors: + - type: "config" + num: 1 + size: 20 + spec_code: "dds.mongodb.s2.large.2.config" + backup_timeframe: "18:00 GMT+03:00" + backup_keepdays: 3 + ssl_option: 1 + register: dds_conf + +# Now let's create shards. Each DDS cluster supports from 2 to 16 shards. Storage size varies +# from 10 to 1000 GB. Spec code you can find in dds_flavor_info module's output. Please mind that +# you cannot connect to a shard node. +- name: Create shard instances for DDS cluster + opentelekomcloud.cloud.dds_instance: + name: "{{ dds_instance_name }}" + state: present + region: "eu-de" + availability_zone: "eu-de-01" + datastore_version: "{{ dds_ds.datastores[1].version }}" + router: "{{ router }}" + network: "{{ network_id }}" + security_group: "{{ secgroup_id }}" + password: "{{ password }}" + disk_encryption: "{{ cmk_id }}" + mode: "Sharding" + flavors: + - type: "shard" + num: 2 + size: "10" + spec_code: "dds.mongodb.s2.medium.4.shard" + backup_timeframe: "20:00" + backup_keepdays: 1 + ssl_option: 1 + register: dds_shard + +# For mongos instances storage size is invalid parameter. Mongos instances are the only one which +# you could be connect to. +- name: Create mongos instances for DDS cluster + opentelekomcloud.cloud.dds_instance: + name: "{{ dds_instance_name }}" + state: present + region: "eu-de" + availability_zone: "eu-de-01" + datastore_version: "{{ dds_ds.datastores[1].version }}" + router: "{{ router }}" + network: "{{ network_id }}" + security_group: "{{ secgroup_id }}" + password: "{{ password }}" + disk_encryption: "{{ cmk_id }}" + mode: "Sharding" + flavors: + - type: "mongos" + num: 2 + spec_code: "dds.mongodb.s2.medium.4.mongos" + backup_timeframe: "22:00" + backup_keepdays: 1 + ssl_option: 1 + register: dds_mng + +# Now we're all set. Creating of cluster takes 15 minutes approximately. After that, you can get +# info about your cluster. +- name: Get info about DDS cluster + opentelekomcloud.cloud.dds_instance_info: + instance: "{{ dds_instance_name }}" diff --git a/examples/12_dedicated_host.yaml b/examples/12_dedicated_host.yaml new file mode 100644 index 00000000..43d804d7 --- /dev/null +++ b/examples/12_dedicated_host.yaml @@ -0,0 +1,49 @@ +--- + +# This playbook shows how to allocate dedicated host in OTC. First, query list of available host +# types to choose one of them. +- name: Query list of available host types + opentelekomcloud.cloud.deh_host_type_info: + az: "eu-de-01" + register: deh_type + +# This play is allocating Dedicated host. Set 'auto_placement' to true to allow an ECS to be placed +# on any available DeH if its DeH ID is not specified during its creation. And please pay +# attention that more than one DEHs with the same name are possible! +- name: Allocate Dedicated host + opentelekomcloud.cloud.deh_host: + name: "{{ deh_name }}" + state: present + auto_placement: true + availability_zone: "eu-de-01" + quantity: 1 + host_type: "s2" + tags: + - key: "First" + value: "101" + register: deh + +# In this play we'll change the hostname. For this, you need an host's ID as the only attribute to +# unequivocally define the host. 
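+# If the ID is not already at hand, it can be looked up by name first; a minimal sketch reusing the
+# variables of this example (the ID is then available in the returned 'deh_hosts' list):
+#
+# - name: Find the dedicated host ID by its name
+#   opentelekomcloud.cloud.deh_host_info:
+#     host: "{{ deh_name }}"
+#   register: deh_lookup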
+- name: Change host name
+  opentelekomcloud.cloud.deh_host:
+    id: "{{ deh.deh_host.dedicated_host_ids[0] }}"
+    name: "{{ deh_new_name }}"
+  register: deh_change
+
+- name: Get info about host after name changing
+  opentelekomcloud.cloud.deh_host_info:
+    host: "{{ deh_change.deh_host.name }}"
+  register: deh_new_info
+
+# Let's check whether the hostname has been changed.
+- name: Assert result
+  ansible.builtin.assert:
+    that:
+      - deh.deh_host.name != deh_change.deh_host.name
+      - deh_new_info.deh_hosts[0].name == deh_change.deh_host.name
+
+
+- name: Get info about ECSs allocated on dedicated host
+  opentelekomcloud.cloud.deh_server_info:
+    dedicated_host: "{{ deh_change.deh_host.id }}"
diff --git a/examples/1_initial_infra.yaml b/examples/1_initial_infra.yaml
new file mode 100644
index 00000000..3ddfb3c2
--- /dev/null
+++ b/examples/1_initial_infra.yaml
@@ -0,0 +1,111 @@
+---
+
+# First, we need to create the ecosystem for the further infrastructure. It includes network entities
+# such as a VPC and subnet, a security group and a couple of ECSs.
+- name: Create VPC
+  opentelekomcloud.cloud.vpc:
+    name: "{{ vpc_name }}"
+    cidr: "10.10.0.0/24"
+    state: present
+  register: newvpc
+  tags:
+    - vpc
+
+# Please pay attention to the CIDR block: if it leaves too few available host addresses, the
+# autoscaling groups may misbehave.
+- name: Create subnet for VPC
+  opentelekomcloud.cloud.subnet:
+    name: "{{ vpc_subnet_name }}"
+    vpc: "{{ vpc_name }}"
+    cidr: "10.10.0.0/27"
+    gateway_ip: "10.10.0.1"
+    dns_list:
+      - "100.125.4.25"
+      - "100.125.129.199"
+  register: sn
+  tags:
+    - subnet
+
+# There are a few mismatches in resource logic and naming between native OpenStack and
+# Open Telekom Cloud. To make this clear, the equivalents using native OpenStack resources are shown below.
+#
+# - name: Create network. In the Open Telekom Cloud infrastructure this entity is hidden inside
+#      the Subnet summary and is not created separately, but only queried from the existing Subnet.
+#   openstack.cloud.os_network:
+#     name: "{{ network_name }}"
+#     state: present
+#   register: network
+#
+# - name: Create subnet. OpenStack's Subnet is equivalent to the Open Telekom Cloud Subnet.
+#   openstack.cloud.os_subnet:
+#     name: "{{ subnet_name }}"
+#     state: present
+#     network_name: "{{ network.network.name }}"
+#     cidr: "192.168.110.0/24"
+#     dns_nameservers: "{{ ['100.125.4.25', '8.8.8.8'] }}"
+#   register: subnet
+#
+# - name: Create router. In Open Telekom Cloud terms it is a VPC. Please pay attention that
+#      the Network argument here is not the Network created in the previous step, but a constant for OTC.
+#   openstack.cloud.os_router:
+#     name: "{{ router_name }}"
+#     state: present
+#     network: admin_external_net
+#     enable_snat: true
+#     interfaces:
+#       - net: "{{ network.network.name }}"
+#         subnet: "{{ subnet.subnet.name }}"
+#   register: router
+
+# Exclusive mode guarantees that only the explicitly passed rules take effect; all rules that existed
+# before will be deleted.
To disable this behavior set Exclusive option as false +- name: Create new security group + opentelekomcloud.cloud.security_group: + state: present + name: "{{ security_group_name }}" + description: "Security group for testing purposes" + security_group_rules: + - direction: "egress" + ethertype: "IPv4" + protocol: "tcp" + - direction: "egress" + ethertype: "IPv6" + - direction: "ingress" + ethertype: "IPv4" + protocol: "tcp" + port_range_max: 22 + port_range_min: 22 + exclusive: true + register: secgroup + tags: + - security_group + +- name: Create first ECS and attach it to the resources + openstack.cloud.server: + name: "{{ ecs1_name }}" + image: "{{ ecs_image }}" + network: "{{ newvpc.vpc.id }}" + flavor: "s3.medium.1" + availability_zone: "eu-de-01" + volume_size: 6 + security_groups: "{{ security_group_name }}" + auto_ip: false + state: present + register: ecs1 + tags: + - server1 + +- name: Create second ECS and attach it to the resources + openstack.cloud.server: + name: "{{ ecs2_name }}" + image: "{{ ecs_image }}" + network: "{{ newvpc.vpc.id }}" + flavor: "s3.medium.1" + availability_zone: "eu-de-01" + volume_size: 6 + security_groups: "{{ security_group_name }}" + auto_ip: false + state: present + register: ecs2 + tags: + - server2 diff --git a/examples/2_volume_backup_and_maintenance.yaml b/examples/2_volume_backup_and_maintenance.yaml new file mode 100644 index 00000000..ce1a915b --- /dev/null +++ b/examples/2_volume_backup_and_maintenance.yaml @@ -0,0 +1,30 @@ +--- + +# You're able to backup both types of disks: system and additionally attached. Cloud Server +# Backups will be cover in a next examples +- name: Create a backup of the system volume + opentelekomcloud.cloud.volume_backup: + display_name: "{{ backup_name }}" + display_description: "Full backup of the test instance" + state: absent + volume: "{{ ecs_1_vol }}" + force: true + wait: true + timeout: 123 + register: bckp + tags: + - volume_backup + +- name: Let's check whether we have a backup of the ECS volume + opentelekomcloud.cloud.volume_backup_info: + volume: "{{ ecs_1_vol }}" + tags: backup_info + +# Snapshot is mandatory for any kind of backup, both full or incremental. If there are no any +# backups created before, and current backup is the first one for this volume, snapshot will be +# create automatically. +- name: Check if we have a snapshot + opentelekomcloud.cloud.volume_snapshot_info: + name: "yet_another**" + tags: + - snapshot_info diff --git a/examples/3_autoscaling.yaml b/examples/3_autoscaling.yaml new file mode 100644 index 00000000..262d952f --- /dev/null +++ b/examples/3_autoscaling.yaml @@ -0,0 +1,85 @@ +--- + +# Keypair is mandatory condition for creating and modifying AS configurations and groups. Be avoid +# of accidental deleting of this entity, because in this case you'll lost control on your AS +# entities. +- name: Create new keypair for accessing AS config + openstack.cloud.keypair: + name: "{{ keypair_name }}" + register: kp + tags: + - create_keypair + +# You're able to create a new AS config based on existing ECS, using it as a template. For this, +# point ECS's id as a parameter. Here is example of a new AS config, taken from scratch. 
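+# Once created, the configuration can be removed again by flipping the state; a minimal sketch,
+# assuming the usual present/absent semantics of this collection:
+#
+# - name: Drop the AS config again
+#   opentelekomcloud.cloud.as_config:
+#     scaling_configuration: "{{ as_new_config_name }}"
+#     state: absent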
+- name: Create new AS config + opentelekomcloud.cloud.as_config: + scaling_configuration: "{{ as_new_config_name }}" + key_name: "{{ keypair_name }}" + image: "Standard_CentOS_7_latest" + flavor: "s3.medium.1" + disk: + - size: 10 + volume_type: 'SAS' + disk_type: 'SYS' + register: as_config_new + tags: + - create_as_config + +# Please pay attention to numbers of desiring instances. It should fall within range given in CIDR +# block of attaching subnet. Router parameter points to VPC ID. +- name: Create AS Group + opentelekomcloud.cloud.as_group: + scaling_group: + name: "{{ as_group_name }}" + scaling_configuration: "{{ as_config_new.as_config.name }}" + min_instance_number: 0 + desire_instance_number: 2 + max_instance_number: 4 + availability_zones: ["eu-de-01"] + networks: [{"id": "{{ network_id }}"}] + security_groups: [{"id": "{{ secgroup_id }}"}] + router: "{{ router }}" + delete_publicip: true + delete_volume: true + action: "resume" + state: "present" + wait: true + timeout: 400 + register: as_group + tags: + - create_as_group + +- name: Rename AS group + opentelekomcloud.cloud.as_group: + scaling_group: + id: "{{ as_group.as_group.id }}" + name: "{{ new_as_group_name }}" + max_instance_number: 4 + register: as_group_new + +- name: Get list of AS instances using AS group id + opentelekomcloud.cloud.as_instance_info: + scaling_group: "{{ as_group_new.as_group.id }}" + register: as_inst_list + tags: + - get_list + +# Besides creating instances directly from AS group module, you can add already existing ECSs to the +# AS group. Please pay attention that instances to be added must be in the same AZ as AS group. +- name: Add AS instances to the AS group + opentelekomcloud.cloud.as_instance: + scaling_group: "{{ as_group_new.as_group.id }}" + scaling_instances: + - "{{ ecs1.server.id }}" + - "{{ ecs2.server.id }}" + action: "add" + state: present + register: as_instances + tags: + - add_instances + +- name: Get list of AS Instances after adding new instances + opentelekomcloud.cloud.as_instance_info: + scaling_group: "{{ as_group.as_group.id }}" + register: as_inst_list_af diff --git a/examples/4_as_policy.yaml b/examples/4_as_policy.yaml new file mode 100644 index 00000000..04b8a06b --- /dev/null +++ b/examples/4_as_policy.yaml @@ -0,0 +1,64 @@ +--- + +# There are many services interconnected with Cloud eye. All the services are logically united +# into groups named Namespaces. Every Namespace supports plenty of metrics, and each of them can +# be monitored. Besides particular metric you want to check up, you need to know Dimension - +# this entity pecifies the metric dimension of the selected resource type. In this +# example we want to monitor inbound bandwidth of public IP connected to our VPC. So first of +# all we will assign a new public IP for further monitoring. +- name: Assign Floating IP + opentelekomcloud.cloud.floating_ip: + network: admin_external_net + register: fl + +# First we need to create an Alarm, which will be included in AS Policy. 
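+# The chain in this playbook is: floating IP -> Cloud Eye alarm watching that IP's 'down_stream'
+# metric -> AS policy that reacts to the alarm. Only the alarm name is handed over to the policy, so
+# the alarm itself could also be created in a separate play.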
+- name: Create Cloud Eye Alarm + opentelekomcloud.cloud.ces_alarms: + alarm_name: "{{ alarm_name }}" + state: present + metric: + namespace: "SYS.VPC" + dimensions: + - name: "publicip_id" + value: "{{ fl.floating_ip.id }}" + metric_name: "down_stream" + condition: + period: 300 + filter: average + comparison_operator: ">=" + value: 6 + unit: "B" + count: 1 + alarm_enabled: true + alarm_action_enabled: false + register: alarm + +- name: Create AS policy + opentelekomcloud.cloud.as_policy: + scaling_group: "{{ as_group_name }}" + scaling_policy: "{{ as_policy_name }}" + scaling_policy_type: "alarm" + alarm: "{{ alarm_name }}" + state: "present" + register: as_policy + +- name: Get list of AS Policies + opentelekomcloud.cloud.as_policy_info: + scaling_group: "{{ as_group_name }}" + register: as_policies + +- name: Update AS policy (add scaling_policy_action) + opentelekomcloud.cloud.as_policy: + scaling_group: "{{ as_group_name }}" + scaling_policy: "{{ as_policy_name }}" + scaling_policy_type: "alarm" + alarm: "{{ alarm_name }}" + state: "present" + scaling_policy_action: + operation: "add" + instance_number: 1 + register: as_policy + +- name: Check AS group quotas + opentelekomcloud.cloud.as_quota_info: + scaling_group_id: "{{ scaling_group_id }}" diff --git a/examples/5_cce_cluster.yaml b/examples/5_cce_cluster.yaml new file mode 100644 index 00000000..c07d2d73 --- /dev/null +++ b/examples/5_cce_cluster.yaml @@ -0,0 +1,111 @@ +--- +# Let's create a Cloud Container Engine cluster and attach it to the previously deployed +# infrastructure. +- name: Create CCE Cluster + opentelekomcloud.cloud.cce_cluster: + name: "{{ cce_cluster_name }}" + description: "Cloud Container Engine test cluster" + type: "virtualmachine" + version: "v1.21" + flavor: "{{ cce_flavor }}" + authentication_mode: "rbac" + kube_proxy_mode: "iptables" + router: "{{ router }}" + network: "{{ network_id }}" + container_network_mode: "{{ container_network_mode }}" + container_network_cidr: "10.0.0.0/16" + availability_zone: "multi_az" + state: present + register: cluster + +- name: Get info about cluster certificate + opentelekomcloud.cloud.cce_cluster_cert_info: + cluster: "{{ cluster.cce_cluster.id }}" + +# After cluster creating is finished, you should create a node pool, which will contain some number +# of working nodes. +- name: Create node pool + opentelekomcloud.cloud.cce_node_pool: + name: "{{ node_pool_name }}" + availability_zone: "eu-de-01" + autoscaling_enabled: false + cluster: "{{ cluster.cce_cluster.id }}" + data_volumes: + - volumetype: "SSD" + size: 120 + - volumetype: "SATA" + size: 100 + encrypted: false + flavor: "{{ node_flavor }}" + initial_node_count: 0 + k8s_tags: + mytag: "myvalue" + mysecondtag: "mysecondvalue" + min_node_count: 1 + max_node_count: 3 + network: "{{ network_id }}" + priority: 2 + os: "{{ os_cluster_name }}" + ssh_key: "{{ keypair_name }}" + tags: + - key: "my_first_key" + value: "my_first_value" + - key: "my_second_key" + value: "my_secound_value" + taints: + - key: "first_taint_key" + value: "first_taint_value" + effect: "NoSchedule" + - key: "second_taint_key" + value: "second_taint_value" + effect: "NoExecute" + state: present + register: pool + +# Now you can add to the node pool nodes. Please pay attention that AZ of these nodes must be +# equal to node pool's AZ. 
+- name: Create CCE Cluster Node + opentelekomcloud.cloud.cce_cluster_node: + annotations: + annotation1: "Test cluster nodes" + availability_zone: "eu-de-01" + cluster: "{{ cce_cluster_name }}" + count: 1 + data_volumes: + - volumetype: "SATA" + size: 100 + encrypted: false + - volumetype: "SAS" + size: 120 + flavor: "{{ node_flavor }}" + k8s_tags: + testtag: "value" + ssh_key: "{{ keypair_name }}" + labels: + mein: "label" + max_pods: 16 + name: "{{ cce_node_name }}" + network: "{{ network_id }}" + os: "{{ os_cluster_name }}" + root_volume_size: 40 + root_volume_type: SATA + tags: + - key: "key1" + value: "value1" + - key: "key2" + value: "value2" + wait: true + state: present + register: node + +- name: Get info about cluster + opentelekomcloud.cloud.cce_cluster_info: + name: "{{ cluster.cce_cluster.id }}" + +- name: Get info about node pool + opentelekomcloud.cloud.cce_node_pool_info: + cce_cluster: "{{ cluster.cce_cluster.id }}" + +- name: Get info about cluster nodes + opentelekomcloud.cloud.cce_cluster_node_info: + cce_cluster: "{{ cluster.cce_cluster.id }}" diff --git a/examples/6_loadbalancer.yaml b/examples/6_loadbalancer.yaml new file mode 100644 index 00000000..74ce195d --- /dev/null +++ b/examples/6_loadbalancer.yaml @@ -0,0 +1,78 @@ +--- +# This loadbalancer would be attached to the couple of ECSs united into one backend server group. +- name: Create loadbalancer for cluster + opentelekomcloud.cloud.loadbalancer: + state: present + auto_public_ip: true + name: "{{ lb_name }}" + vip_subnet: "{{ vpc_subnet_name }}" + register: lb + +# This module just integrate public and private keys into certificate for listener. Both public +# and private keys you should obtain before on third-party resources, for instance, Letsencrypt, +# and put in available for Ansible engine place. +- name: Create certificate for HTTPS connections + opentelekomcloud.cloud.lb_certificate: + name: "elb_https_cert" + type: "server" + content: "/home/user/files/rootCA.pem" + private_key: "/home/user/files/rootCA.key" + register: elb_cert + +# For every type of protocol you can create its own listener. In case of HTTP listener, please pay +# attention on your subnet addresses pool, it must be sufficient for all the instances including +# listener itself. +- name: Create listener for HTTPS traffic + opentelekomcloud.cloud.lb_listener: + name: "{{ listener_https_name }}" + protocol: terminated_https + protocol_port: 443 + loadbalancer: "{{ lb.loadbalancer.id }}" + default_tls_container_ref: "{{ elb_cert.elb_certificate.id }}" + register: listener_https + +# This backend server group will contain multiple ECSs. Here we use roundrobin algorithm, as we use http protocol, +# but you can choose source_ip or least_connection. 
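+# A purely illustrative variation: switching the balancing strategy only means changing this value,
+# for example
+#
+#     lb_algorithm: source_ip         # pin each client IP to one backend
+#     lb_algorithm: least_connection  # prefer the backend with the fewest active connections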
+- name: Create backend server group + opentelekomcloud.cloud.lb_pool: + state: present + name: "{{ backend_server_name }}" + protocol: http + lb_algorithm: round_robin + listener: "{{ listener_https }}" + loadbalancer: "{{ lb.loadbalancer.id }}" + register: backend + +- name: Add first to the backend server group + opentelekomcloud.cloud.lb_member: + name: "{{ ecs1_name }}" + address: "10.10.0.18" + protocol_port: 443 + subnet: "{{ vpc_subnet_name }}" + pool: "{{ backend.server_group.id }}" + register: bcknd_1 + +- name: Add second server to the backend server group + opentelekomcloud.cloud.lb_member: + name: "{{ ecs2_name }}" + address: "10.10.0.23" + protocol_port: 443 + subnet: "{{ vpc_subnet_name }}" + pool: "{{ backend_group_id }}" + register: bcknd_2 + +# After setting up an backend server group, it's highly recommend that you attach health check +# monitoring to it. +- name: Add HTTPS health check for the backend server group + opentelekomcloud.cloud.lb_healthmonitor: + name: "{{ health_https_name }}" + state: present + delay: 9 + max_retries: 3 + pool: "{{ backend_group_id }}" + monitor_timeout: 5 + type: http + monitor_port: 443 + expected_codes: 200 + http_method: get + register: https_health diff --git a/examples/7_loadbalancer_info.yaml b/examples/7_loadbalancer_info.yaml new file mode 100644 index 00000000..50761864 --- /dev/null +++ b/examples/7_loadbalancer_info.yaml @@ -0,0 +1,31 @@ +--- +# This playbook contains examples with info modules related with load balancer infrastructure. +- name: Get info about specified load balancer + opentelekomcloud.cloud.loadbalancer_info: + name: "{{ lb_name }}" + register: lb_info + +- name: Get info about specified certificate + opentelekomcloud.cloud.lb_certificate: + name: "elb_https_cert" + register: elb_cert_info + +- name: Get info about specified litener + opentelekomcloud.cloud.lb_listener_info: + name: "{{ listener_https_name }}" + register: listener_https_info + +- name: Get info about specified backend server group + opentelekomcloud.cloud.lb_pool: + name: "{{ backend_server_name }}" + register: backend_group_info + +- name: Get info about specified pool members + opentelekomcloud.cloud.lb_member_info: + pool: "{{ backend_server_name }}" + register: bcknd_members_info + +- name: Get info about health checks for HTTP protocol + opentelekomcloud.cloud.lb_healthmonitor_info: + type: http + register: https_health_info diff --git a/examples/8_monitoring.yaml b/examples/8_monitoring.yaml new file mode 100644 index 00000000..296c81f6 --- /dev/null +++ b/examples/8_monitoring.yaml @@ -0,0 +1,110 @@ +--- + +# Now we'll create several alarms to watch our infrastructure. Mind that 'alarm_name' is given by +# user, and 'dimensions' name and 'metric_name' are embedded and constant for each kind of resource, and can be +# taken from user's guide on docs portal here https://docs.otc.t-systems.com/usermanual/ces/en-us_topic_0202622212.html +# SMN topic here has been created beforehand. 
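+# The SMN endpoints referenced below in 'notificationList' are addressed by their topic URN, which
+# follows the pattern urn:smn:<region>:<project id>:<topic name> - compare the literal value used in
+# the tasks of this playbook.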
+- name: Create alarm for ECS CPU utilization + opentelekomcloud.cloud.ces_alarms: + alarm_name: "ecs1_cpu_load" + state: present + metric: + namespace: "SYS.ECS" + dimensions: + - name: "instance_id" + value: "{{ ecs_1_id }}" + metric_name: "CPU_usage" + condition: + period: 300 + filter: average + comparison_operator: ">=" + value: 50 + unit: "Percent" + count: 1 + alarm_enabled: true + alarm_action_enabled: false + alarm_actions: + - type: "notification" + notificationList: "urn:smn:eu-de:5dd3c0b24cdc4d31952c49589182a89d:yet_another_topic" + register: ecs_cpu_alarm + +# As we're watching ECS, 'namespace' attribute is the same, but 'metric_name' is different. +- name: Create alarm for ECS CPU and memory usage + opentelekomcloud.cloud.ces_alarms: + alarm_name: "ecs1_mem_util" + state: present + metric: + namespace: "SYS.ECS" + dimensions: + - name: "instance_id" + value: "{{ ecs_1_id }}" + metric_name: "Memory_usage" + condition: + period: 300 + filter: average + comparison_operator: ">=" + value: 50 + unit: "Percent" + count: 1 + alarm_enabled: true + alarm_action_enabled: true + alarm_actions: + - type: "notification" + notificationList: "urn:smn:eu-de:5dd3c0b24cdc4d31952c49589182a89d:yet_another_topic" + register: ecs_mem_alarm + +# Let's set up alarm for upstream bandwidth for ELB. +- name: Create watchdog alarm for Load Balancer + opentelekomcloud.cloud.ces_alarms: + alarm_name: "lb_watchdog" + state: present + metric: + namespace: "SYS.ELB" + dimensions: + - name: "lbaas_instance_id" + value: "{{ elb_id }}" + metric_name: "m16_l7_upstream_5xx" + condition: + period: 300 + filter: average + comparison_operator: ">=" + value: 5 + unit: "Count/s" + count: 1 + alarm_enabled: true + alarm_action_enabled: true + alarm_actions: + - type: "notification" + notificationList: "urn:smn:eu-de:5dd3c0b24cdc4d31952c49589182a89d:yet_another_topic" + register: elb_5xx_alarm + +# Here type of 'alarm_actions' has been switched to 'autoscaling'. In this case you should set +# field 'notificationList' to empty list. +- name: Create load alarm for Auto Scaling Group to adjust number of instances + opentelekomcloud.cloud.ces_alarms: + alarm_name: "as_load" + state: present + metric: + namespace: "SYS.AS" + dimensions: + - name: "AutoScalingGroup" + value: "{{ as_group_name }}" + metric_name: "mem_util" + condition: + period: 300 + filter: average + comparison_operator: ">=" + value: 50 + unit: "Percent" + count: 2 + alarm_enabled: true + alarm_action_enabled: true + alarm_actions: + - type: "autoscaling" + notificationList: [] + register: as_mem_alarm + +- name: Get Alarm Infos + opentelekomcloud.cloud.ces_alarms_info: + name: "{{ alarm_name }}" + register: ces_al_info diff --git a/examples/9_cloud_search.yaml b/examples/9_cloud_search.yaml new file mode 100644 index 00000000..bbcd9db9 --- /dev/null +++ b/examples/9_cloud_search.yaml @@ -0,0 +1,47 @@ +--- + +# Here we'll create Cloud Search cluster contained 1 node. Attribute 'cmk_id' is the Master +# Key, which encrypts system. This attribute has been created beforehand. Please pay attention that +# backup strategy is setting up also in this module. 
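+# The three backup_* parameters below make up the automatic snapshot strategy: 'backup_period' is the
+# time of day a snapshot is taken, 'backup_prefix' names the snapshots and 'backup_keepday' defines
+# how many days (1 to 90) they are kept.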
+- name: Create Cloud Search cluster + opentelekomcloud.cloud.css_cluster: + name: "{{ css_cluster_name }}" + state: present + flavor: "{{ css_flavour }}" + instance_num: 1 + datastore_version: "7.6.2" + datastore_type: "elasticsearch" + volume_type: "common" + volume_size: 40 + system_encrypted: 1 + system_cmkid: "{{ cmk_id }}" + https_enable: false + authority_enable: false + admin_pwd: "{{ password }}" + router: "{{ router }}" + net: "{{ network_id }}" + security_group: "{{ secgroup_id }}" + backup_period: "00:00 GMT+03:00" + backup_prefix: "yetanother" + backup_keepday: 1 + register: css_cluster + +- name: Get info about created cluster + opentelekomcloud.cloud.css_cluster_info: + name: "{{ css_cluster.id }}" + register: css_info + +# By default, data of all indices is backed up. You can use the asterisk (*) to back up data of +# certain indices. +- name: Create snapshot of the cluster + opentelekomcloud.cloud.css_snapshot: + cluster: "{{ css_cluster.id }}" + name: "{{ css_snapshot_name }}" + description: "Example snapshot of the CSS cluster" + state: present + indices: "yetanother*" + register: css_snapshot + +- name: Get info about CSS snapshot + opentelekomcloud.cloud.css_snapshot_info: + cluster: "{{ css_cluster.id }}" From 79849038edaf39f91c1cae2d2825b6b3c992c6a8 Mon Sep 17 00:00:00 2001 From: YustinaKvr <62885041+YustinaKvr@users.noreply.github.com> Date: Tue, 13 Sep 2022 07:03:09 +0300 Subject: [PATCH 65/65] Fix test_requirements (#225) Fix test_requirements Integration tests are nor working on the current settings. Try to find fix Reviewed-by: Anton Sidelnikov Reviewed-by: Artem Lifshits Reviewed-by: Vladimir Vshivkov --- tests/integration/requirements.txt | 4 ++-- .../targets/as_config/tasks/main.yaml | 4 ++-- .../targets/as_group/tasks/main.yaml | 24 +++++++++---------- .../targets/as_instance/tasks/main.yaml | 24 +++++++++---------- .../targets/as_instance_info/tasks/main.yaml | 18 +++++++------- .../targets/as_policy/tasks/main.yaml | 18 +++++++------- .../targets/as_policy_info/tasks/main.yaml | 24 +++++++++---------- .../lb_listener_certificates/tasks/main.yaml | 18 +++++++------- .../integration/targets/loadbalancer/aliases | 1 + .../targets/loadbalancer/tasks/main.yaml | 12 +++++----- 10 files changed, 74 insertions(+), 73 deletions(-) create mode 100644 tests/integration/targets/loadbalancer/aliases diff --git a/tests/integration/requirements.txt b/tests/integration/requirements.txt index 74b54db2..67d95a2a 100644 --- a/tests/integration/requirements.txt +++ b/tests/integration/requirements.txt @@ -1,2 +1,2 @@ -otcextensions -openstacksdk +otcextensions<=0.26.3 +openstacksdk<=0.61.0 diff --git a/tests/integration/targets/as_config/tasks/main.yaml b/tests/integration/targets/as_config/tasks/main.yaml index 02eb2c54..18438ecd 100644 --- a/tests/integration/targets/as_config/tasks/main.yaml +++ b/tests/integration/targets/as_config/tasks/main.yaml @@ -14,7 +14,7 @@ key_name: "{{ ( prefix + '_key') }}" - name: Create keypair - openstack.cloud.os_keypair: + openstack.cloud.keypair: name: "{{ key_name }}" - name: Create as config - check_mode @@ -70,6 +70,6 @@ - dropped_as_config is changed - name: Delete keypair - openstack.cloud.os_keypair: + openstack.cloud.keypair: name: "{{ key_name }}" state: absent diff --git a/tests/integration/targets/as_group/tasks/main.yaml b/tests/integration/targets/as_group/tasks/main.yaml index bbe3f81c..c48b0028 100644 --- a/tests/integration/targets/as_group/tasks/main.yaml +++ b/tests/integration/targets/as_group/tasks/main.yaml @@ -5,13 
+5,13 @@
       cloud: "{{ test_cloud }}"
     opentelekomcloud.cloud.as_instance_info:
       cloud: "{{ test_cloud }}"
-    openstack.cloud.os_keypair:
+    openstack.cloud.keypair:
       cloud: "{{ test_cloud }}"
-    openstack.cloud.os_network:
+    openstack.cloud.network:
       cloud: "{{ test_cloud }}"
-    openstack.cloud.os_subnet:
+    openstack.cloud.subnet:
       cloud: "{{ test_cloud }}"
-    openstack.cloud.os_router:
+    openstack.cloud.router:
       cloud: "{{ test_cloud }}"
     opentelekomcloud.cloud.as_config:
       cloud: "{{ test_cloud }}"
@@ -31,17 +31,17 @@
         router_name: "{{ ( prefix + '_router') }}"

     - name: Create keypair
-      openstack.cloud.os_keypair:
+      openstack.cloud.keypair:
         name: "{{ key_name }}"

     - name: Create network
-      openstack.cloud.os_network:
+      openstack.cloud.network:
         name: "{{ network_name }}"
         state: present
       register: network

     - name: Create subnet
-      openstack.cloud.os_subnet:
+      openstack.cloud.subnet:
         name: "{{ subnet_name }}"
         state: present
         network_name: "{{ network.network.name }}"
@@ -50,7 +50,7 @@
       register: subnet

     - name: Create router
-      openstack.cloud.os_router:
+      openstack.cloud.router:
         name: "{{ router_name }}"
         state: present
         network: admin_external_net
@@ -246,25 +246,25 @@
       failed_when: "dropped_as_config is not changed"

     - name: Delete keypair
-      openstack.cloud.os_keypair:
+      openstack.cloud.keypair:
         name: "{{ key_name }}"
         state: absent
       failed_when: false

     - name: Drop existing router
-      openstack.cloud.os_router:
+      openstack.cloud.router:
         name: "{{ router.router.name }}"
         state: absent
       failed_when: false

     - name: Drop existing subnet
-      openstack.cloud.os_subnet:
+      openstack.cloud.subnet:
         name: "{{ subnet.subnet.name }}"
         state: absent
       failed_when: false

     - name: Drop existing network
-      openstack.cloud.os_network:
+      openstack.cloud.network:
         name: "{{ network.network.name }}"
         state: absent
       ignore_errors: false
diff --git a/tests/integration/targets/as_instance/tasks/main.yaml b/tests/integration/targets/as_instance/tasks/main.yaml
index b976a5e8..fe60b524 100644
--- a/tests/integration/targets/as_instance/tasks/main.yaml
+++ b/tests/integration/targets/as_instance/tasks/main.yaml
@@ -11,17 +11,17 @@
       cloud: "{{ test_cloud }}"
     openstack.cloud.security_group:
       cloud: "{{ test_cloud }}"
-    openstack.cloud.os_network:
+    openstack.cloud.network:
       cloud: "{{ test_cloud }}"
-    openstack.cloud.os_subnet:
+    openstack.cloud.subnet:
       cloud: "{{ test_cloud }}"
-    openstack.cloud.os_router:
+    openstack.cloud.router:
       cloud: "{{ test_cloud }}"
     opentelekomcloud.cloud.floating_ip:
       cloud: "{{ test_cloud }}"
     openstack.cloud.server:
       cloud: "{{ test_cloud }}"
-    openstack.cloud.os_keypair:
+    openstack.cloud.keypair:
       cloud: "{{ test_cloud }}"
   block:
     - name: Set random prefix
@@ -51,7 +51,7 @@
         az2_name: "eu-de-03"

     - name: Create keypair
-      openstack.cloud.os_keypair:
+      openstack.cloud.keypair:
         name: "{{ kp_name }}"
       register: kp

@@ -62,13 +62,13 @@
       register: secgroup

     - name: Create network
-      openstack.cloud.os_network:
+      openstack.cloud.network:
         name: "{{ network_name }}"
         state: present
       register: network

     - name: Create subnet
-      openstack.cloud.os_subnet:
+      openstack.cloud.subnet:
         name: "{{ subnet_name }}"
         state: present
         network_name: "{{ network.network.name }}"
@@ -77,7 +77,7 @@
       register: subnet

     - name: Create router
-      openstack.cloud.os_router:
+      openstack.cloud.router:
         name: "{{ router_name }}"
         state: present
         network: admin_external_net
@@ -492,25 +492,25 @@
       failed_when: false

     - name: Delete existing router
-      openstack.cloud.os_router:
+      openstack.cloud.router:
         name: "{{ router.router.name }}"
         state: absent
       failed_when: false

     - name: Delete existing subnet
-      openstack.cloud.os_subnet:
+      openstack.cloud.subnet:
         name: "{{ subnet.subnet.name }}"
         state: absent
       failed_when: false

     - name: Delete existing network
-      openstack.cloud.os_network:
+      openstack.cloud.network:
         name: "{{ network.network.name }}"
         state: absent
       failed_when: false

     - name: Delete keypair
-      openstack.cloud.os_keypair:
+      openstack.cloud.keypair:
         name: "{{ kp_name }}"
         state: absent
       failed_when: false
diff --git a/tests/integration/targets/as_instance_info/tasks/main.yaml b/tests/integration/targets/as_instance_info/tasks/main.yaml
index 926f7d95..315ea903 100644
--- a/tests/integration/targets/as_instance_info/tasks/main.yaml
+++ b/tests/integration/targets/as_instance_info/tasks/main.yaml
@@ -5,11 +5,11 @@
       cloud: "{{ test_cloud }}"
     opentelekomcloud.cloud.as_group:
       cloud: "{{ test_cloud }}"
-    openstack.cloud.os_network:
+    openstack.cloud.network:
       cloud: "{{ test_cloud }}"
-    openstack.cloud.os_subnet:
+    openstack.cloud.subnet:
       cloud: "{{ test_cloud }}"
-    openstack.cloud.os_router:
+    openstack.cloud.router:
       cloud: "{{ test_cloud }}"
   block:
     - name: Set random prefix
@@ -24,13 +24,13 @@
         router_name: "{{ ( prefix + '_router') }}"

     - name: Create network
-      openstack.cloud.os_network:
+      openstack.cloud.network:
         name: "{{ network_name }}"
         state: present
       register: network

     - name: Create subnet
-      openstack.cloud.os_subnet:
+      openstack.cloud.subnet:
         name: "{{ subnet_name }}"
         state: present
         network_name: "{{ network.network.name }}"
@@ -39,7 +39,7 @@
       register: subnet

     - name: Create router
-      openstack.cloud.os_router:
+      openstack.cloud.router:
         name: "{{ router_name }}"
         state: present
         network: admin_external_net
@@ -106,19 +106,19 @@
       failed_when: false

     - name: Drop existing router
-      openstack.cloud.os_router:
+      openstack.cloud.router:
         name: "{{ router.router.name }}"
         state: absent
       failed_when: false

     - name: Drop existing subnet
-      openstack.cloud.os_subnet:
+      openstack.cloud.subnet:
         name: "{{ subnet.subnet.name }}"
         state: absent
       failed_when: false

     - name: Drop existing network
-      openstack.cloud.os_network:
+      openstack.cloud.network:
         name: "{{ network.network.name }}"
         state: absent
       failed_when: false
diff --git a/tests/integration/targets/as_policy/tasks/main.yaml b/tests/integration/targets/as_policy/tasks/main.yaml
index 171a5472..eb846170 100644
--- a/tests/integration/targets/as_policy/tasks/main.yaml
+++ b/tests/integration/targets/as_policy/tasks/main.yaml
@@ -11,11 +11,11 @@
       cloud: "{{ test_cloud }}"
     opentelekomcloud.cloud.floating_ip:
       cloud: "{{ test_cloud }}"
-    openstack.cloud.os_network:
+    openstack.cloud.network:
       cloud: "{{ test_cloud }}"
-    openstack.cloud.os_subnet:
+    openstack.cloud.subnet:
       cloud: "{{ test_cloud }}"
-    openstack.cloud.os_router:
+    openstack.cloud.router:
       cloud: "{{ test_cloud }}"
   block:
     - name: Set random prefix
@@ -32,7 +32,7 @@
         router_name: "{{ ( prefix + '_router') }}"

     - name: Create network
-      openstack.cloud.os_network:
+      openstack.cloud.network:
         name: "{{ network_name }}"
         state: present
       register: network
@@ -69,7 +69,7 @@
       register: alarm

     - name: Create subnet
-      openstack.cloud.os_subnet:
+      openstack.cloud.subnet:
         name: "{{ subnet_name }}"
         state: present
         network_name: "{{ network.network.name }}"
@@ -78,7 +78,7 @@
       register: subnet

     - name: Create router
-      openstack.cloud.os_router:
+      openstack.cloud.router:
         name: "{{ router_name }}"
         state: present
         network: admin_external_net
@@ -240,19 +240,19 @@
       failed_when: false

     - name: Drop existing router
-      openstack.cloud.os_router:
+      openstack.cloud.router:
         name: "{{ router.router.name }}"
         state: absent
       failed_when: false

     - name: Drop existing subnet
-      openstack.cloud.os_subnet:
+      openstack.cloud.subnet:
         name: "{{ subnet.subnet.name }}"
         state: absent
       failed_when: false

     - name: Drop existing network
-      openstack.cloud.os_network:
+      openstack.cloud.network:
         name: "{{ network.network.name }}"
         state: absent
       failed_when: false
diff --git a/tests/integration/targets/as_policy_info/tasks/main.yaml b/tests/integration/targets/as_policy_info/tasks/main.yaml
index 41c8fca6..aa0fb141 100644
--- a/tests/integration/targets/as_policy_info/tasks/main.yaml
+++ b/tests/integration/targets/as_policy_info/tasks/main.yaml
@@ -5,13 +5,13 @@
       cloud: "{{ test_cloud }}"
     opentelekomcloud.cloud.as_group:
       cloud: "{{ test_cloud }}"
-    openstack.cloud.os_keypair:
+    openstack.cloud.keypair:
       cloud: "{{ test_cloud }}"
-    openstack.cloud.os_network:
+    openstack.cloud.network:
       cloud: "{{ test_cloud }}"
-    openstack.cloud.os_subnet:
+    openstack.cloud.subnet:
       cloud: "{{ test_cloud }}"
-    openstack.cloud.os_router:
+    openstack.cloud.router:
       cloud: "{{ test_cloud }}"
   block:
     - name: Set random prefix
@@ -27,17 +27,17 @@
         router_name: "{{ ( prefix + '_router') }}"

     - name: Create keypair
-      openstack.cloud.os_keypair:
+      openstack.cloud.keypair:
         name: "{{ key_name }}"

     - name: Create network
-      openstack.cloud.os_network:
+      openstack.cloud.network:
         name: "{{ network_name }}"
         state: present
       register: network

     - name: Create subnet
-      openstack.cloud.os_subnet:
+      openstack.cloud.subnet:
         name: "{{ subnet_name }}"
         state: present
         network_name: "{{ network.network.name }}"
@@ -46,7 +46,7 @@
       register: subnet

     - name: Create router
-      openstack.cloud.os_router:
+      openstack.cloud.router:
         name: "{{ router_name }}"
         state: present
         network: admin_external_net
@@ -113,25 +113,25 @@
       failed_when: false

     - name: Delete keypair
-      openstack.cloud.os_keypair:
+      openstack.cloud.keypair:
         name: "{{ key_name }}"
         state: absent
       failed_when: false

     - name: Drop existing router
-      openstack.cloud.os_router:
+      openstack.cloud.router:
         name: "{{ router.router.name }}"
         state: absent
       failed_when: false

     - name: Drop existing subnet
-      openstack.cloud.os_subnet:
+      openstack.cloud.subnet:
         name: "{{ subnet.subnet.name }}"
         state: absent
       failed_when: false

     - name: Drop existing network
-      openstack.cloud.os_network:
+      openstack.cloud.network:
         name: "{{ network.network.name }}"
         state: absent
       failed_when: false
diff --git a/tests/integration/targets/lb_listener_certificates/tasks/main.yaml b/tests/integration/targets/lb_listener_certificates/tasks/main.yaml
index 3f338ba3..9ed08ef8 100644
--- a/tests/integration/targets/lb_listener_certificates/tasks/main.yaml
+++ b/tests/integration/targets/lb_listener_certificates/tasks/main.yaml
@@ -9,11 +9,11 @@
       cloud: "{{ test_cloud }}"
     opentelekomcloud.cloud.lb_certificate_info:
       cloud: "{{ test_cloud }}"
-    openstack.cloud.os_network:
+    openstack.cloud.network:
       cloud: "{{ test_cloud }}"
-    openstack.cloud.os_subnet:
+    openstack.cloud.subnet:
       cloud: "{{ test_cloud }}"
-    openstack.cloud.os_router:
+    openstack.cloud.router:
       cloud: "{{ test_cloud }}"
   block:
     - name: Set random prefix
@@ -119,13 +119,13 @@
          -----END RSA PRIVATE KEY-----

     - name: Create network for ELB
-      openstack.cloud.os_network:
+      openstack.cloud.network:
         name: "{{ network_name }}"
         state: present
       register: lb_net

     - name: Create subnet for ELB
-      openstack.cloud.os_subnet:
+      openstack.cloud.subnet:
         name: "{{ subnet_name }}"
         state: present
         network_name: "{{ lb_net.network.name }}"
@@ -134,7 +134,7 @@
       register: lb_net_subnet

     - name: Create Router for ELB
-      openstack.cloud.os_router:
+      openstack.cloud.router:
         name: "{{ router_name }}"
         state: present
         network: admin_external_net
@@ -356,21 +356,21 @@
       failed_when: false

     - name: Drop existing Router
-      openstack.cloud.os_router:
+      openstack.cloud.router:
         name: "{{ router_name }}"
         state: absent
       register: drop
       failed_when: false

     - name: Drop existing subnet
-      openstack.cloud.os_subnet:
+      openstack.cloud.subnet:
         name: "{{ subnet_name }}"
         state: absent
       register: drop
       failed_when: false

     - name: Drop existing network
-      openstack.cloud.os_network:
+      openstack.cloud.network:
         name: "{{ network_name }}"
         state: absent
       register: drop
diff --git a/tests/integration/targets/loadbalancer/aliases b/tests/integration/targets/loadbalancer/aliases
new file mode 100644
index 00000000..7a68b11d
--- /dev/null
+++ b/tests/integration/targets/loadbalancer/aliases
@@ -0,0 +1 @@
+disabled
diff --git a/tests/integration/targets/loadbalancer/tasks/main.yaml b/tests/integration/targets/loadbalancer/tasks/main.yaml
index 63c1c8b3..507cecbb 100644
--- a/tests/integration/targets/loadbalancer/tasks/main.yaml
+++ b/tests/integration/targets/loadbalancer/tasks/main.yaml
@@ -18,13 +18,13 @@
         pool_name: "{{ ( prefix + '_acc-lb-pool') }}"

     - name: Create network for ELB
-      openstack.cloud.os_network:
+      openstack.cloud.network:
         name: "{{ network_name }}"
         state: present
       register: lb_net

     - name: Create subnet for ELB
-      openstack.cloud.os_subnet:
+      openstack.cloud.subnet:
         name: "{{ subnet_name }}"
         state: present
         network_name: "{{ lb_net.network.name }}"
@@ -33,7 +33,7 @@
       register: lb_net_subnet

     - name: Create Router for ELB
-      openstack.cloud.os_router:
+      openstack.cloud.router:
         name: "{{ router_name }}"
         state: present
         network: admin_external_net
@@ -239,21 +239,21 @@
       failed_when: false

     - name: Drop existing Router
-      openstack.cloud.os_router:
+      openstack.cloud.router:
         name: "{{ router_name }}"
         state: absent
       register: lb_net_router
       failed_when: false

     - name: Drop existing subnet
-      openstack.cloud.os_subnet:
+      openstack.cloud.subnet:
         name: "{{ subnet_name }}"
         state: absent
       register: lb_net_subnet
       failed_when: false

     - name: Drop existing network
-      openstack.cloud.os_network:
+      openstack.cloud.network:
         name: "{{ network_name }}"
         state: absent
       register: lb_net