From 99bb36872077ca003462072f7ea851e3092bd9c3 Mon Sep 17 00:00:00 2001 From: Gevorg Khachatryan Date: Mon, 23 Sep 2024 09:29:35 +0400 Subject: [PATCH 01/15] doc's fix for uuid description (#437) Co-authored-by: Gevorg-Khachatryaan --- plugins/modules/ntnx_acps.py | 4 +++- plugins/modules/ntnx_address_groups.py | 1 + plugins/modules/ntnx_floating_ips.py | 4 +++- plugins/modules/ntnx_image_placement_policy.py | 7 +++---- plugins/modules/ntnx_images.py | 8 ++++---- plugins/modules/ntnx_ndb_clusters.py | 4 +++- plugins/modules/ntnx_ndb_database_clones.py | 1 + plugins/modules/ntnx_ndb_database_snapshots.py | 1 + plugins/modules/ntnx_ndb_databases.py | 1 + plugins/modules/ntnx_ndb_db_server_vms.py | 1 + plugins/modules/ntnx_ndb_maintenance_window.py | 2 +- plugins/modules/ntnx_ndb_profiles.py | 3 ++- plugins/modules/ntnx_ndb_slas.py | 1 + plugins/modules/ntnx_ndb_stretched_vlans.py | 1 + plugins/modules/ntnx_ndb_tags.py | 1 + plugins/modules/ntnx_ndb_vlans.py | 1 + plugins/modules/ntnx_projects.py | 3 +-- plugins/modules/ntnx_protection_rules.py | 1 + plugins/modules/ntnx_recovery_plans.py | 4 +++- plugins/modules/ntnx_roles.py | 1 + plugins/modules/ntnx_security_rules.py | 4 +++- plugins/modules/ntnx_service_groups.py | 8 ++++---- plugins/modules/ntnx_user_groups.py | 5 +---- plugins/modules/ntnx_users.py | 5 +---- plugins/modules/ntnx_vms.py | 4 +++- 25 files changed, 46 insertions(+), 30 deletions(-) diff --git a/plugins/modules/ntnx_acps.py b/plugins/modules/ntnx_acps.py index 04f1f1f56..afd57b89d 100644 --- a/plugins/modules/ntnx_acps.py +++ b/plugins/modules/ntnx_acps.py @@ -19,7 +19,9 @@ required: False type: str acp_uuid: - description: acp UUID + description: + - acp UUID + - will be used to update if C(state) is C(present) and to delete if C(state) is C(absent) type: str desc: description: The description of the association of a role to a user in a given context diff --git a/plugins/modules/ntnx_address_groups.py b/plugins/modules/ntnx_address_groups.py index 55b950c73..eee2ddc5b 100644 --- a/plugins/modules/ntnx_address_groups.py +++ b/plugins/modules/ntnx_address_groups.py @@ -29,6 +29,7 @@ address_group_uuid: description: - uuid of the address group + - will be used to update if C(state) is C(present) and to delete if C(state) is C(absent) - only required while updating or deleting required: false type: str diff --git a/plugins/modules/ntnx_floating_ips.py b/plugins/modules/ntnx_floating_ips.py index 826083bbe..3986b73e0 100644 --- a/plugins/modules/ntnx_floating_ips.py +++ b/plugins/modules/ntnx_floating_ips.py @@ -15,7 +15,9 @@ description: 'Create, Update, Delete floating_ips' options: fip_uuid: - description: floating_ip UUID + description: + - floating_ip UUID + - will be used to update if C(state) is C(present) and to delete if C(state) is C(absent) type: str external_subnet: description: A subnet with external connectivity diff --git a/plugins/modules/ntnx_image_placement_policy.py b/plugins/modules/ntnx_image_placement_policy.py index 087e7ea56..471d3c2e4 100644 --- a/plugins/modules/ntnx_image_placement_policy.py +++ b/plugins/modules/ntnx_image_placement_policy.py @@ -20,9 +20,7 @@ - If C(state) is set to C(present) then the operation will be create the item. - if C(state) is set to C(present) and C(policy_uuid) is given then it will update that image placement policy. - if C(state) is set to C(present) then C(image_uuid) or one of C(name), C(image_categories), C(cluster_categories) needs to be set. 
- - >- - If C(state) is set to C(absent) and if the item exists, then - item is removed. + - If C(state) is set to C(absent) and if the item exists, then item is removed. choices: - present - absent @@ -41,8 +39,9 @@ type: str policy_uuid: description: - - image placement policy of existig uuid + - image placement policy of existing uuid - required only when updating or deleting + - will be used to update if C(state) is C(present) and to delete if C(state) is C(absent) type: str required: false desc: diff --git a/plugins/modules/ntnx_images.py b/plugins/modules/ntnx_images.py index 8733b9df7..de98f2f66 100644 --- a/plugins/modules/ntnx_images.py +++ b/plugins/modules/ntnx_images.py @@ -21,9 +21,7 @@ - if C(state) is set to C(present) and C(image_uuid) is given then it will update that image. - if C(state) is set to C(present) then C(image_uuid), C(source_uri) and C(source_path) are mutually exclusive. - if C(state) is set to C(present) then C(image_uuid) or C(name) needs to be set. - - >- - If C(state) is set to C(absent) and if the item exists, then - item is removed. + - If C(state) is set to C(absent) and if the item exists, then item is removed. choices: - present - absent @@ -39,7 +37,9 @@ required: false type: str image_uuid: - description: Image uuid + description: + - Image uuid + - will be used to update if C(state) is C(present) and to delete if C(state) is C(absent) type: str required: false desc: diff --git a/plugins/modules/ntnx_ndb_clusters.py b/plugins/modules/ntnx_ndb_clusters.py index aa8b5565f..24a3b6a4e 100644 --- a/plugins/modules/ntnx_ndb_clusters.py +++ b/plugins/modules/ntnx_ndb_clusters.py @@ -21,7 +21,9 @@ - Update allowed. uuid: type: str - description: UUID of the cluster. + description: + - UUID of the cluster. + - will be used to update if C(state) is C(present) and to delete if C(state) is C(absent) desc: type: str description: diff --git a/plugins/modules/ntnx_ndb_database_clones.py b/plugins/modules/ntnx_ndb_database_clones.py index d8ada7059..181894119 100644 --- a/plugins/modules/ntnx_ndb_database_clones.py +++ b/plugins/modules/ntnx_ndb_database_clones.py @@ -16,6 +16,7 @@ uuid: description: - uuid of database clone for update and delete + - will be used to update if C(state) is C(present) and to delete if C(state) is C(absent) type: str name: description: diff --git a/plugins/modules/ntnx_ndb_database_snapshots.py b/plugins/modules/ntnx_ndb_database_snapshots.py index b4720f672..751ae1f8e 100644 --- a/plugins/modules/ntnx_ndb_database_snapshots.py +++ b/plugins/modules/ntnx_ndb_database_snapshots.py @@ -21,6 +21,7 @@ snapshot_uuid: description: - snapshot uuid for delete or update + - will be used to update if C(state) is C(present) and to delete if C(state) is C(absent) type: str name: description: diff --git a/plugins/modules/ntnx_ndb_databases.py b/plugins/modules/ntnx_ndb_databases.py index 6a7d76cdd..1c8560164 100644 --- a/plugins/modules/ntnx_ndb_databases.py +++ b/plugins/modules/ntnx_ndb_databases.py @@ -20,6 +20,7 @@ db_uuid: description: - uuid for update or delete of database instance + - will be used to update if C(state) is C(present) and to delete if C(state) is C(absent) type: str name: description: diff --git a/plugins/modules/ntnx_ndb_db_server_vms.py b/plugins/modules/ntnx_ndb_db_server_vms.py index d8a57c320..adaf14702 100644 --- a/plugins/modules/ntnx_ndb_db_server_vms.py +++ b/plugins/modules/ntnx_ndb_db_server_vms.py @@ -27,6 +27,7 @@ uuid: description: - uuid of database server vm for updating or deleting vm + - will be used to 
update if C(state) is C(present) and to delete if C(state) is C(absent) type: str desc: description: diff --git a/plugins/modules/ntnx_ndb_maintenance_window.py b/plugins/modules/ntnx_ndb_maintenance_window.py index f90e8fb96..b8981627c 100644 --- a/plugins/modules/ntnx_ndb_maintenance_window.py +++ b/plugins/modules/ntnx_ndb_maintenance_window.py @@ -20,7 +20,7 @@ uuid: description: - uuid of maintenance window - - should be used for update or delete + - will be used to update if C(state) is C(present) and to delete if C(state) is C(absent) type: str desc: description: diff --git a/plugins/modules/ntnx_ndb_profiles.py b/plugins/modules/ntnx_ndb_profiles.py index 3c342679b..a617e0433 100644 --- a/plugins/modules/ntnx_ndb_profiles.py +++ b/plugins/modules/ntnx_ndb_profiles.py @@ -17,11 +17,12 @@ - currently, compute, network, database parameters and software profiles are supported - only software profile supports versions operations - version related operations can be configured under "software" - - only software profile supports multi cluster availibility + - only software profile supports multi cluster availability options: profile_uuid: description: - uuid of profile for delete or update + - will be used to update if C(state) is C(present) and to delete if C(state) is C(absent) type: str name: description: diff --git a/plugins/modules/ntnx_ndb_slas.py b/plugins/modules/ntnx_ndb_slas.py index eb04fa925..d858bcc6b 100644 --- a/plugins/modules/ntnx_ndb_slas.py +++ b/plugins/modules/ntnx_ndb_slas.py @@ -25,6 +25,7 @@ sla_uuid: description: - sla uuid + - will be used to update if C(state) is C(present) and to delete if C(state) is C(absent) type: str frequency: description: diff --git a/plugins/modules/ntnx_ndb_stretched_vlans.py b/plugins/modules/ntnx_ndb_stretched_vlans.py index bebead6d0..a413a6dff 100644 --- a/plugins/modules/ntnx_ndb_stretched_vlans.py +++ b/plugins/modules/ntnx_ndb_stretched_vlans.py @@ -17,6 +17,7 @@ stretched_vlan_uuid: description: - uuid for update or delete of stretched vlan + - will be used to update if C(state) is C(present) and to delete if C(state) is C(absent) type: str vlans: description: diff --git a/plugins/modules/ntnx_ndb_tags.py b/plugins/modules/ntnx_ndb_tags.py index 4c473805c..bf145c4c0 100644 --- a/plugins/modules/ntnx_ndb_tags.py +++ b/plugins/modules/ntnx_ndb_tags.py @@ -22,6 +22,7 @@ uuid: description: - uuid of tag for update and delete + - will be used to update if C(state) is C(present) and to delete if C(state) is C(absent) type: str desc: description: diff --git a/plugins/modules/ntnx_ndb_vlans.py b/plugins/modules/ntnx_ndb_vlans.py index bd978ce57..c77f65d43 100644 --- a/plugins/modules/ntnx_ndb_vlans.py +++ b/plugins/modules/ntnx_ndb_vlans.py @@ -19,6 +19,7 @@ vlan_uuid: description: - uuid for update or delete of vlan + - will be used to update if C(state) is C(present) and to delete if C(state) is C(absent) type: str name: description: diff --git a/plugins/modules/ntnx_projects.py b/plugins/modules/ntnx_projects.py index 67afb5d8c..935917f8a 100644 --- a/plugins/modules/ntnx_projects.py +++ b/plugins/modules/ntnx_projects.py @@ -27,8 +27,7 @@ project_uuid: description: - This field can be used for update and delete of project - - if C(project_uuid) and C(state)==present will update the project - - if C(project_uuid) and C(state)==absent will delete the project + - will be used to update if C(state) is C(present) and to delete if C(state) is C(absent) type: str required: false desc: diff --git a/plugins/modules/ntnx_protection_rules.py 
b/plugins/modules/ntnx_protection_rules.py index a27b7d6c6..01c8b9aff 100644 --- a/plugins/modules/ntnx_protection_rules.py +++ b/plugins/modules/ntnx_protection_rules.py @@ -17,6 +17,7 @@ description: - protection_rule uuid - required for update and delete + - will be used to update if C(state) is C(present) and to delete if C(state) is C(absent) type: str required: false start_time: diff --git a/plugins/modules/ntnx_recovery_plans.py b/plugins/modules/ntnx_recovery_plans.py index 777ce9f2b..fc39156f5 100644 --- a/plugins/modules/ntnx_recovery_plans.py +++ b/plugins/modules/ntnx_recovery_plans.py @@ -14,7 +14,9 @@ description: 'Create, Update, Delete Recovery Plan' options: plan_uuid: - description: recovery_plan uuid + description: + - recovery_plan uuid + - will be used to update if C(state) is C(present) and to delete if C(state) is C(absent) type: str required: false name: diff --git a/plugins/modules/ntnx_roles.py b/plugins/modules/ntnx_roles.py index 55d6a2559..72315cae0 100644 --- a/plugins/modules/ntnx_roles.py +++ b/plugins/modules/ntnx_roles.py @@ -30,6 +30,7 @@ description: - uuid of the role - only required while updating or deleting + - will be used to update if C(state) is C(present) and to delete if C(state) is C(absent) required: false type: str desc: diff --git a/plugins/modules/ntnx_security_rules.py b/plugins/modules/ntnx_security_rules.py index a95be3ab6..6787af1e9 100644 --- a/plugins/modules/ntnx_security_rules.py +++ b/plugins/modules/ntnx_security_rules.py @@ -62,7 +62,9 @@ required: false type: str security_rule_uuid: - description: security_rule UUID + description: + - security_rule UUID + - will be used to update if C(state) is C(present) and to delete if C(state) is C(absent) type: str allow_ipv6_traffic: description: Allow traffic from ipv6 diff --git a/plugins/modules/ntnx_service_groups.py b/plugins/modules/ntnx_service_groups.py index 14d01d454..8882494dd 100644 --- a/plugins/modules/ntnx_service_groups.py +++ b/plugins/modules/ntnx_service_groups.py @@ -18,9 +18,7 @@ description: - Specify state of service_groups - If C(state) is set to C(present) then service_groups is created. - - >- - If C(state) is set to C(absent) and if the service_groups exists, then - service_groups is removed. + - If C(state) is set to C(absent) and if the service_groups exists, then service_groups is removed. choices: - present - absent @@ -36,7 +34,9 @@ required: False type: str service_group_uuid: - description: service_group UUID + description: + - service_group UUID + - will be used to update if C(state) is C(present) and to delete if C(state) is C(absent) type: str desc: description: service_groups description diff --git a/plugins/modules/ntnx_user_groups.py b/plugins/modules/ntnx_user_groups.py index a86f8d1da..641f4a2dd 100644 --- a/plugins/modules/ntnx_user_groups.py +++ b/plugins/modules/ntnx_user_groups.py @@ -18,10 +18,7 @@ description: - Specify state - If C(state) is set to C(present) then the operation will be create the item. - - if C(state) is set to C(present) and C(user_group_uuid) is given then it will update that user_group. - - >- - If C(state) is set to C(absent) and if the item exists, then - item is removed. + - If C(state) is set to C(absent) and if the item exists, then item is removed. 
choices: - present - absent diff --git a/plugins/modules/ntnx_users.py b/plugins/modules/ntnx_users.py index 2c06d8efd..90708d627 100644 --- a/plugins/modules/ntnx_users.py +++ b/plugins/modules/ntnx_users.py @@ -18,10 +18,7 @@ description: - Specify state - If C(state) is set to C(present) then the operation will be create the item. - - if C(state) is set to C(present) and C(user_uuid) is given then it will update that user. - - >- - If C(state) is set to C(absent) and if the item exists, then - item is removed. + - If C(state) is set to C(absent) and if the item exists, then item is removed. choices: - present - absent diff --git a/plugins/modules/ntnx_vms.py b/plugins/modules/ntnx_vms.py index 7d1d329d3..11ad3fa77 100644 --- a/plugins/modules/ntnx_vms.py +++ b/plugins/modules/ntnx_vms.py @@ -44,7 +44,9 @@ required: false type: str vm_uuid: - description: VM UUID + description: + - VM UUID + - will be used to update if C(state) is C(present) and to delete if C(state) is C(absent) type: str remove_categories: description: From 48aab1ae243c11841a64ed8071e2f757738e8ae8 Mon Sep 17 00:00:00 2001 From: george-ghawali Date: Mon, 23 Sep 2024 08:30:14 +0300 Subject: [PATCH 02/15] update requirements (#499) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 89699d2bd..e6aa82c3e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ pip~=21.3.1 ipaddress~=1.0.23 -setuptools~=44.1.1 +setuptools~=68.0.0 ansible-core==2.15.0 requests~=2.26.0 black==22.8.0 From de49880d7901d78a711fc8ee0f039e803d14ba4d Mon Sep 17 00:00:00 2001 From: Abhinav Bansal Date: Mon, 23 Sep 2024 21:20:34 +0530 Subject: [PATCH 03/15] Added fix for module.fail_json, now it takes 2 Arguments (#498) * Added fix for module.fail_json, now it takes 2 Arguments. 
Issue was: https://github.com/nutanix/nutanix.ansible/issues/465 * After running black --- plugins/modules/ntnx_vms.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/modules/ntnx_vms.py b/plugins/modules/ntnx_vms.py index 11ad3fa77..65c7daf88 100644 --- a/plugins/modules/ntnx_vms.py +++ b/plugins/modules/ntnx_vms.py @@ -881,7 +881,8 @@ def update_vm(module, result): if is_vm_on and vm.is_restart_required(): if not module.params.get("force_power_off"): module.fail_json( - "To make these changes, the VM should be restarted, but 'force_power_off' is False" + msg="To make these changes, the VM should be restarted, but 'force_power_off' is False", + **result, ) power_off_vm(vm, module, result) From b2f4c72685b1103e821dce028391728c9734600c Mon Sep 17 00:00:00 2001 From: george-ghawali Date: Tue, 24 Sep 2024 16:13:35 +0300 Subject: [PATCH 04/15] Adding ansible lint fixes for examples directory --- .github/workflows/ansible-lint.yml | 17 + examples/acp.yml | 9 +- examples/acp_info.yml | 11 +- examples/address_groups_crud.yml | 14 +- examples/category_crud.yml | 24 +- examples/clusters_info.yml | 44 ++- examples/dr/protection_policy.yml | 294 ++++++++------- examples/dr/recovery_plan_with_execution.yml | 310 +++++++-------- examples/fc/api_keys_create.yml | 25 +- examples/fc/api_keys_info.yml | 41 +- examples/fc/fc.yml | 177 +++++---- examples/fc/imaged_cluster_info.yml | 61 ++- examples/fc/imaged_nodes_info.yml | 61 ++- examples/fip.yml | 12 +- examples/fip_info.yml | 14 +- examples/foundation/get_images_info.yml | 21 +- examples/foundation/image_nodes.yml | 106 +++--- examples/foundation/image_upload.yml | 16 +- examples/foundation/ipmi_config.yml | 28 +- .../node_discovery_network_info.yml | 32 +- examples/hosts_info.yml | 46 ++- examples/iaas/iaas.yml | 22 +- examples/iaas/policies_create.yml | 7 +- examples/iaas/policies_delete.yml | 5 +- .../iaas/roles/external_subnet/meta/main.yml | 5 +- .../external_subnet/tasks/external_subnet.yml | 47 +-- .../iaas/roles/external_subnet/tasks/main.yml | 11 +- examples/iaas/roles/fip/meta/main.yml | 5 +- examples/iaas/roles/fip/tasks/fip.yml | 19 +- examples/iaas/roles/fip/tasks/main.yml | 6 +- .../iaas/roles/overlay_subnet/meta/main.yml | 5 +- .../iaas/roles/overlay_subnet/tasks/main.yml | 52 ++- .../overlay_subnet/tasks/overlay_subnet.yml | 21 +- examples/iaas/roles/pbr/meta/main.yml | 5 +- examples/iaas/roles/pbr/tasks/main.yml | 6 +- examples/iaas/roles/pbr/tasks/pbr.yml | 19 +- examples/iaas/roles/pbr_delete/meta/main.yml | 5 +- examples/iaas/roles/pbr_delete/tasks/main.yml | 2 +- .../roles/pbr_delete/tasks/pbr_delete.yml | 7 +- .../iaas/roles/static_route/meta/main.yml | 5 +- .../iaas/roles/static_route/tasks/main.yml | 6 +- .../roles/static_route/tasks/static_route.yml | 27 +- examples/iaas/roles/vm/meta/main.yml | 5 +- examples/iaas/roles/vm/tasks/main.yml | 30 +- examples/iaas/roles/vm/tasks/vm.yml | 25 +- examples/iaas/roles/vpc/meta/main.yml | 5 +- examples/iaas/roles/vpc/tasks/main.yml | 9 +- examples/iaas/roles/vpc/tasks/vpc.yml | 28 +- examples/iaas/vars.yml | 66 ++-- examples/images.yml | 26 +- examples/inventory/nutanix.yaml | 9 +- examples/karbon/cluster_info.yml | 37 +- examples/karbon/create_k8s_cluster.yml | 314 ++++++++-------- examples/karbon/create_registries.yml | 53 ++- examples/karbon/registries_info.yml | 16 +- examples/ndb/all_day2_actions.yml | 117 +++--- examples/ndb/create_clone.yml | 67 ++-- examples/ndb/create_stretched_vlan.yml | 18 +- examples/ndb/create_time_machine_cluster.yml | 18 +- 
examples/ndb/create_vlan.yml | 33 +- examples/ndb/db_server_vms.yml | 353 +++++++++--------- ...ision_database_on_registered_db_server.yml | 10 +- ...rovision_postgres_ha_instance_with_ips.yml | 95 +++-- examples/ndb/refresh_clone.yml | 34 +- examples/ndb/registr_cluster.yml | 46 ++- .../single_instance_postgress_database.yml | 12 +- .../ndb/soft_delete_database_instance.yml | 12 +- examples/ndb/software_profiles.yml | 218 ++++++----- examples/pbr.yml | 34 +- examples/pbr_info.yml | 11 +- examples/permissions_info.yml | 40 +- examples/projects_crud.yml | 16 +- examples/projects_with_role_mapping.yml | 24 +- examples/roles_crud.yml | 16 +- examples/static_routes.yml | 14 +- examples/subnet.yml | 74 ++-- examples/subnet_info.yml | 14 +- examples/user-groups.yml | 58 ++- examples/user.yml | 78 ++-- examples/vm.yml | 105 +++--- examples/vm_info.yml | 18 +- examples/vm_operations.yml | 32 +- examples/vm_update.yml | 108 +++--- examples/vpc.yml | 10 +- examples/vpc_info.yml | 13 +- 85 files changed, 1987 insertions(+), 1984 deletions(-) create mode 100644 .github/workflows/ansible-lint.yml diff --git a/.github/workflows/ansible-lint.yml b/.github/workflows/ansible-lint.yml new file mode 100644 index 000000000..0f4591dc8 --- /dev/null +++ b/.github/workflows/ansible-lint.yml @@ -0,0 +1,17 @@ +--- +name: ansible-lint +on: + - pull_request + +jobs: + build: + name: Ansible Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Set up Python 3.x (latest) + uses: actions/setup-python@v2 + with: + python-version: 3.x + - name: Run ansible-lint + uses: ansible/ansible-lint@main diff --git a/examples/acp.yml b/examples/acp.yml index 8efb39915..b48af6b08 100644 --- a/examples/acp.yml +++ b/examples/acp.yml @@ -2,8 +2,6 @@ - name: ACP playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,10 +10,9 @@ validate_certs: false tasks: - - name: Create ACP with all specfactions - ntnx_acps: - validate_certs: False + nutanix.ncp.ntnx_acps: + validate_certs: false state: present nutanix_host: "{{ IP }}" nutanix_username: "{{ username }}" @@ -41,7 +38,7 @@ collection: ALL - name: Delete ACP - ntnx_acps: + nutanix.ncp.ntnx_acps: state: absent acp_uuid: "{{ acp_uuid }}" register: result diff --git a/examples/acp_info.yml b/examples/acp_info.yml index 41850e614..9b57514bd 100644 --- a/examples/acp_info.yml +++ b/examples/acp_info.yml @@ -2,8 +2,6 @@ - name: ACP_Info playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,20 +10,19 @@ validate_certs: false tasks: - - name: List ACPs using ascending, sorting and name filter - ntnx_floating_ips_info: + nutanix.ncp.ntnx_acps_info: filter: name: "{{ acp_name }}" kind: access_control_policy sort_order: "ASCENDING" sort_attribute: "name" register: result - ignore_errors: True + ignore_errors: true - name: List ACPs using length and offset - ntnx_floating_ips_info: + nutanix.ncp.ntnx_acps_info: length: 3 offset: 0 register: result - ignore_errors: True + ignore_errors: true diff --git a/examples/address_groups_crud.yml b/examples/address_groups_crud.yml index 35e5febdb..6cd5ad0a0 100644 --- a/examples/address_groups_crud.yml +++ b/examples/address_groups_crud.yml @@ -1,8 +1,6 @@ - name: Address group crud playbook. Here we will create, update, read and delete the address group. 
hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -11,7 +9,7 @@ validate_certs: false tasks: - name: Create a address group - ntnx_address_groups: + nutanix.ncp.ntnx_address_groups: state: present name: test-ansible-group-1 desc: test-ansible-group-1-desc @@ -22,8 +20,8 @@ network_prefix: 32 register: ag - - name: update address group - ntnx_address_groups: + - name: Update address group + nutanix.ncp.ntnx_address_groups: state: present address_group_uuid: "{{ ag.address_group_uuid }}" name: test-ansible-group-1-updated @@ -34,16 +32,16 @@ register: updated_ag - name: Read the updated address group - ntnx_address_groups_info: + nutanix.ncp.ntnx_address_groups_info: address_group_uuid: "{{ updated_ag.address_group_uuid }}" register: ag_info - name: Print the address group details - debug: + ansible.builtin.debug: msg: "{{ ag_info }}" - name: Delete the address group. - ntnx_address_groups: + nutanix.ncp.ntnx_address_groups: state: absent address_group_uuid: "{{ updated_ag.address_group_uuid }}" register: op diff --git a/examples/category_crud.yml b/examples/category_crud.yml index 3c4c6b12e..c5c88664b 100644 --- a/examples/category_crud.yml +++ b/examples/category_crud.yml @@ -1,8 +1,6 @@ -- name: categories crud playbook. Here we will create, update, read and delete the category key values. +- name: Categories crud playbook. Here we will create, update, read and delete the category key values. hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -11,14 +9,14 @@ validate_certs: false tasks: - name: Create only category key with description - ntnx_categories: + nutanix.ncp.ntnx_categories: state: "present" name: "test-cat-1" desc: "test-cat-1-desc" register: cat1 - name: Add category values to test-cat-1 - ntnx_categories: + nutanix.ncp.ntnx_categories: state: "present" name: "test-cat-1" values: @@ -26,7 +24,7 @@ - "val2" - name: Create category key with values - ntnx_categories: + nutanix.ncp.ntnx_categories: state: "present" name: "test-cat-2" desc: "test-cat-2-desc" @@ -36,7 +34,7 @@ register: cat2 - name: Add more category values to test-cat-2 - ntnx_categories: + nutanix.ncp.ntnx_categories: state: "present" name: "test-cat-2" values: @@ -44,25 +42,25 @@ - "val6" - name: Get categories info - ntnx_categories_info: + nutanix.ncp.ntnx_categories_info: name: "test-cat-1" register: cat1_info - name: Delete val1 category value from test-cat-1 - ntnx_categories: + nutanix.ncp.ntnx_categories: state: absent name: "test-cat-1" values: - val1 - - name: delete all category values from test-cat-1 - ntnx_categories: + - name: Delete all category values from test-cat-1 + nutanix.ncp.ntnx_categories: state: absent name: "test-cat-1" remove_values: true - - name: delete category key test-cat-2 including its all values - ntnx_categories: + - name: Delete category key test-cat-2 including its all values + nutanix.ncp.ntnx_categories: state: absent name: "test-cat-2" remove_values: true diff --git a/examples/clusters_info.yml b/examples/clusters_info.yml index c50b8286b..84eb807e0 100644 --- a/examples/clusters_info.yml +++ b/examples/clusters_info.yml @@ -2,8 +2,6 @@ - name: Clusters_Info playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,27 +10,27 @@ validate_certs: false tasks: - - name: test getting all clusters - ntnx_clusters_info: - register: clusters + - name: 
Test getting all clusters + nutanix.ncp.ntnx_clusters_info: + register: clusters - - name: test getting particular cluster using uuid - ntnx_clusters_info: - cluster_uuid: '{{ clusters.response.entities[0].metadata.uuid }}' - register: result + - name: Test getting particular cluster using uuid + nutanix.ncp.ntnx_clusters_info: + cluster_uuid: "{{ clusters.response.entities[0].metadata.uuid }}" + register: result - - name: List clusters using length, offset, sort order and priority sort attribute - ntnx_clusters_info: - length: 2 - offset: 0 - sort_order: "ASCENDING" - sort_attribute: "name" - register: result + - name: List clusters using length, offset, sort order and priority sort attribute + nutanix.ncp.ntnx_clusters_info: + length: 2 + offset: 0 + sort_order: "ASCENDING" + sort_attribute: "name" + register: result - - name: List clusters using filter and custom_filter - ntnx_clusters_info: - filter: - name: - custom_filter: - external_ip: - register: result + - name: List clusters using filter and custom_filter + nutanix.ncp.ntnx_clusters_info: + filter: + name: + custom_filter: + external_ip: + register: result diff --git a/examples/dr/protection_policy.yml b/examples/dr/protection_policy.yml index 1b5a2d816..218f64a86 100644 --- a/examples/dr/protection_policy.yml +++ b/examples/dr/protection_policy.yml @@ -1,3 +1,4 @@ +--- ######## Description ########### # Tasks done by this playbook: # 1. Create synchronous protection policy and asynchronous protection policy @@ -5,163 +6,160 @@ # 3. Get created protection plans info and associated entities # 4. Delete protection plan from primary site. ################################# - - - name: PC DR hosts: localhost gather_facts: false - collections: - - nutanix.ncp tasks: - - name: Create protection rule with synchronous schedule - ntnx_protection_rules: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - state: present - wait: True - name: test-ansible - desc: test-ansible-desc - protected_categories: - Environment: - - Dev - - Staging - primary_site: - availability_zone_url: "" - schedules: - - source: - availability_zone_url: "" - destination: - availability_zone_url: "" - protection_type: SYNC - auto_suspend_timeout: 20 - - source: - availability_zone_url: "" - destination: - availability_zone_url: "" - protection_type: SYNC - auto_suspend_timeout: 10 - register: pr + - name: Create protection rule with synchronous schedule + nutanix.ncp.ntnx_protection_rules: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + state: present + wait: true + name: test-ansible + desc: test-ansible-desc + protected_categories: + Environment: + - Dev + - Staging + primary_site: + availability_zone_url: + schedules: + - source: + availability_zone_url: + destination: + availability_zone_url: + protection_type: SYNC + auto_suspend_timeout: 20 + - source: + availability_zone_url: + destination: + availability_zone_url: + protection_type: SYNC + auto_suspend_timeout: 10 + register: pr - - name: delete the protection rule - ntnx_protection_rules: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - state: "absent" - rule_uuid: "{{ pr.rule_uuid }}" + - name: Delete the protection rule + nutanix.ncp.ntnx_protection_rules: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + state: absent + rule_uuid: "{{ pr.rule_uuid }}" - - name: Create protection rule with async schedule - ntnx_protection_rules: - nutanix_host: "" - 
nutanix_username: "" - nutanix_password: "" - validate_certs: false - state: present - wait: True - name: test-ansible-1 - desc: test-ansible-desc-1 - protected_categories: - Environment: - - Dev - - Testing - primary_site: - availability_zone_url: "" - schedules: - - source: - availability_zone_url: "" - destination: - availability_zone_url: "" - protection_type: ASYNC - rpo: 1 - rpo_unit: HOUR - snapshot_type: "CRASH_CONSISTENT" - local_retention_policy: - num_snapshots: 1 - remote_retention_policy: - rollup_retention_policy: - snapshot_interval_type: HOURLY - multiple: 2 + - name: Create protection rule with async schedule + nutanix.ncp.ntnx_protection_rules: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + state: present + wait: true + name: test-ansible-1 + desc: test-ansible-desc-1 + protected_categories: + Environment: + - Dev + - Testing + primary_site: + availability_zone_url: + schedules: + - source: + availability_zone_url: + destination: + availability_zone_url: + protection_type: ASYNC + rpo: 1 + rpo_unit: HOUR + snapshot_type: CRASH_CONSISTENT + local_retention_policy: + num_snapshots: 1 + remote_retention_policy: + rollup_retention_policy: + snapshot_interval_type: HOURLY + multiple: 2 - - source: - availability_zone_url: "" - destination: - availability_zone_url: "" - protection_type: ASYNC - rpo: 1 - rpo_unit: HOUR - snapshot_type: "CRASH_CONSISTENT" - local_retention_policy: - num_snapshots: 2 - remote_retention_policy: - num_snapshots: 1 - register: result + - source: + availability_zone_url: + destination: + availability_zone_url: + protection_type: ASYNC + rpo: 1 + rpo_unit: HOUR + snapshot_type: CRASH_CONSISTENT + local_retention_policy: + num_snapshots: 2 + remote_retention_policy: + num_snapshots: 1 + register: result - - name: Update previously created protection policy - ntnx_protection_rules: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - state: present - wait: True - rule_uuid: "{{result.rule_uuid}}" - name: test-ansible-updated - desc: test-ansible-desc-updated - protected_categories: - Environment: - - Testing - primary_site: - availability_zone_url: "" - schedules: - - source: - availability_zone_url: "" - destination: - availability_zone_url: "" - protection_type: ASYNC - rpo: 2 - rpo_unit: DAY - snapshot_type: "APPLICATION_CONSISTENT" - local_retention_policy: - num_snapshots: 1 - remote_retention_policy: - rollup_retention_policy: - snapshot_interval_type: YEARLY - multiple: 2 + - name: Update previously created protection policy + nutanix.ncp.ntnx_protection_rules: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + state: present + wait: true + rule_uuid: "{{ result.rule_uuid }}" + name: test-ansible-updated + desc: test-ansible-desc-updated + protected_categories: + Environment: + - Testing + primary_site: + availability_zone_url: + schedules: + - source: + availability_zone_url: + destination: + availability_zone_url: + protection_type: ASYNC + rpo: 2 + rpo_unit: DAY + snapshot_type: APPLICATION_CONSISTENT + local_retention_policy: + num_snapshots: 1 + remote_retention_policy: + rollup_retention_policy: + snapshot_interval_type: YEARLY + multiple: 2 - - source: - availability_zone_url: "" - destination: - availability_zone_url: "" - protection_type: ASYNC - rpo: 2 - rpo_unit: DAY - snapshot_type: "APPLICATION_CONSISTENT" - local_retention_policy: - num_snapshots: 1 - remote_retention_policy: - num_snapshots: 2 - register: pr + - source: + 
availability_zone_url: + destination: + availability_zone_url: + protection_type: ASYNC + rpo: 2 + rpo_unit: DAY + snapshot_type: APPLICATION_CONSISTENT + local_retention_policy: + num_snapshots: 1 + remote_retention_policy: + num_snapshots: 2 + register: pr - - name: Get protection policy info and its associated vms info - ntnx_protection_rules_info: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - rule_uuid: "{{ pr.rule_uuid }}" - register: result + - name: Get protection policy info and its associated vms info + nutanix.ncp.ntnx_protection_rules_info: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + rule_uuid: "{{ pr.rule_uuid }}" + register: result - - debug: - msg: "{{ result }}" + - name: Print protection policy info + ansible.builtin.debug: + msg: "{{ result }}" - - name: delete the protection rule - ntnx_protection_rules: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - state: "absent" - rule_uuid: "{{ pr.rule_uuid }}" + - name: Delete the protection rule + nutanix.ncp.ntnx_protection_rules: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + state: absent + rule_uuid: "{{ pr.rule_uuid }}" diff --git a/examples/dr/recovery_plan_with_execution.yml b/examples/dr/recovery_plan_with_execution.yml index 561536001..92fed079d 100644 --- a/examples/dr/recovery_plan_with_execution.yml +++ b/examples/dr/recovery_plan_with_execution.yml @@ -1,3 +1,4 @@ +--- ######## Description ########### # Tasks done by this playbook: # 1. Create Recovery plan using ntnx_recovery_plans @@ -11,170 +12,169 @@ - name: PC DR hosts: localhost gather_facts: false - collections: - - nutanix.ncp tasks: -################################# Create recovery plans using ntnx_recovery_plans ############# - - name: Create recovery plan with custom ip network mapping - ntnx_recovery_plans: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - state: "present" - name: example-rp - desc: recovery plan desc - stages: - - vms: - - name: "test-check" - enable_script_exec: true - delay: 10 - primary_location: - url: "" - recovery_location: - url: "" - network_type: NON_STRETCH - network_mappings: - - primary: - test: - name: "" - gateway_ip: "xx.xx.xx.xx" - prefix: "24" - custom_ip_config: - - vm: - name: "test-check" - ip: "xx.xx.xx.xx" - prod: - name: "" - gateway_ip: "xx.xx.xx.xx" - prefix: "24" - custom_ip_config: - - vm: - name: "test-check" - ip: "xx.xx.xx.xx" - recovery: - test: - name: "" - gateway_ip: "xx.xx.xx.xx" - prefix: "24" - custom_ip_config: - - vm: - name: "test-check" - ip: "xx.xx.xx.xx" - prod: - name: "" - gateway_ip: "xx.xx.xx.xx" - prefix: "24" - custom_ip_config: - - vm: - name: "test-check" - ip: "xx.xx.xx.xx" - register: result + ################################# Create recovery plans using ntnx_recovery_plans ############# + - name: Create recovery plan with custom ip network mapping + nutanix.ncp.ntnx_recovery_plans: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + state: present + name: example-rp + desc: recovery plan desc + stages: + - vms: + - name: test-check + enable_script_exec: true + delay: 10 + primary_location: + url: + recovery_location: + url: + network_type: NON_STRETCH + network_mappings: + - primary: + test: + name: + gateway_ip: xx.xx.xx.xx + prefix: "24" + custom_ip_config: + - vm: + name: test-check + ip: xx.xx.xx.xx + prod: + name: + gateway_ip: xx.xx.xx.xx + prefix: 
"24" + custom_ip_config: + - vm: + name: test-check + ip: xx.xx.xx.xx + recovery: + test: + name: + gateway_ip: xx.xx.xx.xx + prefix: "24" + custom_ip_config: + - vm: + name: test-check + ip: xx.xx.xx.xx + prod: + name: + gateway_ip: xx.xx.xx.xx + prefix: "24" + custom_ip_config: + - vm: + name: test-check + ip: xx.xx.xx.xx + register: result + - name: Print recovery plan details + ansible.builtin.debug: + msg: "{{ result }}" - - debug: - msg: "{{ result }}" + - name: Update recovery plan by adding more stages and remove custom IP to enable dynamic IP allocation + nutanix.ncp.ntnx_recovery_plans: + plan_uuid: "{{ result.plan_uuid }}" + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + state: present + name: example-rp-updated + desc: recovery plan desc updated + stages: + - vms: + - name: test-check + enable_script_exec: true + categories: + - key: Environment + value: Staging + enable_script_exec: true + delay: 2 + - categories: + - key: Environment + value: Dev + primary_location: + url: + recovery_location: + url: + network_type: NON_STRETCH + network_mappings: + - primary: + test: + name: + prod: + name: + recovery: + test: + name: + prod: + name: + register: recovery_plan + #################################### Lets recover the vms on recovery site using ntnx_recovery_plan_jobs ################ - - name: Update recovery plan by adding more stages and remove custom IP to enable dynamic IP allocation - ntnx_recovery_plans: - plan_uuid: "{{result.plan_uuid}}" - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - state: "present" - name: example-rp-updated - desc: recovery plan desc updated - stages: - - vms: - - name: "test-check" - enable_script_exec: true - categories: - - key: Environment - value: Staging - enable_script_exec: true - delay: 2 - - categories: - - key: Environment - value: Dev - primary_location: - url: "" - recovery_location: - url: "" - network_type: NON_STRETCH - network_mappings: - - primary: - test: - name: "" - prod: - name: "" - recovery: - test: - name: "" - prod: - name: "" - register: recovery_plan + - name: Recovery plan info and its affected entities get + nutanix.ncp.ntnx_recovery_plans_info: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + plan_uuid: "{{ recovery_plan.plan_uuid }}" + register: recovery_plan_info + - name: Print recovery plan info + ansible.builtin.debug: + msg: "{{ recovery_plan_info }}" -#################################### Lets recover the vms on recovery site using ntnx_recovery_plan_jobs ################ + # We can also perform FAILOVER, LIVE_MIGRATE and FAILOVER here + - name: Run migrate (planned failover) + nutanix.ncp.ntnx_recovery_plan_jobs: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + state: present + name: test-failover-123 + recovery_plan: + uuid: "{{ recovery_plan.plan_uuid }}" + failed_site: + url: + recovery_site: + url: + action: MIGRATE + ignore_validation_failures: true + register: migrate_job - - name: recovery plan info and its affected entities get - ntnx_recovery_plans_info: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - plan_uuid: "{{recovery_plan.plan_uuid}}" - register: recovery_plan_info + - name: Print migrate job + ansible.builtin.debug: + msg: "{{ migrate_job }}" - - debug: - msg: "{{recovery_plan_info}}" + - name: Get recovery plan job status using info module + nutanix.ncp.ntnx_recovery_plan_jobs_info: + nutanix_host: + 
nutanix_username: + nutanix_password: + validate_certs: false + job_uuid: "{{ migrate_job.job_uuid }}" + register: result + ignore_errors: true - # We can also perform FAILOVER, LIVE_MIGRATE and FAILOVER here - - name: Run migrate (planned failover) - ntnx_recovery_plan_jobs: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - state: "present" - name: test-failover-123 - recovery_plan: - uuid: "{{recovery_plan.plan_uuid}}" - failed_site: - url: "" - recovery_site: - url: "" - action: MIGRATE - ignore_validation_failures: true - register: migrate_job + - name: Print recovery plan job status + ansible.builtin.debug: + msg: "{{ result }}" - - debug: - msg: "{{migrate_job}}" + ###################################### delete the recovery plan ################# - - name: Get recovery plan job status using info module - ntnx_recovery_plan_jobs_info: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - job_uuid: "{{migrate_job.job_uuid}}" - register: result - ignore_errors: True - - - debug: - msg: "{{ result }}" - - ###################################### delete the recovery plan ################# - - - name: Delete recovery plan - ntnx_recovery_plans: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - job_uuid: "{{migrate_job.job_uuid}}" - plan_uuid: "{{recovery_plan.plan_uuid}}" - state: "absent" - register: result + - name: Delete recovery plan + nutanix.ncp.ntnx_recovery_plans: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + job_uuid: "{{ migrate_job.job_uuid }}" + plan_uuid: "{{ recovery_plan.plan_uuid }}" + state: absent + register: result diff --git a/examples/fc/api_keys_create.yml b/examples/fc/api_keys_create.yml index 41a5df90c..6618cf2b8 100644 --- a/examples/fc/api_keys_create.yml +++ b/examples/fc/api_keys_create.yml @@ -2,19 +2,16 @@ - name: API Keys Playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp - tasks: - - name: Create a new API Key - ntnx_foundation_central_api_keys: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - alias: "test-alias" - register: output + - name: Create a new API Key + nutanix.ncp.ntnx_foundation_central_api_keys: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + alias: test-alias + register: output - - name: output of api_key - debug: - msg: '{{ output }}' + - name: Output of api_key + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/fc/api_keys_info.yml b/examples/fc/api_keys_info.yml index 7ea8513e5..5999781d0 100644 --- a/examples/fc/api_keys_info.yml +++ b/examples/fc/api_keys_info.yml @@ -2,28 +2,25 @@ - name: API Keys Info Playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp - tasks: - - name: API key response with alias - ntnx_foundation_central_api_keys: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - alias: "test" - register: output + - name: API key response with alias + nutanix.ncp.ntnx_foundation_central_api_keys: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + alias: test + register: output - - name: API key response with key_uuid - ntnx_foundation_central_api_keys: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username 
}}" - nutanix_password: "{{ password }}" - validate_certs: false - key_uuid: "" - register: output + - name: API key response with key_uuid + nutanix.ncp.ntnx_foundation_central_api_keys: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + key_uuid: + register: output - - name: output of api_key - debug: - msg: '{{ output }}' + - name: Output of api_key + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/fc/fc.yml b/examples/fc/fc.yml index 00f9732fb..0489d25d1 100644 --- a/examples/fc/fc.yml +++ b/examples/fc/fc.yml @@ -2,99 +2,96 @@ - name: Foundation Central Playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp - tasks: - - name: Nodes Imaging with Cluster Creation with manual mode. - ntnx_foundation_central: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - cluster_name: "test" - # skip_cluster_creation: false #set this to true to skip cluster creation - common_network_settings: - cvm_dns_servers: - - 10.x.xx.xx - hypervisor_dns_servers: - - 10.x.xx.xx - cvm_ntp_servers: - - "ntp" - hypervisor_ntp_servers: - - "ntp" - nodes_list: - - manual_mode: - cvm_gateway: "10.xx.xx.xx" - cvm_netmask: "xx.xx.xx.xx" - cvm_ip: "10.x.xx.xx" - hypervisor_gateway: "10.x.xx.xxx" - hypervisor_netmask: "xx.xx.xx.xx" - hypervisor_ip: "10.x.x.xx" - hypervisor_hostname: "Host-1" - imaged_node_uuid: "" - use_existing_network_settings: false - ipmi_gateway: "10.x.xx.xx" - ipmi_netmask: "xx.xx.xx.xx" - ipmi_ip: "10.x.xx.xx" - image_now: true - hypervisor_type: "kvm" + - name: Nodes Imaging with Cluster Creation with manual mode. + nutanix.ncp.ntnx_foundation_central: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + cluster_name: test + # skip_cluster_creation: false #set this to true to skip cluster creation + common_network_settings: + cvm_dns_servers: + - 10.x.xx.xx + hypervisor_dns_servers: + - 10.x.xx.xx + cvm_ntp_servers: + - ntp + hypervisor_ntp_servers: + - ntp + nodes_list: + - manual_mode: + cvm_gateway: 10.xx.xx.xx + cvm_netmask: xx.xx.xx.xx + cvm_ip: 10.x.xx.xx + hypervisor_gateway: 10.x.xx.xxx + hypervisor_netmask: xx.xx.xx.xx + hypervisor_ip: 10.x.x.xx + hypervisor_hostname: Host-1 + imaged_node_uuid: + use_existing_network_settings: false + ipmi_gateway: 10.x.xx.xx + ipmi_netmask: xx.xx.xx.xx + ipmi_ip: 10.x.xx.xx + image_now: true + hypervisor_type: kvm - - manual_mode: - cvm_gateway: "10.xx.xx.xx" - cvm_netmask: "xx.xx.xx.xx" - cvm_ip: "10.x.xx.xx" - hypervisor_gateway: "10.x.xx.xxx" - hypervisor_netmask: "xx.xx.xx.xx" - hypervisor_ip: "10.x.x.xx" - hypervisor_hostname: "Host-2" - imaged_node_uuid: "" - use_existing_network_settings: false - ipmi_gateway: "10.x.xx.xx" - ipmi_netmask: "xx.xx.xx.xx" - ipmi_ip: "10.x.xx.xx" - image_now: true - hypervisor_type: "kvm" + - manual_mode: + cvm_gateway: 10.xx.xx.xx + cvm_netmask: xx.xx.xx.xx + cvm_ip: 10.x.xx.xx + hypervisor_gateway: 10.x.xx.xxx + hypervisor_netmask: xx.xx.xx.xx + hypervisor_ip: 10.x.x.xx + hypervisor_hostname: Host-2 + imaged_node_uuid: + use_existing_network_settings: false + ipmi_gateway: 10.x.xx.xx + ipmi_netmask: xx.xx.xx.xx + ipmi_ip: 10.x.xx.xx + image_now: true + hypervisor_type: kvm - redundancy_factor: 2 - aos_package_url: "" - hypervisor_iso_details: - url: "" - register: output + redundancy_factor: 2 + aos_package_url: + hypervisor_iso_details: + url: + 
register: output - - name: Nodes Imaging without Cluster Creation with discovery mode. - ntnx_foundation_central: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - cluster_name: "test" - skip_cluster_creation: true - common_network_settings: - cvm_dns_servers: - - 10.x.xx.xx - hypervisor_dns_servers: - - 10.x.xx.xx - cvm_ntp_servers: - - "ntp" - hypervisor_ntp_servers: - - "ntp" - nodes_list: - - discovery_mode: - node_serial: "" - - discovery_mode: - node_serial: "" - - discovery_mode: - node_serial: "" - discovery_override: - cvm_ip: + - name: Nodes Imaging without Cluster Creation with discovery mode. + nutanix.ncp.ntnx_foundation_central: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + cluster_name: test + skip_cluster_creation: true + common_network_settings: + cvm_dns_servers: + - 10.x.xx.xx + hypervisor_dns_servers: + - 10.x.xx.xx + cvm_ntp_servers: + - ntp + hypervisor_ntp_servers: + - ntp + nodes_list: + - discovery_mode: + node_serial: + - discovery_mode: + node_serial: + - discovery_mode: + node_serial: + discovery_override: + cvm_ip: - redundancy_factor: 2 - aos_package_url: "" - hypervisor_iso_details: - url: "" - register: output + redundancy_factor: 2 + aos_package_url: + hypervisor_iso_details: + url: + register: output - - name: output of list - debug: - msg: '{{ output }}' + - name: Output of list + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/fc/imaged_cluster_info.yml b/examples/fc/imaged_cluster_info.yml index 6e23546a7..f6fe53e88 100644 --- a/examples/fc/imaged_cluster_info.yml +++ b/examples/fc/imaged_cluster_info.yml @@ -2,39 +2,36 @@ - name: Imaged Clusters Playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp - tasks: - - name: Imaged-Cluster details with imaged_cluster_uuid - ntnx_foundation_central_imaged_clusters_info: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - imaged_cluster_uuid: "" - register: output + - name: Imaged-Cluster details with imaged_cluster_uuid + nutanix.ncp.ntnx_foundation_central_imaged_clusters_info: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + imaged_cluster_uuid: + register: output - - name: Imaged-Cluster details with filters - ntnx_foundation_central_imaged_clusters_info: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - filters: - archived: true - register: output + - name: Imaged-Cluster details with filters + nutanix.ncp.ntnx_foundation_central_imaged_clusters_info: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filters: + archived: true + register: output - - name: Imaged-Cluster details with custom filter - ntnx_foundation_central_imaged_clusters_info: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - custom_filter: - cvm_gateway: "" - register: output + - name: Imaged-Cluster details with custom filter + nutanix.ncp.ntnx_foundation_central_imaged_clusters_info: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + custom_filter: + cvm_gateway: + register: output - - name: 
details of imaged-clusters - debug: - msg: '{{ output }}' + - name: Details of imaged-clusters + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/fc/imaged_nodes_info.yml b/examples/fc/imaged_nodes_info.yml index 3eda3012d..0cdf06ae3 100644 --- a/examples/fc/imaged_nodes_info.yml +++ b/examples/fc/imaged_nodes_info.yml @@ -2,39 +2,36 @@ - name: Imaged Nodes Playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp - tasks: - - name: Imaged-Node details with imaged_node_uuid - ntnx_foundation_central_imaged_nodes_info: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - imaged_node_uuid: "" - register: output + - name: Imaged-Node details with imaged_node_uuid + nutanix.ncp.ntnx_foundation_central_imaged_nodes_info: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + imaged_node_uuid: + register: output - - name: Imaged-Node details with filters - ntnx_foundation_central_imaged_nodes_info: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - filters: - node_state: "STATE_IMAGING" - register: output + - name: Imaged-Node details with filters + nutanix.ncp.ntnx_foundation_central_imaged_nodes_info: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filters: + node_state: STATE_IMAGING + register: output - - name: Imaged-Node details with custom filter - ntnx_foundation_central_imaged_nodes_info: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - custom_filter: - model: "" - register: output + - name: Imaged-Node details with custom filter + nutanix.ncp.ntnx_foundation_central_imaged_nodes_info: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + custom_filter: + model: + register: output - - name: details of imaged node - debug: - msg: '{{ output }}' + - name: Details of imaged node + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/fip.yml b/examples/fip.yml index 0fb7976c2..985a8b62b 100644 --- a/examples/fip.yml +++ b/examples/fip.yml @@ -2,8 +2,6 @@ - name: FIP playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,20 +10,20 @@ validate_certs: false tasks: - name: Setting Variables - set_fact: + ansible.builtin.set_fact: external_subnet_name: "" vm_name: "" - name: Create floating ip with external subnet uuid - ntnx_floating_ips: + nutanix.ncp.ntnx_floating_ips: state: present - wait: True + wait: true external_subnet: name: "{{ external_subnet_name }}" register: result - name: Assign floating ip to vm - ntnx_floating_ips: + nutanix.ncp.ntnx_floating_ips: state: present external_subnet: name: "{{ external_subnet.name }}" @@ -34,7 +32,7 @@ register: result - name: Delete all created floating ips - ntnx_floating_ips: + nutanix.ncp.ntnx_floating_ips: state: absent fip_uuid: "{{ result.fip_uuid }}" register: result diff --git a/examples/fip_info.yml b/examples/fip_info.yml index 3d92c2c20..aa3e9cbd2 100644 --- a/examples/fip_info.yml +++ b/examples/fip_info.yml @@ -2,8 +2,6 @@ - name: FIP_Info playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: 
nutanix_host: @@ -12,27 +10,25 @@ validate_certs: false tasks: - - name: List floating_ips using ascending ip sorting and floating_ip filter - ntnx_floating_ips_info: + nutanix.ncp.ntnx_floating_ips_info: filter: floating_ip: "10.0.1.2" kind: floating_ip sort_order: "ASCENDING" sort_attribute: "floating_ip" register: result - ignore_errors: True + ignore_errors: true - name: List floating_ips using length and offset - ntnx_floating_ips_info: + nutanix.ncp.ntnx_floating_ips_info: length: 3 offset: 0 register: result - ignore_errors: True - + ignore_errors: true - name: List floating_ips using filter and custom_filter - ntnx_floating_ips_info: + nutanix.ncp.ntnx_floating_ips_info: filter: name: custom_filter: diff --git a/examples/foundation/get_images_info.yml b/examples/foundation/get_images_info.yml index 9ee06f18b..31b5095e7 100644 --- a/examples/foundation/get_images_info.yml +++ b/examples/foundation/get_images_info.yml @@ -1,21 +1,22 @@ +--- # pull hypervisor and nos packages info from FVM - name: Get hypervisor and nos packages info from FVM hosts: localhost gather_facts: false - collections: - - nutanix.ncp tasks: - - name: get hypervisor images info from foundation - ntnx_foundation_hypervisor_images_info: - nutanix_host: "10.xx.xx.xx" + - name: Get hypervisor images info from foundation + nutanix.ncp.ntnx_foundation_hypervisor_images_info: + nutanix_host: 10.xx.xx.xx register: hyp - - name: get aos images info from foundation - ntnx_foundation_aos_packages_info: - nutanix_host: "10.xx.xx.xx" + - name: Get aos images info from foundation + nutanix.ncp.ntnx_foundation_aos_packages_info: + nutanix_host: 10.xx.xx.xx register: nos - - debug: + - name: Print available hypervisor image details + ansible.builtin.debug: msg: "{{ hyp }}" - - debug: + - name: Print available NOS image details + ansible.builtin.debug: msg: "{{ nos }}" diff --git a/examples/foundation/image_nodes.yml b/examples/foundation/image_nodes.yml index 6e2b7cb58..0e486f6c0 100644 --- a/examples/foundation/image_nodes.yml +++ b/examples/foundation/image_nodes.yml @@ -1,63 +1,63 @@ +--- # Here we will image three kind of nodes with different methods. # We will image one node using manual mode. Second node, which can be dos running node, will be imaged using discovery mode using cvm # Third node, which can be nutanix imaged(aos running) node, will be imaged using discovery mode using IPMI - name: Image nodes hosts: localhost gather_facts: false - collections: - - nutanix.ncp tasks: - - name: Image nodes using manual and discovery modes. Create cluster - ntnx_foundation: - timeout: 4500 - nutanix_host: "10.xx.xx.xx" - cvm_gateway: "10.xx.xx.xx" - cvm_netmask: "xx.xx.xx.xx" - hypervisor_gateway: "10.xx.xx.xx" - hypervisor_netmask: "xx.xx.xx.xx" - default_ipmi_user: "" - nos_package: "" - blocks: - - block_id: "xxxxx" - nodes: - - manual_mode: - cvm_ip: "10.xx.xx.xx" - cvm_gb_ram: 50 - hypervisor_hostname: "superman1" - ipmi_netmask: "xx.xx.xx.xx" - ipmi_gateway: "10.xx.xx.xx" - ipmi_ip: "10.xx.xx.xx" - ipmi_password: "" - hypervisor: "kvm" - hypervisor_ip: "10.xx.xx.xx" - node_position: "A" - #dos node using cvm and discover it using discovery mode. Here we have to provide hypervisor details mandatorily as its dos nodes. - #You can skip hypervisor details incase of aos running node and discovery mode. AOS running nodes have hypervisor running and network - #configuration is pulled internally. 
- - discovery_mode: - cvm_gb_ram: 50 - node_serial: "xxxxxx" - device_hint: "vm_installer" - discovery_override: - hypervisor_hostname: "superman2" - hypervisor_ip: "10.xx.xx.xx" - cvm_ip: "10.xx.xx.xx" - hypervisor: "kvm" - #image aos running node using ipmi and discover it using discovery mode - - discovery_mode: - cvm_gb_ram: 50 - ipmi_password: "" - node_serial: "xxxxxx" - discovery_override: - hypervisor_hostname: "superman3" - clusters: + - name: Image nodes using manual and discovery modes. Create cluster + nutanix.ncp.ntnx_foundation: + timeout: 4500 + nutanix_host: 10.xx.xx.xx + cvm_gateway: 10.xx.xx.xx + cvm_netmask: xx.xx.xx.xx + hypervisor_gateway: 10.xx.xx.xx + hypervisor_netmask: xx.xx.xx.xx + default_ipmi_user: + nos_package: + blocks: + - block_id: xxxxx + nodes: + - manual_mode: + cvm_ip: 10.xx.xx.xx + cvm_gb_ram: 50 + hypervisor_hostname: superman1 + ipmi_netmask: xx.xx.xx.xx + ipmi_gateway: 10.xx.xx.xx + ipmi_ip: 10.xx.xx.xx + ipmi_password: + hypervisor: kvm + hypervisor_ip: 10.xx.xx.xx + node_position: A + # dos node using cvm and discover it using discovery mode. Here we have to provide hypervisor details mandatorily as its dos nodes. + # You can skip hypervisor details incase of aos running node and discovery mode. AOS running nodes have hypervisor running and network + # configuration is pulled internally. + - discovery_mode: + cvm_gb_ram: 50 + node_serial: xxxxxx + device_hint: vm_installer + discovery_override: + hypervisor_hostname: superman2 + hypervisor_ip: 10.xx.xx.xx + cvm_ip: 10.xx.xx.xx + hypervisor: kvm + # image aos running node using ipmi and discover it using discovery mode + - discovery_mode: + cvm_gb_ram: 50 + ipmi_password: + node_serial: xxxxxx + discovery_override: + hypervisor_hostname: superman3 + clusters: - redundancy_factor: 2 cluster_members: - - "10.xx.xx.xx" - - "10.xx.xx.xx" - - "10.xx.xx.xx" - name: "test-cluster" - register: output + - 10.xx.xx.xx + - 10.xx.xx.xx + - 10.xx.xx.xx + name: test-cluster + register: output - - debug: - msg: '{{ output }}' + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/foundation/image_upload.yml b/examples/foundation/image_upload.yml index cb1463240..cd4d9171b 100644 --- a/examples/foundation/image_upload.yml +++ b/examples/foundation/image_upload.yml @@ -1,22 +1,22 @@ +--- # Here this will upload image from local machine (where this script runs) to the FVM - name: Upload images hosts: localhost gather_facts: false - collections: - - nutanix.ncp tasks: - name: Image upload # check_mode: yes - ntnx_foundation_image_upload: - nutanix_host: "10.xx.xx.xx" + nutanix.ncp.ntnx_foundation_image_upload: + nutanix_host: 10.xx.xx.xx # change state to "absent" to delete this image. 
For delete, source is not required state: present - source: "" - filename: "" + source: + filename: # value of installer_type must be one of: kvm, esx, hyperv, xen or nos installer_type: kvm timeout: 1800 register: upload_result - - debug: - msg: '{{ upload_result }}' + - name: Print upload result + ansible.builtin.debug: + msg: "{{ upload_result }}" diff --git a/examples/foundation/ipmi_config.yml b/examples/foundation/ipmi_config.yml index 20fce2b9f..7f7342a5a 100644 --- a/examples/foundation/ipmi_config.yml +++ b/examples/foundation/ipmi_config.yml @@ -1,23 +1,23 @@ +--- # Here we will configure IPMI of one node - name: Configure IPMI hosts: localhost gather_facts: false - collections: - - nutanix.ncp tasks: - - name: configure ipmi + - name: Configure ipmi # check_mode: yes - ntnx_foundation_bmc_ipmi_config: - nutanix_host: "10.xx.xx.xx" - ipmi_user: "" - ipmi_password: "" - ipmi_netmask: "xx.xx.xx.xx" - ipmi_gateway: "10.xx.xx.xx" - blocks: + nutanix.ncp.ntnx_foundation_bmc_ipmi_config: + nutanix_host: 10.xx.xx.xx + ipmi_user: + ipmi_password: + ipmi_netmask: xx.xx.xx.xx + ipmi_gateway: 10.xx.xx.xx + blocks: - nodes: - ipmi_mac: xx:xx:xx:xx:xx:xx - ipmi_ip: "10.xx.xx.xx" - register: output + ipmi_ip: 10.xx.xx.xx + register: output - - debug: - msg: '{{ output }}' + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/foundation/node_discovery_network_info.yml b/examples/foundation/node_discovery_network_info.yml index 2f81eb083..526d77338 100644 --- a/examples/foundation/node_discovery_network_info.yml +++ b/examples/foundation/node_discovery_network_info.yml @@ -1,25 +1,25 @@ +--- # Here we will discover nodes and also get node network info of particular some discovered nodes - name: Discover nodes and get their network info hosts: localhost gather_facts: false - collections: - - nutanix.ncp tasks: - - name: Discover all nodes - ntnx_foundation_discover_nodes_info: - nutanix_host: "10.xx.xx.xx" + - name: Discover all nodes + nutanix.ncp.ntnx_foundation_discover_nodes_info: + nutanix_host: 10.xx.xx.xx # unskip line 12 to include configured(nodes part of cluster) nodes in the output # include_configured: true - register: discovered_nodes + register: discovered_nodes - # get network info of nodes discovered from ntnx_foundation_discover_nodes_info module - - name: Get node network info of some discovered nodes - ntnx_foundation_node_network_info: - nutanix_host: "10.xx.xx.xx" - nodes: - - "{{discovered_nodes.blocks.0.nodes.0.ipv6_address}}" - - "{{discovered_nodes.blocks.1.nodes.0.ipv6_address}}" - register: result + # get network info of nodes discovered from ntnx_foundation_discover_nodes_info module + - name: Get node network info of some discovered nodes + nutanix.ncp.ntnx_foundation_node_network_info: + nutanix_host: 10.xx.xx.xx + nodes: + - "{{discovered_nodes.blocks.0.nodes.0.ipv6_address}}" + - "{{discovered_nodes.blocks.1.nodes.0.ipv6_address}}" + register: result - - debug: - msg: "{{ result }}" + - name: Print node network info + ansible.builtin.debug: + msg: "{{ result }}" diff --git a/examples/hosts_info.yml b/examples/hosts_info.yml index f478b5249..0d204ea7f 100644 --- a/examples/hosts_info.yml +++ b/examples/hosts_info.yml @@ -2,8 +2,6 @@ - name: Hosts_Info playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,28 +10,28 @@ validate_certs: false tasks: - - name: test getting all hosts - ntnx_hosts_info: - register: hosts + - name: Test getting all 
hosts + nutanix.ncp.ntnx_hosts_info: + register: hosts_list - - name: test getting particular host using uuid - ntnx_hosts_info: - host_uuid: '{{ hosts.response.entities[0].metadata.uuid }}' - register: result + - name: Test getting particular host using uuid + nutanix.ncp.ntnx_hosts_info: + host_uuid: "{{ hosts_list.response.entities[0].metadata.uuid }}" + register: result - - name: List hosts using length, offset, sort order and name sort attribute - ntnx_hosts_info: - length: 2 - offset: 0 - sort_order: "ASCENDING" - sort_attribute: "name" - register: result - ignore_errors: True + - name: List hosts using length, offset, sort order and name sort attribute + nutanix.ncp.ntnx_hosts_info: + length: 2 + offset: 0 + sort_order: "ASCENDING" + sort_attribute: "name" + register: result + ignore_errors: true - - name: List hosts using filter and custom_filter - ntnx_hosts_info: - filter: - name: - custom_filter: - serial_number: - register: result + - name: List hosts using filter and custom_filter + nutanix.ncp.ntnx_hosts_info: + filter: + name: + custom_filter: + serial_number: + register: result diff --git a/examples/iaas/iaas.yml b/examples/iaas/iaas.yml index 1275e3521..5c1059560 100644 --- a/examples/iaas/iaas.yml +++ b/examples/iaas/iaas.yml @@ -2,8 +2,6 @@ - name: IaaS Provisioning hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,17 +10,23 @@ validate_certs: false tasks: - name: Include vars file - include_vars: + ansible.builtin.include_vars: file: vars.yml - - include_role: + - name: Include external_subnet role + ansible.builtin.include_role: name: external_subnet - - include_role: + - name: Include vpc role + ansible.builtin.include_role: name: vpc - - include_role: + - name: Include static_route role + ansible.builtin.include_role: name: static_route - - include_role: + - name: Include overlay_subnet role + ansible.builtin.include_role: name: overlay_subnet - - include_role: + - name: Include vm role + ansible.builtin.include_role: name: vm - - include_role: + - name: Include fip role + ansible.builtin.include_role: name: fip diff --git a/examples/iaas/policies_create.yml b/examples/iaas/policies_create.yml index c0150c2c7..9ae92b544 100644 --- a/examples/iaas/policies_create.yml +++ b/examples/iaas/policies_create.yml @@ -2,8 +2,6 @@ - name: Policy based routing - NACLs hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,7 +10,8 @@ validate_certs: false tasks: - name: Include vars file - include_vars: + ansible.builtin.include_vars: file: vars.yml - - include_role: + - name: Include pbr role + ansible.builtin.include_role: name: pbr diff --git a/examples/iaas/policies_delete.yml b/examples/iaas/policies_delete.yml index b2ff1aea0..7bf4ff32f 100644 --- a/examples/iaas/policies_delete.yml +++ b/examples/iaas/policies_delete.yml @@ -2,8 +2,6 @@ - name: Policy based routing - NACLs hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -11,5 +9,6 @@ nutanix_password: validate_certs: false tasks: - - include_role: + - name: Include pbr role + ansible.builtin.include_role: name: pbr_delete diff --git a/examples/iaas/roles/external_subnet/meta/main.yml b/examples/iaas/roles/external_subnet/meta/main.yml index a734b2343..5289004d7 100644 --- a/examples/iaas/roles/external_subnet/meta/main.yml +++ b/examples/iaas/roles/external_subnet/meta/main.yml @@ -1,2 +1,3 @@ 
- collections: - - nutanix.ncp +--- +collections: + - nutanix.ncp diff --git a/examples/iaas/roles/external_subnet/tasks/external_subnet.yml b/examples/iaas/roles/external_subnet/tasks/external_subnet.yml index b4c232d4e..f7c889d7d 100644 --- a/examples/iaas/roles/external_subnet/tasks/external_subnet.yml +++ b/examples/iaas/roles/external_subnet/tasks/external_subnet.yml @@ -1,22 +1,27 @@ --- - - name: Create {{ item.name }} external subnet - ntnx_subnets: - state: present - name: "{{ item.name }}" - external_subnet: - vlan_id: "{{ item.vlan_id }}" - cluster: - name: "{{ cluster.name }}" - enable_nat: "{{ item.eNat }}" - ipam: - network_ip: "{{ item.ip }}" - network_prefix: "{{ item.prefix }}" - gateway_ip: "{{ item.gip }}" - ip_pools: - - start_ip: "{{ item.sip }}" - end_ip: "{{ item.eip }}" - register: external_network - - debug: - msg: - - "name: Ext_Nat" - - "uuid: {{ external_network.subnet_uuid }}" +- name: Start external subnet task + ansible.builtin.debug: + msg: Create {{ item.name }} external subnet + +- name: Create external subnet + nutanix.ncp.ntnx_subnets: + state: present + name: "{{ item.name }}" + external_subnet: + vlan_id: "{{ item.vlan_id }}" + cluster: + name: "{{ cluster.name }}" + enable_nat: "{{ item.eNat }}" + ipam: + network_ip: "{{ item.ip }}" + network_prefix: "{{ item.prefix }}" + gateway_ip: "{{ item.gip }}" + ip_pools: + - start_ip: "{{ item.sip }}" + end_ip: "{{ item.eip }}" + register: external_network +- name: Print external subnet name and uuid + ansible.builtin.debug: + msg: + - "name: Ext_Nat" + - "uuid: {{ external_network.subnet_uuid }}" diff --git a/examples/iaas/roles/external_subnet/tasks/main.yml b/examples/iaas/roles/external_subnet/tasks/main.yml index d0a8dcff9..e1c78b4f7 100644 --- a/examples/iaas/roles/external_subnet/tasks/main.yml +++ b/examples/iaas/roles/external_subnet/tasks/main.yml @@ -1,5 +1,12 @@ --- - name: Inputs for external subnets task - include_tasks: external_subnet.yml + ansible.builtin.include_tasks: external_subnet.yml with_items: - - { name: "{{external_subnet.name}}", vlan_id: "{{external_subnet.vlan_id}}", ip: "{{external_subnet.ip}}", prefix: "{{external_subnet.prefix}}", gip: "{{external_subnet.gip}}", sip: "{{external_subnet.sip}}", eip: "{{external_subnet.eip}}", eNat: "{{external_subnet.eNat}}" } + - name: "{{ external_subnet.name }}" + vlan_id: "{{ external_subnet.vlan_id }}" + ip: "{{ external_subnet.ip }}" + prefix: "{{ external_subnet.prefix }}" + gip: "{{ external_subnet.gip }}" + sip: "{{ external_subnet.sip }}" + eip: "{{ external_subnet.eip }}" + eNat: "{{ external_subnet.eNat }}" diff --git a/examples/iaas/roles/fip/meta/main.yml b/examples/iaas/roles/fip/meta/main.yml index a734b2343..5289004d7 100644 --- a/examples/iaas/roles/fip/meta/main.yml +++ b/examples/iaas/roles/fip/meta/main.yml @@ -1,2 +1,3 @@ - collections: - - nutanix.ncp +--- +collections: + - nutanix.ncp diff --git a/examples/iaas/roles/fip/tasks/fip.yml b/examples/iaas/roles/fip/tasks/fip.yml index 9db0eda42..56f3c36f7 100644 --- a/examples/iaas/roles/fip/tasks/fip.yml +++ b/examples/iaas/roles/fip/tasks/fip.yml @@ -1,13 +1,14 @@ --- - name: Assign Floating IP for "{{ item.vm_name }}" - ntnx_floating_ips: - state: present - external_subnet: - name: "Ext-Nat" - vm: - name: "{{ item.vm_name }}" + nutanix.ncp.ntnx_floating_ips: + state: present + external_subnet: + name: Ext-Nat + vm: + name: "{{ item.vm_name }}" register: fip -- debug: +- name: Print fip uuid and vm name + ansible.builtin.debug: msg: - - "VM name: {{ item.vm_name }}" - - 
"uuid: {{ fip.fip_uuid }}" + - "VM name: {{ item.vm_name }}" + - "uuid: {{ fip.fip_uuid }}" diff --git a/examples/iaas/roles/fip/tasks/main.yml b/examples/iaas/roles/fip/tasks/main.yml index 5f20326a5..aa531a7f6 100644 --- a/examples/iaas/roles/fip/tasks/main.yml +++ b/examples/iaas/roles/fip/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Inputs for Floating IP task - include_tasks: fip.yml + ansible.builtin.include_tasks: fip.yml with_items: - - {vm_name: "Prod-Wordpress-App"} - - {vm_name: "Dev-Wordpress-App"} + - { vm_name: Prod-Wordpress-App } + - { vm_name: Dev-Wordpress-App } diff --git a/examples/iaas/roles/overlay_subnet/meta/main.yml b/examples/iaas/roles/overlay_subnet/meta/main.yml index a734b2343..5289004d7 100644 --- a/examples/iaas/roles/overlay_subnet/meta/main.yml +++ b/examples/iaas/roles/overlay_subnet/meta/main.yml @@ -1,2 +1,3 @@ - collections: - - nutanix.ncp +--- +collections: + - nutanix.ncp diff --git a/examples/iaas/roles/overlay_subnet/tasks/main.yml b/examples/iaas/roles/overlay_subnet/tasks/main.yml index 357cd3989..3831c3133 100644 --- a/examples/iaas/roles/overlay_subnet/tasks/main.yml +++ b/examples/iaas/roles/overlay_subnet/tasks/main.yml @@ -1,14 +1,44 @@ --- - name: Inputs for overlay subnets - include_tasks: overlay_subnet.yml + ansible.builtin.include_tasks: overlay_subnet.yml with_items: - - { name: "{{Prod_SubnetA.name}}", vpc_name: "{{Prod_SubnetA.vpc_name}}", - nip: "{{Prod_SubnetA.nip}}", prefix: "{{Prod_SubnetA.prefix}}", - gip: "{{Prod_SubnetA.gip}}", sip: "{{Prod_SubnetA.sip}}", eip: "{{Prod_SubnetA.eip}}", - domain_name: "{{domain_name}}", dns_servers: "{{dns_servers}}", domain_search: "{{domain_search}}" } - - { name: "{{Prod_SubnetB.name}}", vpc_name: "{{Prod_SubnetB.vpc_name}}", nip: "{{Prod_SubnetB.nip}}", prefix: "{{Prod_SubnetB.prefix}}", gip: "{{Prod_SubnetB.gip}}", sip: "{{Prod_SubnetB.sip}}", eip: "{{Prod_SubnetB.eip}}", - domain_name: "{{domain_name}}", dns_servers: "{{dns_servers}}", domain_search: "{{domain_search}}" } - - { name: "{{Dev_SubnetA.name}}", vpc_name: "{{Dev_SubnetA.vpc_name}}", nip: "{{Dev_SubnetA.nip}}", prefix: "{{Dev_SubnetA.prefix}}", gip: "{{Dev_SubnetA.gip}}", sip: "{{Dev_SubnetA.sip}}", eip: "{{Dev_SubnetA.eip}}", - domain_name: "{{domain_name}}", dns_servers: "{{dns_servers}}", domain_search: "{{domain_search}}" } - - { name: "{{Dev_SubnetB.name}}", vpc_name: "{{Dev_SubnetB.vpc_name}}", nip: "{{Dev_SubnetB.nip}}", prefix: "{{Dev_SubnetB.prefix}}", gip: "{{Dev_SubnetB.gip}}", sip: "{{Dev_SubnetB.sip}}", eip: "{{Dev_SubnetB.eip}}", - domain_name: "{{domain_name}}", dns_servers: "{{dns_servers}}", domain_search: "{{domain_search}}" } + - name: "{{ Prod_SubnetA.name }}" + vpc_name: "{{ Prod_SubnetA.vpc_name }}" + nip: "{{ Prod_SubnetA.nip }}" + prefix: "{{ Prod_SubnetA.prefix }}" + gip: "{{ Prod_SubnetA.gip }}" + sip: "{{ Prod_SubnetA.sip }}" + eip: "{{ Prod_SubnetA.eip }}" + domain_name: "{{ domain_name }}" + dns_servers: "{{ dns_servers }}" + domain_search: "{{ domain_search }}" + - name: "{{ Prod_SubnetB.name }}" + vpc_name: "{{ Prod_SubnetB.vpc_name }}" + nip: "{{ Prod_SubnetB.nip }}" + prefix: "{{ Prod_SubnetB.prefix }}" + gip: "{{ Prod_SubnetB.gip }}" + sip: "{{ Prod_SubnetB.sip }}" + eip: "{{ Prod_SubnetB.eip }}" + domain_name: "{{ domain_name }}" + dns_servers: "{{ dns_servers }}" + domain_search: "{{ domain_search }}" + - name: "{{ Dev_SubnetA.name }}" + vpc_name: "{{ Dev_SubnetA.vpc_name }}" + nip: "{{ Dev_SubnetA.nip }}" + prefix: "{{ Dev_SubnetA.prefix }}" + gip: "{{ Dev_SubnetA.gip }}" + sip: "{{ 
Dev_SubnetA.sip }}" + eip: "{{ Dev_SubnetA.eip }}" + domain_name: "{{ domain_name }}" + dns_servers: "{{ dns_servers }}" + domain_search: "{{ domain_search }}" + - name: "{{ Dev_SubnetB.name }}" + vpc_name: "{{ Dev_SubnetB.vpc_name }}" + nip: "{{ Dev_SubnetB.nip }}" + prefix: "{{ Dev_SubnetB.prefix }}" + gip: "{{ Dev_SubnetB.gip }}" + sip: "{{ Dev_SubnetB.sip }}" + eip: "{{ Dev_SubnetB.eip }}" + domain_name: "{{ domain_name }}" + dns_servers: "{{ dns_servers }}" + domain_search: "{{ domain_search }}" diff --git a/examples/iaas/roles/overlay_subnet/tasks/overlay_subnet.yml b/examples/iaas/roles/overlay_subnet/tasks/overlay_subnet.yml index 50866ba23..1e02cb1d3 100644 --- a/examples/iaas/roles/overlay_subnet/tasks/overlay_subnet.yml +++ b/examples/iaas/roles/overlay_subnet/tasks/overlay_subnet.yml @@ -1,6 +1,10 @@ --- -- name: Create {{ item.name }} overlay subnet - ntnx_subnets: +- name: Start overlay subnet task + ansible.builtin.debug: + msg: Create {{ item.name }} overlay subnet + +- name: Create overlay subnet + nutanix.ncp.ntnx_subnets: state: present name: "{{ item.name }}" overlay_subnet: @@ -11,14 +15,15 @@ network_prefix: "{{ item.prefix }}" gateway_ip: "{{ item.gip }}" ip_pools: - - start_ip: "{{ item.sip }}" - end_ip: "{{ item.eip }}" + - start_ip: "{{ item.sip }}" + end_ip: "{{ item.eip }}" dhcp: - dns_servers: "{{ item.dns_servers }}" - domain_name: "{{ item.domain_name }}" - domain_search: "{{ item.domain_search }}" + dns_servers: "{{ item.dns_servers }}" + domain_name: "{{ item.domain_name }}" + domain_search: "{{ item.domain_search }}" register: overlay -- debug: +- name: Print overlay name and uuid + ansible.builtin.debug: msg: - "name: {{ overlay.response.status.name }}" - "uuid: {{ overlay.subnet_uuid }}" diff --git a/examples/iaas/roles/pbr/meta/main.yml b/examples/iaas/roles/pbr/meta/main.yml index a734b2343..5289004d7 100644 --- a/examples/iaas/roles/pbr/meta/main.yml +++ b/examples/iaas/roles/pbr/meta/main.yml @@ -1,2 +1,3 @@ - collections: - - nutanix.ncp +--- +collections: + - nutanix.ncp diff --git a/examples/iaas/roles/pbr/tasks/main.yml b/examples/iaas/roles/pbr/tasks/main.yml index 72428dd2b..6c1f2e67b 100644 --- a/examples/iaas/roles/pbr/tasks/main.yml +++ b/examples/iaas/roles/pbr/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Include PBR task - include_tasks: pbr.yml + ansible.builtin.include_tasks: pbr.yml with_items: - - { vpc_name: "{{vpc_names[0]}}", priority: 101, nip: 10.1.2.0, prefix: 24 } - - { vpc_name: "{{vpc_names[1]}}", priority: 101, nip: 10.1.2.0, prefix: 24 } + - { vpc_name: "{{ vpc_names[0] }}", priority: 101, nip: 10.1.2.0, prefix: 24 } + - { vpc_name: "{{ vpc_names[1] }}", priority: 101, nip: 10.1.2.0, prefix: 24 } diff --git a/examples/iaas/roles/pbr/tasks/pbr.yml b/examples/iaas/roles/pbr/tasks/pbr.yml index 760c69621..1cd490972 100644 --- a/examples/iaas/roles/pbr/tasks/pbr.yml +++ b/examples/iaas/roles/pbr/tasks/pbr.yml @@ -1,22 +1,23 @@ --- - name: Create PBR for vpc "{{ item.vpc_name }}" - ntnx_pbrs: + nutanix.ncp.ntnx_pbrs: state: present vpc: name: "{{ item.vpc_name }}" priority: "{{ item.priority }}" source: network: - ip: "{{item.nip}}" - prefix: "{{item.prefix}}" + ip: "{{ item.nip }}" + prefix: "{{ item.prefix }}" destination: - external: True + external: true protocol: - any: True + any: true action: - deny: True + deny: true register: pbr -- debug: +- name: Print vpc name and pbr uuid + ansible.builtin.debug: msg: - - "vpc_name: {{ item.vpc_name }}" - - "uuid: {{ pbr.pbr_uuid }}" + - "vpc_name: {{ item.vpc_name }}" + - "uuid: {{ 
pbr.pbr_uuid }}" diff --git a/examples/iaas/roles/pbr_delete/meta/main.yml b/examples/iaas/roles/pbr_delete/meta/main.yml index a734b2343..5289004d7 100644 --- a/examples/iaas/roles/pbr_delete/meta/main.yml +++ b/examples/iaas/roles/pbr_delete/meta/main.yml @@ -1,2 +1,3 @@ - collections: - - nutanix.ncp +--- +collections: + - nutanix.ncp
diff --git a/examples/iaas/roles/pbr_delete/tasks/main.yml b/examples/iaas/roles/pbr_delete/tasks/main.yml index 0129299a5..209c40f68 100644 --- a/examples/iaas/roles/pbr_delete/tasks/main.yml +++ b/examples/iaas/roles/pbr_delete/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Include PBR task - include_tasks: pbr_delete.yml + ansible.builtin.include_tasks: pbr_delete.yml with_items: - { pbr_uuid: 8c6ce427-a63d-482d-bf59-b8a14a062c1d } - { pbr_uuid: fb6fb539-7b24-48a1-b285-9a1fb8b97e5f }
diff --git a/examples/iaas/roles/pbr_delete/tasks/pbr_delete.yml b/examples/iaas/roles/pbr_delete/tasks/pbr_delete.yml index dd2562cf1..d758c66f1 100644 --- a/examples/iaas/roles/pbr_delete/tasks/pbr_delete.yml +++ b/examples/iaas/roles/pbr_delete/tasks/pbr_delete.yml @@ -1,9 +1,10 @@ --- - name: Delete PBR "{{ item.pbr_uuid }}" - ntnx_pbrs: + nutanix.ncp.ntnx_pbrs: state: absent pbr_uuid: "{{ item.pbr_uuid }}" register: pbr -- debug: +- name: Print pbr uuid + ansible.builtin.debug: msg: - - "uuid: {{ pbr.pbr_uuid }}" + - "uuid: {{ pbr.pbr_uuid }}"
diff --git a/examples/iaas/roles/static_route/meta/main.yml b/examples/iaas/roles/static_route/meta/main.yml index a734b2343..5289004d7 100644 --- a/examples/iaas/roles/static_route/meta/main.yml +++ b/examples/iaas/roles/static_route/meta/main.yml @@ -1,2 +1,3 @@ - collections: - - nutanix.ncp +--- +collections: + - nutanix.ncp
diff --git a/examples/iaas/roles/static_route/tasks/main.yml b/examples/iaas/roles/static_route/tasks/main.yml index c4a177a8f..3bfe9cf24 100644 --- a/examples/iaas/roles/static_route/tasks/main.yml +++ b/examples/iaas/roles/static_route/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Inputs for static routes task - include_tasks: static_route.yml + ansible.builtin.include_tasks: static_route.yml with_items: - - { vpc_uuid: "{{vpc_uuids[0]}}", subnet_name: "{{external_subnet.name}}", destination: "10.2.2.0/24"} - - { vpc_uuid: "{{vpc_uuids[1]}}", subnet_name: "{{external_subnet.name}}", destination: "10.2.3.0/24"} + - { vpc_uuid: "{{ vpc_uuids[0] }}", subnet_name: "{{ external_subnet.name }}", destination: 10.2.2.0/24 } + - { vpc_uuid: "{{ vpc_uuids[1] }}", subnet_name: "{{ external_subnet.name }}", destination: 10.2.3.0/24 }
diff --git a/examples/iaas/roles/static_route/tasks/static_route.yml b/examples/iaas/roles/static_route/tasks/static_route.yml index 924571431..fbe1202f9 100644 --- a/examples/iaas/roles/static_route/tasks/static_route.yml +++ b/examples/iaas/roles/static_route/tasks/static_route.yml @@ -1,14 +1,15 @@ --- - - name: Create static route - ntnx_static_routes: - state: present - vpc_uuid: "{{ item.vpc_uuid }}" - static_routes: - - destination: "{{ item.destination }}" - next_hop: - external_subnet_ref: - name: "{{ item.subnet_name }}" - register: static_route - - debug: - msg: - - "uuid: {{ static_route.response.metadata.uuid }}" +- name: Create static route + nutanix.ncp.ntnx_static_routes: + state: present + vpc_uuid: "{{ item.vpc_uuid }}" + static_routes: + - destination: "{{ item.destination }}" + next_hop: + external_subnet_ref: + name: "{{ item.subnet_name }}" + register: static_route +- name: Print static route uuid + ansible.builtin.debug: + msg: + - "uuid: {{ 
static_route.response.metadata.uuid }}" diff --git a/examples/iaas/roles/vm/meta/main.yml b/examples/iaas/roles/vm/meta/main.yml index a734b2343..5289004d7 100644 --- a/examples/iaas/roles/vm/meta/main.yml +++ b/examples/iaas/roles/vm/meta/main.yml @@ -1,2 +1,3 @@ - collections: - - nutanix.ncp +--- +collections: + - nutanix.ncp diff --git a/examples/iaas/roles/vm/tasks/main.yml b/examples/iaas/roles/vm/tasks/main.yml index 7c3949250..829884979 100644 --- a/examples/iaas/roles/vm/tasks/main.yml +++ b/examples/iaas/roles/vm/tasks/main.yml @@ -1,8 +1,28 @@ --- - name: Inputs for vm task - include_tasks: vm.yml + ansible.builtin.include_tasks: vm.yml with_items: - - {name: "Prod-Wordpress-App", desc: "Prod-Wordpress-App", is_connected: True, subnet_name: "{{Prod_SubnetA.name}}", image_name: "wordpress-appserver", private_ip: ""} - - {name: "Prod-Wordpress-DB", desc: "Prod-Wordpress-DB", is_connected: True, subnet_name: "{{Prod_SubnetB.name}}", image_name: "wordpress-db", private_ip: 10.1.2.5} - - {name: "Dev-Wordpress-App", desc: "Dev-Wordpress-App", is_connected: True, subnet_name: "{{Dev_SubnetA.name}}", image_name: "wordpress-appserver", private_ip: ""} - - {name: "Dev-Wordpress-DB", desc: "Dev-Wordpress-DB", is_connected: True, subnet_name: "{{Dev_SubnetB.name}}", image_name: "wordpress-db", private_ip: 10.1.2.5} + - name: Prod-Wordpress-App + desc: Prod-Wordpress-App + is_connected: true + subnet_name: "{{ Prod_SubnetA.name }}" + image_name: wordpress-appserver + private_ip: "" + - name: Prod-Wordpress-DB + desc: Prod-Wordpress-DB + is_connected: true + subnet_name: "{{ Prod_SubnetB.name }}" + image_name: wordpress-db + private_ip: 10.1.2.5 + - name: Dev-Wordpress-App + desc: Dev-Wordpress-App + is_connected: true + subnet_name: "{{ Dev_SubnetA.name }}" + image_name: wordpress-appserver + private_ip: "" + - name: Dev-Wordpress-DB + desc: Dev-Wordpress-DB + is_connected: true + subnet_name: "{{ Dev_SubnetB.name }}" + image_name: wordpress-db + private_ip: 10.1.2.5 diff --git a/examples/iaas/roles/vm/tasks/vm.yml b/examples/iaas/roles/vm/tasks/vm.yml index ca2332e4c..e07e64578 100644 --- a/examples/iaas/roles/vm/tasks/vm.yml +++ b/examples/iaas/roles/vm/tasks/vm.yml @@ -1,6 +1,10 @@ --- -- name: Create "{{ item.name }}" VM - ntnx_vms: +- name: Start vm task + ansible.builtin.debug: + msg: Create "{{ item.name }}" VM + +- name: Create VM + nutanix.ncp.ntnx_vms: state: present name: "{{ item.name }}" desc: "{{ item.desc }}" @@ -12,13 +16,14 @@ name: "{{ item.subnet_name }}" private_ip: "{{ item.private_ip }}" disks: - - type: "DISK" - size_gb: 30 - bus: "SATA" - clone_image: - name: "{{ item.image_name }}" + - type: DISK + size_gb: 30 + bus: SATA + clone_image: + name: "{{ item.image_name }}" register: vm -- debug: +- name: Print vm name and uuid + ansible.builtin.debug: msg: - - "name: {{ vm.response.status.name }}" - - "uuid: {{ vm.vm_uuid }}" + - "name: {{ vm.response.status.name }}" + - "uuid: {{ vm.vm_uuid }}" diff --git a/examples/iaas/roles/vpc/meta/main.yml b/examples/iaas/roles/vpc/meta/main.yml index a734b2343..5289004d7 100644 --- a/examples/iaas/roles/vpc/meta/main.yml +++ b/examples/iaas/roles/vpc/meta/main.yml @@ -1,2 +1,3 @@ - collections: - - nutanix.ncp +--- +collections: + - nutanix.ncp diff --git a/examples/iaas/roles/vpc/tasks/main.yml b/examples/iaas/roles/vpc/tasks/main.yml index 21ce27258..a15150041 100644 --- a/examples/iaas/roles/vpc/tasks/main.yml +++ b/examples/iaas/roles/vpc/tasks/main.yml @@ -1,8 +1,9 @@ --- -- set_fact: +- name: Define vpc_uuids variable + 
ansible.builtin.set_fact: vpc_uuids: [] - name: Inputs vpcs task - include_tasks: vpc.yml + ansible.builtin.include_tasks: vpc.yml with_items: - - { name: "{{vpc_names[0]}}", subnet_name: "{{external_subnet.name}}"} - - { name: "{{vpc_names[1]}}", subnet_name: "{{external_subnet.name}}"} + - { name: "{{ vpc_names[0] }}", subnet_name: "{{ external_subnet.name }}" } + - { name: "{{ vpc_names[1] }}", subnet_name: "{{ external_subnet.name }}" } diff --git a/examples/iaas/roles/vpc/tasks/vpc.yml b/examples/iaas/roles/vpc/tasks/vpc.yml index a87d5b5d2..9513ff964 100644 --- a/examples/iaas/roles/vpc/tasks/vpc.yml +++ b/examples/iaas/roles/vpc/tasks/vpc.yml @@ -1,14 +1,20 @@ --- -- name: Create {{ item.name }} VPC with external connectivity to "{{ item.subnet_name }}" - ntnx_vpcs: - state: present - name: "{{ item.name }}" - external_subnets: - - subnet_name: "{{ item.subnet_name }}" +- name: Start vpc task + ansible.builtin.debug: + msg: Create {{ item.name }} VPC with external connectivity to "{{ item.subnet_name }}" + +- name: Create VPC with external connectivity + nutanix.ncp.ntnx_vpcs: + state: present + name: "{{ item.name }}" + external_subnets: + - subnet_name: "{{ item.subnet_name }}" register: vpc -- debug: +- name: Print vpc response + ansible.builtin.debug: msg: - - "name: {{ vpc.response.status.name }}" - - "uuid: {{ vpc.vpc_uuid }}" -- set_fact: - vpc_uuids: "{{ vpc_uuids + [ vpc.vpc_uuid ] }}" + - "name: {{ vpc.response.status.name }}" + - "uuid: {{ vpc.vpc_uuid }}" +- name: Define vpc_uuids variable + ansible.builtin.set_fact: + vpc_uuids: "{{ vpc_uuids + [vpc.vpc_uuid] }}" diff --git a/examples/iaas/vars.yml b/examples/iaas/vars.yml index ff9daee05..86cf7a804 100644 --- a/examples/iaas/vars.yml +++ b/examples/iaas/vars.yml @@ -9,41 +9,41 @@ external_subnet: gip: 10.44.3.193 sip: 10.44.3.198 eip: 10.44.3.207 - eNat: True -vpc_names: ["Prod", "Dev"] -domain_search: ["calm.nutanix.com", "eng.nutanix.com"] -dns_servers: ["8.8.8.8", "8.8.8.4"] -domain_name: "calm.nutanix.com" + eNat: true +vpc_names: [Prod, Dev] +domain_search: [calm.nutanix.com, eng.nutanix.com] +dns_servers: [8.8.8.8, 8.8.8.4] +domain_name: calm.nutanix.com Prod_SubnetA: - name: Prod_SubnetA - vpc_name: Prod - nip: 10.1.1.0 - prefix: 24 - gip: 10.1.1.1 - sip: 10.1.1.2 - eip: 10.1.1.5 + name: Prod_SubnetA + vpc_name: Prod + nip: 10.1.1.0 + prefix: 24 + gip: 10.1.1.1 + sip: 10.1.1.2 + eip: 10.1.1.5 Prod_SubnetB: - name: Prod_SubnetB - vpc_name: Prod - nip: 10.1.2.0 - prefix: 24 - gip: 10.1.2.1 - sip: 10.1.2.2 - eip: 10.1.2.5 + name: Prod_SubnetB + vpc_name: Prod + nip: 10.1.2.0 + prefix: 24 + gip: 10.1.2.1 + sip: 10.1.2.2 + eip: 10.1.2.5 Dev_SubnetA: - name: Dev_SubnetA - vpc_name: Dev - nip: 10.1.1.0 - prefix: 24 - gip: 10.1.1.1 - sip: 10.1.1.2 - eip: 10.1.1.5 + name: Dev_SubnetA + vpc_name: Dev + nip: 10.1.1.0 + prefix: 24 + gip: 10.1.1.1 + sip: 10.1.1.2 + eip: 10.1.1.5 Dev_SubnetB: - name: Dev_SubnetB - vpc_name: Dev - nip: 10.1.2.0 - prefix: 24 - gip: 10.1.2.1 - sip: 10.1.2.2 - eip: 10.1.2.5 + name: Dev_SubnetB + vpc_name: Dev + nip: 10.1.2.0 + prefix: 24 + gip: 10.1.2.1 + sip: 10.1.2.2 + eip: 10.1.2.5 diff --git a/examples/images.yml b/examples/images.yml index 82f11491d..5624ac958 100644 --- a/examples/images.yml +++ b/examples/images.yml @@ -2,8 +2,6 @@ - name: Images playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,14 +10,14 @@ validate_certs: false tasks: - name: Setting Variables - set_fact: + ansible.builtin.set_fact: 
image_uuid: "" source_path: "" source_uri: "" - clusters_name: "" + clusters_name: "" - - name: create image from local workstation - ntnx_images: + - name: Create image from local workstation + nutanix.ncp.ntnx_images: state: "present" source_path: "{{source_path}}" clusters: @@ -38,8 +36,8 @@ product_version: "1.2.0" wait: true - - name: create image from with source as remote server file location - ntnx_images: + - name: Create image with source as remote server file location + nutanix.ncp.ntnx_images: state: "present" source_uri: "{{source_uri}}" clusters: @@ -58,8 +56,8 @@ product_version: "1.2.0" wait: true - - name: override categories of existing image - ntnx_images: + - name: Override categories of existing image + nutanix.ncp.ntnx_images: state: "present" image_uuid: "{{image-uuid}}" categories: @@ -69,15 +67,15 @@ - Backup wait: true - - name: dettach all categories from existing image - ntnx_images: + - name: Detach all categories from existing image + nutanix.ncp.ntnx_images: state: "present" image_uuid: "00000000-0000-0000-0000-000000000000" remove_categories: true wait: true - - name: delete existing image - ntnx_images: + - name: Delete existing image + nutanix.ncp.ntnx_images: state: "absent" image_uuid: "00000000-0000-0000-0000-000000000000" wait: true
diff --git a/examples/inventory/nutanix.yaml b/examples/inventory/nutanix.yaml index 89f3c20ce..16abac947 100644 --- a/examples/inventory/nutanix.yaml +++ b/examples/inventory/nutanix.yaml @@ -1,13 +1,14 @@ +--- plugin: nutanix.ncp.ntnx_prism_vm_inventory nutanix_hostname: nutanix_username: nutanix_password: validate_certs: false -data: {"offset": 0, "length": 1000} +data: { offset: 0, length: 1000 } groups: group_1: "'' in name" group_2: "''==name" keyed_groups: - - prefix: "host" - separator: ':' - key: "ansible_host" + - prefix: host + separator: ":" + key: ansible_host
diff --git a/examples/karbon/cluster_info.yml b/examples/karbon/cluster_info.yml index b5e916b25..7c56ebf71 100644 --- a/examples/karbon/cluster_info.yml +++ b/examples/karbon/cluster_info.yml @@ -1,9 +1,7 @@ --- -- name: get k8s cluster info +- name: Get k8s cluster info hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,22 +10,23 @@ validate_certs: false tasks: - - set_fact: - cluster_name: + - name: Set cluster name + ansible.builtin.set_fact: + cluster_name: - - name: test getting cluster using name - ntnx_karbon_clusters_info: - cluster_name: "{{cluster_name}}" - register: result + - name: Test getting cluster using name + nutanix.ncp.ntnx_karbon_clusters_info: + cluster_name: "{{ cluster_name }}" + register: result - - name: test getting cluster with ssh config using cluster name - ntnx_karbon_clusters_info: - cluster_name: "{{cluster_name}}" - fetch_ssh_credentials: true - register: result + - name: Test getting cluster with ssh config using cluster name + nutanix.ncp.ntnx_karbon_clusters_info: + cluster_name: "{{ cluster_name }}" + fetch_ssh_credentials: true + register: result - - name: test getting cluster with kubeconfig config using cluster name - ntnx_karbon_clusters_info: - cluster_name: "{{cluster_name}}" - fetch_kubeconfig: true - register: result + - name: Test getting cluster with kubeconfig config using cluster name + nutanix.ncp.ntnx_karbon_clusters_info: + cluster_name: "{{ cluster_name }}" + fetch_kubeconfig: true + register: result diff --git a/examples/karbon/create_k8s_cluster.yml b/examples/karbon/create_k8s_cluster.yml index 689975485..ae17d9e4d 
100644 --- a/examples/karbon/create_k8s_cluster.yml +++ b/examples/karbon/create_k8s_cluster.yml @@ -1,9 +1,7 @@ --- -- name: create k8s cluster +- name: Create k8s cluster hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,171 +10,171 @@ validate_certs: false tasks: - - set_fact: - cluster: - name: - uuid: - node_subnet: - name: - uuid: - storage_class: - name: - storage_container: - name: - cni: - node_cidr_mask_size: 24 - service_ipv4_cidr: "172.19.0.0/16" - pod_ipv4_cidr: "172.20.0.0/16" - karbon_name: test-module21 - k8s_version: "1.19.8-0" - host_os: "ntnx-1.0" - nutanix_cluster_password: - nutanix_cluster_username: - control_plane_virtual_ip: + - name: Set vars + ansible.builtin.set_fact: + cluster: + name: + uuid: + node_subnet: + name: + uuid: + storage_class: + name: + storage_container: + name: + cni: + node_cidr_mask_size: 24 + service_ipv4_cidr: 172.19.0.0/16 + pod_ipv4_cidr: 172.20.0.0/16 + karbon_name: test-module21 + k8s_version: 1.19.8-0 + host_os: ntnx-1.0 + nutanix_cluster_password: + nutanix_cluster_username: + control_plane_virtual_ip: + ############################# + - name: Create DEV cluster with Flannel network provider + nutanix.ncp.ntnx_karbon_clusters: + cluster: + uuid: "{{ cluster.uuid }}" + name: "{{ karbon_name }}" + k8s_version: "{{ k8s_version }}" + host_os: "{{ host_os }}" + node_subnet: + name: "{{ node_subnet.name }}" + cluster_type: DEV + cni: + node_cidr_mask_size: "{{ cni.node_cidr_mask_size }}" + service_ipv4_cidr: "{{ cni.service_ipv4_cidr }}" + pod_ipv4_cidr: "{{ cni.pod_ipv4_cidr }}" + network_provider: Flannel + storage_class: + nutanix_cluster_password: "{{ nutanix_cluster_password }}" + nutanix_cluster_username: "{{ nutanix_cluster_username }}" + default_storage_class: true + name: "{{ storage_class.name }}" + reclaim_policy: Delete + storage_container: "{{ storage_container.name }}" + file_system: ext4 + flash_mode: false + register: result - ############################# - - name: create DEV cluster with Flannel network provider - ntnx_karbon_clusters: - cluster: - uuid: "{{cluster.uuid}}" - name: "{{karbon_name}}" - k8s_version: "{{k8s_version}}" - host_os: "{{host_os}}" - node_subnet: - name: "{{node_subnet.name}}" - cluster_type: DEV - cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" - service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" - pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" - network_provider: Flannel - storage_class: - nutanix_cluster_password: "{{nutanix_cluster_password}}" - nutanix_cluster_username: "{{nutanix_cluster_username}}" - default_storage_class: True - name: "{{storage_class.name}}" - reclaim_policy: Delete - storage_container: "{{storage_container.name}}" - file_system: ext4 - flash_mode: False - register: result + - name: Delete dev cluster + nutanix.ncp.ntnx_karbon_clusters: + state: absent + name: "{{ result.response.name }}" + register: result - - name: delete dev cluster - ntnx_karbon_clusters: - state: absent - name: "{{result.response.name}}" - register: result + - name: Create DEV cluster with Calico network provider + nutanix.ncp.ntnx_karbon_clusters: + cluster: + name: "{{ cluster.name }}" + name: "{{ karbon_name }}" + k8s_version: "{{ k8s_version }}" + host_os: "{{ host_os }}" + node_subnet: + uuid: "{{ node_subnet.uuid }}" + cni: + node_cidr_mask_size: "{{ cni.node_cidr_mask_size }}" + service_ipv4_cidr: "{{ cni.service_ipv4_cidr }}" + pod_ipv4_cidr: "{{ cni.pod_ipv4_cidr }}" + network_provider: Calico + custom_node_configs: + 
etcd: + num_instances: 1 + cpu: 4 + memory_gb: 8 + disk_gb: 120 + masters: + num_instances: 1 + cpu: 4 + memory_gb: 8 + disk_gb: 120 + workers: + num_instances: 1 + cpu: 8 + memory_gb: 8 + disk_gb: 120 + storage_class: + nutanix_cluster_password: "{{ nutanix_cluster_password }}" + nutanix_cluster_username: "{{ nutanix_cluster_username }}" + default_storage_class: true + name: "{{ storage_class.name }}" + reclaim_policy: Retain + storage_container: "{{ storage_container.name }}" + file_system: xfs + flash_mode: true + register: result - - name: create DEV cluster with Calico network provider - ntnx_karbon_clusters: - cluster: - name: "{{cluster.name}}" - name: "{{karbon_name}}" - k8s_version: "{{k8s_version}}" - host_os: "{{host_os}}" - node_subnet: - uuid: "{{node_subnet.uuid}}" - cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" - service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" - pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" - network_provider: Calico - custom_node_configs: - etcd: - num_instances: 1 + - name: Create worker node pool with subnet uuid + nutanix.ncp.ntnx_karbon_clusters_node_pools: + node_subnet: + uuid: + node_pool_name: "{{ karbon_name }}" + cluster_name: "{{ cluster.name }}" + pool_config: + num_instances: 2 cpu: 4 memory_gb: 8 disk_gb: 120 - masters: - num_instances: 1 - cpu: 4 - memory_gb: 8 - disk_gb: 120 - workers: - num_instances: 1 - cpu: 8 - memory_gb: 8 - disk_gb: 120 - storage_class: - nutanix_cluster_password: "{{nutanix_cluster_password}}" - nutanix_cluster_username: "{{nutanix_cluster_username}}" - default_storage_class: True - name: "{{storage_class.name}}" - reclaim_policy: Retain - storage_container: "{{storage_container.name}}" - file_system: xfs - flash_mode: true - register: result - - - name: Create worker node pool with subnet uuid - ntnx_karbon_clusters_node_pools: - node_subnet: - uuid: "" - node_pool_name: "{{karbon_name}}" - cluster_name: "{{cluster.name}}" - pool_config: - num_instances: 2 - cpu: 4 - memory_gb: 8 - disk_gb: 120 - register: result - ignore_errors: true + register: result + ignore_errors: true - - name: update pool by increasing cpu,memory_gb,num_instances and add labels - ntnx_karbon_clusters_node_pools: - wait: True - node_pool_name: "{{karbon_name}}" - cluster_name: "{{cluster.name}}" - pool_config: + - name: Update pool by increasing cpu,memory_gb,num_instances and add labels + nutanix.ncp.ntnx_karbon_clusters_node_pools: + wait: true + node_pool_name: "{{ karbon_name }}" + cluster_name: "{{ cluster.name }}" + pool_config: cpu: 6 memory_gb: 10 disk_gb: 150 num_instances: 4 - add_labels: - property1: "test-property1" - register: result - ignore_errors: true + add_labels: + property1: test-property1 + register: result + ignore_errors: true - - name: create prod cluster - ntnx_karbon_clusters: - cluster: - uuid: "{{cluster.uuid}}" - name: "{{karbon_name}}" - k8s_version: "{{k8s_version}}" - host_os: "{{host_os}}" - node_subnet: - name: "{{node_subnet.name}}" - cluster_type: PROD - cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" - service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" - pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" - network_provider: Flannel - storage_class: - nutanix_cluster_password: "{{nutanix_cluster_password}}" - nutanix_cluster_username: "{{nutanix_cluster_username}}" - default_storage_class: True - name: "{{storage_class.name}}" - reclaim_policy: Delete - storage_container: "{{storage_container.name}}" - file_system: ext4 - flash_mode: False - control_plane_virtual_ip: "{{control_plane_virtual_ip}}" - 
custom_node_configs: - etcd: - num_instances: 1 - cpu: 4 - memory_gb: 8 - disk_gb: 240 - masters: - num_instances: 1 - cpu: 4 - memory_gb: 8 - disk_gb: 240 - workers: - num_instances: 1 - cpu: 8 - memory_gb: 8 - disk_gb: 240 - register: result + - name: Create prod cluster + nutanix.ncp.ntnx_karbon_clusters: + cluster: + uuid: "{{ cluster.uuid }}" + name: "{{ karbon_name }}" + k8s_version: "{{ k8s_version }}" + host_os: "{{ host_os }}" + node_subnet: + name: "{{ node_subnet.name }}" + cluster_type: PROD + cni: + node_cidr_mask_size: "{{ cni.node_cidr_mask_size }}" + service_ipv4_cidr: "{{ cni.service_ipv4_cidr }}" + pod_ipv4_cidr: "{{ cni.pod_ipv4_cidr }}" + network_provider: Flannel + storage_class: + nutanix_cluster_password: "{{ nutanix_cluster_password }}" + nutanix_cluster_username: "{{ nutanix_cluster_username }}" + default_storage_class: true + name: "{{ storage_class.name }}" + reclaim_policy: Delete + storage_container: "{{ storage_container.name }}" + file_system: ext4 + flash_mode: false + control_plane_virtual_ip: "{{ control_plane_virtual_ip }}" + custom_node_configs: + etcd: + num_instances: 1 + cpu: 4 + memory_gb: 8 + disk_gb: 240 + masters: + num_instances: 1 + cpu: 4 + memory_gb: 8 + disk_gb: 240 + workers: + num_instances: 1 + cpu: 8 + memory_gb: 8 + disk_gb: 240 + register: result diff --git a/examples/karbon/create_registries.yml b/examples/karbon/create_registries.yml index 42c75e310..5992fbee8 100644 --- a/examples/karbon/create_registries.yml +++ b/examples/karbon/create_registries.yml @@ -1,9 +1,7 @@ --- -- name: create registeries +- name: Create registeries hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,30 +10,31 @@ validate_certs: false tasks: - - set_fact: - registry_name: - url: - port_number: - username: - password: + - name: Set vars + ansible.builtin.set_fact: + registry_name: + url: + port_number: + username: + password: - - name: create registry - ntnx_karbon_registries: - name: "{{registry_name}}" - url: "{{url}}" - port: "{{port_number}}" - register: result + - name: Create registry + nutanix.ncp.ntnx_karbon_registries: + name: "{{ registry_name }}" + url: "{{ url }}" + port: "{{ port_number }}" + register: result - - name: delete registry - ntnx_karbon_registries: - name: "{{registry_name}}" - state: absent - register: result + - name: Delete registry + nutanix.ncp.ntnx_karbon_registries: + name: "{{ registry_name }}" + state: absent + register: result - - name: create registry with username and password - ntnx_karbon_registries: - name: "{{registry_name}}" - url: "{{url}}" - username: "{{username}}" - password: "{{password}}" - register: result + - name: Create registry with username and password + nutanix.ncp.ntnx_karbon_registries: + name: "{{ registry_name }}" + url: "{{ url }}" + username: "{{ username }}" + password: "{{ password }}" + register: result diff --git a/examples/karbon/registries_info.yml b/examples/karbon/registries_info.yml index 81c2d8742..935658ee6 100644 --- a/examples/karbon/registries_info.yml +++ b/examples/karbon/registries_info.yml @@ -1,9 +1,7 @@ --- -- name: get registeries info +- name: Get registeries info hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,11 +10,11 @@ validate_certs: false tasks: - - name: test getting all registries - ntnx_karbon_registries_info: - register: registries + - name: Test getting all registries + 
nutanix.ncp.ntnx_karbon_registries_info: + register: registries - - name: test getting particular register using name - ntnx_karbon_registries_info: + - name: Test getting particular register using name + nutanix.ncp.ntnx_karbon_registries_info: registry_name: "{{ registries.response[1].name }}" - register: result + register: result diff --git a/examples/ndb/all_day2_actions.yml b/examples/ndb/all_day2_actions.yml index 4322a1302..35dc5e2e7 100644 --- a/examples/ndb/all_day2_actions.yml +++ b/examples/ndb/all_day2_actions.yml @@ -7,13 +7,9 @@ # 4. Restore database to previously created snapshot and latest snapshot # 5. Scale database # 6. Add/Remove linked databases - - -- name: perform day2 actions +- name: Perform day2 actions hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -22,110 +18,109 @@ validate_certs: false tasks: - ############################################ snapshots ########################################### - - name: create snapshot with minimal spec - ntnx_ndb_database_snapshots: - name: "{{snapshot_name}}1" - time_machine_uuid: "{{time_machine_uuid}}" + - name: Create snapshot with minimal spec + nutanix.ncp.ntnx_ndb_database_snapshots: + name: "{{ snapshot_name }}1" + time_machine_uuid: "{{ time_machine_uuid }}" register: result - - name: create snapshot with expiry - ntnx_ndb_database_snapshots: - name: "{{snapshot_name}}2" - time_machine_uuid: "{{time_machine_uuid}}" + - name: Create snapshot with expiry + nutanix.ncp.ntnx_ndb_database_snapshots: + name: "{{ snapshot_name }}2" + time_machine_uuid: "{{ time_machine_uuid }}" expiry_days: 4 register: result - - set_fact: - snapshot_uuid: "{{result.snapshot_uuid}}" + - name: Set snapshot uuid + ansible.builtin.set_fact: + snapshot_uuid: "{{ result.snapshot_uuid }}" - - name: rename snapshot - ntnx_ndb_database_snapshots: - snapshot_uuid: "{{snapshot_uuid}}" - name: "{{snapshot_name}}2-updated" + - name: Rename snapshot + nutanix.ncp.ntnx_ndb_database_snapshots: + snapshot_uuid: "{{ snapshot_uuid }}" + name: "{{ snapshot_name }}2-updated" register: result - - name: update expiry - ntnx_ndb_database_snapshots: - snapshot_uuid: "{{snapshot_uuid}}" + - name: Update expiry + nutanix.ncp.ntnx_ndb_database_snapshots: + snapshot_uuid: "{{ snapshot_uuid }}" expiry_days: 5 register: result - - name: remove expiry schedule - ntnx_ndb_database_snapshots: - snapshot_uuid: "{{snapshot_uuid}}" + - name: Remove expiry schedule + nutanix.ncp.ntnx_ndb_database_snapshots: + snapshot_uuid: "{{ snapshot_uuid }}" remove_expiry: true register: result - name: Add expiry schedule and rename - ntnx_ndb_database_snapshots: - snapshot_uuid: "{{snapshot_uuid}}" - name: "{{snapshot_name}}2" + nutanix.ncp.ntnx_ndb_database_snapshots: + snapshot_uuid: "{{ snapshot_uuid }}" + name: "{{ snapshot_name }}2" expiry_days: 6 register: result - ############################################ log catchup ###################################### - - name: perform log catchup for restore - ntnx_ndb_database_log_catchup: - time_machine_uuid: "{{time_machine_uuid}}" + - name: Perform log catchup for restore + nutanix.ncp.ntnx_ndb_database_log_catchup: + time_machine_uuid: "{{ time_machine_uuid }}" for_restore: true register: result - - name: perform log catchup - ntnx_ndb_database_log_catchup: - time_machine_uuid: "{{time_machine_uuid}}" + - name: Perform log catchup + nutanix.ncp.ntnx_ndb_database_log_catchup: + time_machine_uuid: "{{ time_machine_uuid }}" for_restore: true register: result 
########################################### restore ########################################### - - name: perform using pitr timestamp - ntnx_ndb_database_restore: - db_uuid: "{{db_uuid}}" + - name: Perform using pitr timestamp + nutanix.ncp.ntnx_ndb_database_restore: + db_uuid: "{{ db_uuid }}" pitr_timestamp: "2023-01-02 11:02:22" - timezone: "UTC" + timezone: UTC register: result - - name: perform restore using latest snapshot - ntnx_ndb_database_restore: - db_uuid: "{{db_uuid}}" - snapshot_uuid: "{{snapshot_uuid}}" + - name: Perform restore using latest snapshot + nutanix.ncp.ntnx_ndb_database_restore: + db_uuid: "{{ db_uuid }}" + snapshot_uuid: "{{ snapshot_uuid }}" register: result - - name: perform restore using snapshot uuid - ntnx_ndb_database_restore: - db_uuid: "{{db_uuid}}" - snapshot_uuid: "{{snapshot_uuid}}" + - name: Perform restore using snapshot uuid + nutanix.ncp.ntnx_ndb_database_restore: + db_uuid: "{{ db_uuid }}" + snapshot_uuid: "{{ snapshot_uuid }}" register: result ########################################### scaling ########################################### - - name: extend database storage for scaling database - ntnx_ndb_database_scale: - db_uuid: "{{db_uuid}}" + - name: Extend database storage for scaling database + nutanix.ncp.ntnx_ndb_database_scale: + db_uuid: "{{ db_uuid }}" storage_gb: 2 - pre_update_cmd: "ls" - post_update_cmd: "ls -a" + pre_update_cmd: ls + post_update_cmd: ls -a register: result ############################################ add / remove linked databases ########################################### - - name: add databases in database instance - ntnx_ndb_linked_databases: - db_instance_uuid: "{{db_uuid}}" + - name: Add databases in database instance + nutanix.ncp.ntnx_ndb_linked_databases: + db_instance_uuid: "{{ db_uuid }}" databases: - test1 - test2 register: result - - name: remove databases in database instance - ntnx_ndb_linked_databases: - state: "absent" - db_instance_uuid: "{{db_uuid}}" - database_uuid: "{{linked_databases.test1}}" + - name: Remove databases in database instance + nutanix.ncp.ntnx_ndb_linked_databases: + state: absent + db_instance_uuid: "{{ db_uuid }}" + database_uuid: "{{ linked_databases.test1 }}" register: result diff --git a/examples/ndb/create_clone.yml b/examples/ndb/create_clone.yml index 468058423..5fd9f98e4 100644 --- a/examples/ndb/create_clone.yml +++ b/examples/ndb/create_clone.yml @@ -2,8 +2,6 @@ - name: Create clone hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,7 +10,8 @@ validate_certs: false tasks: - - set_fact: + - name: Set vars + ansible.builtin.set_fact: clone_db: name: db_params_profile: @@ -32,21 +31,21 @@ snapshot: uuid: - - name: create clone using snapshot - ntnx_ndb_database_clones: - name: "{{clone_db.name}}" - desc: "ansible-created-clone" + - name: Create clone using snapshot + nutanix.ncp.ntnx_ndb_database_clones: + name: "{{ clone_db.name }}" + desc: ansible-created-clone db_params_profile: - name: "{{db_params_profile.name}}" + name: "{{ db_params_profile.name }}" db_vm: create_new_server: name: "{{ vm.name }}" - desc: "vm for db server" + desc: vm for db server password: "{{ vm.password }}" cluster: - name: "{{cluster.name}}" + name: "{{ cluster.name }}" network_profile: name: "{{ network_profile.name }}" compute_profile: @@ -54,43 +53,44 @@ pub_ssh_key: "{{ public_ssh_key }}" postgres: - db_password: "{{vm.password}}" + db_password: "{{ vm.password }}" time_machine: - name: "{{time_machine.name}}" - 
snapshot_uuid: "{{snapshot.uuid}}" + name: "{{ time_machine.name }}" + snapshot_uuid: "{{ snapshot.uuid }}" removal_schedule: days: 2 - timezone: "Asia/Calcutta" + timezone: Asia/Calcutta remind_before_in_days: 1 - delete_database: True + delete_database: true refresh_schedule: days: 2 time: "12:00:00" - timezone: "Asia/Calcutta" + timezone: Asia/Calcutta register: output - - debug: - msg: "{{output}}" + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" - - name: create clone using point in time - ntnx_ndb_database_clones: - name: "{{clone_db.name}}" - desc: "ansible-created-clone" + - name: Create clone using point in time + nutanix.ncp.ntnx_ndb_database_clones: + name: "{{ clone_db.name }}" + desc: ansible-created-clone db_params_profile: - name: "{{db_params_profile.name}}" + name: "{{ db_params_profile.name }}" db_vm: create_new_server: name: "{{ vm.name }}" - desc: "vm for db server" + desc: vm for db server password: "{{ vm.password }}" cluster: - name: "{{cluster.name}}" + name: "{{ cluster.name }}" network_profile: name: "{{ network_profile.name }}" compute_profile: @@ -98,24 +98,25 @@ pub_ssh_key: "{{ public_ssh_key }}" postgres: - db_password: "{{vm.password}}" + db_password: "{{ vm.password }}" time_machine: - name: "{{time_machine.name}}" + name: "{{ time_machine.name }}" pitr_timestamp: "2023-02-28 12:00:00" - timestamp: "Asia/Calcutta" + timestamp: Asia/Calcutta removal_schedule: days: 2 - timezone: "Asia/Calcutta" + timezone: Asia/Calcutta remind_before_in_days: 1 - delete_database: True + delete_database: true refresh_schedule: days: 2 time: "12:00:00" - timezone: "Asia/Calcutta" + timezone: Asia/Calcutta register: output - - debug: - msg: "{{output}}" + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/ndb/create_stretched_vlan.yml b/examples/ndb/create_stretched_vlan.yml index 29e6c9e20..54b19f85e 100644 --- a/examples/ndb/create_stretched_vlan.yml +++ b/examples/ndb/create_stretched_vlan.yml @@ -2,8 +2,6 @@ - name: Create stretched vlan hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,15 +10,15 @@ validate_certs: false tasks: - - name: Create stretched vlan - ntnx_ndb_stretched_vlans: - name: "{{st_vlan.name}}" - desc: "{{st_vlan.desc}}" + nutanix.ncp.ntnx_ndb_stretched_vlans: + name: "{{ st_vlan.name }}" + desc: "{{ st_vlan.desc }}" vlans: - - "" - - "" + - + - register: output - - debug: - msg: "{{output}}" + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/ndb/create_time_machine_cluster.yml b/examples/ndb/create_time_machine_cluster.yml index 95f23a22a..bf64731d9 100644 --- a/examples/ndb/create_time_machine_cluster.yml +++ b/examples/ndb/create_time_machine_cluster.yml @@ -2,8 +2,6 @@ - name: NDB time machine's cluster creation hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,16 +10,16 @@ validate_certs: false tasks: - - name: NDB time machine's cluster creation - ntnx_ndb_time_machine_clusters: - time_machine_uuid: "{{time_machine.uuid}}" + nutanix.ncp.ntnx_ndb_time_machine_clusters: + time_machine_uuid: "{{ time_machine.uuid }}" cluster: - name: "{{cluster.name}}" + name: "{{ cluster.name }}" sla: - name: "{{sla.name}}" - type: "{{type}}" + name: "{{ sla.name }}" + type: "{{ type }}" register: output - - debug: - msg: "{{output}}" + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" diff 
--git a/examples/ndb/create_vlan.yml b/examples/ndb/create_vlan.yml index a77864d95..29207f24f 100644 --- a/examples/ndb/create_vlan.yml +++ b/examples/ndb/create_vlan.yml @@ -2,8 +2,6 @@ - name: Create Dhcp ndb vlan hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,33 +10,34 @@ validate_certs: false tasks: - - name: Create Dhcp ndb vlan - ntnx_ndb_vlans: - name: "{{ndb_vlan.name}}" + nutanix.ncp.ntnx_ndb_vlans: + name: "{{ ndb_vlan.name }}" vlan_type: DHCP cluster: - uuid: "{{cluster.uuid}}" + uuid: "{{ cluster.uuid }}" register: output - - debug: - msg: "{{output}}" + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" - name: Create Static ndb vlan - ntnx_ndb_vlans: - name: "{{ndb_vlan.name}}" + nutanix.ncp.ntnx_ndb_vlans: + name: "{{ ndb_vlan.name }}" vlan_type: Static - gateway: "{{ndb_vlan.gateway}}" - subnet_mask: "{{ndb_vlan.subnet_mask}}" + gateway: "{{ ndb_vlan.gateway }}" + subnet_mask: "{{ ndb_vlan.subnet_mask }}" ip_pools: - start_ip: "{{ndb_vlan.ip_pools.0.start_ip}}" end_ip: "{{ndb_vlan.ip_pools.0.end_ip}}" - start_ip: "{{ndb_vlan.ip_pools.1.start_ip}}" end_ip: "{{ndb_vlan.ip_pools.1.end_ip}}" - primary_dns: "{{ndb_vlan.primary_dns}}" - secondary_dns: "{{ndb_vlan.secondary_dns}}" - dns_domain: "{{ndb_vlan.dns_domain}}" + primary_dns: "{{ ndb_vlan.primary_dns }}" + secondary_dns: "{{ ndb_vlan.secondary_dns }}" + dns_domain: "{{ ndb_vlan.dns_domain }}" register: output - - debug: - msg: "{{output}}" + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/ndb/db_server_vms.yml b/examples/ndb/db_server_vms.yml index 7ae35cc47..faa0f288a 100644 --- a/examples/ndb/db_server_vms.yml +++ b/examples/ndb/db_server_vms.yml @@ -2,8 +2,6 @@ - name: NDB db server vms hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,44 +10,43 @@ validate_certs: false tasks: - - - name: create spec for db server vm using time machine - check_mode: yes - ntnx_ndb_db_server_vms: - wait: True - name: "ansible-created-vm1-from-time-machine" - desc: "ansible-created-vm1-from-time-machine-time-machine" + - name: Create spec for db server vm using time machine + check_mode: true + nutanix.ncp.ntnx_ndb_db_server_vms: + wait: true + name: ansible-created-vm1-from-time-machine + desc: ansible-created-vm1-from-time-machine-time-machine time_machine: - uuid: "test_uuid" - snapshot_uuid: "test_snapshot_uuid" + uuid: test_uuid + snapshot_uuid: test_snapshot_uuid compute_profile: - uuid: "test_compute_uuid" + uuid: test_compute_uuid network_profile: - uuid: "test_network_uuid" + uuid: test_network_uuid cluster: - uuid: "test_cluster_uuid" - password: "test_password" - pub_ssh_key: "test_public_key" - database_type: "postgres_database" + uuid: test_cluster_uuid + password: test_password + pub_ssh_key: test_public_key + database_type: postgres_database automated_patching: maintenance_window: - uuid: "test_window_uuid" + uuid: test_window_uuid tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: check_mode_result - - name: create spec for db server vm using software profile and names of profile - check_mode: yes - ntnx_ndb_db_server_vms: - wait: True + - name: Create spec for db 
server vm using software profile and names of profile + check_mode: true + nutanix.ncp.ntnx_ndb_db_server_vms: + wait: true name: "{{ vm1_name }}" - desc: "ansible-created-vm1-desc" + desc: ansible-created-vm1-desc software_profile: name: "{{ software_profile.name }}" compute_profile: @@ -60,25 +57,25 @@ name: "{{ cluster.cluster1.name }}" password: "{{ vm_password }}" pub_ssh_key: "{{ public_ssh_key }}" - time_zone: "UTC" - database_type: "postgres_database" + time_zone: UTC + database_type: postgres_database automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result - - name: create db server vm using software profile - ntnx_ndb_db_server_vms: - wait: True + - name: Create db server vm using software profile + nutanix.ncp.ntnx_ndb_db_server_vms: + wait: true name: "{{ vm1_name }}" - desc: "ansible-created-vm1-desc" + desc: ansible-created-vm1-desc software_profile: name: "{{ software_profile.name }}" compute_profile: @@ -89,232 +86,226 @@ name: "{{ cluster.cluster1.name }}" password: "{{ vm_password }}" pub_ssh_key: "{{ public_ssh_key }}" - time_zone: "UTC" - database_type: "postgres_database" + time_zone: UTC + database_type: postgres_database automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result - - - name: update db server vm name, desc, credentials, tags - ntnx_ndb_db_server_vms: - wait: True - uuid: "{{db_server_uuid}}" - name: "{{vm1_name_updated}}" - desc: "ansible-created-vm1-updated-desc" - reset_name_in_ntnx_cluster: True - reset_desc_in_ntnx_cluster: True + - name: Update db server vm name, desc, credentials, tags + nutanix.ncp.ntnx_ndb_db_server_vms: + wait: true + uuid: "{{ db_server_uuid }}" + name: "{{ vm1_name_updated }}" + desc: ansible-created-vm1-updated-desc + reset_name_in_ntnx_cluster: true + reset_desc_in_ntnx_cluster: true update_credentials: - - username: "{{vm_username}}" - password: "{{vm_password}}" + - username: "{{ vm_username }}" + password: "{{ vm_password }}" tags: ansible-db-server-vms: ansible-updated register: result - - name: create spec for update db server vm credentials - check_mode: yes - ntnx_ndb_db_server_vms: - wait: True - uuid: "{{db_server_uuid}}" + - name: Create spec for update db server vm credentials + check_mode: true + nutanix.ncp.ntnx_ndb_db_server_vms: + wait: true + uuid: "{{ db_server_uuid }}" update_credentials: - - username: "user" - password: "pass" + - username: user + password: pass register: result - - name: List NDB db_servers - ntnx_ndb_db_servers_info: + nutanix.ncp.ntnx_ndb_db_servers_info: register: db_servers - - - name: get NDB db_servers using it's name - ntnx_ndb_db_servers_info: + - name: Get NDB db_servers using it's name + nutanix.ncp.ntnx_ndb_db_servers_info: filters: load_metrics: true - load_databases: True + load_databases: true value_type: name - value: "{{db_servers.response[0].name}}" + value: "{{ db_servers.response[0].name }}" register: result - - name: get NDB db_servers using 
it's ip - ntnx_ndb_db_servers_info: + - name: Get NDB db_servers using its ip + nutanix.ncp.ntnx_ndb_db_servers_info: filters: value_type: ip - value: "{{db_servers.response[0].ipAddresses[0]}}" + value: "{{ db_servers.response[0].ipAddresses[0] }}" register: result - - name: get NDB db_servers using it's name - ntnx_ndb_db_servers_info: - name: "{{db_servers.response[0].name}}" + - name: Get NDB db_servers using its name + nutanix.ncp.ntnx_ndb_db_servers_info: + name: "{{ db_servers.response[0].name }}" register: result - - name: get NDB db_servers using it's id - ntnx_ndb_db_servers_info: - uuid: "{{db_servers.response[0].id}}" + - name: Get NDB db_servers using its id + nutanix.ncp.ntnx_ndb_db_servers_info: + uuid: "{{ db_servers.response[0].id }}" register: result - - name: get NDB db_servers using ip - ntnx_ndb_db_servers_info: - server_ip: "{{db_servers.response[0].ipAddresses[0]}}" + - name: Get NDB db_servers using ip + nutanix.ncp.ntnx_ndb_db_servers_info: + server_ip: "{{ db_servers.response[0].ipAddresses[0] }}" register: result ################################### maintenance tasks update tasks ############################# - - name: create spec for adding maintenance window tasks to db server vm - check_mode: yes - ntnx_ndb_maintenance_tasks: + - name: Create spec for adding maintenance window tasks to db server vm + check_mode: true + nutanix.ncp.ntnx_ndb_maintenance_tasks: db_server_vms: - - name: "{{vm1_name_updated}}" - - uuid: "test_vm_1" + - name: "{{ vm1_name_updated }}" + - uuid: test_vm_1 db_server_clusters: - - uuid: "test_cluter_1" - - uuid: "test_cluter_2" + - uuid: test_cluster_1 + - uuid: test_cluster_2 maintenance_window: - name: "{{maintenance.window_name}}" + name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls -a" - post_task_cmd: "ls" - - type: "DB_PATCHING" - pre_task_cmd: "ls -a" - post_task_cmd: "ls" + - type: OS_PATCHING + pre_task_cmd: ls -a + post_task_cmd: ls + - type: DB_PATCHING + pre_task_cmd: ls -a + post_task_cmd: ls register: result - - name: create spec for removing maintenance window tasks from above created vm - check_mode: yes - ntnx_ndb_maintenance_tasks: + - name: Create spec for removing maintenance window tasks from above created vm + check_mode: true + nutanix.ncp.ntnx_ndb_maintenance_tasks: db_server_vms: - - uuid: "{{db_server_uuid}}" + - uuid: "{{ db_server_uuid }}" maintenance_window: - uuid: "{{maintenance.window_uuid}}" + uuid: "{{ maintenance.window_uuid }}" tasks: [] register: result - - - name: remove maintenance tasks - ntnx_ndb_maintenance_tasks: + - name: Remove maintenance tasks + nutanix.ncp.ntnx_ndb_maintenance_tasks: db_server_vms: - - uuid: "{{db_server_uuid}}" + - uuid: "{{ db_server_uuid }}" maintenance_window: - uuid: "{{maintenance.window_uuid}}" + uuid: "{{ maintenance.window_uuid }}" tasks: [] register: result - name: Add maitenance window task for vm - ntnx_ndb_maintenance_tasks: + nutanix.ncp.ntnx_ndb_maintenance_tasks: db_server_vms: - - name: "{{vm1_name_updated}}" + - name: "{{ vm1_name_updated }}" maintenance_window: - name: "{{maintenance.window_name}}" + name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls -a" - post_task_cmd: "ls" - - type: "DB_PATCHING" - pre_task_cmd: "ls -a" - post_task_cmd: "ls" + - type: OS_PATCHING + pre_task_cmd: ls -a + post_task_cmd: ls + - type: DB_PATCHING + pre_task_cmd: ls -a + post_task_cmd: ls register: result ################################### DB server VM unregistration tasks 
############################# - - name: generate check mode spec for unregister with default values - check_mode: yes - ntnx_ndb_db_server_vms: - state: "absent" - wait: True - uuid: "{{db_server_uuid}}" + - name: Generate check mode spec for unregister with default values + check_mode: true + nutanix.ncp.ntnx_ndb_db_server_vms: + state: absent + wait: true + uuid: "{{ db_server_uuid }}" register: result - - name: genereate check mode spec for delete vm with vgs and snapshots - check_mode: yes - ntnx_ndb_db_server_vms: - state: "absent" - uuid: "{{db_server_uuid}}" - delete_from_cluster: True - delete_vgs: True - delete_vm_snapshots: True + - name: Generate check mode spec for delete vm with vgs and snapshots + check_mode: true + nutanix.ncp.ntnx_ndb_db_server_vms: + state: absent + uuid: "{{ db_server_uuid }}" + delete_from_cluster: true + delete_vgs: true + delete_vm_snapshots: true register: result - - name: unregister vm - ntnx_ndb_db_server_vms: - state: "absent" - wait: True - uuid: "{{db_server_uuid}}" - delete_from_cluster: False - soft_remove: True - delete_vgs: True - delete_vm_snapshots: True + - name: Unregister vm + nutanix.ncp.ntnx_ndb_db_server_vms: + state: absent + wait: true + uuid: "{{ db_server_uuid }}" + delete_from_cluster: false + soft_remove: true + delete_vgs: true + delete_vm_snapshots: true register: result ################################### DB server VM Registration tasks ############################# - - - name: generate spec for registeration of the previous unregistered vm using check mode - check_mode: yes - ntnx_ndb_register_db_server_vm: - ip: "{{vm_ip}}" - desc: "register-vm-desc" + - name: Generate spec for registration of the previous unregistered vm using check mode + check_mode: true + nutanix.ncp.ntnx_ndb_register_db_server_vm: + ip: "{{ vm_ip }}" + desc: register-vm-desc reset_desc_in_ntnx_cluster: true cluster: - name: "{{cluster.cluster1.name}}" + name: "{{ cluster.cluster1.name }}" postgres: - software_path: "{{postgres.software_home}}" - private_ssh_key: "check-key" - username: "{{vm_username}}" + software_path: "{{ postgres.software_home }}" + private_ssh_key: check-key + username: "{{ vm_username }}" automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" - working_directory: "/check" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F + working_directory: /check register: result - - name: register the previous unregistered vm - ntnx_ndb_register_db_server_vm: - ip: "{{vm_ip}}" - desc: "register-vm-desc" + - name: Register the previous unregistered vm + nutanix.ncp.ntnx_ndb_register_db_server_vm: + ip: "{{ vm_ip }}" + desc: register-vm-desc cluster: - name: "{{cluster.cluster1.name}}" + name: "{{ cluster.cluster1.name }}" postgres: listener_port: 5432 - software_path: "{{postgres.software_home}}" - username: "{{vm_username}}" - password: "{{vm_password}}" + software_path: "{{ postgres.software_home }}" + username: "{{ vm_username }}" + password: "{{ vm_password }}" automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l 
+ post_task_cmd: ls -F register: result ################################### DB server VM Delete tasks ############################# - - - name: unregister db server vm - ntnx_ndb_db_server_vms: - state: "absent" - wait: True - uuid: "{{db_server_uuid}}" + - name: Unregister db server vm + nutanix.ncp.ntnx_ndb_db_server_vms: + state: absent + wait: true + uuid: "{{ db_server_uuid }}" delete_from_cluster: false - delete_vgs: True - delete_vm_snapshots: True + delete_vgs: true + delete_vm_snapshots: true register: result diff --git a/examples/ndb/provision_database_on_registered_db_server.yml b/examples/ndb/provision_database_on_registered_db_server.yml index ca196cc88..110d010f1 100644 --- a/examples/ndb/provision_database_on_registered_db_server.yml +++ b/examples/ndb/provision_database_on_registered_db_server.yml @@ -2,8 +2,6 @@ - name: Single instance postgres database creation on registered db server hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,9 +10,8 @@ validate_certs: false tasks: - - name: Create single instance postgres database on registered db server vm - ntnx_ndb_databases: + nutanix.ncp.ntnx_ndb_databases: name: POSTGRES_DATABASE_ANSIBLE @@ -46,5 +43,6 @@ register: output - - debug: - msg: "{{output}}" + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/ndb/provision_postgres_ha_instance_with_ips.yml b/examples/ndb/provision_postgres_ha_instance_with_ips.yml index 00e95fc68..e9c3621b8 100644 --- a/examples/ndb/provision_postgres_ha_instance_with_ips.yml +++ b/examples/ndb/provision_postgres_ha_instance_with_ips.yml @@ -4,8 +4,6 @@ - name: Create stretched vlan hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -14,64 +12,64 @@ validate_certs: false tasks: - - name: create HA instance postgres database with static IP assignments to vms and cluster IP - ntnx_ndb_databases: + - name: Create HA instance postgres database with static IP assignments to vms and cluster IP + nutanix.ncp.ntnx_ndb_databases: wait: true timeout: 5400 - name: "" - desc: "ansible-created-db-desc" + name: + desc: ansible-created-db-desc db_params_profile: - name: "" + name: db_server_cluster: new_cluster: - name: "" + name: cluster: - name: "" + name: ips: - cluster: - name: "" - ip: "" + name: + ip: software_profile: - name: "" + name: network_profile: - name: "" + name: compute_profile: - name: "" - password: "" - pub_ssh_key: "" + name: + password: + pub_ssh_key: vms: - - name: "vm-1" - node_type: "database" - role: "Primary" - ip: "" + - name: vm-1 + node_type: database + role: Primary + ip: - - name: "vm-2" - node_type: "database" - role: "Secondary" - ip: "" + - name: vm-2 + node_type: database + role: Secondary + ip: - - name: "vm-3" - node_type: "database" - role: "Secondary" - ip: "" + - name: vm-3 + node_type: database + role: Secondary + ip: - - name: "vm-ha-proxy1" - node_type: "haproxy" - ip: "" + - name: vm-ha-proxy1 + node_type: haproxy + ip: - - name: "vm-ha-proxy2" - node_type: "haproxy" - ip: "" + - name: vm-ha-proxy2 + node_type: haproxy + ip: postgres: - type: "ha" + type: ha db_name: testAnsible - db_password: "" + db_password: db_size: 200 - patroni_cluster_name: "" + patroni_cluster_name: ha_proxy: provision_virtual_ip: true @@ -79,7 +77,7 @@ name: TM2 desc: TM-desc sla: - name: "" + name: schedule: daily: "11:10:02" weekly: WEDNESDAY @@ -88,22 +86,23 @@ log_catchup: 30 snapshots_per_day: 2 
clusters: - - name: "" + - name: tags: - ansible-databases: "ha-instance-dbs" + ansible-databases: ha-instance-dbs automated_patching: maintenance_window: - name: "" + name: tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result - - debug: + - name: Print output + ansible.builtin.debug: msg: "{{ result }}" diff --git a/examples/ndb/refresh_clone.yml b/examples/ndb/refresh_clone.yml index 3806fb9d9..a420d28f4 100644 --- a/examples/ndb/refresh_clone.yml +++ b/examples/ndb/refresh_clone.yml @@ -2,8 +2,6 @@ - name: Create Refresh clone hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,29 +10,31 @@ validate_certs: false tasks: - - set_fact: + - name: Set vars + ansible.builtin.set_fact: clone_db: uuid: snapshot: uuid: - - name: create spec for refresh clone to a pitr timestamp - check_mode: yes - ntnx_ndb_database_clone_refresh: - uuid: "{{clone_db.uuid}}" + - name: Create spec for refresh clone to a pitr timestamp + check_mode: true + nutanix.ncp.ntnx_ndb_database_clone_refresh: + uuid: "{{ clone_db.uuid }}" pitr_timestamp: "2023-02-04 07:29:36" - timezone: "UTC" + timezone: UTC register: output + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" - - debug: - msg: "{{output}}" - - - name: refresh db clone - ntnx_ndb_database_clone_refresh: - uuid: "{{clone_db.uuid}}" - snapshot_uuid: "{{snapshot.uuid}}" + - name: Refresh db clone + nutanix.ncp.ntnx_ndb_database_clone_refresh: + uuid: "{{ clone_db.uuid }}" + snapshot_uuid: "{{ snapshot.uuid }}" register: output - - debug: - msg: "{{output}}" + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/ndb/registr_cluster.yml b/examples/ndb/registr_cluster.yml index 80108160d..c425abba4 100644 --- a/examples/ndb/registr_cluster.yml +++ b/examples/ndb/registr_cluster.yml @@ -2,8 +2,6 @@ - name: NDB cluster creation hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,34 +10,34 @@ validate_certs: false tasks: - - name: NDB cluster creation - ntnx_ndb_clusters: - name: "{{cluster.name}}" - desc: "{{cluster.desc}}" - name_prefix: "{{cluster.name_prefix}}" - cluster_ip: "{{cluster.cluster_ip}}" + nutanix.ncp.ntnx_ndb_clusters: + name: "{{ cluster.name }}" + desc: "{{ cluster.desc }}" + name_prefix: "{{ cluster.name_prefix }}" + cluster_ip: "{{ cluster.cluster_ip }}" cluster_credentials: - username: "{{cluster_credentials.username}}" - password: "{{cluster_credentials.password}}" + username: "{{ cluster_credentials.username }}" + password: "{{ cluster_credentials.password }}" agent_network: dns_servers: - - "{{agent_network.dns_servers[0]}}" - - "{{agent_network.dns_servers[1]}}" + - "{{ agent_network.dns_servers[0] }}" + - "{{ agent_network.dns_servers[1] }}" ntp_servers: - - "{{agent_network.ntp_servers[0]}}" - - "{{agent_network.ntp_servers[1]}}" - - "{{agent_network.ntp_servers[2]}}" - - "{{agent_network.ntp_servers[3]}}" + - "{{ agent_network.ntp_servers[0] }}" + - "{{ agent_network.ntp_servers[1] }}" + - "{{ agent_network.ntp_servers[2] }}" + - "{{ agent_network.ntp_servers[3] }}" vlan_access: prism_vlan: - vlan_name: "{{prism_vlan.vlan_name}}" - vlan_type: "{{prism_vlan.vlan_type}}" - static_ip: 
"{{prism_vlan.static_ip}}" - gateway: "{{prism_vlan.gateway}}" - subnet_mask: "{{prism_vlan.subnet_mask}}" - storage_container: "{{storage_container.name}}" + vlan_name: "{{ prism_vlan.vlan_name }}" + vlan_type: "{{ prism_vlan.vlan_type }}" + static_ip: "{{ prism_vlan.static_ip }}" + gateway: "{{ prism_vlan.gateway }}" + subnet_mask: "{{ prism_vlan.subnet_mask }}" + storage_container: "{{ storage_container.name }}" register: output - - debug: - msg: "{{output}}" + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/ndb/single_instance_postgress_database.yml b/examples/ndb/single_instance_postgress_database.yml index 8f8f83339..62f8e5351 100644 --- a/examples/ndb/single_instance_postgress_database.yml +++ b/examples/ndb/single_instance_postgress_database.yml @@ -2,8 +2,6 @@ - name: Single instance postgres database creation with new db server VM hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,9 +10,8 @@ validate_certs: false tasks: - - name: Create single instance postgres database - ntnx_ndb_databases: + nutanix.ncp.ntnx_ndb_databases: name: POSTGRES_DATABASE_ANSIBLE @@ -33,7 +30,7 @@ name: DEFAULT_OOB_POSTGRESQL_NETWORK compute_profile: name: DEFAULT_OOB_SMALL_COMPUTE - pub_ssh_key: "" + pub_ssh_key: postgres: listener_port: "5432" @@ -56,5 +53,6 @@ register: output - - debug: - msg: "{{output}}" + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/ndb/soft_delete_database_instance.yml b/examples/ndb/soft_delete_database_instance.yml index 07b15fab1..0ba663012 100644 --- a/examples/ndb/soft_delete_database_instance.yml +++ b/examples/ndb/soft_delete_database_instance.yml @@ -2,8 +2,6 @@ - name: Soft delete single instance database and time machine associated hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,14 +10,14 @@ validate_certs: false tasks: - - name: Soft delete single instance database and time machine associated - ntnx_ndb_databases: - state: "absent" + nutanix.ncp.ntnx_ndb_databases: + state: absent db_uuid: c0a4433a-49f2-40f3-ae52-d88788d2824b soft_delete: true delete_time_machine: true register: output - - debug: - msg: "{{output}}" + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/ndb/software_profiles.yml b/examples/ndb/software_profiles.yml index fa22d873c..3af55b2b2 100644 --- a/examples/ndb/software_profiles.yml +++ b/examples/ndb/software_profiles.yml @@ -10,8 +10,6 @@ - name: Create software profiles hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -20,168 +18,168 @@ validate_certs: false tasks: - - name: create software profile create spec - check_mode: yes - ntnx_ndb_profiles: - name: "{{profile1_name}}" - desc: "{{profile1_name}}-desc" - type: "software" - database_type: "postgres" + - name: Create software profile create spec + check_mode: true + nutanix.ncp.ntnx_ndb_profiles: + name: "{{ profile1_name }}" + desc: "{{ profile1_name }}-desc" + type: software + database_type: postgres software: - topology: "cluster" - name: "v1.0" - desc: "v1.0-desc" + topology: cluster + name: v1.0 + desc: v1.0-desc notes: - os: "os_notes" - db_software: "db_notes" + os: os_notes + db_software: db_notes db_server_vm: - name: "{{db_server_vm.name}}" + name: "{{ db_server_vm.name }}" clusters: - - name: "" - - uuid: "" + - name: + - 
uuid: register: result - - name: create software profile with base version and cluster instance topology. Replicate to multiple clusters - ntnx_ndb_profiles: - name: "{{profile1_name}}-replicated" - desc: "{{profile1_name}}-desc-replicated" - type: "software" - database_type: "postgres" + - name: Create software profile with base version and cluster instance topology. Replicate to multiple clusters + nutanix.ncp.ntnx_ndb_profiles: + name: "{{ profile1_name }}-replicated" + desc: "{{ profile1_name }}-desc-replicated" + type: software + database_type: postgres software: - topology: "cluster" - name: "v1.0" - desc: "v1.0-desc" + topology: cluster + name: v1.0 + desc: v1.0-desc notes: - os: "os_notes" - db_software: "db_notes" + os: os_notes + db_software: db_notes db_server_vm: - uuid: "{{db_server_vm.uuid}}" + uuid: "{{ db_server_vm.uuid }}" clusters: - - name: "" - - uuid: "" + - name: + - uuid: register: result - - name: create software profile with base version and single instance topology - ntnx_ndb_profiles: - name: "{{profile2_name}}" - desc: "{{profile2_name}}-desc" - type: "software" - database_type: "postgres" + - name: Create software profile with base version and single instance topology + nutanix.ncp.ntnx_ndb_profiles: + name: "{{ profile2_name }}" + desc: "{{ profile2_name }}-desc" + type: software + database_type: postgres software: - topology: "single" - name: "v1.0" - desc: "v1.0-desc" + topology: single + name: v1.0 + desc: v1.0-desc notes: - os: "os_notes" - db_software: "db_notes" + os: os_notes + db_software: db_notes db_server_vm: - uuid: "{{db_server_vm.uuid}}" + uuid: "{{ db_server_vm.uuid }}" clusters: - - name: "" + - name: register: result - - name: update software profile - ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" - name: "{{profile1_name}}-updated1" - desc: "{{profile1_name}}-desc-updated" + - name: Update software profile + nutanix.ncp.ntnx_ndb_profiles: + profile_uuid: "{{ profile_uuid }}" + name: "{{ profile1_name }}-updated1" + desc: "{{ profile1_name }}-desc-updated" register: result - - name: create software profile version spec - check_mode: yes - ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" - database_type: "postgres" + - name: Create software profile version spec + check_mode: true + nutanix.ncp.ntnx_ndb_profiles: + profile_uuid: "{{ profile_uuid }}" + database_type: postgres software: - name: "v2.0" - desc: "v2.0-desc" + name: v2.0 + desc: v2.0-desc notes: - os: "os_notes for v2" - db_software: "db_notes for v2" + os: os_notes for v2 + db_software: db_notes for v2 db_server_vm: - name: "{{db_server_vm.name}}" + name: "{{ db_server_vm.name }}" register: result - - name: create software profile version - ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" - database_type: "postgres" + - name: Create software profile version + nutanix.ncp.ntnx_ndb_profiles: + profile_uuid: "{{ profile_uuid }}" + database_type: postgres software: - name: "v2.0" - desc: "v2.0-desc" + name: v2.0 + desc: v2.0-desc notes: - os: "os_notes for v2" - db_software: "db_notes for v2" + os: os_notes for v2 + db_software: db_notes for v2 db_server_vm: - uuid: "{{db_server_vm.uuid}}" + uuid: "{{ db_server_vm.uuid }}" register: result - - name: create spec for update software profile version - check_mode: yes - ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" - database_type: "postgres" + - name: Create spec for update software profile version + check_mode: true + nutanix.ncp.ntnx_ndb_profiles: + profile_uuid: "{{ profile_uuid }}" + database_type: postgres 
software: - version_uuid: "{{result.version_uuid}}" - name: "v2.0-updated" - desc: "v2.0-desc-updated" + version_uuid: "{{ result.version_uuid }}" + name: v2.0-updated + desc: v2.0-desc-updated register: result - - name: update software profile version - ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" - database_type: "postgres" + - name: Update software profile version + nutanix.ncp.ntnx_ndb_profiles: + profile_uuid: "{{ profile_uuid }}" + database_type: postgres software: - version_uuid: "{{result.version_uuid}}" - name: "v2.0-updated" - desc: "v2.0-desc-updated" + version_uuid: "{{ result.version_uuid }}" + name: v2.0-updated + desc: v2.0-desc-updated register: result - - name: publish software profile version - ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" + - name: Publish software profile version + nutanix.ncp.ntnx_ndb_profiles: + profile_uuid: "{{ profile_uuid }}" software: - version_uuid: "{{version_uuid}}" - publish: True + version_uuid: "{{ version_uuid }}" + publish: true register: result - - name: unpublish software profile version - ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" + - name: Unpublish software profile version + nutanix.ncp.ntnx_ndb_profiles: + profile_uuid: "{{ profile_uuid }}" software: - version_uuid: "{{version_uuid}}" + version_uuid: "{{ version_uuid }}" publish: false register: result - - name: deprecate software profile version - ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" + - name: Deprecate software profile version + nutanix.ncp.ntnx_ndb_profiles: + profile_uuid: "{{ profile_uuid }}" software: - version_uuid: "{{version_uuid}}" - deprecate: True + version_uuid: "{{ version_uuid }}" + deprecate: true register: result - - name: delete software profile version - ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" + - name: Delete software profile version + nutanix.ncp.ntnx_ndb_profiles: + profile_uuid: "{{ profile_uuid }}" software: - version_uuid: "{{version_uuid}}" - state: "absent" + version_uuid: "{{ version_uuid }}" + state: absent register: result - - name: replicate software profile - ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" + - name: Replicate software profile + nutanix.ncp.ntnx_ndb_profiles: + profile_uuid: "{{ profile_uuid }}" clusters: - - name: "{{cluster.cluster2.name}}" + - name: "{{ cluster.cluster2.name }}" register: result - - name: delete software profile - ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" - state: "absent" + - name: Delete software profile + nutanix.ncp.ntnx_ndb_profiles: + profile_uuid: "{{ profile_uuid }}" + state: absent register: result diff --git a/examples/pbr.yml b/examples/pbr.yml index 3e36f8097..eca3ac302 100644 --- a/examples/pbr.yml +++ b/examples/pbr.yml @@ -2,8 +2,6 @@ - name: PBR playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,28 +10,28 @@ validate_certs: false tasks: - name: Setting Variables - set_fact: + ansible.builtin.set_fact: cluster_name: "" cluster_uuid: "" priority: "" vpc_uuid: "" - - name: create PBR with vpc uuid with any source or destination or protocol with deny action - ntnx_pbrs: - state: present - priority: "{{ priority }}" - vpc: - uuid: "{{ vpc_uuid }}" - source: - any: True - destination: - any: True - action: - deny: True - protocol: - any: True + - name: Create PBR with vpc uuid with any source or destination or protocol with deny action + nutanix.ncp.ntnx_pbrs: + state: present + priority: "{{ priority }}" + vpc: + uuid: "{{ vpc_uuid }}" + 
source: + any: true + destination: + any: true + action: + deny: true + protocol: + any: true register: result - name: Delete pbrs - ntnx_pbrs: + nutanix.ncp.ntnx_pbrs: state: absent pbr_uuid: "{{ result.pbr_uuid }}" diff --git a/examples/pbr_info.yml b/examples/pbr_info.yml index d59512893..ae712a1ad 100644 --- a/examples/pbr_info.yml +++ b/examples/pbr_info.yml @@ -2,8 +2,6 @@ - name: PBR_Info playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,17 +10,16 @@ validate_certs: false tasks: - - name: List pbrs using length and offset - ntnx_pbrs_info: + nutanix.ncp.ntnx_pbrs_info: length: 1 offset: 0 register: result - ignore_errors: True + ignore_errors: true - name: List pbrs using ascending priority sorting - ntnx_pbrs_info: + nutanix.ncp.ntnx_pbrs_info: sort_order: "ASCENDING" sort_attribute: "priority" register: result - ignore_errors: True + ignore_errors: true diff --git a/examples/permissions_info.yml b/examples/permissions_info.yml index 8b8bc3960..d84376659 100644 --- a/examples/permissions_info.yml +++ b/examples/permissions_info.yml @@ -2,31 +2,29 @@ - name: PC permissions hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false + nutanix_host: "" + nutanix_username: "" + nutanix_password: "" + validate_certs: false tasks: - - name: get all permissions - ntnx_permissions_info: - register: op1 + - name: Get all permissions + nutanix.ncp.ntnx_permissions_info: + register: op1 - - name: get permissions using filter - ntnx_permissions_info: - filter: - name: - register: op2 + - name: Get permissions using filter + nutanix.ncp.ntnx_permissions_info: + filter: + name: + register: op2 - - name: get permission using uuid - ntnx_permissions_info: - permission_uuid: - register: op3 + - name: Get permission using uuid + nutanix.ncp.ntnx_permissions_info: + permission_uuid: + register: op3 - - name: output - debug: - msg: "{{ op3 }}" + - name: Output + ansible.builtin.debug: + msg: "{{ op3 }}" diff --git a/examples/projects_crud.yml b/examples/projects_crud.yml index ff785196a..1a715edf5 100644 --- a/examples/projects_crud.yml +++ b/examples/projects_crud.yml @@ -1,8 +1,6 @@ -- name: projects crud playbook. Here we will create, update, read and delete the project. +- name: Projects crud playbook. Here we will create, update, read and delete the project. 
hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -11,7 +9,7 @@ validate_certs: false tasks: - name: Create a project - ntnx_projects: + nutanix.ncp.ntnx_projects: name: "test-ansible-project-1" desc: "desc-123" subnets: @@ -31,8 +29,8 @@ - name: register: project1 - - name: update project - ntnx_projects: + - name: Update project + nutanix.ncp.ntnx_projects: state: present project_uuid: "{{project1.project_uuid}}" name: "test-ansible-project-1" @@ -45,16 +43,16 @@ register: updated_project - name: Read the updated project - ntnx_projects_info: + nutanix.ncp.ntnx_projects_info: project_uuid: "{{updated_project.project_uuid}}" register: project_info - name: Print the project details - debug: + ansible.builtin.debug: msg: "{{project_info}}" - name: Delete the project - ntnx_projects: + nutanix.ncp.ntnx_projects: state: absent project_uuid: "{{updated_project.project_uuid}}" register: op diff --git a/examples/projects_with_role_mapping.yml b/examples/projects_with_role_mapping.yml index c66fb47d5..e289cc271 100644 --- a/examples/projects_with_role_mapping.yml +++ b/examples/projects_with_role_mapping.yml @@ -1,8 +1,6 @@ -- name: projects crud playbook. Here we will create, update, read and delete the project with role mappings. +- name: Projects crud playbook. Here we will create, update, read and delete the project with role mappings. hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -11,7 +9,7 @@ validate_certs: false tasks: - name: Create a project with role mappings - ntnx_projects: + nutanix.ncp.ntnx_projects: name: "test-ansible-project-1" desc: "desc-123" clusters: @@ -30,18 +28,18 @@ - name: accounts: - name: - collaboration: True + collaboration: true role_mappings: - user: uuid: role: name: "Project Admin" - user_group: - uuid: + uuid: role: name: "Developer" - user: - uuid: + uuid: role: name: "Consumer" - user: @@ -57,13 +55,13 @@ name: "Consumer" register: project1 - - name: update role mappings of project - ntnx_projects: + - name: Update role mappings of project + nutanix.ncp.ntnx_projects: state: present project_uuid: "{{project1.project_uuid}}" name: "test-ansible-project-1" desc: "test-ansible-project-1-updated" - collaboration: True + collaboration: true role_mappings: - user: uuid: @@ -76,17 +74,17 @@ register: updated_project - name: Read the updated project - ntnx_projects_info: + nutanix.ncp.ntnx_projects_info: project_uuid: "{{updated_project.project_uuid}}" include_acps: true register: project_info - name: Print the project details - debug: + ansible.builtin.debug: msg: "{{project_info}}" - name: Delete the project - ntnx_projects: + nutanix.ncp.ntnx_projects: state: absent project_uuid: "{{updated_project.project_uuid}}" register: op diff --git a/examples/roles_crud.yml b/examples/roles_crud.yml index b01c02eca..d364c804f 100644 --- a/examples/roles_crud.yml +++ b/examples/roles_crud.yml @@ -1,8 +1,6 @@ - name: Roles crud playbook. Here we will create, update, read and delete the role. hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -10,12 +8,12 @@ nutanix_password: validate_certs: false tasks: - - name: get some permissions for adding in roles - ntnx_permissions_info: + - name: Get some permissions for adding in roles + nutanix.ncp.ntnx_permissions_info: register: permissions - name: Create a role with 2 permissions. 
Here we will be using name or uuid for referenceing permissions - ntnx_roles: + nutanix.ncp.ntnx_roles: state: present name: test-ansible-role-1 desc: @@ -26,7 +24,7 @@ register: role1 - name: Update role - ntnx_roles: + nutanix.ncp.ntnx_roles: state: present role_uuid: "{{ role1.role_uuid }}" name: test-ansible-role-1 @@ -36,16 +34,16 @@ register: updated_role1 - name: Read the updated role - ntnx_roles_info: + nutanix.ncp.ntnx_roles_info: role_uuid: "{{ updated_role1.role_uuid }}" register: role1_info - name: Print the role details - debug: + ansible.builtin.debug: msg: "{{role1_info}}" - name: Delete the role. - ntnx_roles: + nutanix.ncp.ntnx_roles: state: absent role_uuid: "{{ updated_role1.role_uuid }}" wait: true diff --git a/examples/static_routes.yml b/examples/static_routes.yml index 846168e3a..c3a54a6ce 100644 --- a/examples/static_routes.yml +++ b/examples/static_routes.yml @@ -2,8 +2,6 @@ - name: Static Routes playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,15 +10,15 @@ validate_certs: false tasks: - name: Setting Variables - set_fact: + ansible.builtin.set_fact: vpc_uuid: "" vpn_uuid: "" external_nat_subnet: name: "" - uuid: "" + uuid: "" - - name: create static routes and default static routes with external nat subnet - ntnx_static_routes: + - name: Create static routes and default static routes with external nat subnet + nutanix.ncp.ntnx_static_routes: vpc_uuid: "{{ vpc_uuid }}" static_routes: - destination: "0.0.0.0/0" @@ -40,7 +38,7 @@ vpn_connection_ref: uuid: "{{ vpn_uuid }}" - - name: remove all routes excluding dynamic and local routes - ntnx_static_routes: + - name: Remove all routes excluding dynamic and local routes + nutanix.ncp.ntnx_static_routes: vpc_uuid: "{{ vpc_uuid }}" remove_all_routes: true diff --git a/examples/subnet.yml b/examples/subnet.yml index c77be095a..68003406b 100644 --- a/examples/subnet.yml +++ b/examples/subnet.yml @@ -1,8 +1,6 @@ - name: Subnet playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -11,7 +9,7 @@ validate_certs: false tasks: - name: Setting Variables - set_fact: + ansible.builtin.set_fact: cluster_name: "" cluster_uuid: "" virtual_switch_name: "" @@ -30,71 +28,71 @@ vpc_name: "" vpc_uuid: "" - - name: 'VLAN subnet with IPAM, IP pools and DHCP' - ntnx_subnets: + - name: "VLAN subnet with IPAM, IP pools and DHCP" + nutanix.ncp.ntnx_subnets: state: present name: VLAN subnet with IPAM IP pools and DHCP vlan_subnet: vlan_id: 29 virtual_switch: - name: '{{ virtual_switch_name }}' + name: "{{ virtual_switch_name }}" cluster: - name: '{{ cluster_name }}' + name: "{{ cluster_name }}" ipam: - network_ip: '{{ network_ip }}' - network_prefix: '{{ network_prefix }}' - gateway_ip: '{{ gateway_ip_address }}' + network_ip: "{{ network_ip }}" + network_prefix: "{{ network_prefix }}" + gateway_ip: "{{ gateway_ip_address }}" ip_pools: - - start_ip: '{{ start_address }}' - end_ip: '{{ end_address }}' + - start_ip: "{{ start_address }}" + end_ip: "{{ end_address }}" dhcp: - dns_servers: '{{ dns_servers }}' - domain_search: '{{ domain_search }}' - domain_name: '{{ domain_name }}' - tftp_server_name: '{{ tftp_server_name }}' - boot_file: '{{ boot_file }}' - dhcp_server_ip: '{{ dhcp_server_address }}' + dns_servers: "{{ dns_servers }}" + domain_search: "{{ domain_search }}" + domain_name: "{{ domain_name }}" + tftp_server_name: "{{ tftp_server_name }}" + boot_file: "{{ boot_file }}" 
+ dhcp_server_ip: "{{ dhcp_server_address }}" register: result ignore_errors: true - name: External subnet with NAT - ntnx_subnets: + nutanix.ncp.ntnx_subnets: state: present - name: ' External subnet with NAT ' + name: " External subnet with NAT " external_subnet: vlan_id: 30 enable_nat: true cluster: - name: '{{ cluster_name }}' + name: "{{ cluster_name }}" ipam: - network_ip: '{{ network_ip }}' - network_prefix: '{{ network_prefix }}' - gateway_ip: '{{ gateway_ip_address }}' + network_ip: "{{ network_ip }}" + network_prefix: "{{ network_prefix }}" + gateway_ip: "{{ gateway_ip_address }}" ip_pools: - - start_ip: '{{ start_address }}' - end_ip: '{{ end_address }}' + - start_ip: "{{ start_address }}" + end_ip: "{{ end_address }}" register: result ignore_errors: true - name: Overlay Subnet with IP_pools and DHCP - ntnx_subnets: + nutanix.ncp.ntnx_subnets: state: present name: Overlay Subnet with IP_pools and DHCP overlay_subnet: vpc: - name: '{{ vpc_name }}' + name: "{{ vpc_name }}" ipam: - network_ip: '{{ network_ip }}' - network_prefix: '{{ network_prefix }}' - gateway_ip: '{{ gateway_ip_address }}' + network_ip: "{{ network_ip }}" + network_prefix: "{{ network_prefix }}" + gateway_ip: "{{ gateway_ip_address }}" ip_pools: - - start_ip: '{{ start_address }}' - end_ip: '{{ end_address }}' + - start_ip: "{{ start_address }}" + end_ip: "{{ end_address }}" dhcp: - dns_servers: '{{ dns_servers }}' - domain_search: '{{ domain_search }}' - domain_name: '{{ domain_name }}' - tftp_server_name: '{{ tftp_server_name }}' - boot_file_name: '{{ boot_file }}' + dns_servers: "{{ dns_servers }}" + domain_search: "{{ domain_search }}" + domain_name: "{{ domain_name }}" + tftp_server_name: "{{ tftp_server_name }}" + boot_file_name: "{{ boot_file }}" register: result ignore_errors: true diff --git a/examples/subnet_info.yml b/examples/subnet_info.yml index ce5bb045d..e4cf5e36d 100644 --- a/examples/subnet_info.yml +++ b/examples/subnet_info.yml @@ -2,8 +2,6 @@ - name: Subnet_Info playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -13,25 +11,25 @@ tasks: - name: List subnets using subnet_type filter criteria - ntnx_subnets_info: + nutanix.ncp.ntnx_subnets_info: filter: - subnet_type: "VLAN" + subnet_type: "VLAN" kind: subnet register: result - ignore_errors: True + ignore_errors: true - name: List subnets using length, offset and vlan_id ascending sorting - ntnx_subnets_info: + nutanix.ncp.ntnx_subnets_info: length: 1 offset: 2 sort_order: "ASCENDING" sort_attribute: "vlan_id" check_mode: true register: result - ignore_errors: True + ignore_errors: true - name: List subnets filter and custom_filter - ntnx_subnets_info: + nutanix.ncp.ntnx_subnets_info: filter: name: custom_filter: diff --git a/examples/user-groups.yml b/examples/user-groups.yml index 9dcc96e91..2075ee485 100644 --- a/examples/user-groups.yml +++ b/examples/user-groups.yml @@ -1,9 +1,7 @@ --- -- name: user_group playbook +- name: User_group playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -11,8 +9,8 @@ nutanix_password: validate_certs: false tasks: - - name: Setting Variables - set_fact: + - name: Setting Variables + ansible.builtin.set_fact: distinguished_name: "" principal_name: "" directory_service_uuid: "" @@ -20,31 +18,31 @@ project: uuid: "" - - name: create user group - ntnx_user_groups: - distinguished_name: "{{distinguished_name}}" - project: - uuid: "{{project.uuid}}" 
- categories: - Environment: - - "Dev" - register: result + - name: Create user group + nutanix.ncp.ntnx_user_groups: + distinguished_name: "{{distinguished_name}}" + project: + uuid: "{{project.uuid}}" + categories: + Environment: + - "Dev" + register: result - - name: delete user group - ntnx_user_groups: - state: absent - user_group_uuid: "{{result.user_group_uuid}}" - register: result + - name: Delete user group + nutanix.ncp.ntnx_user_groups: + state: absent + user_group_uuid: "{{result.user_group_uuid}}" + register: result - - name: create user group with idp - ntnx_user_groups: - idp: - idp_uuid: "{{identity_provider_uuid}}" - group_name: test_group_987 - register: result + - name: Create user group with idp + nutanix.ncp.ntnx_user_groups: + idp: + idp_uuid: "{{identity_provider_uuid}}" + group_name: test_group_987 + register: result - - name: delete user group - ntnx_user_groups: - state: absent - user_group_uuid: "{{result.user_group_uuid}}" - register: result + - name: Delete user group + nutanix.ncp.ntnx_user_groups: + state: absent + user_group_uuid: "{{result.user_group_uuid}}" + register: result diff --git a/examples/user.yml b/examples/user.yml index e70b61b14..1afca3252 100644 --- a/examples/user.yml +++ b/examples/user.yml @@ -1,9 +1,7 @@ --- -- name: users playbook +- name: Users playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -11,51 +9,51 @@ nutanix_password: validate_certs: false tasks: - - name: Setting Variables - set_fact: + - name: Setting Variables + ansible.builtin.set_fact: directory_service_uuid: "" principal_name: "" project: uuid: "" identity_provider_uuid: "" - - name: create local user - ntnx_users: - principal_name: "{{principal_name}}" - directory_service_uuid: "{{directory_service_uuid}}" - register: result + - name: Create local user + nutanix.ncp.ntnx_users: + principal_name: "{{principal_name}}" + directory_service_uuid: "{{directory_service_uuid}}" + register: result - - name: Delete created user - ntnx_users: - state: absent - user_uuid: "{{ result.user_uuid }}" + - name: Delete created user + nutanix.ncp.ntnx_users: + state: absent + user_uuid: "{{ result.user_uuid }}" - - name: create local user with project and categories - ntnx_users: - principal_name: "{{principal_name}}" - directory_service_uuid: "{{directory_service_uuid}}" - project: - uuid: "{{project.uuid}}" - categories: - Environment: - - "Dev" - AppType: - - "Default" - register: result + - name: Create local user with project and categories + nutanix.ncp.ntnx_users: + principal_name: "{{principal_name}}" + directory_service_uuid: "{{directory_service_uuid}}" + project: + uuid: "{{project.uuid}}" + categories: + Environment: + - "Dev" + AppType: + - "Default" + register: result - - name: Delete created user - ntnx_users: - state: absent - user_uuid: "{{ result.user_uuid }}" + - name: Delete created user + nutanix.ncp.ntnx_users: + state: absent + user_uuid: "{{ result.user_uuid }}" - - name: create idp user - ntnx_users: - identity_provider_uuid: "{{identity_provider_uuid}}" - username: testing_user - register: result - ignore_errors: true + - name: Create idp user + nutanix.ncp.ntnx_users: + identity_provider_uuid: "{{identity_provider_uuid}}" + username: testing_user + register: result + ignore_errors: true - - name: Delete created user - ntnx_users: - state: absent - user_uuid: "{{ result.user_uuid }}" + - name: Delete created user + nutanix.ncp.ntnx_users: + state: absent + user_uuid: "{{ 
result.user_uuid }}" diff --git a/examples/vm.yml b/examples/vm.yml index f88ab7064..e6c83b471 100644 --- a/examples/vm.yml +++ b/examples/vm.yml @@ -2,8 +2,6 @@ - name: VM playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -11,8 +9,8 @@ nutanix_password: validate_certs: false tasks: - - name: Setting Variables - set_fact: + - name: Setting Variables + ansible.builtin.set_fact: cluster_name: "" script_path: "" subnet_name: "" @@ -20,55 +18,56 @@ password: "" fqdn: "" - - name: Create Cloud-init Script file - copy: - dest: "cloud_init.yml" - content: | - #cloud-config - chpasswd: - list: | - root: "{{ password }}" - expire: False - fqdn: "{{ fqdn }}" + - name: Create Cloud-init Script file + ansible.builtin.copy: + mode: "0644" + dest: "cloud_init.yml" + content: | + #cloud-config + chpasswd: + list: | + root: "{{ password }}" + expire: False + fqdn: "{{ fqdn }}" - - name: create Vm - ntnx_vms: - state: present - name: "ansible_automation_demo" - desc: "ansible_vm_description" - categories: - AppType: - - "Apache_Spark" - cluster: - name: "{{cluster_name}}" - networks: - - is_connected: True - subnet: - name: "{{ subnet_name }}" - # mention cluster only when there are multiple subnets with same name accross clusters - # and subnet name is set above - cluster: - name: "{{cluster_name}}" - disks: - - type: "DISK" - size_gb: 30 - bus: "SATA" - clone_image: - name: "{{ image_name }}" - vcpus: 1 - cores_per_vcpu: 1 - memory_gb: 1 - guest_customization: - type: "cloud_init" - script_path: "./cloud_init.yml" - is_overridable: True - register: output + - name: Create Vm + nutanix.ncp.ntnx_vms: + state: present + name: "ansible_automation_demo" + desc: "ansible_vm_description" + categories: + AppType: + - "Apache_Spark" + cluster: + name: "{{cluster_name}}" + networks: + - is_connected: true + subnet: + name: "{{ subnet_name }}" + # mention cluster only when there are multiple subnets with same name accross clusters + # and subnet name is set above + cluster: + name: "{{cluster_name}}" + disks: + - type: "DISK" + size_gb: 30 + bus: "SATA" + clone_image: + name: "{{ image_name }}" + vcpus: 1 + cores_per_vcpu: 1 + memory_gb: 1 + guest_customization: + type: "cloud_init" + script_path: "./cloud_init.yml" + is_overridable: true + register: output - - name: output of vm created - debug: - msg: '{{ output }}' + - name: Output of vm created + ansible.builtin.debug: + msg: "{{ output }}" - - name: delete VM - ntnx_vms: - state: absent - vm_uuid: "{{output.vm_uuid}}" + - name: Delete VM + nutanix.ncp.ntnx_vms: + state: absent + vm_uuid: "{{output.vm_uuid}}" diff --git a/examples/vm_info.yml b/examples/vm_info.yml index 309102064..6ae7dc8e3 100644 --- a/examples/vm_info.yml +++ b/examples/vm_info.yml @@ -2,8 +2,6 @@ - name: VM_Info playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,34 +10,34 @@ validate_certs: false tasks: - name: Setting Variables - set_fact: + ansible.builtin.set_fact: vm_name: "" - name: List vms using name filter criteria - ntnx_vms_info: + nutanix.ncp.ntnx_vms_info: filter: vm_name: "{{ vm_name }}" kind: vm register: result - ignore_errors: True + ignore_errors: true - name: List vms using FIQL filter string - ntnx_vms_info: + nutanix.ncp.ntnx_vms_info: filter_string: "vm_name=={{vm.name}};power_state==off" register: result - ignore_errors: True + ignore_errors: true - name: List vms using length, offset and ascending 
vm_name sorting - ntnx_vms_info: + nutanix.ncp.ntnx_vms_info: length: 10 offset: 1 sort_order: "ASCENDING" sort_attribute: "vm_name" register: result - ignore_errors: True + ignore_errors: true - name: List vms using filter and custom_filter - ntnx_vms_info: + nutanix.ncp.ntnx_vms_info: filter: vm_name: custom_filter: diff --git a/examples/vm_operations.yml b/examples/vm_operations.yml index c6bef4dbc..54b1f24d7 100644 --- a/examples/vm_operations.yml +++ b/examples/vm_operations.yml @@ -3,8 +3,6 @@ - name: VM operations playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,31 +10,31 @@ nutanix_password: validate_certs: false tasks: - - name: Setting Variables - set_fact: + - name: Setting Variables + ansible.builtin.set_fact: script_path: "" subnet_name: "" vm_uuid: "" - - name: hard power off the vm - ntnx_vms: + - name: Hard power off the vm + nutanix.ncp.ntnx_vms: state: hard_poweroff vm_uuid: "{{ vm_uuid }}" - register: result - ignore_errors: true + register: result + ignore_errors: true - - name: create_ova_image while vm is on - ntnx_vms_ova: + - name: Create_ova_image while vm is on + nutanix.ncp.ntnx_vms_ova: state: present src_vm_uuid: "{{ vm_uuid }}" name: integration_test_VMDK_ova file_format: VMDK wait: true - register: result - ignore_errors: true + register: result + ignore_errors: true - - name: clone vm while it's off also add network and script - ntnx_vms_clone: + - name: Clone vm while it's off also add network and script + nutanix.ncp.ntnx_vms_clone: state: present src_vm_uuid: "{{ vm_uuid }}" networks: @@ -46,6 +44,6 @@ guest_customization: type: "cloud_init" script_path: "{{ script_path }}" - is_overridable: True - register: result - ignore_errors: true + is_overridable: true + register: result + ignore_errors: true diff --git a/examples/vm_update.yml b/examples/vm_update.yml index 45de3d642..33865a528 100644 --- a/examples/vm_update.yml +++ b/examples/vm_update.yml @@ -3,8 +3,6 @@ - name: VM update playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,8 +10,8 @@ nutanix_password: validate_certs: false tasks: - - name: Setting Variables - set_fact: + - name: Setting Variables + ansible.builtin.set_fact: cluster_name: "" script_path: "" subnet_name: "" @@ -26,56 +24,56 @@ remove_disk_uuid: "" subnet_uuid: "" - - name: Update VM - ntnx_vms: - vm_uuid: "{{ vm_uuid }}" - name: updated - desc: updated - categories: - AppType: - - Apache_Spark - disks: - - type: "DISK" - clone_image: - name: "{{ image_name }}" - bus: "SCSI" - size_gb: 20 - - type: DISK - size_gb: 3 - bus: PCI - - type: DISK - size_gb: 1 - bus: SCSI - storage_container: - uuid: "{{ storage_container_uuid }}" - networks: - - is_connected: true - subnet: - uuid: "{{ network_dhcp_uuid }}" - - is_connected: false - subnet: - uuid: "{{ static.uuid }}" - private_ip: "{{ network_static_ip }}" - register: result + - name: Update VM + nutanix.ncp.ntnx_vms: + vm_uuid: "{{ vm_uuid }}" + name: updated + desc: updated + categories: + AppType: + - Apache_Spark + disks: + - type: "DISK" + clone_image: + name: "{{ image_name }}" + bus: "SCSI" + size_gb: 20 + - type: DISK + size_gb: 3 + bus: PCI + - type: DISK + size_gb: 1 + bus: SCSI + storage_container: + uuid: "{{ storage_container_uuid }}" + networks: + - is_connected: true + subnet: + uuid: "{{ network_dhcp_uuid }}" + - is_connected: false + subnet: + uuid: "{{ static.uuid }}" + private_ip: "{{ 
network_static_ip }}" + register: result - - name: Update VM by deleting and editing disks and subnets - ntnx_vms: - vm_uuid: "{{ vm_uuid }}" - name: update diks - desc: update disks - disks: - - type: "DISK" - uuid: "{{ disk_uuid }}" - size_gb: 30 - - state: absent - uuid: "{{ remove_disk_uuid }}" - networks: - - state: absent - uuid: "{{ subnet_uuid }}" - register: result + - name: Update VM by deleting and editing disks and subnets + nutanix.ncp.ntnx_vms: + vm_uuid: "{{ vm_uuid }}" + name: update diks + desc: update disks + disks: + - type: "DISK" + uuid: "{{ disk_uuid }}" + size_gb: 30 + - state: absent + uuid: "{{ remove_disk_uuid }}" + networks: + - state: absent + uuid: "{{ subnet_uuid }}" + register: result - - name: Update VM by deleting it - ntnx_vms: - state: absent - vm_uuid: "{{ vm_uuid }}" - register: result + - name: Update VM by deleting it + nutanix.ncp.ntnx_vms: + state: absent + vm_uuid: "{{ vm_uuid }}" + register: result diff --git a/examples/vpc.yml b/examples/vpc.yml index 3af08fa53..5bf24dc1e 100644 --- a/examples/vpc.yml +++ b/examples/vpc.yml @@ -2,8 +2,6 @@ - name: VPC playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,21 +10,21 @@ validate_certs: false tasks: - name: Setting Variables - set_fact: + ansible.builtin.set_fact: external_subnet_name: "" vm_name: "" - name: Create min VPC with subnet name - ntnx_vpcs: + nutanix.ncp.ntnx_vpcs: state: present - wait: True + wait: true name: MinVPC external_subnets: - subnet_name: "{{ external_subnet.name }}" register: result - name: Delete all created vpcs - ntnx_vpcs: + nutanix.ncp.ntnx_vpcs: state: absent vpc_uuid: "{{ result.vpc_uuid }}" register: result diff --git a/examples/vpc_info.yml b/examples/vpc_info.yml index 652d157fd..583a2ba6f 100644 --- a/examples/vpc_info.yml +++ b/examples/vpc_info.yml @@ -2,8 +2,6 @@ - name: VPC_Info playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,24 +10,23 @@ validate_certs: false tasks: - name: Setting Variables - set_fact: + ansible.builtin.set_fact: vpc_name: "" - name: List VPC using name filter criteria - ntnx_vpcs_info: + nutanix.ncp.ntnx_vpcs_info: filter: name: "{{ vpc_name }}" kind: vpc register: result - ignore_errors: True - + ignore_errors: true - name: List VPC using length, offset and descending name sorting - ntnx_vpcs_info: + nutanix.ncp.ntnx_vpcs_info: length: 4 offset: 1 sort_order: "DESCENDING" sort_attribute: "name" check_mode: true register: result - ignore_errors: True + ignore_errors: true From 19ff5eb9e9ea72ca243ca4e0401991c81232177e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Sj=C3=B6gren?= Date: Sun, 29 Sep 2024 16:53:17 +0200 Subject: [PATCH 05/15] plugins and tests spell checked (#491) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Thomas Sjögren --- plugins/doc_fragments/ntnx_operations.py | 4 +-- plugins/doc_fragments/ntnx_vms_base.py | 2 +- plugins/inventory/ntnx_prism_vm_inventory.py | 2 +- plugins/module_utils/entity.py | 10 +++---- .../ndb/database_engines/database_engine.py | 2 +- .../ndb/database_engines/db_engine_factory.py | 2 +- .../module_utils/ndb/database_instances.py | 2 +- plugins/module_utils/ndb/db_server_vm.py | 6 ++-- .../ndb/profiles/profile_types.py | 4 +-- plugins/module_utils/ndb/time_machines.py | 2 +- plugins/module_utils/prism/acps.py | 2 +- plugins/module_utils/prism/images.py | 2 +- 
plugins/module_utils/prism/permissions.py | 2 +- .../module_utils/prism/projects_internal.py | 2 +- .../module_utils/prism/protection_rules.py | 2 +- plugins/module_utils/prism/static_routes.py | 2 +- plugins/module_utils/prism/subnets.py | 2 +- plugins/modules/ntnx_acps.py | 2 +- plugins/modules/ntnx_categories.py | 2 +- plugins/modules/ntnx_floating_ips_info.py | 4 +-- plugins/modules/ntnx_foundation.py | 26 ++++++++--------- plugins/modules/ntnx_foundation_central.py | 2 +- .../modules/ntnx_foundation_image_upload.py | 2 +- .../ntnx_foundation_node_network_info.py | 2 +- .../modules/ntnx_image_placement_policy.py | 2 +- plugins/modules/ntnx_images.py | 4 +-- plugins/modules/ntnx_karbon_clusters_info.py | 2 +- .../modules/ntnx_karbon_registries_info.py | 2 +- .../ntnx_ndb_database_clone_refresh.py | 2 +- plugins/modules/ntnx_ndb_database_clones.py | 2 +- .../modules/ntnx_ndb_database_snapshots.py | 4 +-- plugins/modules/ntnx_ndb_databases.py | 8 +++--- plugins/modules/ntnx_ndb_db_servers_info.py | 2 +- plugins/modules/ntnx_ndb_maintenance_tasks.py | 2 +- .../modules/ntnx_ndb_maintenance_window.py | 6 ++-- plugins/modules/ntnx_ndb_profiles.py | 8 +++--- plugins/modules/ntnx_ndb_profiles_info.py | 2 +- plugins/modules/ntnx_ndb_register_database.py | 6 ++-- plugins/modules/ntnx_ndb_slas.py | 4 +-- .../modules/ntnx_ndb_time_machines_info.py | 4 +-- plugins/modules/ntnx_ndb_vlans.py | 2 +- plugins/modules/ntnx_pbrs.py | 6 ++-- plugins/modules/ntnx_protection_rules.py | 2 +- plugins/modules/ntnx_recovery_plan_jobs.py | 6 ++-- plugins/modules/ntnx_recovery_plans.py | 10 +++---- plugins/modules/ntnx_security_rules.py | 2 +- plugins/modules/ntnx_service_groups.py | 2 +- plugins/modules/ntnx_static_routes.py | 6 ++-- plugins/modules/ntnx_user_groups.py | 2 +- plugins/modules/ntnx_users.py | 4 +-- plugins/modules/ntnx_vms.py | 18 ++++++------ .../ntnx_address_groups/tasks/update.yml | 2 +- .../ntnx_categories/tasks/all_operations.yml | 2 +- .../ntnx_foundation/tasks/image_nodes.yml | 2 +- .../tasks/negative_scenarios.yml | 6 ++-- .../tasks/get_aos.yml | 2 +- .../tasks/configure_ipmi.yml | 2 +- .../tasks/image_nodes.yml | 2 +- .../tasks/create_key.yml | 6 ++-- .../tasks/key_info.yml | 8 +++--- .../tasks/get_cluster_info.yml | 6 ++-- .../tasks/get_node_info.yml | 6 ++-- .../tasks/discover_nodes.yml | 6 ++-- .../tasks/get_hypervisors.yml | 2 +- .../tasks/negative_scenarios.yml | 2 +- .../tasks/upload.yml | 4 +-- .../tasks/get_info.yml | 2 +- .../tasks/image_nodes.yml | 6 ++-- .../tasks/crud.yml | 4 +-- .../tasks/negative_scenarios.yml | 24 ++++++++-------- .../ntnx_karbon_registries/tasks/create.yml | 2 +- .../tasks/negativ_scenarios.yml | 2 +- .../tasks/tests.yml | 2 +- .../targets/ntnx_ndb_clusters/tasks/CRUD.yml | 15 +++++----- .../tasks/all_actions.yml | 6 ++-- .../tasks/tests.yml | 2 +- .../ntnx_ndb_db_server_vms/tasks/crud.yml | 6 ++-- .../tasks/crud.yml | 2 +- .../tasks/network_profile.yml | 2 +- .../ntnx_ndb_software_profiles/tasks/crud.yml | 2 +- .../targets/ntnx_ova/tasks/create_ova.yml | 6 ++-- .../ntnx_projects/tasks/create_project.yml | 2 +- .../tasks/projects_with_role_mappings.yml | 6 ++-- .../ntnx_projects/tasks/update_project.yml | 2 +- .../tasks/crud.yml | 2 +- .../tasks/get_security_rules.yml | 4 +-- .../ntnx_static_routes/tasks/create.yml | 4 +-- .../targets/ntnx_user_groups/tasks/create.yml | 2 +- .../targets/ntnx_vms_clone/tasks/create.yml | 6 ++-- .../tasks/negative_scenarios.yml | 4 +-- .../targets/nutanix_vms/tasks/create.yml | 12 ++++---- 
.../targets/nutanix_vms/tasks/delete.yml | 6 ++-- .../nutanix_vms/tasks/negtaive_scenarios.yml | 4 +-- .../nutanix_vms/tasks/negtaive_vm_update.yml | 14 +++++----- .../nutanix_vms/tasks/vm_operations.yml | 28 +++++++++---------- .../targets/nutanix_vms/tasks/vm_update.yml | 16 +++++------ .../nutanix_vms_info/tasks/list_vms.yml | 6 ++-- .../targets/prepare_ndb_env/vars/main.yml | 4 +-- 98 files changed, 236 insertions(+), 237 deletions(-) diff --git a/plugins/doc_fragments/ntnx_operations.py b/plugins/doc_fragments/ntnx_operations.py index 50ecb80a0..0b4af7c28 100644 --- a/plugins/doc_fragments/ntnx_operations.py +++ b/plugins/doc_fragments/ntnx_operations.py @@ -10,13 +10,13 @@ class ModuleDocFragment(object): - # Plugin options for ntnx CRUD opperations + # Plugin options for ntnx CRUD operations DOCUMENTATION = r""" options: state: description: - Specify state - - If C(state) is set to C(present) then the opperation will be create the item + - If C(state) is set to C(present) then the operation will be create the item - >- If C(state) is set to C(absent) and if the item exists, then item is removed. diff --git a/plugins/doc_fragments/ntnx_vms_base.py b/plugins/doc_fragments/ntnx_vms_base.py index 635168013..c2586dbdd 100644 --- a/plugins/doc_fragments/ntnx_vms_base.py +++ b/plugins/doc_fragments/ntnx_vms_base.py @@ -72,7 +72,7 @@ class ModuleDocFragment(object): - absent subnet: description: - - Name or UUID of the subnet to which the VM should be connnected + - Name or UUID of the subnet to which the VM should be connected type: dict suboptions: name: diff --git a/plugins/inventory/ntnx_prism_vm_inventory.py b/plugins/inventory/ntnx_prism_vm_inventory.py index dd6d7fe9b..4a085861f 100644 --- a/plugins/inventory/ntnx_prism_vm_inventory.py +++ b/plugins/inventory/ntnx_prism_vm_inventory.py @@ -93,7 +93,7 @@ def jsonify(self, data): class InventoryModule(BaseInventoryPlugin, Constructable): - """Nutanix VM dynamic invetory module for ansible""" + """Nutanix VM dynamic inventory module for ansible""" NAME = "nutanix.ncp.ntnx_prism_vm_inventory" diff --git a/plugins/module_utils/entity.py b/plugins/module_utils/entity.py index 953b7828d..63a79c7f9 100644 --- a/plugins/module_utils/entity.py +++ b/plugins/module_utils/entity.py @@ -221,7 +221,7 @@ def list( return resp - # "params" can be used to override module.params to create spec by other modules backened + # "params" can be used to override module.params to create spec by other modules backends def get_spec(self, old_spec=None, params=None, **kwargs): spec = copy.deepcopy(old_spec) or self._get_default_spec() @@ -344,7 +344,7 @@ def _build_url_with_query(self, url, query): def _fetch_url( self, url, method, data=None, raise_error=True, no_response=False, timeout=30 ): - # only jsonify if content-type supports, added to avoid incase of form-url-encodeded type data + # only jsonify if content-type supports, added to avoid in case of form-url-encodeded type data if self.headers["Content-Type"] == "application/json" and data is not None: data = self.module.jsonify(data) @@ -362,10 +362,10 @@ def _fetch_url( body = None - # buffer size with ref. to max read size of http.client.HTTPResponse.read() defination + # buffer size with ref. 
to max read size of http.client.HTTPResponse.read() definition buffer_size = 65536 - # From ansible-core>=2.13, incase of http error, urllib.HTTPError object is returned in resp + # From ansible-core>=2.13, in case of http error, urllib.HTTPError object is returned in resp # as per the docs of ansible we need to use body in that case. if not resp or status_code >= 400: # get body containing error @@ -500,7 +500,7 @@ def _filter_entities(entities, custom_filters): return filtered_entities -# Read files in chunks and yeild it +# Read files in chunks and yield it class CreateChunks(object): def __init__(self, filename, chunk_size=1 << 13): self.filename = filename diff --git a/plugins/module_utils/ndb/database_engines/database_engine.py b/plugins/module_utils/ndb/database_engines/database_engine.py index aade70764..6ebb16f96 100644 --- a/plugins/module_utils/ndb/database_engines/database_engine.py +++ b/plugins/module_utils/ndb/database_engines/database_engine.py @@ -29,7 +29,7 @@ def build_spec_db_instance_register_action_arguments(self, payload, config): def build_spec_db_server_vm_register_action_arguments(self, payload, config): """ - Implement this method to add database engine specific properties for registeration database server vm + Implement this method to add database engine specific properties for registration database server vm """ return payload, None diff --git a/plugins/module_utils/ndb/database_engines/db_engine_factory.py b/plugins/module_utils/ndb/database_engines/db_engine_factory.py index 1467b42ef..40f69f092 100644 --- a/plugins/module_utils/ndb/database_engines/db_engine_factory.py +++ b/plugins/module_utils/ndb/database_engines/db_engine_factory.py @@ -20,7 +20,7 @@ def get_engine_type(module): if type in module.params: return type, None - return None, "Input doesn't conatains config for allowed engine types of databases" + return None, "Input doesn't contains config for allowed engine types of databases" def create_db_engine(module, engine_type=None, db_architecture=None): diff --git a/plugins/module_utils/ndb/database_instances.py b/plugins/module_utils/ndb/database_instances.py index 77bacde4a..1f5208afd 100644 --- a/plugins/module_utils/ndb/database_instances.py +++ b/plugins/module_utils/ndb/database_instances.py @@ -281,7 +281,7 @@ def get_engine_type(self): return ( None, - "Input doesn't conatains config for allowed engine types of databases", + "Input doesn't contains config for allowed engine types of databases", ) def get_db_engine_spec(self, payload, params=None, **kwargs): diff --git a/plugins/module_utils/ndb/db_server_vm.py b/plugins/module_utils/ndb/db_server_vm.py index dee8548ba..a46ba1f4d 100644 --- a/plugins/module_utils/ndb/db_server_vm.py +++ b/plugins/module_utils/ndb/db_server_vm.py @@ -451,7 +451,7 @@ def get_spec_registered_vm_for_db_instance_registration( if not vm_info.get("ipAddresses", []): return None, "No IP address found for given db server vm" - # picking first IP of db server vm for registraion + # picking first IP of db server vm for registration payload["vmIp"] = vm_info["ipAddresses"][0] elif params.get("ip"): @@ -572,7 +572,7 @@ def build_spec_software_profile(self, payload, profile): return payload, None def build_spec_network_profile(self, payload, profile): - # set network prfile + # set network profile network_profile = NetworkProfile(self.module) uuid, err = network_profile.get_profile_uuid(profile) if err: @@ -631,7 +631,7 @@ def build_spec_vms(self, payload, vms, **kwargs): # noqa: C901 cluster = Cluster(self.module) 
clusters = cluster.get_all_clusters_name_uuid_map() - # spec with default vlaues + # spec with default values spec = { "properties": [], "vmName": "", diff --git a/plugins/module_utils/ndb/profiles/profile_types.py b/plugins/module_utils/ndb/profiles/profile_types.py index 1aadd8e54..d51c7c6c3 100644 --- a/plugins/module_utils/ndb/profiles/profile_types.py +++ b/plugins/module_utils/ndb/profiles/profile_types.py @@ -233,7 +233,7 @@ def _build_spec_multi_networks(self, payload, vlans): cluster_name = clusters_uuid_name_map[cluster_uuid] if not cluster_name: - return None, "Pleae provide uuid or name for getting cluster info" + return None, "Please provide uuid or name for getting cluster info" properties_map["CLUSTER_NAME_" + str(i)] = cluster_name properties_map["CLUSTER_ID_" + str(i)] = clusters_name_uuid_map[ @@ -485,7 +485,7 @@ def get_profile_type(module): if type in module.params: return type, None - return None, "Input doesn't conatains config for allowed profile types of databases" + return None, "Input doesn't contains config for allowed profile types of databases" def get_profile_type_obj(module, profile_type=None): # -> tuple[Profile, str]: diff --git a/plugins/module_utils/ndb/time_machines.py b/plugins/module_utils/ndb/time_machines.py index 7f78858c7..bc1e566a9 100644 --- a/plugins/module_utils/ndb/time_machines.py +++ b/plugins/module_utils/ndb/time_machines.py @@ -170,7 +170,7 @@ def get_spec(self, old_spec, params=None, **kwargs): if err: return None, err - # set destination clusters incase of HA instance + # set destination clusters in case of HA instance if params.get("clusters"): cluster_uuids = [] diff --git a/plugins/module_utils/prism/acps.py b/plugins/module_utils/prism/acps.py index 5ae0fe327..dd5f6d800 100644 --- a/plugins/module_utils/prism/acps.py +++ b/plugins/module_utils/prism/acps.py @@ -135,7 +135,7 @@ def build_role_permissions_based_context(self, role_uuid): if permission.get("name"): role_permissions_names.append(permission["name"]) - # Get predefined premissions to entity access expressions from constants + # Get predefined permissions to entity access expressions from constants expressions_dict = CONSTANTS.EntityFilterExpressionList.PERMISSION_TO_ACCESS_MAP permission_names = expressions_dict.keys() diff --git a/plugins/module_utils/prism/images.py b/plugins/module_utils/prism/images.py index 43c0bff4a..169ff24e4 100644 --- a/plugins/module_utils/prism/images.py +++ b/plugins/module_utils/prism/images.py @@ -20,7 +20,7 @@ def __init__(self, module, upload_image=False): "Accept": "application/json", } - # add checksum headers if given incase of local upload + # add checksum headers if given in case of local upload checksum = module.params.get("checksum") if checksum and module.params.get("source_path"): additional_headers["X-Nutanix-Checksum-Type"] = checksum[ diff --git a/plugins/module_utils/prism/permissions.py b/plugins/module_utils/prism/permissions.py index 32e8326ce..5a701bc8a 100644 --- a/plugins/module_utils/prism/permissions.py +++ b/plugins/module_utils/prism/permissions.py @@ -28,7 +28,7 @@ def get_uuid(self, value, key="name", raise_error=True, no_response=False): if entity["spec"]["name"] == value: return entity["metadata"]["uuid"] - # Incase there are more entities to check + # In case there are more entities to check while resp["total_matches"] > resp["length"] + resp["offset"]: filter_spec["length"] = self.entities_limitation filter_spec["offset"] = filter_spec["offset"] + self.entities_limitation diff --git 
a/plugins/module_utils/prism/projects_internal.py b/plugins/module_utils/prism/projects_internal.py index a086e5864..4c7ced086 100644 --- a/plugins/module_utils/prism/projects_internal.py +++ b/plugins/module_utils/prism/projects_internal.py @@ -365,7 +365,7 @@ def _build_spec_role_mappings(self, payload, role_mappings): _acp = ACP(self.module) # First check existing acps of project w.r.t to role mapping, if UPDATE/DELETE of acp is required - # Incase its a UPDATE acp for role we pop the entry from role_user_groups_map, + # In case its a UPDATE acp for role we pop the entry from role_user_groups_map, # so that we are left with roles for which new acps are to be created. for acp in payload["spec"]["access_control_policy_list"]: diff --git a/plugins/module_utils/prism/protection_rules.py b/plugins/module_utils/prism/protection_rules.py index ed4bd751a..5808eaae3 100644 --- a/plugins/module_utils/prism/protection_rules.py +++ b/plugins/module_utils/prism/protection_rules.py @@ -110,7 +110,7 @@ def _build_spec_schedules(self, payload, schedules): ): return ( None, - "rpo, rpo_unit, snapshot_type and atleast one policy are required fields for aysynchronous snapshot schedule", + "rpo, rpo_unit, snapshot_type and at least one policy are required fields for aysynchronous snapshot schedule", ) spec["recovery_point_objective_secs"], err = convert_to_secs( diff --git a/plugins/module_utils/prism/static_routes.py b/plugins/module_utils/prism/static_routes.py index 5627e943b..86304b963 100644 --- a/plugins/module_utils/prism/static_routes.py +++ b/plugins/module_utils/prism/static_routes.py @@ -51,7 +51,7 @@ def _build_default_route_spec(self, payload, next_hop): return payload, None def _build_spec_static_routes(self, payload, inp_static_routes): - # since static route list has to be overriden + # since static route list has to be overridden if payload["spec"]["resources"].get("default_route_nexthop"): payload["spec"]["resources"].pop("default_route_nexthop") static_routes_list = [] diff --git a/plugins/module_utils/prism/subnets.py b/plugins/module_utils/prism/subnets.py index 1eee1e030..d7eac4632 100644 --- a/plugins/module_utils/prism/subnets.py +++ b/plugins/module_utils/prism/subnets.py @@ -159,7 +159,7 @@ def get_subnet_uuid(config, module): name = config.get("name") or config.get("subnet_name") uuid = "" - # incase subnet of particular cluster is needed + # in case subnet of particular cluster is needed if config.get("cluster_uuid"): filter_spec = {"filter": "{0}=={1}".format("name", name)} resp = subnet.list(data=filter_spec) diff --git a/plugins/modules/ntnx_acps.py b/plugins/modules/ntnx_acps.py index afd57b89d..6ca411489 100644 --- a/plugins/modules/ntnx_acps.py +++ b/plugins/modules/ntnx_acps.py @@ -10,7 +10,7 @@ DOCUMENTATION = r""" --- module: ntnx_acps -short_description: acp module which suports acp Create, update and delete operations +short_description: acp module which supports acp Create, update and delete operations version_added: 1.4.0 description: 'Create, Update, Delete acp' options: diff --git a/plugins/modules/ntnx_categories.py b/plugins/modules/ntnx_categories.py index a2855894e..9b01609d2 100644 --- a/plugins/modules/ntnx_categories.py +++ b/plugins/modules/ntnx_categories.py @@ -16,7 +16,7 @@ options: remove_values: description: - - it indicates to remove all values of the specfied category + - it indicates to remove all values of the specified category - This attribute can be only used with C(state) is absent - This attribute is mutually exclusive with C(values) when 
state is absent type: bool diff --git a/plugins/modules/ntnx_floating_ips_info.py b/plugins/modules/ntnx_floating_ips_info.py index bb0389e5f..1ac7b0819 100644 --- a/plugins/modules/ntnx_floating_ips_info.py +++ b/plugins/modules/ntnx_floating_ips_info.py @@ -10,7 +10,7 @@ DOCUMENTATION = r""" --- module: ntnx_floating_ips_info -short_description: Floting ips info module +short_description: Floating ips info module version_added: 1.0.0 description: 'Get floating_ip info' options: @@ -21,7 +21,7 @@ default: floating_ip fip_uuid: description: - - Floting ip UUID + - Floating ip UUID type: str extends_documentation_fragment: - nutanix.ncp.ntnx_credentials diff --git a/plugins/modules/ntnx_foundation.py b/plugins/modules/ntnx_foundation.py index f5b7a3d15..8c373fcb9 100644 --- a/plugins/modules/ntnx_foundation.py +++ b/plugins/modules/ntnx_foundation.py @@ -149,13 +149,13 @@ ipmi_password: description: - ipmi password, override default_ipmi_password - - mandatory incase of ipmi based imaging and bare metal nodes + - mandatory in case of ipmi based imaging and bare metal nodes type: str required: false ipmi_user: description: - ipmi user, override default_ipmi_user - - mandatory incase of ipmi based imaging and bare metal nodes + - mandatory in case of ipmi based imaging and bare metal nodes type: str required: false ipmi_netmask: @@ -180,7 +180,7 @@ required: false ipv6_address: description: - - ipv6 address, required incase of using cvm for imaging + - ipv6 address, required in case of using cvm for imaging type: str required: false device_hint: @@ -197,7 +197,7 @@ required: false current_network_interface: description: - - current network interface, required incase of using cvm for imaging + - current network interface, required in case of using cvm for imaging type: str required: false rdma_passthrough: @@ -295,7 +295,7 @@ required: false other_config: description: - - Auxillary lacp configurations. Applicable only for AHV + - Auxiliary lacp configurations. Applicable only for AHV type: list elements: str required: false @@ -327,7 +327,7 @@ required: false discovery_mode: description: - - discover and use existing network informatio pulled from internal info apis + - discover and use existing network information pulled from internal info apis - mutually exclusive with manual_mode - can override certain fields, which are pulled during discovery type: dict @@ -427,12 +427,12 @@ required: false ipv6_address: description: - - ipv6 address, required incase of using cvm for imaging + - ipv6 address, required in case of using cvm for imaging type: str required: false current_network_interface: description: - - current network interface, required incase of using cvm for imaging + - current network interface, required in case of using cvm for imaging type: str required: false cluster_id: @@ -443,13 +443,13 @@ ipmi_password: description: - ipmi password, override default_ipmi_password - - mandatory incase of ipmi based imaging and bare metal nodes + - mandatory in case of ipmi based imaging and bare metal nodes type: str required: false ipmi_user: description: - ipmi user, override default_ipmi_user - - mandatory incase of ipmi based imaging and bare metal nodes + - mandatory in case of ipmi based imaging and bare metal nodes type: str required: false device_hint: @@ -549,7 +549,7 @@ required: false other_config: description: - - Auxillary lacp configurations. Applicable only for AHV + - Auxiliary lacp configurations. 
Applicable only for AHV type: list elements: str required: false @@ -792,12 +792,12 @@ required: false default_ipmi_user: description: - - default ipmi username, required either at node leve or here incase of ipmi based imaging + - default ipmi username, required either at node level or here in case of ipmi based imaging type: str required: false default_ipmi_password: description: - - default ipmi password, required either at node leve or here incase of ipmi based imaging + - default ipmi password, required either at node level or here in case of ipmi based imaging type: str required: false skip_hypervisor: diff --git a/plugins/modules/ntnx_foundation_central.py b/plugins/modules/ntnx_foundation_central.py index 7c8acdb76..7e30db41c 100644 --- a/plugins/modules/ntnx_foundation_central.py +++ b/plugins/modules/ntnx_foundation_central.py @@ -363,7 +363,7 @@ """ RETURN = r""" -respone: +response: description: Sample response when only Imaging is done. returned: always type: dict diff --git a/plugins/modules/ntnx_foundation_image_upload.py b/plugins/modules/ntnx_foundation_image_upload.py index 719f66eaa..bd77547ed 100644 --- a/plugins/modules/ntnx_foundation_image_upload.py +++ b/plugins/modules/ntnx_foundation_image_upload.py @@ -17,7 +17,7 @@ source: description: - local full path of installer file where the ansible playbook runs - - mandatory incase of upload i.e. state=present + - mandatory in case of upload i.e. state=present type: str required: false filename: diff --git a/plugins/modules/ntnx_foundation_node_network_info.py b/plugins/modules/ntnx_foundation_node_network_info.py index 35f48856e..2feef029a 100644 --- a/plugins/modules/ntnx_foundation_node_network_info.py +++ b/plugins/modules/ntnx_foundation_node_network_info.py @@ -85,7 +85,7 @@ def get_node_network_details(module, result): timeout = module.params.get("timeout") resp = node_network_details.retrieve(nodes, timeout) if not resp: - result["error"] = "Faied to retrieve node network details" + result["error"] = "Failed to retrieve node network details" module.fail_json( msg="Failed to retrieve node network details via foundation", **result ) diff --git a/plugins/modules/ntnx_image_placement_policy.py b/plugins/modules/ntnx_image_placement_policy.py index 471d3c2e4..2d7da6e3e 100644 --- a/plugins/modules/ntnx_image_placement_policy.py +++ b/plugins/modules/ntnx_image_placement_policy.py @@ -84,7 +84,7 @@ description: - When set will remove all categories attached to the policy. 
- Mutually exclusive ith C(categories) - - It doesnot remove C(image_categories) or C(cluster_categories) + - It does not remove C(image_categories) or C(cluster_categories) required: false type: bool default: false diff --git a/plugins/modules/ntnx_images.py b/plugins/modules/ntnx_images.py index de98f2f66..0d3d64b50 100644 --- a/plugins/modules/ntnx_images.py +++ b/plugins/modules/ntnx_images.py @@ -67,7 +67,7 @@ type: dict remove_categories: description: - - set this flag to remove dettach all categories attached to image + - set this flag to remove detach all categories attached to image - mutually_exclusive with C(categories) type: bool required: false @@ -196,7 +196,7 @@ - Backup wait: true - - name: dettach all categories from existing image + - name: detach all categories from existing image ntnx_images: state: "present" image_uuid: "00000000-0000-0000-0000-000000000000" diff --git a/plugins/modules/ntnx_karbon_clusters_info.py b/plugins/modules/ntnx_karbon_clusters_info.py index bd4a8b51b..c5c9f14dd 100644 --- a/plugins/modules/ntnx_karbon_clusters_info.py +++ b/plugins/modules/ntnx_karbon_clusters_info.py @@ -139,7 +139,7 @@ returned: if fetch_kubeconfig is true type: str certificate: - description: ssh certifcate + description: ssh certificate returned: if fetch_ssh_credentials is true type: str expiry_time: diff --git a/plugins/modules/ntnx_karbon_registries_info.py b/plugins/modules/ntnx_karbon_registries_info.py index 6f11e446d..ae280f41f 100644 --- a/plugins/modules/ntnx_karbon_registries_info.py +++ b/plugins/modules/ntnx_karbon_registries_info.py @@ -94,7 +94,7 @@ def get_registries(module, result): # If there is no registries, # response will be empty list causing error in entity class - # so do status code checks here incase of other failures. + # so do status code checks here in case of other failures. # During failures response is of type dict else its list resp = registry.read(raise_error=False) if isinstance(resp, dict) and resp.get("code") >= 300: diff --git a/plugins/modules/ntnx_ndb_database_clone_refresh.py b/plugins/modules/ntnx_ndb_database_clone_refresh.py index e1e488b08..d7bbf67a7 100644 --- a/plugins/modules/ntnx_ndb_database_clone_refresh.py +++ b/plugins/modules/ntnx_ndb_database_clone_refresh.py @@ -11,7 +11,7 @@ module: ntnx_ndb_database_clone_refresh short_description: module for database clone refresh. version_added: 1.8.0 -description: moudle for refreshing database clone to certain point in time or snapshot. +description: module for refreshing database clone to certain point in time or snapshot. options: uuid: description: diff --git a/plugins/modules/ntnx_ndb_database_clones.py b/plugins/modules/ntnx_ndb_database_clones.py index 181894119..c7277b46e 100644 --- a/plugins/modules/ntnx_ndb_database_clones.py +++ b/plugins/modules/ntnx_ndb_database_clones.py @@ -127,7 +127,7 @@ use_authorized_server: description: - - conifgure authorized database server VM for hosting database clone + - configure authorized database server VM for hosting database clone type: dict suboptions: name: diff --git a/plugins/modules/ntnx_ndb_database_snapshots.py b/plugins/modules/ntnx_ndb_database_snapshots.py index 751ae1f8e..210e64a8e 100644 --- a/plugins/modules/ntnx_ndb_database_snapshots.py +++ b/plugins/modules/ntnx_ndb_database_snapshots.py @@ -25,13 +25,13 @@ type: str name: description: - - name of snaphsot. + - name of snapshot. 
- required for create - update is allowed type: str clusters: description: - - list of clusters incase snapshots needs to be replicated to secondary clusters + - list of clusters in case snapshots needs to be replicated to secondary clusters - if secondary clusters of time machines are mentioned, then this module won't track the replication process - clusters changes are not considered during update, for replication use ntnx_ndb_replicate_database_snapshots type: list diff --git a/plugins/modules/ntnx_ndb_databases.py b/plugins/modules/ntnx_ndb_databases.py index 1c8560164..733f1b6a9 100644 --- a/plugins/modules/ntnx_ndb_databases.py +++ b/plugins/modules/ntnx_ndb_databases.py @@ -331,7 +331,7 @@ - allowed for HA instance type: description: - - if its a HA or singe instance + - if its a HA or single instance - mandatory for creation type: str choices: ["single", "ha"] @@ -386,7 +386,7 @@ cluster: description: - cluster where they will be hosted - - this will overide default cluster provided for all vms + - this will override default cluster provided for all vms type: dict suboptions: name: @@ -402,7 +402,7 @@ network_profile: description: - network profile details - - this will overide default network profile provided for all vms + - this will override default network profile provided for all vms type: dict suboptions: name: @@ -418,7 +418,7 @@ compute_profile: description: - compute profile details for the node - - this will overide default compute profile provided for all vms + - this will override default compute profile provided for all vms type: dict suboptions: name: diff --git a/plugins/modules/ntnx_ndb_db_servers_info.py b/plugins/modules/ntnx_ndb_db_servers_info.py index fd18f8f20..91d8057d9 100644 --- a/plugins/modules/ntnx_ndb_db_servers_info.py +++ b/plugins/modules/ntnx_ndb_db_servers_info.py @@ -59,7 +59,7 @@ type: bool value: description: - - vlaue as per C(value_type) + - value as per C(value_type) type: str value_type: description: diff --git a/plugins/modules/ntnx_ndb_maintenance_tasks.py b/plugins/modules/ntnx_ndb_maintenance_tasks.py index 1b6411f0a..8e9c3f111 100644 --- a/plugins/modules/ntnx_ndb_maintenance_tasks.py +++ b/plugins/modules/ntnx_ndb_maintenance_tasks.py @@ -101,7 +101,7 @@ tasks: [] register: result -- name: Add maitenance window task for vm +- name: Add maintenance window task for vm ntnx_ndb_maintenance_tasks: db_server_vms: - name: "{{vm1_name_updated}}" diff --git a/plugins/modules/ntnx_ndb_maintenance_window.py b/plugins/modules/ntnx_ndb_maintenance_window.py index b8981627c..f8299636b 100644 --- a/plugins/modules/ntnx_ndb_maintenance_window.py +++ b/plugins/modules/ntnx_ndb_maintenance_window.py @@ -42,7 +42,7 @@ type: int start_time: description: - - start time of maintenance in formate 'hh:mm:ss' + - start time of maintenance in format 'hh:mm:ss' type: str timezone: description: @@ -51,11 +51,11 @@ type: str week_of_month: description: - - week of month for maitenance + - week of month for maintenance type: str day_of_week: description: - - day of week for maitenance + - day of week for maintenance type: str extends_documentation_fragment: diff --git a/plugins/modules/ntnx_ndb_profiles.py b/plugins/modules/ntnx_ndb_profiles.py index a617e0433..d0a3de9e5 100644 --- a/plugins/modules/ntnx_ndb_profiles.py +++ b/plugins/modules/ntnx_ndb_profiles.py @@ -286,7 +286,7 @@ checkpoint_completion_target: description: - checkpoint completion target - - deafult is 0.5 + - default is 0.5 type: float autovacuum_freeze_max_age: description: @@ -311,7 +311,7 @@ 
autovacuum_max_workers: description: - autovacuum max workers - - deafult is 3 + - default is 3 type: int autovacuum_vacuum_cost_delay: description: @@ -929,7 +929,7 @@ def check_profile_idempotency(old_spec, new_spec): if len(new_clusters) != len(old_clusters): return False - # update if availibility of cluster is required + # update if availability of cluster is required for cluster in new_clusters: if cluster not in old_clusters: return False @@ -1081,7 +1081,7 @@ def create_profile(module, result): result["response"] = resp uuid = resp.get("id") - # incase there is process of replication triggered, operation info is recieved + # in case there is process of replication triggered, operation info is received if profile_type == "software" and not uuid: uuid = resp.get("entityId") diff --git a/plugins/modules/ntnx_ndb_profiles_info.py b/plugins/modules/ntnx_ndb_profiles_info.py index f3a354dc3..ffed1b3b8 100644 --- a/plugins/modules/ntnx_ndb_profiles_info.py +++ b/plugins/modules/ntnx_ndb_profiles_info.py @@ -24,7 +24,7 @@ type: str version_id: description: - - vrsion uuid + - version uuid type: str latest_version: description: diff --git a/plugins/modules/ntnx_ndb_register_database.py b/plugins/modules/ntnx_ndb_register_database.py index 503e23bfb..0c8b963c9 100644 --- a/plugins/modules/ntnx_ndb_register_database.py +++ b/plugins/modules/ntnx_ndb_register_database.py @@ -192,7 +192,7 @@ default: "5432" db_name: description: - - intial database that would be added + - initial database that would be added type: str required: true db_password: @@ -280,7 +280,7 @@ """ EXAMPLES = r""" -- name: regsiter database from registered vm +- name: register database from registered vm ntnx_ndb_register_database: wait: true @@ -314,7 +314,7 @@ register: result -- name: register database from unregistred vm +- name: register database from unregistered vm ntnx_ndb_register_database: wait: true name: "{{db1_name}}" diff --git a/plugins/modules/ntnx_ndb_slas.py b/plugins/modules/ntnx_ndb_slas.py index d858bcc6b..72efbc9bb 100644 --- a/plugins/modules/ntnx_ndb_slas.py +++ b/plugins/modules/ntnx_ndb_slas.py @@ -10,9 +10,9 @@ DOCUMENTATION = r""" --- module: ntnx_ndb_slas -short_description: moudle for creating, updating and deleting slas +short_description: module for creating, updating and deleting slas version_added: 1.8.0 -description: moudle for creating, updating and deleting slas +description: module for creating, updating and deleting slas options: name: description: diff --git a/plugins/modules/ntnx_ndb_time_machines_info.py b/plugins/modules/ntnx_ndb_time_machines_info.py index 8fce6fdae..952d1c61a 100644 --- a/plugins/modules/ntnx_ndb_time_machines_info.py +++ b/plugins/modules/ntnx_ndb_time_machines_info.py @@ -49,7 +49,7 @@ type: bool value: description: - - value correponding to C(value_type) + - value corresponding to C(value_type) type: str value_type: description: @@ -91,7 +91,7 @@ nutanix_username: "" nutanix_password: "" validate_certs: false - uuid: "" + uuid: "" register: result """ RETURN = r""" diff --git a/plugins/modules/ntnx_ndb_vlans.py b/plugins/modules/ntnx_ndb_vlans.py index c77f65d43..709844912 100644 --- a/plugins/modules/ntnx_ndb_vlans.py +++ b/plugins/modules/ntnx_ndb_vlans.py @@ -28,7 +28,7 @@ type: str vlan_type: description: - - wheather the vlan is mannaged or no + - whether the vlan is managed or not - update allowed type: str choices: ["DHCP", "Static"] diff --git a/plugins/modules/ntnx_pbrs.py b/plugins/modules/ntnx_pbrs.py index 497d0ac20..c4e8feeb4 100644 --- 
a/plugins/modules/ntnx_pbrs.py +++ b/plugins/modules/ntnx_pbrs.py @@ -51,7 +51,7 @@ type: bool network: description: - - Traffic from specfic network address + - Traffic from specific network address - Mutually exclusive with C(any) and C(external) type: dict suboptions: @@ -77,7 +77,7 @@ type: bool network: description: - - Traffic to specfic network address + - Traffic to specific network address - Mutually exclusive with C(any) and C(external) type: dict suboptions: @@ -93,7 +93,7 @@ suboptions: any: description: - - Any protcol number + - Any protocol number - Mutually exclusive with C(tcp) and C(udp) and C(number) and C(icmp) type: bool tcp: diff --git a/plugins/modules/ntnx_protection_rules.py b/plugins/modules/ntnx_protection_rules.py index 01c8b9aff..2f36ae8c6 100644 --- a/plugins/modules/ntnx_protection_rules.py +++ b/plugins/modules/ntnx_protection_rules.py @@ -655,7 +655,7 @@ def check_rule_idempotency(rule_spec, update_spec): ].get("category_filter"): return False - # check if availibility zones have updated + # check if availability zones have updated if len(rule_spec["spec"]["resources"]["ordered_availability_zone_list"]) != len( update_spec["spec"]["resources"]["ordered_availability_zone_list"] ): diff --git a/plugins/modules/ntnx_recovery_plan_jobs.py b/plugins/modules/ntnx_recovery_plan_jobs.py index a1acfb586..a115d39b5 100644 --- a/plugins/modules/ntnx_recovery_plan_jobs.py +++ b/plugins/modules/ntnx_recovery_plan_jobs.py @@ -76,11 +76,11 @@ Type of action performed by the Recovery Plan Job. VALIDATE - Performs the validation of the Recovery Plan. The validation includes checks for the presence of entities, networks, categories etc. referenced in the Recovery - Plan. MIGRATE - VM would be powered off on the sourece before migrating it + Plan. MIGRATE - VM would be powered off on the source before migrating it to the recovery Availability Zone. FAILOVER - Restore the entity from the recovery points on the recovery Availability Zone. TEST_FAILOVER - Same as FAILOVER but on a test network. LIVE_MIGRATE - Migrate without powering - off the VM. CLEANUP - for cleaning entities created usnig test failover + off the VM. CLEANUP - for cleaning entities created using test failover type: str required: true choices: @@ -463,7 +463,7 @@ def get_module_spec(): def get_recovery_plan_job_uuid(module, task_uuid): """ This function extracts recovery plan job uuid from task status. - It polls for 10 mins untill the recovery plan job uuid comes up in task response. + It polls for 10 mins until the recovery plan job uuid comes up in task response. 
""" task = Task(module) timeout = time.time() + 600 diff --git a/plugins/modules/ntnx_recovery_plans.py b/plugins/modules/ntnx_recovery_plans.py index fc39156f5..ed9e1afaa 100644 --- a/plugins/modules/ntnx_recovery_plans.py +++ b/plugins/modules/ntnx_recovery_plans.py @@ -141,7 +141,7 @@ type: str required: true gateway_ip: - description: gateway ip of subnet incase of IPAM + description: gateway ip of subnet in case of IPAM type: str required: false prefix: @@ -189,7 +189,7 @@ type: str required: true gateway_ip: - description: gateway ip of subnet incase of IPAM + description: gateway ip of subnet in case of IPAM type: str required: false prefix: @@ -246,7 +246,7 @@ type: str required: true gateway_ip: - description: gateway ip of subnet incase of IPAM + description: gateway ip of subnet in case of IPAM type: str required: false prefix: @@ -294,7 +294,7 @@ type: str required: true gateway_ip: - description: gateway ip of subnet incase of IPAM + description: gateway ip of subnet in case of IPAM type: str required: false prefix: @@ -1045,7 +1045,7 @@ def check_recovery_plan_idempotency(old_spec, update_spec): if config not in old_ip_assignments: return False - # comparing availibility zones + # comparing availability zones if ( old_spec["spec"]["resources"]["parameters"]["availability_zone_list"] != update_spec["spec"]["resources"]["parameters"]["availability_zone_list"] diff --git a/plugins/modules/ntnx_security_rules.py b/plugins/modules/ntnx_security_rules.py index 6787af1e9..84f78faeb 100644 --- a/plugins/modules/ntnx_security_rules.py +++ b/plugins/modules/ntnx_security_rules.py @@ -9,7 +9,7 @@ DOCUMENTATION = r""" module: ntnx_security_rules -short_description: security_rule module which suports security_rule CRUD operations +short_description: security_rule module which supports security_rule CRUD operations version_added: 1.3.0 description: 'Create, Update, Delete security_rule' options: diff --git a/plugins/modules/ntnx_service_groups.py b/plugins/modules/ntnx_service_groups.py index 8882494dd..855288373 100644 --- a/plugins/modules/ntnx_service_groups.py +++ b/plugins/modules/ntnx_service_groups.py @@ -10,7 +10,7 @@ DOCUMENTATION = r""" --- module: ntnx_service_groups -short_description: service_groups module which suports service_groups CRUD operations +short_description: service_groups module which supports service_groups CRUD operations version_added: 1.4.0 description: 'Create, Update, Delete service_group' options: diff --git a/plugins/modules/ntnx_static_routes.py b/plugins/modules/ntnx_static_routes.py index edc2e0504..9152a933a 100644 --- a/plugins/modules/ntnx_static_routes.py +++ b/plugins/modules/ntnx_static_routes.py @@ -32,9 +32,9 @@ default: false static_routes: description: - - list of static routes to be overriden in vpc. + - list of static routes to be overridden in vpc. - mutually exclusive with C(remove_all_routes) - - required incase remove_all_categories is not given + - required in case remove_all_categories is not given - default static route can be mentioned in this with destination - 0.0.0.0/0 - Only one default static route is allowed required: false @@ -44,7 +44,7 @@ destination: description: - destination prefix eg. 
10.2.3.0/24 - - for defaut static route give 0.0.0.0/0 + - for default static route give 0.0.0.0/0 required: true type: str next_hop: diff --git a/plugins/modules/ntnx_user_groups.py b/plugins/modules/ntnx_user_groups.py index 641f4a2dd..f549eca37 100644 --- a/plugins/modules/ntnx_user_groups.py +++ b/plugins/modules/ntnx_user_groups.py @@ -46,7 +46,7 @@ type: dict remove_categories: description: - - set this flag to remove dettach all categories attached to user_group + - set this flag to remove detach all categories attached to user_group - mutually_exclusive with C(categories) type: bool required: false diff --git a/plugins/modules/ntnx_users.py b/plugins/modules/ntnx_users.py index 90708d627..79c7a3b57 100644 --- a/plugins/modules/ntnx_users.py +++ b/plugins/modules/ntnx_users.py @@ -46,7 +46,7 @@ type: dict remove_categories: description: - - set this flag to remove dettach all categories attached to user + - set this flag to remove detach all categories attached to user - mutually_exclusive with C(categories) type: bool required: false @@ -62,7 +62,7 @@ description: The UserPrincipalName of the user from the directory service. project: type: dict - description: project that belogs to + description: project that belongs to suboptions: name: type: str diff --git a/plugins/modules/ntnx_vms.py b/plugins/modules/ntnx_vms.py index 65c7daf88..0cb748498 100644 --- a/plugins/modules/ntnx_vms.py +++ b/plugins/modules/ntnx_vms.py @@ -17,14 +17,14 @@ state: description: - Specify state - - If C(state) is set to C(present) then the opperation will be create the item + - If C(state) is set to C(present) then the operation will be create the item - >- If C(state) is set to C(absent) and if the item exists, then item is removed. - - If C(state) is set to C(power_on) then the opperation will be power on the VM - - If C(state) is set to C(power_off) then the opperation will be power off the VM - - If C(state) is set to C(soft_shutdown) then the opperation will be soft shutdown the VM - - If C(state) is set to C(hard_poweroff) then the opperation will be hard poweroff the VM + - If C(state) is set to C(power_on) then the operation will power on the VM + - If C(state) is set to C(power_off) then the operation will power off the VM + - If C(state) is set to C(soft_shutdown) then the operation will shutdown the VM + - If C(state) is set to C(hard_poweroff) then the operation will hard poweroff the VM choices: - present - absent @@ -100,7 +100,7 @@ suboptions: name: description: - - Storage containter Name + - Storage container Name - Mutually exclusive with C(uuid) type: str uuid: @@ -199,7 +199,7 @@ empty_cdrom: True cores_per_vcpu: 1 - - name: VM with diffrent disk types and diffrent sizes with UEFI boot type + - name: VM with different disk types and different sizes with UEFI boot type ntnx_vms: state: present name: VM with UEFI boot type @@ -441,14 +441,14 @@ state: soft_shutdown vm_uuid: "{{ vm.vm_uuid }}" - - name: Create VM with minimum requiremnts with hard_poweroff opperation + - name: Create VM with minimum requirements with hard_poweroff operation ntnx_vms: state: hard_poweroff name: integration_test_opperations_vm cluster: name: "{{ cluster.name }}" - - name: Create VM with minimum requiremnts with poweroff opperation + - name: Create VM with minimum requirements with poweroff operation ntnx_vms: state: power_off name: integration_test_opperations_vm diff --git a/tests/integration/targets/ntnx_address_groups/tasks/update.yml b/tests/integration/targets/ntnx_address_groups/tasks/update.yml 
index f4ebedc50..6107bc286 100644 --- a/tests/integration/targets/ntnx_address_groups/tasks/update.yml +++ b/tests/integration/targets/ntnx_address_groups/tasks/update.yml @@ -34,7 +34,7 @@ that: - test_ag.response is defined - test_ag.changed == True - fail_msg: "Unable to create adress group" + fail_msg: "Unable to create address group" success_msg: "Address group created susccessfully" diff --git a/tests/integration/targets/ntnx_categories/tasks/all_operations.yml b/tests/integration/targets/ntnx_categories/tasks/all_operations.yml index 679532a42..3d546343c 100644 --- a/tests/integration/targets/ntnx_categories/tasks/all_operations.yml +++ b/tests/integration/targets/ntnx_categories/tasks/all_operations.yml @@ -131,7 +131,7 @@ fail_msg: "Fail: unable to update existing category by deleting all values " success_msg: "Passed: update existing category by deleting all values finished successfully" ################# -- name: Delte the category +- name: Delete the category ntnx_categories: state: "absent" name: "{{first_category.name}}" diff --git a/tests/integration/targets/ntnx_foundation/tasks/image_nodes.yml b/tests/integration/targets/ntnx_foundation/tasks/image_nodes.yml index b27e210df..65560e6cc 100644 --- a/tests/integration/targets/ntnx_foundation/tasks/image_nodes.yml +++ b/tests/integration/targets/ntnx_foundation/tasks/image_nodes.yml @@ -62,7 +62,7 @@ - first_cluster.response.cluster_urls is defined - first_cluster.response.cluster_urls.0.name=="test-cluster" fail_msg: " Fail : unable to create cluster with three node" - success_msg: "Succes: cluster with three node created successfully " + success_msg: "Success: cluster with three node created successfully " # when: false # make it true or remove to unskip task ###################################################### diff --git a/tests/integration/targets/ntnx_foundation/tasks/negative_scenarios.yml b/tests/integration/targets/ntnx_foundation/tasks/negative_scenarios.yml index 86472bf65..08555d845 100644 --- a/tests/integration/targets/ntnx_foundation/tasks/negative_scenarios.yml +++ b/tests/integration/targets/ntnx_foundation/tasks/negative_scenarios.yml @@ -44,7 +44,7 @@ - result.response.blocks.0.nodes.0.node_position=="{{IBIS_node.node1.node_position}}" - result.response.clusters.0.cluster_name=="test-cluster" fail_msg: " Fail : check_mode fail" - success_msg: "Succes: returned response as expected" + success_msg: "Success: returned response as expected" ################################### - debug: msg: start negative_scenarios for ntnx_foundation @@ -82,7 +82,7 @@ - result.changed==false - result.failed==true fail_msg: " Fail : image node with wrong serial done successfully " - success_msg: "Succes: unable to image node with wrong serial " + success_msg: "Success: unable to image node with wrong serial " ################################### - name: Image nodes with wrong hypervisor ntnx_foundation: @@ -116,4 +116,4 @@ - result.failed==true - "result.msg=='value of hypervisor must be one of: kvm, hyperv, xen, esx, ahv, got: phoenix found in blocks -> nodes -> discovery_mode -> discovery_override'" fail_msg: " Fail : Image nodes with wrong hypervisor done successfully " - success_msg: "Succes: unable to image node with wrong hypervisor" + success_msg: "Success: unable to image node with wrong hypervisor" diff --git a/tests/integration/targets/ntnx_foundation_aos_packages_info/tasks/get_aos.yml b/tests/integration/targets/ntnx_foundation_aos_packages_info/tasks/get_aos.yml index d2cca917b..f9600af78 100644 --- 
a/tests/integration/targets/ntnx_foundation_aos_packages_info/tasks/get_aos.yml +++ b/tests/integration/targets/ntnx_foundation_aos_packages_info/tasks/get_aos.yml @@ -14,4 +14,4 @@ - result.failed==false - result.changed==false fail_msg: " Fail : unable to get aos_packages " - success_msg: "Succes: got aos_packages successfully " + success_msg: "Success: got aos_packages successfully " diff --git a/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/tasks/configure_ipmi.yml b/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/tasks/configure_ipmi.yml index 9988683a3..19d5e37c6 100644 --- a/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/tasks/configure_ipmi.yml +++ b/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/tasks/configure_ipmi.yml @@ -24,4 +24,4 @@ - result.response.blocks.0.nodes.0.ipmi_configure_successful==true - result.response.blocks.0.nodes.0.ipmi_message is defined fail_msg: "bmc ipmi configure was failed with error result.error" - success_msg: "bmc ipmi configure was successfull" + success_msg: "bmc ipmi configure was successful" diff --git a/tests/integration/targets/ntnx_foundation_central/tasks/image_nodes.yml b/tests/integration/targets/ntnx_foundation_central/tasks/image_nodes.yml index 113c4d3a3..461982fcd 100644 --- a/tests/integration/targets/ntnx_foundation_central/tasks/image_nodes.yml +++ b/tests/integration/targets/ntnx_foundation_central/tasks/image_nodes.yml @@ -51,5 +51,5 @@ - result.failed==false - result.changed==true fail_msg: "fail: Unable to image nodes or create cluster " - success_msg: "succes: Imaging and cluster created successfully " + success_msg: "success: Imaging and cluster created successfully " # when: false # make it true or remove to unskip task diff --git a/tests/integration/targets/ntnx_foundation_central_api_keys/tasks/create_key.yml b/tests/integration/targets/ntnx_foundation_central_api_keys/tasks/create_key.yml index 92943493e..08325b1b4 100644 --- a/tests/integration/targets/ntnx_foundation_central_api_keys/tasks/create_key.yml +++ b/tests/integration/targets/ntnx_foundation_central_api_keys/tasks/create_key.yml @@ -16,7 +16,7 @@ - result.changed==false - result.response.alias=="test" fail_msg: "fail: Unable to create api key with check_mode: " - success_msg: "succes: api key with check_mode: " + success_msg: "success: api key with check_mode: " - name: Generate random alias for api key set_fact: @@ -37,7 +37,7 @@ - result.changed==true - result.response.key_uuid is defined fail_msg: "fail: Unable to create api key " - success_msg: "succes: api key created successfully " + success_msg: "success: api key created successfully " - ntnx_foundation_central_api_keys: alias: "{{random_alias.0}}" @@ -52,4 +52,4 @@ - result.status_code==400 - result.error is defined fail_msg: "fail: created duplicate api key with same alias " - success_msg: "succes: returned error as expected " + success_msg: "success: returned error as expected " diff --git a/tests/integration/targets/ntnx_foundation_central_api_keys_info/tasks/key_info.yml b/tests/integration/targets/ntnx_foundation_central_api_keys_info/tasks/key_info.yml index c26afa9f1..7e369b81b 100644 --- a/tests/integration/targets/ntnx_foundation_central_api_keys_info/tasks/key_info.yml +++ b/tests/integration/targets/ntnx_foundation_central_api_keys_info/tasks/key_info.yml @@ -21,7 +21,7 @@ - key.response.key_uuid is defined - key.response.api_key is defined fail_msg: "fail: Unable to create api key " - success_msg: "succes: api key created successfully " + 
success_msg: "success: api key created successfully " - name: get api key with key_uuid ntnx_foundation_central_api_keys_info: @@ -35,7 +35,7 @@ - result.response is defined - result.response.alias=="{{random_alias.0}}" fail_msg: "fail: Unable to get api key with key_uuid" - success_msg: "succes: get api key with key_uuid " + success_msg: "success: get api key with key_uuid " - name: get api key with alias ntnx_foundation_central_api_keys_info: @@ -49,7 +49,7 @@ - result.response is defined - result.response.0.key_uuid== key.response.key_uuid fail_msg: "fail: Unable to get api key with alias name" - success_msg: "succes: get api key with alias name " + success_msg: "success: get api key with alias name " - name: get api key with custom filter ntnx_foundation_central_api_keys_info: @@ -68,4 +68,4 @@ - result.response.api_keys.0 is defined - result.response.api_keys.0.api_key == key.response.api_key fail_msg: "fail: unable to get api key with custom filter " - success_msg: "succes: get api key with custom filter successfully " + success_msg: "success: get api key with custom filter successfully " diff --git a/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/tasks/get_cluster_info.yml b/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/tasks/get_cluster_info.yml index cb248215e..294de071d 100644 --- a/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/tasks/get_cluster_info.yml +++ b/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/tasks/get_cluster_info.yml @@ -16,7 +16,7 @@ - clusters.failed==false - clusters.response is defined fail_msg: "fail: unable to get all imaged,archived cluster " - success_msg: "succes: get all imaged,archived cluster sucessfuly " + success_msg: "success: get all imaged,archived cluster successfully " - name: get imaged cluster using image_cluster_uuid @@ -33,7 +33,7 @@ - result.response is defined - result.response.imaged_cluster_uuid == "{{clusters.response.imaged_clusters.0.imaged_cluster_uuid}}" fail_msg: "fail: unable to get imaged cluster using image_cluster_uuid " - success_msg: "succes: get imaged cluster using image_cluster_uuid sucessfuly " + success_msg: "success: get imaged cluster using image_cluster_uuid successfully " - name: get imaged cluster using custom filter ntnx_foundation_central_imaged_clusters_info: @@ -49,7 +49,7 @@ - result.failed==false - result.response.imaged_clusters is defined fail_msg: "fail: unable to get imaged cluster using custom filter " - success_msg: "succes: get imaged cluster using custom filter sucessfully" + success_msg: "success: get imaged cluster using custom filter successfully" # still offset and length diff --git a/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/tasks/get_node_info.yml b/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/tasks/get_node_info.yml index 7fc30a286..4e30b3294 100644 --- a/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/tasks/get_node_info.yml +++ b/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/tasks/get_node_info.yml @@ -15,7 +15,7 @@ - nodes.response.imaged_nodes is defined - nodes.response.metadata.length > 0 fail_msg: "fail: unable to get all imaged nodes " - success_msg: "succes: get all imaged nodes sucessfully " + success_msg: "success: get all imaged nodes successfully " - name: get node by uuid ntnx_foundation_central_imaged_nodes_info: @@ -31,7 +31,7 @@ - result.response is defined - result.response.node_serial == 
nodes.response.imaged_nodes.0.node_serial fail_msg: "fail: unable to get node by uuid" - success_msg: "succes: get node by uuid successfully " + success_msg: "success: get node by uuid successfully " - name: get imaged node using custom filter ntnx_foundation_central_imaged_nodes_info: @@ -48,6 +48,6 @@ - result.response.imaged_nodes.0.imaged_node_uuid == nodes.response.imaged_nodes.0.imaged_node_uuid - result.response.metadata.length <=1 fail_msg: "fail: unable to get imaged node using custom filter " - success_msg: "succes: get imaged node using custom filter sucessfully" + success_msg: "success: get imaged node using custom filter successfully" # still offset and length and filter diff --git a/tests/integration/targets/ntnx_foundation_discover_nodes_info/tasks/discover_nodes.yml b/tests/integration/targets/ntnx_foundation_discover_nodes_info/tasks/discover_nodes.yml index 973b93d41..aa1ffd92e 100644 --- a/tests/integration/targets/ntnx_foundation_discover_nodes_info/tasks/discover_nodes.yml +++ b/tests/integration/targets/ntnx_foundation_discover_nodes_info/tasks/discover_nodes.yml @@ -15,7 +15,7 @@ - result.blocks.0.nodes.0.configured==false - result.blocks.0.nodes.0.ipv6_address is defined fail_msg: " Fail : unable to Discover nodes " - success_msg: "Succes: Discover nodes finished successfully " + success_msg: "Success: Discover nodes finished successfully " - name: Discover all nodes ntnx_foundation_discover_nodes_info: @@ -30,7 +30,7 @@ - result.changed==false - result.blocks.0.nodes.0.ipv6_address is defined fail_msg: " Fail : unable to discover all nodes " - success_msg: "Succes: Discover all nodes finished successfully " + success_msg: "Success: Discover all nodes finished successfully " # - name: Discover nodes and include network info # api fail # ntnx_foundation_discover_nodes_info: @@ -44,4 +44,4 @@ # - result.failed==false # - result.changed==false # fail_msg: " Fail : unable to discover nodes and include network info " -# success_msg: "Succes: Discover nodes and include network info finished successfully " +# success_msg: "Success: Discover nodes and include network info finished successfully " diff --git a/tests/integration/targets/ntnx_foundation_hypervisor_images_info/tasks/get_hypervisors.yml b/tests/integration/targets/ntnx_foundation_hypervisor_images_info/tasks/get_hypervisors.yml index 500b09e7c..18a3c5587 100644 --- a/tests/integration/targets/ntnx_foundation_hypervisor_images_info/tasks/get_hypervisors.yml +++ b/tests/integration/targets/ntnx_foundation_hypervisor_images_info/tasks/get_hypervisors.yml @@ -21,4 +21,4 @@ - result.failed==false - result.changed==false fail_msg: " Fail : unable to get hypervisor_images_info " - success_msg: "Succes: got hypervisor_images_info successfully " + success_msg: "Success: got hypervisor_images_info successfully " diff --git a/tests/integration/targets/ntnx_foundation_image_upload/tasks/negative_scenarios.yml b/tests/integration/targets/ntnx_foundation_image_upload/tasks/negative_scenarios.yml index 6794b80fd..d3957ac6b 100644 --- a/tests/integration/targets/ntnx_foundation_image_upload/tasks/negative_scenarios.yml +++ b/tests/integration/targets/ntnx_foundation_image_upload/tasks/negative_scenarios.yml @@ -15,4 +15,4 @@ - result.changed==false - "result.msg == 'value of installer_type must be one of: kvm, esx, hyperv, xen, nos, got: wrong installler type'" fail_msg: " Fail : image uploaded with wrong installer type" - success_msg: "Succes: returned error as expected " + success_msg: "Success: returned error as expected " 
diff --git a/tests/integration/targets/ntnx_foundation_image_upload/tasks/upload.yml b/tests/integration/targets/ntnx_foundation_image_upload/tasks/upload.yml index 3cc90a7f5..00555ee16 100644 --- a/tests/integration/targets/ntnx_foundation_image_upload/tasks/upload.yml +++ b/tests/integration/targets/ntnx_foundation_image_upload/tasks/upload.yml @@ -24,7 +24,7 @@ - result.failed==false - result.changed==true fail_msg: " Fail : unable to upload image with nos installer_type " - success_msg: "Succes: upload image with nos installer_type successfully " + success_msg: "Success: upload image with nos installer_type successfully " - name: Delete Image with nos installer_type ntnx_foundation_image_upload: @@ -41,4 +41,4 @@ - result.failed==false - result.changed==true fail_msg: " Fail : unable to delete image with nos installer_type " - success_msg: "Succes: image with nos installer_type deleted successfully " + success_msg: "Success: image with nos installer_type deleted successfully " diff --git a/tests/integration/targets/ntnx_foundation_node_network_info/tasks/get_info.yml b/tests/integration/targets/ntnx_foundation_node_network_info/tasks/get_info.yml index b9faec447..5c8327f3e 100644 --- a/tests/integration/targets/ntnx_foundation_node_network_info/tasks/get_info.yml +++ b/tests/integration/targets/ntnx_foundation_node_network_info/tasks/get_info.yml @@ -23,4 +23,4 @@ - result.nodes.0.ipmi_gateway is defined - result.nodes.0.hypervisor_hostname is defined fail_msg: " Fail : unable to get node network info " - success_msg: "Succes: Got node network info successfully " + success_msg: "Success: Got node network info successfully " diff --git a/tests/integration/targets/ntnx_foundation_sanity/tasks/image_nodes.yml b/tests/integration/targets/ntnx_foundation_sanity/tasks/image_nodes.yml index 46056d0da..8ed9ee396 100644 --- a/tests/integration/targets/ntnx_foundation_sanity/tasks/image_nodes.yml +++ b/tests/integration/targets/ntnx_foundation_sanity/tasks/image_nodes.yml @@ -114,7 +114,7 @@ - spec.changed==false - spec.response == expected_spec fail_msg: " Fail : unable to create spec for imaging nodes" - success_msg: "Succes: spec generated successfully" + success_msg: "Success: spec generated successfully" - name: Image nodes without cluster creation ntnx_foundation: @@ -154,7 +154,7 @@ - result.failed==false - result.changed==true fail_msg: " Fail : unable to image nodes" - success_msg: "Succes: node imaging done successfully" + success_msg: "Success: node imaging done successfully" - name: Image nodes and create cluster out of it ntnx_foundation: @@ -209,6 +209,6 @@ - result.changed==true - result.response.cluster_urls is defined fail_msg: " Fail : unable to image nodes and create cluster" - success_msg: "Succes: cluster and node imaging done successfully" + success_msg: "Success: cluster and node imaging done successfully" ###################################################### diff --git a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/crud.yml b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/crud.yml index bfbc770df..5afafa993 100644 --- a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/crud.yml +++ b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/crud.yml @@ -345,7 +345,7 @@ memory_gb: 8 # for etcd min 8 disk_gb: 120 add_labels: - propert.-+]y5: "string" + property.-+]y5: "string" propert5: "string" property4: "string+-.3-@" register: result @@ -373,7 +373,7 @@ property1: "test-property1" property2: "test-property2" property3: 
"test-property3" - propert.-+]y5: "string" + property.-+]y5: "string" register: result ignore_errors: true diff --git a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/negative_scenarios.yml b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/negative_scenarios.yml index 3cc7c5541..f0c9477e0 100644 --- a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/negative_scenarios.yml +++ b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/negative_scenarios.yml @@ -16,7 +16,7 @@ node_subnet: uuid: "{{network.dhcp.uuid}}" cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" + node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" network_provider: Calico @@ -46,8 +46,8 @@ - result.failed == true - result.error == "cpu cannot be less then 4" - result.msg == "Failed generating create cluster spec" - fail_msg: " Fail: cluster creaeted with cpu less than minimum" - success_msg: " Pass: Retunred as expected" + fail_msg: " Fail: cluster created with cpu less than minimum" + success_msg: " Pass: Returned as expected" ############################# - name: create cluster with memory_gb less than minimum ntnx_karbon_clusters: @@ -59,7 +59,7 @@ node_subnet: uuid: "{{network.dhcp.uuid}}" cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" + node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" network_provider: Calico @@ -89,8 +89,8 @@ - result.failed == true - result.error == "memory_gb cannot be less then 8" - result.msg == "Failed generating create cluster spec" - fail_msg: " Fail: cluster creaeted with memory_gb size less than minimum" - success_msg: " Pass: Retunred as expected" + fail_msg: " Fail: cluster created with memory_gb size less than minimum" + success_msg: " Pass: Returned as expected" ############################# - name: create cluster with wrong num_instances for master nodes ntnx_karbon_clusters: @@ -102,7 +102,7 @@ node_subnet: uuid: "{{network.dhcp.uuid}}" cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" + node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" network_provider: Calico @@ -132,8 +132,8 @@ - result.failed == true - result.error == "value of masters.num_instances must be 1 or 2" - result.msg == "Failed generating create cluster spec" - fail_msg: " Fail: cluster creaeted with wrong num_instances for master nodes" - success_msg: " Pass: Retunred as expected" + fail_msg: " Fail: cluster created with wrong num_instances for master nodes" + success_msg: " Pass: Returned as expected" ############################# - name: create cluster with wrong num_instances for etcd nodes ntnx_karbon_clusters: @@ -145,7 +145,7 @@ node_subnet: uuid: "{{network.dhcp.uuid}}" cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" + node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" network_provider: Calico @@ -175,6 +175,6 @@ - result.failed == true - result.error == "value of etcd.num_instances must be 1, 3 or 5" - result.msg == "Failed generating create cluster spec" - fail_msg: " Fail: cluster creaeted with wrong num_instances for etcd nodes" - success_msg: " Pass: Retunred as expected" + fail_msg: " Fail: cluster created with wrong num_instances for etcd nodes" + success_msg: " Pass: Returned as 
expected" ############################# diff --git a/tests/integration/targets/ntnx_karbon_registries/tasks/create.yml b/tests/integration/targets/ntnx_karbon_registries/tasks/create.yml index cf88b97bf..d0d3afc6e 100644 --- a/tests/integration/targets/ntnx_karbon_registries/tasks/create.yml +++ b/tests/integration/targets/ntnx_karbon_registries/tasks/create.yml @@ -46,7 +46,7 @@ - result.changed == true - result.response.name == "{{registry_name}}" - result.response.uuid is defined - fail_msg: "Fail: Unable to create registery" + fail_msg: "Fail: Unable to create registry" success_msg: "Pass: create registry finished successfully" ################################################################ - name: delete registry diff --git a/tests/integration/targets/ntnx_karbon_registries/tasks/negativ_scenarios.yml b/tests/integration/targets/ntnx_karbon_registries/tasks/negativ_scenarios.yml index cbe281e4d..705149710 100644 --- a/tests/integration/targets/ntnx_karbon_registries/tasks/negativ_scenarios.yml +++ b/tests/integration/targets/ntnx_karbon_registries/tasks/negativ_scenarios.yml @@ -16,5 +16,5 @@ - result.response is defined - result.failed == true - result.changed == false - fail_msg: "Fail: create registery with wrong port number finished successfully" + fail_msg: "Fail: create registry with wrong port number finished successfully" success_msg: "Pass: Returned as expected " diff --git a/tests/integration/targets/ntnx_ndb_availability_databases/tasks/tests.yml b/tests/integration/targets/ntnx_ndb_availability_databases/tasks/tests.yml index 57efb4ece..6cdf767e8 100644 --- a/tests/integration/targets/ntnx_ndb_availability_databases/tasks/tests.yml +++ b/tests/integration/targets/ntnx_ndb_availability_databases/tasks/tests.yml @@ -3,7 +3,7 @@ # This playbook will test below cases: # 1. Create HA instance spec with check mode and minimal spec # 2. Create HA postgres database instance with multicluster nodes -# 3. Create HA postgres database instance with static IP and cluster IP assigments +# 3. 
Create HA postgres database instance with static IP and cluster IP assignments - debug: msg: "start ndb databases test flow for testing high availability databases" diff --git a/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml b/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml index 232112e75..859075c8d 100644 --- a/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml +++ b/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml @@ -2,7 +2,6 @@ - debug: msg: Start testing ntnx_ndb_clusters - - name: Register cluster with prism_vlan in check mode ntnx_ndb_clusters: name: "{{cluster.cluster3.name}}" @@ -50,8 +49,8 @@ - result.response.networksInfo[0].networkInfo[2].value == "{{cluster.cluster3.vlan_access.prism_vlan.gateway}}" - result.response.networksInfo[0].networkInfo[3].value == "{{cluster.cluster3.vlan_access.prism_vlan.subnet_mask}}" - result.response.networksInfo[0].type== "{{cluster.cluster3.vlan_access.prism_vlan.vlan_type}}" - fail_msg: "fail: Wring with check mode for registring cluster" - success_msg: "pass: retunred as expected" + fail_msg: "fail: Wring with check mode for registering cluster" + success_msg: "pass: Returned as expected" - name: Register cluster with prism_vlan ntnx_ndb_clusters: @@ -139,10 +138,10 @@ - result.response.password is defined - result.cluster_uuid is defined fail_msg: "fail: update cluster credeential while check_mode" - success_msg: "pass: retunred as expected" + success_msg: "pass: Returned as expected" ################################################################ -- name: Negative Secnarios update storage container +- name: Negative Scenarios update storage container ntnx_ndb_clusters: uuid: "{{result.cluster_uuid}}" storage_container: "{{cluster.cluster3.storage_container}}" @@ -161,7 +160,7 @@ ################################################################ -- name: Negative Secnarios update vlan access +- name: Negative Scenarios update vlan access ntnx_ndb_clusters: uuid: "{{result.cluster_uuid}}" vlan_access: @@ -186,7 +185,7 @@ ################################################################ -- name: Negative Secnarios update agent network +- name: Negative Scenarios update agent network ntnx_ndb_clusters: uuid: "{{result.cluster_uuid}}" agent_network: @@ -213,7 +212,7 @@ ################################################################ -- name: Negative Secnarios update agent network +- name: Negative Scenarios update agent network ntnx_ndb_clusters: uuid: "{{result.cluster_uuid}}" name_prefix: "{{cluster.cluster3.name_prefix}}" diff --git a/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml b/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml index 7387bff35..273b84718 100644 --- a/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml +++ b/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml @@ -401,7 +401,7 @@ that: - result == expected_result fail_msg: "Unable to create restore using pitr timestamp spec" - success_msg: "Spec for databas restore using pitr timetsmap created successfully" + success_msg: "Spec for database restore using pitr timetsmap created successfully" - name: create restore database spec with latest snapshot @@ -436,7 +436,7 @@ that: - result == expected_result fail_msg: "Unable to create restore using latest snapshot spec" - success_msg: "Spec for databas restore using latest snapshot created successfully" + success_msg: "Spec for database restore using latest snapshot created successfully" @@ 
-472,7 +472,7 @@ that: - result == expected_result fail_msg: "Unable to create restore using snapshot uuid spec" - success_msg: "Spec for databas restore using snapshot uuid created successfully" + success_msg: "Spec for database restore using snapshot uuid created successfully" - name: perform restore using latest snapshot diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/tests.yml b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/tests.yml index 464f71e61..73de26640 100644 --- a/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/tests.yml +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/tests.yml @@ -618,7 +618,7 @@ success_msg: "single instance postgres database register spec created successfully" -- name: regsiter previously unregistered database from previously created VM +- name: register previously unregistered database from previously created VM ntnx_ndb_register_database: wait: true diff --git a/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/crud.yml b/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/crud.yml index 7f3bf3844..60c7fd4e8 100644 --- a/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/crud.yml +++ b/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/crud.yml @@ -663,7 +663,7 @@ success_msg: "maintenance tasks for given db server vm removed successfully" -- name: Add maitenance window task for vm +- name: Add maintenance window task for vm ntnx_ndb_maintenance_tasks: db_server_vms: - name: "{{vm1_name_updated}}" @@ -750,7 +750,7 @@ success_msg: "DB server VM unregister spec generated successfully" -- name: genereate check mode spec for delete vm with vgs and snapshots +- name: generate check mode spec for delete vm with vgs and snapshots check_mode: yes ntnx_ndb_db_server_vms: state: "absent" @@ -798,7 +798,7 @@ ################################### DB server VM Registration tests ############################# -- name: generate spec for registeration of the previous unregistered vm using check mode +- name: generate spec for registration of the previous unregistered vm using check mode check_mode: yes ntnx_ndb_register_db_server_vm: ip: "{{vm_ip}}" diff --git a/tests/integration/targets/ntnx_ndb_maintenance_windows/tasks/crud.yml b/tests/integration/targets/ntnx_ndb_maintenance_windows/tasks/crud.yml index efaa5eb49..a73653dd9 100644 --- a/tests/integration/targets/ntnx_ndb_maintenance_windows/tasks/crud.yml +++ b/tests/integration/targets/ntnx_ndb_maintenance_windows/tasks/crud.yml @@ -1,7 +1,7 @@ --- - debug: - msg: "start ndb database maintenance winndow tests" + msg: "start ndb database maintenance window tests" - name: Generate random name set_fact: diff --git a/tests/integration/targets/ntnx_ndb_profiles/tasks/network_profile.yml b/tests/integration/targets/ntnx_ndb_profiles/tasks/network_profile.yml index 6ed797ebc..27549a765 100644 --- a/tests/integration/targets/ntnx_ndb_profiles/tasks/network_profile.yml +++ b/tests/integration/targets/ntnx_ndb_profiles/tasks/network_profile.yml @@ -128,7 +128,7 @@ # - result.response.versions[0].propertiesMap.CLUSTER_NAME_0 == "{{network_profile.HA.cluster1.name}}" # - result.response.versions[0].propertiesMap.CLUSTER_NAME_1 == "{{network_profile.HA.cluster2.name}}" # fail_msg: "Fail: unable to verify create of multiple cluster network profile " -# success_msg: "Pass: verify create of multiple cluster network profile finished sucessfully" +# success_msg: "Pass: verify create of multiple cluster network 
profile finished successfully" # - set_fact: # todelete: "{{ todelete + [ result.profile_uuid ] }}" diff --git a/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml b/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml index aef0c0daf..aec103c53 100644 --- a/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml +++ b/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml @@ -233,7 +233,7 @@ - result.response.profile.name == "{{profile1_name}}-updated1" - result.response.profile.description == "{{profile1_name}}-desc-updated" - fail_msg: "Fail: Update didnt get skipped due to no state changes" + fail_msg: "Fail: Update didn't get skipped due to no state changes" success_msg: "Pass: Update skipped successfully due to no state changes" - name: create software profile version spec diff --git a/tests/integration/targets/ntnx_ova/tasks/create_ova.yml b/tests/integration/targets/ntnx_ova/tasks/create_ova.yml index 685690e4a..8b66c26a8 100644 --- a/tests/integration/targets/ntnx_ova/tasks/create_ova.yml +++ b/tests/integration/targets/ntnx_ova/tasks/create_ova.yml @@ -1,7 +1,7 @@ - debug: msg: Start testing create ova for vm -- name: VM with minimum requiremnts +- name: VM with minimum requirements ntnx_vms: state: present name: integration_test_ova_vm @@ -15,8 +15,8 @@ that: - vm.response is defined - vm.response.status.state == 'COMPLETE' - fail_msg: 'Fail: Unable to create VM with minimum requiremnts ' - success_msg: 'Success: VM with minimum requiremnts created successfully ' + fail_msg: 'Fail: Unable to create VM with minimum requirements ' + success_msg: 'Success: VM with minimum requirements created successfully ' ######################################### - name: create_ova_image with check mode ntnx_vms_ova: diff --git a/tests/integration/targets/ntnx_projects/tasks/create_project.yml b/tests/integration/targets/ntnx_projects/tasks/create_project.yml index 220cc38b1..c4265c4dc 100644 --- a/tests/integration/targets/ntnx_projects/tasks/create_project.yml +++ b/tests/integration/targets/ntnx_projects/tasks/create_project.yml @@ -136,7 +136,7 @@ todelete: "{{ todelete + [ result.project_uuid ] }}" ################################################################# -- name: Create Project with alredy existing project name +- name: Create Project with already existing project name ntnx_projects: name: "{{ project.name }}" register: result diff --git a/tests/integration/targets/ntnx_projects/tasks/projects_with_role_mappings.yml b/tests/integration/targets/ntnx_projects/tasks/projects_with_role_mappings.yml index 2dd21fe6a..dca268c0d 100644 --- a/tests/integration/targets/ntnx_projects/tasks/projects_with_role_mappings.yml +++ b/tests/integration/targets/ntnx_projects/tasks/projects_with_role_mappings.yml @@ -276,7 +276,7 @@ that: - result.changed == false - "'Nothing to update' in result.msg" - fail_msg: "Project update didnt got skipped for update spec same as existing project" + fail_msg: "Project update didn't got skipped for update spec same as existing project" success_msg: "Project got skipped successfully for no change in spec" @@ -302,7 +302,7 @@ that: - result.changed == false - "'Project with given name already exists' in result.msg" - fail_msg: "Project creation didnt failed for existing name" + fail_msg: "Project creation didn't failed for existing name" success_msg: "Project creation failed as expected" ################################################################# @@ -332,4 +332,4 @@ - result.changed == true - 
result.response.status == "SUCCEEDED" or result.response.status.state == "DELETE_PENDING" fail_msg: "Unable to delete user group " - success_msg: "user group deletd successfully" + success_msg: "user group deleted successfully" diff --git a/tests/integration/targets/ntnx_projects/tasks/update_project.yml b/tests/integration/targets/ntnx_projects/tasks/update_project.yml index 4d88442d1..1919e2c8c 100644 --- a/tests/integration/targets/ntnx_projects/tasks/update_project.yml +++ b/tests/integration/targets/ntnx_projects/tasks/update_project.yml @@ -154,7 +154,7 @@ that: - result.changed == false - "'Nothing to update' in result.msg" - fail_msg: "Project update didnt got skipped for update spec same as existing project" + fail_msg: "Project update didn't got skipped for update spec same as existing project" success_msg: "Project got skipped successfully for no change in spec" ################################################################# diff --git a/tests/integration/targets/ntnx_recovery_plans_and_jobs/tasks/crud.yml b/tests/integration/targets/ntnx_recovery_plans_and_jobs/tasks/crud.yml index 6a35bc2c2..7b8f22eb5 100644 --- a/tests/integration/targets/ntnx_recovery_plans_and_jobs/tasks/crud.yml +++ b/tests/integration/targets/ntnx_recovery_plans_and_jobs/tasks/crud.yml @@ -530,7 +530,7 @@ - recovery_plan.response.status.resources.stage_list[1]["stage_work"] == exepected_stage_work_1 - recovery_plan.response.status.resources.stage_list[0]["delay_time_secs"] == 2 - fail_msg: 'Unable to updae recovery plans' + fail_msg: 'Unable to update recovery plans' success_msg: 'Recovery plan updated successfully' diff --git a/tests/integration/targets/ntnx_security_rules_info/tasks/get_security_rules.yml b/tests/integration/targets/ntnx_security_rules_info/tasks/get_security_rules.yml index fe02bd1bd..d8396b751 100644 --- a/tests/integration/targets/ntnx_security_rules_info/tasks/get_security_rules.yml +++ b/tests/integration/targets/ntnx_security_rules_info/tasks/get_security_rules.yml @@ -41,7 +41,7 @@ fail_msg: ' fail: unable to get security rules ' success_msg: 'pass: get all security rules successfully ' ################################### -- name: getting particlar security rule using security_rule_uuid +- name: getting particular security rule using security_rule_uuid ntnx_security_rules_info: security_rule_uuid: '{{ first_rule.response.metadata.uuid }}' register: result @@ -55,7 +55,7 @@ - result.failed == false - result.response.status.state == 'COMPLETE' - first_rule.response.metadata.uuid == result.response.metadata.uuid - fail_msg: ' fail : unable to get particlar security rule using security_rule_uuid' + fail_msg: ' fail : unable to get particular security rule using security_rule_uuid' success_msg: 'pass: getting security rule using security_rule_uuid succesfuly' ################################### - name: getting all security rules sorted diff --git a/tests/integration/targets/ntnx_static_routes/tasks/create.yml b/tests/integration/targets/ntnx_static_routes/tasks/create.yml index 16c81ed50..a677ca55d 100644 --- a/tests/integration/targets/ntnx_static_routes/tasks/create.yml +++ b/tests/integration/targets/ntnx_static_routes/tasks/create.yml @@ -93,11 +93,11 @@ - result.response.status.resources.static_routes_list[0]["destination"] == "10.2.4.0/24" - result.response.status.resources.static_routes_list[0]["nexthop"]["external_subnet_reference"]["name"] == "{{ external_nat_subnet.name }}" fail_msg: "Static routes overriding failed" - success_msg: "Static routes overriden successfully" + 
success_msg: "Static routes overridden successfully" ########################################################################################################### -- name: Netgative scenario of cretaing multiple default routes +- name: Netgative scenario of creating multiple default routes ntnx_static_routes: vpc_uuid: "{{ vpc.uuid }}" static_routes: diff --git a/tests/integration/targets/ntnx_user_groups/tasks/create.yml b/tests/integration/targets/ntnx_user_groups/tasks/create.yml index 31fb156e3..7a11b4a28 100644 --- a/tests/integration/targets/ntnx_user_groups/tasks/create.yml +++ b/tests/integration/targets/ntnx_user_groups/tasks/create.yml @@ -143,7 +143,7 @@ - result.changed == true - result.response.status == "SUCCEEDED" or result.response.status.state == "DELETE_PENDING" fail_msg: "Unable to delete user group " - success_msg: "user group deletd successfully" + success_msg: "user group deleted successfully" # - name: create user group with idp diff --git a/tests/integration/targets/ntnx_vms_clone/tasks/create.yml b/tests/integration/targets/ntnx_vms_clone/tasks/create.yml index cf168c02f..9d08f1480 100644 --- a/tests/integration/targets/ntnx_vms_clone/tasks/create.yml +++ b/tests/integration/targets/ntnx_vms_clone/tasks/create.yml @@ -12,7 +12,7 @@ expire: False fqdn: myNutanixVM -- name: VM with minimum requiremnts to clone +- name: VM with minimum requirements to clone ntnx_vms: state: present name: integration_test_clone_vm @@ -32,8 +32,8 @@ that: - vm.response is defined - vm.response.status.state == 'COMPLETE' - fail_msg: 'Fail: Unable to create VM with minimum requiremnts to clone ' - success_msg: 'Succes: VM with minimum requiremnts created successfully ' + fail_msg: 'Fail: Unable to create VM with minimum requirements to clone ' + success_msg: 'Succes: VM with minimum requirements created successfully ' ############################## - name: clone vm and change vcpus,memory_gb,cores_per_vcpu,timezone,desc,name with force_power_off diff --git a/tests/integration/targets/nutanix_subnets/tasks/negative_scenarios.yml b/tests/integration/targets/nutanix_subnets/tasks/negative_scenarios.yml index ecb702975..c392c795d 100644 --- a/tests/integration/targets/nutanix_subnets/tasks/negative_scenarios.yml +++ b/tests/integration/targets/nutanix_subnets/tasks/negative_scenarios.yml @@ -1,7 +1,7 @@ - debug: msg: "Started Negative Creation Cases" - - name: Unknow virtual switch name + - name: Unknown virtual switch name ntnx_subnets: state: present name: VLAN subnet without IPAM @@ -21,7 +21,7 @@ - result.msg=="Failed generating subnet spec" success_msg: ' Success: returned error as expected ' ############################################################### - - name: Unknow virtual switch uuid + - name: Unknown virtual switch uuid ntnx_subnets: state: present name: VLAN subnet with IPAM diff --git a/tests/integration/targets/nutanix_vms/tasks/create.yml b/tests/integration/targets/nutanix_vms/tasks/create.yml index 9da6dfc96..d2f99f460 100644 --- a/tests/integration/targets/nutanix_vms/tasks/create.yml +++ b/tests/integration/targets/nutanix_vms/tasks/create.yml @@ -367,7 +367,7 @@ todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' when: result.response.status.state == 'COMPLETE' ################################################################################################### - - name: VM with minimum requiremnts + - name: VM with minimum requirements ntnx_vms: state: present name: MinReqVM @@ -381,8 +381,8 @@ that: - result.response is defined - 
result.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with minimum requiremnts ' - success_msg: ' VM with minimum requiremnts created successfully ' + fail_msg: ' Unable to create VM with minimum requirements ' + success_msg: ' VM with minimum requirements created successfully ' - set_fact: todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' @@ -498,7 +498,7 @@ todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' when: result.response.status.state == 'COMPLETE' ######################################################################################### - - name: VM with diffrent disk types and diffrent sizes with UEFI boot type + - name: VM with different disk types and different sizes with UEFI boot type ntnx_vms: state: present name: VM with UEFI boot type @@ -543,8 +543,8 @@ that: - result.response is defined - result.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with diffrent disk types and diffrent sizes with UEFI boot type ' - success_msg: ' VM with diffrent disk types and diffrent sizes with UEFI boot type created successfully ' + fail_msg: ' Unable to create VM with different disk types and different sizes with UEFI boot type ' + success_msg: ' VM with different disk types and different sizes with UEFI boot type created successfully ' - set_fact: todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' diff --git a/tests/integration/targets/nutanix_vms/tasks/delete.yml b/tests/integration/targets/nutanix_vms/tasks/delete.yml index b1cf3046d..c3faaf636 100644 --- a/tests/integration/targets/nutanix_vms/tasks/delete.yml +++ b/tests/integration/targets/nutanix_vms/tasks/delete.yml @@ -1,5 +1,5 @@ --- -- name: VM with minimum requiremnts +- name: VM with minimum requirements ntnx_vms: state: present name: MinReqVM @@ -13,8 +13,8 @@ that: - result.response is defined - result.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with minimum requiremnts ' - success_msg: ' VM with minimum requiremnts created successfully ' + fail_msg: ' Unable to create VM with minimum requirements ' + success_msg: ' VM with minimum requirements created successfully ' - name: Delete VM ntnx_vms: diff --git a/tests/integration/targets/nutanix_vms/tasks/negtaive_scenarios.yml b/tests/integration/targets/nutanix_vms/tasks/negtaive_scenarios.yml index f003d4459..0488155ff 100644 --- a/tests/integration/targets/nutanix_vms/tasks/negtaive_scenarios.yml +++ b/tests/integration/targets/nutanix_vms/tasks/negtaive_scenarios.yml @@ -166,7 +166,7 @@ success_msg: ' Success: returned error as expected ' fail_msg: ' Fail VM created successfully with unknown network name ' ################################################################################### - - name: Unknow Image name + - name: Unknown Image name ntnx_vms: state: present name: unknown image_vm @@ -256,7 +256,7 @@ success_msg: ' Success: returned error as expected ' fail_msg: ' Fail: VM created successfully with image size is less than actual ' ################################################################################# - - name: Unknow storage container name + - name: Unknown storage container name ntnx_vms: state: present name: unknown storage container diff --git a/tests/integration/targets/nutanix_vms/tasks/negtaive_vm_update.yml b/tests/integration/targets/nutanix_vms/tasks/negtaive_vm_update.yml index a17b52529..49adf7614 100644 --- a/tests/integration/targets/nutanix_vms/tasks/negtaive_vm_update.yml +++ 
b/tests/integration/targets/nutanix_vms/tasks/negtaive_vm_update.yml @@ -1,4 +1,4 @@ -- name: create VM with minimum requiremnts to update +- name: create VM with minimum requirements to update ntnx_vms: state: present name: update vm @@ -43,8 +43,8 @@ - vm.response.status.state == 'COMPLETE' - vm.vm_uuid - vm.task_uuid - fail_msg: ' Unable to create VM with minimum requiremnts ' - success_msg: ' VM with minimum requiremnts created successfully ' + fail_msg: ' Unable to create VM with minimum requirements ' + success_msg: ' VM with minimum requirements created successfully ' - name: update vm without change any value ntnx_vms: @@ -119,7 +119,7 @@ - debug: msg: Start negative update scenarios tests for disks -############ negative test : Decrase size +############ negative test : Decrease size - name: Update VM by decreasing the size of the disk that contains the image with SCSI bus type ntnx_vms: vm_uuid: "{{ vm.vm_uuid }}" @@ -242,7 +242,7 @@ fail_msg: ' Fail: decreasing the size of the IDE disk' success_msg: ' Success: returned error as expected ' ################ -- name: Update VM by change ths bus type of ide disk +- name: Update VM by change the bus type of ide disk ntnx_vms: vm_uuid: "{{ vm.vm_uuid }}" disks: @@ -258,9 +258,9 @@ - result.msg == ' parameters are mutually exclusive: uuid|bus found in disks ' - result.failed == True success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail: Update VM by change ths bus type of ide disk sucessfuly ' + fail_msg: ' Fail: Update VM by change the bus type of ide disk successfully ' ############ -- name: Update VM by adding IDE disk while vm is on +- name: Update VM by adding IDE disk while vm is on ntnx_vms: vm_uuid: "{{ vm.vm_uuid }}" disks: diff --git a/tests/integration/targets/nutanix_vms/tasks/vm_operations.yml b/tests/integration/targets/nutanix_vms/tasks/vm_operations.yml index d64f9f755..de2a1304a 100644 --- a/tests/integration/targets/nutanix_vms/tasks/vm_operations.yml +++ b/tests/integration/targets/nutanix_vms/tasks/vm_operations.yml @@ -1,9 +1,9 @@ - debug: - msg: Start testing VM with different opperations + msg: Start testing VM with different operations - set_fact: todelete: [] -- name: VM with minimum requiremnts +- name: VM with minimum requirements ntnx_vms: state: present name: integration_test_opperations_vm @@ -23,11 +23,11 @@ that: - vm.response is defined - vm.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with minimum requiremnts ' - success_msg: ' VM with minimum requiremnts created successfully ' + fail_msg: ' Unable to create VM with minimum requirements ' + success_msg: ' VM with minimum requirements created successfully ' ############################################ -- name: VM with minimum requiremnts with check mode +- name: VM with minimum requirements with check mode ntnx_vms: state: present name: integration_test_opperations_vm @@ -116,7 +116,7 @@ # success_msg: ' VM soft_shutdown successfully ' ########################################### ############################### -# - name: VM with minimum requiremnts and soft_shutdown +# - name: VM with minimum requirements and soft_shutdown # ntnx_vms: # state: present # name: integration_test_opperations_vm @@ -139,13 +139,13 @@ # - result.response.status.state == 'COMPLETE' # - result.response.status.resources.power_state == 'OFF' # - result.response.status.resources.power_state_mechanism.mechanism == 'ACPI' -# fail_msg: ' Unable to create VM with minimum requiremnts and soft_shutdown ' -# success_msg: ' VM with minimum 
requiremnts created successfully and soft_shutdown ' +# fail_msg: ' Unable to create VM with minimum requirements and soft_shutdown ' +# success_msg: ' VM with minimum requirements created successfully and soft_shutdown ' # - set_fact: # todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' -- name: Create VM with minimum requiremnts with hard_poweroff opperation +- name: Create VM with minimum requirements with hard_poweroff operation ntnx_vms: state: hard_poweroff name: integration_test_opperations_vm @@ -161,13 +161,13 @@ - result.response.status.state == 'COMPLETE' - result.response.status.resources.power_state == 'OFF' - result.response.status.resources.power_state_mechanism.mechanism == 'HARD' - fail_msg: ' Unable to create VM with minimum requiremnts with hard_poweroff opperation ' - success_msg: ' VM with minimum requiremnts and hard_poweroff state created successfully ' + fail_msg: ' Unable to create VM with minimum requirements with hard_poweroff operation ' + success_msg: ' VM with minimum requirements and hard_poweroff state created successfully ' - set_fact: todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' -- name: Create VM with minimum requiremnts with hard_poweroff opperation without wait +- name: Create VM with minimum requirements with hard_poweroff operation without wait ntnx_vms: state: hard_poweroff name: integration_test_opperations_vm_111 @@ -184,8 +184,8 @@ - result.response.status.state == 'COMPLETE' or result.response.status.state == 'PENDING' - result.vm_uuid - result.task_uuid - fail_msg: ' Unable to create VM with minimum requiremnts with hard_poweroff opperation ' - success_msg: ' VM with minimum requiremnts and hard_poweroff state created successfully ' + fail_msg: ' Unable to create VM with minimum requirements with hard_poweroff operation ' + success_msg: ' VM with minimum requirements and hard_poweroff state created successfully ' - set_fact: todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' diff --git a/tests/integration/targets/nutanix_vms/tasks/vm_update.yml b/tests/integration/targets/nutanix_vms/tasks/vm_update.yml index fa0ccd60e..dd6a49f23 100644 --- a/tests/integration/targets/nutanix_vms/tasks/vm_update.yml +++ b/tests/integration/targets/nutanix_vms/tasks/vm_update.yml @@ -1,6 +1,6 @@ # ########################### UPDATE_VM ################################ -- name: create VM with minimum requiremnts to update +- name: create VM with minimum requirements to update ntnx_vms: state: present name: update vm @@ -23,8 +23,8 @@ - result.response.status.state == 'COMPLETE' - result.vm_uuid - result.task_uuid - fail_msg: ' Unable to create VM with minimum requiremnts ' - success_msg: ' VM with minimum requiremnts created successfully ' + fail_msg: ' Unable to create VM with minimum requirements ' + success_msg: ' VM with minimum requirements created successfully ' #################################################################### - name: update vm by set owner by uuid ntnx_vms: @@ -135,7 +135,7 @@ fail_msg: ' Unable to update categories attached to vm' success_msg: ' VM categories updated successfully ' -- name: remove all categoies attached to vm +- name: remove all categories attached to vm ntnx_vms: vm_uuid: "{{ result.vm_uuid }}" remove_categories: true @@ -156,7 +156,7 @@ ################################################################### - debug: msg: Start update tests for disks -##### CRUD opperation for SCSI disks +##### CRUD operation for SCSI disks - name: Update VM by adding SCSI 
disks ntnx_vms: vm_uuid: "{{ result.vm_uuid }}" @@ -236,7 +236,7 @@ fail_msg: ' Unable to update vm by removing SCSI disks ' success_msg: ' VM updated successfully by removing SCSI disks ' ####### -##### CRUD opperation for PCI disks +##### CRUD operation for PCI disks - name: Update VM by adding PCI disks ntnx_vms: vm_uuid: "{{ result.vm_uuid }}" @@ -296,7 +296,7 @@ - result.response.status.state == "COMPLETE" fail_msg: ' Unable to update vm by removing PCI disks with force_power_off ' success_msg: ' VM updated successfully by removing PCI disks with force_power_off ' -##### CRUD opperation for IDE disks +##### CRUD operation for IDE disks - name: Update VM by adding IDE disks with force_power_off ntnx_vms: vm_uuid: "{{ result.vm_uuid }}" @@ -364,7 +364,7 @@ fail_msg: ' Unable to update vm by removing IDE disks with force_power_off' success_msg: ' VM updated successfully by removing IDE disks with force_power_off' ####### -##### CRUD opperation for SATA disks +##### CRUD operation for SATA disks - name: Update VM by adding SATA disks with force_power_off ntnx_vms: vm_uuid: "{{ result.vm_uuid }}" diff --git a/tests/integration/targets/nutanix_vms_info/tasks/list_vms.yml b/tests/integration/targets/nutanix_vms_info/tasks/list_vms.yml index 93b21f003..05b029742 100644 --- a/tests/integration/targets/nutanix_vms_info/tasks/list_vms.yml +++ b/tests/integration/targets/nutanix_vms_info/tasks/list_vms.yml @@ -1,7 +1,7 @@ - set_fact: todelete: [] -- name: Creat anohter VM with same name +- name: Creat another VM with same name ntnx_vms: name: "{{ vm.name }}" cluster: @@ -15,8 +15,8 @@ that: - output.response is defined - output.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with minimum requiremnts ' - success_msg: ' VM with minimum requiremnts created successfully ' + fail_msg: ' Unable to create VM with minimum requirements ' + success_msg: ' VM with minimum requirements created successfully ' - set_fact: todelete: '{{ todelete + [ output["response"]["metadata"]["uuid"] ] }}' diff --git a/tests/integration/targets/prepare_ndb_env/vars/main.yml b/tests/integration/targets/prepare_ndb_env/vars/main.yml index bc3e181b2..5f1af010c 100644 --- a/tests/integration/targets/prepare_ndb_env/vars/main.yml +++ b/tests/integration/targets/prepare_ndb_env/vars/main.yml @@ -129,7 +129,7 @@ cluster_ips: vm_password: "TEST_VM_PASSWORD" vm_username: "TEST_VM_USERNAME" -# exitsing db server VM reference for software profile tests +# existing db server VM reference for software profile tests db_server_vm: name: "TEST_DB_SERVER_VM_NAME" uuid: "TEST_DB_SERVER_VM_UUID" @@ -181,4 +181,4 @@ ndb_vlan: updated_primary_dns: "TEST_UPDATED_PRIMARY_DNS" updated_secondary_dns: "TEST_UPDATED_SECONDARY_DNS" -todelete: [] \ No newline at end of file +todelete: [] From ab9f1e9debe248277541f85e2269b08c5c84231d Mon Sep 17 00:00:00 2001 From: DemoYeti <164791169+DemoYeti@users.noreply.github.com> Date: Sun, 29 Sep 2024 11:22:37 -0400 Subject: [PATCH 06/15] doc: format markdown table (#492) --- README.md | 194 +++++++++++++++++++++++++++--------------------------- 1 file changed, 97 insertions(+), 97 deletions(-) diff --git a/README.md b/README.md index 76b92b3a0..d4d167d41 100644 --- a/README.md +++ b/README.md @@ -153,103 +153,103 @@ ansible-playbook examples/iaas/iaas.yml ## Modules -| Name | Description | -| --- | --- | -| ntnx_acps | Create, Update, Delete acp. | -| ntnx_acps_info | Get acp info. | -| ntnx_address_groups | Create, Update, Delete Nutanix address groups. 
| -| ntnx_address_groups_info | Get address groups info. | -| ntnx_categories | Create, Update, Delete categories | -| ntnx_categories_info | Get categories info. | -| ntnx_clusters_info | Get cluster info. | -| ntnx_floating_ips | Create or delete a Floating Ip. | -| ntnx_floating_ips_info | List existing Floating_Ips. | -| ntnx_hosts_info | Get host info. | -| ntnx_images | Create, update or delete a image. | -| ntnx_images_info | List existing images. | -| ntnx_image_placement_policy | Create, update or delete a image placement policy. | -| ntnx_image_placement_policies_info | List existing image placement policies. | -| ntnx_karbon_clusters | Create, Delete k8s clusters | -| ntnx_karbon_clusters_info | Get clusters info. | -| ntnx_karbon_clusters_node_pools | Update node pools of kubernetes cluster | -| ntnx_karbon_registries | Create, Delete a karbon private registry entry | -| ntnx_karbon_registries_info | Get karbon private registry registry info. | -| ntnx_pbrs | Create or delete a PBR. | -| ntnx_pbrs_info | List existing PBRs. | -| ntnx_permissions_info | List permissions info | -| ntnx_projects | create, update and delete pc projects | -| ntnx_projects_info | Get projects info. | -| ntnx_protection_rules | create, update and delete pc protection rules | -| ntnx_protection_rules_info | Get pc protection rules info. | -| ntnx_recovery_plans | create, update and delete pc recovery plans | -| ntnx_recovery_plans_info | Get pc recovery plans info. | -| ntnx_recovery_plan_jobs | create and perform action on pc recovery plans | -| ntnx_recovery_plan_jobs_info | Get pc recovery plan jobs info. | -| ntnx_roles | Create, Update, Delete Nutanix roles | -| ntnx_roles_info | Get roles info. | -| ntnx_security_rules | Create, update or delete a Security Rule. | -| ntnx_security_rules_info | List existing Security Rules. | -| ntnx_service_groups | Create, Update, Delete service_group | -| ntnx_service_groups_info | Get service groups info. | -| ntnx_static_routes | Update static routes of a vpc. | -| ntnx_static_routes_info | List existing static routes of a vpc. | -| ntnx_subnets | Create or delete a Subnet. | -| ntnx_subnets_info | List existing Subnets. | -| ntnx_user_groups | Create, Delete user_groups. | -| ntnx_user_groups_info | Get user groups info. | -| ntnx_users | Create, Delete users | -| ntnx_users_info | Get users info. | -| ntnx_vms | Create or delete a VM. | -| ntnx_vms_clone | Clone VM. | -| ntnx_vms_ova | Create OVA image from VM. | -| ntnx_vms_info | List existing VMs. | -| ntnx_vpcs | Create or delete a VPC. | -| ntnx_vpcs_info | List existing VPCs. | -| ntnx_foundation | Image nodes and create new cluster. | -| ntnx_foundation_aos_packages_info | List the AOS packages uploaded to Foundation. | -| ntnx_foundation_bmc_ipmi_config | Configure IPMI IP address on BMC of nodes. | -| ntnx_foundation_discover_nodes_info | List the nodes discovered by Foundation. | -| ntnx_foundation_hypervisor_images_info | List the hypervisor images uploaded to Foundation. | -| ntnx_foundation_image_upload | Upload hypervisor or AOS image to Foundation VM. | -| ntnx_foundation_node_network_info | Get node network information discovered by Foundation. | -| ntnx_foundation_central | Create a cluster out of nodes registered with Foundation Central. | -| ntnx_foundation_central_api_keys | Create a new api key which will be used by remote nodes to authenticate with Foundation Central. | -| ntnx_foundation_central_api_keys_info | List all the api keys created in Foundation Central. 
| -| ntnx_foundation_central_imaged_clusters_info | List all the clusters created using Foundation Central. | -| ntnx_foundation_central_imaged_nodes_info | List all the nodes registered with Foundation Central. | -| ntnx_ndb_databases_info | Get ndb database instance info | -| ntnx_ndb_clones_info | Get ndb database clones info. | -| ntnx_ndb_time_machines_info | Get ndb time machines info. | -| ntnx_ndb_profiles_info | Get ndb profiles info. | -| ntnx_ndb_db_servers_info | Get ndb database server vms info. | -| ntnx_ndb_databases | Create, update and delete database instances. | -| ntnx_ndb_register_database | Register database instance. | -| ntnx_ndb_db_server_vms | Create, update and delete database server vms. | -| ntnx_ndb_clusters_info | Get clusters info. | -| ntnx_ndb_clusters | Create, update and delete clusters in NDB | -| ntnx_ndb_snapshots_info | Get snapshots info | -| ntnx_ndb_vlans | Create, update and delete vlans | -| ntnx_ndb_vlans_info | Get vlans info in NDB | -| ntnx_ndb_stretched_vlans | Get stretched vlans inf in NDB | -| ntnx_ndb_time_machine_clusters | Manage clusters in NDB time machines | -| ntnx_ndb_tags | Create, update and delete tags | -| ntnx_ndb_tags_info | Get tags info | -| ntnx_ndb_database_clones | Create, update and delete database clones | -| ntnx_ndb_database_snapshots | Create, update and delete database snapshots | -| ntnx_ndb_database_clone_refresh | Perform database clone refresh | -| ntnx_ndb_authorize_db_server_vms | authorize database server vms with time machines | -| ntnx_ndb_profiles | create, update and delete all kind of profiles | -| ntnx_ndb_database_log_catchup | perform log catchup | -| ntnx_ndb_database_restore | perform database restore | -| ntnx_ndb_database_scale | perform database scaling | -| ntnx_ndb_linked_databases | Add and remove linked databases of database instance | -| ntnx_ndb_replicate_database_snapshots | replicate snapshots accross clusters in time machines | -| ntnx_ndb_register_db_server_vm | register database server vm | -| ntnx_ndb_maintenance_tasks | Add and remove maintenance tasks in window | -| ntnx_ndb_maintenance_window | Create, update and delete maintenance window | -| ntnx_ndb_maintenance_windows_info | Get maintenance window info | -| ntnx_ndb_slas | Create, update and delete sla | -| ntnx_ndb_slas_info | Get slas info | +| Name | Description | +|----------------------------------------------|--------------------------------------------------------------------------------------------------| +| ntnx_acps | Create, Update, Delete acp. | +| ntnx_acps_info | Get acp info. | +| ntnx_address_groups | Create, Update, Delete Nutanix address groups. | +| ntnx_address_groups_info | Get address groups info. | +| ntnx_categories | Create, Update, Delete categories | +| ntnx_categories_info | Get categories info. | +| ntnx_clusters_info | Get cluster info. | +| ntnx_floating_ips | Create or delete a Floating Ip. | +| ntnx_floating_ips_info | List existing Floating_Ips. | +| ntnx_hosts_info | Get host info. | +| ntnx_images | Create, update or delete a image. | +| ntnx_images_info | List existing images. | +| ntnx_image_placement_policy | Create, update or delete a image placement policy. | +| ntnx_image_placement_policies_info | List existing image placement policies. | +| ntnx_karbon_clusters | Create, Delete k8s clusters | +| ntnx_karbon_clusters_info | Get clusters info. 
| +| ntnx_karbon_clusters_node_pools | Update node pools of kubernetes cluster | +| ntnx_karbon_registries | Create, Delete a karbon private registry entry | +| ntnx_karbon_registries_info | Get karbon private registry registry info. | +| ntnx_pbrs | Create or delete a PBR. | +| ntnx_pbrs_info | List existing PBRs. | +| ntnx_permissions_info | List permissions info | +| ntnx_projects | create, update and delete pc projects | +| ntnx_projects_info | Get projects info. | +| ntnx_protection_rules | create, update and delete pc protection rules | +| ntnx_protection_rules_info | Get pc protection rules info. | +| ntnx_recovery_plans | create, update and delete pc recovery plans | +| ntnx_recovery_plans_info | Get pc recovery plans info. | +| ntnx_recovery_plan_jobs | create and perform action on pc recovery plans | +| ntnx_recovery_plan_jobs_info | Get pc recovery plan jobs info. | +| ntnx_roles | Create, Update, Delete Nutanix roles | +| ntnx_roles_info | Get roles info. | +| ntnx_security_rules | Create, update or delete a Security Rule. | +| ntnx_security_rules_info | List existing Security Rules. | +| ntnx_service_groups | Create, Update, Delete service_group | +| ntnx_service_groups_info | Get service groups info. | +| ntnx_static_routes | Update static routes of a vpc. | +| ntnx_static_routes_info | List existing static routes of a vpc. | +| ntnx_subnets | Create or delete a Subnet. | +| ntnx_subnets_info | List existing Subnets. | +| ntnx_user_groups | Create, Delete user_groups. | +| ntnx_user_groups_info | Get user groups info. | +| ntnx_users | Create, Delete users | +| ntnx_users_info | Get users info. | +| ntnx_vms | Create or delete a VM. | +| ntnx_vms_clone | Clone VM. | +| ntnx_vms_ova | Create OVA image from VM. | +| ntnx_vms_info | List existing VMs. | +| ntnx_vpcs | Create or delete a VPC. | +| ntnx_vpcs_info | List existing VPCs. | +| ntnx_foundation | Image nodes and create new cluster. | +| ntnx_foundation_aos_packages_info | List the AOS packages uploaded to Foundation. | +| ntnx_foundation_bmc_ipmi_config | Configure IPMI IP address on BMC of nodes. | +| ntnx_foundation_discover_nodes_info | List the nodes discovered by Foundation. | +| ntnx_foundation_hypervisor_images_info | List the hypervisor images uploaded to Foundation. | +| ntnx_foundation_image_upload | Upload hypervisor or AOS image to Foundation VM. | +| ntnx_foundation_node_network_info | Get node network information discovered by Foundation. | +| ntnx_foundation_central | Create a cluster out of nodes registered with Foundation Central. | +| ntnx_foundation_central_api_keys | Create a new api key which will be used by remote nodes to authenticate with Foundation Central. | +| ntnx_foundation_central_api_keys_info | List all the api keys created in Foundation Central. | +| ntnx_foundation_central_imaged_clusters_info | List all the clusters created using Foundation Central. | +| ntnx_foundation_central_imaged_nodes_info | List all the nodes registered with Foundation Central. | +| ntnx_ndb_databases_info | Get ndb database instance info | +| ntnx_ndb_clones_info | Get ndb database clones info. | +| ntnx_ndb_time_machines_info | Get ndb time machines info. | +| ntnx_ndb_profiles_info | Get ndb profiles info. | +| ntnx_ndb_db_servers_info | Get ndb database server vms info. | +| ntnx_ndb_databases | Create, update and delete database instances. | +| ntnx_ndb_register_database | Register database instance. | +| ntnx_ndb_db_server_vms | Create, update and delete database server vms. 
| +| ntnx_ndb_clusters_info | Get clusters info. | +| ntnx_ndb_clusters | Create, update and delete clusters in NDB | +| ntnx_ndb_snapshots_info | Get snapshots info | +| ntnx_ndb_vlans | Create, update and delete vlans | +| ntnx_ndb_vlans_info | Get vlans info in NDB | +| ntnx_ndb_stretched_vlans | Get stretched vlans inf in NDB | +| ntnx_ndb_time_machine_clusters | Manage clusters in NDB time machines | +| ntnx_ndb_tags | Create, update and delete tags | +| ntnx_ndb_tags_info | Get tags info | +| ntnx_ndb_database_clones | Create, update and delete database clones | +| ntnx_ndb_database_snapshots | Create, update and delete database snapshots | +| ntnx_ndb_database_clone_refresh | Perform database clone refresh | +| ntnx_ndb_authorize_db_server_vms | authorize database server vms with time machines | +| ntnx_ndb_profiles | create, update and delete all kind of profiles | +| ntnx_ndb_database_log_catchup | perform log catchup | +| ntnx_ndb_database_restore | perform database restore | +| ntnx_ndb_database_scale | perform database scaling | +| ntnx_ndb_linked_databases | Add and remove linked databases of database instance | +| ntnx_ndb_replicate_database_snapshots | replicate snapshots accross clusters in time machines | +| ntnx_ndb_register_db_server_vm | register database server vm | +| ntnx_ndb_maintenance_tasks | Add and remove maintenance tasks in window | +| ntnx_ndb_maintenance_window | Create, update and delete maintenance window | +| ntnx_ndb_maintenance_windows_info | Get maintenance window info | +| ntnx_ndb_slas | Create, update and delete sla | +| ntnx_ndb_slas_info | Get slas info | ## Inventory Plugins From 120ba18e70177d33727d672fb287e391c0b0f51a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Sj=C3=B6gren?= Date: Mon, 30 Sep 2024 11:43:12 +0200 Subject: [PATCH 07/15] add msg if API response can't be converted (#486) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add msg if API response cant be converted Signed-off-by: Thomas Sjögren * add default response Signed-off-by: Thomas Sjögren * catch ValueError and increase msg length check Signed-off-by: Thomas Sjögren * revert msg length check Signed-off-by: Thomas Sjögren * align with code standard Signed-off-by: Thomas Sjögren * fix syntax Signed-off-by: Thomas Sjögren * revert back to the beginning Signed-off-by: Thomas Sjögren --------- Signed-off-by: Thomas Sjögren --- plugins/module_utils/entity.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/plugins/module_utils/entity.py b/plugins/module_utils/entity.py index 63a79c7f9..b4c256703 100644 --- a/plugins/module_utils/entity.py +++ b/plugins/module_utils/entity.py @@ -411,8 +411,13 @@ def _fetch_url( return {"status_code": status_code} if resp_json is None: + if info.get("msg"): + resp_json_msg = "{}".format(info.get("msg")) + else: + resp_json_msg = "Failed to convert API response to json" + self.module.fail_json( - msg="Failed to convert API response to json", + msg=resp_json_msg, status_code=status_code, error=body, response=resp_json, From 3b13ae20e6a066610e752b6acb8487b43d6a1c80 Mon Sep 17 00:00:00 2001 From: stahnjones <129926630+stahnjones@users.noreply.github.com> Date: Tue, 1 Oct 2024 08:09:45 -0400 Subject: [PATCH 08/15] issue#481 fix invalid dn check for OU vs group or user (#482) * change config check to start with ou= for OU groups don't start with ou= but may be within an OU so would contain ou= in the string. This change makes it so groups don't have to be in the root of the AD. 
* Update ntnx_protection_rules.py with start_time check in idempotency check * update to use get function of dict in case start_time is undefined --- plugins/module_utils/prism/user_groups.py | 2 +- plugins/modules/ntnx_protection_rules.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/module_utils/prism/user_groups.py b/plugins/module_utils/prism/user_groups.py index 1eb921b1c..c208d62d4 100644 --- a/plugins/module_utils/prism/user_groups.py +++ b/plugins/module_utils/prism/user_groups.py @@ -43,7 +43,7 @@ def _build_spec_project(self, payload, config): return payload, None def _build_spec_user_distinguished_name(self, payload, config): - if "ou=" in config: + if config[0:3] == "ou=": payload["spec"]["resources"]["directory_service_ou"] = { "distinguished_name": config } diff --git a/plugins/modules/ntnx_protection_rules.py b/plugins/modules/ntnx_protection_rules.py index 2f36ae8c6..c08fb50c7 100644 --- a/plugins/modules/ntnx_protection_rules.py +++ b/plugins/modules/ntnx_protection_rules.py @@ -680,6 +680,10 @@ def check_rule_idempotency(rule_spec, update_spec): ): return False + #check if start_time has been updated + if rule_spec["spec"]["resources"].get("start_time") != update_spec["spec"]["resources"].get("start_time"): + return False + return True From 854442f9a8824cac80200522032e399456c8517a Mon Sep 17 00:00:00 2001 From: george-ghawali Date: Tue, 1 Oct 2024 18:02:49 +0300 Subject: [PATCH 09/15] Adding fix for updating empty CD ROM with clone image attribute (#505) * Adding fix for updating empty CD ROM with clone image attribute * Adding example and test for updating VM with cloning image into CD ROM * refactoring examples * Creating seperate example file for updating empty CD ROM Moving update empty CD ROM test to CRUD operation for IDE disks section * minor lint fix * resolving comments * Adding disk count assertion to assert that number of disks does not change when updating empty CD ROM --- examples/vm_update.yml | 106 ++++++------ examples/vm_update_cdrom.yml | 41 +++++ plugins/module_utils/prism/vms.py | 3 +- .../targets/nutanix_vms/tasks/vm_update.yml | 159 ++++++++++++------ 4 files changed, 199 insertions(+), 110 deletions(-) create mode 100644 examples/vm_update_cdrom.yml diff --git a/examples/vm_update.yml b/examples/vm_update.yml index 45de3d642..6e66cf108 100644 --- a/examples/vm_update.yml +++ b/examples/vm_update.yml @@ -12,8 +12,8 @@ nutanix_password: validate_certs: false tasks: - - name: Setting Variables - set_fact: + - name: Setting Variables + set_fact: cluster_name: "" script_path: "" subnet_name: "" @@ -26,56 +26,56 @@ remove_disk_uuid: "" subnet_uuid: "" - - name: Update VM - ntnx_vms: - vm_uuid: "{{ vm_uuid }}" - name: updated - desc: updated - categories: - AppType: - - Apache_Spark - disks: - - type: "DISK" - clone_image: - name: "{{ image_name }}" - bus: "SCSI" - size_gb: 20 - - type: DISK - size_gb: 3 - bus: PCI - - type: DISK - size_gb: 1 - bus: SCSI - storage_container: - uuid: "{{ storage_container_uuid }}" - networks: - - is_connected: true - subnet: - uuid: "{{ network_dhcp_uuid }}" - - is_connected: false - subnet: - uuid: "{{ static.uuid }}" - private_ip: "{{ network_static_ip }}" - register: result + - name: Update VM + ntnx_vms: + vm_uuid: "{{ vm_uuid }}" + name: updated + desc: updated + categories: + AppType: + - Apache_Spark + disks: + - type: "DISK" + clone_image: + name: "{{ image_name }}" + bus: "SCSI" + size_gb: 20 + - type: DISK + size_gb: 3 + bus: PCI + - type: DISK + size_gb: 1 + bus: SCSI + 
storage_container: + uuid: "{{ storage_container_uuid }}" + networks: + - is_connected: true + subnet: + uuid: "{{ network_dhcp_uuid }}" + - is_connected: false + subnet: + uuid: "{{ static.uuid }}" + private_ip: "{{ network_static_ip }}" + register: result - - name: Update VM by deleting and editing disks and subnets - ntnx_vms: - vm_uuid: "{{ vm_uuid }}" - name: update diks - desc: update disks - disks: - - type: "DISK" - uuid: "{{ disk_uuid }}" - size_gb: 30 - - state: absent - uuid: "{{ remove_disk_uuid }}" - networks: - - state: absent - uuid: "{{ subnet_uuid }}" - register: result + - name: Update VM by deleting and editing disks and subnets + ntnx_vms: + vm_uuid: "{{ vm_uuid }}" + name: update disks + desc: update disks + disks: + - type: "DISK" + uuid: "{{ disk_uuid }}" + size_gb: 30 + - state: absent + uuid: "{{ remove_disk_uuid }}" + networks: + - state: absent + uuid: "{{ subnet_uuid }}" + register: result - - name: Update VM by deleting it - ntnx_vms: - state: absent - vm_uuid: "{{ vm_uuid }}" - register: result + - name: Update VM by deleting it + ntnx_vms: + state: absent + vm_uuid: "{{ vm_uuid }}" + register: result diff --git a/examples/vm_update_cdrom.yml b/examples/vm_update_cdrom.yml new file mode 100644 index 000000000..f85080f3f --- /dev/null +++ b/examples/vm_update_cdrom.yml @@ -0,0 +1,41 @@ +########################### UPDATE_VM_CDROM ################################ +--- +- name: Create a VM with empty CD ROM and Update that disk with image + hosts: localhost + gather_facts: false + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + tasks: + - name: Setting Variables + ansible.builtin.set_fact: + vm_uuid: "" + disk_uuid: "" + + - name: Create VM with empty CD ROM + nutanix.ncp.ntnx_vms: + name: "VM with empty CD ROM" + cluster: + name: "{{ cluster.name }}" + categories: + Environment: + - Production + disks: + - type: "CDROM" + bus: "IDE" + empty_cdrom: true + register: result + + - name: Update VM by cloning image into CD ROM + nutanix.ncp.ntnx_vms: + vm_uuid: "{{ result.vm_uuid }}" + name: "VM with CD ROM updated" + disks: + - type: "CDROM" + uuid: "{{ result.response.spec.resources.disk_list[0].uuid }}" + clone_image: + name: "{{ iso_image_name }}" + register: result diff --git a/plugins/module_utils/prism/vms.py b/plugins/module_utils/prism/vms.py index b4522add6..89c3221ed 100644 --- a/plugins/module_utils/prism/vms.py +++ b/plugins/module_utils/prism/vms.py @@ -497,7 +497,8 @@ def _generate_disk_spec( if error: return None, error - disk["data_source_reference"]["uuid"] = uuid + disk.setdefault("data_source_reference", {})["uuid"] = uuid + disk.setdefault("data_source_reference", {})["kind"] = "image" if ( not disk.get("storage_config", {}) diff --git a/tests/integration/targets/nutanix_vms/tasks/vm_update.yml b/tests/integration/targets/nutanix_vms/tasks/vm_update.yml index dd6a49f23..86fed5585 100644 --- a/tests/integration/targets/nutanix_vms/tasks/vm_update.yml +++ b/tests/integration/targets/nutanix_vms/tasks/vm_update.yml @@ -7,8 +7,8 @@ cluster: name: "{{ cluster.name }}" categories: - Environment: - - Production + Environment: + - Production vcpus: 5 cores_per_vcpu: 5 memory_gb: 5 @@ -23,8 +23,8 @@ - result.response.status.state == 'COMPLETE' - result.vm_uuid - result.task_uuid - fail_msg: ' Unable to create VM with minimum requirements ' - success_msg: ' VM with minimum requirements created successfully ' + fail_msg: " Unable to create VM with minimum requirements " + 
success_msg: " VM with minimum requirements created successfully " #################################################################### - name: update vm by set owner by uuid ntnx_vms: @@ -44,8 +44,8 @@ - result.response.metadata.owner_reference.name == "{{ vm_owner.name }}" - result.response.metadata.owner_reference.uuid == "{{ vm_owner.uuid }}" - result.response.metadata.owner_reference.kind == "user" - fail_msg: ' Unable to update vm by setting owner ' - success_msg: ' VM updated successfully by setting owner ' + fail_msg: " Unable to update vm by setting owner " + success_msg: " VM updated successfully by setting owner " #################################################################### - debug: @@ -69,8 +69,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by decrease the values for memory, vcpus and corespervcpu with force_power_off ' - success_msg: ' VM updated successfully by decrease the values for memory, vcpus and corespervcpu with force_power_off ' + fail_msg: " Unable to update vm by decrease the values for memory, vcpus and corespervcpu with force_power_off " + success_msg: " VM updated successfully by decrease the values for memory, vcpus and corespervcpu with force_power_off " - name: increase values for memory, vcpus and corespervcpu ntnx_vms: @@ -87,8 +87,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by increase values for memory, vcpus ' - success_msg: ' VM updated successfully by increase values for memory, vcpus ' + fail_msg: " Unable to update vm by increase values for memory, vcpus " + success_msg: " VM updated successfully by increase values for memory, vcpus " - name: increase values for corespervcpu with force_power_off ntnx_vms: @@ -105,8 +105,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by increase values for corespervcpu with force_power_off' - success_msg: ' VM updated successfully by increase values for corespervcpu with force_power_off ' + fail_msg: " Unable to update vm by increase values for corespervcpu with force_power_off" + success_msg: " VM updated successfully by increase values for corespervcpu with force_power_off " #################################################################### - debug: @@ -116,10 +116,10 @@ ntnx_vms: vm_uuid: "{{ result.vm_uuid }}" categories: - Environment: - - Dev - AppType: - - Default + Environment: + - Dev + AppType: + - Default register: result ignore_errors: true @@ -132,8 +132,8 @@ - result.response.status.state == "COMPLETE" - result.response.metadata.categories_mapping["Environment"] == ["Dev"] - result.response.metadata.categories_mapping["AppType"] == ["Default"] - fail_msg: ' Unable to update categories attached to vm' - success_msg: ' VM categories updated successfully ' + fail_msg: " Unable to update categories attached to vm" + success_msg: " VM categories updated successfully " - name: remove all categories attached to vm ntnx_vms: @@ -150,8 +150,8 @@ - result.task_uuid - result.response.status.state == "COMPLETE" - result.response.metadata.categories_mapping == {} - fail_msg: ' Unable to remove all categories attached to vm' - success_msg: ' All VM categories removed successfully ' + fail_msg: " Unable to remove all categories attached to vm" + success_msg: " All VM categories removed successfully " ################################################################### - debug: @@ -184,8 
+184,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by adding SCSI disks ' - success_msg: ' VM updated successfully by adding SCSI disks ' + fail_msg: " Unable to update vm by adding SCSI disks " + success_msg: " VM updated successfully by adding SCSI disks " - name: Update VM by increasing the size of the SCSI disks ntnx_vms: @@ -210,8 +210,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by increasing the size of the SCSI disks ' - success_msg: ' VM updated successfully by increasing the size of the SCSI disks ' + fail_msg: " Unable to update vm by increasing the size of the SCSI disks " + success_msg: " VM updated successfully by increasing the size of the SCSI disks " - name: Update VM by removing SCSI disks ntnx_vms: @@ -233,8 +233,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by removing SCSI disks ' - success_msg: ' VM updated successfully by removing SCSI disks ' + fail_msg: " Unable to update vm by removing SCSI disks " + success_msg: " VM updated successfully by removing SCSI disks " ####### ##### CRUD operation for PCI disks - name: Update VM by adding PCI disks @@ -254,8 +254,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by adding PCI disks ' - success_msg: ' VM updated successfully by adding PCI disks ' + fail_msg: " Unable to update vm by adding PCI disks " + success_msg: " VM updated successfully by adding PCI disks " - name: Update VM by increasing the size of the PCI disks ntnx_vms: @@ -274,8 +274,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by increasing the size of the PCI disks ' - success_msg: ' VM updated successfully by increasing the size of the PCI disks ' + fail_msg: " Unable to update vm by increasing the size of the PCI disks " + success_msg: " VM updated successfully by increasing the size of the PCI disks " - name: Update VM by removing PCI disks with force_power_off ntnx_vms: @@ -294,8 +294,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by removing PCI disks with force_power_off ' - success_msg: ' VM updated successfully by removing PCI disks with force_power_off ' + fail_msg: " Unable to update vm by removing PCI disks with force_power_off " + success_msg: " VM updated successfully by removing PCI disks with force_power_off " ##### CRUD operation for IDE disks - name: Update VM by adding IDE disks with force_power_off ntnx_vms: @@ -318,8 +318,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by adding IDE disks with force_power_off ' - success_msg: ' VM updated successfully by adding IDE disks with force_power_off ' + fail_msg: " Unable to update vm by adding IDE disks with force_power_off " + success_msg: " VM updated successfully by adding IDE disks with force_power_off " - name: Update VM by increasing the size of the IDE disks with force_power_off ntnx_vms: @@ -339,8 +339,54 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by increasing the size of the IDE disks with force_power_off ' - success_msg: ' VM updated successfully by increasing the size of the IDE disks with force_power_off ' 
+ fail_msg: " Unable to update vm by increasing the size of the IDE disks with force_power_off " + success_msg: " VM updated successfully by increasing the size of the IDE disks with force_power_off " + +- name: Get UUID of CDROM + ansible.builtin.set_fact: + cdrom_uuid: "{{ result.response.spec.resources.disk_list | json_query(query) }}" + vars: + query: "[?device_properties.device_type == 'CDROM'].uuid" + ignore_errors: true + +- name: Get number of disks attached to VM + ansible.builtin.set_fact: + disk_count: "{{ result.response.spec.resources.disk_list | length }}" + ignore_errors: true + +- name: Update VM by cloning image into CD ROM + ntnx_vms: + vm_uuid: "{{ result.vm_uuid }}" + disks: + - type: "CDROM" + uuid: "{{ cdrom_uuid[0] }}" + clone_image: + name: "{{ centos }}" + register: result + ignore_errors: true + +- name: Get index of CDROM + ansible.builtin.set_fact: + item_index: "{{ index }}" + loop: "{{ result.response.spec.resources.disk_list }}" + loop_control: + index_var: index + when: item.uuid == cdrom_uuid[0] + no_log: true + +- name: Update Status + ansible.builtin.assert: + that: + - result.response is defined + - result.vm_uuid + - result.task_uuid + - result.response.spec.resources.disk_list[item_index].device_properties.device_type == "CDROM" + - result.response.spec.resources.disk_list[item_index].data_source_reference.kind == "image" + - result.response.spec.resources.disk_list[item_index].data_source_reference.uuid is defined + - result.response.spec.resources.disk_list | length == {{ disk_count }} + - result.response.status.state == "COMPLETE" + fail_msg: " Unable to update vm by cloning image into CD ROM " + success_msg: " VM updated successfully by cloning image into CD ROM" - name: Update VM by removing IDE disks with force_power_off ntnx_vms: @@ -361,8 +407,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by removing IDE disks with force_power_off' - success_msg: ' VM updated successfully by removing IDE disks with force_power_off' + fail_msg: " Unable to update vm by removing IDE disks with force_power_off" + success_msg: " VM updated successfully by removing IDE disks with force_power_off" ####### ##### CRUD operation for SATA disks - name: Update VM by adding SATA disks with force_power_off @@ -383,8 +429,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by adding SATA disks with force_power_off' - success_msg: ' VM updated successfully by adding SATA disks with force_power_off' + fail_msg: " Unable to update vm by adding SATA disks with force_power_off" + success_msg: " VM updated successfully by adding SATA disks with force_power_off" - name: Update VM by increasing the size of the SATA disks with force_power_off ntnx_vms: @@ -404,8 +450,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by increasing the size of the SATA disks with force_power_off ' - success_msg: ' VM updated successfully by increasing the size of the SATA disks with force_power_off' + fail_msg: " Unable to update vm by increasing the size of the SATA disks with force_power_off " + success_msg: " VM updated successfully by increasing the size of the SATA disks with force_power_off" - name: Update VM by removing SATA disks with force_power_off ntnx_vms: @@ -424,8 +470,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to 
update vm by removing SATA disks with force_power_off' - success_msg: ' VM updated successfully by removing SATA disks with force_power_off ' + fail_msg: " Unable to update vm by removing SATA disks with force_power_off" + success_msg: " VM updated successfully by removing SATA disks with force_power_off " # #################################################################### - debug: @@ -452,8 +498,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by adding subnets ' - success_msg: ' VM updated successfully by adding subnets' + fail_msg: " Unable to update vm by adding subnets " + success_msg: " VM updated successfully by adding subnets" - name: Update VM by editing a subnet is_connected ntnx_vms: @@ -474,8 +520,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by editing a subnet ' - success_msg: ' VM updated successfully by editing a subnet ' + fail_msg: " Unable to update vm by editing a subnet " + success_msg: " VM updated successfully by editing a subnet " - name: Update VM by change the private ip for subnet ntnx_vms: @@ -496,8 +542,8 @@ - result.response.status.state == 'COMPLETE' - result.vm_uuid - result.task_uuid - fail_msg: ' Unable to update vm by editing private_ip for subnet ' - success_msg: ' VM updated successfully by editing private_ip for subnet' + fail_msg: " Unable to update vm by editing private_ip for subnet " + success_msg: " VM updated successfully by editing private_ip for subnet" - name: Update VM by change vlan subnet ntnx_vms: @@ -522,8 +568,8 @@ - result.response.status.state == 'COMPLETE' - result.vm_uuid - result.task_uuid - fail_msg: ' Unable to update vm by editing a subnet vlan ' - success_msg: ' VM updated successfully by editing a subnet vlan ' + fail_msg: " Unable to update vm by editing a subnet vlan " + success_msg: " VM updated successfully by editing a subnet vlan " - name: Update VM by deleting a subnet ntnx_vms: @@ -543,8 +589,9 @@ - result.response.status.state == 'COMPLETE' - result.vm_uuid - result.task_uuid - fail_msg: ' Unable to update vm by deleting a subnet ' - success_msg: ' VM updated successfully by deleting a subnet ' + fail_msg: " Unable to update vm by deleting a subnet " + success_msg: " VM updated successfully by deleting a subnet " + # #################################################################### - name: Update VM by deleting it @@ -560,5 +607,5 @@ - result.response.status == 'SUCCEEDED' - result.vm_uuid - result.task_uuid - fail_msg: 'Fail: Unable to delete created vm ' - success_msg: 'Success: Vm deleted sucessfully' + fail_msg: "Fail: Unable to delete created vm " + success_msg: "Success: Vm deleted sucessfully" From 5360e2be7e8d84dcf5091f796a413b68cd5a938b Mon Sep 17 00:00:00 2001 From: george-ghawali Date: Wed, 2 Oct 2024 10:23:20 +0300 Subject: [PATCH 10/15] Remove ansible lint from workflows --- .github/workflows/ansible-lint.yml | 17 ----------------- 1 file changed, 17 deletions(-) delete mode 100644 .github/workflows/ansible-lint.yml diff --git a/.github/workflows/ansible-lint.yml b/.github/workflows/ansible-lint.yml deleted file mode 100644 index 0f4591dc8..000000000 --- a/.github/workflows/ansible-lint.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -name: ansible-lint -on: - - pull_request - -jobs: - build: - name: Ansible Lint - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Set up Python 3.x (latest) - uses: actions/setup-python@v2 - with: - 
python-version: 3.x - - name: Run ansible-lint - uses: ansible/ansible-lint@main From 1e75a6169cbce85c0ea5c1d4d689ed95596a24c4 Mon Sep 17 00:00:00 2001 From: george-ghawali Date: Mon, 7 Oct 2024 11:38:06 +0300 Subject: [PATCH 11/15] Adding imprv where user can group VMs by project name --- plugins/inventory/ntnx_prism_vm_inventory.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/plugins/inventory/ntnx_prism_vm_inventory.py b/plugins/inventory/ntnx_prism_vm_inventory.py index 4a085861f..c6100813e 100644 --- a/plugins/inventory/ntnx_prism_vm_inventory.py +++ b/plugins/inventory/ntnx_prism_vm_inventory.py @@ -184,6 +184,12 @@ def parse(self, inventory, loader, path, cache=True): self.inventory.set_variable( vm_name, "ntnx_categories", entity["metadata"]["categories"] ) + if "metadata" in entity and "project_reference" in entity["metadata"]: + self.inventory.set_variable( + vm_name, + "ntnx_project", + entity["metadata"]["project_reference"]["name"], + ) # Add variables created by the user's Jinja2 expressions to the host self._set_composite_vars( From 9dbfa5b751c8e8e73d740a0612d7b74303942df7 Mon Sep 17 00:00:00 2001 From: george-ghawali Date: Mon, 7 Oct 2024 15:22:12 +0300 Subject: [PATCH 12/15] Resolving comments --- plugins/inventory/ntnx_prism_vm_inventory.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inventory/ntnx_prism_vm_inventory.py b/plugins/inventory/ntnx_prism_vm_inventory.py index c6100813e..1cceef089 100644 --- a/plugins/inventory/ntnx_prism_vm_inventory.py +++ b/plugins/inventory/ntnx_prism_vm_inventory.py @@ -187,8 +187,8 @@ def parse(self, inventory, loader, path, cache=True): if "metadata" in entity and "project_reference" in entity["metadata"]: self.inventory.set_variable( vm_name, - "ntnx_project", - entity["metadata"]["project_reference"]["name"], + "project_reference", + entity.get("metadata", {}).get("project_reference", {}), ) # Add variables created by the user's Jinja2 expressions to the host From c8b089b7fc9f6c6f71c6be7073ba35c4ed5711d0 Mon Sep 17 00:00:00 2001 From: george-ghawali Date: Mon, 7 Oct 2024 15:30:46 +0300 Subject: [PATCH 13/15] Revert "Adding imprv where user can group VMs by project name" This reverts commit 1e75a6169cbce85c0ea5c1d4d689ed95596a24c4. 
--- plugins/inventory/ntnx_prism_vm_inventory.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/plugins/inventory/ntnx_prism_vm_inventory.py b/plugins/inventory/ntnx_prism_vm_inventory.py index c6100813e..4a085861f 100644 --- a/plugins/inventory/ntnx_prism_vm_inventory.py +++ b/plugins/inventory/ntnx_prism_vm_inventory.py @@ -184,12 +184,6 @@ def parse(self, inventory, loader, path, cache=True): self.inventory.set_variable( vm_name, "ntnx_categories", entity["metadata"]["categories"] ) - if "metadata" in entity and "project_reference" in entity["metadata"]: - self.inventory.set_variable( - vm_name, - "ntnx_project", - entity["metadata"]["project_reference"]["name"], - ) # Add variables created by the user's Jinja2 expressions to the host self._set_composite_vars( From 3a62c53fe7f465c40b2fee22b8ae48825d0bc94e Mon Sep 17 00:00:00 2001 From: george-ghawali Date: Mon, 7 Oct 2024 15:41:09 +0300 Subject: [PATCH 14/15] Adding lint checks in pull requests pipelines --- .github/workflows/ansible-lint.yml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 .github/workflows/ansible-lint.yml diff --git a/.github/workflows/ansible-lint.yml b/.github/workflows/ansible-lint.yml new file mode 100644 index 000000000..7d3ecdd6f --- /dev/null +++ b/.github/workflows/ansible-lint.yml @@ -0,0 +1,22 @@ +--- +name: ansible-lint +on: + push: + branches: + - main + pull_request: + branches: + - main + +jobs: + build: + name: Ansible Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up Python 3.x (latest) + uses: actions/setup-python@v2 + with: + python-version: 3.x + - name: Run ansible-lint + uses: ansible/ansible-lint@main From dbfd5e160ade34b886c67c9cb64fcf3e59ef9722 Mon Sep 17 00:00:00 2001 From: george-ghawali Date: Tue, 8 Oct 2024 08:42:47 +0300 Subject: [PATCH 15/15] resolving comments --- plugins/inventory/ntnx_prism_vm_inventory.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/plugins/inventory/ntnx_prism_vm_inventory.py b/plugins/inventory/ntnx_prism_vm_inventory.py index 1cceef089..9b079e6f8 100644 --- a/plugins/inventory/ntnx_prism_vm_inventory.py +++ b/plugins/inventory/ntnx_prism_vm_inventory.py @@ -184,12 +184,11 @@ def parse(self, inventory, loader, path, cache=True): self.inventory.set_variable( vm_name, "ntnx_categories", entity["metadata"]["categories"] ) - if "metadata" in entity and "project_reference" in entity["metadata"]: - self.inventory.set_variable( - vm_name, - "project_reference", - entity.get("metadata", {}).get("project_reference", {}), - ) + self.inventory.set_variable( + vm_name, + "project_reference", + entity.get("metadata", {}).get("project_reference", {}), + ) # Add variables created by the user's Jinja2 expressions to the host self._set_composite_vars(
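With PATCH 15/15 applied, the inventory plugin publishes the raw project_reference object (or an empty dict for VMs without a project) as a host variable, so grouping VMs by project name can be done from the inventory configuration itself. The sketch below is illustrative only: it assumes the plugin honours the standard constructed keyed_groups option, the file name is arbitrary, and the Prism connection options are omitted rather than guessed.

# nutanix.yaml -- hypothetical inventory file
plugin: nutanix.ncp.ntnx_prism_vm_inventory
# Prism host, credentials and validate_certs go here, using the option
# names documented for this plugin (left out of this sketch).
keyed_groups:
  # project_reference is the host variable set in PATCH 15/15; it may be {}
  # for VMs that are not assigned to a project, hence the fallback value.
  - key: project_reference.get("name", "no_project")
    prefix: project
    separator: "_"

A quick check with "ansible-inventory -i nutanix.yaml --graph" should then show one project_<name> group per project, assuming keyed_groups is supported by the plugin's constructed options.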