diff --git a/bibigrid/core/actions/create.py b/bibigrid/core/actions/create.py
index d9827a43..07139b95 100644
--- a/bibigrid/core/actions/create.py
+++ b/bibigrid/core/actions/create.py
@@ -298,14 +298,16 @@ def create_server_volumes(self, provider, instance, name):
         """
         Creates all volumes of a single instance
         @param provider:
-        @param instance:
-        @param name:
+        @param instance: instance description (flavor, image, ...)
+        @param name: server name
        @return:
         """
         self.log.info("Creating volumes ...")
         return_volumes = []
+        group_instance = {"volumes": []}
+        instance.setdefault("group_instances", {})[name] = group_instance

         for i, volume in enumerate(instance.get("volumes", [])):
             if not volume.get("exists"):
                 if volume.get("permanent"):
                     infix = "perm"
@@ -314,20 +316,23 @@
                 else:
                     infix = "tmp"
                 postfix = f"-{volume.get('name')}" if volume.get('name') else ''
-                volume["name"] = f"{name}-{infix}-{i}{postfix}"
+                volume_name = f"{name}-{infix}-{i}{postfix}"
+            else:
+                volume_name = volume["name"]
+            group_instance["volumes"].append({**volume, "name": volume_name})

-            self.log.debug(f"Trying to find volume {volume['name']}")
-            return_volume = provider.get_volume_by_id_or_name(volume["name"])
+            self.log.debug(f"Trying to find volume {volume_name}")
+            return_volume = provider.get_volume_by_id_or_name(volume_name)
             if not return_volume:
-                self.log.debug(f"Volume {volume['name']} not found.")
+                self.log.debug(f"Volume {volume_name} not found.")
                 if volume.get('snapshot'):
                     self.log.debug("Creating volume from snapshot...")
-                    return_volume = provider.create_volume_from_snapshot(volume['snapshot'], volume["name"])
+                    return_volume = provider.create_volume_from_snapshot(volume['snapshot'], volume_name)
                     if not return_volume:
                         raise ConfigurationException(f"Snapshot {volume['snapshot']} not found!")
                 else:
                     self.log.debug("Creating volume...")
-                    return_volume = provider.create_volume(name=volume["name"], size=volume.get("size", 50),
+                    return_volume = provider.create_volume(name=volume_name, size=volume.get("size", 50),
                                                            volume_type=volume.get("type"),
                                                            description=f"Created for {name}")
             return_volumes.append(return_volume)
@@ -345,10 +350,10 @@
         """
         self.log.info("Adding device info")
         server_volumes = provider.get_mount_info_from_server(server)  # list of volumes attachments
-        volumes = instance.get("volumes")
+        group_instance_volumes = instance["group_instances"][server["name"]].get("volumes")
         final_volumes = []
-        if volumes:
-            for volume in volumes:
+        if group_instance_volumes:
+            for volume in group_instance_volumes:
                 server_volume = next((server_volume for server_volume in server_volumes if
                                       server_volume["name"] == volume["name"]), None)
                 if not server_volume:
diff --git a/bibigrid/core/startup.py b/bibigrid/core/startup.py
index 7f5c4792..71b659ec 100755
--- a/bibigrid/core/startup.py
+++ b/bibigrid/core/startup.py
@@ -27,7 +27,6 @@ def get_cluster_id_from_mem():
     @return: cluster_id. If no mem file can be found, the file is not a valid yaml file or doesn't contain a cluster_id,
     it returns none.
     """
-    print(create.CLUSTER_MEMORY_PATH)
     if os.path.isfile(create.CLUSTER_MEMORY_PATH):
         try:
             with open(create.CLUSTER_MEMORY_PATH, mode="r", encoding="UTF-8") as cluster_memory_file:
diff --git a/bibigrid/core/utility/ansible_configurator.py b/bibigrid/core/utility/ansible_configurator.py
index b24f4f93..83efe4a0 100644
--- a/bibigrid/core/utility/ansible_configurator.py
+++ b/bibigrid/core/utility/ansible_configurator.py
@@ -55,27 +55,26 @@
     return site_yaml


-def write_worker_host_vars(*, cluster_id, worker, worker_dict, worker_count, log):
-    if worker_dict["on_demand"]:
-        for worker_number in range(worker.get('count', 1)):
-            name = create.WORKER_IDENTIFIER(cluster_id=cluster_id, additional=worker_count + worker_number)
-            write_volumes = []
-            for i, volume in enumerate(worker.get("volumes", [])):
-                if not volume.get("exists"):
-                    if volume.get("permanent"):
-                        infix = "perm"
-                    elif volume.get("semiPermanent"):
-                        infix = "semiperm"
-                    else:
-                        infix = "tmp"
-                    postfix = f"-{volume.get('name')}" if volume.get('name') else ''
-                    volume_name = f"{name}-{infix}-{i}{postfix}"
+def write_worker_host_vars(*, cluster_id, worker, worker_count, log):
+    for worker_number in range(worker.get('count', 1)):
+        name = create.WORKER_IDENTIFIER(cluster_id=cluster_id, additional=worker_count + worker_number)
+        write_volumes = []
+        for i, volume in enumerate(worker.get("volumes", [])):
+            if not volume.get("exists"):
+                if volume.get("permanent"):
+                    infix = "perm"
+                elif volume.get("semiPermanent"):
+                    infix = "semiperm"
                 else:
-                    volume_name = volume["name"]
-                write_volumes.append({**volume, "name": volume_name})
-            write_yaml(os.path.join(aRP.HOST_VARS_FOLDER, f"{name}.yaml"),
-                       {"volumes": write_volumes},
-                       log)
+                    infix = "tmp"
+                postfix = f"-{volume.get('name')}" if volume.get('name') else ''
+                volume_name = f"{name}-{infix}-{i}{postfix}"
+            else:
+                volume_name = volume["name"]
+            write_volumes.append({**volume, "name": volume_name})
+        write_yaml(os.path.join(aRP.HOST_VARS_FOLDER, f"{name}.yaml"),
+                   {"volumes": write_volumes},
+                   log)


 def write_worker_vars(*, provider, configuration, cluster_id, worker, worker_count, log):
@@ -103,9 +102,12 @@ def write_worker_vars(*, provider, configuration, cluster_id, worker, worker_count, log):
         pass_through(configuration, worker_dict, "waitForServices", "wait_for_services")
     write_yaml(os.path.join(aRP.GROUP_VARS_FOLDER, f"{group_name}.yaml"), worker_dict, log)

-    write_worker_host_vars(cluster_id=cluster_id, worker=worker, worker_dict=worker_dict, worker_count=worker_count,
-                           log=log)
+    if worker_dict["on_demand"]:  # host_vars for non-on-demand instances are created in create
+        write_worker_host_vars(cluster_id=cluster_id, worker=worker, worker_count=worker_count,
+                               log=log)
     worker_count += worker.get('count', 1)
+    return worker_count
+

 def write_vpn_var(*, provider, configuration, cluster_id, vpngtw, vpn_count, log):
     name = create.VPN_WORKER_IDENTIFIER(cluster_id=cluster_id, additional=f"{vpn_count}")
@@ -158,8 +160,8 @@ def write_host_and_group_vars(configurations, providers, cluster_id, log):
     vpn_count = 0
     for configuration, provider in zip(configurations, providers):  # pylint: disable=too-many-nested-blocks
         for worker in configuration.get("workerInstances", []):
-            write_worker_vars(provider=provider, configuration=configuration, cluster_id=cluster_id, worker=worker,
-                              worker_count=worker_count, log=log)
+            worker_count = write_worker_vars(provider=provider, configuration=configuration, cluster_id=cluster_id,
+                                             worker=worker, worker_count=worker_count, log=log)

         vpngtw = configuration.get("vpnInstance")
         if vpngtw:
@@ -184,7 +186,8 @@
     dict_to[key_to] = dict_from[key_from]


-def generate_common_configuration_yaml(cidrs, configurations, cluster_id, ssh_user, default_user, log):  # pylint: disable=too-many-positional-arguments
+def generate_common_configuration_yaml(cidrs, configurations, cluster_id, ssh_user, default_user,
+                                       log):  # pylint: disable=too-many-positional-arguments
     """
     Generates common_configuration yaml (dict)
     @param cidrs: str subnet cidrs (provider generated)
diff --git a/resources/playbook/roles/bibigrid/tasks/020-disk-server.yaml b/resources/playbook/roles/bibigrid/tasks/020-disk-server.yaml
index 7eb18839..17bc4b51 100644
--- a/resources/playbook/roles/bibigrid/tasks/020-disk-server.yaml
+++ b/resources/playbook/roles/bibigrid/tasks/020-disk-server.yaml
@@ -16,8 +16,3 @@
   with_items:
     - "{{ master.disks }}"
   when: master.disks is defined
-
-- name: Automount
-  when: volumes is defined
-  include_tasks: 020-disk-automount.yaml
-  with_items: "{{ volumes }}"
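
Note on the naming scheme above: a newly created volume is named from the server name, a lifetime infix (perm/semiperm/tmp), its index in the instance's volume list, and an optional postfix taken from the user-chosen volume name; pre-existing volumes keep their configured name. The sketch below mirrors the f-string logic from create_server_volumes and write_worker_host_vars; the server name and volume entries are hypothetical example values, not taken from the patch.

# Minimal sketch of the volume naming scheme (example values are hypothetical).
def build_volume_name(server_name, index, volume):
    if volume.get("exists"):
        return volume["name"]  # pre-existing volumes keep their configured name
    if volume.get("permanent"):
        infix = "perm"
    elif volume.get("semiPermanent"):
        infix = "semiperm"
    else:
        infix = "tmp"
    postfix = f"-{volume.get('name')}" if volume.get('name') else ''
    return f"{server_name}-{infix}-{index}{postfix}"

volumes = [{"permanent": True, "name": "data"}, {"semiPermanent": True}]
for i, vol in enumerate(volumes):
    print(build_volume_name("bibigrid-worker-1a2b3c4-0", i, vol))
# bibigrid-worker-1a2b3c4-0-perm-0-data
# bibigrid-worker-1a2b3c4-0-semiperm-1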
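The group_instances bookkeeping in create_server_volumes exists so that add_volume_device_info_to_instance can later look the attached volumes up again by server name. A minimal sketch of that handoff, assuming the group entry is registered once per server before the volume loop (all data below is hypothetical):

# Producer side (create_server_volumes): register the server's entry once,
# then append one record per named volume.
instance = {"volumes": [{"permanent": True, "name": "data"}]}
name = "bibigrid-worker-1a2b3c4-0"  # hypothetical server name

group_instance = {"volumes": []}
instance.setdefault("group_instances", {})[name] = group_instance
for i, volume in enumerate(instance.get("volumes", [])):
    group_instance["volumes"].append({**volume, "name": f"{name}-perm-{i}-data"})

# Consumer side (add_volume_device_info_to_instance): resolve by server name.
server = {"name": name}
print(instance["group_instances"][server["name"]].get("volumes"))

Registering the entry once per call (rather than once per loop iteration) matters: resetting it inside the loop would leave only the last volume visible to the consumer.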
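write_worker_vars now returns the updated worker_count and the caller threads it back through the loop, so worker numbering stays contiguous across several workerInstances groups instead of every group restarting at the same index. A small sketch of the accumulation pattern (group sizes are hypothetical):

# Each group consumes a contiguous index range; returning the counter is
# what lets the next group continue where the previous one stopped.
def write_worker_vars_sketch(worker, worker_count):
    for worker_number in range(worker.get('count', 1)):
        print(f"worker-{worker_count + worker_number}")
    worker_count += worker.get('count', 1)
    return worker_count

worker_count = 0
for worker in [{"count": 2}, {"count": 3}]:  # hypothetical worker groups
    worker_count = write_worker_vars_sketch(worker, worker_count)
# prints worker-0 through worker-4, with no index reused between groups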