Skip to content

Commit

Permalink
final bugfixes
Browse files Browse the repository at this point in the history
  • Loading branch information
XaverStiensmeier committed Dec 2, 2024
1 parent bfb5e45 commit 496f78f
Show file tree
Hide file tree
Showing 4 changed files with 44 additions and 42 deletions.
27 changes: 16 additions & 11 deletions bibigrid/core/actions/create.py
Original file line number Diff line number Diff line change
Expand Up @@ -298,14 +298,16 @@ def create_server_volumes(self, provider, instance, name):
"""
Creates all volumes of a single instance
@param provider:
@param instance:
@param name:
@param instance: flavor, image, ... description
@param name: server name
@return:
"""
self.log.info("Creating volumes ...")
return_volumes = []

for i, volume in enumerate(instance.get("volumes", [])):
group_instance = {"volumes": []}
instance["group_instances"] = {name: group_instance}
if not volume.get("exists"):
if volume.get("permanent"):
infix = "perm"
Expand All @@ -314,20 +316,23 @@ def create_server_volumes(self, provider, instance, name):
else:
infix = "tmp"
postfix = f"-{volume.get('name')}" if volume.get('name') else ''
volume["name"] = f"{name}-{infix}-{i}{postfix}"
volume_name = f"{name}-{infix}-{i}{postfix}"
else:
volume_name = volume["name"]
group_instance["volumes"].append({**volume, "name": volume_name})

self.log.debug(f"Trying to find volume {volume['name']}")
return_volume = provider.get_volume_by_id_or_name(volume["name"])
self.log.debug(f"Trying to find volume {volume_name}")
return_volume = provider.get_volume_by_id_or_name(volume_name)
if not return_volume:
self.log.debug(f"Volume {volume['name']} not found.")
self.log.debug(f"Volume {volume_name} not found.")
if volume.get('snapshot'):
self.log.debug("Creating volume from snapshot...")
return_volume = provider.create_volume_from_snapshot(volume['snapshot'], volume["name"])
return_volume = provider.create_volume_from_snapshot(volume['snapshot'], volume_name)
if not return_volume:
raise ConfigurationException(f"Snapshot {volume['snapshot']} not found!")
else:
self.log.debug("Creating volume...")
return_volume = provider.create_volume(name=volume["name"], size=volume.get("size", 50),
return_volume = provider.create_volume(name=volume_name, size=volume.get("size", 50),
volume_type=volume.get("type"),
description=f"Created for {name}")
return_volumes.append(return_volume)
Expand All @@ -345,10 +350,10 @@ def add_volume_device_info_to_instance(self, provider, server, instance):
"""
self.log.info("Adding device info")
server_volumes = provider.get_mount_info_from_server(server) # list of volumes attachments
volumes = instance.get("volumes")
group_instance_volumes = instance["group_instances"][server["name"]].get("volumes")
final_volumes = []
if volumes:
for volume in volumes:
if group_instance_volumes:
for volume in group_instance_volumes:
server_volume = next((server_volume for server_volume in server_volumes if
server_volume["name"] == volume["name"]), None)
if not server_volume:
Expand Down
1 change: 0 additions & 1 deletion bibigrid/core/startup.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,6 @@ def get_cluster_id_from_mem():
@return: cluster_id. If no mem file can be found, the file is not a valid yaml file or doesn't contain a cluster_id,
it returns none.
"""
print(create.CLUSTER_MEMORY_PATH)
if os.path.isfile(create.CLUSTER_MEMORY_PATH):
try:
with open(create.CLUSTER_MEMORY_PATH, mode="r", encoding="UTF-8") as cluster_memory_file:
Expand Down
53 changes: 28 additions & 25 deletions bibigrid/core/utility/ansible_configurator.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,27 +55,26 @@ def generate_site_file_yaml(user_roles):
return site_yaml


def write_worker_host_vars(*, cluster_id, worker, worker_dict, worker_count, log):
if worker_dict["on_demand"]:
for worker_number in range(worker.get('count', 1)):
name = create.WORKER_IDENTIFIER(cluster_id=cluster_id, additional=worker_count + worker_number)
write_volumes = []
for i, volume in enumerate(worker.get("volumes", [])):
if not volume.get("exists"):
if volume.get("permanent"):
infix = "perm"
elif volume.get("semiPermanent"):
infix = "semiperm"
else:
infix = "tmp"
postfix = f"-{volume.get('name')}" if volume.get('name') else ''
volume_name = f"{name}-{infix}-{i}{postfix}"
def write_worker_host_vars(*, cluster_id, worker, worker_count, log):
    """
    Writes a host_vars yaml file for every worker instance of the given worker group.

    For each worker a volume list is built: volumes that are not marked as already
    existing get a generated name of the form
    "<worker-name>-<perm|semiperm|tmp>-<index>[-<volume-name>]"; volumes marked
    "exists" keep their configured name unchanged.
    @param cluster_id: cluster id used to build each worker's name
    @param worker: worker group description (count, volumes, ...)
    @param worker_count: number of workers created before this group; used as a name offset
    @param log: logger passed through to write_yaml
    @return:
    """
    for worker_number in range(worker.get('count', 1)):
        # worker names are numbered continuously across groups, hence the offset
        name = create.WORKER_IDENTIFIER(cluster_id=cluster_id, additional=worker_count + worker_number)
        write_volumes = []
        for i, volume in enumerate(worker.get("volumes", [])):
            if not volume.get("exists"):
                # infix encodes the volume's lifetime category in its generated name
                if volume.get("permanent"):
                    infix = "perm"
                elif volume.get("semiPermanent"):
                    infix = "semiperm"
                else:
                    infix = "tmp"
                postfix = f"-{volume.get('name')}" if volume.get('name') else ''
                volume_name = f"{name}-{infix}-{i}{postfix}"
            else:
                # existing volumes are referenced by their configured name as-is
                volume_name = volume["name"]
            write_volumes.append({**volume, "name": volume_name})
        write_yaml(os.path.join(aRP.HOST_VARS_FOLDER, f"{name}.yaml"),
                   {"volumes": write_volumes},
                   log)


def write_worker_vars(*, provider, configuration, cluster_id, worker, worker_count, log):
Expand Down Expand Up @@ -103,9 +102,12 @@ def write_worker_vars(*, provider, configuration, cluster_id, worker, worker_cou

pass_through(configuration, worker_dict, "waitForServices", "wait_for_services")
write_yaml(os.path.join(aRP.GROUP_VARS_FOLDER, f"{group_name}.yaml"), worker_dict, log)
write_worker_host_vars(cluster_id=cluster_id, worker=worker, worker_dict=worker_dict, worker_count=worker_count,
log=log)
if worker_dict["on_demand"]: # not on demand instances host_vars are created in create
write_worker_host_vars(cluster_id=cluster_id, worker=worker, worker_count=worker_count,
log=log)
worker_count += worker.get('count', 1)
return worker_count


def write_vpn_var(*, provider, configuration, cluster_id, vpngtw, vpn_count, log):
name = create.VPN_WORKER_IDENTIFIER(cluster_id=cluster_id, additional=f"{vpn_count}")
Expand Down Expand Up @@ -158,8 +160,8 @@ def write_host_and_group_vars(configurations, providers, cluster_id, log):
vpn_count = 0
for configuration, provider in zip(configurations, providers): # pylint: disable=too-many-nested-blocks
for worker in configuration.get("workerInstances", []):
write_worker_vars(provider=provider, configuration=configuration, cluster_id=cluster_id, worker=worker,
worker_count=worker_count, log=log)
worker_count = write_worker_vars(provider=provider, configuration=configuration, cluster_id=cluster_id,
worker=worker, worker_count=worker_count, log=log)

vpngtw = configuration.get("vpnInstance")
if vpngtw:
Expand All @@ -184,7 +186,8 @@ def pass_through(dict_from, dict_to, key_from, key_to=None):
dict_to[key_to] = dict_from[key_from]


def generate_common_configuration_yaml(cidrs, configurations, cluster_id, ssh_user, default_user, log): # pylint: disable=too-many-positional-arguments
def generate_common_configuration_yaml(cidrs, configurations, cluster_id, ssh_user, default_user,
log): # pylint: disable=too-many-positional-arguments
"""
Generates common_configuration yaml (dict)
@param cidrs: str subnet cidrs (provider generated)
Expand Down
5 changes: 0 additions & 5 deletions resources/playbook/roles/bibigrid/tasks/020-disk-server.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,3 @@
with_items:
- "{{ master.disks }}"
when: master.disks is defined

- name: Automount
when: volumes is defined
include_tasks: 020-disk-automount.yaml
with_items: "{{ volumes }}"

0 comments on commit 496f78f

Please sign in to comment.