Additional partitions #498

Merged · 8 commits · Jun 4, 2024
19 changes: 11 additions & 8 deletions bibigrid/core/actions/create.py
@@ -59,13 +59,6 @@ def get_identifier(identifier, cluster_id, additional=""):
WIREGUARD_SECURITY_GROUP_NAME = "wireguard" + SEPARATOR + "{cluster_id}"


def create_defaults():
if not os.path.isfile(a_rp.ANSIBLE_CFG_PATH):
shutil.copy(a_rp.ANSIBLE_CFG_DEFAULT_PATH, a_rp.ANSIBLE_CFG_PATH)
if not os.path.isfile(a_rp.SLURM_CONF_TEMPLATE_PATH):
shutil.copy(a_rp.SLURM_CONF_TEMPLATE_DEFAULT_PATH, a_rp.SLURM_CONF_TEMPLATE_PATH)


class Create: # pylint: disable=too-many-instance-attributes,too-many-arguments
"""
The class Create holds necessary methods to execute the Create-Action
@@ -108,6 +101,16 @@ def __init__(self, providers, configurations, config_path, log, debug=False, clu
"useMasterWithPublicIp", True)
self.log.debug("Keyname: %s", self.key_name)

def create_defaults(self):
self.log.debug("Creating default files")
if not self.configurations[0].get("customAnsibleCfg", False) or not os.path.isfile(a_rp.ANSIBLE_CFG_PATH):
self.log.debug("Copying ansible.cfg")
shutil.copy(a_rp.ANSIBLE_CFG_DEFAULT_PATH, a_rp.ANSIBLE_CFG_PATH)
if not self.configurations[0].get("customSlurmConf", False) or not os.path.isfile(
a_rp.SLURM_CONF_TEMPLATE_PATH):
self.log.debug("Copying slurm.conf")
shutil.copy(a_rp.SLURM_CONF_TEMPLATE_DEFAULT_PATH, a_rp.SLURM_CONF_TEMPLATE_PATH)

def generate_keypair(self):
"""
Generates ECDSA Keypair using system-function ssh-keygen and uploads the generated public key to providers.
@@ -427,7 +430,7 @@ def create(self): # pylint: disable=too-many-branches,too-many-statements
try:
self.generate_keypair()
self.prepare_configurations()
create_defaults()
self.create_defaults()
self.generate_security_groups()
self.start_start_server_threads()
self.extended_network_configuration()
28 changes: 6 additions & 22 deletions bibigrid/core/utility/ansible_configurator.py
@@ -64,6 +64,7 @@ def generate_site_file_yaml(user_roles):
host_dict["vars_files"] = host_dict["vars_files"] + user_role.get("varsFiles", [])
host_dict["roles"] = host_dict["roles"] + [{"role": role["name"], "tags": role.get("tags", [])} for role
in user_role["roles"]]

return site_yaml


@@ -96,14 +97,16 @@ def write_host_and_group_vars(configurations, providers, cluster_id, log): # py
"network": configuration["network"], "flavor": flavor_dict,
"gateway_ip": configuration["private_v4"],
"cloud_identifier": configuration["cloud_identifier"],
"on_demand": worker.get("onDemand", True)}
"on_demand": worker.get("onDemand", True),
"partitions": worker.get("partitions", []) + ["all", configuration["cloud_identifier"]]}

worker_features = worker.get("features", [])
if isinstance(worker_features, str):
worker_features = [worker_features]
features = set(configuration_features + worker_features)
if features:
worker_dict["features"] = features

pass_through(configuration, worker_dict, "waitForServices", "wait_for_services")
write_yaml(os.path.join(aRP.GROUP_VARS_FOLDER, group_name), worker_dict, log)
vpngtw = configuration.get("vpnInstance")
@@ -135,7 +138,8 @@ def write_host_and_group_vars(configurations, providers, cluster_id, log): # py
"flavor": flavor_dict, "private_v4": configuration["private_v4"],
"cloud_identifier": configuration["cloud_identifier"], "volumes": configuration["volumes"],
"fallback_on_other_image": configuration.get("fallbackOnOtherImage", False),
"on_demand": False}
"on_demand": False,
"partitions": master.get("partitions", []) + ["all", configuration["cloud_identifier"]]}
if configuration.get("wireguard_peer"):
master_dict["wireguard"] = {"ip": "10.0.0.1", "peer": configuration.get("wireguard_peer")}
pass_through(configuration, master_dict, "waitForServices", "wait_for_services")
@@ -277,26 +281,6 @@ def get_cidrs(configurations):
return all_cidrs


def get_ansible_roles(ansible_roles, log):
"""
Checks if ansible_roles have all necessary values and returns True if so.
@param ansible_roles: ansible_roles from master configuration (first configuration)
@param log:
@return: list of valid ansible_roles
"""
ansible_roles_yaml = []
for ansible_role in (ansible_roles or []):
if ansible_role.get("file") and ansible_role.get("hosts"):
ansible_role_dict = {"file": ansible_role["file"], "hosts": ansible_role["hosts"]}
for key in ["name", "vars", "vars_file"]:
if ansible_role.get(key):
ansible_role_dict[key] = ansible_role[key]
ansible_roles_yaml.append(ansible_role_dict)
else:
log.warning("Ansible role %s had neither galaxy,git nor url. Not added.", ansible_role)
return ansible_roles_yaml


def get_ansible_galaxy_roles(ansible_galaxy_roles, log):
"""
Checks if ansible_galaxy_role have all necessary values and adds it to the return list if so.
105 changes: 44 additions & 61 deletions documentation/markdown/features/configuration.md
@@ -48,21 +48,36 @@ sshPublicKeyFiles:
Defines the number of attempts BiBiGrid makes when connecting to the master instance via ssh.
Attempts have a pause of `2^(attempts+2)` seconds in between. Default value is 4.
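For illustration, a minimal sketch of the key and the resulting back-off (placing `sshTimeout` at the top level of the first configuration is an assumption here):

```yaml
sshTimeout: 4  # up to 4 attempts; the pause between attempts grows as 2^(attempt+2) seconds, i.e. 8 s, 16 s, 32 s, ...
```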

#### customAnsibleCfg (optional:False)
When `False`, changes to resources/playbook/ansible.cfg are overwritten by the create action.
When `True`, changes are kept - even when you perform a `git pull`, as the file is not tracked. The default can be found at
resources/defaults/ansible/ansible.cfg.

#### customSlurmTemplate (optional:False)
When `False`, changes to resources/playbook/roles/bibigrid/templates/slurm.j2 are overwritten by the create action.
When `True`, changes are kept - even when you perform a `git pull`, as the file is not tracked. The default can be found at
resources/defaults/slurm/slurm.j2.
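A minimal sketch of both flags as they might appear at the top level of the first configuration (key names follow the headings above):

```yaml
customAnsibleCfg: True      # keep local changes to resources/playbook/ansible.cfg
customSlurmTemplate: True   # keep local changes to the slurm.j2 template
```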

#### cloudScheduling (optional)
This key allows you to influence cloud scheduling. Currently, only a single key `sshTimeout` can be set here.

##### sshTimeout (optional)
##### sshTimeout (optional:4)
Defines the number of attempts the master makes when connecting to on-demand worker instances via ssh.
Attempts have a pause of `2^(attempts+2)` seconds in between. Default value is 4.

#### autoMount (optional)
```yaml
cloudScheduling:
sshTimeout: 4
```

#### autoMount (optional:False)
> **Warning:** If a volume has an obscure filesystem, this might overwrite your data!

If `True`, all [masterMounts](#mastermounts-optional) will be automatically mounted by BiBiGrid if possible.
If a volume is not formatted or has an unknown filesystem, it will be formatted to `ext4`.
Default `False`.
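A one-line hedged sketch (key placement at the top level of the configuration is assumed):

```yaml
autoMount: True  # mount all masterMounts automatically; unformatted or unknown filesystems are formatted to ext4
```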

#### masterMounts (optional)
#### masterMounts (optional:False)

`masterMounts` expects a list of volumes and snapshots. Those will be attached to the master. If any snapshots are
given, volumes are first created from them. Volumes are not deleted after cluster termination.
@@ -108,19 +123,19 @@ userRoles: # see ansible_hosts for all options
# - file1
```

#### localFS (optional)
#### localFS (optional:False)

In general, this key is ignored.
It expects `True` or `False` and helps some specific users to create a filesystem to their liking. Default is `False`.

#### localDNSlookup (optional)
#### localDNSlookup (optional:False)

If `True`, the master will store DNS information for its workers. Default is `False`.
[More information](https://helpdeskgeek.com/networking/edit-hosts-file/).

#### slurm
#### slurm (optional:True)
If `False`, the cluster will start without the job scheduling system slurm.
This is relevant to the fewest. Default is `True`.
For nearly all cases the default value is what you need. Default is `True`.

##### slurmConf (optional)
`slurmConf` contains variable fields in the `slurm.conf`. The most common use is to increase the `SuspendTime`
@@ -147,24 +162,24 @@ slurmConf:
TreeWidth: 128 # https://slurm.schedmd.com/slurm.conf.html#OPT_TreeWidth
```

#### zabbix (optional)
#### zabbix (optional:False)

If `True`, the monitoring solution [zabbix](https://www.zabbix.com/) will be installed on the master. Default is `False`.

#### nfs (optional)
#### nfs (optional:False)

If `True`, [nfs](../software/nfs.md) is set up. Default is `False`.

#### ide (optional)
#### ide (optional:False)

If `True`, [Theia Web IDE](../software/theia_ide.md) is installed.
After creation connection information is [printed](../features/create.md#prints-cluster-information).
After creation connection information is [printed](../features/create.md#prints-cluster-information). Default is `False`.

#### useMasterAsCompute (optional)
#### useMasterAsCompute (optional:True)

If `False`, the master will no longer help workers process jobs. Default is `True`.

#### useMasterWithPublicIP (optional)
#### useMasterWithPublicIP (optional:True)

If `False`, the master will not be created with an attached floating IP. Default is `True`.
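For orientation, a sketch listing the boolean keys above with their documented defaults written out explicitly (placement at the top level of the first configuration is assumed):

```yaml
localFS: False
localDNSlookup: False
slurm: True
zabbix: False
nfs: False
ide: False
useMasterAsCompute: True
useMasterWithPublicIP: True
```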

@@ -200,7 +215,7 @@ listed on [de.NBI Wiki](https://cloud.denbi.de/wiki/) at `Computer Center Specif
`infrastructure` sets the used provider implementation for this configuration. Currently only `openstack` is available.
Other infrastructures would be [AWS](https://aws.amazon.com/) and so on.

#### cloud
#### cloud (required)

`cloud` decides which entry in the `clouds.yaml` is used. When using OpenStack the entry is named `openstack`.
You can read more about the `clouds.yaml` [here](cloud_specification_data.md).
@@ -216,12 +231,20 @@ workerInstance:
image: Ubuntu 22.04 LTS (2022-10-14)
count: 2
onDemand: True # optional only on master cloud for now. Default True.
partitions: # optional. Always adds "all" and the cloud identifier as partitions
- small
- onDemand
features: # optional
- hasdatabase
- holdsinformation
```

- `type` sets the instance's hardware configuration.
- `image` sets the bootable operating system to be installed on the instance.
- `count` sets how many workers of that `type`-`image` combination are in this worker group.
- `onDemand` defines whether nodes in the worker group are scheduled on demand (`True`) or started permanently (`False`). Please only set this to `False` if necessary, as on-demand scheduling improves resource availability for all users. This option only works on the master cloud for now.
- `partitions` lets you assign the worker group's nodes to additional Slurm partitions so that jobs can be forced onto a specific group of nodes; see the sketch after this list ([more](https://slurm.schedmd.com/slurm.conf.html#SECTION_PARTITION-CONFIGURATION)).
- `features` allows you to force Slurm to schedule a job only on nodes that meet certain `bool` constraints. This can be helpful when only certain nodes can access a specific resource - like a database ([more](https://slurm.schedmd.com/slurm.conf.html#OPT_Features)).
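A sketch of two worker groups with different partitions (the second group and the partition names are hypothetical; type and image reuse the example above):

```yaml
workerInstance:
  - type: de.NBI tiny
    image: Ubuntu 22.04 LTS (2022-10-14)
    count: 2
    partitions:
      - small        # hypothetical partition name
  - type: de.NBI tiny
    image: Ubuntu 22.04 LTS (2022-10-14)
    count: 1
    partitions:
      - large        # hypothetical partition name
# every node additionally joins "all" and a partition named after its cloud identifier
```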

##### Find your active `images`

@@ -248,25 +271,6 @@ There's also a [Fallback Option](#fallbackonotherimage-optional).
openstack flavor list --os-cloud=openstack
```

##### features (optional)
You can declare a list of features for a worker group. Those are then attached to each node in the worker group.
For example:
```yaml
workerInstance:
- type: de.NBI tiny
image: Ubuntu 22.04 LTS (2022-10-14)
count: 2
features:
- hasdatabase
- holdsinformation
```

###### What's a feature?
Features allow you to force Slurm to schedule a job only on nodes that meet a certain `bool` constraint.
This can be helpful when only certain nodes can access a specific resource - like a database.

If you would like to know more about how features exactly work,
take a look at [slurm's documentation](https://slurm.schedmd.com/slurm.conf.html#OPT_Features).

#### Master or vpngtw?

@@ -301,8 +305,8 @@ Exactly one in every configuration but the first:
image: Ubuntu 22.04 LTS (2022-10-14) # regex allowed
```

### fallbackOnOtherImage (optional)
If set to `true` and an image is not among the active images,
### fallbackOnOtherImage (optional:False)
If set to `True` and an image is not among the active images,
BiBiGrid will try to pick a fallback image for you by finding the closest active image by name that has at least 60% name overlap.
This will not find a good fallback every time.
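A one-line sketch of the key (placement at the configuration level follows the heading above):

```yaml
fallbackOnOtherImage: True  # pick the closest active image (at least 60% name overlap) if the configured image is inactive
```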

@@ -318,28 +322,6 @@ and can be helpful to when image updates occur while running a cluster.

`sshUser` is the standard user of the installed images. For `Ubuntu 22.04` this would be `ubuntu`.

#### region (required)

Every [region](https://docs.openstack.org/python-openstackclient/rocky/cli/command-objects/region.html) has its own
openstack deployment. Every [avilability zone](#availabilityzone-required) belongs to a region.

Find your `regions`:

```commandline
openstack region list --os-cloud=openstack
```

#### availabilityZone (required)

[availability zones](https://docs.openstack.org/nova/latest/admin/availability-zones.html) allow to logically group
nodes.

Find your `availabilityZones`:

```commandline
openstack region list --os-cloud=openstack
```

#### subnet (required)

`subnet` is a block of ip addresses.
@@ -350,13 +332,14 @@ Find available `subnets`:
openstack subnet list --os-cloud=openstack
```

#### localDNSLookup (optional)
#### localDNSLookup (optional:False)

If no full DNS service for started instances is available, set `localDNSLookup: True`.
This is currently the case in Berlin, DKFZ, Heidelberg and Tuebingen.

#### features (optional)

You can declare a list of [features](#whats-a-feature) that are then attached to every node in the configuration.
If both [worker group](#features-optional) or [master features](#masterInstance) and configuration features are defined,
they are merged.
You can declare a list of cloud-wide [features](https://slurm.schedmd.com/slurm.conf.html#OPT_Features) that are then attached to every node in the cloud described by this configuration.
If [worker group](#workerinstances) or [master features](#masterInstance) are defined in addition to configuration features,
they are merged. If you only have a single cloud and therefore a single configuration, this key is of little use: a feature
that is present on all nodes can be omitted, since it cannot influence scheduling.
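A sketch of how configuration-level and worker-level features might combine (feature names are taken from the example above and remain illustrative):

```yaml
features:            # attached to every node of this cloud
  - hasdatabase
workerInstance:
  - type: de.NBI tiny
    image: Ubuntu 22.04 LTS (2022-10-14)
    count: 2
    features:        # merged with the cloud-wide list above
      - holdsinformation
# nodes of this worker group end up with both features: hasdatabase, holdsinformation
```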
2 changes: 1 addition & 1 deletion resources/defaults/ansible/ansible.cfg
@@ -1,6 +1,6 @@
# This file is moved programmatically to /etc/ansible/ansible.cfg on the master so it shouldn't be moved manually
[defaults]
roles_path = "/opt/playbook/roles:/opt/playbook/roles_galaxy:/opt/playbook/roles_user"
roles_path = /opt/playbook/roles:/opt/playbook/roles_galaxy:/opt/playbook/roles_user
inventory = ./ansible_hosts
host_key_checking = False
forks=50
34 changes: 20 additions & 14 deletions resources/defaults/slurm/slurm.j2
@@ -64,25 +64,31 @@ SlurmdDebug=info
SlurmdLogFile=/var/log/slurm/slurmd.log

# COMPUTE NODES
{% set sl = {} %}
{% set all = {"nodes":[]} %}
{% set partitions = {} %}
{% set exclude_groups = [] %}
{% set master_or_empty = groups.master if use_master_as_compute else [] %}
{% for node_name in master_or_empty +groups.workers %}
{% set node_groups = [] %}
{% for node_name in master_or_empty + groups.workers %}
{% set node = hostvars[node_name] %}
{% set mem = node.flavor.ram // 1024 * 1000 %}
{% if node.cloud_identifier not in sl %}
{{ sl.update({node.cloud_identifier: []}) }}
{% if node.name not in node_groups %}
{% if not node.on_demand %}
{% set _ = exclude_groups.append(node.name) %}
{% endif %}
{% if node.name not in sl[node.cloud_identifier] %}
NodeName={{ node.name }} SocketsPerBoard={{ node.flavor.vcpus }} CoresPerSocket=1 RealMemory={{ mem - [mem // 2, 16000] | min }} State={{'CLOUD' if node.on_demand else 'UNKNOWN'}} {{"Features="+node.features|join(",") if node.features is defined}}# {{ node.cloud_identifier }}
{{ sl[node.cloud_identifier].append(node.name)}}
{{ all.nodes.append(node.name)}}
{% set _ = node_groups.append(node.name) %}
{% set mem = (node.flavor.ram // 1024) * 1000 %}
NodeName={{ node.name }} SocketsPerBoard={{ node.flavor.vcpus }} CoresPerSocket=1 RealMemory={{ mem - [mem // 2, 16000] | min }} State={{ 'CLOUD' if node.on_demand else 'UNKNOWN' }} {{"Features=" + (node.features | join(",")) if node.features is defined }}# {{ node.cloud_identifier }}
{% for partition in node.partitions %}
{% if partition not in partitions %}
{% set _ = partitions.update({partition: []}) %}
{% endif %}
{% set _ = partitions[partition].append(node.name) %}
{% endfor %}
{% for key,value in sl.items() %}
PartitionName={{ key }} Nodes={{ value|join(",") }}
{% endif %}
{% endfor %}

{% for key, value in partitions.items() %}
PartitionName={{ key }} Nodes={{ value | join(",") }}
{% endfor %}
PartitionName=All Nodes = {{ all.nodes|join(",") }} default=yes

# JobSubmitPlugin
JobSubmitPlugins=all_partitions
Expand All @@ -95,7 +101,7 @@ SuspendProgram=/opt/slurm/terminate.sh
# Suspend time is 10 minutes (600 seconds)
SuspendTime= {{ slurm_conf.elastic_scheduling.SuspendTime }}
# Excludes {{ hostvars[groups.master.0].name }} from suspend
SuspendExcNodes={{ hostvars[groups.master.0].name }}
SuspendExcNodes={{ exclude_groups | join(',') }}
# Maximum number of nodes
TreeWidth= {{ slurm_conf.elastic_scheduling.TreeWidth }}
# Do not cache dns names