diff --git a/.flake8 b/.flake8
index cdf9353c..5cb0b8fb 100644
--- a/.flake8
+++ b/.flake8
@@ -1,3 +1,11 @@
+# This file is part of the EESSI build-and-deploy bot,
+# see https://github.com/EESSI/eessi-bot-software-layer
+#
+# author: Kenneth Hoste (@boegel)
+#
+# license: GPLv2
+#
+
[flake8]
max-line-length = 120
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index ec018bd0..96379ba1 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -1,6 +1,11 @@
# This workflow uses actions that are not certified by GitHub. They are provided
# by a third-party and are governed by separate terms of service, privacy
# policy, and support documentation.
+#
+# author: Alan O'Cais (@ocaisa)
+#
+# license: GPLv2
+#
name: Scorecards supply-chain security
on:
diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml
index 13fcd3a0..660ed2e2 100644
--- a/.github/workflows/tests.yaml
+++ b/.github/workflows/tests.yaml
@@ -1,3 +1,16 @@
+# This file is part of the EESSI build-and-deploy bot,
+# see https://github.com/EESSI/eessi-bot-software-layer
+#
+# The bot helps with requests to add software installations to the
+# EESSI software layer, see https://github.com/EESSI/software-layer
+#
+# author: Kenneth Hoste (@boegel)
+# author: Alan O'Cais (@ocaisa)
+# author: Thomas Roeblitz (@trz42)
+#
+# license: GPLv2
+#
+
name: Run tests
on: [push, pull_request]
# Declare default permissions as read only.
diff --git a/.gitignore b/.gitignore
index 9261c2e8..24e6f0f8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,15 @@
+# This file is part of the EESSI build-and-deploy bot,
+# see https://github.com/EESSI/eessi-bot-software-layer
+#
+# The bot helps with requests to add software installations to the
+# EESSI software layer, see https://github.com/EESSI/software-layer
+#
+# author: Bob Droege (@bedroge)
+# author: Hafsa Naeem (@hafsa-naeem)
+# author: Thomas Roeblitz (@trz42)
+#
+# license: GPLv2
+#
__pycache__/
*.py[cod]
*.log
diff --git a/.hound.yml b/.hound.yml
index 09d8c1ba..3de315f5 100644
--- a/.hound.yml
+++ b/.hound.yml
@@ -1,3 +1,13 @@
+# This file is part of the EESSI build-and-deploy bot,
+# see https://github.com/EESSI/eessi-bot-software-layer
+#
+# The bot helps with requests to add software installations to the
+# EESSI software layer, see https://github.com/EESSI/software-layer
+#
+# author: Kenneth Hoste (@boegel)
+#
+# license: GPLv2
+#
flake8:
enabled: true
config_file: .flake8
diff --git a/README.md b/README.md
index ca96d2eb..e268fc41 100644
--- a/README.md
+++ b/README.md
@@ -175,7 +175,9 @@ You can exit the virtual environment simply by running `deactivate`.
### Step 4.1: Installing tools to access S3 bucket
-The [`scripts/eessi-upload-to-staging`](https://github.com/EESSI/eessi-bot-software-layer/blob/main/scripts/eessi-upload-to-staging) script uploads a tarball and an associated metadata file to an S3 bucket.
+The
+[`scripts/eessi-upload-to-staging`](https://github.com/EESSI/eessi-bot-software-layer/blob/main/scripts/eessi-upload-to-staging)
+script uploads an artefact and an associated metadata file to an S3 bucket.
It needs two tools for this:
* the `aws` command to actually upload the files;
@@ -444,14 +446,17 @@ information about the result of the command that was run (can be empty).
The `[deploycfg]` section defines settings for uploading built artefacts (tarballs).
```
-tarball_upload_script = PATH_TO_EESSI_BOT/scripts/eessi-upload-to-staging
+artefact_upload_script = PATH_TO_EESSI_BOT/scripts/eessi-upload-to-staging
```
-`tarball_upload_script` provides the location for the script used for uploading built software packages to an S3 bucket.
+`artefact_upload_script` provides the location of the script used for uploading built software packages to an S3 bucket.
```
endpoint_url = URL_TO_S3_SERVER
```
-`endpoint_url` provides an endpoint (URL) to a server hosting an S3 bucket. The server could be hosted by a commercial cloud provider like AWS or Azure, or running in a private environment, for example, using Minio. The bot uploads tarballs to the bucket which will be periodically scanned by the ingestion procedure at the Stratum 0 server.
+`endpoint_url` provides an endpoint (URL) to a server hosting an S3 bucket. The
+server could be hosted by a commercial cloud provider like AWS or Azure, or
+run in a private environment, for example, using Minio. The bot uploads
+artefacts to the bucket, which is periodically scanned by the ingestion procedure at the Stratum 0 server.
```ini
@@ -466,7 +471,7 @@ bucket_name = {
}
```
-`bucket_name` is the name of the bucket used for uploading of tarballs.
+`bucket_name` is the name of the bucket used for uploading artefacts.
The bucket must be available on the default server (`https://${bucket_name}.s3.amazonaws.com`), or the one provided via `endpoint_url`.
`bucket_name` can be specified as a string value to use the same bucket for all target repos, or it can be a mapping from target repo id to bucket name.
@@ -481,7 +486,7 @@ The `upload_policy` defines what policy is used for uploading built artefacts to
|`upload_policy` value|Policy|
|:--------|:--------------------------------|
|`all`|Upload all artefacts (multiple uploads of the same artefact possible).|
-|`latest`|For each build target (prefix in tarball name `eessi-VERSION-{software,init,compat}-OS-ARCH)` only upload the latest built artefact.|
+|`latest`|For each build target (prefix in artefact name `eessi-VERSION-{software,init,compat}-OS-ARCH`), only upload the latest built artefact.|
|`once`|Only once upload any built artefact for the build target.|
|`none`|Do not upload any built artefacts.|
@@ -496,30 +501,30 @@ deployment), or a space delimited list of GitHub accounts.
no_deploy_permission_comment = Label `bot:deploy` has been set by user `{deploy_labeler}`, but this person does not have permission to trigger deployments
```
This defines a message that is added to the status table in a PR comment
-corresponding to a job whose tarball should have been uploaded (e.g., after
+corresponding to a job whose artefact should have been uploaded (e.g., after
setting the `bot:deploy` label).
```
metadata_prefix = LOCATION_WHERE_METADATA_FILE_GETS_DEPOSITED
-tarball_prefix = LOCATION_WHERE_TARBALL_GETS_DEPOSITED
+artefact_prefix = LOCATION_WHERE_TARBALL_GETS_DEPOSITED
```
These two settings are used to define where (which directory) in the S3 bucket
-(see `bucket_name` above) the metadata file and the tarball will be stored. The
+(see `bucket_name` above) the metadata file and the artefact will be stored. The
value `LOCATION...` can be a string value to always use the same 'prefix'
regardless of the target CVMFS repository, or can be a mapping of a target
repository id (see also `repo_target_map` below) to a prefix.
The prefix itself can use some (environment) variables that are set within
-the upload script (see `tarball_upload_script` above). Currently those are:
+the upload script (see `artefact_upload_script` above). Currently those are:
* `'${github_repository}'` (which would be expanded to the full name of the GitHub
repository, e.g., `EESSI/software-layer`),
* `'${legacy_aws_path}'` (which expands to the legacy/old prefix being used for
- storing tarballs/metadata files, the old prefix is
+ storing artefacts/metadata files, the old prefix is
`EESSI_VERSION/TARBALL_TYPE/OS_TYPE/CPU_ARCHITECTURE/TIMESTAMP/`), _and_
* `'${pull_request_number}'` (which would be expanded to the number of the pull
- request from which the tarball originates).
+ request from which the artefact originates).
Note, it's important to single-quote (`'`) the variables as shown above, because
they may likely not be defined when the bot calls the upload script.
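
As a purely hypothetical illustration (the pull request number is made up): for pull request 123 of `EESSI/software-layer`, the prefix `new/${github_repository}/${pull_request_number}` used in the examples below would expand to:

```
new/EESSI/software-layer/123
```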
@@ -529,7 +534,7 @@ The list of supported variables can be shown by running
**Examples:**
```
metadata_prefix = {"eessi.io-2023.06": "new/${github_repository}/${pull_request_number}"}
-tarball_prefix = {
+artefact_prefix = {
"eessi-pilot-2023.06": "",
"eessi.io-2023.06": "new/${github_repository}/${pull_request_number}"
}
@@ -656,46 +661,6 @@ running_job = job `{job_id}` is running
#### `[finished_job_comments]` section
The `[finished_job_comments]` section sets templates for messages about finished jobs.
-```
-success = :grin: SUCCESS tarball `{tarball_name}` ({tarball_size} GiB) in job dir
-```
-`success` specifies the message for a successful job that produced a tarball.
-
-```
-failure = :cry: FAILURE
-```
-`failure` specifies the message for a failed job.
-
-```
-no_slurm_out = No slurm output `{slurm_out}` in job dir
-```
-`no_slurm_out` specifies the message for missing Slurm output file.
-
-```
-slurm_out = Found slurm output `{slurm_out}` in job dir
-```
-`slurm_out` specifies the message for found Slurm output file.
-
-```
-missing_modules = Slurm output lacks message "No missing modules!".
-```
-`missing_modules` is used to signal the lack of a message that all modules were built.
-
-```
-no_tarball_message = Slurm output lacks message about created tarball.
-```
-`no_tarball_message` is used to signal the lack of a message about a created tarball.
-
-```
-no_matching_tarball = No tarball matching `{tarball_pattern}` found in job dir.
-```
-`no_matching_tarball` is used to signal a missing tarball.
-
-```
-multiple_tarballs = Found {num_tarballs} tarballs in job dir - only 1 matching `{tarball_pattern}` expected.
-```
-`multiple_tarballs` is used to report that multiple tarballs have been found.
-
```
job_result_unknown_fmt = :shrug: UNKNOWN _(click triangle for details)_
- Job results file `{filename}` does not exist in job directory, or parsing it failed.
- No artefacts were found/reported.
```
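
For reference, a hypothetical sketch of a `_bot_jobJOBID.result` file as written by `bot/check-build.sh` (only the `comment_description` key is referenced in this changeset; the `status` line is an assumption based on comments in the job manager code):

```ini
[RESULT]
comment_description = :grin: SUCCESS (see details in the job directory)
status = SUCCESS
```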
diff --git a/RELEASE_NOTES b/RELEASE_NOTES
index bfc562ae..20080acf 100644
--- a/RELEASE_NOTES
+++ b/RELEASE_NOTES
@@ -1,6 +1,21 @@
This file contains a description of the major changes to the EESSI
build-and-deploy bot. For more detailed information, please see the git log.
+v0.5.0 (16 May 2024)
+--------------------------
+
+This is a minor release of the EESSI build-and-deploy bot.
+
+Improvements:
+* list authors in source code files where applicable (#261)
+* make display of build targets more concise (#262)
+* use _bot_jobJOBID.result files created by bot/check-build.sh when determining
+ which build artefacts should be deployed (#263)
+ * this makes the bot fully agnostic to what it builds
+* reorganise declaration, initialisation and use of string constants _and_ verify
+  that required configuration settings are defined in 'app.cfg' (#266)
+
+
v0.4.0 (28 February 2024)
--------------------------
diff --git a/app.cfg.example b/app.cfg.example
index f01c2be9..ae51ade6 100644
--- a/app.cfg.example
+++ b/app.cfg.example
@@ -1,3 +1,19 @@
+# This file is part of the EESSI build-and-deploy bot,
+# see https://github.com/EESSI/eessi-bot-software-layer
+#
+# The bot helps with requests to add software installations to the
+# EESSI software layer, see https://github.com/EESSI/software-layer
+#
+# author: Kenneth Hoste (@boegel)
+# author: Bob Droege (@bedroge)
+# author: Hafsa Naeem (@hafsa-naeem)
+# author: Jonas Qvigstad (@jonas-lq)
+# author: Pedro Santos Neves (@Neves-P)
+# author: Thomas Roeblitz (@trz42)
+#
+# license: GPLv2
+#
+
# Also see documentation at https://github.com/EESSI/eessi-bot-software-layer/blob/main/README.md#step5.5
[github]
@@ -111,7 +127,7 @@ no_build_permission_comment = Label `bot:build` has been set by user `{build_lab
[deploycfg]
# script for uploading built software packages
-tarball_upload_script = PATH_TO_EESSI_BOT/scripts/eessi-upload-to-staging
+artefact_upload_script = PATH_TO_EESSI_BOT/scripts/eessi-upload-to-staging
# URL to S3/minio bucket
# if attribute is set, bucket_base will be constructed as follows
@@ -144,11 +160,11 @@ upload_policy = once
# value can be a space delimited list of GH accounts
deploy_permission =
-# template for comment when user who set a label has no permission to trigger deploying tarballs
+# template for comment when user who set a label has no permission to trigger deploying artefacts
no_deploy_permission_comment = Label `bot:deploy` has been set by user `{deploy_labeler}`, but this person does not have permission to trigger deployments
# settings for where (directory) in the S3 bucket to store the metadata file and
-# the tarball
+# the artefact
# - Can be a string value to always use the same 'prefix' regardless of the target
# CVMFS repository, or can be a mapping of a target repository id (see also
# repo_target_map) to a prefix.
@@ -157,17 +173,17 @@ no_deploy_permission_comment = Label `bot:deploy` has been set by user `{deploy_
# * 'github_repository' (which would be expanded to the full name of the GitHub
# repository, e.g., 'EESSI/software-layer'),
# * 'legacy_aws_path' (which expands to the legacy/old prefix being used for
-# storing tarballs/metadata files) and
+# storing artefacts/metadata files) and
# * 'pull_request_number' (which would be expanded to the number of the pull
-# request from which the tarball originates).
+# request from which the artefact originates).
# - The list of supported variables can be shown by running
# `scripts/eessi-upload-to-staging --list-variables`.
# - Examples:
# metadata_prefix = {"eessi.io-2023.06": "new/${github_repository}/${pull_request_number}"}
-# tarball_prefix = {"eessi-pilot-2023.06": "", "eessi.io-2023.06": "new/${github_repository}/${pull_request_number}"}
+# artefact_prefix = {"eessi-pilot-2023.06": "", "eessi.io-2023.06": "new/${github_repository}/${pull_request_number}"}
# If left empty, the old/legacy prefix is being used.
metadata_prefix =
-tarball_prefix =
+artefact_prefix =
[architecturetargets]
@@ -231,14 +247,6 @@ running_job = job `{job_id}` is running
[finished_job_comments]
-success = :grin: SUCCESS tarball `{tarball_name}` ({tarball_size} GiB) in job dir
-failure = :cry: FAILURE
-no_slurm_out = No slurm output `{slurm_out}` in job dir
-slurm_out = Found slurm output `{slurm_out}` in job dir
-missing_modules = Slurm output lacks message "No missing modules!".
-no_tarball_message = Slurm output lacks message about created tarball.
-no_matching_tarball = No tarball matching `{tarball_pattern}` found in job dir.
-multiple_tarballs = Found {num_tarballs} tarballs in job dir - only 1 matching `{tarball_pattern}` expected.
job_result_unknown_fmt = :shrug: UNKNOWN _(click triangle for detailed information)_
- Job results file `{filename}` does not exist in job directory, or parsing it failed.
- No artefacts were found/reported.
job_test_unknown_fmt = :shrug: UNKNOWN _(click triangle for detailed information)_
- Job test file `{filename}` does not exist in job directory, or parsing it failed.
diff --git a/connections/__init__.py b/connections/__init__.py
index e69de29b..79566b3a 100644
--- a/connections/__init__.py
+++ b/connections/__init__.py
@@ -0,0 +1,10 @@
+# This file is part of the EESSI build-and-deploy bot,
+# see https://github.com/EESSI/eessi-bot-software-layer
+#
+# The bot helps with requests to add software installations to the
+# EESSI software layer, see https://github.com/EESSI/software-layer
+#
+# author: Bob Droege (@bedroge)
+#
+# license: GPLv2
+#
diff --git a/connections/github.py b/connections/github.py
index 3037c0fe..37cf205c 100644
--- a/connections/github.py
+++ b/connections/github.py
@@ -5,6 +5,9 @@
# EESSI software layer, see https://github.com/EESSI/software-layer
#
# author: Bob Droege (@bedroge)
+# author: Hafsa Naeem (@hafsa-naeem)
+# author: Jacob Ziemke (@jacobz137)
+# author: Lara Ramona Peeters (@laraPPr)
# author: Thomas Roeblitz (@trz42)
#
# license: GPLv2
@@ -43,10 +46,10 @@ def get_token():
global _token
cfg = config.read_config()
- github_cfg = cfg['github']
- app_id = github_cfg.get('app_id')
- installation_id = github_cfg.get('installation_id')
- private_key_path = github_cfg.get('private_key')
+ github_cfg = cfg[config.SECTION_GITHUB]
+ app_id = github_cfg.get(config.GITHUB_SETTING_APP_ID)
+ installation_id = github_cfg.get(config.GITHUB_SETTING_INSTALLATION_ID)
+ private_key_path = github_cfg.get(config.GITHUB_SETTING_PRIVATE_KEY)
private_key = ''
with open(private_key_path, 'r') as private_key_file:
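
The `config.SECTION_*` and `config.*_SETTING_*` names used here are defined in `tools/config.py`, which is not part of this diff. A minimal sketch of what those definitions presumably look like, with values mirroring the string literals they replace in this hunk:

```python
# sketch of constants assumed to be defined in tools/config.py;
# the values are taken from the string literals removed above
SECTION_GITHUB = "github"
GITHUB_SETTING_APP_ID = "app_id"
GITHUB_SETTING_INSTALLATION_ID = "installation_id"
GITHUB_SETTING_PRIVATE_KEY = "private_key"
```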
diff --git a/eessi_bot_event_handler.py b/eessi_bot_event_handler.py
index be48306f..5677ed2c 100644
--- a/eessi_bot_event_handler.py
+++ b/eessi_bot_event_handler.py
@@ -6,9 +6,11 @@
# The bot helps with requests to add software installations to the
# EESSI software layer, see https://github.com/EESSI/software-layer
#
-# author: Kenneth Hoste (@boegel)
# author: Bob Droege (@bedroge)
+# author: Kenneth Hoste (@boegel)
# author: Hafsa Naeem (@hafsa-naeem)
+# author: Jonas Qvigstad (@jonas-lq)
+# author: Lara Ramona Peeters (@laraPPr)
# author: Thomas Roeblitz (@trz42)
#
# license: GPLv2
@@ -24,10 +26,8 @@
# Local application imports (anything from EESSI/eessi-bot-software-layer)
from connections import github
-import tasks.build as build
from tasks.build import check_build_permission, get_architecture_targets, get_repo_cfg, \
request_bot_build_issue_comments, submit_build_jobs
-import tasks.deploy as deploy
from tasks.deploy import deploy_built_artefacts
from tools import config
from tools.args import event_handler_parse
@@ -37,11 +37,59 @@
from tools.pr_comments import create_comment
-APP_NAME = "app_name"
-BOT_CONTROL = "bot_control"
-COMMAND_RESPONSE_FMT = "command_response_fmt"
-GITHUB = "github"
-REPO_TARGET_MAP = "repo_target_map"
+REQUIRED_CONFIG = {
+ config.SECTION_ARCHITECTURETARGETS: [
+ config.ARCHITECTURETARGETS_SETTING_ARCH_TARGET_MAP], # required
+ config.SECTION_BOT_CONTROL: [
+ config.BOT_CONTROL_SETTING_COMMAND_PERMISSION, # required
+ config.BOT_CONTROL_SETTING_COMMAND_RESPONSE_FMT], # required
+ config.SECTION_BUILDENV: [
+ config.BUILDENV_SETTING_BUILD_JOB_SCRIPT, # required
+ config.BUILDENV_SETTING_BUILD_LOGS_DIR, # optional+recommended
+ config.BUILDENV_SETTING_BUILD_PERMISSION, # optional+recommended
+ config.BUILDENV_SETTING_CONTAINER_CACHEDIR, # optional+recommended
+ # config.BUILDENV_SETTING_CVMFS_CUSTOMIZATIONS, # optional
+ # config.BUILDENV_SETTING_HTTPS_PROXY, # optional
+ # config.BUILDENV_SETTING_HTTP_PROXY, # optional
+ config.BUILDENV_SETTING_JOBS_BASE_DIR, # required
+ # config.BUILDENV_SETTING_LOAD_MODULES, # optional
+ config.BUILDENV_SETTING_LOCAL_TMP, # required
+ config.BUILDENV_SETTING_NO_BUILD_PERMISSION_COMMENT, # required
+ config.BUILDENV_SETTING_SHARED_FS_PATH, # optional+recommended
+ # config.BUILDENV_SETTING_SLURM_PARAMS, # optional
+ config.BUILDENV_SETTING_SUBMIT_COMMAND], # required
+ config.SECTION_DEPLOYCFG: [
+ config.DEPLOYCFG_SETTING_ARTEFACT_PREFIX, # (required)
+ config.DEPLOYCFG_SETTING_ARTEFACT_UPLOAD_SCRIPT, # required
+ config.DEPLOYCFG_SETTING_BUCKET_NAME, # required
+ config.DEPLOYCFG_SETTING_DEPLOY_PERMISSION, # optional+recommended
+ # config.DEPLOYCFG_SETTING_ENDPOINT_URL, # optional
+ config.DEPLOYCFG_SETTING_METADATA_PREFIX, # (required)
+ config.DEPLOYCFG_SETTING_NO_DEPLOY_PERMISSION_COMMENT, # required
+ config.DEPLOYCFG_SETTING_UPLOAD_POLICY], # required
+ config.SECTION_DOWNLOAD_PR_COMMENTS: [
+ config.DOWNLOAD_PR_COMMENTS_SETTING_CURL_FAILURE, # required
+ config.DOWNLOAD_PR_COMMENTS_SETTING_CURL_TIP, # required
+ config.DOWNLOAD_PR_COMMENTS_SETTING_GIT_APPLY_FAILURE, # required
+ config.DOWNLOAD_PR_COMMENTS_SETTING_GIT_APPLY_TIP, # required
+ config.DOWNLOAD_PR_COMMENTS_SETTING_GIT_CHECKOUT_FAILURE, # required
+ config.DOWNLOAD_PR_COMMENTS_SETTING_GIT_CHECKOUT_TIP, # required
+ config.DOWNLOAD_PR_COMMENTS_SETTING_GIT_CLONE_FAILURE, # required
+ config.DOWNLOAD_PR_COMMENTS_SETTING_GIT_CLONE_TIP], # required
+ config.SECTION_EVENT_HANDLER: [
+ config.EVENT_HANDLER_SETTING_LOG_PATH], # required
+ config.SECTION_GITHUB: [
+ config.GITHUB_SETTING_APP_ID, # required
+ config.GITHUB_SETTING_APP_NAME, # required
+ config.GITHUB_SETTING_INSTALLATION_ID, # required
+ config.GITHUB_SETTING_PRIVATE_KEY], # required
+ config.SECTION_REPO_TARGETS: [
+ config.REPO_TARGETS_SETTING_REPO_TARGET_MAP, # required
+ config.REPO_TARGETS_SETTING_REPOS_CFG_DIR], # required
+ config.SECTION_SUBMITTED_JOB_COMMENTS: [
+ config.SUBMITTED_JOB_COMMENTS_SETTING_INITIAL_COMMENT, # required
+ config.SUBMITTED_JOB_COMMENTS_SETTING_AWAITS_RELEASE] # required
+ }
class EESSIBotSoftwareLayer(PyGHee):
@@ -60,8 +108,8 @@ def __init__(self, *args, **kwargs):
super(EESSIBotSoftwareLayer, self).__init__(*args, **kwargs)
self.cfg = config.read_config()
- event_handler_cfg = self.cfg['event_handler']
- self.logfile = event_handler_cfg.get('log_path')
+ event_handler_cfg = self.cfg[config.SECTION_EVENT_HANDLER]
+ self.logfile = event_handler_cfg.get(config.EVENT_HANDLER_SETTING_LOG_PATH)
def log(self, msg, *args):
"""
@@ -108,8 +156,8 @@ def handle_issue_comment_event(self, event_info, log_file=None):
# log level is set to debug
self.log(f"Comment in {issue_url} (owned by @{owner}) {action} by @{sender}")
- app_name = self.cfg[GITHUB][APP_NAME]
- command_response_fmt = self.cfg[BOT_CONTROL][COMMAND_RESPONSE_FMT]
+ app_name = self.cfg[config.SECTION_GITHUB][config.GITHUB_SETTING_APP_NAME]
+ command_response_fmt = self.cfg[config.SECTION_BOT_CONTROL][config.BOT_CONTROL_SETTING_COMMAND_RESPONSE_FMT]
# currently, only commands in new comments are supported
# - commands have the syntax 'bot: COMMAND [ARGS*]'
@@ -299,8 +347,8 @@ def handle_pull_request_labeled_event(self, event_info, pr):
request_body = event_info['raw_request_body']
repo_name = request_body['repository']['full_name']
pr_number = request_body['pull_request']['number']
- app_name = self.cfg[GITHUB][APP_NAME]
- command_response_fmt = self.cfg[BOT_CONTROL][COMMAND_RESPONSE_FMT]
+ app_name = self.cfg[config.SECTION_GITHUB][config.GITHUB_SETTING_APP_NAME]
+ command_response_fmt = self.cfg[config.SECTION_BOT_CONTROL][config.BOT_CONTROL_SETTING_COMMAND_RESPONSE_FMT]
comment_body = command_response_fmt.format(
app_name=app_name,
comment_response=msg,
@@ -328,21 +376,26 @@ def handle_pull_request_opened_event(self, event_info, pr):
PyGithub, not the github from the internal connections module)
"""
self.log("PR opened: waiting for label bot:build")
- app_name = self.cfg[GITHUB][APP_NAME]
+ app_name = self.cfg[config.SECTION_GITHUB][config.GITHUB_SETTING_APP_NAME]
# TODO check if PR already has a comment with arch targets and
# repositories
arch_map = get_architecture_targets(self.cfg)
repo_cfg = get_repo_cfg(self.cfg)
- comment = f"Instance `{app_name}` is configured to build:"
-
- for arch in arch_map.keys():
- # check if repo_target_map contains an entry for {arch}
- if arch not in repo_cfg[REPO_TARGET_MAP]:
- self.log(f"skipping arch {arch} because repo target map does not define repositories to build for")
- continue
- for repo_id in repo_cfg[REPO_TARGET_MAP][arch]:
- comment += f"\n- arch `{'/'.join(arch.split('/')[1:])}` for repo `{repo_id}`"
+ comment = f"Instance `{app_name}` is configured to build for:"
+ architectures = ['/'.join(arch.split('/')[1:]) for arch in arch_map.keys()]
+ comment += "\n- architectures: "
+ if len(architectures) > 0:
+ comment += f"{', '.join([f'`{arch}`' for arch in architectures])}"
+ else:
+ comment += "none"
+ repositories = list(set([repo_id for repo_ids in repo_cfg[config.REPO_TARGETS_SETTING_REPO_TARGET_MAP].values()
+ for repo_id in repo_ids]))
+ comment += "\n- repositories: "
+ if len(repositories) > 0:
+ comment += f"{', '.join([f'`{repo_id}`' for repo_id in repositories])}"
+ else:
+ comment += "none"
self.log(f"PR opened: comment '{comment}'")
@@ -541,9 +594,7 @@ def start(self, app, port=3000):
print(port_info)
self.log(port_info)
- event_handler_cfg = self.cfg['event_handler']
- my_logfile = event_handler_cfg.get('log_path')
- log_file_info = "logging in to %s" % my_logfile
+ log_file_info = "logging in to %s" % self.logfile
print(log_file_info)
self.log(log_file_info)
waitress.serve(app, listen='*:%s' % port)
@@ -557,13 +608,12 @@ def main():
"""
opts = event_handler_parse()
- required_config = {
- build.SUBMITTED_JOB_COMMENTS: [build.INITIAL_COMMENT, build.AWAITS_RELEASE],
- build.BUILDENV: [build.NO_BUILD_PERMISSION_COMMENT],
- deploy.DEPLOYCFG: [deploy.NO_DEPLOY_PERMISSION_COMMENT]
- }
# config is read and checked for settings to raise an exception early when the event_handler starts.
- config.check_required_cfg_settings(required_config)
+ if config.check_required_cfg_settings(REQUIRED_CONFIG):
+ print("Configuration check: PASSED")
+ else:
+ print("Configuration check: FAILED")
+ sys.exit(1)
github.connect()
if opts.file:
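
`main()` now treats `config.check_required_cfg_settings` as returning a boolean. The function lives in `tools/config.py` and is not shown in this diff; a minimal sketch, assuming `app.cfg` is parsed with `configparser`, of the kind of check it performs:

```python
import configparser

def check_required_cfg_settings(required_config, path="app.cfg"):
    """Return True if every section/setting listed in required_config is present."""
    cfg = configparser.ConfigParser()
    cfg.read(path)
    ok = True
    for section, settings in required_config.items():
        if section not in cfg:
            print(f"Missing section '{section}' in '{path}'")
            ok = False
            continue
        for setting in settings:
            if setting not in cfg[section]:
                print(f"Missing setting '{setting}' in section '{section}' of '{path}'")
                ok = False
    return ok
```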
diff --git a/eessi_bot_job_manager.py b/eessi_bot_job_manager.py
index 5c475898..e7473f00 100644
--- a/eessi_bot_job_manager.py
+++ b/eessi_bot_job_manager.py
@@ -19,10 +19,13 @@
# The bot helps with requests to add software installations to the
# EESSI software layer, see https://github.com/EESSI/software-layer
#
-# author: Kenneth Hoste (@boegel)
# author: Bob Droege (@bedroge)
+# author: Kenneth Hoste (@boegel)
# author: Hafsa Naeem (@hafsa-naeem)
# author: Jacob Ziemke (@jacobz137)
+# author: Jonas Qvigstad (@jonas-lq)
+# author: Lara Ramona Peeters (@laraPPr)
+# author: Richard Topouchian (@TopRichard)
# author: Thomas Roeblitz (@trz42)
#
# license: GPLv2
@@ -40,38 +43,32 @@
# Local application imports (anything from EESSI/eessi-bot-software-layer)
from connections import github
-from tools import config, run_cmd
+from tools import config, job_metadata, run_cmd
from tools.args import job_manager_parse
-from tools.job_metadata import read_job_metadata_from_file, read_metadata_file
from tools.pr_comments import get_submitted_job_comment, update_comment
-AWAITS_LAUNCH = "awaits_launch"
-FAILURE = "failure"
-FINISHED_JOB_COMMENTS = "finished_job_comments"
-JOB_RESULT_COMMENT_DESCRIPTION = "comment_description"
-JOB_RESULT_UNKNOWN_FMT = "job_result_unknown_fmt"
-JOB_TEST_COMMENT_DESCRIPTION = "comment_description"
-JOB_TEST_UNKNOWN_FMT = "job_test_unknown_fmt"
-MISSING_MODULES = "missing_modules"
-MULTIPLE_TARBALLS = "multiple_tarballs"
-NEW_JOB_COMMENTS = "new_job_comments"
-NO_MATCHING_TARBALL = "no_matching_tarball"
-NO_SLURM_OUT = "no_slurm_out"
-NO_TARBALL_MESSAGE = "no_tarball_message"
-RUNNING_JOB = "running_job"
-RUNNING_JOB_COMMENTS = "running_job_comments"
-SLURM_OUT = "slurm_out"
-SUCCESS = "success"
-
+# settings that are required in 'app.cfg'
REQUIRED_CONFIG = {
- FINISHED_JOB_COMMENTS: [FAILURE, JOB_RESULT_UNKNOWN_FMT, MISSING_MODULES,
- MULTIPLE_TARBALLS, NO_MATCHING_TARBALL,
- NO_SLURM_OUT, NO_TARBALL_MESSAGE, SLURM_OUT,
- SUCCESS],
- NEW_JOB_COMMENTS: [AWAITS_LAUNCH],
- RUNNING_JOB_COMMENTS: [RUNNING_JOB]
-}
+ config.SECTION_FINISHED_JOB_COMMENTS: [
+ config.FINISHED_JOB_COMMENTS_SETTING_JOB_RESULT_UNKNOWN_FMT, # required
+ config.FINISHED_JOB_COMMENTS_SETTING_JOB_TEST_UNKNOWN_FMT], # required
+ config.SECTION_GITHUB: [
+ config.GITHUB_SETTING_APP_ID, # required
+ # config.GITHUB_SETTING_APP_NAME, # unused
+ config.GITHUB_SETTING_INSTALLATION_ID, # required
+ config.GITHUB_SETTING_PRIVATE_KEY], # required
+ config.SECTION_JOB_MANAGER: [
+ config.JOB_MANAGER_SETTING_LOG_PATH, # required
+ config.JOB_MANAGER_SETTING_JOB_IDS_DIR, # required
+ config.JOB_MANAGER_SETTING_POLL_COMMAND, # required
+ config.JOB_MANAGER_SETTING_POLL_INTERVAL, # optional+recommended
+ config.JOB_MANAGER_SETTING_SCONTROL_COMMAND], # required
+ config.SECTION_NEW_JOB_COMMENTS: [
+ config.NEW_JOB_COMMENTS_SETTING_AWAITS_LAUNCH], # required
+ config.SECTION_RUNNING_JOB_COMMENTS: [
+ config.RUNNING_JOB_COMMENTS_SETTING_RUNNING_JOB] # required
+ }
class EESSIBotSoftwareLayerJobManager:
@@ -86,8 +83,8 @@ def __init__(self):
configuration to set the path to the logfile.
"""
cfg = config.read_config()
- job_manager_cfg = cfg['job_manager']
- self.logfile = job_manager_cfg.get('log_path')
+ job_manager_cfg = cfg[config.SECTION_JOB_MANAGER]
+ self.logfile = job_manager_cfg.get(config.JOB_MANAGER_SETTING_LOG_PATH)
def get_current_jobs(self):
"""
@@ -251,42 +248,6 @@ def determine_finished_jobs(self, known_jobs, current_jobs):
return finished_jobs
- def read_job_result(self, job_result_file_path):
- """
- Read job result file and return the contents of the 'RESULT' section.
-
- Args:
- job_result_file_path (string): path to job result file
-
- Returns:
- (ConfigParser): instance of ConfigParser corresponding to the
- 'RESULT' section or None
- """
- # reuse function from module tools.job_metadata to read metadata file
- result = read_metadata_file(job_result_file_path, self.logfile)
- if result and "RESULT" in result:
- return result["RESULT"]
- else:
- return None
-
- def read_job_test(self, job_test_file_path):
- """
- Read job test file and return the contents of the 'TEST' section.
-
- Args:
- job_test_file_path (string): path to job test file
-
- Returns:
- (ConfigParser): instance of ConfigParser corresponding to the
- 'TEST' section or None
- """
- # reuse function from module tools.job_metadata to read metadata file
- test = read_metadata_file(job_test_file_path, self.logfile)
- if test and "TEST" in test:
- return test["TEST"]
- else:
- return None
-
def process_new_job(self, new_job):
"""
Process a new job by verifying that it is a bot job and if so
@@ -332,7 +293,9 @@ def process_new_job(self, new_job):
# assuming that a bot job's working directory contains a metadata
# file, its existence is used to check if the job belongs to the bot
- metadata_pr = read_job_metadata_from_file(job_metadata_path, self.logfile)
+ metadata_pr = job_metadata.get_section_from_file(job_metadata_path,
+ job_metadata.JOB_PR_SECTION,
+ self.logfile)
if metadata_pr is None:
log(f"No metadata file found at {job_metadata_path} for job {job_id}, so skipping it",
@@ -383,10 +346,10 @@ def process_new_job(self, new_job):
# update status table if we found a comment
if "comment_id" in new_job:
- new_job_comments_cfg = config.read_config()[NEW_JOB_COMMENTS]
+ new_job_comments_cfg = config.read_config()[config.SECTION_NEW_JOB_COMMENTS]
dt = datetime.now(timezone.utc)
update = "\n|%s|released|" % dt.strftime("%b %d %X %Z %Y")
- update += f"{new_job_comments_cfg[AWAITS_LAUNCH]}|"
+ update += f"{new_job_comments_cfg[config.NEW_JOB_COMMENTS_SETTING_AWAITS_LAUNCH]}|"
update_comment(new_job["comment_id"], pr, update)
else:
log(
@@ -428,7 +391,9 @@ def process_running_jobs(self, running_job):
job_metadata_path = os.path.join(job_dir, metadata_file)
# check if metadata file exist
- metadata_pr = read_job_metadata_from_file(job_metadata_path, self.logfile)
+ metadata_pr = job_metadata.get_section_from_file(job_metadata_path,
+ job_metadata.JOB_PR_SECTION,
+ self.logfile)
if metadata_pr is None:
raise Exception("Unable to find metadata file")
@@ -453,8 +418,9 @@ def process_running_jobs(self, running_job):
if "comment_id" in running_job:
dt = datetime.now(timezone.utc)
- running_job_comments_cfg = config.read_config()[RUNNING_JOB_COMMENTS]
- running_msg = running_job_comments_cfg[RUNNING_JOB].format(job_id=running_job['jobid'])
+ running_job_comments_cfg = config.read_config()[config.SECTION_RUNNING_JOB_COMMENTS]
+ running_msg_fmt = running_job_comments_cfg[config.RUNNING_JOB_COMMENTS_SETTING_RUNNING_JOB]
+ running_msg = running_msg_fmt.format(job_id=running_job['jobid'])
if "comment_body" in running_job and running_msg in running_job["comment_body"]:
log("Not updating comment, '%s' already found" % running_msg)
else:
@@ -517,20 +483,22 @@ def process_finished_job(self, finished_job):
# status = {SUCCESS,FAILURE,UNKNOWN}
# obtain format templates from app.cfg
- finished_job_comments_cfg = config.read_config()[FINISHED_JOB_COMMENTS]
+ finished_job_comments_cfg = config.read_config()[config.SECTION_FINISHED_JOB_COMMENTS]
# check if _bot_jobJOBID.result exits
job_result_file = f"_bot_job{job_id}.result"
job_result_file_path = os.path.join(new_symlink, job_result_file)
- job_results = self.read_job_result(job_result_file_path)
+ job_results = job_metadata.get_section_from_file(job_result_file_path,
+ job_metadata.JOB_RESULT_SECTION,
+ self.logfile)
- job_result_unknown_fmt = finished_job_comments_cfg[JOB_RESULT_UNKNOWN_FMT]
+ job_result_unknown_fmt = finished_job_comments_cfg[config.FINISHED_JOB_COMMENTS_SETTING_JOB_RESULT_UNKNOWN_FMT]
# set fallback comment_description in case no result file was found
- # (self.read_job_result returned None)
+ # (job_metadata.get_section_from_file returned None)
comment_description = job_result_unknown_fmt.format(filename=job_result_file)
if job_results:
# get preformatted comment_description or use previously set default for unknown
- comment_description = job_results.get(JOB_RESULT_COMMENT_DESCRIPTION, comment_description)
+ comment_description = job_results.get(job_metadata.JOB_RESULT_COMMENT_DESCRIPTION, comment_description)
# report to log
log(f"{fn}(): finished job {job_id}\n"
@@ -549,15 +517,17 @@ def process_finished_job(self, finished_job):
# --> bot/test.sh and bot/check-test.sh scripts are run in job script used by bot for 'build' action
job_test_file = f"_bot_job{job_id}.test"
job_test_file_path = os.path.join(new_symlink, job_test_file)
- job_tests = self.read_job_test(job_test_file_path)
+ job_tests = job_metadata.get_section_from_file(job_test_file_path,
+ job_metadata.JOB_TEST_SECTION,
+ self.logfile)
- job_test_unknown_fmt = finished_job_comments_cfg[JOB_TEST_UNKNOWN_FMT]
+ job_test_unknown_fmt = finished_job_comments_cfg[config.FINISHED_JOB_COMMENTS_SETTING_JOB_TEST_UNKNOWN_FMT]
# set fallback comment_description in case no test file was found
- # (self.read_job_result returned None)
+ # (job_metadata.get_section_from_file returned None)
comment_description = job_test_unknown_fmt.format(filename=job_test_file)
if job_tests:
# get preformatted comment_description or use previously set default for unknown
- comment_description = job_tests.get(JOB_TEST_COMMENT_DESCRIPTION, comment_description)
+ comment_description = job_tests.get(job_metadata.JOB_TEST_COMMENT_DESCRIPTION, comment_description)
# report to log
log(f"{fn}(): finished job {job_id}, test suite result\n"
@@ -573,7 +543,9 @@ def process_finished_job(self, finished_job):
# obtain id of PR comment to be updated (from file '_bot_jobID.metadata')
metadata_file = f"_bot_job{job_id}.metadata"
job_metadata_path = os.path.join(new_symlink, metadata_file)
- metadata_pr = read_job_metadata_from_file(job_metadata_path, self.logfile)
+ metadata_pr = job_metadata.get_section_from_file(job_metadata_path,
+ job_metadata.JOB_PR_SECTION,
+ self.logfile)
if metadata_pr is None:
raise Exception("Unable to find metadata file ... skip updating PR comment")
@@ -605,7 +577,11 @@ def main():
# config is read and checked for settings to raise an exception early when
# the job_manager runs
- config.check_required_cfg_settings(REQUIRED_CONFIG)
+ if config.check_required_cfg_settings(REQUIRED_CONFIG):
+ print("Configuration check: PASSED")
+ else:
+ print("Configuration check: FAILED")
+ sys.exit(1)
github.connect()
job_manager = EESSIBotSoftwareLayerJobManager()
@@ -646,16 +622,16 @@ def main():
job_manager.scontrol_command = ""
if max_iter != 0:
cfg = config.read_config()
- job_mgr = cfg["job_manager"]
- job_manager.job_ids_dir = job_mgr.get("job_ids_dir")
+ job_mgr = cfg[config.SECTION_JOB_MANAGER]
+ job_manager.job_ids_dir = job_mgr.get(config.JOB_MANAGER_SETTING_JOB_IDS_DIR)
job_manager.submitted_jobs_dir = os.path.join(
job_manager.job_ids_dir, "submitted"
)
- job_manager.poll_command = job_mgr.get("poll_command") or False
- poll_interval = int(job_mgr.get("poll_interval") or 0)
+ job_manager.poll_command = job_mgr.get(config.JOB_MANAGER_SETTING_POLL_COMMAND) or False
+ poll_interval = int(job_mgr.get(config.JOB_MANAGER_SETTING_POLL_INTERVAL) or 0)
if poll_interval <= 0:
poll_interval = 60
- job_manager.scontrol_command = job_mgr.get("scontrol_command") or False
+ job_manager.scontrol_command = job_mgr.get(config.JOB_MANAGER_SETTING_SCONTROL_COMMAND) or False
os.makedirs(job_manager.submitted_jobs_dir, exist_ok=True)
# max_iter
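
The job-specific helpers `read_job_result` and `read_job_test` removed above are replaced by a single, generic `job_metadata.get_section_from_file`. The `tools/job_metadata.py` module is not included in this diff; based on the removed helpers and the call sites, it presumably boils down to something like:

```python
# sketch of tools/job_metadata.get_section_from_file, inferred from the removed
# read_job_result/read_job_test helpers; the section-name constants mirror the
# sections those helpers returned
JOB_RESULT_SECTION = "RESULT"
JOB_TEST_SECTION = "TEST"


def get_section_from_file(file_path, section, log_file=None):
    # reuse read_metadata_file (same module) to parse the metadata/result/test file
    contents = read_metadata_file(file_path, log_file)
    if contents and section in contents:
        return contents[section]
    return None
```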
diff --git a/event_handler.sh b/event_handler.sh
index e1a811b1..783e7c0e 100755
--- a/event_handler.sh
+++ b/event_handler.sh
@@ -5,8 +5,8 @@
# A bot to help with requests to add software installations to the EESSI software layer,
# see https://github.com/EESSI/software-layer
#
-# author: Kenneth Hoste (@boegel)
# author: Bob Droege (@bedroge)
+# author: Kenneth Hoste (@boegel)
# author: Hafsa Naeem (@hafsa-naeem)
# author: Thomas Roeblitz (@trz42)
#
diff --git a/job_manager.sh b/job_manager.sh
index e4ce34cd..0a3a2ff7 100755
--- a/job_manager.sh
+++ b/job_manager.sh
@@ -5,8 +5,8 @@
# A bot to help with requests to add software installations to the EESSI software layer,
# see https://github.com/EESSI/software-layer
#
-# author: Kenneth Hoste (@boegel)
# author: Bob Droege (@bedroge)
+# author: Kenneth Hoste (@boegel)
# author: Hafsa Naeem (@hafsa-naeem)
# author: Thomas Roeblitz (@trz42)
#
diff --git a/requirements.txt b/requirements.txt
index 5545325e..d650d246 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,15 @@
+# This file is part of the EESSI build-and-deploy bot,
+# see https://github.com/EESSI/eessi-bot-software-layer
+#
+# The bot helps with requests to add software installations to the
+# EESSI software layer, see https://github.com/EESSI/software-layer
+#
+# author: Bob Droege (@bedroge)
+# author: Kenneth Hoste (@boegel)
+# author: Thomas Roeblitz (@trz42)
+#
+# license: GPLv2
+#
PyGithub
Waitress
cryptography
diff --git a/scripts/eessi-upload-to-staging b/scripts/eessi-upload-to-staging
index 45e52fbf..b5e4482d 100755
--- a/scripts/eessi-upload-to-staging
+++ b/scripts/eessi-upload-to-staging
@@ -38,7 +38,7 @@ function check_file_name
function create_metadata_file
{
- _tarball=$1
+ _artefact=$1
_url=$2
_repository=$3
_pull_request_number=$4
@@ -50,10 +50,10 @@ function create_metadata_file
--arg un $(whoami) \
--arg ip $(curl -s https://checkip.amazonaws.com) \
--arg hn "$(hostname -f)" \
- --arg fn "$(basename ${_tarball})" \
- --arg sz "$(du -b "${_tarball}" | awk '{print $1}')" \
- --arg ct "$(date -r "${_tarball}")" \
- --arg sha256 "$(sha256sum "${_tarball}" | awk '{print $1}')" \
+ --arg fn "$(basename ${_artefact})" \
+ --arg sz "$(du -b "${_artefact}" | awk '{print $1}')" \
+ --arg ct "$(date -r "${_artefact}")" \
+ --arg sha256 "$(sha256sum "${_artefact}" | awk '{print $1}')" \
--arg url "${_url}" \
--arg repo "${_repository}" \
--arg pr "${_pull_request_number}" \
@@ -70,6 +70,11 @@ function create_metadata_file
function display_help
{
echo "Usage: $0 [OPTIONS] " >&2
+ echo " -a | --artefact-prefix PREFIX - a directory to which the artefact" >&2
+ echo " shall be uploaded; BASH variable" >&2
+ echo " expansion will be applied; arg '-l'" >&2
+ echo " lists variables that are defined at" >&2
+ echo " the time of expansion" >&2
echo " -e | --endpoint-url URL - endpoint url (needed for non AWS S3)" >&2
echo " -h | --help - display this usage information" >&2
echo " -i | --pr-comment-id - identifier of a PR comment; may be" >&2
@@ -88,11 +93,6 @@ function display_help
echo " link the upload to a PR" >&2
echo " -r | --repository FULL_NAME - a repository name ACCOUNT/REPONAME;" >&2
echo " used to link the upload to a PR" >&2
- echo " -t | --tarball-prefix PREFIX - a directory to which the tarball" >&2
- echo " shall be uploaded; BASH variable" >&2
- echo " expansion will be applied; arg '-l'" >&2
- echo " lists variables that are defined at" >&2
- echo " the time of expansion" >&2
}
if [[ $# -lt 1 ]]; then
@@ -123,7 +123,7 @@ github_repository="EESSI/software-layer"
# provided via options in the bot's config file app.cfg and/or command line argument
metadata_prefix=
-tarball_prefix=
+artefact_prefix=
# other variables
legacy_aws_path=
@@ -131,6 +131,10 @@ variables="github_repository legacy_aws_path pull_request_number"
while [[ $# -gt 0 ]]; do
case $1 in
+ -a|--artefact-prefix)
+ artefact_prefix="$2"
+ shift 2
+ ;;
-e|--endpoint-url)
endpoint_url="$2"
shift 2
@@ -167,10 +171,6 @@ while [[ $# -gt 0 ]]; do
github_repository="$2"
shift 2
;;
- -t|--tarball-prefix)
- tarball_prefix="$2"
- shift 2
- ;;
-*|--*)
echo "Error: Unknown option: $1" >&2
exit 1
@@ -204,17 +204,17 @@ for file in "$*"; do
basefile=$( basename ${file} )
if check_file_name ${basefile}; then
if tar tf "${file}" | head -n1 > /dev/null; then
- # 'legacy_aws_path' might be used in tarball_prefix or metadata_prefix
+ # 'legacy_aws_path' might be used in artefact_prefix or metadata_prefix
# its purpose is to support the old/legacy method to derive the location
- # where to store the tarball and metadata file
+ # where to store the artefact and metadata file
export legacy_aws_path=$(basename ${file} | tr -s '-' '/' \
| perl -pe 's/^eessi.//;' | perl -pe 's/\.tar\.gz$//;' )
- if [ -z ${tarball_prefix} ]; then
+ if [ -z ${artefact_prefix} ]; then
aws_path=${legacy_aws_path}
else
export pull_request_number
export github_repository
- aws_path=$(envsubst <<< "${tarball_prefix}")
+ aws_path=$(envsubst <<< "${artefact_prefix}")
fi
aws_file=$(basename ${file})
echo "Creating metadata file"
@@ -233,7 +233,7 @@ for file in "$*"; do
cat ${metadata_file}
echo Uploading to "${url}"
- echo " store tarball at ${aws_path}/${aws_file}"
+ echo " store artefact at ${aws_path}/${aws_file}"
upload_to_staging_bucket \
"${file}" \
"${bucket_name}" \
diff --git a/smee.sh b/smee.sh
index a0908462..9447ee08 100755
--- a/smee.sh
+++ b/smee.sh
@@ -1,2 +1,13 @@
#!/bin/bash
+#
+# This file is part of the EESSI build-and-deploy bot,
+# see https://github.com/EESSI/eessi-bot-software-layer
+#
+# The bot helps with requests to add software installations to the
+# EESSI software layer, see https://github.com/EESSI/software-layer
+#
+# author: Kenneth Hoste (@boegel)
+#
+# license: GPLv2
+#
smee --url https://smee.io/7PIXBDoqczjEVXaf
diff --git a/tasks/build.py b/tasks/build.py
index 36cc08d1..82a0911e 100644
--- a/tasks/build.py
+++ b/tasks/build.py
@@ -4,9 +4,13 @@
# The bot helps with requests to add software installations to the
# EESSI software layer, see https://github.com/EESSI/software-layer
#
-# author: Kenneth Hoste (@boegel)
# author: Bob Droege (@bedroge)
+# author: Kenneth Hoste (@boegel)
# author: Hafsa Naeem (@hafsa-naeem)
+# author: Jacob Ziemke (@jacobz137)
+# author: Jonas Qvigstad (@jonas-lq)
+# author: Lara Ramona Peeters (@laraPPr)
+# author: Pedro Santos Neves (@Neves-P)
# author: Thomas Roeblitz (@trz42)
#
# license: GPLv2
@@ -27,70 +31,18 @@
# Local application imports (anything from EESSI/eessi-bot-software-layer)
from connections import github
-from tools import config, pr_comments, run_cmd
-from tools.job_metadata import create_metadata_file
-
-
-APP_NAME = "app_name"
-ARCHITECTURE_TARGETS = "architecturetargets"
-AWAITS_RELEASE = "awaits_release"
-BUILDENV = "buildenv"
-BUILD_JOB_SCRIPT = "build_job_script"
-BUILD_LOGS_DIR = "build_logs_dir"
-BUILD_PERMISSION = "build_permission"
-CFG_DIRNAME = "cfg"
-CONTAINER_CACHEDIR = "container_cachedir"
-CURL_FAILURE = "curl_failure"
-CURL_TIP = "curl_tip"
-CVMFS_CUSTOMIZATIONS = "cvmfs_customizations"
+from tools import config, cvmfs_repository, job_metadata, pr_comments, run_cmd
+
+
+# defaults (used if not specified via, e.g., 'app.cfg')
DEFAULT_JOB_TIME_LIMIT = "24:00:00"
-DOWNLOAD_PR_COMMENTS = "download_pr_comments"
-ERROR_CURL = "curl"
-ERROR_GIT_APPLY = "git apply"
-ERROR_GIT_CHECKOUT = "git checkout"
-ERROR_GIT_CLONE = "curl"
-ERROR_NONE = "none"
-GITHUB = "github"
-GIT_CLONE_FAILURE = "git_clone_failure"
-GIT_CLONE_TIP = "git_clone_tip"
-GIT_CHECKOUT_FAILURE = "git_checkout_failure"
-GIT_CHECKOUT_TIP = "git_checkout_tip"
-GIT_APPLY_FAILURE = "git_apply_failure"
-GIT_APPLY_TIP = "git_apply_tip"
-HTTPS_PROXY = "https_proxy"
-HTTP_PROXY = "http_proxy"
-INITIAL_COMMENT = "initial_comment"
-JOBS_BASE_DIR = "jobs_base_dir"
-JOB_ARCHITECTURE = "architecture"
-JOB_CFG_FILENAME = "job.cfg"
-JOB_CONTAINER = "container"
-JOB_LOCAL_TMP = "local_tmp"
-JOB_HTTPS_PROXY = "https_proxy"
-JOB_HTTP_PROXY = "http_proxy"
-JOB_LOAD_MODULES = "load_modules"
-JOB_OS_TYPE = "os_type"
-JOB_REPOSITORY = "repository"
-JOB_REPOS_CFG_DIR = "repos_cfg_dir"
-JOB_REPO_ID = "repo_id"
-JOB_REPO_NAME = "repo_name"
-JOB_REPO_VERSION = "repo_version"
-JOB_SITECONFIG = "site_config"
-JOB_SOFTWARE_SUBDIR = "software_subdir"
-LOAD_MODULES = "load_modules"
-LOCAL_TMP = "local_tmp"
-NO_BUILD_PERMISSION_COMMENT = "no_build_permission_comment"
-REPOS_CFG_DIR = "repos_cfg_dir"
-REPOS_REPO_NAME = "repo_name"
-REPOS_REPO_VERSION = "repo_version"
-REPOS_CONFIG_BUNDLE = "config_bundle"
-REPOS_CONFIG_MAP = "config_map"
-REPOS_CONTAINER = "container"
-REPO_TARGETS = "repo_targets"
-REPO_TARGET_MAP = "repo_target_map"
-SHARED_FS_PATH = "shared_fs_path"
-SLURM_PARAMS = "slurm_params"
-SUBMITTED_JOB_COMMENTS = "submitted_job_comments"
-SUBMIT_COMMAND = "submit_command"
+
+# error codes used in this file
+_ERROR_CURL = "curl"
+_ERROR_GIT_APPLY = "git apply"
+_ERROR_GIT_CHECKOUT = "git checkout"
+_ERROR_GIT_CLONE = "curl"
+_ERROR_NONE = "none"
Job = namedtuple('Job', ('working_dir', 'arch_target', 'repo_id', 'slurm_opts', 'year_month', 'pr_id'))
@@ -113,44 +65,45 @@ def get_build_env_cfg(cfg):
"""
fn = sys._getframe().f_code.co_name
- buildenv = cfg[BUILDENV]
+ buildenv = cfg[config.SECTION_BUILDENV]
- jobs_base_dir = buildenv.get(JOBS_BASE_DIR)
+ jobs_base_dir = buildenv.get(config.BUILDENV_SETTING_JOBS_BASE_DIR)
log(f"{fn}(): jobs_base_dir '{jobs_base_dir}'")
- config_data = {JOBS_BASE_DIR: jobs_base_dir}
- local_tmp = buildenv.get(LOCAL_TMP)
+ config_data = {config.BUILDENV_SETTING_JOBS_BASE_DIR: jobs_base_dir}
+
+ local_tmp = buildenv.get(config.BUILDENV_SETTING_LOCAL_TMP)
log(f"{fn}(): local_tmp '{local_tmp}'")
- config_data[LOCAL_TMP] = local_tmp
+ config_data[config.BUILDENV_SETTING_LOCAL_TMP] = local_tmp
- build_job_script = buildenv.get(BUILD_JOB_SCRIPT)
+ build_job_script = buildenv.get(config.BUILDENV_SETTING_BUILD_JOB_SCRIPT)
log(f"{fn}(): build_job_script '{build_job_script}'")
- config_data[BUILD_JOB_SCRIPT] = build_job_script
+ config_data[config.BUILDENV_SETTING_BUILD_JOB_SCRIPT] = build_job_script
- submit_command = buildenv.get(SUBMIT_COMMAND)
+ submit_command = buildenv.get(config.BUILDENV_SETTING_SUBMIT_COMMAND)
log(f"{fn}(): submit_command '{submit_command}'")
- config_data[SUBMIT_COMMAND] = submit_command
+ config_data[config.BUILDENV_SETTING_SUBMIT_COMMAND] = submit_command
- slurm_params = buildenv.get(SLURM_PARAMS)
+ slurm_params = buildenv.get(config.BUILDENV_SETTING_SLURM_PARAMS)
# always submit jobs with hold set, so job manager can release them
slurm_params += ' --hold'
log(f"{fn}(): slurm_params '{slurm_params}'")
- config_data[SLURM_PARAMS] = slurm_params
+ config_data[config.BUILDENV_SETTING_SLURM_PARAMS] = slurm_params
- shared_fs_path = buildenv.get(SHARED_FS_PATH)
+ shared_fs_path = buildenv.get(config.BUILDENV_SETTING_SHARED_FS_PATH)
log(f"{fn}(): shared_fs_path: '{shared_fs_path}'")
- config_data[SHARED_FS_PATH] = shared_fs_path
+ config_data[config.BUILDENV_SETTING_SHARED_FS_PATH] = shared_fs_path
- container_cachedir = buildenv.get(CONTAINER_CACHEDIR)
+ container_cachedir = buildenv.get(config.BUILDENV_SETTING_CONTAINER_CACHEDIR)
log(f"{fn}(): container_cachedir '{container_cachedir}'")
- config_data[CONTAINER_CACHEDIR] = container_cachedir
+ config_data[config.BUILDENV_SETTING_CONTAINER_CACHEDIR] = container_cachedir
- build_logs_dir = buildenv.get(BUILD_LOGS_DIR)
+ build_logs_dir = buildenv.get(config.BUILDENV_SETTING_BUILD_LOGS_DIR)
log(f"{fn}(): build_logs_dir '{build_logs_dir}'")
- config_data[BUILD_LOGS_DIR] = build_logs_dir
+ config_data[config.BUILDENV_SETTING_BUILD_LOGS_DIR] = build_logs_dir
cvmfs_customizations = {}
try:
- cvmfs_customizations_str = buildenv.get(CVMFS_CUSTOMIZATIONS)
+ cvmfs_customizations_str = buildenv.get(config.BUILDENV_SETTING_CVMFS_CUSTOMIZATIONS)
log("{fn}(): cvmfs_customizations '{cvmfs_customizations_str}'")
if cvmfs_customizations_str is not None:
@@ -161,19 +114,19 @@ def get_build_env_cfg(cfg):
print(e)
error(f"{fn}(): Value for cvmfs_customizations ({cvmfs_customizations_str}) could not be decoded.")
- config_data[CVMFS_CUSTOMIZATIONS] = cvmfs_customizations
+ config_data[config.BUILDENV_SETTING_CVMFS_CUSTOMIZATIONS] = cvmfs_customizations
- http_proxy = buildenv.get(HTTP_PROXY, None)
+ http_proxy = buildenv.get(config.BUILDENV_SETTING_HTTP_PROXY, None)
log(f"{fn}(): http_proxy '{http_proxy}'")
- config_data[HTTP_PROXY] = http_proxy
+ config_data[config.BUILDENV_SETTING_HTTP_PROXY] = http_proxy
- https_proxy = buildenv.get(HTTPS_PROXY, None)
+ https_proxy = buildenv.get(config.BUILDENV_SETTING_HTTPS_PROXY, None)
log(f"{fn}(): https_proxy '{https_proxy}'")
- config_data[HTTPS_PROXY] = https_proxy
+ config_data[config.BUILDENV_SETTING_HTTPS_PROXY] = https_proxy
- load_modules = buildenv.get(LOAD_MODULES, None)
+ load_modules = buildenv.get(config.BUILDENV_SETTING_LOAD_MODULES, None)
log(f"{fn}(): load_modules '{load_modules}'")
- config_data[LOAD_MODULES] = load_modules
+ config_data[config.BUILDENV_SETTING_LOAD_MODULES] = load_modules
return config_data
@@ -193,9 +146,9 @@ def get_architecture_targets(cfg):
"""
fn = sys._getframe().f_code.co_name
- architecture_targets = cfg[ARCHITECTURE_TARGETS]
+ architecture_targets = cfg[config.SECTION_ARCHITECTURETARGETS]
- arch_target_map = json.loads(architecture_targets.get('arch_target_map'))
+ arch_target_map = json.loads(architecture_targets.get(config.ARCHITECTURETARGETS_SETTING_ARCH_TARGET_MAP))
log(f"{fn}(): arch target map '{json.dumps(arch_target_map)}'")
return arch_target_map
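
For context, `arch_target_map` maps an architecture label to the Slurm options used to submit jobs for that architecture. A hypothetical `app.cfg` entry (partition name is made up; the `linux/CPU_FAMILY/MICROARCHITECTURE` shape of the key is inferred from how it is split on `/` elsewhere in this diff):

```
arch_target_map = { "linux/x86_64/generic" : "--partition=x86-64-generic" }
```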
@@ -211,10 +164,11 @@ def get_repo_cfg(cfg):
Returns:
(dict): dictionary containing repository settings as follows
- - {REPOS_CFG_DIR: path to repository config directory as defined in 'app.cfg'}
- - {REPO_TARGET_MAP: json of REPO_TARGET_MAP value as defined in 'app.cfg'}
- - for all sections [JOB_REPO_ID] defined in REPOS_CFG_DIR/repos.cfg add a
- mapping {JOB_REPO_ID: dictionary containing settings of that section}
+ - {config.REPO_TARGETS_SETTING_REPOS_CFG_DIR: path to repository config directory as defined in 'app.cfg'}
+ - {config.REPO_TARGETS_SETTING_REPO_TARGET_MAP: json of
+ config.REPO_TARGETS_SETTING_REPO_TARGET_MAP value as defined in 'app.cfg'}
+ - for all sections [repo_id] defined in config.REPO_TARGETS_SETTING_REPOS_CFG_DIR/repos.cfg add a
+ mapping {repo_id: dictionary containing settings of that section}
"""
fn = sys._getframe().f_code.co_name
@@ -224,13 +178,14 @@ def get_repo_cfg(cfg):
if repo_cfg:
return repo_cfg
- repo_cfg_org = cfg[REPO_TARGETS]
+ repo_cfg_org = cfg[config.SECTION_REPO_TARGETS]
repo_cfg = {}
- repo_cfg[REPOS_CFG_DIR] = repo_cfg_org.get(REPOS_CFG_DIR, None)
+ settings_repos_cfg_dir = config.REPO_TARGETS_SETTING_REPOS_CFG_DIR
+ repo_cfg[settings_repos_cfg_dir] = repo_cfg_org.get(settings_repos_cfg_dir, None)
repo_map = {}
try:
- repo_map_str = repo_cfg_org.get(REPO_TARGET_MAP)
+ repo_map_str = repo_cfg_org.get(config.REPO_TARGETS_SETTING_REPO_TARGET_MAP)
log(f"{fn}(): repo_map '{repo_map_str}'")
if repo_map_str is not None:
@@ -241,13 +196,13 @@ def get_repo_cfg(cfg):
print(err)
error(f"{fn}(): Value for repo_map ({repo_map_str}) could not be decoded.")
- repo_cfg[REPO_TARGET_MAP] = repo_map
+ repo_cfg[config.REPO_TARGETS_SETTING_REPO_TARGET_MAP] = repo_map
- if repo_cfg[REPOS_CFG_DIR] is None:
+ if repo_cfg[config.REPO_TARGETS_SETTING_REPOS_CFG_DIR] is None:
return repo_cfg
# add entries for sections from repos.cfg (one dictionary per section)
- repos_cfg_file = os.path.join(repo_cfg[REPOS_CFG_DIR], 'repos.cfg')
+ repos_cfg_file = os.path.join(repo_cfg[config.REPO_TARGETS_SETTING_REPOS_CFG_DIR], 'repos.cfg')
log(f"{fn}(): repos_cfg_file '{repos_cfg_file}'")
try:
repos_cfg = configparser.ConfigParser()
@@ -267,7 +222,7 @@ def get_repo_cfg(cfg):
config_map = {}
try:
- config_map_str = repos_cfg[repo_id].get(REPOS_CONFIG_MAP)
+ config_map_str = repos_cfg[repo_id].get(cvmfs_repository.REPOS_CFG_CONFIG_MAP)
log(f"{fn}(): config_map '{config_map_str}'")
if config_map_str is not None:
@@ -278,7 +233,7 @@ def get_repo_cfg(cfg):
print(err)
error(f"{fn}(): Value for config_map ({config_map_str}) could not be decoded.")
- repo_cfg[repo_id][REPOS_CONFIG_MAP] = config_map
+ repo_cfg[repo_id][cvmfs_repository.REPOS_CFG_CONFIG_MAP] = config_map
# print full repo_cfg for debugging purposes
log(f"{fn}(): complete repo_cfg that was just read: {json.dumps(repo_cfg, indent=4)}")
@@ -291,9 +246,9 @@ def create_pr_dir(pr, cfg, event_info):
Create working directory for job to be submitted. Full path to the working
directory has the format
-    JOBS_BASE_DIR/<year>.<month>/pr_<pr number>/event_<event id>/run_<run number>
+    config.BUILDENV_SETTING_JOBS_BASE_DIR/<year>.<month>/pr_<pr number>/event_<event id>/run_<run number>
- where JOBS_BASE_DIR is defined in the configuration (see 'app.cfg'), year
+ where config.BUILDENV_SETTING_JOBS_BASE_DIR is defined in the configuration (see 'app.cfg'), year
contains four digits, and month contains two digits
Args:
@@ -313,13 +268,13 @@ def create_pr_dir(pr, cfg, event_info):
# create directory structure (see discussion of options in
# https://github.com/EESSI/eessi-bot-software-layer/issues/7)
#
-    # JOBS_BASE_DIR/<year>.<month>/pr_<pr number>/event_<event id>/run_<run number>
+    # config.BUILDENV_SETTING_JOBS_BASE_DIR/<year>.<month>/pr_<pr number>/event_<event id>/run_<run number>
#
- # where JOBS_BASE_DIR is defined in the configuration (see 'app.cfg'), year
+ # where config.BUILDENV_SETTING_JOBS_BASE_DIR is defined in the configuration (see 'app.cfg'), year
# contains four digits, and month contains two digits
build_env_cfg = get_build_env_cfg(cfg)
- jobs_base_dir = build_env_cfg[JOBS_BASE_DIR]
+ jobs_base_dir = build_env_cfg[config.BUILDENV_SETTING_JOBS_BASE_DIR]
year_month = datetime.today().strftime('%Y.%m')
pr_id = 'pr_%s' % pr.number
@@ -367,7 +322,7 @@ def download_pr(repo_name, branch_name, pr, arch_job_dir):
git_clone_cmd, "Clone repo", arch_job_dir, raise_on_error=False
)
if clone_exit_code != 0:
- error_stage = ERROR_GIT_CLONE
+ error_stage = _ERROR_GIT_CLONE
return clone_output, clone_error, clone_exit_code, error_stage
git_checkout_cmd = ' '.join([
@@ -379,7 +334,7 @@ def download_pr(repo_name, branch_name, pr, arch_job_dir):
git_checkout_cmd, "checkout branch '%s'" % branch_name, arch_job_dir, raise_on_error=False
)
if checkout_exit_code != 0:
- error_stage = ERROR_GIT_CHECKOUT
+ error_stage = _ERROR_GIT_CHECKOUT
return checkout_output, checkout_err, checkout_exit_code, error_stage
curl_cmd = f'curl -L https://github.com/{repo_name}/pull/{pr.number}.diff > {pr.number}.diff'
@@ -388,7 +343,7 @@ def download_pr(repo_name, branch_name, pr, arch_job_dir):
curl_cmd, "Obtain patch", arch_job_dir, raise_on_error=False
)
if curl_exit_code != 0:
- error_stage = ERROR_CURL
+ error_stage = _ERROR_CURL
return curl_output, curl_error, curl_exit_code, error_stage
git_apply_cmd = f'git apply {pr.number}.diff'
@@ -397,11 +352,11 @@ def download_pr(repo_name, branch_name, pr, arch_job_dir):
git_apply_cmd, "Apply patch", arch_job_dir, raise_on_error=False
)
if git_apply_exit_code != 0:
- error_stage = ERROR_GIT_APPLY
+ error_stage = _ERROR_GIT_APPLY
return git_apply_output, git_apply_error, git_apply_exit_code, error_stage
# need to return four items also in case everything went fine
- return 'downloading PR succeeded', 'no error while downloading PR', 0, ERROR_NONE
+ return 'downloading PR succeeded', 'no error while downloading PR', 0, _ERROR_NONE
def comment_download_pr(base_repo_name, pr, download_pr_exit_code, download_pr_error, error_stage):
@@ -424,23 +379,23 @@ def comment_download_pr(base_repo_name, pr, download_pr_exit_code, download_pr_e
if download_pr_exit_code != 0:
fn = sys._getframe().f_code.co_name
- download_pr_comments_cfg = config.read_config()[DOWNLOAD_PR_COMMENTS]
- if error_stage == ERROR_GIT_CLONE:
+ download_pr_comments_cfg = config.read_config()[config.SECTION_DOWNLOAD_PR_COMMENTS]
+ if error_stage == _ERROR_GIT_CLONE:
download_comment = (f"`{download_pr_error}`"
- f"{download_pr_comments_cfg[GIT_CLONE_FAILURE]}"
- f"{download_pr_comments_cfg[GIT_CLONE_TIP]}")
- elif error_stage == ERROR_GIT_CHECKOUT:
+ f"{download_pr_comments_cfg[config.DOWNLOAD_PR_COMMENTS_SETTING_GIT_CLONE_FAILURE]}"
+ f"{download_pr_comments_cfg[config.DOWNLOAD_PR_COMMENTS_SETTING_GIT_CLONE_TIP]}")
+ elif error_stage == _ERROR_GIT_CHECKOUT:
download_comment = (f"`{download_pr_error}`"
- f"{download_pr_comments_cfg[GIT_CHECKOUT_FAILURE]}"
- f"{download_pr_comments_cfg[GIT_CHECKOUT_TIP]}")
- elif error_stage == ERROR_CURL:
+ f"{download_pr_comments_cfg[config.DOWNLOAD_PR_COMMENTS_SETTING_GIT_CHECKOUT_FAILURE]}"
+ f"{download_pr_comments_cfg[config.DOWNLOAD_PR_COMMENTS_SETTING_GIT_CHECKOUT_TIP]}")
+ elif error_stage == _ERROR_CURL:
download_comment = (f"`{download_pr_error}`"
- f"{download_pr_comments_cfg[CURL_FAILURE]}"
- f"{download_pr_comments_cfg[CURL_TIP]}")
- elif error_stage == ERROR_GIT_APPLY:
+ f"{download_pr_comments_cfg[config.DOWNLOAD_PR_COMMENTS_SETTING_CURL_FAILURE]}"
+ f"{download_pr_comments_cfg[config.DOWNLOAD_PR_COMMENTS_SETTING_CURL_TIP]}")
+ elif error_stage == _ERROR_GIT_APPLY:
download_comment = (f"`{download_pr_error}`"
- f"{download_pr_comments_cfg[GIT_APPLY_FAILURE]}"
- f"{download_pr_comments_cfg[GIT_APPLY_TIP]}")
+ f"{download_pr_comments_cfg[config.DOWNLOAD_PR_COMMENTS_SETTING_GIT_APPLY_FAILURE]}"
+ f"{download_pr_comments_cfg[config.DOWNLOAD_PR_COMMENTS_SETTING_GIT_APPLY_TIP]}")
download_comment = pr_comments.create_comment(
repo_name=base_repo_name, pr_number=pr.number, comment=download_comment
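
As an illustration of how the failure comment is assembled, here is a minimal sketch using a hypothetical `[download_pr_comments]` section; only the section and setting names come from `tools/config.py`, the values and error text are invented.

```
# Sketch only: section/setting names follow tools/config.py, the values below
# are hypothetical and not the bot's actual app.cfg contents.
import configparser

cfg = configparser.ConfigParser()
cfg.read_string("""
[download_pr_comments]
git_clone_failure = Failed to clone the target repository.
git_clone_tip = _Tip: check that the repository name and branch exist._
""")

download_pr_error = "fatal: repository 'https://github.com/EXAMPLE/repo' not found"
section = cfg["download_pr_comments"]

# same concatenation pattern as comment_download_pr(): error text + failure + tip
download_comment = (f"`{download_pr_error}`"
                    f"{section['git_clone_failure']}"
                    f"{section['git_clone_tip']}")
print(download_comment)
```
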
@@ -494,7 +449,7 @@ def prepare_jobs(pr, cfg, event_info, action_filter):
"""
fn = sys._getframe().f_code.co_name
- app_name = cfg[GITHUB].get(APP_NAME)
+ app_name = cfg[config.SECTION_GITHUB].get(config.GITHUB_SETTING_APP_NAME)
build_env_cfg = get_build_env_cfg(cfg)
arch_map = get_architecture_targets(cfg)
repocfg = get_repo_cfg(cfg)
@@ -517,14 +472,15 @@ def prepare_jobs(pr, cfg, event_info, action_filter):
for arch, slurm_opt in arch_map.items():
arch_dir = arch.replace('/', '_')
# check if repo_target_map contains an entry for {arch}
- if arch not in repocfg[REPO_TARGET_MAP]:
+ if arch not in repocfg[config.REPO_TARGETS_SETTING_REPO_TARGET_MAP]:
log(f"{fn}(): skipping arch {arch} because repo target map does not define repositories to build for")
continue
- for repo_id in repocfg[REPO_TARGET_MAP][arch]:
+ for repo_id in repocfg[config.REPO_TARGETS_SETTING_REPO_TARGET_MAP][arch]:
# ensure repocfg contains information about the repository repo_id if repo_id != EESSI
# Note, EESSI is a bad/misleading name, it should be more like AS_IN_CONTAINER
if (repo_id != "EESSI" and repo_id != "EESSI-pilot") and repo_id not in repocfg:
- log(f"{fn}(): skipping repo {repo_id}, it is not defined in repo config {repocfg[REPOS_CFG_DIR]}")
+ log(f"{fn}(): skipping repo {repo_id}, it is not defined in repo"
+ "config {repocfg[config.REPO_TARGETS_SETTING_REPOS_CFG_DIR]}")
continue
# if filter exists, check filter against context = (arch, repo, app_name)
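
For reference, a small sketch of the kind of `repo_target_map` value the loop above iterates over, assuming (as the example `app.cfg` suggests) that the setting holds a JSON-style mapping from architecture to repository ids; the architectures and repository ids shown are made up.

```
# Hypothetical repo_target_map value (stored as a JSON-like string in app.cfg),
# mapping an architecture to the repository ids to build for it.
import json

repo_target_map = json.loads(
    '{"linux/x86_64/generic": ["EESSI"], "linux/aarch64/generic": ["EESSI"]}'
)
for arch, repo_ids in repo_target_map.items():
    print(arch, "->", repo_ids)
```
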
@@ -582,73 +538,81 @@ def prepare_job_cfg(job_dir, build_env_cfg, repos_cfg, repo_id, software_subdir,
"""
fn = sys._getframe().f_code.co_name
- jobcfg_dir = os.path.join(job_dir, CFG_DIRNAME)
- # create ini file job.cfg with entries:
+ jobcfg_dir = os.path.join(job_dir, job_metadata.JOB_CFG_DIRECTORY_NAME)
+ # create ini file job.cfg with entries (some values are taken from the
+ # arguments of the function, some from settings in 'app.cfg', some from the
+ # repository's definition, some combine two values):
# [site_config]
- # local_tmp = LOCAL_TMP_VALUE
- # shared_fs_path = SHARED_FS_PATH
- # build_logs_dir = BUILD_LOGS_DIR
+ # local_tmp = config.BUILDENV_SETTING_LOCAL_TMP
+ # shared_fs_path = config.BUILDENV_SETTING_SHARED_FS_PATH
+ # build_logs_dir = config.BUILDENV_SETTING_BUILD_LOGS_DIR
#
# [repository]
- # repos_cfg_dir = JOB_CFG_DIR
- # repo_id = JOB_REPO_ID
- # container = CONTAINER
- # repo_name = REPO_NAME
- # repo_version = REPO_VERSION
+ # repos_cfg_dir = job_dir/job_metadata.JOB_CFG_DIRECTORY_NAME
+ # repo_id = repo_id
+ # container = repos_cfg[cvmfs_repository.REPOS_CFG_CONTAINER]
+ # repo_name = repo_cfg[cvmfs_repository.REPOS_CFG_REPO_NAME]
+ # repo_version = repo_cfg[cvmfs_repository.REPOS_CFG_REPO_VERSION]
#
# [architecture]
- # software_subdir = SOFTWARE_SUBDIR
- # os_type = OS_TYPE
+ # software_subdir = software_subdir
+ # os_type = os_type
job_cfg = configparser.ConfigParser()
- job_cfg[JOB_SITECONFIG] = {}
+ job_cfg[job_metadata.JOB_CFG_SITE_CONFIG_SECTION] = {}
build_env_to_job_cfg_keys = {
- BUILD_LOGS_DIR: BUILD_LOGS_DIR,
- CONTAINER_CACHEDIR: CONTAINER_CACHEDIR,
- HTTP_PROXY: JOB_HTTP_PROXY,
- HTTPS_PROXY: JOB_HTTPS_PROXY,
- LOAD_MODULES: JOB_LOAD_MODULES,
- LOCAL_TMP: JOB_LOCAL_TMP,
- SHARED_FS_PATH: SHARED_FS_PATH,
+ config.BUILDENV_SETTING_BUILD_LOGS_DIR: job_metadata.JOB_CFG_SITE_CONFIG_BUILD_LOGS_DIR,
+ config.BUILDENV_SETTING_CONTAINER_CACHEDIR: job_metadata.JOB_CFG_SITE_CONFIG_CONTAINER_CACHEDIR,
+ config.BUILDENV_SETTING_HTTP_PROXY: job_metadata.JOB_CFG_SITE_CONFIG_HTTP_PROXY,
+ config.BUILDENV_SETTING_HTTPS_PROXY: job_metadata.JOB_CFG_SITE_CONFIG_HTTPS_PROXY,
+ config.BUILDENV_SETTING_LOAD_MODULES: job_metadata.JOB_CFG_SITE_CONFIG_LOAD_MODULES,
+ config.BUILDENV_SETTING_LOCAL_TMP: job_metadata.JOB_CFG_SITE_CONFIG_LOCAL_TMP,
+ config.BUILDENV_SETTING_SHARED_FS_PATH: job_metadata.JOB_CFG_SITE_CONFIG_SHARED_FS_PATH,
}
for build_env_key, job_cfg_key in build_env_to_job_cfg_keys.items():
if build_env_cfg[build_env_key]:
- job_cfg[JOB_SITECONFIG][job_cfg_key] = build_env_cfg[build_env_key]
+ job_cfg[job_metadata.JOB_CFG_SITE_CONFIG_SECTION][job_cfg_key] = build_env_cfg[build_env_key]
- job_cfg[JOB_REPOSITORY] = {}
+ job_cfg[job_metadata.JOB_CFG_REPOSITORY_SECTION] = {}
# directory for repos.cfg
- # NOTE REPOS_CFG_DIR is a global configuration setting for all repositories,
- # hence it is stored in repos_cfg whereas repo_cfg used further below
- # contains setting for a specific repository
- if REPOS_CFG_DIR in repos_cfg and repos_cfg[REPOS_CFG_DIR]:
- job_cfg[JOB_REPOSITORY][JOB_REPOS_CFG_DIR] = jobcfg_dir
+ # NOTE config.REPO_TARGETS_SETTING_REPOS_CFG_DIR is a global configuration
+ # setting for all repositories, hence it is stored in repos_cfg whereas
+ # repo_cfg used further below contains setting for a specific repository
+ repo_section_str = job_metadata.JOB_CFG_REPOSITORY_SECTION
+ cfg_repos_cfg_dir = config.REPO_TARGETS_SETTING_REPOS_CFG_DIR
+ if cfg_repos_cfg_dir in repos_cfg and repos_cfg[cfg_repos_cfg_dir]:
+ job_cfg[repo_section_str][job_metadata.JOB_CFG_REPOSITORY_REPOS_CFG_DIR] = jobcfg_dir
# repo id
- job_cfg[JOB_REPOSITORY][JOB_REPO_ID] = repo_id
+ job_cfg[repo_section_str][job_metadata.JOB_CFG_REPOSITORY_REPO_ID] = repo_id
# settings for a specific repository
if repo_id in repos_cfg:
repo_cfg = repos_cfg[repo_id]
- if repo_cfg[REPOS_CONTAINER]:
- job_cfg[JOB_REPOSITORY][JOB_CONTAINER] = repo_cfg[REPOS_CONTAINER]
- if repo_cfg[REPOS_REPO_NAME]:
- job_cfg[JOB_REPOSITORY][JOB_REPO_NAME] = repo_cfg[REPOS_REPO_NAME]
- if repo_cfg[REPOS_REPO_VERSION]:
- job_cfg[JOB_REPOSITORY][JOB_REPO_VERSION] = repo_cfg[REPOS_REPO_VERSION]
-
- job_cfg[JOB_ARCHITECTURE] = {}
- job_cfg[JOB_ARCHITECTURE][JOB_SOFTWARE_SUBDIR] = software_subdir
- job_cfg[JOB_ARCHITECTURE][JOB_OS_TYPE] = os_type
-
- # copy repos_cfg[REPOS_CFG_DIR]/repos.cfg to
- # copy repos_cfg[REPOS_CFG_DIR]/*.tgz to
- if REPOS_CFG_DIR in repos_cfg and repos_cfg[REPOS_CFG_DIR] and os.path.isdir(repos_cfg[REPOS_CFG_DIR]):
- src = repos_cfg[REPOS_CFG_DIR]
+ if repo_cfg[cvmfs_repository.REPOS_CFG_CONTAINER]:
+ job_cfg_repo_container = job_metadata.JOB_CFG_REPOSITORY_CONTAINER
+ job_cfg[repo_section_str][job_cfg_repo_container] = repo_cfg[cvmfs_repository.REPOS_CFG_CONTAINER]
+ if repo_cfg[cvmfs_repository.REPOS_CFG_REPO_NAME]:
+ job_cfg_repo_name = job_metadata.JOB_CFG_REPOSITORY_REPO_NAME
+ job_cfg[repo_section_str][job_cfg_repo_name] = repo_cfg[cvmfs_repository.REPOS_CFG_REPO_NAME]
+ if repo_cfg[cvmfs_repository.REPOS_CFG_REPO_VERSION]:
+ job_cfg_repo_version = job_metadata.JOB_CFG_REPOSITORY_REPO_VERSION
+ job_cfg[repo_section_str][job_cfg_repo_version] = repo_cfg[cvmfs_repository.REPOS_CFG_REPO_VERSION]
+
+ job_cfg_arch_section = job_metadata.JOB_CFG_ARCHITECTURE_SECTION
+ job_cfg[job_cfg_arch_section] = {}
+ job_cfg[job_cfg_arch_section][job_metadata.JOB_CFG_ARCHITECTURE_SOFTWARE_SUBDIR] = software_subdir
+ job_cfg[job_cfg_arch_section][job_metadata.JOB_CFG_ARCHITECTURE_OS_TYPE] = os_type
+
+ # copy contents of directory containing repository configuration to directory
+ # containing job configuration/metadata
+ if cfg_repos_cfg_dir in repos_cfg and repos_cfg[cfg_repos_cfg_dir] and os.path.isdir(repos_cfg[cfg_repos_cfg_dir]):
+ src = repos_cfg[cfg_repos_cfg_dir]
shutil.copytree(src, jobcfg_dir)
log(f"{fn}(): copied {src} to {jobcfg_dir}")
- # make sure that exists
+ # make sure that the job config directory exists (in case it wasn't just copied)
os.makedirs(jobcfg_dir, exist_ok=True)
- jobcfg_file = os.path.join(jobcfg_dir, JOB_CFG_FILENAME)
+ jobcfg_file = os.path.join(jobcfg_dir, job_metadata.JOB_CFG_FILENAME)
with open(jobcfg_file, "w") as jcf:
job_cfg.write(jcf)
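
To make the comment block above concrete, the generated `cfg/job.cfg` might look like the following; the section and option names follow `tools/job_metadata.py`, while all values are hypothetical examples.

```
# Hypothetical example of the resulting cfg/job.cfg written by prepare_job_cfg();
# section/option names follow tools/job_metadata.py, values are made up.
example_job_cfg = """\
[site_config]
local_tmp = /tmp/$USER/EESSI
shared_fs_path = /project/eessi_bot
build_logs_dir = /project/eessi_bot/build_logs

[repository]
repos_cfg_dir = /path/to/job/dir/cfg
repo_id = eessi.io-2023.06
container = docker://ghcr.io/eessi/build-node:debian11
repo_name = software.eessi.io
repo_version = 2023.06

[architecture]
software_subdir = x86_64/generic
os_type = linux
"""
print(example_job_cfg)
```
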
@@ -669,7 +633,7 @@ def submit_job(job, cfg):
Returns:
tuple of 2 elements containing
- (string): id of the submitted job
- - (string): path JOBS_BASE_DIR/job.year_month/job.pr_id/SLURM_JOBID which
+ - (string): path config.BUILDENV_SETTING_JOBS_BASE_DIR/job.year_month/job.pr_id/SLURM_JOBID which
is a symlink to the job's working directory (job[0] or job.working_dir)
"""
fn = sys._getframe().f_code.co_name
@@ -678,7 +642,7 @@ def submit_job(job, cfg):
# add a default time limit of 24h to the job submit command if no other time
# limit is specified already
- all_opts_str = " ".join([build_env_cfg[SLURM_PARAMS], job.slurm_opts])
+ all_opts_str = " ".join([build_env_cfg[config.BUILDENV_SETTING_SLURM_PARAMS], job.slurm_opts])
all_opts_list = all_opts_str.split(" ")
if any([(opt.startswith("--time") or opt.startswith("-t")) for opt in all_opts_list]):
time_limit = ""
@@ -686,11 +650,11 @@ def submit_job(job, cfg):
time_limit = f"--time={DEFAULT_JOB_TIME_LIMIT}"
command_line = ' '.join([
- build_env_cfg[SUBMIT_COMMAND],
- build_env_cfg[SLURM_PARAMS],
+ build_env_cfg[config.BUILDENV_SETTING_SUBMIT_COMMAND],
+ build_env_cfg[config.BUILDENV_SETTING_SLURM_PARAMS],
time_limit,
job.slurm_opts,
- build_env_cfg[BUILD_JOB_SCRIPT],
+ build_env_cfg[config.BUILDENV_SETTING_BUILD_JOB_SCRIPT],
])
cmdline_output, cmdline_error, cmdline_exit_code = run_cmd(command_line,
@@ -699,14 +663,14 @@ def submit_job(job, cfg):
# sbatch output is 'Submitted batch job JOBID'
# parse job id, add it to array of submitted jobs and create a symlink
- # from JOBS_BASE_DIR/job.year_month/job.pr_id/SLURM_JOBID to the job's
+ # from config.BUILDENV_SETTING_JOBS_BASE_DIR/job.year_month/job.pr_id/SLURM_JOBID to the job's
# working directory
log(f"{fn}(): sbatch out: {cmdline_output}")
log(f"{fn}(): sbatch err: {cmdline_error}")
job_id = cmdline_output.split()[3]
- symlink = os.path.join(build_env_cfg[JOBS_BASE_DIR], job.year_month, job.pr_id, job_id)
+ symlink = os.path.join(build_env_cfg[config.BUILDENV_SETTING_JOBS_BASE_DIR], job.year_month, job.pr_id, job_id)
log(f"{fn}(): create symlink {symlink} -> {job[0]}")
os.symlink(job[0], symlink)
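
A minimal sketch of the sbatch output parsing and of the symlink path that gets created; the directory names below are invented, only the parsing of the fourth word of `Submitted batch job JOBID` reflects the code above.

```
# Sketch of job id parsing from sbatch output and of the symlink path layout;
# the paths used here are hypothetical.
import os

cmdline_output = "Submitted batch job 123456"
job_id = cmdline_output.split()[3]          # -> "123456"

jobs_base_dir = "/project/eessi_bot/jobs"   # buildenv setting jobs_base_dir
year_month = "2024.01"
pr_id = "pr_42"
symlink = os.path.join(jobs_base_dir, year_month, pr_id, job_id)
print(symlink)  # /project/eessi_bot/jobs/2024.01/pr_42/123456
```
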
@@ -738,17 +702,18 @@ def create_pr_comment(job, job_id, app_name, pr, gh, symlink):
dt = datetime.now(timezone.utc)
# construct initial job comment
- submitted_job_comments_cfg = config.read_config()[SUBMITTED_JOB_COMMENTS]
- job_comment = (f"{submitted_job_comments_cfg[INITIAL_COMMENT]}"
+ submitted_job_comments_cfg = config.read_config()[config.SECTION_SUBMITTED_JOB_COMMENTS]
+ job_comment = (f"{submitted_job_comments_cfg[config.SUBMITTED_JOB_COMMENTS_SETTING_INITIAL_COMMENT]}"
f"\n|date|job status|comment|\n"
f"|----------|----------|------------------------|\n"
f"|{dt.strftime('%b %d %X %Z %Y')}|"
f"submitted|"
- f"{submitted_job_comments_cfg[AWAITS_RELEASE]}|").format(app_name=app_name,
- arch_name=arch_name,
- symlink=symlink,
- repo_id=job.repo_id,
- job_id=job_id)
+ f"{submitted_job_comments_cfg[config.SUBMITTED_JOB_COMMENTS_SETTING_AWAITS_RELEASE]}|").format(
+ app_name=app_name,
+ arch_name=arch_name,
+ symlink=symlink,
+ repo_id=job.repo_id,
+ job_id=job_id)
# create comment to pull request
repo_name = pr.base.repo.full_name
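
A sketch of how the initial PR comment is filled in; the template strings below are invented stand-ins for the `[submitted_job_comments]` settings, only the placeholder names match what the code above substitutes via `.format()`.

```
# Hypothetical templates; only the placeholder names ({app_name}, {arch_name},
# {symlink}, {repo_id}, {job_id}) reflect what create_pr_comment() substitutes.
from datetime import datetime, timezone

initial_comment = "New job on instance `{app_name}` for architecture `{arch_name}` and repository `{repo_id}`"
awaits_release = "job id `{job_id}` awaits release by job manager"

dt = datetime.now(timezone.utc)
job_comment = (f"{initial_comment}"
               f"\n|date|job status|comment|\n"
               f"|----------|----------|------------------------|\n"
               f"|{dt.strftime('%b %d %X %Z %Y')}|"
               f"submitted|"
               f"{awaits_release}|").format(app_name="eessi-bot",
                                            arch_name="x86_64-generic",
                                            symlink="/project/eessi_bot/jobs/2024.01/pr_42/123456",
                                            repo_id="EESSI",
                                            job_id="123456")
print(job_comment)
```
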
@@ -783,7 +748,7 @@ def submit_build_jobs(pr, event_info, action_filter):
fn = sys._getframe().f_code.co_name
cfg = config.read_config()
- app_name = cfg[GITHUB].get(APP_NAME)
+ app_name = cfg[config.SECTION_GITHUB].get(config.GITHUB_SETTING_APP_NAME)
# setup job directories (one per element in product of architecture x repositories)
jobs = prepare_jobs(pr, cfg, event_info, action_filter)
@@ -810,7 +775,7 @@ def submit_build_jobs(pr, event_info, action_filter):
pr_comment = pr_comments.PRComment(pr.base.repo.full_name, pr.number, pr_comment.id)
# create _bot_job.metadata file in the job's working directory
- create_metadata_file(job, job_id, pr_comment)
+ job_metadata.create_metadata_file(job, job_id, pr_comment)
return job_id_to_comment_map
@@ -834,16 +799,16 @@ def check_build_permission(pr, event_info):
cfg = config.read_config()
- buildenv = cfg[BUILDENV]
+ buildenv = cfg[config.SECTION_BUILDENV]
- build_permission = buildenv.get(BUILD_PERMISSION, '')
+ build_permission = buildenv.get(config.BUILDENV_SETTING_BUILD_PERMISSION, '')
log(f"{fn}(): build permission '{build_permission}'")
build_labeler = event_info['raw_request_body']['sender']['login']
if build_labeler not in build_permission.split():
log(f"{fn}(): GH account '{build_labeler}' is not authorized to build")
- no_build_permission_comment = buildenv.get(NO_BUILD_PERMISSION_COMMENT)
+ no_build_permission_comment = buildenv.get(config.BUILDENV_SETTING_NO_BUILD_PERMISSION_COMMENT)
repo_name = event_info["raw_request_body"]["repository"]["full_name"]
pr_comments.create_comment(repo_name,
pr.number,
@@ -882,7 +847,9 @@ def request_bot_build_issue_comments(repo_name, pr_number):
for comment in comments:
        # iterate through the comments to find the one in which the status of the build is reported
- if config.read_config()["submitted_job_comments"]['initial_comment'][:20] in comment['body']:
+ submitted_job_comments_section = config.read_config()[config.SECTION_SUBMITTED_JOB_COMMENTS]
+ initial_comment_fmt = submitted_job_comments_section[config.SUBMITTED_JOB_COMMENTS_SETTING_INITIAL_COMMENT]
+ if initial_comment_fmt[:20] in comment['body']:
        # get architecture from comment['body']
first_line = comment['body'].split('\n')[0]
diff --git a/tasks/deploy.py b/tasks/deploy.py
index 8fc8d708..32e7705f 100644
--- a/tasks/deploy.py
+++ b/tasks/deploy.py
@@ -4,8 +4,11 @@
# The bot helps with requests to add software installations to the
# EESSI software layer, see https://github.com/EESSI/software-layer
#
-# author: Thomas Roeblitz (@trz42)
+# author: Bob Droege (@bedroge)
+# author: Kenneth Hoste (@boegel)
+# author: Hafsa Naeem (@hafsa-naeem)
# author: Jonas Qvigstad (@jonas-lq)
+# author: Thomas Roeblitz (@trz42)
#
# license: GPLv2
#
@@ -23,22 +26,8 @@
# Local application imports (anything from EESSI/eessi-bot-software-layer)
from connections import github
-from tasks.build import CFG_DIRNAME, JOB_CFG_FILENAME, JOB_REPO_ID, JOB_REPOSITORY
from tasks.build import get_build_env_cfg
-from tools import config, pr_comments, run_cmd
-from tools.job_metadata import read_job_metadata_from_file
-
-
-BUCKET_NAME = "bucket_name"
-DEPLOYCFG = "deploycfg"
-DEPLOY_PERMISSION = "deploy_permission"
-ENDPOINT_URL = "endpoint_url"
-JOBS_BASE_DIR = "jobs_base_dir"
-METADATA_PREFIX = "metadata_prefix"
-NO_DEPLOY_PERMISSION_COMMENT = "no_deploy_permission_comment"
-TARBALL_PREFIX = "tarball_prefix"
-TARBALL_UPLOAD_SCRIPT = "tarball_upload_script"
-UPLOAD_POLICY = "upload_policy"
+from tools import config, job_metadata, pr_comments, run_cmd
def determine_job_dirs(pr_number):
@@ -55,14 +44,14 @@ def determine_job_dirs(pr_number):
job_directories = []
- # a job directory's name has the format cfg[JOBS_BASE_DIR]/YYYY.MM/pr_/JOBID
+ # a job directory's name has the format cfg[config.BUILDENV_SETTING_JOBS_BASE_DIR]/YYYY.MM/pr_/JOBID
# - we may have to scan multiple YYYY.MM directories if the pull request was
# processed over more than one month (that is jobs were run in two or more
# months)
# - we assume that a JOBID is a positive integer
cfg = config.read_config()
build_env_cfg = get_build_env_cfg(cfg)
- jobs_base_dir = build_env_cfg[JOBS_BASE_DIR]
+ jobs_base_dir = build_env_cfg[config.BUILDENV_SETTING_JOBS_BASE_DIR]
log(f"{funcname}(): jobs_base_dir = {jobs_base_dir}")
date_pr_job_pattern = (f"[0-9][0-9][0-9][0-9].[0-9][0-9]/"
@@ -89,10 +78,10 @@ def determine_pr_comment_id(job_dir):
"""
# assumes that last part of job_dir encodes the job's id
job_id = os.path.basename(os.path.normpath(job_dir))
- job_metadata_file = os.path.join(job_dir, f"_bot_job{job_id}.metadata")
- job_metadata = read_job_metadata_from_file(job_metadata_file)
- if job_metadata and "pr_comment_id" in job_metadata:
- return int(job_metadata["pr_comment_id"])
+ metadata_file = os.path.join(job_dir, f"_bot_job{job_id}.metadata")
+ metadata = job_metadata.get_section_from_file(metadata_file, job_metadata.JOB_PR_SECTION)
+ if metadata and "pr_comment_id" in metadata:
+ return int(metadata["pr_comment_id"])
else:
return -1
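
For illustration, a sketch of the `_bot_jobJOBID.metadata` file read here and of extracting `pr_comment_id` from its `[PR]` section; the file contents are invented, the section name matches `JOB_PR_SECTION` in `tools/job_metadata.py`.

```
# Hypothetical _bot_job123456.metadata contents; the [PR] section name follows
# tools/job_metadata.py (JOB_PR_SECTION), the values are made up.
import configparser

metadata_text = """\
[PR]
repo = EESSI/software-layer
pr_number = 42
pr_comment_id = 1234567890
"""

parser = configparser.ConfigParser()
parser.read_string(metadata_text)
pr_section = parser["PR"]
pr_comment_id = int(pr_section["pr_comment_id"]) if "pr_comment_id" in pr_section else -1
print(pr_comment_id)  # 1234567890
```
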
@@ -118,84 +107,93 @@ def determine_slurm_out(job_dir):
return slurm_out
-def determine_eessi_tarballs(job_dir):
+def determine_artefacts(job_dir):
"""
- Determine paths to EESSI software tarballs in a given job directory.
+ Determine paths to artefacts created by a job in a given job directory.
Args:
job_dir (string): working directory of the job
Returns:
- eessi_tarballs (list): list of paths to all tarballs in job_dir
+ (list): list of paths to all artefacts in job_dir, or None if they could not be determined
"""
- # determine all tarballs that are stored in the directory job_dir
- # and whose name matches a certain pattern
- tarball_pattern = "eessi-*software-*.tar.gz"
- glob_str = os.path.join(job_dir, tarball_pattern)
- eessi_tarballs = glob.glob(glob_str)
+ # determine all artefacts that are stored in the directory job_dir
+ # by using the _bot_jobSLURM_JOBID.result file in that job directory
+ job_id = job_metadata.determine_job_id_from_job_directory(job_dir)
+ if job_id == 0:
+ # could not determine job id, hence cannot determine artefacts; return None
+ return None
- return eessi_tarballs
+ job_result_file = f"_bot_job{job_id}.result"
+ job_result_file_path = os.path.join(job_dir, job_result_file)
+ job_result = job_metadata.get_section_from_file(job_result_file_path, job_metadata.JOB_RESULT_SECTION)
+ if job_result and job_metadata.JOB_RESULT_ARTEFACTS in job_result:
+ # transform multiline value into a list
+ artefacts_list = job_result[job_metadata.JOB_RESULT_ARTEFACTS].split('\n')
+ # drop elements of length zero
+ artefacts = [af for af in artefacts_list if len(af) > 0]
+ return artefacts
+ else:
+ return None
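
Both `determine_artefacts` above and `check_job_status` below read the `_bot_jobJOBID.result` file written by `bot/check-build.sh`; a sketch of such a file and of turning the multiline `artefacts` value into a list, with invented contents.

```
# Hypothetical _bot_job123456.result contents; the [RESULT] section name and
# the status/artefacts option names follow tools/job_metadata.py.
import configparser

result_text = """\
[RESULT]
status = SUCCESS
artefacts =
    eessi-2023.06-software-linux-x86_64-generic-1700000000.tar.gz
"""

parser = configparser.ConfigParser()
parser.read_string(result_text)
job_result = parser["RESULT"]

# multiline value -> list, dropping elements of length zero
artefacts = [af for af in job_result["artefacts"].split('\n') if len(af) > 0]
print(job_result["status"], artefacts)
```
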
-def check_build_status(slurm_out, eessi_tarballs):
+
+def check_job_status(job_dir):
"""
Check status of the job in a given directory.
Args:
- slurm_out (string): path to job output file
- eessi_tarballs (list): list of eessi tarballs found for job
+ job_dir (string): path to job directory
Returns:
(bool): True -> job succeeded, False -> job failed
"""
fn = sys._getframe().f_code.co_name
- # TODO use _bot_job.result file to determine result status
- # cases:
- # (1) no result file --> add line with unknown status, found tarball xyz but no result file
- # (2) result file && status = SUCCESS --> return True
- # (3) result file && status = FAILURE --> return False
-
- # Function checks if all modules have been built and if a tarball has
- # been created.
-
- # set some initial values
- no_missing_modules = False
- targz_created = False
-
- # check slurm out for the below strings
- # ^No missing modules!$ --> all software successfully installed
- # ^/eessi_bot_job/eessi-.*-software-.*.tar.gz created!$ -->
- # tarball successfully created
- if os.path.exists(slurm_out):
- re_missing_modules = re.compile(".*No missing installations, party time!.*")
- re_targz_created = re.compile("^/eessi_bot_job/eessi-.*-software-.*.tar.gz created!$")
- outfile = open(slurm_out, "r")
- for line in outfile:
- if re_missing_modules.match(line):
- # no missing modules
- no_missing_modules = True
- log(f"{fn}(): line '{line}' matches '.*No missing installations, party time!.*'")
- if re_targz_created.match(line):
- # tarball created
- targz_created = True
- log(f"{fn}(): line '{line}' matches '^/eessi_bot_job/eessi-.*-software-.*.tar.gz created!$'")
-
- log(f"{fn}(): found {len(eessi_tarballs)} tarballs for '{slurm_out}'")
-
- # we test results from the above check and if there is one tarball only
- if no_missing_modules and targz_created and len(eessi_tarballs) == 1:
- return True
+ # use _bot_job.result file to determine result status
+ # cases:
+ # (0) no job id --> return False
+ # (1) no result file --> return False
+ # (2) result file && status = SUCCESS --> return True
+ # (3) result file && status = FAILURE --> return False
+
+ # case (0): no job id --> return False
+ job_id = job_metadata.determine_job_id_from_job_directory(job_dir)
+ if job_id == 0:
+ # could not determine job id, return False
+ log(f"{fn}(): could not determine job id from directory '{job_dir}'\n")
+ return False
+
+ job_result_file = f"_bot_job{job_id}.result"
+ job_result_file_path = os.path.join(job_dir, job_result_file)
+ job_result = job_metadata.get_section_from_file(job_result_file_path, job_metadata.JOB_RESULT_SECTION)
+
+ job_status = job_metadata.JOB_RESULT_FAILURE
+ if job_result and job_metadata.JOB_RESULT_STATUS in job_result:
+ job_status = job_result[job_metadata.JOB_RESULT_STATUS]
+ else:
+ # case (1): no result file or no status --> return False
+ log(f"{fn}(): no result file '{job_result_file_path}' or reading it failed\n")
+ return False
+
+ log(f"{fn}(): job status is {job_status} (compare against {job_metadata.JOB_RESULT_SUCCESS})\n")
- return False
+ if job_status == job_metadata.JOB_RESULT_SUCCESS:
+ # case (2): result file && status = SUCCESS --> return True
+ log(f"{fn}(): found status 'SUCCESS' from '{job_result_file_path}'\n")
+ return True
+ else:
+ # case (3): result file && status = FAILURE --> return False
+ log(f"{fn}(): found status 'FAILURE' from '{job_result_file_path}'\n")
+ return False
-def update_pr_comment(tarball, repo_name, pr_number, pr_comment_id, state, msg):
+def update_pr_comment(artefact, repo_name, pr_number, pr_comment_id, state, msg):
"""
- Update pull request comment for the given comment id or tarball name
+ Update pull request comment for the given comment id or artefact name
Args:
- tarball (string): name of tarball that is looked for in a PR comment
+ artefact (string): name of artefact that is looked for in a PR comment
repo_name (string): name of the repository (USER_ORG/REPOSITORY)
pr_number (int): pull request number
state (string): value for state column to be used in update
@@ -208,23 +206,23 @@ def update_pr_comment(tarball, repo_name, pr_number, pr_comment_id, state, msg):
repo = gh.get_repo(repo_name)
pull_request = repo.get_pull(pr_number)
- issue_comment = pr_comments.determine_issue_comment(pull_request, pr_comment_id, tarball)
+ issue_comment = pr_comments.determine_issue_comment(pull_request, pr_comment_id, artefact)
if issue_comment:
dt = datetime.now(timezone.utc)
comment_update = (f"\n|{dt.strftime('%b %d %X %Z %Y')}|{state}|"
- f"transfer of `{tarball}` to S3 bucket {msg}|")
+ f"transfer of `{artefact}` to S3 bucket {msg}|")
# append update to existing comment
issue_comment.edit(issue_comment.body + comment_update)
-def append_tarball_to_upload_log(tarball, job_dir):
+def append_artefact_to_upload_log(artefact, job_dir):
"""
- Append tarball to upload log.
+ Append artefact to upload log.
Args:
- tarball (string): name of tarball that has been uploaded
- job_dir (string): directory of the job that built the tarball
+ artefact (string): name of artefact that has been uploaded
+ job_dir (string): directory of the job that built the artefact
Returns:
None (implicitly)
@@ -233,18 +231,19 @@ def append_tarball_to_upload_log(tarball, job_dir):
pr_base_dir = os.path.dirname(job_dir)
uploaded_txt = os.path.join(pr_base_dir, 'uploaded.txt')
with open(uploaded_txt, "a") as upload_log:
- job_plus_tarball = os.path.join(os.path.basename(job_dir), tarball)
- upload_log.write(f"{job_plus_tarball}\n")
+ job_plus_artefact = os.path.join(os.path.basename(job_dir), artefact)
+ upload_log.write(f"{job_plus_artefact}\n")
-def upload_tarball(job_dir, build_target, timestamp, repo_name, pr_number, pr_comment_id):
+def upload_artefact(job_dir, payload, timestamp, repo_name, pr_number, pr_comment_id):
"""
- Upload built tarball to an S3 bucket.
+ Upload artefact to an S3 bucket.
Args:
job_dir (string): path to the job directory
- build_target (string): eessi-VERSION-COMPONENT-OS-ARCH
- timestamp (int): timestamp of the tarball
+ payload (string): can be any name describing the payload, e.g., for
+ EESSI it could have the format eessi-VERSION-COMPONENT-OS-ARCH
+ timestamp (int): timestamp of the artefact
repo_name (string): repository of the pull request
pr_number (int): number of the pull request
pr_comment_id (int): id of the pull request comment
@@ -254,18 +253,18 @@ def upload_tarball(job_dir, build_target, timestamp, repo_name, pr_number, pr_co
"""
funcname = sys._getframe().f_code.co_name
- tarball = f"{build_target}-{timestamp}.tar.gz"
- abs_path = os.path.join(job_dir, tarball)
- log(f"{funcname}(): deploying build '{abs_path}'")
+ artefact = f"{payload}-{timestamp}.tar.gz"
+ abs_path = os.path.join(job_dir, artefact)
+ log(f"{funcname}(): uploading '{abs_path}'")
# obtain config settings
cfg = config.read_config()
- deploycfg = cfg[DEPLOYCFG]
- tarball_upload_script = deploycfg.get(TARBALL_UPLOAD_SCRIPT)
- endpoint_url = deploycfg.get(ENDPOINT_URL) or ''
- bucket_spec = deploycfg.get(BUCKET_NAME)
- metadata_prefix = deploycfg.get(METADATA_PREFIX)
- tarball_prefix = deploycfg.get(TARBALL_PREFIX)
+ deploycfg = cfg[config.SECTION_DEPLOYCFG]
+ artefact_upload_script = deploycfg.get(config.DEPLOYCFG_SETTING_ARTEFACT_UPLOAD_SCRIPT)
+ endpoint_url = deploycfg.get(config.DEPLOYCFG_SETTING_ENDPOINT_URL) or ''
+ bucket_spec = deploycfg.get(config.DEPLOYCFG_SETTING_BUCKET_NAME)
+ metadata_prefix = deploycfg.get(config.DEPLOYCFG_SETTING_METADATA_PREFIX)
+ artefact_prefix = deploycfg.get(config.DEPLOYCFG_SETTING_ARTEFACT_PREFIX)
# if bucket_spec value looks like a dict, try parsing it as such
if bucket_spec.lstrip().startswith('{'):
@@ -275,13 +274,13 @@ def upload_tarball(job_dir, build_target, timestamp, repo_name, pr_number, pr_co
if metadata_prefix.lstrip().startswith('{'):
metadata_prefix = json.loads(metadata_prefix)
- # if tarball_prefix value looks like a dict, try parsing it as such
- if tarball_prefix.lstrip().startswith('{'):
- tarball_prefix = json.loads(tarball_prefix)
+ # if artefact_prefix value looks like a dict, try parsing it as such
+ if artefact_prefix.lstrip().startswith('{'):
+ artefact_prefix = json.loads(artefact_prefix)
- jobcfg_path = os.path.join(job_dir, CFG_DIRNAME, JOB_CFG_FILENAME)
+ jobcfg_path = os.path.join(job_dir, job_metadata.JOB_CFG_DIRECTORY_NAME, job_metadata.JOB_CFG_FILENAME)
jobcfg = config.read_config(jobcfg_path)
- target_repo_id = jobcfg[JOB_REPOSITORY][JOB_REPO_ID]
+ target_repo_id = jobcfg[job_metadata.JOB_CFG_REPOSITORY_SECTION][job_metadata.JOB_CFG_REPOSITORY_REPO_ID]
if isinstance(bucket_spec, str):
bucket_name = bucket_spec
@@ -290,13 +289,13 @@ def upload_tarball(job_dir, build_target, timestamp, repo_name, pr_number, pr_co
# bucket spec may be a mapping of target repo id to bucket name
bucket_name = bucket_spec.get(target_repo_id)
if bucket_name is None:
- update_pr_comment(tarball, repo_name, pr_number, pr_comment_id, "not uploaded",
+ update_pr_comment(artefact, repo_name, pr_number, pr_comment_id, "not uploaded",
f"failed (no bucket specified for {target_repo_id})")
return
else:
log(f"Using bucket for {target_repo_id}: {bucket_name}")
else:
- update_pr_comment(tarball, repo_name, pr_number, pr_comment_id, "not uploaded",
+ update_pr_comment(artefact, repo_name, pr_number, pr_comment_id, "not uploaded",
f"failed (incorrect bucket spec: {bucket_spec})")
return
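
The `bucket_name` setting (and likewise `metadata_prefix` and `artefact_prefix`) may be a plain string or a JSON mapping from target repository id to value; a small sketch of resolving both forms, with invented bucket names and repository ids.

```
# Hypothetical bucket_name values: either one bucket for everything, or a
# per-repository mapping encoded as JSON, as handled in upload_artefact().
import json

def resolve_bucket(bucket_spec, target_repo_id):
    # if the value looks like a dict, try parsing it as JSON
    if bucket_spec.lstrip().startswith('{'):
        bucket_spec = json.loads(bucket_spec)
    if isinstance(bucket_spec, str):
        return bucket_spec
    if isinstance(bucket_spec, dict):
        return bucket_spec.get(target_repo_id)  # None if repo id unknown
    return None

print(resolve_bucket("eessi-staging", "eessi.io-2023.06"))
print(resolve_bucket('{"eessi.io-2023.06": "software.eessi.io-2023.06"}', "eessi.io-2023.06"))
```
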
@@ -307,31 +306,31 @@ def upload_tarball(job_dir, build_target, timestamp, repo_name, pr_number, pr_co
# metadata prefix spec may be a mapping of target repo id to metadata prefix
metadata_prefix_arg = metadata_prefix.get(target_repo_id)
if metadata_prefix_arg is None:
- update_pr_comment(tarball, repo_name, pr_number, pr_comment_id, "not uploaded",
+ update_pr_comment(artefact, repo_name, pr_number, pr_comment_id, "not uploaded",
f"failed (no metadata prefix specified for {target_repo_id})")
return
else:
log(f"Using metadata prefix for {target_repo_id}: {metadata_prefix_arg}")
else:
- update_pr_comment(tarball, repo_name, pr_number, pr_comment_id, "not uploaded",
+ update_pr_comment(artefact, repo_name, pr_number, pr_comment_id, "not uploaded",
f"failed (incorrect metadata prefix spec: {metadata_prefix_arg})")
return
- if isinstance(tarball_prefix, str):
- tarball_prefix_arg = tarball_prefix
- log(f"Using specified tarball prefix: {tarball_prefix_arg}")
- elif isinstance(tarball_prefix, dict):
- # tarball prefix spec may be a mapping of target repo id to tarball prefix
- tarball_prefix_arg = tarball_prefix.get(target_repo_id)
- if tarball_prefix_arg is None:
- update_pr_comment(tarball, repo_name, pr_number, pr_comment_id, "not uploaded",
- f"failed (no tarball prefix specified for {target_repo_id})")
+ if isinstance(artefact_prefix, str):
+ artefact_prefix_arg = artefact_prefix
+ log(f"Using specified artefact prefix: {artefact_prefix_arg}")
+ elif isinstance(artefact_prefix, dict):
+ # artefact prefix spec may be a mapping of target repo id to artefact prefix
+ artefact_prefix_arg = artefact_prefix.get(target_repo_id)
+ if artefact_prefix_arg is None:
+ update_pr_comment(artefact, repo_name, pr_number, pr_comment_id, "not uploaded",
+ f"failed (no artefact prefix specified for {target_repo_id})")
return
else:
- log(f"Using tarball prefix for {target_repo_id}: {tarball_prefix_arg}")
+ log(f"Using artefact prefix for {target_repo_id}: {artefact_prefix_arg}")
else:
- update_pr_comment(tarball, repo_name, pr_number, pr_comment_id, "not uploaded",
- f"failed (incorrect tarball prefix spec: {tarball_prefix_arg})")
+ update_pr_comment(artefact, repo_name, pr_number, pr_comment_id, "not uploaded",
+ f"failed (incorrect artefact prefix spec: {artefact_prefix_arg})")
return
# run 'eessi-upload-to-staging {abs_path}'
@@ -340,52 +339,53 @@ def upload_tarball(job_dir, build_target, timestamp, repo_name, pr_number, pr_co
# bucket_name = 'eessi-staging'
# if endpoint_url not set use EESSI S3 bucket
# (2) run command
- cmd_args = [tarball_upload_script, ]
+ cmd_args = [artefact_upload_script, ]
+ if len(artefact_prefix_arg) > 0:
+ cmd_args.extend(['--artefact-prefix', artefact_prefix_arg])
if len(bucket_name) > 0:
cmd_args.extend(['--bucket-name', bucket_name])
if len(endpoint_url) > 0:
cmd_args.extend(['--endpoint-url', endpoint_url])
if len(metadata_prefix_arg) > 0:
cmd_args.extend(['--metadata-prefix', metadata_prefix_arg])
- cmd_args.extend(['--repository', repo_name])
- cmd_args.extend(['--pull-request-number', str(pr_number)])
cmd_args.extend(['--pr-comment-id', str(pr_comment_id)])
- if len(tarball_prefix_arg) > 0:
- cmd_args.extend(['--tarball-prefix', tarball_prefix_arg])
+ cmd_args.extend(['--pull-request-number', str(pr_number)])
+ cmd_args.extend(['--repository', repo_name])
cmd_args.append(abs_path)
upload_cmd = ' '.join(cmd_args)
# run_cmd does all the logging we might need
- out, err, ec = run_cmd(upload_cmd, 'Upload tarball to S3 bucket', raise_on_error=False)
+ out, err, ec = run_cmd(upload_cmd, 'Upload artefact to S3 bucket', raise_on_error=False)
if ec == 0:
# add file to 'job_dir/../uploaded.txt'
- append_tarball_to_upload_log(tarball, job_dir)
+ append_artefact_to_upload_log(artefact, job_dir)
# update pull request comment
- update_pr_comment(tarball, repo_name, pr_number, pr_comment_id, "uploaded",
+ update_pr_comment(artefact, repo_name, pr_number, pr_comment_id, "uploaded",
"succeeded")
else:
# update pull request comment
- update_pr_comment(tarball, repo_name, pr_number, pr_comment_id, "not uploaded",
+ update_pr_comment(artefact, repo_name, pr_number, pr_comment_id, "not uploaded",
"failed")
-def uploaded_before(build_target, job_dir):
+def uploaded_before(payload, job_dir):
"""
- Determines if a tarball for a job has been uploaded before. Function
+ Determines if an artefact for a job has been uploaded before. Function
scans the log file named 'job_dir/../uploaded.txt' for the string
- '.*build_target-.*.tar.gz'.
+ '.*{payload}-.*.tar.gz'.
Args:
- build_target (string): eessi-VERSION-COMPONENT-OS-ARCH
+ payload (string): can be any name describing the payload, e.g., for
+ EESSI it could have the format eessi-VERSION-COMPONENT-OS-ARCH
job_dir (string): working directory of the job
Returns:
- (string): name of the first tarball found if any or None.
+ (string): name of the first artefact found if any or None.
"""
funcname = sys._getframe().f_code.co_name
- log(f"{funcname}(): any previous uploads for {build_target}?")
+ log(f"{funcname}(): any previous uploads for {payload}?")
pr_base_dir = os.path.dirname(job_dir)
uploaded_txt = os.path.join(pr_base_dir, "uploaded.txt")
@@ -393,13 +393,13 @@ def uploaded_before(build_target, job_dir):
if os.path.exists(uploaded_txt):
log(f"{funcname}(): upload log '{uploaded_txt}' exists")
- re_string = f".*{build_target}-.*.tar.gz.*"
- re_build_target = re.compile(re_string)
+ re_string = f".*{payload}-.*.tar.gz.*"
+ re_payload = re.compile(re_string)
with open(uploaded_txt, "r") as uploaded_log:
log(f"{funcname}(): scan log for pattern '{re_string}'")
for line in uploaded_log:
- if re_build_target.match(line):
+ if re_payload.match(line):
log(f"{funcname}(): found earlier upload {line.strip()}")
return line.strip()
else:
@@ -424,37 +424,34 @@ def determine_successful_jobs(job_dirs):
successes = []
for job_dir in job_dirs:
- slurm_out = determine_slurm_out(job_dir)
- eessi_tarballs = determine_eessi_tarballs(job_dir)
+ artefacts = determine_artefacts(job_dir)
pr_comment_id = determine_pr_comment_id(job_dir)
- if check_build_status(slurm_out, eessi_tarballs):
- log(f"{funcname}(): SUCCESSFUL build in '{job_dir}'")
+ if check_job_status(job_dir):
+ log(f"{funcname}(): SUCCESSFUL job in '{job_dir}'")
successes.append({'job_dir': job_dir,
- 'slurm_out': slurm_out,
'pr_comment_id': pr_comment_id,
- 'eessi_tarballs': eessi_tarballs})
+ 'artefacts': artefacts})
else:
- log(f"{funcname}(): FAILED build in '{job_dir}'")
+ log(f"{funcname}(): FAILED job in '{job_dir}'")
return successes
-def determine_tarballs_to_deploy(successes, upload_policy):
+def determine_artefacts_to_deploy(successes, upload_policy):
"""
- Determine tarballs to deploy depending on upload policy
+ Determine artefacts to deploy depending on upload policy
Args:
successes (list): list of dictionaries
- {'job_dir':job_dir, 'slurm_out':slurm_out, 'eessi_tarballs':eessi_tarballs}
+ {'job_dir':job_dir, 'pr_comment_id':pr_comment_id, 'artefacts':artefacts}
upload_policy (string): one of 'all', 'latest' or 'once'
'all': deploy all
- 'latest': deploy only the last for each build target
- 'once': deploy only latest if none for this build target has
+ 'latest': deploy only the last for each payload
+ 'once': deploy only latest if none for this payload has
been deployed before
Returns:
- (dictionary): dictionary of dictionaries representing built tarballs to
- be deployed
+ (dictionary): dictionary of dictionaries representing artefacts to be deployed
"""
funcname = sys._getframe().f_code.co_name
@@ -462,52 +459,51 @@ def determine_tarballs_to_deploy(successes, upload_policy):
to_be_deployed = {}
for job in successes:
- # all tarballs for successful job
- tarballs = job["eessi_tarballs"]
- log(f"{funcname}(): num tarballs {len(tarballs)}")
+ # all artefacts for successful job
+ artefacts = job["artefacts"]
+ log(f"{funcname}(): num artefacts {len(artefacts)}")
- # full path to first tarball for successful job
- # Note, only one tarball per job is expected.
- tb0 = tarballs[0]
- log(f"{funcname}(): path to 1st tarball: '{tb0}'")
+ # full path to first artefact for successful job
+ # Note, only one artefact per job is expected.
+ artefact = artefacts[0]
+ log(f"{funcname}(): path to 1st artefact: '{artefact}'")
- # name of tarball file only
- tb0_base = os.path.basename(tb0)
- log(f"{funcname}(): tarball filename: '{tb0_base}'")
+ # name of artefact file only
+ artefact_base = os.path.basename(artefact)
+ log(f"{funcname}(): artefact filename: '{artefact_base}'")
- # tarball name format: eessi-VERSION-COMPONENT-OS-ARCH-TIMESTAMP.tar.gz
- # remove "-TIMESTAMP.tar.gz"
- # build_target format: eessi-VERSION-COMPONENT-OS-ARCH
- build_target = "-".join(tb0_base.split("-")[:-1])
- log(f"{funcname}(): tarball build target '{build_target}'")
+ # artefact name format: PAYLOAD-TIMESTAMP.tar.gz
+ # remove "-TIMESTAMP.tar.gz" (last element when splitting along '-')
+ payload = "-".join(artefact_base.split("-")[:-1])
+ log(f"{funcname}(): artefact payload '{payload}'")
# timestamp in the filename
- timestamp = int(tb0_base.split("-")[-1][:-7])
- log(f"{funcname}(): tarball timestamp {timestamp}")
+ timestamp = int(artefact_base.split("-")[-1][:-7])
+ log(f"{funcname}(): artefact timestamp {timestamp}")
deploy = False
if upload_policy == "all":
deploy = True
elif upload_policy == "latest":
- if build_target in to_be_deployed:
- if to_be_deployed[build_target]["timestamp"] < timestamp:
+ if payload in to_be_deployed:
+ if to_be_deployed[payload]["timestamp"] < timestamp:
# current one will be replaced
deploy = True
else:
deploy = True
elif upload_policy == "once":
- uploaded = uploaded_before(build_target, job["job_dir"])
+ uploaded = uploaded_before(payload, job["job_dir"])
if uploaded is None:
deploy = True
else:
indent_fname = f"{' '*len(funcname + '(): ')}"
- log(f"{funcname}(): tarball for build target '{build_target}'\n"
+ log(f"{funcname}(): artefact for payload '{payload}'\n"
f"{indent_fname}has been uploaded through '{uploaded}'")
if deploy:
- to_be_deployed[build_target] = {"job_dir": job["job_dir"],
- "pr_comment_id": job["pr_comment_id"],
- "timestamp": timestamp}
+ to_be_deployed[payload] = {"job_dir": job["job_dir"],
+ "pr_comment_id": job["pr_comment_id"],
+ "timestamp": timestamp}
return to_be_deployed
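
A sketch of how payload and timestamp are recovered from an artefact filename of the form `PAYLOAD-TIMESTAMP.tar.gz`; the filename is invented, the splitting mirrors the code above.

```
# Hypothetical artefact filename in PAYLOAD-TIMESTAMP.tar.gz format.
import os

artefact = "/path/to/job/dir/eessi-2023.06-software-linux-x86_64-generic-1700000000.tar.gz"
artefact_base = os.path.basename(artefact)

# drop the trailing "-TIMESTAMP.tar.gz" to obtain the payload
payload = "-".join(artefact_base.split("-")[:-1])
# the last '-'-separated element minus the ".tar.gz" suffix (7 chars) is the timestamp
timestamp = int(artefact_base.split("-")[-1][:-7])

print(payload)    # eessi-2023.06-software-linux-x86_64-generic
print(timestamp)  # 1700000000
```
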
@@ -528,8 +524,8 @@ def deploy_built_artefacts(pr, event_info):
log(f"{funcname}(): deploy for PR {pr.number}")
cfg = config.read_config()
- deploy_cfg = cfg[DEPLOYCFG]
- deploy_permission = deploy_cfg.get(DEPLOY_PERMISSION, '')
+ deploy_cfg = cfg[config.SECTION_DEPLOYCFG]
+ deploy_permission = deploy_cfg.get(config.DEPLOYCFG_SETTING_DEPLOY_PERMISSION, '')
log(f"{funcname}(): deploy permission '{deploy_permission}'")
labeler = event_info['raw_request_body']['sender']['login']
@@ -538,7 +534,7 @@ def deploy_built_artefacts(pr, event_info):
# permission to trigger the deployment
if labeler not in deploy_permission.split():
log(f"{funcname}(): GH account '{labeler}' is not authorized to deploy")
- no_deploy_permission_comment = deploy_cfg.get(NO_DEPLOY_PERMISSION_COMMENT)
+ no_deploy_permission_comment = deploy_cfg.get(config.DEPLOYCFG_SETTING_NO_DEPLOY_PERMISSION_COMMENT)
repo_name = event_info["raw_request_body"]["repository"]["full_name"]
pr_comments.create_comment(repo_name,
pr.number,
@@ -548,7 +544,7 @@ def deploy_built_artefacts(pr, event_info):
log(f"{funcname}(): GH account '{labeler}' is authorized to deploy")
# get upload policy from config
- upload_policy = deploy_cfg.get(UPLOAD_POLICY)
+ upload_policy = deploy_cfg.get(config.DEPLOYCFG_SETTING_UPLOAD_POLICY)
log(f"{funcname}(): upload policy '{upload_policy}'")
if upload_policy == "none":
@@ -568,14 +564,13 @@ def deploy_built_artefacts(pr, event_info):
# 3) for the successful ones, determine which to deploy depending on
# the upload policy
- to_be_deployed = determine_tarballs_to_deploy(successes, upload_policy)
+ to_be_deployed = determine_artefacts_to_deploy(successes, upload_policy)
# 4) call function to deploy a single artefact per software subdir
- # - update PR comments (look for comments with build-ts.tar.gz)
repo_name = pr.base.repo.full_name
- for target, job in to_be_deployed.items():
+ for payload, job in to_be_deployed.items():
job_dir = job['job_dir']
timestamp = job['timestamp']
pr_comment_id = job['pr_comment_id']
- upload_tarball(job_dir, target, timestamp, repo_name, pr.number, pr_comment_id)
+ upload_artefact(job_dir, payload, timestamp, repo_name, pr.number, pr_comment_id)
diff --git a/tests/test_app.cfg b/tests/test_app.cfg
index 5ea87b6e..f940c1df 100644
--- a/tests/test_app.cfg
+++ b/tests/test_app.cfg
@@ -1,3 +1,14 @@
+# This file is part of the EESSI build-and-deploy bot,
+# see https://github.com/EESSI/eessi-bot-software-layer
+#
+# The bot helps with requests to add software installations to the
+# EESSI software layer, see https://github.com/EESSI/software-layer
+#
+# author: Thomas Roeblitz (@trz42)
+#
+# license: GPLv2
+#
+
# sample config file for tests (some functions run config.read_config()
# which reads app.cfg by default)
[job_manager]
@@ -14,11 +25,3 @@ awaits_lauch = job awaits launch by Slurm scheduler
running_job = job `{job_id}` is running
[finished_job_comments]
-success = :grin: SUCCESS tarball `{tarball_name}` ({tarball_size} GiB) in job dir
-failure = :cry: FAILURE
-no_slurm_out = No slurm output `{slurm_out}` in job dir
-slurm_out = Found slurm output `{slurm_out}` in job dir
-missing_modules = Slurm output lacks message "No missing modules!".
-no_tarball_message = Slurm output lacks message about created tarball.
-no_matching_tarball = No tarball matching `{tarball_pattern}` found in job dir.
-multiple_tarballs = Found {num_tarballs} tarballs in job dir - only 1 matching `{tarball_pattern}` expected.
diff --git a/tests/test_eessi_bot_job_manager.py b/tests/test_eessi_bot_job_manager.py
index bd492919..5c5a9c05 100644
--- a/tests/test_eessi_bot_job_manager.py
+++ b/tests/test_eessi_bot_job_manager.py
@@ -1,4 +1,4 @@
-# Tests for 'job managaer' task of the EESSI build-and-deploy bot,
+# Tests for 'job manager' task of the EESSI build-and-deploy bot,
# see https://github.com/EESSI/eessi-bot-software-layer
#
# The bot helps with requests to add software installations to the
@@ -7,6 +7,7 @@
# author: Kenneth Hoste (@boegel)
# author: Hafsa Naeem (@hafsa-naeem)
# author: Jonas Qvigstad (@jonas-lq)
+# author: Thomas Roeblitz (@trz42)
#
# license: GPLv2
#
diff --git a/tests/test_task_build.py b/tests/test_task_build.py
index ea0a6692..6f79fc74 100644
--- a/tests/test_task_build.py
+++ b/tests/test_task_build.py
@@ -4,9 +4,11 @@
# The bot helps with requests to add software installations to the
# EESSI software layer, see https://github.com/EESSI/software-layer
#
+# author: Bob Droege (@bedroge)
# author: Kenneth Hoste (@boegel)
# author: Hafsa Naeem (@hafsa-naeem)
# author: Jacob Ziemke (@jacobz137)
+# author: Pedro Santos Neves (@Neves-P)
# author: Thomas Roeblitz (@trz42)
#
# license: GPLv2
diff --git a/tests/test_tools_job_metadata.py b/tests/test_tools_job_metadata.py
index f5542c6f..0d788248 100644
--- a/tests/test_tools_job_metadata.py
+++ b/tests/test_tools_job_metadata.py
@@ -11,21 +11,21 @@
import os
-from tools.job_metadata import read_job_metadata_from_file
+from tools.job_metadata import get_section_from_file, JOB_PR_SECTION
-def test_read_job_metadata_from_file(tmpdir):
- logfile = os.path.join(tmpdir, 'test_read_job_metadata_from_file.log')
+def test_get_section_from_file(tmpdir):
+ logfile = os.path.join(tmpdir, 'test_get_section_from_file.log')
# if metadata file does not exist, we should get None as return value
path = os.path.join(tmpdir, 'test.metadata')
- assert read_job_metadata_from_file(path, logfile) is None
+ assert get_section_from_file(path, JOB_PR_SECTION, logfile) is None
with open(path, 'w') as fp:
fp.write('''[PR]
repo=test
pr_number=12345''')
- metadata_pr = read_job_metadata_from_file(path, logfile)
+ metadata_pr = get_section_from_file(path, JOB_PR_SECTION, logfile)
expected = {
"repo": "test",
"pr_number": "12345",
diff --git a/tests/test_tools_pr_comments.py b/tests/test_tools_pr_comments.py
index a20293d0..f89b3fd8 100644
--- a/tests/test_tools_pr_comments.py
+++ b/tests/test_tools_pr_comments.py
@@ -4,8 +4,8 @@
# The bot helps with requests to add software installations to the
# EESSI software layer, see https://github.com/EESSI/software-layer
#
-# author: Thomas Roeblitz (@trz42)
# author: Kenneth Hoste (@boegel)
+# author: Thomas Roeblitz (@trz42)
#
# license: GPLv2
#
diff --git a/tools/__init__.py b/tools/__init__.py
index e064db6e..640cae17 100644
--- a/tools/__init__.py
+++ b/tools/__init__.py
@@ -4,8 +4,8 @@
# The bot helps with requests to add software installations to the
# EESSI software layer, see https://github.com/EESSI/software-layer
#
-# author: Kenneth Hoste (@boegel)
# author: Bob Droege (@bedroge)
+# author: Kenneth Hoste (@boegel)
# author: Hafsa Naeem (@hafsa-naeem)
# author: Jacob Ziemke (@jacobz137)
# author: Thomas Roeblitz (@trz42)
diff --git a/tools/args.py b/tools/args.py
index a7ca01b2..27b62ab5 100644
--- a/tools/args.py
+++ b/tools/args.py
@@ -5,6 +5,7 @@
# EESSI software layer, see https://github.com/EESSI/software-layer
#
# author: Bob Droege (@bedroge)
+# author: Hafsa Naeem (@hafsa-naeem)
# author: Thomas Roeblitz (@trz42)
#
# license: GPLv2
diff --git a/tools/config.py b/tools/config.py
index ae9b5a43..dcffe03d 100644
--- a/tools/config.py
+++ b/tools/config.py
@@ -5,6 +5,11 @@
# EESSI software layer, see https://github.com/EESSI/software-layer
#
# author: Bob Droege (@bedroge)
+# author: Kenneth Hoste (@boegel)
+# author: Hafsa Naeem (@hafsa-naeem)
+# author: Jacob Ziemke (@jacobz137)
+# author: Jonas Qvigstad (@jonas-lq)
+# author: Thomas Roeblitz (@trz42)
#
# license: GPLv2
#
@@ -19,6 +24,87 @@
# Local application imports (anything from EESSI/eessi-bot-software-layer)
from .logging import error
+# define configuration constants
+# SECTION_sectionname for any section name in app.cfg
+# sectionname_SETTING_settingname for any setting with name settingname in
+# section sectionname
+SECTION_ARCHITECTURETARGETS = 'architecturetargets'
+ARCHITECTURETARGETS_SETTING_ARCH_TARGET_MAP = 'arch_target_map'
+
+SECTION_BOT_CONTROL = 'bot_control'
+BOT_CONTROL_SETTING_COMMAND_PERMISSION = 'command_permission'
+BOT_CONTROL_SETTING_COMMAND_RESPONSE_FMT = 'command_response_fmt'
+
+SECTION_BUILDENV = 'buildenv'
+BUILDENV_SETTING_BUILD_JOB_SCRIPT = 'build_job_script'
+BUILDENV_SETTING_BUILD_LOGS_DIR = 'build_logs_dir'
+BUILDENV_SETTING_BUILD_PERMISSION = 'build_permission'
+BUILDENV_SETTING_CONTAINER_CACHEDIR = 'container_cachedir'
+BUILDENV_SETTING_CVMFS_CUSTOMIZATIONS = 'cvmfs_customizations'
+BUILDENV_SETTING_HTTPS_PROXY = 'https_proxy'
+BUILDENV_SETTING_HTTP_PROXY = 'http_proxy'
+BUILDENV_SETTING_JOBS_BASE_DIR = 'jobs_base_dir'
+BUILDENV_SETTING_LOAD_MODULES = 'load_modules'
+BUILDENV_SETTING_LOCAL_TMP = 'local_tmp'
+BUILDENV_SETTING_NO_BUILD_PERMISSION_COMMENT = 'no_build_permission_comment'
+BUILDENV_SETTING_SHARED_FS_PATH = 'shared_fs_path'
+BUILDENV_SETTING_SLURM_PARAMS = 'slurm_params'
+BUILDENV_SETTING_SUBMIT_COMMAND = 'submit_command'
+
+SECTION_DEPLOYCFG = 'deploycfg'
+DEPLOYCFG_SETTING_ARTEFACT_PREFIX = 'artefact_prefix'
+DEPLOYCFG_SETTING_ARTEFACT_UPLOAD_SCRIPT = 'artefact_upload_script'
+DEPLOYCFG_SETTING_BUCKET_NAME = 'bucket_name'
+DEPLOYCFG_SETTING_DEPLOY_PERMISSION = 'deploy_permission'
+DEPLOYCFG_SETTING_ENDPOINT_URL = 'endpoint_url'
+DEPLOYCFG_SETTING_METADATA_PREFIX = 'metadata_prefix'
+DEPLOYCFG_SETTING_NO_DEPLOY_PERMISSION_COMMENT = 'no_deploy_permission_comment'
+DEPLOYCFG_SETTING_UPLOAD_POLICY = 'upload_policy'
+
+SECTION_DOWNLOAD_PR_COMMENTS = 'download_pr_comments'
+DOWNLOAD_PR_COMMENTS_SETTING_CURL_FAILURE = 'curl_failure'
+DOWNLOAD_PR_COMMENTS_SETTING_CURL_TIP = 'curl_tip'
+DOWNLOAD_PR_COMMENTS_SETTING_GIT_APPLY_FAILURE = 'git_apply_failure'
+DOWNLOAD_PR_COMMENTS_SETTING_GIT_APPLY_TIP = 'git_apply_tip'
+DOWNLOAD_PR_COMMENTS_SETTING_GIT_CHECKOUT_FAILURE = 'git_checkout_failure'
+DOWNLOAD_PR_COMMENTS_SETTING_GIT_CHECKOUT_TIP = 'git_checkout_tip'
+DOWNLOAD_PR_COMMENTS_SETTING_GIT_CLONE_FAILURE = 'git_clone_failure'
+DOWNLOAD_PR_COMMENTS_SETTING_GIT_CLONE_TIP = 'git_clone_tip'
+
+SECTION_EVENT_HANDLER = 'event_handler'
+EVENT_HANDLER_SETTING_LOG_PATH = 'log_path'
+
+SECTION_FINISHED_JOB_COMMENTS = 'finished_job_comments'
+FINISHED_JOB_COMMENTS_SETTING_JOB_RESULT_UNKNOWN_FMT = 'job_result_unknown_fmt'
+FINISHED_JOB_COMMENTS_SETTING_JOB_TEST_UNKNOWN_FMT = 'job_test_unknown_fmt'
+
+SECTION_GITHUB = 'github'
+GITHUB_SETTING_APP_ID = 'app_id'
+GITHUB_SETTING_APP_NAME = 'app_name'
+GITHUB_SETTING_INSTALLATION_ID = 'installation_id'
+GITHUB_SETTING_PRIVATE_KEY = 'private_key'
+
+SECTION_JOB_MANAGER = 'job_manager'
+JOB_MANAGER_SETTING_LOG_PATH = 'log_path'
+JOB_MANAGER_SETTING_JOB_IDS_DIR = 'job_ids_dir'
+JOB_MANAGER_SETTING_POLL_COMMAND = 'poll_command'
+JOB_MANAGER_SETTING_POLL_INTERVAL = 'poll_interval'
+JOB_MANAGER_SETTING_SCONTROL_COMMAND = 'scontrol_command'
+
+SECTION_NEW_JOB_COMMENTS = 'new_job_comments'
+NEW_JOB_COMMENTS_SETTING_AWAITS_LAUNCH = 'awaits_launch'
+
+SECTION_REPO_TARGETS = 'repo_targets'
+REPO_TARGETS_SETTING_REPO_TARGET_MAP = 'repo_target_map'
+REPO_TARGETS_SETTING_REPOS_CFG_DIR = 'repos_cfg_dir'
+
+SECTION_RUNNING_JOB_COMMENTS = 'running_job_comments'
+RUNNING_JOB_COMMENTS_SETTING_RUNNING_JOB = 'running_job'
+
+SECTION_SUBMITTED_JOB_COMMENTS = 'submitted_job_comments'
+SUBMITTED_JOB_COMMENTS_SETTING_INITIAL_COMMENT = 'initial_comment'
+SUBMITTED_JOB_COMMENTS_SETTING_AWAITS_RELEASE = 'awaits_release'
+
def read_config(path='app.cfg'):
"""
@@ -65,3 +151,4 @@ def check_required_cfg_settings(req_settings, path="app.cfg"):
for item in req_settings[section]:
if item not in cfg[section]:
error(f'Missing configuration item "{item}" in section "{section}" of configuration file {path}.')
+ return True
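
A short sketch of how the constants defined above in `tools/config.py` are meant to be used instead of string literals; it assumes the bot's repository root is on the Python path and that an `app.cfg` with a `[buildenv]` section is present.

```
# Sketch of the intended usage pattern; assumes an app.cfg with a [buildenv]
# section can be read from the current working directory.
from tools import config

cfg = config.read_config()  # reads app.cfg by default
buildenv = cfg[config.SECTION_BUILDENV]
build_permission = buildenv.get(config.BUILDENV_SETTING_BUILD_PERMISSION, '')
print(f"accounts with build permission: {build_permission.split()}")
```
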
diff --git a/tools/cvmfs_repository.py b/tools/cvmfs_repository.py
new file mode 100644
index 00000000..a0bec847
--- /dev/null
+++ b/tools/cvmfs_repository.py
@@ -0,0 +1,37 @@
+# This file is part of the EESSI build-and-deploy bot,
+# see https://github.com/EESSI/eessi-bot-software-layer
+#
+# The bot helps with requests to add software installations to the
+# EESSI software layer, see https://github.com/EESSI/software-layer
+#
+# author: Thomas Roeblitz (@trz42)
+#
+# license: GPLv2
+#
+
+# Standard library imports
+# (none yet)
+
+# Third party imports (anything installed into the local Python environment)
+# (none yet)
+
+# Local application imports (anything from EESSI/eessi-bot-software-layer)
+# (none yet)
+
+
+# Constants for settings in JOB_WORKING_DIRECTORY/cfg/repos.cfg
+#
+# Access to a CernVM-FS repository is defined via a repos.cfg file and associated
+# tarballs containing configuration settings per repository.
+#
+# Below, we define constants for the settings of each repository.
+#
+# Note, we do not define a constant for the section name, because for every
+# repository we will use a different section name. For example, '[eessi-2023.06]'
+# would define a section with name 'eessi-2023.06'.
+#
+REPOS_CFG_CONFIG_BUNDLE = "config_bundle"
+REPOS_CFG_CONFIG_MAP = "config_map"
+REPOS_CFG_CONTAINER = "container"
+REPOS_CFG_REPO_NAME = "repo_name"
+REPOS_CFG_REPO_VERSION = "repo_version"
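
To make the constants above concrete, a sketch of a `repos.cfg` with one section per repository; the section name and all values are invented, only the option names follow the constants defined in this file.

```
# Hypothetical repos.cfg contents; option names follow tools/cvmfs_repository.py,
# the section name and values are made up.
example_repos_cfg = """\
[eessi.io-2023.06]
repo_name = software.eessi.io
repo_version = 2023.06
config_bundle = eessi.io-cfg-files.tgz
config_map = {"eessi.io.pub": "/etc/cvmfs/keys/eessi.io/eessi.io.pub"}
container = docker://ghcr.io/eessi/build-node:debian11
"""
print(example_repos_cfg)
```
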
diff --git a/tools/job_metadata.py b/tools/job_metadata.py
index b8cd4f0d..286f71cb 100644
--- a/tools/job_metadata.py
+++ b/tools/job_metadata.py
@@ -21,6 +21,59 @@
# (none yet)
+# the job's working directory (JWD) and subdirectories may contain various
+# files storing metadata for a job
+# below, we define constants for sections and 'settings' in these files
+#
+# job config directory name and filename
+JOB_CFG_DIRECTORY_NAME = "cfg"
+JOB_CFG_FILENAME = "job.cfg"
+
+# JWD/cfg/$JOB_CFG_FILENAME
+JOB_CFG_ARCHITECTURE_SECTION = "architecture"
+JOB_CFG_ARCHITECTURE_OS_TYPE = "os_type"
+JOB_CFG_ARCHITECTURE_SOFTWARE_SUBDIR = "software_subdir"
+
+JOB_CFG_REPOSITORY_SECTION = "repository"
+JOB_CFG_REPOSITORY_CONTAINER = "container"
+JOB_CFG_REPOSITORY_REPOS_CFG_DIR = "repos_cfg_dir"
+JOB_CFG_REPOSITORY_REPO_ID = "repo_id"
+JOB_CFG_REPOSITORY_REPO_NAME = "repo_name"
+JOB_CFG_REPOSITORY_REPO_VERSION = "repo_version"
+
+JOB_CFG_SITE_CONFIG_SECTION = "site_config"
+JOB_CFG_SITE_CONFIG_BUILD_LOGS_DIR = "build_logs_dir"
+JOB_CFG_SITE_CONFIG_CONTAINER_CACHEDIR = "container_cachedir"
+JOB_CFG_SITE_CONFIG_HTTP_PROXY = "http_proxy"
+JOB_CFG_SITE_CONFIG_HTTPS_PROXY = "https_proxy"
+JOB_CFG_SITE_CONFIG_LOAD_MODULES = "load_modules"
+JOB_CFG_SITE_CONFIG_LOCAL_TMP = "local_tmp"
+JOB_CFG_SITE_CONFIG_SHARED_FS_PATH = "shared_fs_path"
+
+# JWD/_bot_jobJOBID.metadata
+JOB_PR_SECTION = "PR"
+JOB_PR_REPO = "repo"
+JOB_PR_PR_NUMBER = "pr_number"
+JOB_PR_PR_COMMENT_ID = "pr_comment_id"
+
+# JWD/_bot_jobJOBID.result
+JOB_RESULT_SECTION = "RESULT"
+# constants representing settings
+JOB_RESULT_ARTEFACTS = "artefacts"
+JOB_RESULT_COMMENT_DESCRIPTION = "comment_description"
+JOB_RESULT_STATUS = "status"
+# constants representing values for JOB_RESULT_STATUS (the values of these
+# constants need to correspond to what the `bot/check-build.sh` script uses when
+# writing the _bot_jobJOBID.result file)
+JOB_RESULT_FAILURE = "FAILURE"
+JOB_RESULT_SUCCESS = "SUCCESS"
+
+# JWD/_bot_jobJOBID.test
+JOB_TEST_SECTION = "TEST"
+JOB_TEST_COMMENT_DESCRIPTION = "comment_description"
+JOB_TEST_STATUS = "status"
+
+
def create_metadata_file(job, job_id, pr_comment):
"""
Create job metadata file in job working directory
@@ -41,15 +94,63 @@ def create_metadata_file(job, job_id, pr_comment):
# create _bot_job.metadata file in the job's working directory
bot_jobfile = configparser.ConfigParser()
- bot_jobfile['PR'] = {'repo': repo_name,
- 'pr_number': pr_number,
- 'pr_comment_id': pr_comment_id}
+ bot_jobfile[JOB_PR_SECTION] = {'repo': repo_name,
+ 'pr_number': pr_number,
+ 'pr_comment_id': pr_comment_id}
bot_jobfile_path = os.path.join(job.working_dir, f'_bot_job{job_id}.metadata')
with open(bot_jobfile_path, 'w') as bjf:
bot_jobfile.write(bjf)
log(f"{fn}(): created job metadata file {bot_jobfile_path}")
+def determine_job_id_from_job_directory(job_directory, log_file=None):
+ """
+ Determine job id from a job directory.
+
+ Args:
+ job_directory (string): path to job directory
+ log_file (string): path to log file
+
+ Returns:
+ (int): job id or 0
+ """
+ # job id could be found in
+ # - current directory name
+ # - part of a 'slurm-JOB_ID.out' file name
+ # - part of a '_bot_jobJOB_ID.metadata' file
+ # For now we just use the first alternative.
+ job_dir_basename = os.path.basename(job_directory)
+ from_dir_job_id = 0
+ if job_dir_basename.replace('.', '', 1).isdigit():
+ from_dir_job_id = int(job_dir_basename)
+ return from_dir_job_id
+
+
+def get_section_from_file(filepath, section, log_file=None):
+ """
+ Read filepath (ini/cfg format) and return contents of a section.
+
+ Args:
+ filepath (string): path to a metadata file
+ section (string): name of the section to obtain contents for
+ log_file (string): path to log file
+
+ Returns:
+ (dict-like): contents of the requested section (empty if the section is missing), or None if the file could not be read
+ """
+ # reuse read_metadata_file (defined below) to read the metadata file
+ section_contents = None
+ metadata = read_metadata_file(filepath, log_file=log_file)
+ if metadata:
+ # get section
+ if section in metadata:
+ section_contents = metadata[section]
+ else:
+ section_contents = {}
+
+ return section_contents
+
+
def read_metadata_file(metadata_path, log_file=None):
"""
Read metadata file into ConfigParser instance
@@ -80,28 +181,3 @@ def read_metadata_file(metadata_path, log_file=None):
else:
log(f"No metadata file found at {metadata_path}.", log_file)
return None
-
-
-def read_job_metadata_from_file(filepath, log_file=None):
- """
- Read job metadata from file
-
- Args:
- filepath (string): path to job metadata file
- log_file (string): path to log file
-
- Returns:
- job_metadata (dict): dictionary containing job metadata or None
- """
-
- metadata = read_metadata_file(filepath, log_file=log_file)
- if metadata:
- # get PR section
- if "PR" in metadata:
- metadata_pr = metadata["PR"]
- else:
- metadata_pr = {}
- return metadata_pr
- else:
- log(f"Metadata file '{filepath}' does not exist or could not be read")
- return None
diff --git a/tools/logging.py b/tools/logging.py
index 76e2c066..f5a46c3d 100644
--- a/tools/logging.py
+++ b/tools/logging.py
@@ -5,6 +5,7 @@
# EESSI software layer, see https://github.com/EESSI/software-layer
#
# author: Bob Droege (@bedroge)
+# author: Thomas Roeblitz (@trz42)
#
# license: GPLv2
#
diff --git a/tools/permissions.py b/tools/permissions.py
index 59030b05..1b7f40df 100644
--- a/tools/permissions.py
+++ b/tools/permissions.py
@@ -18,9 +18,6 @@
# Local application imports (anything from EESSI/eessi-bot-software-layer)
from tools import config
-BOT_CONTROL = "bot_control"
-COMMAND_PERMISSION = "command_permission"
-
def check_command_permission(account):
"""
@@ -38,10 +35,10 @@ def check_command_permission(account):
cfg = config.read_config()
- bot_ctrl = cfg[BOT_CONTROL]
+ bot_ctrl = cfg[config.SECTION_BOT_CONTROL]
# read command permission from configuration (defined in file app.cfg)
- command_permission = bot_ctrl.get(COMMAND_PERMISSION, '')
+ command_permission = bot_ctrl.get(config.BOT_CONTROL_SETTING_COMMAND_PERMISSION, '')
log(f"{fn}(): command permission '{command_permission}'")
diff --git a/tools/pr_comments.py b/tools/pr_comments.py
index 1b391ed7..f74bbbf2 100644
--- a/tools/pr_comments.py
+++ b/tools/pr_comments.py
@@ -4,11 +4,11 @@
# The bot helps with requests to add software installations to the
# EESSI software layer, see https://github.com/EESSI/software-layer
#
-# author: Kenneth Hoste (@boegel)
# author: Bob Droege (@bedroge)
+# author: Kenneth Hoste (@boegel)
# author: Hafsa Naeem (@hafsa-naeem)
-# author: Thomas Roeblitz (@trz42)
# author: Jonas Qvigstad (@jonas-lq)
+# author: Thomas Roeblitz (@trz42)
#
# license: GPLv2
#