From 0159bc7a82a24090aa5df650d52509b95364a894 Mon Sep 17 00:00:00 2001
From: Yang Chiu
Date: Tue, 26 Sep 2023 11:50:10 +0800
Subject: [PATCH] test: implement backupstore setup

(1) add backupstore.py for robot test cases
(2) only support s3 now, the subprocess parts in backupstore.py need to be
    refined to make nfs work
(3) fix wrong longhorn client url issue when using it out-of-cluster

Signed-off-by: Yang Chiu
---
 e2e/keywords/common.resource                 |   2 +
 e2e/libs/backupstore.py                      | 736 +++++++++++++++++++
 e2e/libs/keywords/common_keywords.py         |   8 +
 e2e/libs/longhorn.py                         |   2 +
 e2e/libs/recurring_job/rest.py               |  55 +-
 e2e/requirements.txt                         |   1 +
 pipelines/utilities/run_longhorn_e2e_test.sh |   8 +
 7 files changed, 784 insertions(+), 28 deletions(-)
 create mode 100644 e2e/libs/backupstore.py

diff --git a/e2e/keywords/common.resource b/e2e/keywords/common.resource
index 078991263a..e0d93dce4f 100644
--- a/e2e/keywords/common.resource
+++ b/e2e/keywords/common.resource
@@ -21,6 +21,7 @@ Set test environment
     Set Test Variable    ${deployment_list}
     @{statefulset_list} =    Create List
     Set Test Variable    ${statefulset_list}
+    set_backupstore

 Cleanup test resources
     cleanup_node_exec
@@ -29,3 +30,4 @@ Cleanup test resources
     cleanup_deployments    ${deployment_list}
     cleanup_statefulsets    ${statefulset_list}
     cleanup_storageclasses
+    cleanup_backupstore
diff --git a/e2e/libs/backupstore.py b/e2e/libs/backupstore.py
new file mode 100644
index 0000000000..6ce1b32983
--- /dev/null
+++ b/e2e/libs/backupstore.py
@@ -0,0 +1,736 @@
+import os
+import time
+import base64
+import hashlib
+import json
+import subprocess
+from minio import Minio
+from minio.error import ResponseError
+from urllib.parse import urlparse
+
+LONGHORN_NAMESPACE = "longhorn-system"
+
+SETTING_BACKUP_TARGET = "backup-target"
+SETTING_BACKUP_TARGET_CREDENTIAL_SECRET = "backup-target-credential-secret"
+SETTING_BACKUPSTORE_POLL_INTERVAL = "backupstore-poll-interval"
+
+BACKUPSTORE_BV_PREFIX = "/backupstore/volumes/"
+BACKUPSTORE_LOCK_DURATION = 150
+
+TEMP_FILE_PATH = "/tmp/temp_file"
+
+RETRY_COUNT = 300
+RETRY_INTERVAL = 1
+
+
+def is_backupTarget_s3(s):
+    return s.startswith("s3://")
+
+
+def is_backupTarget_nfs(s):
+    return s.startswith("nfs://")
+
+
+def get_backupstore_url():
+    backupstore = os.environ['LONGHORN_BACKUPSTORES']
+    backupstore = backupstore.replace(" ", "")
+    backupstores = backupstore.split(",")
+
+    assert len(backupstores) != 0
+    return backupstores
+
+
+def get_backupstore_poll_interval():
+    poll_interval = os.environ['LONGHORN_BACKUPSTORE_POLL_INTERVAL']
+    assert len(poll_interval) != 0
+    return poll_interval
+
+
+def get_backupstores():
+    # The try is added to avoid the pdoc3 error while publishing this on
+    # https://longhorn.github.io/longhorn-tests
+    try:
+        backupstore = os.environ['LONGHORN_BACKUPSTORES']
+    except KeyError:
+        return []
+
+    # reduce each comma-separated entry to its scheme ("s3" or "nfs");
+    # str.split() never raises ValueError, so no try/except is needed here
+    backupstore = backupstore.replace(" ", "")
+    backupstores = backupstore.split(",")
+    for i in range(len(backupstores)):
+        backupstores[i] = backupstores[i].split(":")[0]
+    return backupstores
+
+
+def set_backupstore(client):
+    backupstores = get_backupstores()
+    if backupstores[0] == "s3":
+        set_backupstore_s3(client)
+    else:
+        set_backupstore_nfs(client)
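For reference, set_backupstore() dispatches on the scheme that get_backupstores() parses out of LONGHORN_BACKUPSTORES. A minimal sketch of that parsing, using the s3 value this patch exports in run_longhorn_e2e_test.sh:

```python
import os

# Value exported for s3 by run_longhorn_e2e_test.sh later in this patch.
os.environ['LONGHORN_BACKUPSTORES'] = \
    's3://backupbucket@us-east-1/backupstore$minio-secret'

backupstore = os.environ['LONGHORN_BACKUPSTORES'].replace(" ", "")
schemes = [entry.split(":")[0] for entry in backupstore.split(",")]
print(schemes)  # ['s3'] -> set_backupstore() calls set_backupstore_s3()
```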
+
+
+def reset_backupstore_setting(client):
+    backup_target_setting = client.by_id_setting(SETTING_BACKUP_TARGET)
+    client.update(backup_target_setting, value="")
+    backup_target_credential_setting = client.by_id_setting(
+        SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)
+    client.update(backup_target_credential_setting, value="")
+    backup_store_poll_interval = client.by_id_setting(
+        SETTING_BACKUPSTORE_POLL_INTERVAL)
+    client.update(backup_store_poll_interval, value="300")
+
+
+def set_backupstore_s3(client):
+    backupstores = get_backupstore_url()
+    poll_interval = get_backupstore_poll_interval()
+    for backupstore in backupstores:
+        if is_backupTarget_s3(backupstore):
+            backupsettings = backupstore.split("$")
+            set_backupstore_url(client, backupsettings[0])
+            set_backupstore_credential_secret(client, backupsettings[1])
+            set_backupstore_poll_interval(client, poll_interval)
+            break
+
+
+def set_backupstore_nfs(client):
+    backupstores = get_backupstore_url()
+    poll_interval = get_backupstore_poll_interval()
+    for backupstore in backupstores:
+        if is_backupTarget_nfs(backupstore):
+            set_backupstore_url(client, backupstore)
+            set_backupstore_credential_secret(client, "")
+            set_backupstore_poll_interval(client, poll_interval)
+            mount_nfs_backupstore(client)
+            break
+
+
+def set_backupstore_url(client, url):
+    backup_target_setting = client.by_id_setting(SETTING_BACKUP_TARGET)
+    backup_target_setting = client.update(backup_target_setting,
+                                          value=url)
+    assert backup_target_setting.value == url
+
+
+def set_backupstore_credential_secret(client, credential_secret):
+    backup_target_credential_setting = client.by_id_setting(
+        SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)
+    backup_target_credential_setting = client.update(
+        backup_target_credential_setting, value=credential_secret)
+    assert backup_target_credential_setting.value == credential_secret
+
+
+def set_backupstore_poll_interval(client, poll_interval):
+    backup_store_poll_interval_setting = client.by_id_setting(
+        SETTING_BACKUPSTORE_POLL_INTERVAL)
+    backup_target_poll_interval_setting = client.update(
+        backup_store_poll_interval_setting, value=poll_interval)
+    assert backup_target_poll_interval_setting.value == poll_interval
+
+
+def mount_nfs_backupstore(client, mount_path="/mnt/nfs"):
+    cmd = ["mkdir", "-p", mount_path]
+    subprocess.check_output(cmd)
+    nfs_backuptarget = client.by_id_setting(SETTING_BACKUP_TARGET).value
+    nfs_url = urlparse(nfs_backuptarget).netloc + \
+        urlparse(nfs_backuptarget).path
+    cmd = ["mount", "-t", "nfs", "-o", "nfsvers=4.2", nfs_url, mount_path]
+    subprocess.check_output(cmd)
+
+
+def umount_nfs_backupstore(client, mount_path="/mnt/nfs"):
+    cmd = ["umount", mount_path]
+    subprocess.check_output(cmd)
+    cmd = ["rmdir", mount_path]
+    subprocess.check_output(cmd)
+
+
+def backupstore_cleanup(client):
+    backup_volumes = client.list_backup_volume()
+
+    # we delete the whole backup volume, which skips block gc
+    for backup_volume in backup_volumes:
+        delete_backup_volume(client, backup_volume.name)
+
+    backup_volumes = client.list_backup_volume()
+    assert backup_volumes.data == []
+
+    system_backups_cleanup(client)
+    reset_backupstore_setting(client)
+
+    if get_backupstores()[0] == "nfs":
+        umount_nfs_backupstore(client)
+
+
+def minio_get_api_client(client, core_api, minio_secret_name):
+    secret = core_api.read_namespaced_secret(name=minio_secret_name,
+                                             namespace=LONGHORN_NAMESPACE)
+
+    base64_minio_access_key = secret.data['AWS_ACCESS_KEY_ID']
+    base64_minio_secret_key = secret.data['AWS_SECRET_ACCESS_KEY']
+    base64_minio_endpoint_url = secret.data['AWS_ENDPOINTS']
+    base64_minio_cert = secret.data['AWS_CERT']
+
+    minio_access_key = \
+        base64.b64decode(base64_minio_access_key).decode("utf-8")
+    minio_secret_key = \
+        base64.b64decode(base64_minio_secret_key).decode("utf-8")
+
+    minio_endpoint_url = \
+        base64.b64decode(base64_minio_endpoint_url).decode("utf-8")
+    minio_endpoint_url = minio_endpoint_url.replace('https://', '')
+
+    minio_cert_file_path = "/tmp/minio_cert.crt"
+    with open(minio_cert_file_path, 'w') as minio_cert_file:
+        base64_minio_cert = \
+            base64.b64decode(base64_minio_cert).decode("utf-8")
+        minio_cert_file.write(base64_minio_cert)
+
+    os.environ["SSL_CERT_FILE"] = minio_cert_file_path
+
+    return Minio(minio_endpoint_url,
+                 access_key=minio_access_key,
+                 secret_key=minio_secret_key,
+                 secure=True)
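The helper above decodes the minio-secret fields before building the client; a small sketch of that decode step with hypothetical secret values (the field names match the ones read by minio_get_api_client()):

```python
import base64

# Hypothetical secret payload; the real one is read from the minio-secret
# Kubernetes secret by minio_get_api_client() above.
data = {
    'AWS_ACCESS_KEY_ID': base64.b64encode(b'longhorn-test-access-key'),
    'AWS_ENDPOINTS': base64.b64encode(b'https://minio-service.default:9000'),
}

endpoint = base64.b64decode(data['AWS_ENDPOINTS']).decode("utf-8")
# Minio() wants host:port without a scheme, hence the replace() above.
print(endpoint.replace('https://', ''))  # minio-service.default:9000
```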
+
+
+def minio_get_backupstore_bucket_name(client):
+    backupstore = backupstore_get_backup_target(client)
+
+    assert is_backupTarget_s3(backupstore)
+    bucket_name = urlparse(backupstore).netloc.split('@')[0]
+    return bucket_name
+
+
+def minio_get_backupstore_path(client):
+    backupstore = backupstore_get_backup_target(client)
+    assert is_backupTarget_s3(backupstore)
+    backupstore_path = urlparse(backupstore).path.split('$')[0].strip("/")
+    return backupstore_path
+
+
+def get_nfs_mount_point(client):
+    nfs_backuptarget = client.by_id_setting(SETTING_BACKUP_TARGET).value
+    nfs_url = urlparse(nfs_backuptarget).netloc + \
+        urlparse(nfs_backuptarget).path
+
+    cmd = ["findmnt", "-t", "nfs4", "-n", "--output", "source,target"]
+    stdout = subprocess.run(cmd, capture_output=True).stdout
+    mount_info = stdout.decode().strip().split(" ")
+
+    assert mount_info[0] == nfs_url
+    return mount_info[1]
+
+
+def backup_volume_path(volume_name):
+    volume_name_sha512 = \
+        hashlib.sha512(volume_name.encode('utf-8')).hexdigest()
+
+    volume_dir_level_1 = volume_name_sha512[0:2]
+    volume_dir_level_2 = volume_name_sha512[2:4]
+
+    backupstore_bv_path = BACKUPSTORE_BV_PREFIX + \
+        volume_dir_level_1 + "/" + \
+        volume_dir_level_2 + "/" + \
+        volume_name
+
+    return backupstore_bv_path
+
+
+def backupstore_get_backup_volume_prefix(client, volume_name):
+    backupstore = backupstore_get_backup_target(client)
+
+    if is_backupTarget_s3(backupstore):
+        return minio_get_backup_volume_prefix(client, volume_name)
+
+    elif is_backupTarget_nfs(backupstore):
+        return nfs_get_backup_volume_prefix(client, volume_name)
+
+    else:
+        raise NotImplementedError
+
+
+def minio_get_backup_volume_prefix(client, volume_name):
+    backupstore_bv_path = backup_volume_path(volume_name)
+    backupstore_path = minio_get_backupstore_path(client)
+    return backupstore_path + backupstore_bv_path
+
+
+def nfs_get_backup_volume_prefix(client, volume_name):
+    mount_point = get_nfs_mount_point(client)
+    return mount_point + backup_volume_path(volume_name)
+
+
+def backupstore_get_backup_target(client):
+    backup_target_setting = client.by_id_setting(SETTING_BACKUP_TARGET)
+    return backup_target_setting.value
+
+
+def backupstore_get_secret(client):
+    backup_target_credential_setting = client.by_id_setting(
+        SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)
+
+    return backup_target_credential_setting.value
+
+
+def backupstore_get_backup_cfg_file_path(client, volume_name, backup_name):
+    backupstore = backupstore_get_backup_target(client)
+
+    if is_backupTarget_s3(backupstore):
+        return minio_get_backup_cfg_file_path(client, volume_name,
+                                              backup_name)
+
+    elif is_backupTarget_nfs(backupstore):
+        return nfs_get_backup_cfg_file_path(client, volume_name, backup_name)
+
+    else:
+        raise NotImplementedError
+
+
+def minio_get_backup_cfg_file_path(client, volume_name, backup_name):
+    prefix = minio_get_backup_volume_prefix(client, volume_name)
+    return prefix + "/backups/backup_" + backup_name + ".cfg"
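For a sense of the layout backup_volume_path() produces: the first two byte pairs of the SHA-512 digest become two directory levels, spreading backup volumes across the store. A short worked sketch (the volume name is hypothetical):

```python
import hashlib

volume_name = "pvc-example"  # hypothetical volume name
digest = hashlib.sha512(volume_name.encode('utf-8')).hexdigest()

# Two directory levels from the digest: /backupstore/volumes/<xx>/<yy>/<name>
path = "/backupstore/volumes/" + digest[0:2] + "/" + digest[2:4] + "/" + volume_name
print(path)
```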
+
+
+def nfs_get_backup_cfg_file_path(client, volume_name, backup_name):
+    prefix = nfs_get_backup_volume_prefix(client, volume_name)
+    return prefix + "/backups/backup_" + backup_name + ".cfg"
+
+
+def backupstore_get_volume_cfg_file_path(client, volume_name):
+    backupstore = backupstore_get_backup_target(client)
+
+    if is_backupTarget_s3(backupstore):
+        return minio_get_volume_cfg_file_path(client, volume_name)
+
+    elif is_backupTarget_nfs(backupstore):
+        return nfs_get_volume_cfg_file_path(client, volume_name)
+
+    else:
+        raise NotImplementedError
+
+
+def nfs_get_volume_cfg_file_path(client, volume_name):
+    prefix = nfs_get_backup_volume_prefix(client, volume_name)
+    return prefix + "/volume.cfg"
+
+
+def minio_get_volume_cfg_file_path(client, volume_name):
+    prefix = minio_get_backup_volume_prefix(client, volume_name)
+    return prefix + "/volume.cfg"
+
+
+def backupstore_get_backup_blocks_dir(client, volume_name):
+    backupstore = backupstore_get_backup_target(client)
+
+    if is_backupTarget_s3(backupstore):
+        return minio_get_backup_blocks_dir(client, volume_name)
+
+    elif is_backupTarget_nfs(backupstore):
+        return nfs_get_backup_blocks_dir(client, volume_name)
+
+    else:
+        raise NotImplementedError
+
+
+def minio_get_backup_blocks_dir(client, volume_name):
+    prefix = minio_get_backup_volume_prefix(client, volume_name)
+    return prefix + "/blocks"
+
+
+def nfs_get_backup_blocks_dir(client, volume_name):
+    prefix = nfs_get_backup_volume_prefix(client, volume_name)
+    return prefix + "/blocks"
+
+
+def backupstore_create_file(client, core_api, file_path, data={}):
+    backup_target_setting = client.by_id_setting(SETTING_BACKUP_TARGET)
+    backupstore = backup_target_setting.value
+
+    if is_backupTarget_s3(backupstore):
+        return minio_create_file_in_backupstore(client,
+                                                core_api,
+                                                file_path,
+                                                data)
+    elif is_backupTarget_nfs(backupstore):
+        return nfs_create_file_in_backupstore(file_path, data=data)
+
+    else:
+        raise NotImplementedError
+
+
+def minio_create_file_in_backupstore(client, core_api, file_path, data={}): # NOQA
+    backup_target_credential_setting = client.by_id_setting(
+        SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)
+
+    secret_name = backup_target_credential_setting.value
+
+    minio_api = minio_get_api_client(client, core_api, secret_name)
+    bucket_name = minio_get_backupstore_bucket_name(client)
+
+    if len(data) == 0:
+        data = {"testkey":
+                "test data from minio_create_file_in_backupstore()"}
+
+    with open(TEMP_FILE_PATH, 'w') as temp_file:
+        json.dump(data, temp_file)
+
+    try:
+        with open(TEMP_FILE_PATH, 'rb') as temp_file:
+            temp_file_stat = os.stat(TEMP_FILE_PATH)
+            minio_api.put_object(bucket_name,
+                                 file_path,
+                                 temp_file,
+                                 temp_file_stat.st_size)
+    except ResponseError as err:
+        print(err)
+
+
+def nfs_create_file_in_backupstore(file_path, data={}):
+    with open(file_path, 'w') as cfg_file:
+        cfg_file.write(str(data))
+
+
+def backupstore_write_backup_cfg_file(client, core_api, volume_name, backup_name, data): # NOQA
+    backupstore = backupstore_get_backup_target(client)
+
+    if is_backupTarget_s3(backupstore):
+        minio_write_backup_cfg_file(client,
+                                    core_api,
+                                    volume_name,
+                                    backup_name,
+                                    data)
+
+    elif is_backupTarget_nfs(backupstore):
+        nfs_write_backup_cfg_file(client,
+                                  volume_name,
+                                  backup_name,
+                                  data)
+
+    else:
+        raise NotImplementedError
+
+
+def nfs_write_backup_cfg_file(client, volume_name, backup_name, data):
+    nfs_backup_cfg_file_path = nfs_get_backup_cfg_file_path(client,
+                                                            volume_name,
+                                                            backup_name)
+    with open(nfs_backup_cfg_file_path, 'w') as cfg_file:
+        cfg_file.write(str(data))
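A hypothetical usage sketch of the dispatching write helper above, as a test case would call it; `client` is the Longhorn API client and `core_api` a kubernetes.client.CoreV1Api instance:

```python
# Hypothetical test usage of backupstore_write_backup_cfg_file().
cfg = {"Name": "backup-abc",         # hypothetical backup name
       "VolumeName": "pvc-example",  # hypothetical volume name
       "CreatedTime": ""}

backupstore_write_backup_cfg_file(client, core_api,
                                  "pvc-example", "backup-abc", cfg)
```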
+
+
+def minio_write_backup_cfg_file(client, core_api, volume_name, backup_name, backup_cfg_data): # NOQA
+    secret_name = backupstore_get_secret(client)
+    assert secret_name != ''
+
+    minio_api = minio_get_api_client(client, core_api, secret_name)
+    bucket_name = minio_get_backupstore_bucket_name(client)
+    minio_backup_cfg_file_path = minio_get_backup_cfg_file_path(client,
+                                                                volume_name,
+                                                                backup_name)
+
+    tmp_backup_cfg_file = "/tmp/backup_" + backup_name + ".cfg"
+    with open(tmp_backup_cfg_file, 'w') as tmp_bkp_cfg_file:
+        tmp_bkp_cfg_file.write(str(backup_cfg_data))
+
+    try:
+        with open(tmp_backup_cfg_file, 'rb') as tmp_bkp_cfg_file:
+            tmp_bkp_cfg_file_stat = os.stat(tmp_backup_cfg_file)
+            minio_api.put_object(bucket_name,
+                                 minio_backup_cfg_file_path,
+                                 tmp_bkp_cfg_file,
+                                 tmp_bkp_cfg_file_stat.st_size)
+    except ResponseError as err:
+        print(err)
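The S3 writers above all share one upload pattern; a condensed sketch, assuming a minio 5.x client as returned by minio_get_api_client():

```python
import os
from minio.error import ResponseError

# Minimal sketch of the upload pattern used above: minio 5.x put_object()
# requires an explicit length, hence the os.stat() call before upload.
def upload_file(minio_api, bucket, object_path, local_path):
    try:
        with open(local_path, 'rb') as f:
            size = os.stat(local_path).st_size
            minio_api.put_object(bucket, object_path, f, size)
    except ResponseError as err:
        print(err)
```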
+
+
+def backupstore_delete_file(client, core_api, file_path):
+    backup_target_setting = client.by_id_setting(SETTING_BACKUP_TARGET)
+    backupstore = backup_target_setting.value
+
+    if is_backupTarget_s3(backupstore):
+        return minio_delete_file_in_backupstore(client,
+                                                core_api,
+                                                file_path)
+
+    elif is_backupTarget_nfs(backupstore):
+        return nfs_delete_file_in_backupstore(file_path)
+
+    else:
+        raise NotImplementedError
+
+
+def minio_delete_file_in_backupstore(client, core_api, file_path):
+    backup_target_credential_setting = client.by_id_setting(
+        SETTING_BACKUP_TARGET_CREDENTIAL_SECRET)
+
+    secret_name = backup_target_credential_setting.value
+
+    minio_api = minio_get_api_client(client, core_api, secret_name)
+    bucket_name = minio_get_backupstore_bucket_name(client)
+
+    try:
+        minio_api.remove_object(bucket_name, file_path)
+    except ResponseError as err:
+        print(err)
+
+
+def nfs_delete_file_in_backupstore(file_path):
+    try:
+        os.remove(file_path)
+    except Exception as ex:
+        print("error while deleting file:",
+              file_path)
+        print(ex)
+
+
+def backupstore_delete_backup_cfg_file(client, core_api, volume_name, backup_name): # NOQA
+    backupstore = backupstore_get_backup_target(client)
+
+    if is_backupTarget_s3(backupstore):
+        minio_delete_backup_cfg_file(client,
+                                     core_api,
+                                     volume_name,
+                                     backup_name)
+
+    elif is_backupTarget_nfs(backupstore):
+        nfs_delete_backup_cfg_file(client, volume_name, backup_name)
+
+    else:
+        raise NotImplementedError
+
+
+def nfs_delete_backup_cfg_file(client, volume_name, backup_name):
+    nfs_backup_cfg_file_path = nfs_get_backup_cfg_file_path(client,
+                                                            volume_name,
+                                                            backup_name)
+    try:
+        os.remove(nfs_backup_cfg_file_path)
+    except Exception as ex:
+        print("error while deleting backup cfg file:",
+              nfs_backup_cfg_file_path)
+        print(ex)
+
+
+def minio_delete_backup_cfg_file(client, core_api, volume_name, backup_name):
+    secret_name = backupstore_get_secret(client)
+    assert secret_name != ''
+
+    minio_api = minio_get_api_client(client, core_api, secret_name)
+    bucket_name = minio_get_backupstore_bucket_name(client)
+    minio_backup_cfg_file_path = minio_get_backup_cfg_file_path(client,
+                                                                volume_name,
+                                                                backup_name)
+
+    try:
+        minio_api.remove_object(bucket_name, minio_backup_cfg_file_path)
+    except ResponseError as err:
+        print(err)
+
+
+def backupstore_delete_volume_cfg_file(client, core_api, volume_name): # NOQA
+    backupstore = backupstore_get_backup_target(client)
+
+    if is_backupTarget_s3(backupstore):
+        minio_delete_volume_cfg_file(client,
+                                     core_api,
+                                     volume_name)
+
+    elif is_backupTarget_nfs(backupstore):
+        nfs_delete_volume_cfg_file(client, volume_name)
+
+    else:
+        raise NotImplementedError
+
+
+def nfs_delete_volume_cfg_file(client, volume_name):
+    nfs_volume_cfg_path = nfs_get_volume_cfg_file_path(client, volume_name)
+    try:
+        os.remove(nfs_volume_cfg_path)
+    except Exception as ex:
+        print("error while deleting volume cfg file:", nfs_volume_cfg_path)
+        print(ex)
+
+
+def minio_delete_volume_cfg_file(client, core_api, volume_name):
+    secret_name = backupstore_get_secret(client)
+    assert secret_name != ''
+
+    minio_api = minio_get_api_client(client, core_api, secret_name)
+    bucket_name = minio_get_backupstore_bucket_name(client)
+    minio_volume_cfg_file_path = minio_get_volume_cfg_file_path(client,
+                                                                volume_name)
+
+    try:
+        minio_api.remove_object(bucket_name, minio_volume_cfg_file_path)
+    except ResponseError as err:
+        print(err)
+
+
+def backupstore_create_dummy_in_progress_backup(client, core_api, volume_name):
+    dummy_backup_cfg_data = {"Name": "dummy_backup",
+                             "VolumeName": volume_name,
+                             "CreatedTime": ""}
+
+    backupstore_write_backup_cfg_file(client,
+                                      core_api,
+                                      volume_name,
+                                      "backup-dummy",
+                                      dummy_backup_cfg_data)
+
+
+def backupstore_corrupt_backup_cfg_file(client, core_api, volume_name, backup_name): # NOQA
+    corrupt_backup_cfg_data = "{corrupt: definitely"
+
+    backupstore_write_backup_cfg_file(client,
+                                      core_api,
+                                      volume_name,
+                                      backup_name,
+                                      corrupt_backup_cfg_data)
+
+
+def backupstore_delete_dummy_in_progress_backup(client, core_api, volume_name):
+    backupstore_delete_backup_cfg_file(client,
+                                       core_api,
+                                       volume_name,
+                                       "backup-dummy")
+
+
+def backupstore_delete_random_backup_block(client, core_api, volume_name):
+    backupstore = backupstore_get_backup_target(client)
+
+    if is_backupTarget_s3(backupstore):
+        minio_delete_random_backup_block(client, core_api, volume_name)
+
+    elif is_backupTarget_nfs(backupstore):
+        nfs_delete_random_backup_block(client, volume_name)
+
+    else:
+        raise NotImplementedError
+
+
+def nfs_delete_random_backup_block(client, volume_name):
+    backup_blocks_dir = nfs_get_backup_blocks_dir(client, volume_name)
+    cmd = ["find", backup_blocks_dir, "-type", "f"]
+    find_cmd = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+    head_cmd = subprocess.check_output(["head", "-1"], stdin=find_cmd.stdout)
+    backup_block_file_path = head_cmd.decode().strip()
+
+    try:
+        os.remove(backup_block_file_path)
+    except Exception as ex:
+        print("error while deleting backup block file:",
+              backup_block_file_path)
+        print(ex)
+
+
+def minio_delete_random_backup_block(client, core_api, volume_name):
+    secret_name = backupstore_get_secret(client)
+    assert secret_name != ''
+
+    minio_api = minio_get_api_client(client, core_api, secret_name)
+
+    bucket_name = minio_get_backupstore_bucket_name(client)
+    backup_blocks_dir = minio_get_backup_blocks_dir(client, volume_name)
+
+    block_object_files = minio_api.list_objects(bucket_name,
+                                                prefix=backup_blocks_dir,
+                                                recursive=True)
+
+    object_file = next(block_object_files).object_name
+
+    try:
+        minio_api.remove_object(bucket_name, object_file)
+    except ResponseError as err:
+        print(err)
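The commit message notes the subprocess parts still need refinement before NFS works; one possible subprocess-free sketch of the `find | head -1` step in nfs_delete_random_backup_block(), assuming the NFS backupstore is mounted locally:

```python
import os

# Possible replacement for the find | head -1 pipeline: walk the mounted
# blocks directory in pure Python and return the first file found.
def first_backup_block(backup_blocks_dir):
    for root, _, files in os.walk(backup_blocks_dir):
        for name in sorted(files):
            return os.path.join(root, name)
    return None
```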
+
+
+def backupstore_count_backup_block_files(client, core_api, volume_name):
+    backupstore = backupstore_get_backup_target(client)
+
+    if is_backupTarget_s3(backupstore):
+        return minio_count_backup_block_files(client, core_api, volume_name)
+
+    elif is_backupTarget_nfs(backupstore):
+        return nfs_count_backup_block_files(client, volume_name)
+
+    else:
+        raise NotImplementedError
+
+
+def nfs_count_backup_block_files(client, volume_name):
+    backup_blocks_dir = nfs_get_backup_blocks_dir(client, volume_name)
+    cmd = ["find", backup_blocks_dir, "-type", "f"]
+    find_cmd = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+    wc_cmd = subprocess.check_output(["wc", "-l"], stdin=find_cmd.stdout)
+    backup_blocks_count = int(wc_cmd.decode().strip())
+
+    return backup_blocks_count
+
+
+def minio_count_backup_block_files(client, core_api, volume_name):
+    secret_name = backupstore_get_secret(client)
+    assert secret_name != ''
+
+    minio_api = minio_get_api_client(client, core_api, secret_name)
+    bucket_name = minio_get_backupstore_bucket_name(client)
+    backup_blocks_dir = minio_get_backup_blocks_dir(client, volume_name)
+
+    block_object_files = minio_api.list_objects(bucket_name,
+                                                prefix=backup_blocks_dir,
+                                                recursive=True)
+
+    block_object_files_list = list(block_object_files)
+
+    return len(block_object_files_list)
+
+
+def backupstore_wait_for_lock_expiration():
+    """
+    waits 150 seconds which is the lock duration
+    TODO: once we have implemented the delete functions,
+    we can switch to removing the locks directly
+    """
+    time.sleep(BACKUPSTORE_LOCK_DURATION)
+
+
+def delete_backup_volume(client, volume_name):
+    bv = client.by_id_backupVolume(volume_name)
+    client.delete(bv)
+    wait_for_backup_volume_delete(client, volume_name)
+
+
+def wait_for_backup_volume_delete(client, name):
+    for _ in range(RETRY_COUNT):
+        bvs = client.list_backupVolume()
+        found = False
+        for bv in bvs:
+            if bv.name == name:
+                found = True
+                break
+        if not found:
+            break
+        time.sleep(RETRY_INTERVAL)
+    assert not found
+
+
+def system_backups_cleanup(client):
+    """
+    Clean up all system backups
+    :param client: The Longhorn client to use in the request.
+    """
+
+    system_backups = client.list_system_backup()
+    for system_backup in system_backups:
+        # ignore the error when clean up
+        try:
+            client.delete(system_backup)
+        except Exception as e:
+            name = system_backup['name']
+            print("\nException when cleanup system backup ", name)
+            print(e)
+
+    ok = False
+    for _ in range(RETRY_COUNT):
+        system_backups = client.list_system_backup()
+        if len(system_backups) == 0:
+            ok = True
+            break
+        time.sleep(RETRY_INTERVAL)
+    assert ok
\ No newline at end of file
diff --git a/e2e/libs/keywords/common_keywords.py b/e2e/libs/keywords/common_keywords.py
index 3c9d55fa71..abf33ad861 100644
--- a/e2e/libs/keywords/common_keywords.py
+++ b/e2e/libs/keywords/common_keywords.py
@@ -1,4 +1,6 @@
 from utility.utility import init_k8s_api_client
+from utility.utility import get_longhorn_client
+from backupstore import set_backupstore, backupstore_cleanup
 from node_exec import NodeExec

 class common_keywords:
@@ -15,3 +17,9 @@ def init_node_exec(self, test_name):

     def cleanup_node_exec(self):
         NodeExec.get_instance().cleanup()
+
+    def set_backupstore(self):
+        set_backupstore(get_longhorn_client())
+
+    def cleanup_backupstore(self):
+        backupstore_cleanup(get_longhorn_client())
diff --git a/e2e/libs/longhorn.py b/e2e/libs/longhorn.py
index 916a6ec252..d2bea600ec 100644
--- a/e2e/libs/longhorn.py
+++ b/e2e/libs/longhorn.py
@@ -268,6 +268,8 @@ def cb(_link_name=link_name,

         return result

+        if type(obj) == str and '/v1/' in obj:
+            obj = self._url.replace("/v1/schemas", "") + obj[obj.find("/v1/"):]
         return obj

     def object_pairs_hook(self, pairs):
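The longhorn.py change above remaps API links for out-of-cluster runs: any string containing '/v1/' is rebased onto the URL the client was constructed with. A small sketch with hypothetical addresses:

```python
# client._url as created out-of-cluster, and a hypothetical link returned
# by the API that still points at the in-cluster service address.
url = "http://localhost:8080/v1/schemas"
obj = "http://longhorn-backend:9500/v1/volumes/pvc-example"

fixed = url.replace("/v1/schemas", "") + obj[obj.find("/v1/"):]
print(fixed)  # http://localhost:8080/v1/volumes/pvc-example
```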
"snapshots", label_selector=label_selector) - try: - if len(snapshot_list['items']) > 0: - for item in snapshot_list['items']: - # this snapshot can be created by snapshot or backup recurring job - # but job_name is in spec.labels.RecurringJob - # and crd doesn't support field selector - # so need to filter by ourselves - if item['spec']['labels']['RecurringJob'] == job_name: - logging(f"Got snapshot {item}") - snapshot_time = snapshot_list['items'][0]['metadata']['creationTimestamp'] - snapshot_time = datetime.strptime(snapshot_time, '%Y-%m-%dT%H:%M:%SZ') - snapshot_timestamp = snapshot_time.timestamp() - logging(f"Got snapshot time = {snapshot_time}, timestamp = {snapshot_timestamp}") - break - if snapshot_timestamp > current_timestamp: - return - except Exception as e: - logging(f"Iterating snapshot list error: {e}") + if len(snapshot_list['items']) > 0: + for item in snapshot_list['items']: + # this snapshot can be created by snapshot or backup recurring job + # but job_name is in spec.labels.RecurringJob + # and crd doesn't support field selector + # so need to filter by ourselves + if 'RecurringJob' in item['status']['labels'] and \ + item['status']['labels']['RecurringJob'] == job_name and \ + item['status']['readyToUse'] == True: + logging(f"Got snapshot {item}") + snapshot_time = item['metadata']['creationTimestamp'] + snapshot_time = datetime.strptime(snapshot_time, '%Y-%m-%dT%H:%M:%SZ') + snapshot_timestamp = snapshot_time.timestamp() + logging(f"Got snapshot time = {snapshot_time}, timestamp = {snapshot_timestamp}") + break + if snapshot_timestamp > current_timestamp: + return time.sleep(1) assert False, f"since {current_time},\ there's no new snapshot created by recurring job \ @@ -152,16 +151,16 @@ def _check_backup_created_in_time(self, volume_name, period_in_sec): backup_timestamp = 0 for _ in range(period_in_sec * 2): backup_list = filter_cr("longhorn.io", "v1beta2", "longhorn-system", "backups", label_selector=label_selector) - try: - if len(backup_list['items']) > 0: - backup_time = backup_list['items'][0]['metadata']['creationTimestamp'] - backup_time = datetime.strptime(backup_time, '%Y-%m-%dT%H:%M:%SZ') - backup_timestamp = backup_time.timestamp() - logging(f"Got backup time = {backup_time}, timestamp = {backup_timestamp}") - if backup_timestamp > current_timestamp: - return - except Exception as e: - logging(f"Iterating backup list error: {e}") + if len(backup_list['items']) > 0: + state = backup_list['items'][0]['status']['state'] + if state != "InProgress" and state != "Completed": + continue + backup_time = backup_list['items'][0]['metadata']['creationTimestamp'] + backup_time = datetime.strptime(backup_time, '%Y-%m-%dT%H:%M:%SZ') + backup_timestamp = backup_time.timestamp() + logging(f"Got backup time = {backup_time}, timestamp = {backup_timestamp}") + if backup_timestamp > current_timestamp: + return time.sleep(1) assert False, f"since {current_time},\ there's no new backup created by recurring job \ diff --git a/e2e/requirements.txt b/e2e/requirements.txt index 81eac4a39a..ff0ea2559a 100644 --- a/e2e/requirements.txt +++ b/e2e/requirements.txt @@ -6,3 +6,4 @@ kubernetes==27.2.0 requests==2.31.0 boto3==1.26.86 pyyaml==6.0.1 +minio==5.0.10 \ No newline at end of file diff --git a/pipelines/utilities/run_longhorn_e2e_test.sh b/pipelines/utilities/run_longhorn_e2e_test.sh index e797438e63..589bdc4d52 100755 --- a/pipelines/utilities/run_longhorn_e2e_test.sh +++ b/pipelines/utilities/run_longhorn_e2e_test.sh @@ -65,6 +65,14 @@ run_longhorn_e2e_test(){ } 
diff --git a/pipelines/utilities/run_longhorn_e2e_test.sh b/pipelines/utilities/run_longhorn_e2e_test.sh
index e797438e63..589bdc4d52 100755
--- a/pipelines/utilities/run_longhorn_e2e_test.sh
+++ b/pipelines/utilities/run_longhorn_e2e_test.sh
@@ -65,6 +65,14 @@ run_longhorn_e2e_test(){
 }

 run_longhorn_e2e_test_out_of_cluster(){
+
+  if [[ ${BACKUP_STORE_TYPE} == "s3" ]]; then
+    export LONGHORN_BACKUPSTORES='s3://backupbucket@us-east-1/backupstore$minio-secret'
+  elif [[ ${BACKUP_STORE_TYPE} == "nfs" ]]; then
+    export LONGHORN_BACKUPSTORES='nfs://longhorn-test-nfs-svc.default:/opt/backupstore'
+  fi
+  export LONGHORN_BACKUPSTORE_POLL_INTERVAL="30"
+
   cd e2e
   pip install -r requirements.txt
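The quoted s3 value above packs two Longhorn settings into one string (the single quotes keep bash from expanding the '$'); set_backupstore_s3() in e2e/libs/backupstore.py splits it back apart:

```python
backupstore = 's3://backupbucket@us-east-1/backupstore$minio-secret'
target_url, secret_name = backupstore.split("$")
print(target_url)   # s3://backupbucket@us-east-1/backupstore -> backup-target
print(secret_name)  # minio-secret -> backup-target-credential-secret
```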