From 55de2e6db829b6856481cccc93f0f3080e1aed69 Mon Sep 17 00:00:00 2001 From: Derek Su Date: Wed, 5 Jul 2023 12:42:16 +0800 Subject: [PATCH 1/4] test(volume-encryption): add test_csi_encrypted_block_volume Longhorn 4883 Signed-off-by: Derek Su --- manager/integration/tests/common.py | 67 +++++++++++++++++++++++++++ manager/integration/tests/test_csi.py | 43 ++++++++++++++++- 2 files changed, 108 insertions(+), 2 deletions(-) diff --git a/manager/integration/tests/common.py b/manager/integration/tests/common.py index fb62df1f39..518844d8ed 100644 --- a/manager/integration/tests/common.py +++ b/manager/integration/tests/common.py @@ -1453,6 +1453,35 @@ def finalizer(): return sc_manifest +@pytest.fixture +def crypto_secret(request): + manifest = { + 'apiVersion': 'v1', + 'kind': 'Secret', + 'metadata': { + 'name': 'longhorn-crypto', + 'namespace': 'longhorn-system', + }, + 'stringData': { + 'CRYPTO_KEY_VALUE': 'simple', + 'CRYPTO_KEY_PROVIDER': 'secret' + } + } + + def finalizer(): + api = get_core_api_client() + try: + api.delete_namespaced_secret( + name=manifest['metadata']['name'], + namespace=manifest['metadata']['namespace']) + except ApiException as e: + assert e.status == 404 + + request.addfinalizer(finalizer) + + return manifest + + @pytest.fixture def priority_class(request): priority_class = { @@ -1608,6 +1637,7 @@ def cleanup_client(): if backing_image_feature_supported(client): cleanup_all_backing_images(client) + cleanup_crypto_secret() cleanup_storage_class() if system_backup_feature_supported(client): system_restores_cleanup(client) @@ -3715,6 +3745,43 @@ def wait_statefulset(statefulset_manifest): assert s_set.status.ready_replicas == replicas +def create_crypto_secret(secret_manifest): + api = get_core_api_client() + api.create_namespaced_secret(namespace=LONGHORN_NAMESPACE, + body=secret_manifest) + + +def delete_crypto_secret(secret_manifest): + api = get_core_api_client() + try: + api.delete_namespaced_secret(secret_manifest, + 
body=k8sclient.V1DeleteOptions()) + except ApiException as e: + assert e.status == 404 + + +def cleanup_crypto_secret(): + secret_deletes = ["longhorn-crypto"] + api = get_core_api_client() + ret = api.list_namespaced_secret(namespace=LONGHORN_NAMESPACE) + for sc in ret.items: + if sc.metadata.name in secret_deletes: + delete_crypto_secret(sc.metadata.name) + + ok = False + for _ in range(RETRY_COUNTS): + ok = True + ret = api.list_namespaced_secret(namespace=LONGHORN_NAMESPACE) + for s in ret.items: + if s.metadata.name in secret_deletes: + ok = False + break + if ok: + break + time.sleep(RETRY_INTERVAL) + assert ok + + def create_storage_class(sc_manifest): api = get_storage_api_client() api.create_storage_class( diff --git a/manager/integration/tests/test_csi.py b/manager/integration/tests/test_csi.py index cddda43ea1..d9e9e16c00 100644 --- a/manager/integration/tests/test_csi.py +++ b/manager/integration/tests/test_csi.py @@ -7,6 +7,7 @@ import time from common import client, core_api, apps_api # NOQA from common import csi_pv, pod_make, pvc, storage_class # NOQA +from common import crypto_secret # NOQA from common import make_deployment_with_pvc # NOQA from common import pod as pod_manifest # NOQA from common import Mi, Gi, DEFAULT_VOLUME_SIZE, EXPANDED_VOLUME_SIZE @@ -14,8 +15,10 @@ from common import VOLUME_CONDITION_SCHEDULED from common import SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY from common import SETTING_REPLICA_REPLENISHMENT_WAIT_INTERVAL +from common import LONGHORN_NAMESPACE from common import create_and_wait_pod, create_pvc_spec, delete_and_wait_pod from common import size_to_string, create_storage_class, create_pvc +from common import create_crypto_secret from common import delete_and_wait_pvc, delete_and_wait_pv from common import wait_and_get_pv_for_pvc from common import generate_random_data, read_volume_data @@ -255,11 +258,48 @@ def test_csi_block_volume(client, core_api, storage_class, pvc, pod_manifest): 6. 
Delete the pod and create `pod2` to use the same volume 7. Validate the data in `pod2` is consistent with `test_data` """ + + storage_class['reclaimPolicy'] = 'Retain' + create_storage_class(storage_class) + + create_and_verify_block_volume(client, core_api, storage_class, pvc, + pod_manifest) + + +@pytest.mark.csi # NOQA +def test_csi_encrypted_block_volume(client, core_api, storage_class, crypto_secret, pvc, pod_manifest): # NOQA + """ + Test CSI feature: encrypted block volume + + 1. Create a PVC with encrypted `volumeMode = Block` + 2. Create a pod using the PVC to dynamic provision a volume + 3. Verify the pod creation + 4. Generate `test_data` and write to the block volume directly in the pod + 5. Read the data back for validation + 6. Delete the pod and create `pod2` to use the same volume + 7. Validate the data in `pod2` is consistent with `test_data` + """ + + create_crypto_secret(crypto_secret) + + storage_class['reclaimPolicy'] = 'Retain' + storage_class['parameters']['csi.storage.k8s.io/provisioner-secret-name'] = 'longhorn-crypto' # NOQA + storage_class['parameters']['csi.storage.k8s.io/provisioner-secret-namespace'] = LONGHORN_NAMESPACE # NOQA + storage_class['parameters']['csi.storage.k8s.io/node-publish-secret-name'] = 'longhorn-crypto' # NOQA + storage_class['parameters']['csi.storage.k8s.io/node-publish-secret-namespace'] = LONGHORN_NAMESPACE # NOQA + storage_class['parameters']['csi.storage.k8s.io/node-stage-secret-name'] = 'longhorn-crypto' # NOQA + storage_class['parameters']['csi.storage.k8s.io/node-stage-secret-namespace'] = LONGHORN_NAMESPACE # NOQA + create_storage_class(storage_class) + + create_and_verify_block_volume(client, core_api, storage_class, pvc, + pod_manifest) + + +def create_and_verify_block_volume(client, core_api, storage_class, pvc, pod_manifest): # NOQA pod_name = 'csi-block-volume-test' pvc_name = pod_name + "-pvc" device_path = "/dev/longhorn/longhorn-test-blk" - storage_class['reclaimPolicy'] = 'Retain' 
pvc['metadata']['name'] = pvc_name pvc['spec']['volumeMode'] = 'Block' pvc['spec']['storageClassName'] = storage_class['metadata']['name'] @@ -280,7 +320,6 @@ def test_csi_block_volume(client, core_api, storage_class, pvc, pod_manifest): {'name': 'longhorn-blk', 'devicePath': device_path} ] - create_storage_class(storage_class) create_pvc(pvc) pv_name = wait_and_get_pv_for_pvc(core_api, pvc_name).metadata.name create_and_wait_pod(core_api, pod_manifest) From 2f02f1d05360045dee78c272175f039180bb0a87 Mon Sep 17 00:00:00 2001 From: Roger Yao Date: Tue, 26 Sep 2023 09:17:03 +0800 Subject: [PATCH 2/4] Update upgrade test case to support the validation of both 'spec.engineImage' and 'spec.image' fields. Ref: https://github.com/longhorn/longhorn/issues/6777 Signed-off-by: Roger Yao --- manager/integration/tests/test_upgrade.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/manager/integration/tests/test_upgrade.py b/manager/integration/tests/test_upgrade.py index 4c1e7d6d16..428d41c7b2 100644 --- a/manager/integration/tests/test_upgrade.py +++ b/manager/integration/tests/test_upgrade.py @@ -567,7 +567,12 @@ def test_upgrade(longhorn_upgrade_type, if v.name != restore_vol_name: volume = client.by_id_volume(v.name) engine = get_volume_engine(volume) - assert engine.image == new_ei.image + if hasattr(engine, 'engineImage'): + print("Checking engineImage...") + assert engine.engineImage == new_ei.image + else: + print("Checking image...") + assert engine.image == new_ei.image assert engine.currentImage == new_ei.image # Check All volumes data From f8f70eec52b94e5eaaa2fd0130ab5d6fc714884d Mon Sep 17 00:00:00 2001 From: Yang Chiu Date: Wed, 27 Sep 2023 17:40:51 +0800 Subject: [PATCH 3/4] test: remove unused code Signed-off-by: Yang Chiu --- e2e/doc/condition_table.md | 17 - e2e/keywords/replica.resource | 53 -- e2e/keywords/volume.resource | 4 - e2e/libs/engine/engine.py | 19 + e2e/libs/keywords/engine_keywords.py | 26 - 
e2e/libs/keywords/pod_keywords.py | 34 -- e2e/libs/keywords/replica_keywords.py | 57 -- e2e/libs/node/node.py | 26 + e2e/libs/utils/__init__.py | 2 - e2e/libs/utils/common_utils.py | 138 ----- e2e/libs/utils/config_utils.py | 37 -- e2e/settings.ini | 4 - e2e/tests/node_not_ready/node_power_off.robot | 564 ------------------ 13 files changed, 45 insertions(+), 936 deletions(-) delete mode 100644 e2e/doc/condition_table.md delete mode 100644 e2e/keywords/replica.resource delete mode 100644 e2e/libs/keywords/engine_keywords.py delete mode 100644 e2e/libs/keywords/pod_keywords.py delete mode 100644 e2e/libs/keywords/replica_keywords.py delete mode 100644 e2e/libs/utils/__init__.py delete mode 100644 e2e/libs/utils/common_utils.py delete mode 100644 e2e/libs/utils/config_utils.py delete mode 100644 e2e/settings.ini delete mode 100644 e2e/tests/node_not_ready/node_power_off.robot diff --git a/e2e/doc/condition_table.md b/e2e/doc/condition_table.md deleted file mode 100644 index 132ac56e12..0000000000 --- a/e2e/doc/condition_table.md +++ /dev/null @@ -1,17 +0,0 @@ - ## node_power_off.robot - test case | node 1 | node 2 | node 3 - --|--|--|-- - 1 |
  • replica
  • attached
  • power off

  • | replica | replica - 2 |
  • replica
  • attached

  • |
  • replica
  • power off

  • | replica - 3 |
  • replica

  • |
  • replica

  • |
  • attached
  • power off

  • - 4 |
  • replica
  • power off

  • |
  • replica

  • |
  • attached

  • - 5 |
  • replica
  • attached
  • power off

  • |
  • replica

  • | - 6 |
  • replica
  • attached

  • |
  • replica

  • |
  • power off

  • - 7 |
  • replica
  • attached

  • |
  • replica
  • power off

  • | - 8 |
  • replica
  • attached
  • power off time out

  • | replica | replica - 9 |
  • replica
  • attached

  • |
  • replica
  • power off time out

  • | replica - 10 |
  • replica

  • |
  • replica

  • |
  • attached
  • power off time out

  • - 11 |
  • replica
  • power off time out

  • |
  • replica

  • |
  • attached

  • - 12 |
  • replica
  • attached
  • power off time out

  • |
  • replica

  • | - 13 |
  • replica
  • attached

  • |
  • replica

  • |
  • power off time out

  • - 14 |
  • replica
  • attached

  • |
  • replica
  • power off time out

  • | diff --git a/e2e/keywords/replica.resource b/e2e/keywords/replica.resource deleted file mode 100644 index 94218a9eb9..0000000000 --- a/e2e/keywords/replica.resource +++ /dev/null @@ -1,53 +0,0 @@ -*** Settings *** -Documentation Longhorn replica related keywords - -Library ../libs/keywords/common_keywords.py -Library ../libs/keywords/replica_keywords.py -Resource node.resource - - -*** Keywords *** -All replicas state should eventually be ${expected_replica_state} - FOR ${node_index} IN @{cluster_node_index} - ${target_node_index} = Evaluate ${node_index}+1 - Run keyword And Continue On Failure - ... Wait Until Keyword Succeeds - ... ${retry_timeout_second} seconds - ... ${retry_interval} seconds - ... Replica on node ${target_node_index} state should be ${expected_replica_state} - END - -Replica state on node ${node_index} should eventually be ${expected_replica_state} - Run keyword And Continue On Failure - ... Wait Until Keyword Succeeds - ... ${retry_timeout_second} seconds - ... ${retry_interval} seconds - ... Replica on node ${node_index} state should be ${expected_replica_state} - -Replica on node ${node_index} state should be ${expected_replica_state} - ${target_node_index} = Evaluate ${node_index}-1 - ${replica_current_state} = get_replica_state - ... ${volume_name} - ... 
${cluster_node_index}[${target_node_index}] - check_workload_state ${replica_current_state} ${expected_replica_state} - -Wait for replica on node ${node_index} start rebuilding - ${target_node_index} = Evaluate ${node_index}-1 - wait_for_replica_rebuilding_start ${volume_name} ${target_node_index} - -Wait for replica on node ${node_index} complete rebuilding - ${target_node_index} = Evaluate ${node_index}-1 - wait_for_replica_rebuilding_complete ${volume_name} ${target_node_index} - -Delete the replica on node ${node_index} - ${target_node_index} = Evaluate ${node_index}-1 - delete_replica ${volume_name} ${target_node_index} - -Wait until all replicas rebuilt - FOR ${node_index} IN @{cluster_node_index} - Run keyword And Continue On Failure - ... wait_for_replica_rebuilding_complete ${volume_name} ${node_index} - END - -Wait for all replicas to be created - wait_for_replica_created ${volume_name} ${number_of_replicas} diff --git a/e2e/keywords/volume.resource b/e2e/keywords/volume.resource index e5d5eb1053..47815ced3e 100644 --- a/e2e/keywords/volume.resource +++ b/e2e/keywords/volume.resource @@ -47,10 +47,6 @@ Wait until replica ${replica_0} rebuilt, delete replica ${replica_2} wait_for_replica_rebuilding_complete ${volume_name} ${replica_0} delete_replica ${volume_name} ${replica_2} -Wait until replica ${replica_0} rebuilt, delete replica ${replica_2} - wait_for_replica_rebuilding_complete ${volume_name} ${replica_0} - delete_replica ${volume_name} ${replica_2} - Check data is intact check_data ${volume_name} ${volume_data_checksum} diff --git a/e2e/libs/engine/engine.py b/e2e/libs/engine/engine.py index 876ea9212e..9908a07b5a 100644 --- a/e2e/libs/engine/engine.py +++ b/e2e/libs/engine/engine.py @@ -17,3 +17,22 @@ def get_engine(self, volume_name, node_name): # delete engines, if input parameters are empty then will delete all def delete_engine(self, volume_name="", node_name=""): return self.engine.delete_engine(volume_name, node_name) + + def 
get_engine_state(self, volume_name, node_name): + logging(f"Getting the volume {volume_name} engine on the node {node_name} state") + + resp = self.get_engine(volume_name, node_name) + if resp == "" or resp is None: + raise Exception(f"failed to get the volume {volume_name} engine") + + engines = resp["items"] + if len(engines) == 0: + logging.warning(f"cannot get the volume {volume_name} engines") + return + + engines_states = {} + for engine in engines: + engine_name = engine["metadata"]["name"] + engine_state = engine['status']['currentState'] + engines_states[engine_name] = engine_state + return engines_states diff --git a/e2e/libs/keywords/engine_keywords.py b/e2e/libs/keywords/engine_keywords.py deleted file mode 100644 index f704dc4ffa..0000000000 --- a/e2e/libs/keywords/engine_keywords.py +++ /dev/null @@ -1,26 +0,0 @@ -from utility.utility import logging -from common_keywords import common_keywords - -class engine_keywords: - - def __init__(self): - self.engine = common_keywords.engine_instance - - def get_engine_state(self, volume_name, node_name): - logging(f"Getting the volume {volume_name} engine on the node {node_name} state") - - resp = self.engine.get_engine(volume_name, node_name) - if resp == "" or resp is None: - raise Exception(f"failed to get the volume {volume_name} engine") - - engines = resp["items"] - if len(engines) == 0: - logging.warning(f"cannot get the volume {volume_name} engines") - return - - engines_states = {} - for engine in engines: - engine_name = engine["metadata"]["name"] - engine_state = engine['status']['currentState'] - engines_states[engine_name] = engine_state - return engines_states \ No newline at end of file diff --git a/e2e/libs/keywords/pod_keywords.py b/e2e/libs/keywords/pod_keywords.py deleted file mode 100644 index e9b14efba9..0000000000 --- a/e2e/libs/keywords/pod_keywords.py +++ /dev/null @@ -1,34 +0,0 @@ -import time -from node import Nodes - -retry_count = 200 -retry_interval = 5 - -class pod_keywords: - - 
#TODO - # keywords layer can only call lower implementation layer to complete its work - # and should not have any business logic here - - def wait_all_pods_evicted(self, node_index): - node_name = Nodes.get_name_by_index(int(node_index)) - - for i in range(retry_count): - pods = [] - pods = Nodes.get_pods_with_node_name(node_name) - evict_done = True - for pod in pods: - # check non DaemonSet Pods are evicted or terminating (deletionTimestamp != None) - pod_type = pod.metadata.owner_references[0].kind - pod_delete_timestamp = pod.metadata.deletion_timestamp - - if pod_type != 'DaemonSet' and pod_delete_timestamp == None: - evict_done = False - break - - if evict_done: - break - - time.sleep(retry_interval) - - assert evict_done, 'failed to evicted Pods' diff --git a/e2e/libs/keywords/replica_keywords.py b/e2e/libs/keywords/replica_keywords.py deleted file mode 100644 index be115b56a1..0000000000 --- a/e2e/libs/keywords/replica_keywords.py +++ /dev/null @@ -1,57 +0,0 @@ -from utility.utility import logging -from node import Nodes -from common_keywords import common_keywords - -class replica_keywords: - - def __init__(self): - self.replica = common_keywords.replica_instance - self.volume = common_keywords.volume_instance - - def delete_replica(self, volume_name, node_index): - node_name = Nodes.get_name_by_index(int(node_index)) - logging(f"Deleting volume {volume_name}'s replica on the node {node_name}") - self.replica.delete_replica(volume_name, node_name) - - def wait_for_replica_rebuilding_start(self, volume_name, node_index): - node_name = Nodes.get_name_by_index(int(node_index)) - logging(f"Waiting volume {volume_name}'s replica on node {node_name} rebuilding start") - self.replica.wait_for_replica_rebuilding_start(volume_name, node_name) - - def wait_for_replica_rebuilding_complete(self, volume_name, node_index): - node_name = Nodes.get_name_by_index(int(node_index)) - logging(f"Waiting volume {volume_name}'s replica on node {node_name} rebuilding complete") 
- self.replica.wait_for_replica_rebuilding_complete( - volume_name, node_name) - - #TODO - # keywords layer can only call lower implementation layer to complete its work - # and should not have any business logic here - - def get_replica_state(self, volume_name, node_index): - node_name = Nodes.get_name_by_index(int(node_index)) - logging(f"Getting volume {volume_name}'s replica on the node {node_name} state") - - resp = self.replica.get_replica(volume_name, node_name) - assert resp != "", f"failed to get the volume {volume_name} replicas" - - replicas = resp["items"] - if len(replicas) == 0: - return - - replicas_states = {} - for replica in replicas: - replica_name = replica["metadata"]["name"] - replica_state = replica['status']['currentState'] - replicas_states[replica_name] = replica_state - return replicas_states - - def wait_for_replica_created(self, volume_name, expected_replica_count): - # wait for a period of time for the replica to be created - current_replica_count = 0 - count = 1 - while expected_replica_count != current_replica_count and count <= 180: - replicas = self.replica.get_replica(volume_name, "") - current_replica_count = len(replicas) - count += 1 - assert expected_replica_count != current_replica_count, f'replica creation is not ready: {current_replica_count}' diff --git a/e2e/libs/node/node.py b/e2e/libs/node/node.py index 84ddc7fa70..e94ef69b9c 100644 --- a/e2e/libs/node/node.py +++ b/e2e/libs/node/node.py @@ -68,3 +68,29 @@ def reboot_all_worker_nodes(self, shut_down_time_in_sec=60): waiter = self.aws_client.get_waiter('instance_running') waiter.wait(InstanceIds=instance_ids) logging(f"Started instances") + + def get_all_pods_on_node(self, node_name): + api = client.CoreV1Api() + all_pods = api.list_namespaced_pod(namespace='longhorn-system', field_selector='spec.nodeName=' + node_name) + user_pods = [p for p in all_pods.items if (p.metadata.namespace != 'kube-system')] + return user_pods + + def wait_all_pods_evicted(self, node_name): + 
for i in range(RETRY_COUNT): + pods = self.get_all_pods_on_node(node_name) + evicted = True + for pod in pods: + # check non DaemonSet Pods are evicted or terminating (deletionTimestamp != None) + pod_type = pod.metadata.owner_references[0].kind + pod_delete_timestamp = pod.metadata.deletion_timestamp + + if pod_type != 'DaemonSet' and pod_delete_timestamp == None: + evicted = False + break + + if evicted: + break + + time.sleep(RETRY_INTERVAL) + + assert evicted, 'failed to evict pods' \ No newline at end of file diff --git a/e2e/libs/utils/__init__.py b/e2e/libs/utils/__init__.py deleted file mode 100644 index 2a3bdca301..0000000000 --- a/e2e/libs/utils/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from utils import config_utils -from utils import common_utils \ No newline at end of file diff --git a/e2e/libs/utils/common_utils.py b/e2e/libs/utils/common_utils.py deleted file mode 100644 index 2dd048068d..0000000000 --- a/e2e/libs/utils/common_utils.py +++ /dev/null @@ -1,138 +0,0 @@ -import os -import requests -import warnings -import string -import random -import time -import socket -import longhorn - -from utils import config_utils -from kubernetes import config, client -from kubernetes.client import Configuration -from longhorn import from_env - -PORT = ":9500" -MAX_SUPPORT_BUNDLE_NUMBER = 20 -RETRY_EXEC_COUNTS = 150 -RETRY_INTERVAL = 1 -RETRY_INTERVAL_LONG = 2 - -def k8s_core_api(): - c = Configuration() - c.assert_hostname = False - Configuration.set_default(c) - config.load_incluster_config() - core_api = client.CoreV1Api() - return core_api - -def k8s_cr_api(): - c = Configuration() - c.assert_hostname = False - Configuration.set_default(c) - config.load_incluster_config() - cr_api = client.CustomObjectsApi() - return cr_api - -def get_longhorn_api_client(): - for i in range(RETRY_EXEC_COUNTS): - try: - config.load_incluster_config() - ips = get_mgr_ips() - - # check if longhorn manager port is open before calling get_client - for ip in ips: - sock = 
socket.socket(socket.AF_INET, socket.SOCK_STREAM) - mgr_port_open = sock.connect_ex((ip, 9500)) - - if mgr_port_open == 0: - client = 'http://' + ip + PORT + '/' #get_client(ip + PORT) - break - return client - except Exception: - time.sleep(RETRY_INTERVAL) - -def get_mgr_ips(): - ret = k8s_core_api().list_pod_for_all_namespaces( - label_selector="app=longhorn-manager", - watch=False) - mgr_ips = [] - for i in ret.items: - mgr_ips.append(i.status.pod_ip) - return mgr_ips - -def get_client(address): - url = 'http://' + address + '/v1/schemas' - c = longhorn.from_env(url=url) - return c - -def generate_volume_name(): - return "vol-" + \ - ''.join(random.choice(string.ascii_lowercase + string.digits) - for _ in range(6)) - -def get_longhorn_client(): - # manually expose longhorn client node port - # otherwise the test is needed to be run in in-cluster environment - # to access longhorn manager cluster ip - longhorn_client_url = get_longhorn_api_client() - longhorn_client = from_env(url=f"{longhorn_client_url}/v1/schemas") - return longhorn_client - -def get_support_bundle_url(): - client = get_longhorn_client() - return client._url.replace('schemas', 'supportbundles') - -def generate_support_bundle(case_name): # NOQA - """ - Generate support bundle into folder ./support_bundle/case_name.zip - - Won't generate support bundle if current support bundle count - greate than MAX_SUPPORT_BUNDLE_NUMBER. 
- Args: - case_name: support bundle will named case_name.zip - """ - os.makedirs("support_bundle", exist_ok=True) - file_cnt = len(os.listdir("support_bundle")) - - if file_cnt >= MAX_SUPPORT_BUNDLE_NUMBER: - warnings.warn("Ignoring the bundle download because of \ - avoiding overwhelming the disk usage.") - return - - url = get_support_bundle_url() - data = {'description': case_name, 'issueURL': case_name} - try: - res_raw = requests.post(url, json=data) - res_raw.raise_for_status() - res = res_raw.json() - except Exception as e: - warnings.warn(f"Error while generating support bundle: {e}") - return - id = res['data'][0]['id'] - name = res['data'][0]['name'] - - support_bundle_url = '{}/{}/{}'.format(url, id, name) - for i in range(RETRY_EXEC_COUNTS): - res = requests.get(support_bundle_url).json() - - if res['progressPercentage'] == 100: - break - else: - time.sleep(RETRY_INTERVAL_LONG) - - if res['progressPercentage'] != 100: - warnings.warn( - "Timeout to wait support bundle ready, skip download") - return - - # Download support bundle - download_url = '{}/download'.format(support_bundle_url) - try: - r = requests.get(download_url, allow_redirects=True, timeout=300) - r.raise_for_status() - with open('./support_bundle/{0}.zip'.format(case_name), 'wb') as f: - f.write(r.content) - except Exception as e: - warnings.warn("Error occured while downloading support bundle {}.zip\n\ - The error was {}".format(case_name, e)) diff --git a/e2e/libs/utils/config_utils.py b/e2e/libs/utils/config_utils.py deleted file mode 100644 index f2c4f3c453..0000000000 --- a/e2e/libs/utils/config_utils.py +++ /dev/null @@ -1,37 +0,0 @@ -import configparser -import logging - -CONFIG_FILE_PATH = 'settings.ini' - -class Config(object): - - _config = {} - - def __init__(self) -> None: - self.initialize_variables() - - @classmethod - def get(cls, config_name): - if config_name not in cls._config.keys(): - return None - return cls._config[config_name] - - @classmethod - def 
initialize_variables(cls): - logging.info("initiate environment variables") - - config = configparser.ConfigParser() - config.read(CONFIG_FILE_PATH, encoding='utf-8') - section_name = 'DEFAULT' - if 'CUSTOM' in config.sections(): - section_name = 'CUSTOM' - - cls._config = { - "CLOUD_PROVIDER": config[section_name]["CLOUD_PROVIDER"], - "K8S_DISTRO": config[section_name]["K8S_DISTRO"], - "LONGHORN_CLIENT_URL": config[section_name]["LONGHORN_CLIENT_URL"], - } - - logging.info("initiated variables:") - for var in cls._config: - logging.info(f"{var}={cls._config[var]}") diff --git a/e2e/settings.ini b/e2e/settings.ini deleted file mode 100644 index 8350ceabec..0000000000 --- a/e2e/settings.ini +++ /dev/null @@ -1,4 +0,0 @@ -[DEFAULT] -CLOUD_PROVIDER = aws -K8S_DISTRO = rke2 -LONGHORN_CLIENT_URL = http://127.0.0.1:8080/ diff --git a/e2e/tests/node_not_ready/node_power_off.robot b/e2e/tests/node_not_ready/node_power_off.robot deleted file mode 100644 index e9b3836af0..0000000000 --- a/e2e/tests/node_not_ready/node_power_off.robot +++ /dev/null @@ -1,564 +0,0 @@ -*** Settings *** -Documentation Test the Longhorn resillence if cluster node powering off - -Resource ../../keywords/common.resource -Resource ../../keywords/engine.resource -Resource ../../keywords/replica.resource -Resource ../../keywords/node.resource -Resource ../../keywords/volume.resource - -Suite Setup set_test_suite_environment -Test Setup set_test_environment ${TEST NAME} -Test Teardown Cleanup resource and resume state - - -*** Variables *** -${Gi}= 2**30 -${volume_size_gb}= 1 -${sleep_interval}= 300 -${volume_type}= RWO - - -*** Test Cases *** -Node power off with replica-VA - [Documentation] | =node 1= | =node 2= | =node 3= | - ... | replica | replica | replica | - ... | attached | | | - ... 
| *power off* | | | - ${number_of_replicas}= Convert To Integer 3 - Set Test Variable ${number_of_replicas} - - ${volume_type}= Evaluate "${volume_type}".lower() - - ${field1}= Convert To String {"spec": {"size": "${${volume_size_gb} * ${Gi}}"}} - ${field2}= Convert To String {"spec": {"numberOfReplicas": ${number_of_replicas}}} - ${field3}= Convert To String {"spec": {"accessMode": "${volume_type}"}} - @{list_of_fields}= Create List ${field1} ${field2} ${field3} - - Given Create Volume With Fields ${list_of_fields} - And Wait for all replicas to be created - And Attach volume to node 1 - And Write data into mount point - - When Power off node 1 - - Then Node 1 should have 1 volume replica - And Node 1 state should eventually be NotReady - And Volume state should eventually be unknown - And Engine state should eventually be unknown - And Replica state on node 1 should eventually be unknown - And Replica on node 2 state should be running - And Replica on node 3 state should be running - - When Power on node 1 - - Then Node 1 state should eventually be Ready - And Volume state should eventually be healthy - And Engine state should eventually be running - And All replicas state should eventually be running - And Data should be intact - [Teardown] Teardown - -Node power off with replica - [Documentation] | =node 1= | =node 2= | =node 3= | - ... | replica | replica | replica | - ... 
| attached | *power off* | | - ${number_of_replicas}= Convert To Integer 3 - Set Test Variable ${number_of_replicas} - - ${volume_type}= Evaluate "${volume_type}".lower() - - ${field1}= Convert To String {"spec": {"size": "${${volume_size_gb} * ${Gi}}"}} - ${field2}= Convert To String {"spec": {"numberOfReplicas": ${number_of_replicas}}} - ${field3}= Convert To String {"spec": {"accessMode": "${volume_type}"}} - @{list_of_fields}= Create List ${field1} ${field2} ${field3} - - Given Create Volume With Fields ${list_of_fields} - And Wait for all replicas to be created - And Attach volume to node 1 - And Write data into mount point - - When Power off node 2 - - Then Node 1 should have 1 volume replica - And Node 2 should have 1 volume replica - And Node 2 state should eventually be NotReady - And Volume state should eventually be degraded - And Engine state should eventually be running - And Replica state on node 2 should eventually be stopped - And Replica on node 1 state should be running - And Replica on node 3 state should be running - And Data should be intact - - When Power on node 2 - - Then Node 2 state should eventually be Ready - And Volume state should eventually be healthy - And Engine state should be running - And All replicas state should eventually be running - And Data should be intact - [Teardown] Teardown - -Node power off with VA - [Documentation] | =node 1= | =node 2= | =node 3= | - ... | replica | replica | | - ... | | | attached | - ... 
| | | *power off* | - ${number_of_replicas}= Convert To Integer 2 - Set Test Variable ${number_of_replicas} - - ${volume_type}= Evaluate "${volume_type}".lower() - - ${field1}= Convert To String {"spec": {"size": "${${volume_size_gb} * ${Gi}}"}} - ${field2}= Convert To String {"spec": {"numberOfReplicas": ${number_of_replicas}}} - ${field3}= Convert To String {"spec": {"accessMode": "${volume_type}"}} - @{list_of_fields}= Create List ${field1} ${field2} ${field3} - - Given Create Volume With Fields ${list_of_fields} - And Wait for all replicas to be created - And Attach volume to node 3 - And Write data into mount point - - When Power off node 3 - - Then Node 3 should have 0 volume replica - And Node 3 state should eventually be NotReady - And Volume state should eventually be unknown - And Engine state should eventually be unknown - And All replicas state should eventually be running - - When Power on node 3 - - Then Node 3 state should eventually be Ready - And Volume state should eventually be healthy - And Engine state should eventually be running - And All replicas state should eventually be running - And Data should be intact - [Teardown] Teardown - -Node power off with replica and 1 node with VA no replica - [Documentation] | =node 1= | =node 2= | =node 3= | - ... | replica | replica | | - ... | | | attached | - ... 
| *power off* | | | - ${number_of_replicas}= Convert To Integer 2 - Set Test Variable ${number_of_replicas} - - ${volume_type}= Evaluate "${volume_type}".lower() - - ${field1}= Convert To String {"spec": {"size": "${${volume_size_gb} * ${Gi}}"}} - ${field2}= Convert To String {"spec": {"numberOfReplicas": ${number_of_replicas}}} - ${field3}= Convert To String {"spec": {"accessMode": "${volume_type}"}} - @{list_of_fields}= Create List ${field1} ${field2} ${field3} - - Given Create Volume With Fields ${list_of_fields} - And Wait for all replicas to be created - And Attach volume to node 3 - And Write data into mount point - - When Power off node 1 - - Then Node 1 should have 1 volume replica - And Node 3 should have 0 volume replica - And Node 1 state should eventually be NotReady - And Volume state should eventually be degraded - And Engine state should eventually be running - And Replica state on node 1 should eventually be stopped - And Replica on node 2 state should be running - - When Power on node 1 - - Then Node 1 state should eventually be Ready - And Volume state should eventually be healthy - And Engine state should be running - And All replicas state should eventually be running - And Data should be intact - [Teardown] Teardown - -Node power off with replica-VA and 1 node with no replica - [Documentation] | =node 1= | =node 2= | =node 3= | - ... | replica | replica | | - ... | attached | | | - ... 
| *power off* | | | - ${number_of_replicas}= Convert To Integer 2 - Set Test Variable ${number_of_replicas} - - ${volume_type}= Evaluate "${volume_type}".lower() - - ${field1}= Convert To String {"spec": {"size": "${${volume_size_gb} * ${Gi}}"}} - ${field2}= Convert To String {"spec": {"numberOfReplicas": ${number_of_replicas}}} - ${field3}= Convert To String {"spec": {"accessMode": "${volume_type}"}} - @{list_of_fields}= Create List ${field1} ${field2} ${field3} - - Given Create Volume With Fields ${list_of_fields} - And Wait for all replicas to be created - And Attach volume to node 1 - And Write data into mount point - - When Power off node 1 - - Then Node 1 should have 1 volume replica - And Node 1 state should eventually be NotReady - And Volume state should eventually be unknown - And Engine state should eventually be unknown - And Replica state on node 1 should eventually be unknown - And Replica on node 2 state should be running - - When Power on node 1 - - Then Node 1 state should eventually be Ready - And Volume state should eventually be healthy - And Engine state should eventually be running - And All replicas state should eventually be running - And Data should be intact - [Teardown] Teardown - -Node power off with no replica-VA - [Documentation] | =node 1= | =node 2= | =node 3= | - ... | replica | replica | | - ... | attached | | | - ... 
| | | *power off* | - ${number_of_replicas}= Convert To Integer 2 - Set Test Variable ${number_of_replicas} - - ${volume_type}= Evaluate "${volume_type}".lower() - - ${field1}= Convert To String {"spec": {"size": "${${volume_size_gb} * ${Gi}}"}} - ${field2}= Convert To String {"spec": {"numberOfReplicas": ${number_of_replicas}}} - ${field3}= Convert To String {"spec": {"accessMode": "${volume_type}"}} - @{list_of_fields}= Create List ${field1} ${field2} ${field3} - - Given Create Volume With Fields ${list_of_fields} - And Wait for all replicas to be created - And Attach volume to node 1 - And Write data into mount point - - When Power off node 3 - - Then Node 1 should have 1 volume replica - And Node 3 should have 0 volume replica - And Node 3 state should eventually be NotReady - And Volume state should eventually be healthy - And Engine state should eventually be running - And All replicas state should eventually be running - - When Power on node 3 - - Then Node 3 state should eventually be Ready - And Volume state should eventually be healthy - And Engine state should be running - And All replicas state should eventually be running - And Data should be intact - [Teardown] Teardown - -Node power off with replica and 1 node with no replica-VA - [Documentation] | =node 1= | =node 2= | =node 3= | - ... | replica | replica | | - ... 
| attached | *power off* | | - ${number_of_replicas}= Convert To Integer 2 - Set Test Variable ${number_of_replicas} - - ${volume_type}= Evaluate "${volume_type}".lower() - - ${field1}= Convert To String {"spec": {"size": "${${volume_size_gb} * ${Gi}}"}} - ${field2}= Convert To String {"spec": {"numberOfReplicas": ${number_of_replicas}}} - ${field3}= Convert To String {"spec": {"accessMode": "${volume_type}"}} - @{list_of_fields}= Create List ${field1} ${field2} ${field3} - - Given Create Volume With Fields ${list_of_fields} - And Wait for all replicas to be created - And Attach volume to node 1 - And Write data into mount point - - When Power off node 2 - - Then Node 1 should have 1 volume replica - And Node 2 should have 1 volume replica - And Node 2 state should eventually be NotReady - And Volume state should eventually be healthy - And Engine state should be running - And Replica on node 1 state should be running - And Replica state on node 2 should eventually be stopped - And Data should be intact - - When Power on node 2 - - Then Node 2 state should eventually be Ready - And Volume state should eventually be healthy - And Engine state should be running - And Replica on node 1 state should be running - And Replica state on node 2 should eventually be stopped - And Data should be intact - [Teardown] Teardown - -Node power off timeout with replica-VA - [Documentation] | =node 1= | =node 2= | =node 3= | - ... | replica | replica | replica | - ... | attached | | | - ... 
| *power off & all pods evicted* | | | - ${number_of_replicas}= Convert To Integer 3 - Set Test Variable ${number_of_replicas} - - ${volume_type}= Evaluate "${volume_type}".lower() - - ${field1}= Convert To String {"spec": {"size": "${${volume_size_gb} * ${Gi}}"}} - ${field2}= Convert To String {"spec": {"numberOfReplicas": ${number_of_replicas}}} - ${field3}= Convert To String {"spec": {"accessMode": "${volume_type}"}} - @{list_of_fields}= Create List ${field1} ${field2} ${field3} - - Given Create Volume With Fields ${list_of_fields} - And Wait for all replicas to be created - And Attach volume to node 1 - And Write data into mount point - - When Power off node 1 - And Waiting for pods on node 1 to be evicted - - Then Node 1 should have 1 volume replica - And Node 1 state should eventually be NotReady - And Volume state should eventually be unknown - And Engine state should eventually be unknown - And Replica state on node 1 should eventually be unknown - And Replica on node 2 state should be running - And Replica on node 3 state should be running - - When Power on node 1 - - Then Node 1 state should eventually be Ready - And Volume state should eventually be healthy - And Engine state should eventually be running - And All replicas state should eventually be running - [Teardown] Teardown - -Node power off timeout with replica - [Documentation] | =node 1= | =node 2= | =node 3= | - ... | replica | replica | replica | - ... 
| attached | *power off & all pods evicted* | | - ${number_of_replicas}= Convert To Integer 3 - Set Test Variable ${number_of_replicas} - - ${volume_type}= Evaluate "${volume_type}".lower() - - ${field1}= Convert To String {"spec": {"size": "${${volume_size_gb} * ${Gi}}"}} - ${field2}= Convert To String {"spec": {"numberOfReplicas": ${number_of_replicas}}} - ${field3}= Convert To String {"spec": {"accessMode": "${volume_type}"}} - @{list_of_fields}= Create List ${field1} ${field2} ${field3} - - Given Create Volume With Fields ${list_of_fields} - And Wait for all replicas to be created - And Attach volume to node 1 - And Write data into mount point - - When Power off node 2 - And Waiting for pods on node 2 to be evicted - - Then Node 1 should have 1 volume replica - And Node 2 should have 1 volume replica - And Node 2 state should eventually be NotReady - And Volume state should eventually be degraded - And Engine state should be running - And Replica on node 1 state should be running - And Replica state on node 2 should eventually be stopped - And Replica on node 3 state should be running - - When Power on node 2 - - Then Node 2 state should eventually be Ready - And Volume state should eventually be healthy - And Engine state should be running - And All replicas state should eventually be running - And Data should be intact - [Teardown] Teardown - -Node power off timeout with VA - [Documentation] | =node 1= | =node 2= | =node 3= | - ... | replica | replica | | - ... | | | attached | - ... 
| | | *power off & all pods evicted* | - ${number_of_replicas}= Convert To Integer 2 - Set Test Variable ${number_of_replicas} - - ${volume_type}= Evaluate "${volume_type}".lower() - - ${field1}= Convert To String {"spec": {"size": "${${volume_size_gb} * ${Gi}}"}} - ${field2}= Convert To String {"spec": {"numberOfReplicas": ${number_of_replicas}}} - ${field3}= Convert To String {"spec": {"accessMode": "${volume_type}"}} - @{list_of_fields}= Create List ${field1} ${field2} ${field3} - - Given Create Volume With Fields ${list_of_fields} - And Wait for all replicas to be created - And Attach volume to node 3 - And Write data into mount point - - When Power off node 3 - And Waiting for pods on node 3 to be evicted - - Then Node 3 should have 0 volume replica - And Node 3 state should eventually be NotReady - And Volume state should eventually be unknown - And Engine state should eventually be unknown - And All replicas state should eventually be running - - When Power on node 3 - - Then Node 3 state should eventually be Ready - And Volume state should eventually be healthy - And Engine state should eventually be running - And All replicas state should eventually be running - [Teardown] Teardown - -Node power off timeout with replica and 1 node with VA no replica - [Documentation] | =node 1= | =node 2= | =node 3= | - ... | replica | replica | | - ... | | | attached | - ... 
| *power off & all pods evicted* | | | - ${number_of_replicas}= Convert To Integer 2 - Set Test Variable ${number_of_replicas} - - ${volume_type}= Evaluate "${volume_type}".lower() - - ${field1}= Convert To String {"spec": {"size": "${${volume_size_gb} * ${Gi}}"}} - ${field2}= Convert To String {"spec": {"numberOfReplicas": ${number_of_replicas}}} - ${field3}= Convert To String {"spec": {"accessMode": "${volume_type}"}} - @{list_of_fields}= Create List ${field1} ${field2} ${field3} - - Given Create Volume With Fields ${list_of_fields} - And Wait for all replicas to be created - And Attach volume to node 3 - And Write data into mount point - - When Power off node 1 - And Waiting for pods on node 1 to be evicted - - Then Node 1 should have 1 volume replica - And Node 3 should have 0 volume replica - And Node 1 state should eventually be NotReady - And Volume state should eventually be degraded - And Engine state should be running - And Replica state on node 1 should eventually be stopped - And Replica on node 2 state should be running - - When Power on node 1 - - Then Node 1 state should eventually be Ready - And Volume state should eventually be healthy - And Engine state should be running - And All replicas state should eventually be running - And Data should be intact - [Teardown] Teardown - -Node power off timeout with replica-VA and 1 node no replica-VA - [Documentation] | =node 1= | =node 2= | =node 3= | - ... | replica | replica | | - ... | attached | | | - ... 
| *power off & all pods evicted* | | | - ${number_of_replicas}= Convert To Integer 2 - Set Test Variable ${number_of_replicas} - - ${volume_type}= Evaluate "${volume_type}".lower() - - ${field1}= Convert To String {"spec": {"size": "${${volume_size_gb} * ${Gi}}"}} - ${field2}= Convert To String {"spec": {"numberOfReplicas": ${number_of_replicas}}} - ${field3}= Convert To String {"spec": {"accessMode": "${volume_type}"}} - @{list_of_fields}= Create List ${field1} ${field2} ${field3} - - Given Create Volume With Fields ${list_of_fields} - And Wait for all replicas to be created - And Attach volume to node 1 - And Write data into mount point - - When Power off node 1 - And Waiting for pods on node 1 to be evicted - - Then Node 1 should have 1 volume replica - And Node 1 state should eventually be NotReady - And Volume state should eventually be unknown - And Engine state should eventually be unknown - And Replica state on node 1 should eventually be unknown - And Replica on node 2 state should be running - - When Power on node 1 - - Then Node 1 state should eventually be Ready - And Volume state should eventually be healthy - And Engine state should eventually be running - And All replicas state should eventually be running - [Teardown] Teardown - -Node power off timeout with no replica-VA - [Documentation] | =node 1= | =node 2= | =node 3= | - ... | replica | replica | | - ... 
| attached | | *power off & all pods evicted* | - ${number_of_replicas}= Convert To Integer 2 - Set Test Variable ${number_of_replicas} - - ${volume_type}= Evaluate "${volume_type}".lower() - - ${field1}= Convert To String {"spec": {"size": "${${volume_size_gb} * ${Gi}}"}} - ${field2}= Convert To String {"spec": {"numberOfReplicas": ${number_of_replicas}}} - ${field3}= Convert To String {"spec": {"accessMode": "${volume_type}"}} - @{list_of_fields}= Create List ${field1} ${field2} ${field3} - - Given Create Volume With Fields ${list_of_fields} - And Wait for all replicas to be created - And Attach volume to node 1 - And Write data into mount point - - When Power off node 3 - And Waiting for pods on node 3 to be evicted - - Then Node 1 should have 1 volume replica - And Node 3 should have 0 volume replica - And Node 3 state should eventually be NotReady - And Volume state should be healthy - And Engine state should be running - And All replicas state should eventually be running - - When Power on node 3 - - Then Node 3 state should eventually be Ready - And Volume state should be healthy - And Engine state should be running - And All replicas state should eventually be running - And Data should be intact - [Teardown] Teardown - -Node power off timeout with replica and 1 node with no replica-VA - [Documentation] | =node 1= | =node 2= | =node 3= | - ... | replica | replica | | - ... 
| attached | *power off & all pods evicted* | | - ${number_of_replicas}= Convert To Integer 2 - Set Test Variable ${number_of_replicas} - - ${volume_type}= Evaluate "${volume_type}".lower() - - ${field1}= Convert To String {"spec": {"size": "${${volume_size_gb} * ${Gi}}"}} - ${field2}= Convert To String {"spec": {"numberOfReplicas": ${number_of_replicas}}} - ${field3}= Convert To String {"spec": {"accessMode": "${volume_type}"}} - @{list_of_fields}= Create List ${field1} ${field2} ${field3} - - Given Create Volume With Fields ${list_of_fields} - And Wait for all replicas to be created - And Attach volume to node 1 - And Write data into mount point - - When Power off node 2 - And Waiting for pods on node 2 to be evicted - - Then Node 1 should have 1 volume replica - And Node 1 should have 1 volume replica - And Node 2 state should eventually be NotReady - And Volume state should be healthy - And Engine state should be running - And Replica on node 1 state should be running - And Replica state on node 2 should eventually be stopped - - When Power on node 2 - - Then Node 2 state should eventually be Ready - And Volume state should be healthy - And Engine state should be running - And Replica on node 1 state should be running - And Replica state on node 2 should eventually be stopped - And Data should be intact - [Teardown] Teardown From d16c87d6fcd855893df581445d8eb302d097d907 Mon Sep 17 00:00:00 2001 From: yangchiu Date: Tue, 3 Oct 2023 00:15:21 +0800 Subject: [PATCH 4/4] test: add README.md (#1541) Co-authored-by: David Ko --- e2e/README.md | 94 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 e2e/README.md diff --git a/e2e/README.md b/e2e/README.md new file mode 100644 index 0000000000..d6a2dafeb2 --- /dev/null +++ b/e2e/README.md @@ -0,0 +1,94 @@ +# Longhorn e2e tests + +### Requirement + +1. A Kubernetes cluster with 3 worker nodes. 
+ - And control node(s) with the following taints: + - `node-role.kubernetes.io/master=true:NoExecute` + - `node-role.kubernetes.io/master=true:NoSchedule` +2. Longhorn system has already been successfully deployed in the cluster. +3. Run the environment check script to check if each node in the cluster fulfills the requirements: +``` +curl -sSfL https://raw.githubusercontent.com/longhorn/longhorn/master/scripts/environment_check.sh | bash +``` + +### Run the test + +1. Deploy all backupstore servers (including `NFS` server and `Minio` as s3 server) for test purposes. +``` +kubectl create -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/minio-backupstore.yaml \ + -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/nfs-backupstore.yaml +``` + +2. Expose Longhorn API: +``` +# for example, using nodeport: +kubectl expose --type=NodePort deployment longhorn-ui -n longhorn-system --port 8000 --name longhorn-ui-nodeport --overrides '{ "apiVersion": "v1","spec":{"ports": [{"port":8000,"protocol":"TCP","targetPort":8000,"nodePort":30000}]}}' +# or using port-forward: +kubectl port-forward services/longhorn-frontend 8080:http -n longhorn-system +``` + +3. Export environment variable `KUBECONFIG`: +``` +export KUBECONFIG=/path/to/your/kubeconfig.yaml +``` + +4. Export environment variable `LONGHORN_CLIENT_URL`: +``` +# for example, if it's exposed by nodeport: +export LONGHORN_CLIENT_URL=http://node-public-ip:30000 +# or exposed by port-forward: +export LONGHORN_CLIENT_URL=http://localhost:8080 +``` + +5. Prepare test environment and run the test +``` +cd e2e +python -m venv .
+source bin/activate +pip install -r requirements.txt + +# to run all the test cases, simply execute: +./run.sh + +# to specify the test case you'd like to run, use "-t" option: +./run.sh -t "Reboot Volume Node While Workload Heavy Writing" + +# to specify the LOOP_COUNT or any other test variables, use "-v" option: +./run.sh -t "Reboot Volume Node While Workload Heavy Writing" -v LOOP_COUNT:100 -v RETRY_COUNT:259200 + +# to specify which test suite you'd like to run, use "-s" option: +./run.sh -s "replica_rebuilding" + +# to modify debug level, use "-L" option: +./run.sh -L DEBUG +``` + +Once the test is completed, the test results can be found in the /tmp/test-report folder. + +### Architecture + +The e2e robot test framework includes 4 layers: + +``` + --------------------------------------------------------------------- | | | tests/*.robot: Test Case Definition | | | --------------------------------------------------------------------- | | | keywords/*.resource: Keyword Definition | | | --------------------------------------------------------------------- | | | libs/keywords: Keyword Implementation | | | --------------------------------------------------------------------- | | | libs/COMPONENT_NAME: Basic operations to manipulate each component | | (volume, replica, workload, node, etc.) | | | --------------------------------------------------------------------- ``` + + __* Each layer can only call functions from the next layer or the same layer. Skip-layer is strictly forbidden. For example, the Keyword Definition layer can only call functions in the Keyword Implementation layer or the Keyword Definition layer; directly calling functions in the Basic operations layer is strictly forbidden.__ \ No newline at end of file