From e3816d12ea82ecc19c006ae7858c1f9d84705f10 Mon Sep 17 00:00:00 2001
From: Yang Chiu
Date: Wed, 13 Nov 2024 16:19:27 +0800
Subject: [PATCH] test(robot): migrate test_replica_auto_balance_disk_in_pressure

Signed-off-by: Yang Chiu
---
 e2e/keywords/common.resource               |  2 +-
 e2e/keywords/node.resource                 | 32 ++++++++++-
 e2e/keywords/replica.resource              |  8 +++
 e2e/keywords/statefulset.resource          |  8 +++
 e2e/keywords/workload.resource             | 10 ++++
 e2e/libs/keywords/node_keywords.py         | 29 ++++++++--
 e2e/libs/keywords/replica_keywords.py      |  3 ++
 e2e/libs/keywords/statefulset_keywords.py  |  6 +--
 e2e/libs/node/node.py                      | 62 ++++++++++++++++++++--
 e2e/libs/replica/base.py                   |  2 +-
 e2e/libs/replica/crd.py                    |  6 ++-
 e2e/libs/replica/replica.py                |  4 +-
 e2e/libs/replica/rest.py                   |  2 +-
 e2e/libs/workload/statefulset.py           |  6 ++-
 e2e/tests/regression/test_scheduling.robot | 53 +++++++++++++++++-
 e2e/tests/regression/test_v2.robot         |  1 +
 e2e/tests/regression/test_volume.robot     |  1 +
 17 files changed, 213 insertions(+), 22 deletions(-)

diff --git a/e2e/keywords/common.resource b/e2e/keywords/common.resource
index ee0c548159..05f3bd0186 100644
--- a/e2e/keywords/common.resource
+++ b/e2e/keywords/common.resource
@@ -34,7 +34,7 @@ Set test environment
     ${host_provider}=    Get Environment Variable    HOST_PROVIDER
     ${disk_path}=    Set Variable If    "${host_provider}" == "harvester"    /dev/vdc    /dev/xvdh
     FOR    ${worker_node}    IN    @{worker_nodes}
-        add_disk    ${worker_node}    block    ${disk_path}
+        add_disk    block-disk    ${worker_node}    block    ${disk_path}
     END

 Cleanup test resources
diff --git a/e2e/keywords/node.resource b/e2e/keywords/node.resource
index 60cdcde8ae..16f68be374 100644
--- a/e2e/keywords/node.resource
+++ b/e2e/keywords/node.resource
@@ -3,12 +3,13 @@ Documentation    Node Keywords
 Library    ../libs/keywords/common_keywords.py
 Library    ../libs/keywords/node_keywords.py
+Library    ../libs/keywords/volume_keywords.py

 *** Keywords ***
 Add ${disk_type} type disk ${disk_path} for all worker nodes
     ${worker_nodes}=    get_worker_nodes
     FOR    ${worker_node}    IN    @{worker_nodes}
-        add_disk    ${worker_node}    ${disk_type}    ${disk_path}
+        add_disk    ${disk_type}-disk    ${worker_node}    ${disk_type}    ${disk_path}
     END

 Set node ${node_id} with
@@ -31,3 +32,32 @@ Disable node ${node_id} default disk
 Enable node ${node_id} default disk
     ${node_name} =    get_node_by_index    ${node_id}
     enable_default_disk    ${node_name}
+
+Disable disk ${disk_id} scheduling on node ${node_id}
+    ${node_name} =    get_node_by_index    ${node_id}
+    ${disk_name} =    generate_name_with_suffix    disk    ${disk_id}
+    disable_disk    ${node_name}    ${disk_name}
+
+Enable disk ${disk_id} scheduling on node ${node_id}
+    ${node_name} =    get_node_by_index    ${node_id}
+    ${disk_name} =    generate_name_with_suffix    disk    ${disk_id}
+    enable_disk    ${node_name}    ${disk_name}
+
+Check node ${node_id} disk ${disk_id} is in pressure
+    ${node_name} =    get_node_by_index    ${node_id}
+    ${disk_name} =    generate_name_with_suffix    disk    ${disk_id}
+    wait_for_disk_in_pressure    ${node_name}    ${disk_name}
+
+Check node ${node_id} disk ${disk_id} is not in pressure
+    ${node_name} =    get_node_by_index    ${node_id}
+    ${disk_name} =    generate_name_with_suffix    disk    ${disk_id}
+    wait_for_disk_not_in_pressure    ${node_name}    ${disk_name}
+
+Create ${disk_size} Gi disk ${disk_id} on node ${node_id}
+    ${node_name} =    get_node_by_index    ${node_id}
+    ${disk_name} =    generate_name_with_suffix    disk    ${disk_id}
+    create_volume    ${disk_name}    size=${disk_size}Gi    numberOfReplicas=1
+    attach_volume    ${disk_name}    ${node_name}
+    wait_for_volume_healthy    ${disk_name}
+    ${mount_path} =    mount_disk    ${disk_name}    ${node_name}
+    add_disk    ${disk_name}    ${node_name}    filesystem    ${mount_path}
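For context on the add_disk signature change above: the keyword layer ultimately posts a disk map to the Longhorn node API. A minimal sketch of that payload, assuming a longhorn_client handle and the diskUpdate call used by update_disks() in e2e/libs/node/node.py (both assumptions based on the hunks below; names here are illustrative):

    # Illustrative only: mirrors the disk dict built in node_keywords.add_disk below
    disk_name = "block-disk"                     # explicit name, no longer f"{type}-disk"
    node = longhorn_client.by_id_node("worker-1")
    disks = dict(node.disks)                     # preserve existing disks
    disks[disk_name] = {
        "diskType": "block",                     # "block" or "filesystem"
        "path": "/dev/xvdh",
        "allowScheduling": True,
    }
    node.diskUpdate(disks=disks)                 # assumed longhorn-client call, retried by update_disks()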
diff --git a/e2e/keywords/replica.resource b/e2e/keywords/replica.resource
index a1df20eec3..7e1b531e7b 100644
--- a/e2e/keywords/replica.resource
+++ b/e2e/keywords/replica.resource
@@ -3,8 +3,16 @@ Documentation    Longhorn replica related keywords
 Library    ../libs/keywords/common_keywords.py
 Library    ../libs/keywords/replica_keywords.py
+Library    ../libs/keywords/node_keywords.py

 *** Keywords ***
 Volume ${volume_id} replica ${setting_name} should be ${setting_value}
     ${volume_name} =    generate_name_with_suffix    volume    ${volume_id}
     validate_replica_setting    ${volume_name}    ${setting_name}    ${setting_value}
+
+There should be replicas running on node ${node_id} disk ${disk_id}
+    ${node_name} =    get_node_by_index    ${node_id}
+    ${disk_name} =    generate_name_with_suffix    disk    ${disk_id}
+    ${disk_uuid} =    get_disk_uuid    ${node_name}    ${disk_name}
+    ${replicas} =    get_replicas    volume_name=    node_name=${node_name}    disk_uuid=${disk_uuid}
+    Should Be True    len(${replicas}) > 0
diff --git a/e2e/keywords/statefulset.resource b/e2e/keywords/statefulset.resource
index 6ebf77bda8..5b679c230b 100644
--- a/e2e/keywords/statefulset.resource
+++ b/e2e/keywords/statefulset.resource
@@ -15,6 +15,14 @@ Create statefulset ${statefulset_id} using ${volume_type} volume with ${sc_name}
     ${statefulset_name} =    generate_name_with_suffix    statefulset    ${statefulset_id}
     create_statefulset    ${statefulset_name}    ${volume_type}    ${sc_name}
+
+Create statefulset ${statefulset_id} using ${volume_type} volume with ${sc_name} storageclass and size ${size} Mi
+    ${statefulset_name} =    generate_name_with_suffix    statefulset    ${statefulset_id}
+    create_statefulset    ${statefulset_name}    ${volume_type}    ${sc_name}    ${size}Mi
+
+Create statefulset ${statefulset_id} using ${volume_type} volume with ${sc_name} storageclass and size ${size} Gi
+    ${statefulset_name} =    generate_name_with_suffix    statefulset    ${statefulset_id}
+    create_statefulset    ${statefulset_name}    ${volume_type}    ${sc_name}    ${size}Gi
+
 Scale statefulset ${statefulset_id} to ${replicaset_size}
     ${statefulset_name} =    generate_name_with_suffix    statefulset    ${statefulset_id}
     scale_statefulset    ${statefulset_name}    ${replicaset_size}
diff --git a/e2e/keywords/workload.resource b/e2e/keywords/workload.resource
index c3eb835b6c..a81ed33b2f 100644
--- a/e2e/keywords/workload.resource
+++ b/e2e/keywords/workload.resource
@@ -8,6 +8,7 @@
 Library    ../libs/keywords/volume_keywords.py
 Library    ../libs/keywords/workload_keywords.py
 Library    ../libs/keywords/host_keywords.py
 Library    ../libs/keywords/k8s_keywords.py
+Library    ../libs/keywords/replica_keywords.py

 *** Keywords ***
 Create pod ${pod_id} using volume ${volume_id}
@@ -213,3 +214,12 @@ Delete Longhorn ${workload_kind} ${workload_name} pod
     ${pod_name} =    get_workload_pod_name    ${workload_name}    longhorn-system
     Log    ${pod_name}
     delete_pod    ${pod_name}    longhorn-system
+
+Check volume of ${workload_kind} ${workload_id} replica on node ${node_id} disk ${disk_id}
+    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
+    ${volume_name} =    get_workload_volume_name    ${workload_name}
+    ${node_name} =    get_node_by_index    ${node_id}
+    ${disk_name} =    generate_name_with_suffix    disk    ${disk_id}
+    ${disk_uuid} =    get_disk_uuid    ${node_name}    ${disk_name}
+    ${replicas} =    get_replicas    volume_name=${volume_name}    node_name=${node_name}    disk_uuid=${disk_uuid}
+    Should Be True    len(${replicas}) > 0
diff --git a/e2e/libs/keywords/node_keywords.py b/e2e/libs/keywords/node_keywords.py
index 832834f6c6..9e120de788 100644
--- a/e2e/libs/keywords/node_keywords.py
+++ b/e2e/libs/keywords/node_keywords.py
@@ -11,10 +11,14 @@ def __init__(self):
     def list_node_names_by_role(self, role):
         return self.node.list_node_names_by_role(role)

-    def add_disk(self, node_name, type, path):
-        logging(f"Adding {type} type disk {path} to node {node_name}")
+    def mount_disk(self, disk_name, node_name):
+        logging(f"Mount device /dev/longhorn/{disk_name} on node {node_name}")
+        return self.node.mount_disk(disk_name, node_name)
+
+    def add_disk(self, disk_name, node_name, type, path):
+        logging(f"Adding {type} type disk {disk_name} {path} to node {node_name}")
         disk = {
-            f"{type}-disk": {
+            f"{disk_name}": {
                 "diskType": type,
                 "path": path,
                 "allowScheduling": True
@@ -38,6 +42,13 @@ def set_node(self, node_name, allowScheduling=True, evictionRequested=False):
         logging(f"Setting node {node_name}; scheduling={allowScheduling}; evictionRequested={evictionRequested}")
         self.node.set_node(node_name, allowScheduling, evictionRequested)

+    def disable_disk(self, node_name, disk_name):
+        self.node.set_disk_scheduling(node_name, disk_name, allowScheduling=False)
+
+    def enable_disk(self, node_name, disk_name):
+        self.node.set_disk_scheduling(node_name, disk_name, allowScheduling=True)
+
+
     def disable_node_scheduling(self, node_name):
         self.node.set_node_scheduling(node_name, allowScheduling=False)

@@ -52,3 +63,15 @@ def reset_node_schedule(self):

     def check_node_is_not_schedulable(self, node_name):
         self.node.check_node_schedulable(node_name, schedulable="False")
+
+    def is_disk_in_pressure(self, node_name, disk_name):
+        return self.node.is_disk_in_pressure(node_name, disk_name)
+
+    def wait_for_disk_in_pressure(self, node_name, disk_name):
+        self.node.wait_for_disk_in_pressure(node_name, disk_name)
+
+    def wait_for_disk_not_in_pressure(self, node_name, disk_name):
+        self.node.wait_for_disk_not_in_pressure(node_name, disk_name)
+
+    def get_disk_uuid(self, node_name, disk_name):
+        return self.node.get_disk_uuid(node_name, disk_name)
diff --git a/e2e/libs/keywords/replica_keywords.py b/e2e/libs/keywords/replica_keywords.py
index a9f0966c2f..f02b757c6b 100644
--- a/e2e/libs/keywords/replica_keywords.py
+++ b/e2e/libs/keywords/replica_keywords.py
@@ -8,3 +8,6 @@ def __init__(self):

     def validate_replica_setting(self, volume_name, setting_name, value):
         return self.replica.validate_replica_setting(volume_name, setting_name, value)
+
+    def get_replicas(self, volume_name=None, node_name=None, disk_uuid=None):
+        return self.replica.get(volume_name, node_name, disk_uuid)
diff --git a/e2e/libs/keywords/statefulset_keywords.py b/e2e/libs/keywords/statefulset_keywords.py
index 4baa61ab61..8b3256c7eb 100644
--- a/e2e/libs/keywords/statefulset_keywords.py
+++ b/e2e/libs/keywords/statefulset_keywords.py
@@ -28,9 +28,9 @@ def cleanup_statefulsets(self):
         for statefulset in statefulsets.items:
             self.delete_statefulset(statefulset.metadata.name)

-    def create_statefulset(self, name, volume_type="RWO", sc_name="longhorn"):
-        logging(f'Creating {volume_type} statefulset {name} with {sc_name} storageclass')
-        create_statefulset(name, volume_type, sc_name)
+    def create_statefulset(self, name, volume_type="RWO", sc_name="longhorn", size=None):
+        logging(f'Creating {volume_type} statefulset {name} with {sc_name} storageclass and size = {size}')
+        create_statefulset(name, volume_type, sc_name, size)

     def delete_statefulset(self, name):
         logging(f'Deleting statefulset {name}')
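The new size argument threads from the Robot keyword through create_statefulset down to the PVC template. Roughly, the workload helper patched later in this diff performs the equivalent of the following (template path from the repo; the loaded manifest shape is assumed):

    import yaml

    with open("./templates/workload/statefulset.yaml") as f:
        manifest_dict = yaml.safe_load(f)

    size = "316Mi"  # e.g. from "... storageclass and size 316 Mi"
    if size:
        # bump the PVC request on the statefulset's volumeClaimTemplate
        pvc_spec = manifest_dict['spec']['volumeClaimTemplates'][0]['spec']
        pvc_spec['resources']['requests']['storage'] = size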
diff --git a/e2e/libs/node/node.py b/e2e/libs/node/node.py
index 636706d492..97792ba465 100644
--- a/e2e/libs/node/node.py
+++ b/e2e/libs/node/node.py
@@ -1,5 +1,6 @@
 import time
 import re
+import os

 from kubernetes import client
 from robot.libraries.BuiltIn import BuiltIn
@@ -9,15 +10,27 @@
 from utility.utility import get_longhorn_client
 from utility.utility import get_retry_count_and_interval
 from utility.utility import logging
-
+from node_exec import NodeExec

 class Node:

     DEFAULT_DISK_PATH = "/var/lib/longhorn/"
+    DEFAULT_VOLUME_PATH = "/dev/longhorn/"

     def __init__(self):
         self.retry_count, self.retry_interval = get_retry_count_and_interval()

+    def mount_disk(self, disk_name, node_name):
+        mount_path = os.path.join(self.DEFAULT_DISK_PATH, disk_name)
+        device_path = os.path.join(self.DEFAULT_VOLUME_PATH, disk_name)
+        cmd = f"mkdir -p {mount_path}"
+        res = NodeExec(node_name).issue_cmd(cmd)
+        cmd = f"mkfs.ext4 {device_path}"
+        res = NodeExec(node_name).issue_cmd(cmd)
+        cmd = f"mount {device_path} {mount_path}"
+        res = NodeExec(node_name).issue_cmd(cmd)
+        return mount_path
+
     def update_disks(self, node_name, disks):
         node = get_longhorn_client().by_id_node(node_name)
         for _ in range(self.retry_count):
@@ -37,9 +50,9 @@ def wait_for_disk_update(self, node_name, disk_num):
         disks = node.disks
         for d in disks:
             if disks[d]["diskUUID"] == "" or \
-                not disks[d]["conditions"] or \
-                disks[d]["conditions"]["Ready"]["status"] != "True" or \
-                disks[d]["conditions"]["Schedulable"]["status"] != "True":
+                (disks[d]["allowScheduling"] and \
+                    (not disks[d]["conditions"] or \
+                    disks[d]["conditions"]["Ready"]["status"] != "True")):
                 all_updated = False
                 break
         if all_updated:
@@ -59,6 +72,10 @@ def reset_disks(self, node_name):
         for disk_name, disk in iter(node.disks.items()):
             if disk.path != self.DEFAULT_DISK_PATH:
                 disk.allowScheduling = False
+                logging(f"Disabling scheduling disk {disk_name} on node {node_name}")
+            else:
+                disk.allowScheduling = True
+                logging(f"Enabling scheduling disk {disk_name} on node {node_name}")
         self.update_disks(node_name, node.disks)

         disks = {}
@@ -66,8 +83,9 @@ def reset_disks(self, node_name):
             if disk.path == self.DEFAULT_DISK_PATH:
                 disks[disk_name] = disk
                 disk.allowScheduling = True
+                logging(f"Keeping disk {disk_name} on node {node_name}")
             else:
-                logging(f"Try to remove disk {disk_name} from node {node_name}")
+                logging(f"Removing disk {disk_name} from node {node_name}")
         self.update_disks(node_name, disks)

     def is_accessing_node_by_index(self, node):
@@ -183,6 +201,14 @@ def set_default_disk_scheduling(self, node_name, allowScheduling):
             disk.allowScheduling = allowScheduling
         self.update_disks(node_name, node.disks)

+    def set_disk_scheduling(self, node_name, disk_name, allowScheduling):
+        node = get_longhorn_client().by_id_node(node_name)
+
+        for name, disk in iter(node.disks.items()):
+            if name == disk_name:
+                disk.allowScheduling = allowScheduling
+        self.update_disks(node_name, node.disks)
+
     def check_node_schedulable(self, node_name, schedulable):
         node = get_longhorn_client().by_id_node(node_name)
         for _ in range(self.retry_count):
@@ -194,3 +220,29 @@ def check_node_schedulable(self, node_name, schedulable):
     def is_node_schedulable(self, node_name):
         node = get_longhorn_client().by_id_node(node_name)
         return node["conditions"]["Schedulable"]["status"]
+
+    def is_disk_in_pressure(self, node_name, disk_name):
+        node = get_longhorn_client().by_id_node(node_name)
+        return node["disks"][disk_name]["conditions"]["Schedulable"]["reason"] == "DiskPressure"
+
+    def wait_for_disk_in_pressure(self, node_name, disk_name):
+        for i in range(self.retry_count):
+            is_in_pressure = self.is_disk_in_pressure(node_name, disk_name)
+            logging(f"Waiting for disk {disk_name} on node {node_name} in pressure ... ({i})")
+            if is_in_pressure:
+                break
+            time.sleep(self.retry_interval)
+        assert self.is_disk_in_pressure(node_name, disk_name), f"Waiting for node {node_name} disk {disk_name} in pressure failed: {get_longhorn_client().by_id_node(node_name)}"
+
+    def wait_for_disk_not_in_pressure(self, node_name, disk_name):
+        for i in range(self.retry_count):
+            is_in_pressure = self.is_disk_in_pressure(node_name, disk_name)
+            logging(f"Waiting for disk {disk_name} on node {node_name} not in pressure ... ({i})")
+            if not is_in_pressure:
+                break
+            time.sleep(self.retry_interval)
+        assert not self.is_disk_in_pressure(node_name, disk_name), f"Waiting for node {node_name} disk {disk_name} not in pressure failed: {get_longhorn_client().by_id_node(node_name)}"
+
+    def get_disk_uuid(self, node_name, disk_name):
+        node = get_longhorn_client().by_id_node(node_name)
+        return node["disks"][disk_name]["diskUUID"]
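The pressure helpers added above poll fields of the Longhorn node object. For reference, the subset of that object they touch looks roughly like this (field names taken from the accesses above; values illustrative):

    node = {
        "disks": {
            "disk-0-abc": {
                "diskUUID": "2b47...",                          # read by get_disk_uuid
                "allowScheduling": True,
                "conditions": {
                    "Ready":       {"status": "True"},
                    "Schedulable": {"status": "False",
                                    "reason": "DiskPressure"},  # read by is_disk_in_pressure
                },
            },
        },
    }
    in_pressure = node["disks"]["disk-0-abc"]["conditions"]["Schedulable"]["reason"] == "DiskPressure"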
diff --git a/e2e/libs/replica/base.py b/e2e/libs/replica/base.py
index a3b6681af6..c9ce12c01c 100644
--- a/e2e/libs/replica/base.py
+++ b/e2e/libs/replica/base.py
@@ -4,7 +4,7 @@ class Base(ABC):

     @abstractmethod
-    def get(self, volume_name, node_name):
+    def get(self, volume_name, node_name, disk_uuid):
         return NotImplemented

     @abstractmethod
diff --git a/e2e/libs/replica/crd.py b/e2e/libs/replica/crd.py
index c238bfa5e9..f2e4a07124 100644
--- a/e2e/libs/replica/crd.py
+++ b/e2e/libs/replica/crd.py
@@ -10,12 +10,14 @@ class CRD(Base):
     def __init__(self):
         self.obj_api = client.CustomObjectsApi()

-    def get(self, volume_name, node_name=None):
+    def get(self, volume_name=None, node_name=None, disk_uuid=None):
         label_selector = []
-        if volume_name != "":
+        if volume_name:
             label_selector.append(f"longhornvolume={volume_name}")
         if node_name:
             label_selector.append(f"longhornnode={node_name}")
+        if disk_uuid:
+            label_selector.append(f"longhorndiskuuid={disk_uuid}")

         replicas = self.obj_api.list_namespaced_custom_object(
             group="longhorn.io",
diff --git a/e2e/libs/replica/replica.py b/e2e/libs/replica/replica.py
index 55893c0cbe..31312a8e82 100644
--- a/e2e/libs/replica/replica.py
+++ b/e2e/libs/replica/replica.py
@@ -16,8 +16,8 @@ def __init__(self):
     def delete(self, volume_name="", node_name=""):
         return self.replica.delete(volume_name, node_name)

-    def get(self, volume_name, node_name):
-        return self.replica.get(volume_name, node_name)
+    def get(self, volume_name, node_name, disk_uuid=None):
+        return self.replica.get(volume_name, node_name, disk_uuid)

     def wait_for_rebuilding_start(self, volume_name, node_name):
         return self.replica.wait_for_rebuilding_start(volume_name,node_name)
diff --git a/e2e/libs/replica/rest.py b/e2e/libs/replica/rest.py
index f3347157dd..f01bb9ce34 100644
--- a/e2e/libs/replica/rest.py
+++ b/e2e/libs/replica/rest.py
@@ -12,7 +12,7 @@ class Rest(Base):
     def __init__(self):
         pass

-    def get(self, volume_name, node_name):
+    def get(self, volume_name, node_name, disk_uuid):
         return NotImplemented

     def delete(self, volume_name, node_name):
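Because the Replica custom resources carry longhornvolume, longhornnode, and longhorndiskuuid labels (as the crd.py hunk above relies on), the new disk_uuid filter is just one more term in the label selector. A standalone sketch of the same query outside the test framework (API version and namespace are assumptions, matching Longhorn's usual longhorn.io/v1beta2 CRDs in longhorn-system):

    from kubernetes import client, config

    config.load_kube_config()
    replicas = client.CustomObjectsApi().list_namespaced_custom_object(
        group="longhorn.io",
        version="v1beta2",                 # assumed CRD version
        namespace="longhorn-system",       # assumed namespace
        plural="replicas",
        label_selector="longhornnode=worker-1,longhorndiskuuid=2b47...",
    )
    print(len(replicas["items"]))          # replicas on that specific disk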
"./templates/workload/statefulset.yaml" with open(filepath, 'r') as f: namespace = 'default' @@ -30,6 +30,10 @@ def create_statefulset(statefulset_name, volume_type, sc_name): if volume_type == 'RWX': manifest_dict['spec']['volumeClaimTemplates'][0]['spec']['accessModes'][0] = 'ReadWriteMany' + # correct request storage size + if size: + manifest_dict['spec']['volumeClaimTemplates'][0]['spec']['resources']['requests']['storage'] = size + api = client.AppsV1Api() statefulset = api.create_namespaced_stateful_set( body=manifest_dict, diff --git a/e2e/tests/regression/test_scheduling.robot b/e2e/tests/regression/test_scheduling.robot index d0f1fdeb4a..d5375b5290 100644 --- a/e2e/tests/regression/test_scheduling.robot +++ b/e2e/tests/regression/test_scheduling.robot @@ -5,11 +5,13 @@ Test Tags regression Resource ../keywords/common.resource Resource ../keywords/volume.resource +Resource ../keywords/replica.resource Resource ../keywords/setting.resource -Resource ../keywords/deployment.resource -Resource ../keywords/persistentvolumeclaim.resource +Resource ../keywords/storageclass.resource +Resource ../keywords/statefulset.resource Resource ../keywords/workload.resource Resource ../keywords/k8s.resource +Resource ../keywords/node.resource Test Setup Set test environment Test Teardown Cleanup test resources @@ -51,3 +53,50 @@ Test Soft Anti Affinity Scheduling Then Wait until volume 0 replicas rebuilding completed And Wait for volume 0 healthy And Check volume 0 data is intact + +Test Replica Auto Balance Disk In Pressure + [Tags] coretest + [Documentation] Test replica auto balance disk in pressure + ... This test simulates a scenario where a disk reaches a certain + ... pressure threshold (80%), triggering the replica auto balance + ... to rebuild the replicas to another disk with enough available + ... space. Replicas should not be rebuilt at the same time. + ... + ... 
+    ...                Issue: https://github.com/longhorn/longhorn/issues/4105
+    Given Set setting replica-soft-anti-affinity to false
+    And Set setting replica-auto-balance-disk-pressure-percentage to 80
+
+    And Create 1 Gi disk 0 on node 0
+    And Create 1 Gi disk 1 on node 0
+    And Disable disk 1 scheduling on node 0
+    And Disable node 0 default disk
+    And Disable node 1 scheduling
+    And Disable node 2 scheduling
+
+    And Create storageclass one-replica with    numberOfReplicas=1    dataEngine=${DATA_ENGINE}
+    # 1 Gi disk, but only ~950 Mi is usable: 950 Mi / 3 ≈ 316 Mi per volume
+    And Create statefulset 0 using RWO volume with one-replica storageclass and size 316 Mi
+    And Create statefulset 1 using RWO volume with one-replica storageclass and size 316 Mi
+    And Create statefulset 2 using RWO volume with one-replica storageclass and size 316 Mi
+    And Check volume of statefulset 0 replica on node 0 disk 0
+    And Check volume of statefulset 1 replica on node 0 disk 0
+    And Check volume of statefulset 2 replica on node 0 disk 0
+
+    # Write 950 Mi * 80% / 3 ≈ 254 Mi to each volume to push disk 0 over the pressure threshold
+    And Write 254 MB data to file data.bin in statefulset 0
+    And Write 254 MB data to file data.bin in statefulset 1
+    And Write 254 MB data to file data.bin in statefulset 2
+    And Check node 0 disk 0 is in pressure
+
+    When Enable disk 1 scheduling on node 0
+    And Set setting replica-auto-balance to best-effort
+
+    # Auto balance should happen
+    Then There should be replicas running on node 0 disk 0
+    And There should be replicas running on node 0 disk 1
+    And Check node 0 disk 0 is not in pressure
+    And Check node 0 disk 1 is not in pressure
+
+    And Check statefulset 0 data in file data.bin is intact
+    And Check statefulset 1 data in file data.bin is intact
+    And Check statefulset 2 data in file data.bin is intact
diff --git a/e2e/tests/regression/test_v2.robot b/e2e/tests/regression/test_v2.robot
index fddba633a7..6554876994 100644
--- a/e2e/tests/regression/test_v2.robot
+++ b/e2e/tests/regression/test_v2.robot
@@ -55,6 +55,7 @@ Degraded Volume Replica Rebuilding
     END

 V2 Volume Should Block Trim When Volume Is Degraded
+    [Tags]    cluster
     Given Set setting auto-salvage to true
     And Create storageclass longhorn-test with    dataEngine=v2
     And Create persistentvolumeclaim 0 using RWO volume with longhorn-test storageclass
diff --git a/e2e/tests/regression/test_volume.robot b/e2e/tests/regression/test_volume.robot
index f6954594d9..c55aa49586 100644
--- a/e2e/tests/regression/test_volume.robot
+++ b/e2e/tests/regression/test_volume.robot
@@ -19,6 +19,7 @@ Test Teardown    Cleanup test resources
 ${LOOP_COUNT}    1
 ${RETRY_COUNT}    300
 ${RETRY_INTERVAL}    1
+${DATA_ENGINE}    v1

 *** Keywords ***
 Create volume with invalid name should fail
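A quick sanity check of the sizing math the test comments rely on: three 316 Mi volumes fit on the ~950 Mi usable disk, and writing 254 Mi to each crosses the 80% pressure threshold.

    usable = 950            # Mi usable on the 1 Gi disk (per the test comment)
    threshold = 0.80        # replica-auto-balance-disk-pressure-percentage
    volumes = 3

    assert volumes * 316 <= usable          # all three replicas schedule on disk 0
    written = volumes * 254                 # 762 Mi
    assert written > usable * threshold     # 762 > 760 -> disk 0 is in pressure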