From 4ab685034543cf012004102d5cb53f5fd0c9c628 Mon Sep 17 00:00:00 2001
From: Chin-Ya Huang
Date: Wed, 4 Dec 2024 15:58:02 +0800
Subject: [PATCH] test(robot): pvc expand more than storage maximum size should fail

longhorn/longhorn-6633

Signed-off-by: Chin-Ya Huang
---
 e2e/keywords/persistentvolumeclaim.resource   |  9 ++++
 e2e/keywords/workload.resource                | 19 ++++++++
 e2e/libs/keywords/common_keywords.py          |  6 +++
 .../persistentvolumeclaim_keywords.py         |  4 ++
 e2e/libs/keywords/volume_keywords.py          | 20 +++++++++
 e2e/libs/node/node.py                         |  6 ++-
 .../test_persistentvolumeclaim.robot          | 44 +++++++++++++++++++
 7 files changed, 107 insertions(+), 1 deletion(-)
 create mode 100644 e2e/tests/regression/test_persistentvolumeclaim.robot

diff --git a/e2e/keywords/persistentvolumeclaim.resource b/e2e/keywords/persistentvolumeclaim.resource
index 73d5b25e6..100740210 100644
--- a/e2e/keywords/persistentvolumeclaim.resource
+++ b/e2e/keywords/persistentvolumeclaim.resource
@@ -23,3 +23,12 @@ Delete persistentvolumeclaim ${claim_id}
 Delete persistentvolumeclaim for volume ${volume_id}
     ${claim_name} =    generate_name_with_suffix    volume    ${volume_id}
     delete_persistentvolumeclaim    ${claim_name}
+
+Assert persistentvolumeclaim ${claim_id} requested size remains ${size} for at least ${period} seconds
+    ${claim_name} =    generate_name_with_suffix    claim    ${claim_id}
+    FOR    ${i}    IN RANGE    ${period}
+        ${expected_size_byte} =    convert_size_to_bytes    ${size}    to_str=True
+        ${current_size_byte} =    get_claim_requested_size    ${claim_name}
+        Should Be Equal    ${current_size_byte}    ${expected_size_byte}
+        Sleep    1
+    END
diff --git a/e2e/keywords/workload.resource b/e2e/keywords/workload.resource
index 13e0926df..1c4eef61b 100644
--- a/e2e/keywords/workload.resource
+++ b/e2e/keywords/workload.resource
@@ -238,3 +238,22 @@ Expand ${workload_kind} ${workload_id} volume by ${size}
     ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
     ${new_size} =    convert_size_to_bytes    ${size}
     expand_workload_claim_size    ${workload_name}    ${new_size}
+
+Expand ${workload_kind} ${workload_id} volume more than storage maximum size should fail
+    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
+    ${volume_name} =    get_workload_volume_name    ${workload_name}
+    ${node_name} =    get_volume_node    ${volume_name}
+    ${max_size} =    get_volume_node_disk_storage_maximum    ${volume_name}    ${node_name}
+    ${new_size} =    evaluate    ${max_size} + 1
+
+    Run Keyword And Expect Error    Failed to expand*    expand_workload_claim_size    ${workload_name}    ${new_size}    skip_retry=True
+
+Assert volume size of ${workload_kind} ${workload_id} remains ${size} for at least ${period} seconds
+    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
+    ${volume_name} =    get_workload_volume_name    ${workload_name}
+    FOR    ${i}    IN RANGE    ${period}
+        ${expected_size_byte} =    convert_size_to_bytes    ${size}    to_str=True
+        ${current_size_byte} =    get_volume_size    ${volume_name}
+        Should Be Equal    ${current_size_byte}    ${expected_size_byte}
+        Sleep    1
+    END
diff --git a/e2e/libs/keywords/common_keywords.py b/e2e/libs/keywords/common_keywords.py
index 141487559..35d550d32 100644
--- a/e2e/libs/keywords/common_keywords.py
+++ b/e2e/libs/keywords/common_keywords.py
@@ -1,6 +1,7 @@
 from node import Node
 from node_exec import NodeExec
 
+from utility.utility import convert_size_to_bytes
 from utility.utility import init_k8s_api_client
 from utility.utility import generate_name_with_suffix
 
@@ -25,3 +26,8 @@ def get_node_by_index(self, node_id):
     def cleanup_node_exec(self):
         for node_name in Node().list_node_names_by_role("all"):
             NodeExec(node_name).cleanup()
+
+    def convert_size_to_bytes(self, size, to_str=False):
+        if to_str:
+            return str(convert_size_to_bytes(size))
+        return convert_size_to_bytes(size)
diff --git a/e2e/libs/keywords/persistentvolumeclaim_keywords.py b/e2e/libs/keywords/persistentvolumeclaim_keywords.py
index 912950251..9c28836d7 100644
--- a/e2e/libs/keywords/persistentvolumeclaim_keywords.py
+++ b/e2e/libs/keywords/persistentvolumeclaim_keywords.py
@@ -34,3 +34,7 @@ def expand_persistentvolumeclaim_size_by_mib(self, claim_name, size_in_mib):
 
         logging(f'Expanding persistentvolumeclaim {claim_name} by {size_in_mib} MiB')
         self.claim.set_annotation(claim_name, ANNOT_EXPANDED_SIZE, str(expanded_size))
+
+    def get_claim_requested_size(self, claim_name):
+        claim = self.claim.get(claim_name)
+        return claim.spec.resources.requests['storage']
diff --git a/e2e/libs/keywords/volume_keywords.py b/e2e/libs/keywords/volume_keywords.py
index 369be7266..0b7ec9353 100644
--- a/e2e/libs/keywords/volume_keywords.py
+++ b/e2e/libs/keywords/volume_keywords.py
@@ -9,6 +9,7 @@
 from utility.constant import ANNOT_REPLICA_NAMES
 from utility.constant import LABEL_TEST
 from utility.constant import LABEL_TEST_VALUE
+from utility.constant import LONGHORN_NAMESPACE
 
 from utility.utility import logging
 from utility.utility import get_retry_count_and_interval
@@ -344,3 +345,22 @@ def get_volume_checksum(self, volume_name):
 
     def validate_volume_setting(self, volume_name, setting_name, value):
         return self.volume.validate_volume_setting(volume_name, setting_name, value)
+
+    def get_volume_size(self, volume_name):
+        volume = self.volume.get(volume_name)
+        return volume['spec']['size']
+
+    def get_volume_node_disk_storage_maximum(self, volume_name, node_name):
+        replica_list = self.replica.get(volume_name, node_name)
+        replica = replica_list[0]
+        replica_name = replica['metadata']['name']
+        node = self.node.get_node_by_name(node_name, namespace=LONGHORN_NAMESPACE)
+        for diskName in node.disks:
+            disk = node.disks[diskName]
+
+            for scheduledReplica in disk['scheduledReplica']:
+                if scheduledReplica == replica_name:
+                    logging(f"Found replica {scheduledReplica} on node {node_name} scheduled to disk {diskName}")
+                    return disk['storageMaximum']
+
+        raise Exception(f"Failed to find storageMaximum for volume {volume_name} replica {replica_name} on node {node_name}")
diff --git a/e2e/libs/node/node.py b/e2e/libs/node/node.py
index d7ed52342..7e48fe5d2 100644
--- a/e2e/libs/node/node.py
+++ b/e2e/libs/node/node.py
@@ -6,6 +6,7 @@
 from robot.libraries.BuiltIn import BuiltIn
 
 from utility.constant import DISK_BEING_SYNCING
+from utility.constant import LONGHORN_NAMESPACE
 from utility.constant import NODE_UPDATE_RETRY_INTERVAL
 from utility.utility import get_longhorn_client
 from utility.utility import get_retry_count_and_interval
@@ -99,7 +100,10 @@ def get_node_by_index(self, index, role="worker"):
         nodes = self.list_node_names_by_role(role)
         return nodes[int(index)]
 
-    def get_node_by_name(self, node_name):
+    def get_node_by_name(self, node_name, namespace="kube-system"):
+        if namespace == LONGHORN_NAMESPACE:
+            return get_longhorn_client().by_id_node(node_name)
+
         core_api = client.CoreV1Api()
         return core_api.read_node(node_name)
 
diff --git a/e2e/tests/regression/test_persistentvolumeclaim.robot b/e2e/tests/regression/test_persistentvolumeclaim.robot
new file mode 100644
index 000000000..a79ee437a
--- /dev/null
+++ b/e2e/tests/regression/test_persistentvolumeclaim.robot
@@ -0,0 +1,44 @@
+*** Settings ***
+Documentation    PersistentVolumeClaim Test Cases
+
+Test Tags    regression
+
+Resource    ../keywords/common.resource
+Resource    ../keywords/deployment.resource
+Resource    ../keywords/persistentvolumeclaim.resource
+Resource    ../keywords/setting.resource
+Resource    ../keywords/workload.resource
+
+Test Setup    Set test environment
+Test Teardown    Cleanup test resources
+
+*** Variables ***
+${LOOP_COUNT}    1
+${RETRY_COUNT}    300
+${RETRY_INTERVAL}    1
+
+*** Test Cases ***
+
+Test persistentvolumeclaim expand more than storage maximum size should fail
+    [Tags]    volume expansion
+    [Documentation]    Verify that a PersistentVolumeClaim cannot be expanded beyond
+    ...    the storage maximum size.
+    ...
+    ...    Issue: https://github.com/longhorn/longhorn/issues/6633
+
+    Given Set setting storage-over-provisioning-percentage to 100
+    And Create persistentvolumeclaim 0 using RWX volume    storage_size=2GiB
+    And Create deployment 0 with persistentvolumeclaim 0
+    And Write 10 MB data to file data.txt in deployment 0
+
+    FOR    ${i}    IN RANGE    ${LOOP_COUNT}
+        When Expand deployment 0 volume more than storage maximum size should fail
+        Then Assert volume size of deployment 0 remains 2GiB for at least 5 seconds
+        And Assert persistentvolumeclaim 0 requested size remains 2GiB for at least 5 seconds
+        And Check deployment 0 data in file data.txt is intact
+
+        When Expand deployment 0 volume by 1 GiB
+        Then Assert persistentvolumeclaim 0 requested size remains 3GiB for at least 5 seconds
+        And Assert volume size of deployment 0 remains 3GiB for at least 5 seconds
+        And Check deployment 0 data in file data.txt is intact
+    END
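
Note for reviewers: the new "Assert ... remains ... for at least ... seconds" keywords compare the PVC request and the volume spec size as strings via Should Be Equal, which is why the convert_size_to_bytes keyword gains a to_str option. The sketch below illustrates the kind of conversion those keywords rely on; it is an illustrative approximation only, not the actual helper in the utility.utility module under e2e/libs, which this patch merely imports.

# Illustrative sketch only: the real convert_size_to_bytes lives in the
# utility.utility module and is not shown in this patch; the unit handling
# here is an assumption.
BINARY_UNITS = {"KiB": 1024, "MiB": 1024 ** 2, "GiB": 1024 ** 3, "TiB": 1024 ** 4}

def convert_size_to_bytes(size, to_str=False):
    """Convert a human-readable size such as '2GiB' or '1 GiB' into bytes."""
    size = size.strip()
    for unit, factor in BINARY_UNITS.items():
        if size.endswith(unit):
            value = int(size[:-len(unit)].strip()) * factor
            # Robot's Should Be Equal compares values as strings, hence the
            # optional string form used by the new assertion keywords.
            return str(value) if to_str else value
    # Assume the input is already a plain byte count, e.g. '2147483648'.
    return size if to_str else int(size)

# Example: the test's 2GiB PVC request expressed in bytes.
assert convert_size_to_bytes("2GiB", to_str=True) == "2147483648"

The assertion mirrors the 2GiB request used by the new test case and runs under any Python 3 interpreter.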