diff --git a/e2e/keywords/persistentvolumeclaim.resource b/e2e/keywords/persistentvolumeclaim.resource
index 0ded2f02ca..1007402107 100644
--- a/e2e/keywords/persistentvolumeclaim.resource
+++ b/e2e/keywords/persistentvolumeclaim.resource
@@ -8,8 +8,9 @@ Library    ../libs/keywords/volume_keywords.py
 
 *** Keywords ***
 Create persistentvolumeclaim ${claim_id} using ${volume_type} volume
+    [Arguments]    &{config}
     ${claim_name} =    generate_name_with_suffix    claim    ${claim_id}
-    create_persistentvolumeclaim    ${claim_name}    ${volume_type}
+    create_persistentvolumeclaim    ${claim_name}    ${volume_type}    &{config}
 
 Create persistentvolumeclaim ${claim_id} using ${volume_type} volume with ${sc_name} storageclass
     ${claim_name} =    generate_name_with_suffix    claim    ${claim_id}
@@ -20,5 +21,14 @@ Delete persistentvolumeclaim ${claim_id}
     delete_persistentvolumeclaim    ${claim_name}
 
 Delete persistentvolumeclaim for volume ${volume_id}
-    ${pvc_name} =    generate_name_with_suffix    volume    ${volume_id}
-    delete_persistentvolumeclaim    ${pvc_name}
+    ${claim_name} =    generate_name_with_suffix    volume    ${volume_id}
+    delete_persistentvolumeclaim    ${claim_name}
+
+Assert persistentvolumeclaim ${claim_id} requested size remains ${size} for at least ${period} seconds
+    ${claim_name} =    generate_name_with_suffix    claim    ${claim_id}
+    FOR    ${i}    IN RANGE    ${period}
+        ${expected_size_byte} =    convert_size_to_bytes    ${size}    to_str=True
+        ${current_size_byte} =    get_claim_requested_size    ${claim_name}
+        Should Be Equal    ${current_size_byte}    ${expected_size_byte}
+        Sleep    1
+    END
diff --git a/e2e/keywords/statefulset.resource b/e2e/keywords/statefulset.resource
index 5b679c230b..bba98e6439 100644
--- a/e2e/keywords/statefulset.resource
+++ b/e2e/keywords/statefulset.resource
@@ -35,10 +35,6 @@ Scale up statefulset ${statefulset_id} to attach volume
     ${statefulset_name} =    generate_name_with_suffix    statefulset    ${statefulset_id}
     scale_statefulset_up    ${statefulset_name}
 
-Expand statefulset ${statefulset_id} volume by ${size} MiB
-    ${statefulset_name} =    generate_name_with_suffix    statefulset    ${statefulset_id}
-    expand_workload_claim_size_by_mib    ${statefulset_name}    ${size}
-
 Write ${size} MB data to file ${file_name} in statefulset ${statefulset_id}
     ${statefulset_name} =    generate_name_with_suffix    statefulset    ${statefulset_id}
     write_workload_pod_random_data    ${statefulset_name}    ${size}    ${file_name}
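The assertion keyword added to persistentvolumeclaim.resource is a poll-and-compare loop: once per second it re-reads the claim's requested storage and fails if the value drifts from the expected size. A minimal standalone sketch of the same check, using the official kubernetes Python client; the claim name, namespace, and size are illustrative:

    import time

    from kubernetes import client, config


    def assert_requested_size_remains(claim_name, expected, period_sec, namespace="default"):
        # Re-read the PVC every second; any change within the window is a failure.
        core_v1 = client.CoreV1Api()
        for _ in range(period_sec):
            pvc = core_v1.read_namespaced_persistent_volume_claim(claim_name, namespace)
            current = pvc.spec.resources.requests["storage"]
            assert current == expected, f"requested size changed: {current} != {expected}"
            time.sleep(1)


    if __name__ == "__main__":
        config.load_kube_config()
        assert_requested_size_remains("claim-0", "2147483648", period_sec=5)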
diff --git a/e2e/keywords/workload.resource b/e2e/keywords/workload.resource
index 18bae1c0e1..44fc7be0f5 100644
--- a/e2e/keywords/workload.resource
+++ b/e2e/keywords/workload.resource
@@ -208,10 +208,10 @@ Trim ${workload_kind} ${workload_id} volume should ${condition}
     END
 
 Delete Longhorn ${workload_kind} ${workload_name} pod on node ${node_id}
-    ${node_name} =   get_node_by_index   ${node_id}
+    ${node_name} =    get_node_by_index    ${node_id}
 
     IF    '${workload_name}' == 'engine-image'
-        ${label_selector} =   Set Variable   longhorn.io/component=engine-image
+        ${label_selector} =    Set Variable    longhorn.io/component=engine-image
     ELSE IF    '${workload_name}' == 'instance-manager'
         ${label_selector} =    Set Variable    longhorn.io/component=instance-manager
     ELSE
@@ -232,3 +232,34 @@ Check volume of ${workload_kind} ${workload_id} replica on node ${node_id} disk
     ${disk_uuid} =    get_disk_uuid    ${node_name}    ${disk_name}
     ${replicas} =    get_replicas    volume_name=${volume_name}    node_name=${node_name}    disk_uuid=${disk_uuid}
     Should Be True    len(${replicas}) > 0
+
+Expand ${workload_kind} ${workload_id} volume to ${size}
+    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
+    ${new_size} =    convert_size_to_bytes    ${size}
+
+    expand_workload_claim_size    ${workload_name}    ${new_size}
+
+Expand ${workload_kind} ${workload_id} volume with additional ${size}
+    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
+    ${new_size} =    convert_size_to_bytes    ${size}
+
+    expand_workload_claim_size_with_additional_bytes    ${workload_name}    ${new_size}
+
+Expand ${workload_kind} ${workload_id} volume more than storage maximum size should fail
+    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
+    ${volume_name} =    get_workload_volume_name    ${workload_name}
+    ${node_name} =    get_volume_node    ${volume_name}
+    ${max_size} =    get_volume_node_disk_storage_maximum    ${volume_name}    ${node_name}
+    ${new_size} =    evaluate    ${max_size} + 1
+
+    Run Keyword And Expect Error    Failed to expand*    expand_workload_claim_size    ${workload_name}    ${new_size}    skip_retry=True
+
+Assert volume size of ${workload_kind} ${workload_id} remains ${size} for at least ${period} seconds
+    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
+    ${volume_name} =    get_workload_volume_name    ${workload_name}
+    FOR    ${i}    IN RANGE    ${period}
+        ${expected_size_byte} =    convert_size_to_bytes    ${size}    to_str=True
+        ${current_size_byte} =    get_volume_size    ${volume_name}
+        Should Be Equal    ${current_size_byte}    ${expected_size_byte}
+        Sleep    1
+    END
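The should-fail keyword asks for one byte more than the disk's storageMaximum and expects the expansion to be rejected; skip_retry=True makes the library raise on the first attempt instead of looping. A hedged sketch of that pattern in plain Python, where expand_pvc stands in for the suite's expand_workload_claim_size helper:

    def expand_beyond_maximum_should_fail(expand_pvc, storage_maximum):
        # One byte past what the node's disk can provision can never be scheduled.
        new_size = storage_maximum + 1
        try:
            expand_pvc(new_size)  # the suite passes skip_retry=True: fail fast, no retry loop
        except Exception as e:
            assert "Failed to expand" in str(e)
            return
        raise AssertionError("expansion past storageMaximum unexpectedly succeeded")


    # Illustrative use with a stub that always rejects, mimicking the API error:
    def rejecting_expand(size):
        raise RuntimeError(f"Failed to expand to {size}")

    expand_beyond_maximum_should_fail(rejecting_expand, storage_maximum=21474836480)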
diff --git a/e2e/libs/backup/backup.py b/e2e/libs/backup/backup.py
index 7bfcfebbe8..9f3e297c23 100644
--- a/e2e/libs/backup/backup.py
+++ b/e2e/libs/backup/backup.py
@@ -1,8 +1,8 @@
 from backup.base import Base
 from backup.crd import CRD
 from backup.rest import Rest
+
 from strategy import LonghornOperationStrategy
-from utility.utility import logging
 
 
 class Backup(Base):
diff --git a/e2e/libs/backup/base.py b/e2e/libs/backup/base.py
index 791a7d30a7..228031d294 100644
--- a/e2e/libs/backup/base.py
+++ b/e2e/libs/backup/base.py
@@ -1,7 +1,9 @@
 from abc import ABC, abstractmethod
+
 from utility.utility import set_annotation
 from utility.utility import get_annotation_value
+
 
 class Base(ABC):
 
     ANNOT_ID = "test.longhorn.io/backup-id"
diff --git a/e2e/libs/backup/rest.py b/e2e/libs/backup/rest.py
index 7fe9fa3dfe..61e0f7c65d 100644
--- a/e2e/libs/backup/rest.py
+++ b/e2e/libs/backup/rest.py
@@ -1,11 +1,15 @@
+import time
+
 from backup.base import Base
+
+from snapshot import Snapshot as RestSnapshot
+
 from utility.utility import logging
+from utility.utility import get_all_crs
 from utility.utility import get_longhorn_client
 from utility.utility import get_retry_count_and_interval
-from utility.utility import get_all_crs
+
 from volume import Rest as RestVolume
-from snapshot import Snapshot as RestSnapshot
-import time
 
 
 class Rest(Base):
diff --git a/e2e/libs/backupstore/base.py b/e2e/libs/backupstore/base.py
index 704f6fab68..05775406e8 100644
--- a/e2e/libs/backupstore/base.py
+++ b/e2e/libs/backupstore/base.py
@@ -1,10 +1,9 @@
 from abc import ABC, abstractmethod
-import time
-import os
 import hashlib
+import os
+
 from kubernetes import client
-from utility.utility import get_retry_count_and_interval
-from utility.utility import get_longhorn_client
+
 
 class Base(ABC):
diff --git a/e2e/libs/backupstore/nfs.py b/e2e/libs/backupstore/nfs.py
index 51c31ced9e..149cd6ec74 100644
--- a/e2e/libs/backupstore/nfs.py
+++ b/e2e/libs/backupstore/nfs.py
@@ -1,6 +1,8 @@
-from backupstore.base import Base
 import os
 import subprocess
+
+from backupstore.base import Base
+
 from urllib.parse import urlparse
 
 class Nfs(Base):
diff --git a/e2e/libs/backupstore/s3.py b/e2e/libs/backupstore/s3.py
index c19f2635c9..899d1c19e4 100644
--- a/e2e/libs/backupstore/s3.py
+++ b/e2e/libs/backupstore/s3.py
@@ -1,11 +1,14 @@
-from backupstore.base import Base
 import os
 import base64
 import json
 import tempfile
 import subprocess
+
 from minio import Minio
 from minio.error import ResponseError
+
+from backupstore.base import Base
+
 from urllib.parse import urlparse
 from utility.utility import logging
 
diff --git a/e2e/libs/engine_image/engine_image.py b/e2e/libs/engine_image/engine_image.py
index a6cb7f1776..a05662a21b 100644
--- a/e2e/libs/engine_image/engine_image.py
+++ b/e2e/libs/engine_image/engine_image.py
@@ -1,5 +1,5 @@
 import time
-from utility.utility import logging
+
 from utility.utility import get_longhorn_client
 from utility.utility import get_retry_count_and_interval
 
diff --git a/e2e/libs/host/aws.py b/e2e/libs/host/aws.py
index bcce99086d..180610f972 100644
--- a/e2e/libs/host/aws.py
+++ b/e2e/libs/host/aws.py
@@ -1,9 +1,12 @@
 import boto3
 import time
+
 from host.constant import NODE_REBOOT_DOWN_TIME_SECOND
+from host.base import Base
+
 from utility.utility import logging
 from utility.utility import wait_for_cluster_ready
-from host.base import Base
+
 
 class Aws(Base):
diff --git a/e2e/libs/host/harvester.py b/e2e/libs/host/harvester.py
index f856cd983d..4e23c88ea4 100644
--- a/e2e/libs/host/harvester.py
+++ b/e2e/libs/host/harvester.py
@@ -1,14 +1,18 @@
-import requests
 import os
+import requests
 import time
+import urllib3
+
+from host.base import Base
 from host.constant import NODE_REBOOT_DOWN_TIME_SECOND
+
 from utility.utility import logging
 from utility.utility import wait_for_cluster_ready
 from utility.utility import get_retry_count_and_interval
-from host.base import Base
-import urllib3
+
 
 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+
 
 class Harvester(Base):
     def __init__(self):
diff --git a/e2e/libs/k8s/k8s.py b/e2e/libs/k8s/k8s.py
index 5fcb3bb404..82245fb9b6 100644
--- a/e2e/libs/k8s/k8s.py
+++ b/e2e/libs/k8s/k8s.py
@@ -1,18 +1,21 @@
 import time
 import asyncio
+
 from kubernetes import client
 from kubernetes.client.rest import ApiException
-from workload.pod import create_pod
-from workload.pod import delete_pod
-from workload.pod import new_pod_manifest
-from workload.pod import wait_for_pod_status
-from workload.pod import get_pod
-from workload.constant import IMAGE_UBUNTU
-from utility.utility import subprocess_exec_cmd
+
+from robot.libraries.BuiltIn import BuiltIn
+
 from utility.utility import logging
 from utility.utility import get_retry_count_and_interval
+from utility.utility import subprocess_exec_cmd
 from utility.utility import subprocess_exec_cmd_with_timeout
-from robot.libraries.BuiltIn import BuiltIn
+
+from workload.constant import IMAGE_UBUNTU
+from workload.pod import create_pod
+from workload.pod import delete_pod
+from workload.pod import new_pod_manifest
+
 
 async def restart_kubelet(node_name, downtime_in_sec=10):
     manifest = new_pod_manifest(
diff --git a/e2e/libs/keywords/common_keywords.py b/e2e/libs/keywords/common_keywords.py
index 1414875590..35d550d32e 100644
--- a/e2e/libs/keywords/common_keywords.py
+++ b/e2e/libs/keywords/common_keywords.py
@@ -1,6 +1,7 @@
 from node import Node
 from node_exec import NodeExec
 
+from utility.utility import convert_size_to_bytes
 from utility.utility import init_k8s_api_client
 from utility.utility import generate_name_with_suffix
 
@@ -25,3 +26,8 @@ def get_node_by_index(self, node_id):
     def cleanup_node_exec(self):
         for node_name in Node().list_node_names_by_role("all"):
             NodeExec(node_name).cleanup()
+
+    def convert_size_to_bytes(self, size, to_str=False):
+        if to_str:
+            return str(convert_size_to_bytes(size))
+        return convert_size_to_bytes(size)
diff --git a/e2e/libs/keywords/persistentvolumeclaim_keywords.py b/e2e/libs/keywords/persistentvolumeclaim_keywords.py
index 0a49fe9fec..c58c212b64 100644
--- a/e2e/libs/keywords/persistentvolumeclaim_keywords.py
+++ b/e2e/libs/keywords/persistentvolumeclaim_keywords.py
@@ -20,9 +20,9 @@ def cleanup_persistentvolumeclaims(self):
         for claim in claims.items:
             self.delete_persistentvolumeclaim(claim.metadata.name)
 
-    def create_persistentvolumeclaim(self, name, volume_type="RWO", sc_name="longhorn"):
+    def create_persistentvolumeclaim(self, name, volume_type="RWO", sc_name="longhorn", storage_size="3GiB"):
         logging(f'Creating {volume_type} persistentvolumeclaim {name} with {sc_name} storageclass')
-        return self.claim.create(name, volume_type, sc_name)
+        return self.claim.create(name, volume_type, sc_name, storage_size)
 
     def delete_persistentvolumeclaim(self, name):
         logging(f'Deleting persistentvolumeclaim {name}')
@@ -34,3 +34,7 @@ def expand_persistentvolumeclaim_size_by_mib(self, claim_name, size_in_mib):
         logging(f'Expanding persistentvolumeclaim {claim_name} by {size_in_mib} MiB')
 
         self.claim.set_annotation(claim_name, ANNOT_EXPANDED_SIZE, str(expanded_size))
+
+    def get_claim_requested_size(self, claim_name):
+        claim = self.claim.get(claim_name)
+        return claim.spec.resources.requests['storage']
diff --git a/e2e/libs/keywords/volume_keywords.py b/e2e/libs/keywords/volume_keywords.py
index 369be72668..0b7ec9353b 100644
--- a/e2e/libs/keywords/volume_keywords.py
+++ b/e2e/libs/keywords/volume_keywords.py
@@ -9,6 +9,7 @@
 from utility.constant import ANNOT_REPLICA_NAMES
 from utility.constant import LABEL_TEST
 from utility.constant import LABEL_TEST_VALUE
+from utility.constant import LONGHORN_NAMESPACE
 from utility.utility import logging
 from utility.utility import get_retry_count_and_interval
 
@@ -344,3 +345,22 @@ def get_volume_checksum(self, volume_name):
 
     def validate_volume_setting(self, volume_name, setting_name, value):
         return self.volume.validate_volume_setting(volume_name, setting_name, value)
+
+    def get_volume_size(self, volume_name):
+        volume = self.volume.get(volume_name)
+        return volume['spec']['size']
+
+    def get_volume_node_disk_storage_maximum(self, volume_name, node_name):
+        replica_list = self.replica.get(volume_name, node_name)
+        replica = replica_list[0]
+        replica_name = replica['metadata']['name']
+        node = self.node.get_node_by_name(node_name, namespace=LONGHORN_NAMESPACE)
+        for diskName in node.disks:
+            disk = node.disks[diskName]
+
+            for scheduledReplica in disk['scheduledReplica']:
+                if scheduledReplica == replica_name:
+                    logging(f"Found replica {scheduledReplica} on node {node_name} scheduled to disk {diskName}")
+                    return disk['storageMaximum']
+
+        raise Exception(f"Failed to find storageMaximum for volume {volume_name} replica {replica_name} on node {node_name}")
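get_volume_node_disk_storage_maximum walks the Longhorn node's disks and matches the replica name against each disk's scheduledReplica map. The same lookup over a plain dict, with illustrative data shaped like a node's disk status:

    def find_storage_maximum(disks, replica_name):
        # scheduledReplica maps replica name -> size scheduled on this disk.
        for disk_name, disk in disks.items():
            if replica_name in disk["scheduledReplica"]:
                return disk["storageMaximum"]
        raise LookupError(f"replica {replica_name} not scheduled on any disk")


    # Illustrative disk status; names and sizes are made up for the example.
    disks = {
        "default-disk": {
            "storageMaximum": 21474836480,
            "scheduledReplica": {"vol-0-r-abc123": 2147483648},
        },
    }
    assert find_storage_maximum(disks, "vol-0-r-abc123") == 21474836480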
diff --git a/e2e/libs/keywords/workload_keywords.py b/e2e/libs/keywords/workload_keywords.py
index d34fb1567d..1d7aae58f3 100644
--- a/e2e/libs/keywords/workload_keywords.py
+++ b/e2e/libs/keywords/workload_keywords.py
@@ -28,10 +28,10 @@
 from utility.constant import ANNOT_CHECKSUM
 from utility.constant import ANNOT_EXPANDED_SIZE
 from utility.constant import LABEL_LONGHORN_COMPONENT
+from utility.utility import convert_size_to_bytes
 from utility.utility import logging
 
 from volume import Volume
-from volume.constant import MEBIBYTE
 
 
 class workload_keywords:
@@ -158,12 +158,21 @@ def wait_for_workload_volume_detached(self, workload_name):
         logging(f'Waiting for {workload_name} volume {volume_name} to be detached')
         self.volume.wait_for_volume_detached(volume_name)
 
-    def expand_workload_claim_size_by_mib(self, workload_name, size_in_mib, claim_index=0):
+    def expand_workload_claim_size(self, workload_name, size_in_byte, claim_index=0, skip_retry=False):
         claim_name = get_workload_persistent_volume_claim_name(workload_name, index=claim_index)
-        size_in_byte = int(size_in_mib) * MEBIBYTE
+        current_size = self.persistentvolumeclaim.get(claim_name).spec.resources.requests['storage']
+        current_size_byte = convert_size_to_bytes(current_size)
 
-        logging(f'Expanding {workload_name} persistentvolumeclaim {claim_name} by {size_in_mib} MiB')
-        self.persistentvolumeclaim.expand(claim_name, size_in_byte)
+        logging(f'Expanding {workload_name} persistentvolumeclaim {claim_name} from {current_size_byte} to {size_in_byte}')
+        self.persistentvolumeclaim.expand(claim_name, size_in_byte, skip_retry=skip_retry)
+
+    def expand_workload_claim_size_with_additional_bytes(self, workload_name, size_in_byte, claim_index=0, skip_retry=False):
+        claim_name = get_workload_persistent_volume_claim_name(workload_name, index=claim_index)
+        current_size = self.persistentvolumeclaim.get(claim_name).spec.resources.requests['storage']
+        current_size_byte = convert_size_to_bytes(current_size)
+
+        logging(f'Expanding {workload_name} persistentvolumeclaim {claim_name} current size {current_size_byte} with additional {size_in_byte}')
+        self.persistentvolumeclaim.expand_with_additional_bytes(claim_name, size_in_byte, skip_retry=skip_retry)
 
     def wait_for_workload_claim_size_expanded(self, workload_name, claim_index=0):
         claim_name = get_workload_persistent_volume_claim_name(workload_name, index=claim_index)
diff --git a/e2e/libs/node/node.py b/e2e/libs/node/node.py
index d7ed523427..7e48fe5d21 100644
--- a/e2e/libs/node/node.py
+++ b/e2e/libs/node/node.py
@@ -6,6 +6,7 @@
 from robot.libraries.BuiltIn import BuiltIn
 
 from utility.constant import DISK_BEING_SYNCING
+from utility.constant import LONGHORN_NAMESPACE
 from utility.constant import NODE_UPDATE_RETRY_INTERVAL
 from utility.utility import get_longhorn_client
 from utility.utility import get_retry_count_and_interval
@@ -99,7 +100,10 @@ def get_node_by_index(self, index, role="worker"):
         nodes = self.list_node_names_by_role(role)
         return nodes[int(index)]
 
-    def get_node_by_name(self, node_name):
+    def get_node_by_name(self, node_name, namespace="kube-system"):
+        if namespace == LONGHORN_NAMESPACE:
+            return get_longhorn_client().by_id_node(node_name)
+
         core_api = client.CoreV1Api()
         return core_api.read_node(node_name)
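The rename away from expand_workload_claim_size_by_mib splits expansion into two entry points: expand_workload_claim_size takes an absolute target in bytes, while expand_workload_claim_size_with_additional_bytes adds a delta to the current requested size. A toy sketch of the arithmetic difference, with sizes chosen to match the tests below:

    # Absolute target: the new requested size is exactly what the caller asks for.
    def expand_to(current_size_bytes, target_size_bytes):
        return target_size_bytes

    # Relative target: the delta is added on top of whatever is requested now.
    def expand_by(current_size_bytes, additional_bytes):
        return current_size_bytes + additional_bytes

    assert expand_to(2 * 1024**3, 3 * 1024**3) == 3221225472     # "expand ... to 3 GiB"
    assert expand_by(2 * 1024**3, 100 * 1024**2) == 2252341248   # "with additional 100 MiB"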
diff --git a/e2e/libs/persistentvolumeclaim/crd.py b/e2e/libs/persistentvolumeclaim/crd.py
index 2bd63d7fb4..688ca42f34 100644
--- a/e2e/libs/persistentvolumeclaim/crd.py
+++ b/e2e/libs/persistentvolumeclaim/crd.py
@@ -84,9 +84,10 @@ def get_volume_name(self, claim_name, claim_namespace="default"):
         claim = self.get(claim_name, claim_namespace)
         return claim.spec.volume_name
 
-    def expand(self, claim_name, size, namespace="default"):
-        for i in range(self.retry_count):
-            logging(f"Trying to expand pvc {claim_name} to size {size} ... ({i})")
+    def expand(self, claim_name, size, namespace="default", skip_retry=False):
+        retry_count = 1 if skip_retry else self.retry_count
+        for i in range(retry_count):
+            logging(f"Trying to expand PVC {claim_name} to size {size} ... ({i})")
             try:
                 self.core_v1_api.patch_namespaced_persistent_volume_claim(
                     name=claim_name,
diff --git a/e2e/libs/persistentvolumeclaim/persistentvolumeclaim.py b/e2e/libs/persistentvolumeclaim/persistentvolumeclaim.py
index 513ff3efe1..71d32afffa 100644
--- a/e2e/libs/persistentvolumeclaim/persistentvolumeclaim.py
+++ b/e2e/libs/persistentvolumeclaim/persistentvolumeclaim.py
@@ -11,6 +11,7 @@
 from utility.constant import ANNOT_EXPANDED_SIZE
 from utility.constant import LABEL_TEST
 from utility.constant import LABEL_TEST_VALUE
+from utility.utility import convert_size_to_bytes
 from utility.utility import get_retry_count_and_interval
 from utility.utility import logging
 
@@ -23,7 +24,9 @@
         if self._strategy == LonghornOperationStrategy.CRD:
             self.claim = CRD()
 
-    def create(self, name, volume_type, sc_name):
+    def create(self, name, volume_type, sc_name, storage_size="3GiB"):
+        storage_size_bytes = convert_size_to_bytes(storage_size)
+
         filepath = "./templates/workload/pvc.yaml"
         with open(filepath, 'r') as f:
             namespace = 'default'
@@ -38,6 +41,9 @@
             # correct storageclass name
             manifest_dict['spec']['storageClassName'] = sc_name
 
+            # correct storage request
+            manifest_dict['spec']['resources']['requests']['storage'] = storage_size_bytes
+
             # correct access mode`
             if volume_type == 'RWX':
                 manifest_dict['spec']['accessModes'][0] = 'ReadWriteMany'
@@ -95,11 +101,14 @@ def get_annotation_value(self, claim_name, annotation_key):
     def get_volume_name(self, claim_name):
         return self.claim.get_volume_name(claim_name)
 
-    def expand(self, claim_name, size_in_byte):
+    def expand(self, claim_name, size_in_byte, skip_retry=False):
+        expanded_size = self.claim.expand(claim_name, size_in_byte, skip_retry=skip_retry)
+        self.set_annotation(claim_name, ANNOT_EXPANDED_SIZE, str(expanded_size))
+
+    def expand_with_additional_bytes(self, claim_name, size_in_byte, skip_retry=False):
         pvc = self.claim.get(claim_name)
         current_size = int(pvc.spec.resources.requests['storage'])
         target_size = current_size + size_in_byte
-        logging(f"Expanding PVC {claim_name} from {current_size} to {target_size}")
 
-        expanded_size = self.claim.expand(claim_name, target_size)
+        expanded_size = self.claim.expand(claim_name, target_size, skip_retry=skip_retry)
         self.set_annotation(claim_name, ANNOT_EXPANDED_SIZE, str(expanded_size))
diff --git a/e2e/libs/utility/utility.py b/e2e/libs/utility/utility.py
index a9a5f16d31..c690895aae 100644
--- a/e2e/libs/utility/utility.py
+++ b/e2e/libs/utility/utility.py
@@ -164,6 +164,7 @@ def get_cr(group, version, namespace, plural, name):
             logging(f"Getting namespaced custom object error: {e}")
             time.sleep(retry_interval)
 
+
 def get_all_crs(group, version, namespace, plural):
     api = client.CustomObjectsApi()
     retry_count, retry_interval = get_retry_count_and_interval()
@@ -175,6 +176,7 @@ def get_all_crs(group, version, namespace, plural):
             logging(f"Getting namespaced custom object error: {e}")
             time.sleep(retry_interval)
 
+
 def filter_cr(group, version, namespace, plural, field_selector="", label_selector=""):
     api = client.CustomObjectsApi()
     try:
@@ -183,6 +185,7 @@ def filter_cr(group, version, namespace, plural, field_selector="", label_select
     except ApiException as e:
         logging(f"Listing namespaced custom object: {e}")
 
+
 def list_namespaced_pod(namespace, label_selector=""):
     api = client.CoreV1Api()
     retry_count, retry_interval = get_retry_count_and_interval()
@@ -197,6 +200,7 @@ def list_namespaced_pod(namespace, label_selector=""):
             time.sleep(retry_interval)
     assert False, f"Failed to list namespaced {namespace} pods"
 
+
 def set_annotation(group, version, namespace, plural, name, annotation_key, annotation_value):
     api = client.CustomObjectsApi()
     # retry conflict error
@@ -334,3 +338,18 @@ def get_name_suffix(*args):
         if arg:
             suffix += f"-{arg}"
     return suffix
+
+
+def convert_size_to_bytes(size):
+    size = size.replace(" ", "")
+
+    if size.endswith("GiB"):
+        return int(size[:-3]) * 1024 * 1024 * 1024
+
+    if size.endswith("MiB"):
+        return int(size[:-3]) * 1024 * 1024
+
+    if size.isdigit():
+        return int(size)
+
+    raise ValueError(f"Invalid size format: {size}")
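convert_size_to_bytes accepts binary units (GiB, MiB) and bare digit strings, and rejects everything else. A few quick checks of its behavior, assuming the e2e/libs tree is on PYTHONPATH as the suite arranges:

    from utility.utility import convert_size_to_bytes

    assert convert_size_to_bytes("2GiB") == 2147483648
    assert convert_size_to_bytes("3 GiB") == 3221225472   # spaces are stripped first
    assert convert_size_to_bytes("100MiB") == 104857600
    assert convert_size_to_bytes("512") == 512            # plain digits pass through as bytes

    try:
        convert_size_to_bytes("1GB")                      # decimal units are not recognized
    except ValueError as e:
        print(e)                                          # Invalid size format: 1GB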
diff --git a/e2e/libs/volume/crd.py b/e2e/libs/volume/crd.py
index 80078c081b..5539cc57ca 100644
--- a/e2e/libs/volume/crd.py
+++ b/e2e/libs/volume/crd.py
@@ -1,5 +1,4 @@
 import time
-import os
 
 from kubernetes import client
 from kubernetes.client.rest import ApiException
diff --git a/e2e/tests/negative/stress_cpu.robot b/e2e/tests/negative/stress_cpu.robot
index 68047dc40e..c74cc4fbcb 100644
--- a/e2e/tests/negative/stress_cpu.robot
+++ b/e2e/tests/negative/stress_cpu.robot
@@ -50,7 +50,7 @@ Stress Volume Node CPU When Volume Is Online Expanding
     FOR    ${i}    IN RANGE    ${LOOP_COUNT}
         And Stress CPU of volume nodes
 
-        When Expand statefulset 0 volume by 100 MiB
+        When Expand statefulset 0 volume with additional 100 MiB
         Then Wait for statefulset 0 volume size expanded
 
         And Check statefulset 0 data in file 0.txt is intact
@@ -64,7 +64,7 @@ Stress Volume Node CPU When Volume Is Offline Expanding
         And Scale down statefulset 0 to detach volume
         And Stress CPU of all worker nodes
 
-        When Expand statefulset 0 volume by 100 MiB
+        When Expand statefulset 0 volume with additional 100 MiB
         Then Wait for statefulset 0 volume size expanded
 
         And Wait for statefulset 0 volume detached
diff --git a/e2e/tests/negative/stress_filesystem.robot b/e2e/tests/negative/stress_filesystem.robot
index 094aa6bf3c..a2d39c9f8b 100644
--- a/e2e/tests/negative/stress_filesystem.robot
+++ b/e2e/tests/negative/stress_filesystem.robot
@@ -49,7 +49,7 @@ Stress Volume Node Filesystem When Volume Is Online Expanding
     And Stress filesystem of statefulset 0 volume node
 
     FOR    ${i}    IN RANGE    ${LOOP_COUNT}
-        When Expand statefulset 0 volume by 100 MiB
+        When Expand statefulset 0 volume with additional 100 MiB
         Then Wait for statefulset 0 volume size expanded
 
         And Check statefulset 0 data in file data.txt is intact
@@ -63,7 +63,7 @@ Stress Volume Node Filesystem When Volume Is Offline Expanding
     FOR    ${i}    IN RANGE    ${LOOP_COUNT}
         And Scale down statefulset 0 to detach volume
 
-        When Expand statefulset 0 volume by 100 MiB
+        When Expand statefulset 0 volume with additional 100 MiB
         Then Wait for statefulset 0 volume size expanded
         And Wait for statefulset 0 volume detached
         And Scale up statefulset 0 to attach volume
diff --git a/e2e/tests/negative/stress_memory.robot b/e2e/tests/negative/stress_memory.robot
index f566610d76..50c871fb3f 100644
--- a/e2e/tests/negative/stress_memory.robot
+++ b/e2e/tests/negative/stress_memory.robot
@@ -51,7 +51,7 @@ Stress Volume Node Memory When Volume Is Online Expanding
     FOR    ${i}    IN RANGE    ${LOOP_COUNT}
         And Stress memory of volume nodes
 
-        When Expand statefulset 0 volume by 100 MiB
+        When Expand statefulset 0 volume with additional 100 MiB
         Then Wait for statefulset 0 volume size expanded
 
        And Check statefulset 0 data in file 0.txt is intact
@@ -65,7 +65,7 @@ Stress Volume Node Memory When Volume Is Offline Expanding
         And Scale down statefulset 0 to detach volume
         And Stress memory of all worker nodes
 
-        When Expand statefulset 0 volume by 100 MiB
+        When Expand statefulset 0 volume with additional 100 MiB
         Then Wait for statefulset 0 volume size expanded
 
         And Wait for statefulset 0 volume detached
diff --git a/e2e/tests/regression/test_persistentvolumeclaim.robot b/e2e/tests/regression/test_persistentvolumeclaim.robot
new file mode 100644
index 0000000000..d1e146b2b5
--- /dev/null
+++ b/e2e/tests/regression/test_persistentvolumeclaim.robot
@@ -0,0 +1,45 @@
+*** Settings ***
+Documentation    PersistentVolumeClaim Test Cases
+
+Test Tags    regression
+
+Resource    ../keywords/common.resource
+Resource    ../keywords/deployment.resource
+Resource    ../keywords/persistentvolumeclaim.resource
+Resource    ../keywords/setting.resource
+Resource    ../keywords/workload.resource
+Resource    ../keywords/variables.resource
+
+Test Setup    Set test environment
+Test Teardown    Cleanup test resources
+
+*** Variables ***
+${LOOP_COUNT}    1
+${RETRY_COUNT}    300
+${RETRY_INTERVAL}    1
+
+*** Test Cases ***
+
+Test persistentvolumeclaim expand more than storage maximum size should fail
+    [Tags]    volume expansion
+    [Documentation]    Verify that a PersistentVolumeClaim cannot be expanded beyond
+    ...    the storage maximum size.
+    ...
+    ...    Issue: https://github.com/longhorn/longhorn/issues/6633
+
+    Given Set setting storage-over-provisioning-percentage to 100
+    And Create persistentvolumeclaim 0 using RWX volume    storage_size=2GiB
+    And Create deployment 0 with persistentvolumeclaim 0
+    And Write 10 MB data to file data.txt in deployment 0
+
+    FOR    ${i}    IN RANGE    ${LOOP_COUNT}
+        When Expand deployment 0 volume more than storage maximum size should fail
+        Then Assert volume size of deployment 0 remains 2GiB for at least 5 seconds
+        And Assert persistentvolumeclaim 0 requested size remains 2GiB for at least 5 seconds
+        And Check deployment 0 data in file data.txt is intact
+
+        When Expand deployment 0 volume to 3 GiB
+        Then Assert persistentvolumeclaim 0 requested size remains 3GiB for at least 5 seconds
+        And Assert volume size of deployment 0 remains 3GiB for at least 5 seconds
+        And Check deployment 0 data in file data.txt is intact
+    END
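The regression test pins storage-over-provisioning-percentage to 100 so the schedulable ceiling is predictable. Under that setting a node can schedule roughly (storageMaximum - storageReserved) bytes of replicas, so a request of storageMaximum + 1 can never fit. A sketch of that headroom arithmetic; the function and numbers are illustrative, not taken from a real node:

    # Approximate schedulable headroom on one Longhorn disk.
    def max_schedulable(storage_maximum, storage_reserved, over_provisioning_pct=100):
        return (storage_maximum - storage_reserved) * over_provisioning_pct // 100

    disk = {"storageMaximum": 20 * 1024**3, "storageReserved": 6 * 1024**3}
    limit = max_schedulable(disk["storageMaximum"], disk["storageReserved"])
    print(limit)                                   # 15032385536, i.e. 14 GiB
    assert disk["storageMaximum"] + 1 > limit      # expanding past storageMaximum must fail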