Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

test(robot): expand PVC more than storage maximum size #2178

Merged
merged 7 commits into from
Dec 10, 2024
Merged
Show file tree
Hide file tree
Changes from 6 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 13 additions & 3 deletions e2e/keywords/persistentvolumeclaim.resource
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,9 @@ Library ../libs/keywords/volume_keywords.py

*** Keywords ***
Create persistentvolumeclaim ${claim_id} using ${volume_type} volume
    [Arguments]    &{config}
    # Optional &{config} entries (e.g. storage_size=5GiB) are forwarded to the
    # library keyword; the claim name is derived from the test-scoped suffix.
    ${claim_name} =    generate_name_with_suffix    claim    ${claim_id}
    create_persistentvolumeclaim    ${claim_name}    ${volume_type}    &{config}

Create persistentvolumeclaim ${claim_id} using ${volume_type} volume with ${sc_name} storageclass
${claim_name} = generate_name_with_suffix claim ${claim_id}
Expand All @@ -20,5 +21,14 @@ Delete persistentvolumeclaim ${claim_id}
delete_persistentvolumeclaim ${claim_name}

Delete persistentvolumeclaim for volume ${volume_id}
    # PVCs created for a raw volume are named after the volume, not a claim id.
    ${claim_name} =    generate_name_with_suffix    volume    ${volume_id}
    delete_persistentvolumeclaim    ${claim_name}

Assert persistentvolumeclaim ${claim_id} requested size remains ${size} for at least ${period} seconds
    # Poll the PVC once per second for ${period} seconds and fail if its
    # requested storage ever differs from ${size}. The expected value is
    # converted once up front because it is loop-invariant.
    ${claim_name} =    generate_name_with_suffix    claim    ${claim_id}
    ${expected_size_byte} =    convert_size_to_bytes    ${size}    to_str=True
    FOR    ${i}    IN RANGE    ${period}
        ${current_size_byte} =    get_claim_requested_size    ${claim_name}
        Should Be Equal    ${current_size_byte}    ${expected_size_byte}
        Sleep    1
    END
4 changes: 0 additions & 4 deletions e2e/keywords/statefulset.resource
Original file line number Diff line number Diff line change
Expand Up @@ -35,10 +35,6 @@ Scale up statefulset ${statefulset_id} to attach volume
${statefulset_name} = generate_name_with_suffix statefulset ${statefulset_id}
scale_statefulset_up ${statefulset_name}

Expand statefulset ${statefulset_id} volume by ${size} MiB
${statefulset_name} = generate_name_with_suffix statefulset ${statefulset_id}
expand_workload_claim_size_by_mib ${statefulset_name} ${size}

Write ${size} MB data to file ${file_name} in statefulset ${statefulset_id}
${statefulset_name} = generate_name_with_suffix statefulset ${statefulset_id}
write_workload_pod_random_data ${statefulset_name} ${size} ${file_name}
Expand Down
35 changes: 33 additions & 2 deletions e2e/keywords/workload.resource
Original file line number Diff line number Diff line change
Expand Up @@ -208,10 +208,10 @@ Trim ${workload_kind} ${workload_id} volume should ${condition}
END

Delete Longhorn ${workload_kind} ${workload_name} pod on node ${node_id}
${node_name} = get_node_by_index ${node_id}
${node_name} = get_node_by_index ${node_id}

IF '${workload_name}' == 'engine-image'
${label_selector} = Set Variable longhorn.io/component=engine-image
${label_selector} = Set Variable longhorn.io/component=engine-image
ELSE IF '${workload_name}' == 'instance-manager'
${label_selector} = Set Variable longhorn.io/component=instance-manager
ELSE
Expand All @@ -232,3 +232,34 @@ Check volume of ${workload_kind} ${workload_id} replica on node ${node_id} disk
${disk_uuid} = get_disk_uuid ${node_name} ${disk_name}
${replicas} = get_replicas volume_name=${volume_name} node_name=${node_name} disk_uuid=${disk_uuid}
Should Be True len(${replicas}) > 0

Expand ${workload_kind} ${workload_id} volume to ${size}
    # Resize the workload's PVC to an absolute target: convert the
    # human-readable ${size} (e.g. 4GiB) to bytes and patch the claim.
    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
    ${new_size} =    convert_size_to_bytes    ${size}

    expand_workload_claim_size    ${workload_name}    ${new_size}

Expand ${workload_kind} ${workload_id} volume with additional ${size}
    # Grow the workload's PVC by ${size} on top of its current request
    # (relative expansion, as opposed to the absolute "volume to" keyword).
    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
    ${new_size} =    convert_size_to_bytes    ${size}

    expand_workload_claim_size_with_additional_bytes    ${workload_name}    ${new_size}

Expand ${workload_kind} ${workload_id} volume more than storage maximum size should fail
    # Try to expand the PVC one byte beyond the storageMaximum of the disk
    # hosting the volume's replica; the expansion must be rejected with a
    # "Failed to expand" error. skip_retry=True makes the library attempt
    # the patch only once instead of retrying until timeout.
    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
    ${volume_name} =    get_workload_volume_name    ${workload_name}
    ${node_name} =    get_volume_node    ${volume_name}
    ${max_size} =    get_volume_node_disk_storage_maximum    ${volume_name}    ${node_name}
    ${new_size} =    evaluate    ${max_size} + 1

    Run Keyword And Expect Error    Failed to expand*    expand_workload_claim_size    ${workload_name}    ${new_size}    skip_retry=True

Assert volume size of ${workload_kind} ${workload_id} remains ${size} for at least ${period} seconds
    # Poll the volume CR's spec size once per second for ${period} seconds
    # and fail if it ever differs from ${size}. The expected value is
    # converted once up front because it is loop-invariant.
    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
    ${volume_name} =    get_workload_volume_name    ${workload_name}
    ${expected_size_byte} =    convert_size_to_bytes    ${size}    to_str=True
    FOR    ${i}    IN RANGE    ${period}
        ${current_size_byte} =    get_volume_size    ${volume_name}
        Should Be Equal    ${current_size_byte}    ${expected_size_byte}
        Sleep    1
    END
2 changes: 1 addition & 1 deletion e2e/libs/backup/backup.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
from backup.base import Base
from backup.crd import CRD
from backup.rest import Rest

from strategy import LonghornOperationStrategy
from utility.utility import logging


class Backup(Base):
Expand Down
2 changes: 2 additions & 0 deletions e2e/libs/backup/base.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
from abc import ABC, abstractmethod

from utility.utility import set_annotation
from utility.utility import get_annotation_value


class Base(ABC):

ANNOT_ID = "test.longhorn.io/backup-id"
Expand Down
10 changes: 7 additions & 3 deletions e2e/libs/backup/rest.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,15 @@
import time

from backup.base import Base

from snapshot import Snapshot as RestSnapshot

from utility.utility import logging
from utility.utility import get_all_crs
from utility.utility import get_longhorn_client
from utility.utility import get_retry_count_and_interval
from utility.utility import get_all_crs

from volume import Rest as RestVolume
from snapshot import Snapshot as RestSnapshot
import time


class Rest(Base):
Expand Down
7 changes: 3 additions & 4 deletions e2e/libs/backupstore/base.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,9 @@
from abc import ABC, abstractmethod
import time
import os
import hashlib
import os

from kubernetes import client
from utility.utility import get_retry_count_and_interval
from utility.utility import get_longhorn_client


class Base(ABC):

Expand Down
4 changes: 3 additions & 1 deletion e2e/libs/backupstore/nfs.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
from backupstore.base import Base
import os
import subprocess

from backupstore.base import Base

from urllib.parse import urlparse

class Nfs(Base):
Expand Down
5 changes: 4 additions & 1 deletion e2e/libs/backupstore/s3.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,14 @@
from backupstore.base import Base
import os
import base64
import json
import tempfile
import subprocess

from minio import Minio
from minio.error import ResponseError

from backupstore.base import Base

from urllib.parse import urlparse
from utility.utility import logging

Expand Down
2 changes: 1 addition & 1 deletion e2e/libs/engine_image/engine_image.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import time
from utility.utility import logging

from utility.utility import get_longhorn_client
from utility.utility import get_retry_count_and_interval

Expand Down
5 changes: 4 additions & 1 deletion e2e/libs/host/aws.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,12 @@
import boto3
import time

from host.constant import NODE_REBOOT_DOWN_TIME_SECOND
from host.base import Base

from utility.utility import logging
from utility.utility import wait_for_cluster_ready
from host.base import Base


class Aws(Base):

Expand Down
10 changes: 7 additions & 3 deletions e2e/libs/host/harvester.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,18 @@
import requests
import os
import requests
import time
import urllib3

from host.base import Base
from host.constant import NODE_REBOOT_DOWN_TIME_SECOND

from utility.utility import logging
from utility.utility import wait_for_cluster_ready
from utility.utility import get_retry_count_and_interval
from host.base import Base
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


class Harvester(Base):

def __init__(self):
Expand Down
19 changes: 11 additions & 8 deletions e2e/libs/k8s/k8s.py
Original file line number Diff line number Diff line change
@@ -1,18 +1,21 @@
import time
import asyncio

from kubernetes import client
from kubernetes.client.rest import ApiException
from workload.pod import create_pod
from workload.pod import delete_pod
from workload.pod import new_pod_manifest
from workload.pod import wait_for_pod_status
from workload.pod import get_pod
from workload.constant import IMAGE_UBUNTU
from utility.utility import subprocess_exec_cmd

from robot.libraries.BuiltIn import BuiltIn

from utility.utility import logging
from utility.utility import get_retry_count_and_interval
from utility.utility import subprocess_exec_cmd
from utility.utility import subprocess_exec_cmd_with_timeout
from robot.libraries.BuiltIn import BuiltIn

from workload.constant import IMAGE_UBUNTU
from workload.pod import create_pod
from workload.pod import delete_pod
from workload.pod import new_pod_manifest


async def restart_kubelet(node_name, downtime_in_sec=10):
manifest = new_pod_manifest(
Expand Down
6 changes: 6 additions & 0 deletions e2e/libs/keywords/common_keywords.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from node import Node
from node_exec import NodeExec

from utility.utility import convert_size_to_bytes
from utility.utility import init_k8s_api_client
from utility.utility import generate_name_with_suffix

Expand All @@ -25,3 +26,8 @@ def get_node_by_index(self, node_id):
def cleanup_node_exec(self):
for node_name in Node().list_node_names_by_role("all"):
NodeExec(node_name).cleanup()

def convert_size_to_bytes(self, size, to_str=False):
    """Convert a human-readable size string to bytes via the utility helper.

    Returns the byte count as an int, or its string form when ``to_str``
    is True (Robot keywords compare string values with Should Be Equal).
    """
    # The call resolves to the module-level utility import, not this method.
    size_in_bytes = convert_size_to_bytes(size)
    return str(size_in_bytes) if to_str else size_in_bytes
c3y1huang marked this conversation as resolved.
Show resolved Hide resolved
8 changes: 6 additions & 2 deletions e2e/libs/keywords/persistentvolumeclaim_keywords.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,9 @@ def cleanup_persistentvolumeclaims(self):
for claim in claims.items:
self.delete_persistentvolumeclaim(claim.metadata.name)

def create_persistentvolumeclaim(self, name, volume_type="RWO", sc_name="longhorn", storage_size="3GiB"):
    """Create a PVC.

    Args:
        name: PVC name.
        volume_type: access mode style, e.g. "RWO" or "RWX".
        sc_name: storage class backing the claim.
        storage_size: requested storage, human-readable (e.g. "3GiB").
    """
    logging(f'Creating {volume_type} persistentvolumeclaim {name} with {sc_name} storageclass')
    return self.claim.create(name, volume_type, sc_name, storage_size)

def delete_persistentvolumeclaim(self, name):
logging(f'Deleting persistentvolumeclaim {name}')
Expand All @@ -34,3 +34,7 @@ def expand_persistentvolumeclaim_size_by_mib(self, claim_name, size_in_mib):

logging(f'Expanding persistentvolumeclaim {claim_name} by {size_in_mib} MiB')
self.claim.set_annotation(claim_name, ANNOT_EXPANDED_SIZE, str(expanded_size))

def get_claim_requested_size(self, claim_name):
    """Return the PVC's requested storage (spec.resources.requests['storage'])."""
    pvc = self.claim.get(claim_name)
    return pvc.spec.resources.requests['storage']
20 changes: 20 additions & 0 deletions e2e/libs/keywords/volume_keywords.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
from utility.constant import ANNOT_REPLICA_NAMES
from utility.constant import LABEL_TEST
from utility.constant import LABEL_TEST_VALUE
from utility.constant import LONGHORN_NAMESPACE
from utility.utility import logging
from utility.utility import get_retry_count_and_interval

Expand Down Expand Up @@ -344,3 +345,22 @@ def get_volume_checksum(self, volume_name):

def validate_volume_setting(self, volume_name, setting_name, value):
return self.volume.validate_volume_setting(volume_name, setting_name, value)

def get_volume_size(self, volume_name):
    """Return the volume CR's spec size field (spec.size)."""
    return self.volume.get(volume_name)['spec']['size']

def get_volume_node_disk_storage_maximum(self, volume_name, node_name):
    """Return storageMaximum of the disk hosting the volume's replica on node_name.

    Raises:
        Exception: when no disk on the node lists the replica as scheduled.
    """
    replicas = self.replica.get(volume_name, node_name)
    replica_name = replicas[0]['metadata']['name']
    # Longhorn node CR (not the Kubernetes node object) carries the disk map.
    node = self.node.get_node_by_name(node_name, namespace=LONGHORN_NAMESPACE)

    for disk_name in node.disks:
        disk = node.disks[disk_name]
        if replica_name in disk['scheduledReplica']:
            logging(f"Found replica {replica_name} on node {node_name} scheduled to disk {disk_name}")
            return disk['storageMaximum']

    raise Exception(f"Failed to find storageMaximum for volume {volume_name} replica {replica_name} on node {node_name}")
19 changes: 14 additions & 5 deletions e2e/libs/keywords/workload_keywords.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,10 +28,10 @@
from utility.constant import ANNOT_CHECKSUM
from utility.constant import ANNOT_EXPANDED_SIZE
from utility.constant import LABEL_LONGHORN_COMPONENT
from utility.utility import convert_size_to_bytes
c3y1huang marked this conversation as resolved.
Show resolved Hide resolved
from utility.utility import logging

from volume import Volume
from volume.constant import MEBIBYTE


class workload_keywords:
Expand Down Expand Up @@ -158,12 +158,21 @@ def wait_for_workload_volume_detached(self, workload_name):
logging(f'Waiting for {workload_name} volume {volume_name} to be detached')
self.volume.wait_for_volume_detached(volume_name)

def expand_workload_claim_size(self, workload_name, size_in_byte, claim_index=0, skip_retry=False):
    """Expand the workload's PVC to an absolute size of ``size_in_byte`` bytes.

    Args:
        workload_name: name of the workload owning the claim.
        size_in_byte: target requested storage, in bytes.
        claim_index: which claim to expand when the workload has several.
        skip_retry: when True, attempt the patch only once (used by tests
            that expect the expansion to be rejected).
    """
    claim_name = get_workload_persistent_volume_claim_name(workload_name, index=claim_index)
    # Read the current request only to produce a useful before/after log line.
    current_size = self.persistentvolumeclaim.get(claim_name).spec.resources.requests['storage']
    current_size_byte = convert_size_to_bytes(current_size)

    logging(f'Expanding {workload_name} persistentvolumeclaim {claim_name} from {current_size_byte} to {size_in_byte}')
    self.persistentvolumeclaim.expand(claim_name, size_in_byte, skip_retry=skip_retry)
c3y1huang marked this conversation as resolved.
Show resolved Hide resolved

def expand_workload_claim_size_with_additional_bytes(self, workload_name, size_in_byte, claim_index=0, skip_retry=False):
    """Grow the workload's PVC by ``size_in_byte`` bytes on top of its current request.

    Args:
        workload_name: name of the workload owning the claim.
        size_in_byte: number of bytes to add to the current request.
        claim_index: which claim to expand when the workload has several.
        skip_retry: when True, attempt the patch only once.
    """
    claim_name = get_workload_persistent_volume_claim_name(workload_name, index=claim_index)
    requested = self.persistentvolumeclaim.get(claim_name).spec.resources.requests['storage']
    current_size_byte = convert_size_to_bytes(requested)

    logging(f'Expanding {workload_name} persistentvolumeclaim {claim_name} current size {current_size_byte} with additional {size_in_byte}')
    self.persistentvolumeclaim.expand_with_additional_bytes(claim_name, size_in_byte, skip_retry=skip_retry)
c3y1huang marked this conversation as resolved.
Show resolved Hide resolved

def wait_for_workload_claim_size_expanded(self, workload_name, claim_index=0):
claim_name = get_workload_persistent_volume_claim_name(workload_name, index=claim_index)
Expand Down
6 changes: 5 additions & 1 deletion e2e/libs/node/node.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
from robot.libraries.BuiltIn import BuiltIn

from utility.constant import DISK_BEING_SYNCING
from utility.constant import LONGHORN_NAMESPACE
from utility.constant import NODE_UPDATE_RETRY_INTERVAL
from utility.utility import get_longhorn_client
from utility.utility import get_retry_count_and_interval
Expand Down Expand Up @@ -99,7 +100,10 @@ def get_node_by_index(self, index, role="worker"):
nodes = self.list_node_names_by_role(role)
return nodes[int(index)]

def get_node_by_name(self, node_name, namespace="kube-system"):
    """Fetch a node object.

    Args:
        node_name: node to look up.
        namespace: when equal to LONGHORN_NAMESPACE, return the Longhorn
            node CR (via the Longhorn client); otherwise return the
            Kubernetes core node object.
    """
    if namespace == LONGHORN_NAMESPACE:
        return get_longhorn_client().by_id_node(node_name)

    core_api = client.CoreV1Api()
    return core_api.read_node(node_name)

Expand Down
7 changes: 4 additions & 3 deletions e2e/libs/persistentvolumeclaim/crd.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,9 +84,10 @@ def get_volume_name(self, claim_name, claim_namespace="default"):
claim = self.get(claim_name, claim_namespace)
return claim.spec.volume_name

def expand(self, claim_name, size, namespace="default"):
for i in range(self.retry_count):
logging(f"Trying to expand pvc {claim_name} to size {size} ... ({i})")
def expand(self, claim_name, size, namespace="default", skip_retry=False):
retry_count = 1 if skip_retry else self.retry_count
for i in range(retry_count):
logging(f"Trying to expand PVC {claim_name} to size {size} ... ({i})")
try:
self.core_v1_api.patch_namespaced_persistent_volume_claim(
name=claim_name,
Expand Down
Loading
Loading