Skip to content

Commit

Permalink
refactor(negative): adapt libs to keywords
Browse files Browse the repository at this point in the history
longhorn/longhorn-7034

Signed-off-by: Chin-Ya Huang <[email protected]>
  • Loading branch information
c3y1huang committed Mar 1, 2024
1 parent a22060d commit 95c0ba8
Show file tree
Hide file tree
Showing 45 changed files with 943 additions and 432 deletions.
1 change: 1 addition & 0 deletions e2e/libs/instancemanager/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
from instancemanager.instancemanager import InstanceManager
35 changes: 35 additions & 0 deletions e2e/libs/instancemanager/instancemanager.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
import time

from node import Node

from utility.utility import get_longhorn_client
from utility.utility import get_retry_count_and_interval
from utility.utility import logging


class InstanceManager:
    """Helper around the Longhorn client for instance-manager state checks."""

    def __init__(self):
        self.node = Node()

    def wait_for_all_instance_manager_running(self):
        """Poll until every worker node has a running instance manager.

        Retries up to the configured retry count, sleeping the configured
        interval between attempts.  Raises AssertionError if not every
        worker node has a running instance manager after all retries.
        """
        longhorn_client = get_longhorn_client()
        worker_nodes = self.node.list_node_names_by_role("worker")

        retry_count, retry_interval = get_retry_count_and_interval()
        # Bind these before the loop: if every attempt raises (e.g. the API
        # is unreachable), the final assert would otherwise hit a NameError
        # instead of reporting a clean assertion failure.
        instance_managers = None
        instance_manager_map = {}
        for i in range(retry_count):
            try:
                instance_managers = longhorn_client.list_instance_manager()
                # Map node ID -> running instance manager; a healthy cluster
                # has exactly one running instance manager per worker node.
                instance_manager_map = {
                    im.nodeID: im
                    for im in instance_managers
                    if im.currentState == "running"
                }
                if len(instance_manager_map) == len(worker_nodes):
                    break
            except Exception as e:
                logging(f"Getting instance manager state error: {e}")

            logging(f"Waiting for all instance manager running, retry ({i}) ...")
            time.sleep(retry_interval)

        assert len(instance_manager_map) == len(worker_nodes), f"expect all instance managers running, instance_managers = {instance_managers}, instance_manager_map = {instance_manager_map}"
4 changes: 2 additions & 2 deletions e2e/libs/instancemanager/utility.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,14 @@
import time

from node.utility import list_node_names_by_role
from node import Node

from utility.utility import get_longhorn_client
from utility.utility import get_retry_count_and_interval
from utility.utility import logging

def wait_for_all_instance_manager_running():
longhorn_client = get_longhorn_client()
worker_nodes = list_node_names_by_role("worker")
worker_nodes = Node.list_node_names_by_role("worker")

retry_count, retry_interval = get_retry_count_and_interval()
for i in range(retry_count):
Expand Down
4 changes: 4 additions & 0 deletions e2e/libs/keywords/common_keywords.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from node_exec import NodeExec

from utility.utility import init_k8s_api_client
from utility.utility import generate_name_with_suffix


class common_keywords:
Expand All @@ -17,3 +18,6 @@ def init_node_exec(self, test_name):

def cleanup_node_exec(self):
    """Tear down the shared NodeExec singleton created by init_node_exec."""
    NodeExec.get_instance().cleanup()

def generate_name_with_suffix(self, kind, suffix):
    """Build a test-resource name from *kind* and *suffix*.

    Thin Robot-keyword wrapper delegating to
    utility.utility.generate_name_with_suffix.
    """
    return generate_name_with_suffix(kind, suffix)
40 changes: 25 additions & 15 deletions e2e/libs/keywords/deployment_keywords.py
Original file line number Diff line number Diff line change
@@ -1,22 +1,32 @@
from utility.constant import LABEL_TEST
from utility.constant import LABEL_TEST_VALUE
from utility.utility import logging

from volume import Volume

from workload.deployment import create_deployment
from workload.deployment import delete_deployment
from workload.persistentvolumeclaim import create_persistentvolumeclaim
from workload.persistentvolumeclaim import delete_persistentvolumeclaim
from workload.workload import get_workload_pvc_name
from workload.deployment import list_deployments


class deployment_keywords:

def __init__(self):
pass

def cleanup_deployments(self, deployment_names):
for name in deployment_names:
pvc_name = get_workload_pvc_name(name)
delete_deployment(name)
delete_persistentvolumeclaim(pvc_name)

def create_deployment(self, volume_type="rwo", option=""):
create_persistentvolumeclaim(volume_type, option)
deployment_name = create_deployment(volume_type, option)
return deployment_name
self.volume = Volume()

def cleanup_deployments(self):
    """Delete every deployment that carries the test label."""
    selector = f"{LABEL_TEST}={LABEL_TEST_VALUE}"
    deployments = list_deployments(label_selector=selector)

    logging(f'Cleaning up {len(deployments.items)} deployments')
    for item in deployments.items:
        self.delete_deployment(item.metadata.name)

def create_deployment(self, name, claim_name):
    """Create deployment *name*; claim_name presumably names the PVC the
    deployment mounts — confirm against workload.deployment.create_deployment.
    Returns None.
    """
    logging(f'Creating deployment {name}')
    create_deployment(name, claim_name)

def delete_deployment(self, name):
    """Delete deployment *name* (delegates to workload.deployment)."""
    logging(f'Deleting deployment {name}')
    delete_deployment(name)
13 changes: 13 additions & 0 deletions e2e/libs/keywords/instancemanager_keywords.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
from instancemanager import InstanceManager

from utility.utility import logging


class instancemanager_keywords:
    """Robot keyword wrappers around the InstanceManager library."""

    def __init__(self):
        self.instancemanager = InstanceManager()

    def wait_for_all_instance_manager_running(self):
        """Block until every worker node reports a running instance manager."""
        logging('Waiting for all instance manager running')
        self.instancemanager.wait_for_all_instance_manager_running()
7 changes: 5 additions & 2 deletions e2e/libs/keywords/kubelet_keywords.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,10 @@
from kubelet.kubelet import restart_kubelet

from utility.utility import logging


class kubelet_keywords:

def restart_kubelet(self, node_name, stop_time_in_sec):
restart_kubelet(node_name, int(stop_time_in_sec))
def restart_kubelet(self, node_name, downtime_in_sec):
    """Restart kubelet on *node_name*, keeping it down for downtime_in_sec
    seconds.

    int() coercion because Robot Framework passes keyword arguments as
    strings — presumably; confirm against the test suites.
    """
    logging(f'Restarting kubelet on node {node_name} with downtime {downtime_in_sec} seconds')
    restart_kubelet(node_name, int(downtime_in_sec))
5 changes: 5 additions & 0 deletions e2e/libs/keywords/network_keywords.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,15 @@
from network.network import setup_control_plane_network_latency
from network.network import cleanup_control_plane_network_latency

from utility.utility import logging


class network_keywords:
    """Robot keyword wrappers for control-plane network fault injection."""

    def setup_control_plane_network_latency(self):
        """Inject latency on the control-plane network."""
        logging("Setting up control plane network latency")
        setup_control_plane_network_latency()

    def cleanup_control_plane_network_latency(self):
        """Remove the previously injected control-plane network latency."""
        logging("Cleaning up control plane network latency")
        cleanup_control_plane_network_latency()
43 changes: 27 additions & 16 deletions e2e/libs/keywords/node_keywords.py
Original file line number Diff line number Diff line change
@@ -1,38 +1,49 @@
from robot.libraries.BuiltIn import BuiltIn

from instancemanager.utility import wait_for_all_instance_manager_running

from node import Node
from node.utility import get_node_by_index
from node.constant import NODE_REBOOT_DOWN_TIME_SECOND

from utility.utility import logging


class node_keywords:

def __init__(self):
self.volume_keywords = BuiltIn().get_library_instance('volume_keywords')

self.node = Node()

def reboot_volume_node(self, volume_name):
volume_keywords = BuiltIn().get_library_instance('volume_keywords')
volume_node = volume_keywords.get_replica_node_attached_to_volume(volume_name)
self.node.reboot_node(volume_node)
node_id = self.volume_keywords.get_node_id_by_replica_locality(volume_name, "volume node")

logging(f'Rebooting volume {volume_name} node {node_id} with downtime {NODE_REBOOT_DOWN_TIME_SECOND} seconds')
self.node.reboot_node(node_id)

def reboot_replica_node(self, volume_name):
volume_keywords = BuiltIn().get_library_instance('volume_keywords')
replica_node = volume_keywords.get_replica_node_not_attached_to_volume(volume_name)
self.node.reboot_node(replica_node)
node_id = self.volume_keywords.get_node_id_by_replica_locality(volume_name, "replica node")

logging(f'Rebooting volume {volume_name} node {node_id} with downtime {NODE_REBOOT_DOWN_TIME_SECOND} seconds')
self.node.reboot_node(node_id)

def reboot_node_by_index(self, idx, power_off_time_in_min=1):
node_name = get_node_by_index(idx)
self.node.reboot_node(node_name, int(power_off_time_in_min) * 60)
node_name = self.node.get_node_by_index(idx)
reboot_down_time_min = int(power_off_time_in_min) * 60

logging(f'Rebooting node {node_name} with downtime {reboot_down_time_min} minutes')
self.node.reboot_node(node_name, reboot_down_time_min)

def reboot_all_worker_nodes(self, power_off_time_in_min=1):
self.node.reboot_all_worker_nodes(int(power_off_time_in_min) * 60)
reboot_down_time_min = int(power_off_time_in_min) * 60

logging(f'Rebooting all worker nodes with downtime {reboot_down_time_min} minutes')
self.node.reboot_all_worker_nodes(reboot_down_time_min)

def reboot_all_nodes(self):
    """Reboot every cluster node with the default downtime
    (NODE_REBOOT_DOWN_TIME_SECOND).
    """
    logging(f'Rebooting all nodes with downtime {NODE_REBOOT_DOWN_TIME_SECOND} seconds')
    self.node.reboot_all_nodes()

def reboot_node_by_name(self, node_name, power_off_time_in_min=1):
self.node.reboot_node(node_name, int(power_off_time_in_min) * 60)
def reboot_node_by_name(self, node_name, downtime_in_min=1):
reboot_down_time_min = int(downtime_in_min) * 60

def wait_for_all_instance_manager_running(self):
wait_for_all_instance_manager_running()
logging(f'Rebooting node {node_name} with downtime {reboot_down_time_min} minutes')
self.node.reboot_node(node_name, reboot_down_time_min)
28 changes: 24 additions & 4 deletions e2e/libs/keywords/persistentvolumeclaim_keywords.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,8 @@
from persistentvolumeclaim import PersistentVolumeClaim

from utility.constant import ANNOT_EXPANDED_SIZE
from utility.constant import LABEL_TEST
from utility.constant import LABEL_TEST_VALUE
from utility.utility import logging

from volume.constant import MEBIBYTE
Expand All @@ -8,9 +11,26 @@
class persistentvolumeclaim_keywords:

def __init__(self):
self.pvc = PersistentVolumeClaim()
self.claim = PersistentVolumeClaim()

def expand_pvc_size_by_mib(self, claim_name, size_in_mib):
logging(f'Expanding PVC {claim_name} by {size_in_mib} MiB')
def cleanup_persistentvolumeclaims(self):
    """Delete every persistentvolumeclaim that carries the test label."""
    claims = self.claim.list(label_selector=f"{LABEL_TEST}={LABEL_TEST_VALUE}")

    logging(f'Cleaning up {len(claims.items)} persistentvolumeclaims')
    for claim in claims.items:
        self.claim.delete(claim.metadata.name)

def create_persistentvolumeclaim(self, name, volume_type="RWO", option=""):
    """Create PVC *name* and return PersistentVolumeClaim.create's result.

    volume_type (e.g. "RWO") and option select the claim variant —
    see PersistentVolumeClaim.create for the accepted values.
    """
    logging(f'Creating persistentvolumeclaim {name}')
    return self.claim.create(name, volume_type, option)

def delete_persistentvolumeclaim(self, name):
    """Delete PVC *name* and return PersistentVolumeClaim.delete's result."""
    logging(f'Deleting persistentvolumeclaim {name}')
    return self.claim.delete(name)

def expand_persistentvolumeclaim_size_by_mib(self, claim_name, size_in_mib):
size_in_byte = int(size_in_mib) * MEBIBYTE
return self.pvc.expand(claim_name, size_in_byte)
expanded_size = self.claim.expand(claim_name, size_in_byte)

logging(f'Expanding persistentvolumeclaim {claim_name} by {size_in_mib} MiB')
self.claim.add_or_update_annotation(claim_name, ANNOT_EXPANDED_SIZE, str(expanded_size))
24 changes: 17 additions & 7 deletions e2e/libs/keywords/recurringjob_keywords.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
from recurringjob import RecurringJob

from utility.constant import LABEL_TEST
from utility.constant import LABEL_TEST_VALUE
from utility.utility import logging


Expand All @@ -8,22 +10,30 @@ class recurringjob_keywords:
def __init__(self):
self.recurringjob = RecurringJob()

def cleanup_recurringjobs(self):
    """Delete every recurringjob that carries the test label.

    Note: RecurringJob.list returns a dict-like object here (indexed with
    ['items']), unlike the k8s client objects used elsewhere.
    """
    recurringjobs = self.recurringjob.list(
        label_selector=f"{LABEL_TEST}={LABEL_TEST_VALUE}"
    )

    logging(f'Cleaning up {len(recurringjobs["items"])} recurringjobs')
    for recurringjob in recurringjobs['items']:
        self.recurringjob.delete(recurringjob['metadata']['name'])

def create_snapshot_recurringjob_for_volume(self, volume_name):
job_name = volume_name + '-snap'

logging(f'Creating snapshot recurringjob {job_name} for volume {volume_name}')
self.recurringjob.create(job_name, task="snapshot")
self.recurringjob.add_to_volume(job_name, volume_name)
self.recurringjob.get(job_name)
logging(f'Created recurringjob {job_name} for volume {volume_name}')


def create_backup_recurringjob_for_volume(self, volume_name):
job_name = volume_name + '-bak'

logging(f'Creating backup recurringjob {job_name} for volume {volume_name}')
self.recurringjob.create(job_name, task="backup")
self.recurringjob.add_to_volume(job_name, volume_name)
self.recurringjob.get(job_name)
logging(f'Created recurringjob {job_name} for volume {volume_name}')

def check_recurringjobs_work(self, volume_name):
    """Verify the recurringjobs attached to *volume_name* actually run
    (delegates to RecurringJob.check_jobs_work).
    """
    logging(f'Checking recurringjobs work for volume {volume_name}')
    self.recurringjob.check_jobs_work(volume_name)

def cleanup_recurringjobs(self, volume_names):
self.recurringjob.cleanup(volume_names)
57 changes: 45 additions & 12 deletions e2e/libs/keywords/statefulset_keywords.py
Original file line number Diff line number Diff line change
@@ -1,33 +1,66 @@
from workload.persistentvolumeclaim import delete_persistentvolumeclaim
from robot.libraries.BuiltIn import BuiltIn

from persistentvolumeclaim import PersistentVolumeClaim

from utility.constant import LABEL_TEST
from utility.constant import LABEL_TEST_VALUE
from utility.utility import logging

from volume import Volume

from workload.statefulset import create_statefulset
from workload.statefulset import delete_statefulset
from workload.statefulset import get_statefulset
from workload.statefulset import list_statefulsets
from workload.statefulset import scale_statefulset
from workload.statefulset import wait_for_statefulset_replicas_ready
from workload.workload import get_workload_pvc_name



class statefulset_keywords:

def __init__(self):
pass
self.persistentvolumeclaim = PersistentVolumeClaim()
self.volume = Volume()

def cleanup_statefulsets(self):
    """Delete every statefulset that carries the test label."""
    statefulsets = list_statefulsets(label_selector=f"{LABEL_TEST}={LABEL_TEST_VALUE}")

    logging(f'Cleaning up {len(statefulsets.items)} statefulsets')
    for statefulset in statefulsets.items:
        self.delete_statefulset(statefulset.metadata.name)

def cleanup_statefulsets(self, statefulset_names):
for name in statefulset_names:
pvc_name = get_workload_pvc_name(name)
delete_statefulset(name)
delete_persistentvolumeclaim(pvc_name)
def create_statefulset(self, name, volume_type="RWO", option=""):
    """Create statefulset *name*; returns None.

    volume_type (e.g. "RWO") and option select the volume variant —
    see workload.statefulset.create_statefulset for the accepted values.
    """
    logging(f'Creating statefulset {name}')
    create_statefulset(name, volume_type, option)

def create_statefulset(self, volume_type="rwo", option=""):
statefulset_name = create_statefulset(volume_type, option)
return statefulset_name
def delete_statefulset(self, name):
    """Delete statefulset *name* (delegates to workload.statefulset)."""
    logging(f'Deleting statefulset {name}')
    delete_statefulset(name)

def get_statefulset(self, statefulset_name):
    """Return the statefulset object fetched via workload.statefulset."""
    return get_statefulset(statefulset_name)

def scale_statefulset(self, statefulset_name, replica_count):
    """Scale the statefulset to *replica_count* replicas; returns whatever
    workload.statefulset.scale_statefulset returns.
    """
    logging(f'Scaling statefulset {statefulset_name} to {replica_count}')
    return scale_statefulset(statefulset_name, replica_count)

def scale_statefulset_down(self, statefulset_name):
    """Scale the statefulset to zero replicas, then wait for its workload
    volume to detach (via the workload_keywords library instance).
    """
    logging(f'Scaling statefulset {statefulset_name} down')
    scale_statefulset(statefulset_name, 0)

    # Cross-library call: fetch the already-imported Robot library instance.
    workload_keywords = BuiltIn().get_library_instance('workload_keywords')
    workload_keywords.wait_for_workload_volume_detached(statefulset_name)

def scale_statefulset_up(self, statefulset_name, replicaset_count=3):
    """Scale the statefulset up to *replicaset_count* replicas, wait for the
    workload volume to become healthy, then for the replicas to be ready.
    """
    logging(f'Scaling statefulset {statefulset_name} up to {replicaset_count}')
    scale_statefulset(statefulset_name, replicaset_count)

    # Cross-library call: fetch the already-imported Robot library instance.
    workload_keywords = BuiltIn().get_library_instance('workload_keywords')
    workload_keywords.wait_for_workload_volume_healthy(statefulset_name)

    self.wait_for_statefulset_replicas_ready(statefulset_name, replicaset_count)

def wait_for_statefulset_replicas_ready(self, statefulset_name, expected_ready_count):
return wait_for_statefulset_replicas_ready(statefulset_name, expected_ready_count)
logging(f'Waiting for statefulset {statefulset_name} to have {expected_ready_count} replicas ready')
wait_for_statefulset_replicas_ready(statefulset_name, expected_ready_count)
Loading

0 comments on commit 95c0ba8

Please sign in to comment.