refactor(negative): adapt libs to keywords
longhorn/longhorn-7034

Signed-off-by: Chin-Ya Huang <[email protected]>
c3y1huang committed Mar 8, 2024
1 parent f926655 commit 74d1360
Showing 49 changed files with 1,097 additions and 562 deletions.
1 change: 1 addition & 0 deletions e2e/libs/host/__init__.py
@@ -0,0 +1 @@
from host.host import Host
1 change: 1 addition & 0 deletions e2e/libs/host/constant.py
@@ -0,0 +1 @@
NODE_REBOOT_DOWN_TIME_SECOND = 60
77 changes: 77 additions & 0 deletions e2e/libs/host/host.py
@@ -0,0 +1,77 @@
import boto3
import time
import yaml

from host.constant import NODE_REBOOT_DOWN_TIME_SECOND

from node.node import Node

from utility.utility import logging
from utility.utility import wait_for_cluster_ready


class Host:

    def __init__(self):
        with open('/tmp/instance_mapping', 'r') as f:
            self.mapping = yaml.safe_load(f)
        self.aws_client = boto3.client('ec2')

        self.node = Node()

    def reboot_all_nodes(self, shut_down_time_in_sec=NODE_REBOOT_DOWN_TIME_SECOND):
        instance_ids = [value for value in self.mapping.values()]

        resp = self.aws_client.stop_instances(InstanceIds=instance_ids, Force=True)
        assert resp['ResponseMetadata']['HTTPStatusCode'] == 200, f"Failed to stop instances {instance_ids} response: {resp}"
        logging(f"Stopping instances {instance_ids}")
        waiter = self.aws_client.get_waiter('instance_stopped')
        waiter.wait(InstanceIds=instance_ids)

        logging(f"Wait for {shut_down_time_in_sec} seconds before starting instances")
        time.sleep(shut_down_time_in_sec)

        resp = self.aws_client.start_instances(InstanceIds=instance_ids)
        logging(f"Starting instances {instance_ids} response: {resp}")
        waiter = self.aws_client.get_waiter('instance_running')
        waiter.wait(InstanceIds=instance_ids)

        wait_for_cluster_ready()

        logging("Started instances")

    def reboot_node(self, reboot_node_name, shut_down_time_in_sec=NODE_REBOOT_DOWN_TIME_SECOND):
        instance_ids = [self.mapping[reboot_node_name]]

        resp = self.aws_client.stop_instances(InstanceIds=instance_ids, Force=True)
        assert resp['ResponseMetadata']['HTTPStatusCode'] == 200, f"Failed to stop instances {instance_ids} response: {resp}"
        logging(f"Stopping instances {instance_ids}")
        waiter = self.aws_client.get_waiter('instance_stopped')
        waiter.wait(InstanceIds=instance_ids)
        logging("Stopped instances")

        time.sleep(shut_down_time_in_sec)

        resp = self.aws_client.start_instances(InstanceIds=instance_ids)
        logging(f"Starting instances {instance_ids} response: {resp}")
        waiter = self.aws_client.get_waiter('instance_running')
        waiter.wait(InstanceIds=instance_ids)
        logging("Started instances")

    def reboot_all_worker_nodes(self, shut_down_time_in_sec=NODE_REBOOT_DOWN_TIME_SECOND):
        instance_ids = [self.mapping[value] for value in self.node.list_node_names_by_role("worker")]

        resp = self.aws_client.stop_instances(InstanceIds=instance_ids, Force=True)
        assert resp['ResponseMetadata']['HTTPStatusCode'] == 200, f"Failed to stop instances {instance_ids} response: {resp}"
        logging(f"Stopping instances {instance_ids}")
        waiter = self.aws_client.get_waiter('instance_stopped')
        waiter.wait(InstanceIds=instance_ids)
        logging("Stopped instances")

        time.sleep(shut_down_time_in_sec)

        resp = self.aws_client.start_instances(InstanceIds=instance_ids)
        logging(f"Starting instances {instance_ids} response: {resp}")
        waiter = self.aws_client.get_waiter('instance_running')
        waiter.wait(InstanceIds=instance_ids)
        logging("Started instances")
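For orientation, a minimal usage sketch of the new Host library. It assumes the /tmp/instance_mapping YAML written by the test harness maps node names to EC2 instance IDs, and that boto3 can resolve AWS credentials and a region from the environment; the node name "worker-1" is hypothetical.

from host import Host

host = Host()

# Stop every mapped instance, sleep the default 60 s
# (NODE_REBOOT_DOWN_TIME_SECOND), restart them, and block until
# wait_for_cluster_ready() reports the cluster healthy.
host.reboot_all_nodes()

# Reboot one node with a custom 120 s downtime; the name must be a key
# in /tmp/instance_mapping ("worker-1" is hypothetical).
host.reboot_node("worker-1", shut_down_time_in_sec=120)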
1 change: 1 addition & 0 deletions e2e/libs/instancemanager/__init__.py
@@ -0,0 +1 @@
from instancemanager.instancemanager import InstanceManager
35 changes: 35 additions & 0 deletions e2e/libs/instancemanager/instancemanager.py
@@ -0,0 +1,35 @@
import time

from node import Node

from utility.utility import get_longhorn_client
from utility.utility import get_retry_count_and_interval
from utility.utility import logging


class InstanceManager:

    def __init__(self):
        self.node = Node()

    def wait_for_all_instance_manager_running(self):
        longhorn_client = get_longhorn_client()
        worker_nodes = self.node.list_node_names_by_role("worker")

        retry_count, retry_interval = get_retry_count_and_interval()
        # initialized up front so the final assert reports cleanly even if
        # every list_instance_manager() attempt raised
        instance_managers = []
        instance_manager_map = {}
        for i in range(retry_count):
            try:
                instance_managers = longhorn_client.list_instance_manager()
                instance_manager_map = {}
                for im in instance_managers:
                    if im.currentState == "running":
                        instance_manager_map[im.nodeID] = im
                if len(instance_manager_map) == len(worker_nodes):
                    break
            except Exception as e:
                logging(f"Getting instance manager state error: {e}")

            logging(f"Waiting for all instance managers running, retry ({i}) ...")
            time.sleep(retry_interval)

        assert len(instance_manager_map) == len(worker_nodes), f"expect all instance managers running, instance_managers = {instance_managers}, instance_manager_map = {instance_manager_map}"
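A short sketch of how a test might drive this waiter, assuming the harness has already initialized the k8s API and Longhorn clients:

from instancemanager import InstanceManager

# Blocks until every worker node has an instance manager in the
# "running" state, polling per get_retry_count_and_interval(), and
# raises an AssertionError once the retry budget is exhausted.
im = InstanceManager()
im.wait_for_all_instance_manager_running()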
29 changes: 0 additions & 29 deletions e2e/libs/instancemanager/utility.py

This file was deleted.

4 changes: 4 additions & 0 deletions e2e/libs/keywords/common_keywords.py
@@ -1,6 +1,7 @@
from node_exec import NodeExec

from utility.utility import init_k8s_api_client
from utility.utility import generate_name_with_suffix


class common_keywords:
@@ -17,3 +18,6 @@ def init_node_exec(self, test_name):

    def cleanup_node_exec(self):
        NodeExec.get_instance().cleanup()

    def generate_name_with_suffix(self, kind, suffix):
        return generate_name_with_suffix(kind, suffix)
40 changes: 25 additions & 15 deletions e2e/libs/keywords/deployment_keywords.py
@@ -1,22 +1,32 @@
+from utility.constant import LABEL_TEST
+from utility.constant import LABEL_TEST_VALUE
+from utility.utility import logging
+
+from volume import Volume
+
 from workload.deployment import create_deployment
 from workload.deployment import delete_deployment
-from workload.persistentvolumeclaim import create_persistentvolumeclaim
-from workload.persistentvolumeclaim import delete_persistentvolumeclaim
-from workload.workload import get_workload_pvc_name
+from workload.deployment import list_deployments


 class deployment_keywords:

     def __init__(self):
-        pass
-
-    def cleanup_deployments(self, deployment_names):
-        for name in deployment_names:
-            pvc_name = get_workload_pvc_name(name)
-            delete_deployment(name)
-            delete_persistentvolumeclaim(pvc_name)
-
-    def create_deployment(self, volume_type="rwo", option=""):
-        create_persistentvolumeclaim(volume_type, option)
-        deployment_name = create_deployment(volume_type, option)
-        return deployment_name
+        self.volume = Volume()
+
+    def cleanup_deployments(self):
+        deployments = list_deployments(
+            label_selector=f"{LABEL_TEST}={LABEL_TEST_VALUE}"
+        )
+
+        logging(f'Cleaning up {len(deployments.items)} deployments')
+        for deployment in deployments.items:
+            self.delete_deployment(deployment.metadata.name)
+
+    def create_deployment(self, name, claim_name):
+        logging(f'Creating deployment {name}')
+        create_deployment(name, claim_name)
+
+    def delete_deployment(self, name):
+        logging(f'Deleting deployment {name}')
+        delete_deployment(name)
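The new cleanup no longer needs the caller to track deployment names: it lists by the shared test label. A sketch of the intended flow, assuming e2e/libs/keywords is on the import path and create_deployment applies the LABEL_TEST=LABEL_TEST_VALUE label to everything it creates (resource names are hypothetical):

from deployment_keywords import deployment_keywords

deploy = deployment_keywords()

# The claim would be provisioned first via persistentvolumeclaim_keywords
# (see below); "test-deployment" and "test-claim" are hypothetical names.
deploy.create_deployment("test-deployment", claim_name="test-claim")

# Teardown finds everything by label instead of by remembered name.
deploy.cleanup_deployments()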
52 changes: 52 additions & 0 deletions e2e/libs/keywords/host_keywords.py
@@ -0,0 +1,52 @@
from robot.libraries.BuiltIn import BuiltIn

from host import Host
from host.constant import NODE_REBOOT_DOWN_TIME_SECOND

from node import Node

from utility.utility import logging


class host_keywords:

    def __init__(self):
        self.volume_keywords = BuiltIn().get_library_instance('volume_keywords')

        self.host = Host()
        self.node = Node()

    def reboot_volume_node(self, volume_name):
        node_id = self.volume_keywords.get_node_id_by_replica_locality(volume_name, "volume node")

        logging(f'Rebooting volume {volume_name} node {node_id} with downtime {NODE_REBOOT_DOWN_TIME_SECOND} seconds')
        self.host.reboot_node(node_id)

    def reboot_replica_node(self, volume_name):
        node_id = self.volume_keywords.get_node_id_by_replica_locality(volume_name, "replica node")

        logging(f'Rebooting volume {volume_name} node {node_id} with downtime {NODE_REBOOT_DOWN_TIME_SECOND} seconds')
        self.host.reboot_node(node_id)

    def reboot_node_by_index(self, idx, power_off_time_in_min=1):
        node_name = self.node.get_node_by_index(idx)
        reboot_down_time_sec = int(power_off_time_in_min) * 60

        logging(f'Rebooting node {node_name} with downtime {reboot_down_time_sec} seconds')
        self.host.reboot_node(node_name, reboot_down_time_sec)

    def reboot_all_worker_nodes(self, power_off_time_in_min=1):
        reboot_down_time_sec = int(power_off_time_in_min) * 60

        logging(f'Rebooting all worker nodes with downtime {reboot_down_time_sec} seconds')
        self.host.reboot_all_worker_nodes(reboot_down_time_sec)

    def reboot_all_nodes(self):
        logging(f'Rebooting all nodes with downtime {NODE_REBOOT_DOWN_TIME_SECOND} seconds')
        self.host.reboot_all_nodes()

    def reboot_node_by_name(self, node_name, downtime_in_min=1):
        reboot_down_time_sec = int(downtime_in_min) * 60

        logging(f'Rebooting node {node_name} with downtime {reboot_down_time_sec} seconds')
        self.host.reboot_node(node_name, reboot_down_time_sec)
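Note that these keyword classes are Robot Framework libraries: BuiltIn().get_library_instance('volume_keywords') only resolves inside a Robot run that has imported volume_keywords. Robot also passes keyword arguments as strings, which is presumably why each reboot keyword coerces with int() before converting minutes to the seconds Host expects; a trivial sketch with hypothetical values:

power_off_time_in_min = "2"                      # Robot hands over "2", not 2
reboot_down_time_sec = int(power_off_time_in_min) * 60
assert reboot_down_time_sec == 120               # seconds passed to Host.reboot_node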
13 changes: 13 additions & 0 deletions e2e/libs/keywords/instancemanager_keywords.py
@@ -0,0 +1,13 @@
from instancemanager import InstanceManager

from utility.utility import logging


class instancemanager_keywords:

    def __init__(self):
        self.instancemanager = InstanceManager()

    def wait_for_all_instance_manager_running(self):
        logging('Waiting for all instance managers running')
        self.instancemanager.wait_for_all_instance_manager_running()
7 changes: 5 additions & 2 deletions e2e/libs/keywords/kubelet_keywords.py
@@ -1,7 +1,10 @@
 from kubelet.kubelet import restart_kubelet

+from utility.utility import logging
+

 class kubelet_keywords:

-    def restart_kubelet(self, node_name, stop_time_in_sec):
-        restart_kubelet(node_name, int(stop_time_in_sec))
+    def restart_kubelet(self, node_name, downtime_in_sec):
+        logging(f'Restarting kubelet on node {node_name} with downtime {downtime_in_sec} seconds')
+        restart_kubelet(node_name, int(downtime_in_sec))
5 changes: 5 additions & 0 deletions e2e/libs/keywords/network_keywords.py
@@ -1,10 +1,15 @@
from network.network import setup_control_plane_network_latency
from network.network import cleanup_control_plane_network_latency

from utility.utility import logging


class network_keywords:

    def setup_control_plane_network_latency(self):
        logging("Setting up control plane network latency")
        setup_control_plane_network_latency()

    def cleanup_control_plane_network_latency(self):
        logging("Cleaning up control plane network latency")
        cleanup_control_plane_network_latency()
38 changes: 0 additions & 38 deletions e2e/libs/keywords/node_keywords.py

This file was deleted.

28 changes: 24 additions & 4 deletions e2e/libs/keywords/persistentvolumeclaim_keywords.py
@@ -1,5 +1,8 @@
 from persistentvolumeclaim import PersistentVolumeClaim

+from utility.constant import ANNOT_EXPANDED_SIZE
+from utility.constant import LABEL_TEST
+from utility.constant import LABEL_TEST_VALUE
 from utility.utility import logging

 from volume.constant import MEBIBYTE
@@ -8,9 +11,26 @@
 class persistentvolumeclaim_keywords:

     def __init__(self):
-        self.pvc = PersistentVolumeClaim()
+        self.claim = PersistentVolumeClaim()

-    def expand_pvc_size_by_mib(self, claim_name, size_in_mib):
-        logging(f'Expanding PVC {claim_name} by {size_in_mib} MiB')
+    def cleanup_persistentvolumeclaims(self):
+        claims = self.claim.list(label_selector=f"{LABEL_TEST}={LABEL_TEST_VALUE}")
+
+        logging(f'Cleaning up {len(claims.items)} persistentvolumeclaims')
+        for claim in claims.items:
+            self.delete_persistentvolumeclaim(claim.metadata.name)
+
+    def create_persistentvolumeclaim(self, name, volume_type="RWO", option=""):
+        logging(f'Creating persistentvolumeclaim {name}')
+        return self.claim.create(name, volume_type, option)
+
+    def delete_persistentvolumeclaim(self, name):
+        logging(f'Deleting persistentvolumeclaim {name}')
+        return self.claim.delete(name)
+
+    def expand_persistentvolumeclaim_size_by_mib(self, claim_name, size_in_mib):
         size_in_byte = int(size_in_mib) * MEBIBYTE
-        return self.pvc.expand(claim_name, size_in_byte)
+        expanded_size = self.claim.expand(claim_name, size_in_byte)
+
+        logging(f'Expanding persistentvolumeclaim {claim_name} by {size_in_mib} MiB')
+        self.claim.set_annotation(claim_name, ANNOT_EXPANDED_SIZE, str(expanded_size))
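The expand keyword now records the size it requested in the ANNOT_EXPANDED_SIZE annotation so a later keyword can verify the PVC actually reached it. A sketch of the arithmetic, assuming claim.expand() returns the new requested size in bytes (values hypothetical):

MEBIBYTE = 2**20                              # from volume.constant

size_in_mib = "100"                           # Robot passes strings
size_in_byte = int(size_in_mib) * MEBIBYTE    # 104857600 bytes requested
# expanded_size, as returned by claim.expand(), is stored as a string
# annotation, e.g. ANNOT_EXPANDED_SIZE -> "104857600", for a later
# verification step to compare against the live PVC size.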