diff --git a/e2e/keywords/sharemanager.resource b/e2e/keywords/sharemanager.resource
index 6fe84fda8..3e8026de2 100644
--- a/e2e/keywords/sharemanager.resource
+++ b/e2e/keywords/sharemanager.resource
@@ -21,12 +21,12 @@ Check sharemanager ${condition} using headless service
 Wait for all sharemanager to be deleted
     wait_for_sharemanagers_deleted
 
-Delete sharemanager of deployment ${deployment_id} and wait for recreation
+Delete sharemanager pod of deployment ${deployment_id} and wait for recreation
     ${deployment_name} =    generate_name_with_suffix    deployment    ${deployment_id}
     ${volume_name} =    get_workload_volume_name    ${deployment_name}
-    delete_sharemanager_and_wait_for_recreation    ${volume_name}
+    delete_sharemanager_pod_and_wait_for_recreation    ${volume_name}
 
-Wait for sharemanager of deployment ${deployment_id} running
+Wait for sharemanager pod of deployment ${deployment_id} running
     ${deployment_name} =    generate_name_with_suffix    deployment    ${deployment_id}
     ${volume_name} =    get_workload_volume_name    ${deployment_name}
-    wait_for_share_manager_running    ${volume_name}
+    wait_for_share_manager_pod_running    ${volume_name}
diff --git a/e2e/libs/keywords/sharemanager_keywords.py b/e2e/libs/keywords/sharemanager_keywords.py
index f9d501f34..b541f5b26 100644
--- a/e2e/libs/keywords/sharemanager_keywords.py
+++ b/e2e/libs/keywords/sharemanager_keywords.py
@@ -7,7 +7,7 @@
 from utility.utility import get_retry_count_and_interval
 from utility.utility import logging
 
-
+from utility.utility import get_pod, delete_pod
 
 class sharemanager_keywords:
@@ -48,14 +48,32 @@ def wait_for_sharemanagers_deleted(self, name=[]):
         assert AssertionError, f"Failed to wait for all sharemanagers to be deleted"
 
-    def delete_sharemanager(self, name):
-        return self.sharemanager.delete(name)
-
-    def delete_sharemanager_and_wait_for_recreation(self, name):
-        sharemanager = self.sharemanager.get(name)
-        last_creation_time = sharemanager["metadata"]["creationTimestamp"]
-        self.sharemanager.delete(name)
-        self.sharemanager.wait_for_restart(name, last_creation_time)
+    def delete_sharemanager_pod_and_wait_for_recreation(self, name):
+        sharemanager_pod_name = "share-manager-" + name
+        sharemanager_pod = get_pod(sharemanager_pod_name, "longhorn-system")
+        last_creation_time = sharemanager_pod.metadata.creation_timestamp
+        delete_pod(sharemanager_pod_name, "longhorn-system")
+
+        retry_count, retry_interval = get_retry_count_and_interval()
+        for i in range(retry_count):
+            time.sleep(retry_interval)
+            sharemanager_pod = get_pod(sharemanager_pod_name, "longhorn-system")
+            # The pod is briefly absent while it is being recreated.
+            if sharemanager_pod is None:
+                continue
+            creation_time = sharemanager_pod.metadata.creation_timestamp
+            if creation_time > last_creation_time:
+                return
+
+        assert False, f"sharemanager pod {sharemanager_pod_name} not recreated"
+
+    def wait_for_share_manager_pod_running(self, name):
+        sharemanager_pod_name = "share-manager-" + name
+        retry_count, retry_interval = get_retry_count_and_interval()
+        for i in range(retry_count):
+            sharemanager_pod = get_pod(sharemanager_pod_name, "longhorn-system")
+            # The pod may not exist yet, so guard against None before checking its phase.
+            if sharemanager_pod is not None and sharemanager_pod.status.phase == "Running":
+                return
+            time.sleep(retry_interval)
 
-    def wait_for_share_manager_running(self, name):
-        return self.sharemanager.wait_for_running(name)
+        assert False, f"sharemanager pod {sharemanager_pod_name} not running"
diff --git a/e2e/tests/negative/component_resilience.robot b/e2e/tests/negative/component_resilience.robot
index 4c5cc5059..fa4563376 100644
--- a/e2e/tests/negative/component_resilience.robot
+++ b/e2e/tests/negative/component_resilience.robot
@@ -174,8 +174,8 @@ Test Longhorn dynamic provisioned RWX volume recovery
         And Wait until volume of deployment 0 replica rebuilding started on replica node
         Then Delete instance-manager of deployment 0 volume and wait for recover
 
-        When Delete sharemanager of deployment 0 and wait for recreation
-        And Wait for sharemanager of deployment 0 running
+        When Delete sharemanager pod of deployment 0 and wait for recreation
+        And Wait for sharemanager pod of deployment 0 running
 
         And Wait for deployment 0 pods stable
         And Check deployment 0 data in file data.txt is intact
     END
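
Note on the helpers used above: the new keywords import get_pod and delete_pod from utility.utility. If those helpers are not already present in that module, a minimal sketch of what they could look like with the Kubernetes Python client follows. The function names, the (name, namespace) signatures, and the convention that get_pod returns None for a missing pod are assumptions inferred from how the keywords call them, not confirmed parts of the existing utility module.

# Hypothetical sketch of the assumed utility.utility helpers.
# Assumes kube config has already been loaded elsewhere, e.g. via
# kubernetes.config.load_kube_config() or load_incluster_config().
from kubernetes import client
from kubernetes.client.rest import ApiException


def get_pod(name, namespace):
    # Return the pod object, or None if the pod does not exist (HTTP 404),
    # which lets callers poll for deletion and recreation.
    try:
        return client.CoreV1Api().read_namespaced_pod(name=name, namespace=namespace)
    except ApiException as e:
        if e.status == 404:
            return None
        raise


def delete_pod(name, namespace):
    # Request deletion of the pod; callers then poll get_pod to observe
    # the pod disappearing and being recreated by its controller.
    client.CoreV1Api().delete_namespaced_pod(name=name, namespace=namespace)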