Update Test Cases for volume condition type
Ref: longhorn/longhorn#6830

Signed-off-by: Roger Yao <[email protected]>
roger-ryao authored and David Ko committed Oct 4, 2023
1 parent 3289e6c commit 27de209
Showing 5 changed files with 26 additions and 26 deletions.
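For context, the change is a pure rename: the tests now look up volume conditions by the PascalCase condition type names (Scheduled, Restore, TooManySnapshots) instead of the old lowercase keys. A minimal sketch of the idea follows (a hypothetical helper, not part of this commit), assuming the volume object returned by client.by_id_volume() exposes its conditions both as attributes and as a mapping, as the hunks below do:

# Hypothetical helper, not part of this commit: look up a volume condition by
# its PascalCase type name, accepting both access styles used in these tests
# (volume.conditions.Scheduled and volume.conditions['Restore']).
def get_volume_condition(volume, condition_type):
    conditions = volume.conditions
    try:
        return conditions[condition_type]
    except (KeyError, TypeError):
        return getattr(conditions, condition_type)

# Usage matching the updated assertions in this diff (illustrative):
#   get_volume_condition(volume, "Scheduled").reason == "ReplicaSchedulingFailure"
#   get_volume_condition(volume, "Restore").status == "True"
#   get_volume_condition(volume, "TooManySnapshots").status == "False"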
14 changes: 7 additions & 7 deletions manager/integration/tests/common.py
@@ -126,10 +126,10 @@
 
 DIRECTORY_PATH = '/tmp/longhorn-test/'
 
-VOLUME_CONDITION_SCHEDULED = "scheduled"
-VOLUME_CONDITION_RESTORE = "restore"
+VOLUME_CONDITION_SCHEDULED = "Scheduled"
+VOLUME_CONDITION_RESTORE = "Restore"
 VOLUME_CONDITION_STATUS = "status"
-VOLUME_CONDITION_TOOMANYSNAPSHOTS = "toomanysnapshots"
+VOLUME_CONDITION_TOOMANYSNAPSHOTS = "TooManySnapshots"
 
 CONDITION_STATUS_TRUE = "True"
 CONDITION_STATUS_FALSE = "False"
@@ -1822,16 +1822,16 @@ def wait_scheduling_failure(client, volume_name):
     scheduling_failure = False
     for i in range(RETRY_COUNTS):
         v = client.by_id_volume(volume_name)
-        if v.conditions.scheduled.status == "False" and \
-                v.conditions.scheduled.reason == \
+        if v.conditions.Scheduled.status == "False" and \
+                v.conditions.Scheduled.reason == \
                 "ReplicaSchedulingFailure":
             scheduling_failure = True
         if scheduling_failure:
             break
         time.sleep(RETRY_INTERVAL)
     assert scheduling_failure, f" Scheduled Status = " \
-        f"{v.conditions.scheduled.status}, Scheduled reason = " \
-        f"{v.conditions.scheduled.reason}, volume = {v}"
+        f"{v.conditions.Scheduled.status}, Scheduled reason = " \
+        f"{v.conditions.Scheduled.reason}, volume = {v}"
 
 
 def wait_for_device_login(dest_path, name):
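A hedged usage sketch of the updated helper follows; the test body and volume parameters are illustrative, not taken from this commit, and it assumes the usual client and volume_name pytest fixtures from these integration tests:

# Illustrative only: how a test might drive wait_scheduling_failure after this
# change. The volume spec below is an assumption chosen to make scheduling
# fail; real tests in this repo construct the failure differently.
from common import wait_scheduling_failure, wait_for_volume_condition_scheduled

def test_scheduling_failure_example(client, volume_name):  # hypothetical test
    # Ask for more replicas than the cluster can place so that
    # conditions.Scheduled reports reason "ReplicaSchedulingFailure".
    client.create_volume(name=volume_name,
                         size=str(1024 * 1024 * 1024),  # 1 GiB in bytes
                         numberOfReplicas=20)
    wait_scheduling_failure(client, volume_name)
    # The generic waiter in common.py checks the same PascalCase key.
    wait_for_volume_condition_scheduled(client, volume_name, "status", "False")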
2 changes: 1 addition & 1 deletion manager/integration/tests/test_basic.py
@@ -3848,7 +3848,7 @@ def test_volume_toomanysnapshots_condition(client, core_api, volume_name): # NO
 
         if count < max_count:
             volume = client.by_id_volume(volume_name)
-            assert volume.conditions.toomanysnapshots.status == "False"
+            assert volume.conditions.TooManySnapshots.status == "False"
         else:
             wait_for_volume_condition_toomanysnapshots(client, volume_name,
                                                        "status", "True")
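The waiter in the else branch follows the same polling pattern as the other condition helpers in common.py; the sketch below shows that pattern with the renamed key (the helper body is an assumption for illustration, not copied from common.py, and the retry values are illustrative):

import time

# Constant mirroring the common.py hunk above; retry values are illustrative.
VOLUME_CONDITION_TOOMANYSNAPSHOTS = "TooManySnapshots"
RETRY_COUNTS = 150
RETRY_INTERVAL = 1

def wait_for_volume_condition_toomanysnapshots(client, name, key, value):
    # Poll until conditions["TooManySnapshots"][key] reaches the expected value.
    volume = None
    for _ in range(RETRY_COUNTS):
        volume = client.by_id_volume(name)
        conditions = volume.conditions
        if conditions and \
                VOLUME_CONDITION_TOOMANYSNAPSHOTS in conditions and \
                conditions[VOLUME_CONDITION_TOOMANYSNAPSHOTS][key] == value:
            return volume
        time.sleep(RETRY_INTERVAL)
    assert False, f"TooManySnapshots.{key} never became {value} on {name}"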
30 changes: 15 additions & 15 deletions manager/integration/tests/test_ha.py
@@ -1438,12 +1438,12 @@ def test_all_replica_restore_failure(set_random_backupstore, client, core_api, v
     8. Restore a volume from the backup.
     9. Wait for the volume restore in progress by checking if:
       9.1. `volume.restoreStatus` shows the related restore info.
-      9.2. `volume.conditions[restore].status == True &&
-           volume.conditions[restore].reason == "RestoreInProgress"`.
+      9.2. `volume.conditions[Restore].status == True &&
+           volume.conditions[Restore].reason == "RestoreInProgress"`.
       9.3. `volume.ready == false`.
     10. Wait for the restore volume Faulted.
-    11. Check if `volume.conditions[restore].status == False &&
-        volume.conditions[restore].reason == "RestoreFailure"`.
+    11. Check if `volume.conditions[Restore].status == False &&
+        volume.conditions[Restore].reason == "RestoreFailure"`.
     12. Check if `volume.ready == false`.
     13. Make sure auto-salvage is not triggered even the feature is enabled.
     14. Verify if PV/PVC cannot be created from Longhorn.
@@ -1484,8 +1484,8 @@ def test_all_replica_restore_failure(set_random_backupstore, client, core_api, v
     wait_for_volume_detached(client, res_name)
 
     res_volume = client.by_id_volume(res_name)
-    assert res_volume.conditions['restore'].status == "False"
-    assert res_volume.conditions['restore'].reason == "RestoreFailure"
+    assert res_volume.conditions['Restore'].status == "False"
+    assert res_volume.conditions['Restore'].reason == "RestoreFailure"
     assert res_volume.ready is False
    assert res_volume.state == "detached"
     assert hasattr(res_volume, 'pvCreate') is False
@@ -1518,16 +1518,16 @@ def test_single_replica_restore_failure(set_random_backupstore, client, core_api
     7. Restore a volume from the backup.
     8. Wait for the volume restore start by checking if:
       8.1. `volume.restoreStatus` shows the related restore info.
-      8.2. `volume.conditions[restore].status == True &&
-           volume.conditions[restore].reason == "RestoreInProgress"`.
+      8.2. `volume.conditions[Restore].status == True &&
+           volume.conditions[Restore].reason == "RestoreInProgress"`.
       8.3. `volume.ready == false`.
     9. Find a way to fail just one replica restore.
        e.g. Use iptable to block the restore.
     10. Wait for the restore volume Degraded.
     11. Wait for the volume restore & rebuild complete and check if:
       11.1. `volume.ready == true`
-      11.2. `volume.conditions[restore].status == False &&
-            volume.conditions[restore].reason == ""`.
+      11.2. `volume.conditions[Restore].status == False &&
+            volume.conditions[Restore].reason == ""`.
     12. Create PV/PVC/Pod for the restored volume and wait for the pod start.
     13. Check if the restored volume is state `Healthy`
         after the attachment.
@@ -1751,8 +1751,8 @@ def test_engine_crash_for_restore_volume(set_random_backupstore, client, core_ap
     8. Wait for the volume reattached.
     9. Verify if
       9.1. `volume.ready == false`.
-      9.2. `volume.conditions[restore].status == True &&
-           volume.conditions[restore].reason == "RestoreInProgress"`.
+      9.2. `volume.conditions[Restore].status == True &&
+           volume.conditions[Restore].reason == "RestoreInProgress"`.
     10. Wait for the volume restore complete and detached.
     11. Recreate a pod for the restored volume and wait for the pod start.
     12. Check the data md5sum for the restored data.
@@ -1856,8 +1856,8 @@ def test_engine_crash_for_dr_volume(set_random_backupstore, client, core_api, vo
     12. Wait for the DR volume reattached.
     13. Verify the DR volume:
       13.1. `volume.ready == false`.
-      13.2. `volume.conditions[restore].status == True &&
-            volume.conditions[restore].reason == "RestoreInProgress"`.
+      13.2. `volume.conditions[Restore].status == True &&
+            volume.conditions[Restore].reason == "RestoreInProgress"`.
       13.3. `volume.standby == true`
     14. Activate the DR volume and wait for detached.
     15. Create a pod for the restored volume and wait for the pod start.
@@ -2626,7 +2626,7 @@ def test_replica_failure_during_attaching(settings_reset, client, core_api, volu
     common.wait_for_volume_condition_scheduled(client, volume_name_2,
                                                "status", "False")
     volume_2 = client.by_id_volume(volume_name_2)
-    assert volume_2.conditions.scheduled.reason == "ReplicaSchedulingFailure"
+    assert volume_2.conditions.Scheduled.reason == "ReplicaSchedulingFailure"
 
     update_disks[default_disk_name].allowScheduling = True
     update_disks["extra-disk"]["allowScheduling"] = False
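All of the restore-related docstring steps above poll the same pair of fields on the Restore condition; a hedged sketch of that check follows (the helper name and retry values are illustrative, not from the repo):

import time

def wait_for_restore_in_progress(client, name, retries=150, interval=1):
    # Hypothetical helper: steps 9.2 / 8.2 / 13.2 above expect the Restore
    # condition to turn "True" with reason "RestoreInProgress" while data is
    # being pulled from the backup, and volume.ready to stay false until done.
    for _ in range(retries):
        volume = client.by_id_volume(name)
        conditions = volume.conditions
        if conditions and 'Restore' in conditions:
            restore = conditions['Restore']
            if restore.status == "True" and \
                    restore.reason == "RestoreInProgress":
                assert volume.ready is False
                return volume
        time.sleep(interval)
    assert False, f"volume {name} never reported Restore/RestoreInProgress"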
4 changes: 2 additions & 2 deletions manager/integration/tests/test_scheduling.py
@@ -1374,7 +1374,7 @@ def test_data_locality_basic(client, core_api, volume_name, pod, settings_reset)
         assert replica["running"] is False
         assert replica["mode"] == ""
 
-    assert volume4.conditions.scheduled.reason == \
+    assert volume4.conditions.Scheduled.reason == \
         "LocalReplicaSchedulingFailure"
 
     volume4 = volume4.updateReplicaCount(replicaCount=3)
@@ -1638,7 +1638,7 @@ def test_soft_anti_affinity_scheduling_volume_disable(client, volume_name): # NO
     for i in range(RETRY_COUNTS_SHORT):
         volume = client.by_id_volume(volume_name)
         assert volume.robustness == VOLUME_ROBUSTNESS_DEGRADED
-        assert volume.conditions.scheduled.status == "False"
+        assert volume.conditions.Scheduled.status == "False"
 
         healthy_replica_count = 0
         for replica in volume.replicas:
2 changes: 1 addition & 1 deletion manager/integration/tests/test_tagging.py
@@ -254,7 +254,7 @@ def test_tag_scheduling_on_update(client, node_default_tags, volume_name): # NO
     scheduled = False
     for i in range(RETRY_COUNTS):
         v = client.by_id_volume(volume_name)
-        if v.conditions.scheduled.status == "True":
+        if v.conditions.Scheduled.status == "True":
             scheduled = True
         if scheduled:
             break
