From 90089a599616163680c3a6e8d498af576de843b9 Mon Sep 17 00:00:00 2001 From: yangchiu Date: Thu, 23 Nov 2023 01:00:36 +0000 Subject: [PATCH] Update testing docs Signed-off-by: yangchiu --- integration/test_node.html | 242 ++++++++++++++++++++++++++++++++++++- 1 file changed, 241 insertions(+), 1 deletion(-) diff --git a/integration/test_node.html b/integration/test_node.html index 30c4d03b43..37f5599d53 100644 --- a/integration/test_node.html +++ b/integration/test_node.html @@ -2695,7 +2695,81 @@

Module tests.test_node

     def finalizer():
         common.cleanup_all_volumes(client)
 
-    request.addfinalizer(finalizer)
+    request.addfinalizer(finalizer)
+
+
+@pytest.mark.skip(reason="TODO")  # NOQA
+def test_drain_with_block_for_eviction_success():
+    """
+    Test drain completes after evicting replica with node-drain-policy
+    block-for-eviction
+
+    1. Set `node-drain-policy` to `block-for-eviction`.
+    2. Create a volume.
+    3. Ensure (through soft anti-affinity, low replica count, and/or enough
+       disks) that an evicted replica of the volume can be scheduled elsewhere.
+    4. Write data to the volume.
+    5. Drain a node one of the volume's replicas is scheduled to.
+    6. While the drain is ongoing:
+       - Verify that the volume never becomes degraded.
+       - Verify that `node.status.autoEvicting == true`.
+       - Optionally verify that `replica.spec.evictionRequested == true`.
+    7. Verify the drain completes.
+    8. Uncordon the node.
+    9. Verify the replica on the drained node has moved to a different one.
+    10. Verify that `node.status.autoEvicting == false`.
+    11. Verify that `replica.spec.evictionRequested == false`.
+    12. Verify the volume's data.
+    """
+
+
+@pytest.mark.skip(reason="TODO")  # NOQA
+def test_drain_with_block_for_eviction_if_contains_last_replica_success():
+    """
+    Test drain completes after evicting replicas with node-drain-policy
+    block-for-eviction-if-contains-last-replica
+
+    1. Set `node-drain-policy` to
+       `block-for-eviction-if-contains-last-replica`.
+    2. Create one volume with a single replica and another volume with three
+       replicas.
+    3. Ensure (through soft anti-affinity, low replica count, and/or enough
+    disks) that evicted replicas of both volumes can be scheduled elsewhere.
+    4. Write data to the volumes.
+    5. Drain a node both volumes have a replica scheduled to.
+    6. While the drain is ongoing:
+       - Verify that the volume with one replica never becomes degraded.
+       - Verify that the volume with three replicas becomes degraded.
+       - Verify that `node.status.autoEvicting == true`.
+       - Optionally verify that `replica.spec.evictionRequested == true` on the
+         replica for the volume that only has one.
+       - Optionally verify that `replica.spec.evictionRequested == false` on
+         the replica for the volume that has three.
+    7. Verify the drain completes.
+    8. Uncordon the node.
+    9. Verify the replica for the volume with one replica has moved to a
+       different node.
+    10. Verify the replica for the volume with three replicas has not moved.
+    11. Verify that `node.status.autoEvicting == false`.
+    12. Verify that `replica.spec.evictionRequested == false` on all replicas.
+    13. Verify the data in both volumes.
+    """
+
+
+@pytest.mark.skip(reason="TODO")  # NOQA
+def test_drain_with_block_for_eviction_failure():
+    """
+    Test drain never completes with node-drain-policy block-for-eviction
+
+    1. Set `node-drain-policy` to `block-for-eviction`.
+    2. Create a volume.
+    3. Ensure (through soft anti-affinity, high replica count, and/or not
+       enough disks) that an evicted replica of the volume cannot be scheduled
+       elsewhere.
+    4. Write data to the volume.
+    5. Drain a node one of the volume's replicas is scheduled to.
+    6. While the drain is ongoing:
+       - Verify that `node.status.autoEvicting == true`.
+       - Verify that `replica.spec.evictionRequested == true`.
+    7. Verify the drain never completes.
+    """
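All three test plans above begin by switching the global node-drain-policy setting. Below is a minimal sketch (not part of the patch) of how that step could be driven, assuming the Longhorn Python client fixture already used throughout tests/test_node.py; the helper name is illustrative and the setting name string is taken from the docstrings.

def set_node_drain_policy(client, value="block-for-eviction"):
    # Look up the global Longhorn setting by name and update its value.
    setting = client.by_id_setting("node-drain-policy")
    client.update(setting, value=value)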
@@ -3194,6 +3268,169 @@

Functions

cleanup_volume_by_name(client, vol_name) +
+def test_drain_with_block_for_eviction_failure() +
+
+

Test drain never completes with node-drain-policy block-for-eviction

+
  1. Set node-drain-policy to block-for-eviction.
  2. Create a volume.
  3. Ensure (through soft anti-affinity, high replica count, and/or not
     enough disks) that an evicted replica of the volume cannot be scheduled
     elsewhere.
  4. Write data to the volume.
  5. Drain a node one of the volume's replicas is scheduled to.
  6. While the drain is ongoing:
     - Verify that node.status.autoEvicting == true.
     - Verify that replica.spec.evictionRequested == true.
  7. Verify the drain never completes.
+
+ +Expand source code + +
@pytest.mark.skip(reason="TODO")  # NOQA
+def test_drain_with_block_for_eviction_failure():
+    """
+    Test drain never completes with node-drain-policy block-for-eviction
+
+    1. Set `node-drain-policy` to `block-for-eviction`.
+    2. Create a volume.
+    3. Ensure (through soft anti-affinity, high replica count, and/or not
+       enough disks) that an evicted replica of the volume cannot be scheduled
+       elsewhere.
+    4. Write data to the volume.
+    5. Drain a node one of the volume's replicas is scheduled to.
+    6. While the drain is ongoing:
+       - Verify that `node.status.autoEvicting == true`.
+       - Verify that `replica.spec.evictionRequested == true`.
+    7. Verify the drain never completes.
+    """
+
+
+
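A minimal sketch of how steps 6-7 of this failure case could be asserted, assuming kubectl is available to the test runner alongside the existing client fixture; start_drain and assert_drain_stays_blocked are illustrative helpers, and the autoEvicting/evictionRequested attributes simply mirror the CR fields named in the docstring (the real client objects may expose them differently).

import subprocess
import time


def start_drain(node_name):
    # Launch "kubectl drain" in the background; with block-for-eviction it is
    # expected to block while the protected replica cannot be relocated.
    return subprocess.Popen([
        "kubectl", "drain", node_name,
        "--ignore-daemonsets", "--delete-emptydir-data",
    ])


def assert_drain_stays_blocked(client, drain_proc, node_name, volume_name):
    # Attribute names assumed from the docstring fields.
    node = client.by_id_node(node_name)
    assert node.autoEvicting
    volume = client.by_id_volume(volume_name)
    replica = next(r for r in volume.replicas if r.hostId == node_name)
    assert replica.evictionRequested
    # Give the drain ample time, then confirm it is still running because the
    # evicted replica has nowhere to be rescheduled.
    time.sleep(90)
    assert drain_proc.poll() is None, "kubectl drain should not have completed"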
+def test_drain_with_block_for_eviction_if_contains_last_replica_success() +
+
+

Test drain completes after evicting replicas with node-drain-policy +block-for-eviction-if-contains-last-replica

+
  1. Set node-drain-policy to
     block-for-eviction-if-contains-last-replica.
  2. Create one volume with a single replica and another volume with three
     replicas.
  3. Ensure (through soft anti-affinity, low replica count, and/or enough
     disks) that evicted replicas of both volumes can be scheduled elsewhere.
  4. Write data to the volumes.
  5. Drain a node both volumes have a replica scheduled to.
  6. While the drain is ongoing:
     - Verify that the volume with one replica never becomes degraded.
     - Verify that the volume with three replicas becomes degraded.
     - Verify that node.status.autoEvicting == true.
     - Optionally verify that replica.spec.evictionRequested == true on the
       replica for the volume that only has one.
     - Optionally verify that replica.spec.evictionRequested == false on
       the replica for the volume that has three.
  7. Verify the drain completes.
  8. Uncordon the node.
  9. Verify the replica for the volume with one replica has moved to a
     different node.
  10. Verify the replica for the volume with three replicas has not moved.
  11. Verify that node.status.autoEvicting == false.
  12. Verify that replica.spec.evictionRequested == false on all replicas.
  13. Verify the data in both volumes.
+
+ +Expand source code + +
@pytest.mark.skip(reason="TODO")  # NOQA
+def test_drain_with_block_for_eviction_if_contains_last_replica_success():
+    """
+    Test drain completes after evicting replicas with node-drain-policy
+    block-for-eviction-if-contains-last-replica
+
+    1. Set `node-drain-policy` to
+       `block-for-eviction-if-contains-last-replica`.
+    2. Create one volume with a single replica and another volume with three
+       replicas.
+    3. Ensure (through soft anti-affinity, low replica count, and/or enough
+    disks) that evicted replicas of both volumes can be scheduled elsewhere.
+    4. Write data to the volumes.
+    5. Drain a node both volumes have a replica scheduled to.
+    6. While the drain is ongoing:
+       - Verify that the volume with one replica never becomes degraded.
+       - Verify that the volume with three replicas becomes degraded.
+       - Verify that `node.status.autoEvicting == true`.
+       - Optionally verify that `replica.spec.evictionRequested == true` on the
+         replica for the volume that only has one.
+       - Optionally verify that `replica.spec.evictionRequested == false` on
+         the replica for the volume that has three.
+    7. Verify the drain completes.
+    8. Uncordon the node.
+    9. Verify the replica for the volume with one replica has moved to a
+       different node.
+    10. Verify the replica for the volume with three replicas has not moved.
+    11. Verify that `node.status.autoEvicting == false`.
+    12. Verify that `replica.spec.evictionRequested == false` on all replicas.
+    13. Verify the data in both volumes.
+    """
+
+
+
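For step 6 of this last-replica case, the interesting check is that eviction is requested only for the replica that is the sole copy of its volume. A minimal sketch under the same assumptions as above (illustrative helper name, attribute names mirroring the docstring's CR fields):

def assert_only_last_replica_evicted(client, node_name,
                                     single_replica_volume, three_replica_volume):
    # Only the replica that is the last copy of its volume should be asked to
    # leave the drained node; the three-replica volume keeps its replica there.
    single = client.by_id_volume(single_replica_volume)
    multi = client.by_id_volume(three_replica_volume)
    single_on_node = next(r for r in single.replicas if r.hostId == node_name)
    multi_on_node = next(r for r in multi.replicas if r.hostId == node_name)
    assert single_on_node.evictionRequested
    assert not multi_on_node.evictionRequested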
+def test_drain_with_block_for_eviction_success() +
+
+

Test drain completes after evicting replica with node-drain-policy +block-for-eviction

+
  1. Set node-drain-policy to block-for-eviction.
  2. Create a volume.
  3. Ensure (through soft anti-affinity, low replica count, and/or enough
     disks) that an evicted replica of the volume can be scheduled elsewhere.
  4. Write data to the volume.
  5. Drain a node one of the volume's replicas is scheduled to.
  6. While the drain is ongoing:
     - Verify that the volume never becomes degraded.
     - Verify that node.status.autoEvicting == true.
     - Optionally verify that replica.spec.evictionRequested == true.
  7. Verify the drain completes.
  8. Uncordon the node.
  9. Verify the replica on the drained node has moved to a different one.
  10. Verify that node.status.autoEvicting == false.
  11. Verify that replica.spec.evictionRequested == false.
  12. Verify the volume's data.
+
+ +Expand source code + +
@pytest.mark.skip(reason="TODO")  # NOQA
+def test_drain_with_block_for_eviction_success():
+    """
+    Test drain completes after evicting replica with node-drain-policy
+    block-for-eviction
+
+    1. Set `node-drain-policy` to `block-for-eviction`.
+    2. Create a volume.
+    3. Ensure (through soft anti-affinity, low replica count, and/or enough
+       disks) that an evicted replica of the volume can be scheduled elsewhere.
+    4. Write data to the volume.
+    5. Drain a node one of the volume's replicas is scheduled to.
+    6. While the drain is ongoing:
+       - Verify that the volume never becomes degraded.
+       - Verify that `node.status.autoEvicting == true`.
+       - Optionally verify that `replica.spec.evictionRequested == true`.
+    7. Verify the drain completes.
+    8. Uncordon the node.
+    9. Verify the replica on the drained node has moved to a different one.
+    10. Verify that `node.status.autoEvicting == false`.
+    11. Verify that `replica.spec.evictionRequested == false`.
+    12. Verify the volume's data.
+    """
+
+
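Steps 7-12 of this success case boil down to letting the drain finish, uncordoning, and confirming the replica landed elsewhere with the transient eviction markers cleared. A minimal sketch under the same assumptions (kubectl available, illustrative helper name, attribute names taken from the docstring):

import subprocess


def drain_uncordon_and_verify(client, node_name, volume_name):
    # With block-for-eviction the drain should complete on its own once the
    # replica has been evicted from the node.
    subprocess.check_call([
        "kubectl", "drain", node_name,
        "--ignore-daemonsets", "--delete-emptydir-data", "--timeout=600s",
    ])
    subprocess.check_call(["kubectl", "uncordon", node_name])

    # The evicted replica should now live on another node, and the transient
    # eviction markers should be cleared again.
    volume = client.by_id_volume(volume_name)
    assert all(r.hostId != node_name for r in volume.replicas)
    assert not client.by_id_node(node_name).autoEvicting
    assert all(not r.evictionRequested for r in volume.replicas)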
def test_node_config_annotation(client, core_api, reset_default_disk_label, reset_disk_and_tag_annotations, reset_disk_settings)
@@ -5950,6 +6187,9 @@

Index

  • test_disable_scheduling_on_cordoned_node
  • test_disk_eviction_with_node_level_soft_anti_affinity_disabled
  • test_disk_migration
  • test_drain_with_block_for_eviction_failure
  • test_drain_with_block_for_eviction_if_contains_last_replica_success
  • test_drain_with_block_for_eviction_success
  • test_node_config_annotation
  • test_node_config_annotation_invalid
  • test_node_config_annotation_missing