
Commit

Update testing docs
Signed-off-by: c3y1huang <[email protected]>
c3y1huang committed Dec 8, 2023
1 parent 0fa8584 commit 704dbf5
Showing 29 changed files with 371 additions and 867 deletions.
index.html: 2 changes (1 addition, 1 deletion)
@@ -1,7 +1,7 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta name="generator" content="Hugo 0.120.4">
<meta name="generator" content="Hugo 0.121.0">
<meta charset="utf-8">
<title>
Longhorn Manual Test Cases
index.xml: 527 changes (94 additions, 433 deletions)

Large diffs are not rendered by default.

integration/test_zone.html: 182 changes (182 additions, 0 deletions)
@@ -36,6 +36,7 @@ <h1 class="title">Module <code>tests.test_zone</code></h1>
from common import pvc, pod # NOQA
from common import volume_name # NOQA

from common import cleanup_node_disks
from common import get_self_host_id

from common import create_and_wait_pod
@@ -531,6 +532,84 @@ <h1 class="title">Module <code>tests.test_zone</code></h1>
assert z3_r_count == 2


def test_replica_auto_balance_when_disabled_disk_scheduling_in_zone(client, core_api, volume_name): # NOQA
"""
Scenario: replica auto-balance when disk scheduling is disabled on nodes
in a zone.

Issue: https://github.com/longhorn/longhorn/issues/6508

Given `replica-soft-anti-affinity` setting is `true`.
And node-1 is in zone-1.
node-2 is in zone-2.
node-3 is in zone-3.
And disk scheduling is disabled on node-3.
And create a volume with 3 replicas.
And attach the volume to test pod node.
And 3 replicas running in zone-1 and zone-2.
0 replicas running in zone-3.

When set `replica-auto-balance` to `best-effort`.

Then 3 replicas running in zone-1 and zone-2.
0 replicas running in zone-3.
And replica count remains stable across zones and nodes.
"""
# Set `replica-soft-anti-affinity` to `true`.
update_setting(client, SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY, "true")

# Assign nodes to respective zones
node1, node2, node3 = client.list_node()
set_k8s_node_zone_label(core_api, node1.name, ZONE1)
set_k8s_node_zone_label(core_api, node2.name, ZONE2)
set_k8s_node_zone_label(core_api, node3.name, ZONE3)
wait_longhorn_node_zone_updated(client)

# Disable disk scheduling on node 3
cleanup_node_disks(client, node3.name)

# Create a volume with 3 replicas
num_of_replicas = 3
volume = client.create_volume(name=volume_name,
numberOfReplicas=num_of_replicas)

# Wait for the volume to detach and attach it to the test pod node
volume = wait_for_volume_detached(client, volume_name)
volume.attach(hostId=get_self_host_id())

# Define a function to assert replica count
def assert_replica_count(is_stable=False):
for _ in range(RETRY_COUNTS):
time.sleep(RETRY_INTERVAL)

zone3_replica_count = get_zone_replica_count(
client, volume_name, ZONE3, chk_running=True)
assert zone3_replica_count == 0

total_replica_count = \
get_zone_replica_count(
client, volume_name, ZONE1, chk_running=True) + \
get_zone_replica_count(
client, volume_name, ZONE2, chk_running=True)

if is_stable:
assert total_replica_count == num_of_replicas
elif total_replica_count == num_of_replicas:
break

assert total_replica_count == 3

# Perform the initial assertion to ensure the replica count is as expected
assert_replica_count()

# Update the replica-auto-balance setting to `best-effort`
update_setting(client, SETTING_REPLICA_AUTO_BALANCE, "best-effort")

# Perform the final assertion to ensure the replica count is as expected,
# and stable after the setting update
assert_replica_count(is_stable=True)


def test_replica_auto_balance_when_replica_on_unschedulable_node(client, core_api, volume_name, request): # NOQA
&#34;&#34;&#34;
Scenario: replica auto-balance when replica already running on
@@ -1322,6 +1401,108 @@ <h2 class="section-title" id="header-functions">Functions</h2>
assert check_z2_r_count == z2_r_count</code></pre>
</details>
</dd>
<dt id="tests.test_zone.test_replica_auto_balance_when_disabled_disk_scheduling_in_zone"><code class="name flex">
<span>def <span class="ident">test_replica_auto_balance_when_disabled_disk_scheduling_in_zone</span></span>(<span>client, core_api, volume_name)</span>
</code></dt>
<dd>
<div class="desc"><p>Scenario: replica auto-balance when disk scheduling is disabled on nodes
in a zone.</p>
<p>Issue: <a href="https://github.com/longhorn/longhorn/issues/6508">https://github.com/longhorn/longhorn/issues/6508</a></p>
<p>Given <code>replica-soft-anti-affinity</code> setting is <code>true</code>.
And node-1 is in zone-1.
node-2 is in zone-2.
node-3 is in zone-3.
And disk scheduling is disabled on node-3.
And create a volume with 3 replicas.
And attach the volume to test pod node.
And 3 replicas running in zone-1 and zone-2.
0 replicas running in zone-3.</p>
<p>When set <code>replica-auto-balance</code> to <code>best-effort</code>.</p>
<p>Then 3 replicas running in zone-1 and zone-2.
0 replicas running in zone-3.
And replica count remains stable across zones and nodes.</p></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def test_replica_auto_balance_when_disabled_disk_scheduling_in_zone(client, core_api, volume_name): # NOQA
"""
Scenario: replica auto-balance when disk scheduling is disabled on nodes
in a zone.

Issue: https://github.com/longhorn/longhorn/issues/6508

Given `replica-soft-anti-affinity` setting is `true`.
And node-1 is in zone-1.
node-2 is in zone-2.
node-3 is in zone-3.
And disk scheduling is disabled on node-3.
And create a volume with 3 replicas.
And attach the volume to test pod node.
And 3 replicas running in zone-1 and zone-2.
0 replicas running in zone-3.

When set `replica-auto-balance` to `best-effort`.

Then 3 replicas running in zone-1 and zone-2.
0 replicas running in zone-3.
And replica count remains stable across zones and nodes.
"""
# Set `replica-soft-anti-affinity` to `true`.
update_setting(client, SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY, "true")

# Assign nodes to respective zones
node1, node2, node3 = client.list_node()
set_k8s_node_zone_label(core_api, node1.name, ZONE1)
set_k8s_node_zone_label(core_api, node2.name, ZONE2)
set_k8s_node_zone_label(core_api, node3.name, ZONE3)
wait_longhorn_node_zone_updated(client)

# Disable disk scheduling on node 3
cleanup_node_disks(client, node3.name)

# Create a volume with 3 replicas
num_of_replicas = 3
volume = client.create_volume(name=volume_name,
numberOfReplicas=num_of_replicas)

# Wait for the volume to detach and attach it to the test pod node
volume = wait_for_volume_detached(client, volume_name)
volume.attach(hostId=get_self_host_id())

# Define a function to assert replica count
def assert_replica_count(is_stable=False):
for _ in range(RETRY_COUNTS):
time.sleep(RETRY_INTERVAL)

zone3_replica_count = get_zone_replica_count(
client, volume_name, ZONE3, chk_running=True)
assert zone3_replica_count == 0

total_replica_count = \
get_zone_replica_count(
client, volume_name, ZONE1, chk_running=True) + \
get_zone_replica_count(
client, volume_name, ZONE2, chk_running=True)

if is_stable:
assert total_replica_count == num_of_replicas
elif total_replica_count == num_of_replicas:
break

assert total_replica_count == 3

# Perform the initial assertion to ensure the replica count is as expected
assert_replica_count()

# Update the replica-auto-balance setting to `best-effort`
update_setting(client, SETTING_REPLICA_AUTO_BALANCE, "best-effort")

# Perform the final assertion to ensure the replica count is as expected,
# and stable after the setting update
assert_replica_count(is_stable=True)</code></pre>
</details>
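<p><em>Editor's note (illustrative, not part of this commit):</em> the <code>assert_replica_count</code> helper above combines two behaviors behind its <code>is_stable</code> flag. With <code>is_stable=False</code> it polls until the expected replica count is reached in zone-1 and zone-2 and then breaks out of the retry loop; with <code>is_stable=True</code> it asserts the count on every retry, so any rebalancing away from the healthy zones during the window fails the test. A minimal, self-contained sketch of that poll-vs-hold pattern, with hypothetical names and defaults:</p>
<pre><code class="python">import time

def poll_or_hold(check, retries=60, interval=1, hold=False):
    # check: a zero-argument callable returning True when the desired
    # state is observed (e.g. "three replicas are running").
    ok = False
    for _ in range(retries):
        time.sleep(interval)
        ok = check()
        if hold:
            # Stability check: the condition must hold on every poll.
            assert ok
        elif ok:
            # Convergence check: stop as soon as the condition is reached.
            break
    assert ok

# Hypothetical usage:
#   poll_or_hold(lambda: count_running_replicas() == 3)             # wait to converge
#   poll_or_hold(lambda: count_running_replicas() == 3, hold=True)  # require stability</code></pre>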
</dd>
<dt id="tests.test_zone.test_replica_auto_balance_when_replica_on_unschedulable_node"><code class="name flex">
<span>def <span class="ident">test_replica_auto_balance_when_replica_on_unschedulable_node</span></span>(<span>client, core_api, volume_name, request)</span>
</code></dt>
@@ -2366,6 +2547,7 @@ <h1>Index</h1>
<li><code><a title="tests.test_zone.k8s_node_zone_tags" href="#tests.test_zone.k8s_node_zone_tags">k8s_node_zone_tags</a></code></li>
<li><code><a title="tests.test_zone.test_replica_auto_balance_node_duplicates_in_multiple_zones" href="#tests.test_zone.test_replica_auto_balance_node_duplicates_in_multiple_zones">test_replica_auto_balance_node_duplicates_in_multiple_zones</a></code></li>
<li><code><a title="tests.test_zone.test_replica_auto_balance_should_respect_node_selector" href="#tests.test_zone.test_replica_auto_balance_should_respect_node_selector">test_replica_auto_balance_should_respect_node_selector</a></code></li>
<li><code><a title="tests.test_zone.test_replica_auto_balance_when_disabled_disk_scheduling_in_zone" href="#tests.test_zone.test_replica_auto_balance_when_disabled_disk_scheduling_in_zone">test_replica_auto_balance_when_disabled_disk_scheduling_in_zone</a></code></li>
<li><code><a title="tests.test_zone.test_replica_auto_balance_when_replica_on_unschedulable_node" href="#tests.test_zone.test_replica_auto_balance_when_replica_on_unschedulable_node">test_replica_auto_balance_when_replica_on_unschedulable_node</a></code></li>
<li><code><a title="tests.test_zone.test_replica_auto_balance_zone_best_effort" href="#tests.test_zone.test_replica_auto_balance_zone_best_effort">test_replica_auto_balance_zone_best_effort</a></code></li>
<li><code><a title="tests.test_zone.test_replica_auto_balance_zone_best_effort_with_data_locality" href="#tests.test_zone.test_replica_auto_balance_zone_best_effort_with_data_locality">test_replica_auto_balance_zone_best_effort_with_data_locality</a></code></li>