Commit
Update testing docs
Signed-off-by: c3y1huang <[email protected]>
c3y1huang committed Dec 13, 2023
1 parent aa42025 commit d3cb9af
Showing 1 changed file with 192 additions and 0 deletions.
192 changes: 192 additions & 0 deletions integration/test_zone.html
@@ -39,6 +39,9 @@ <h1 class="title">Module <code>tests.test_zone</code></h1>
from common import cleanup_node_disks
from common import get_self_host_id

from common import get_update_disks
from common import update_node_disks

from common import create_and_wait_pod
from common import create_pv_for_volume
from common import create_pvc_for_volume
@@ -610,6 +613,88 @@ <h1 class="title">Module <code>tests.test_zone</code></h1>
assert_replica_count(is_stable=True)


def test_replica_auto_balance_when_no_storage_available_in_zone(client, core_api, volume_name): # NOQA
    """
    Scenario: replica auto-balance when there is no storage available on nodes
    in a zone.

    Issue: https://github.com/longhorn/longhorn/issues/6671

    Given `replica-soft-anti-affinity` setting is `true`.
    And node-1 is in zone-1.
        node-2 is in zone-2.
        node-3 is in zone-3.
    And fill up the storage on node-3.
    And create a volume with 3 replicas.
    And attach the volume to the test pod node.
    And 3 replicas running in zone-1 and zone-2.
        0 replicas running in zone-3.

    When set `replica-auto-balance` to `best-effort`.

    Then 3 replicas running in zone-1 and zone-2.
        0 replicas running in zone-3.
    And replica count remains stable across zones and nodes.
    """
    # Set `replica-soft-anti-affinity` to `true`.
    update_setting(client, SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY, "true")

    # Assign nodes to respective zones
    node1, node2, node3 = client.list_node()
    set_k8s_node_zone_label(core_api, node1.name, ZONE1)
    set_k8s_node_zone_label(core_api, node2.name, ZONE2)
    set_k8s_node_zone_label(core_api, node3.name, ZONE3)
    wait_longhorn_node_zone_updated(client)

    # Fill up the storage on node 3
    for _, disk in node3.disks.items():
        disk.storageReserved = disk.storageMaximum

    update_disks = get_update_disks(node3.disks)
    update_node_disks(client, node3.name, disks=update_disks, retry=True)

    # Create a volume with 3 replicas
    num_of_replicas = 3
    volume = client.create_volume(name=volume_name,
                                  numberOfReplicas=num_of_replicas)

    # Wait for the volume to detach, then attach it to the test pod node
    volume = wait_for_volume_detached(client, volume_name)
    volume.attach(hostId=get_self_host_id())

    # Define a function to assert replica count
    def assert_replica_count(is_stable=False):
        for _ in range(RETRY_COUNTS):
            time.sleep(RETRY_INTERVAL)

            zone3_replica_count = get_zone_replica_count(
                client, volume_name, ZONE3, chk_running=True)
            assert zone3_replica_count == 0

            total_replica_count = \
                get_zone_replica_count(
                    client, volume_name, ZONE1, chk_running=True) + \
                get_zone_replica_count(
                    client, volume_name, ZONE2, chk_running=True)

            if is_stable:
                assert total_replica_count == num_of_replicas
            elif total_replica_count == num_of_replicas:
                break

        assert total_replica_count == 3

    # Perform the initial assertion to ensure the replica count is as expected
    assert_replica_count()

    # Update the replica-auto-balance setting to `best-effort`
    update_setting(client, SETTING_REPLICA_AUTO_BALANCE, "best-effort")

    # Perform the final assertion to ensure the replica count is as expected,
    # and stable after the setting update
    assert_replica_count(is_stable=True)


def test_replica_auto_balance_when_replica_on_unschedulable_node(client, core_api, volume_name, request): # NOQA
&#34;&#34;&#34;
Scenario: replica auto-balance when replica already running on
@@ -1503,6 +1588,112 @@ <h2 class="section-title" id="header-functions">Functions</h2>
assert_replica_count(is_stable=True)</code></pre>
</details>
</dd>
<dt id="tests.test_zone.test_replica_auto_balance_when_no_storage_available_in_zone"><code class="name flex">
<span>def <span class="ident">test_replica_auto_balance_when_no_storage_available_in_zone</span></span>(<span>client, core_api, volume_name)</span>
</code></dt>
<dd>
<div class="desc"><p>Scenario: replica auto-balance when there is no storage available on nodes
in a zone.</p>
<p>Issue: <a href="https://github.com/longhorn/longhorn/issues/6671">https://github.com/longhorn/longhorn/issues/6671</a></p>
<p>Given <code>replica-soft-anti-affinity</code> setting is <code>true</code>.
And node-1 is in zone-1.
node-2 is in zone-2.
node-3 is in zone-3.
And fill up the storage on node-3.
And create a volume with 3 replicas.
And attach the volume to the test pod node.
And 3 replicas running in zone-1 and zone-2.
0 replicas running in zone-3.</p>
<p>When set <code>replica-auto-balance</code> to <code>best-effort</code>.</p>
<p>Then 3 replicas running in zone-1 and zone-2.
0 replicas running in zone-3.
And replica count remains stable across zones and nodes.</p></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def test_replica_auto_balance_when_no_storage_available_in_zone(client, core_api, volume_name): # NOQA
&#34;&#34;&#34;
Scenario: replica auto-balance when there is no storage available on nodes
in a zone.

Issue: https://github.com/longhorn/longhorn/issues/6671

Given `replica-soft-anti-affinity` setting is `true`.
And node-1 is in zone-1.
node-2 is in zone-2.
node-3 is in zone-3.
And fill up the storage on node-3.
And create a volume with 3 replicas.
And attach the volume to test pod node.
And 3 replicas running in zone-1 and zone-2.
0 replicas running in zone-3.

When set `replica-auto-balance` to `best-effort`.

Then 3 replicas running in zone-1 and zone-2.
0 replicas running in zone-3.
And replica count remains stable across zones and nodes.
&#34;&#34;&#34;
# Set `replica-soft-anti-affinity` to `true`.
update_setting(client, SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY, &#34;true&#34;)

# Assign nodes to respective zones
node1, node2, node3 = client.list_node()
set_k8s_node_zone_label(core_api, node1.name, ZONE1)
set_k8s_node_zone_label(core_api, node2.name, ZONE2)
set_k8s_node_zone_label(core_api, node3.name, ZONE3)
wait_longhorn_node_zone_updated(client)

# Fill up the storage on node 3
for _, disk in node3.disks.items():
disk.storageReserved = disk.storageMaximum

update_disks = get_update_disks(node3.disks)
update_node_disks(client, node3.name, disks=update_disks, retry=True)

# Create a volume with 3 replicas
num_of_replicas = 3
volume = client.create_volume(name=volume_name,
numberOfReplicas=num_of_replicas)

# Wait for ht evolume to detach and attache it to the test pod node
volume = wait_for_volume_detached(client, volume_name)
volume.attach(hostId=get_self_host_id())

# Define a function to assert replica count
def assert_replica_count(is_stable=False):
for _ in range(RETRY_COUNTS):
time.sleep(RETRY_INTERVAL)

zone3_replica_count = get_zone_replica_count(
client, volume_name, ZONE3, chk_running=True)
assert zone3_replica_count == 0

total_replica_count = \
get_zone_replica_count(
client, volume_name, ZONE1, chk_running=True) + \
get_zone_replica_count(
client, volume_name, ZONE2, chk_running=True)

if is_stable:
assert total_replica_count == num_of_replicas
elif total_replica_count == num_of_replicas:
break

assert total_replica_count == 3

# Perform the initial assertion to ensure the replica count is as expected
assert_replica_count()

# Update the replica-auto-balance setting to `best-effort`
update_setting(client, SETTING_REPLICA_AUTO_BALANCE, &#34;best-effort&#34;)

# Perform the final assertion to ensure the replica count is as expected,
# and stable after the setting update
assert_replica_count(is_stable=True)</code></pre>
</details>
</dd>
<dt id="tests.test_zone.test_replica_auto_balance_when_replica_on_unschedulable_node"><code class="name flex">
<span>def <span class="ident">test_replica_auto_balance_when_replica_on_unschedulable_node</span></span>(<span>client, core_api, volume_name, request)</span>
</code></dt>
@@ -2548,6 +2739,7 @@ <h1>Index</h1>
<li><code><a title="tests.test_zone.test_replica_auto_balance_node_duplicates_in_multiple_zones" href="#tests.test_zone.test_replica_auto_balance_node_duplicates_in_multiple_zones">test_replica_auto_balance_node_duplicates_in_multiple_zones</a></code></li>
<li><code><a title="tests.test_zone.test_replica_auto_balance_should_respect_node_selector" href="#tests.test_zone.test_replica_auto_balance_should_respect_node_selector">test_replica_auto_balance_should_respect_node_selector</a></code></li>
<li><code><a title="tests.test_zone.test_replica_auto_balance_when_disabled_disk_scheduling_in_zone" href="#tests.test_zone.test_replica_auto_balance_when_disabled_disk_scheduling_in_zone">test_replica_auto_balance_when_disabled_disk_scheduling_in_zone</a></code></li>
<li><code><a title="tests.test_zone.test_replica_auto_balance_when_no_storage_available_in_zone" href="#tests.test_zone.test_replica_auto_balance_when_no_storage_available_in_zone">test_replica_auto_balance_when_no_storage_available_in_zone</a></code></li>
<li><code><a title="tests.test_zone.test_replica_auto_balance_when_replica_on_unschedulable_node" href="#tests.test_zone.test_replica_auto_balance_when_replica_on_unschedulable_node">test_replica_auto_balance_when_replica_on_unschedulable_node</a></code></li>
<li><code><a title="tests.test_zone.test_replica_auto_balance_zone_best_effort" href="#tests.test_zone.test_replica_auto_balance_zone_best_effort">test_replica_auto_balance_zone_best_effort</a></code></li>
<li><code><a title="tests.test_zone.test_replica_auto_balance_zone_best_effort_with_data_locality" href="#tests.test_zone.test_replica_auto_balance_zone_best_effort_with_data_locality">test_replica_auto_balance_zone_best_effort_with_data_locality</a></code></li>
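Note for readers of the scenario documented above: the Given steps assign each Kubernetes node to a zone before the volume is created, which Longhorn then mirrors onto its own node objects. The following is a minimal standalone sketch of that labeling step; it is not part of this commit, and the `topology.kubernetes.io/zone` label key and `set_node_zone` helper name are illustrative assumptions (the test itself uses the suite's `set_k8s_node_zone_label` and `wait_longhorn_node_zone_updated` helpers).

# Illustrative sketch only; assumes the standard Kubernetes topology zone label.
from kubernetes import client, config

K8S_ZONE_LABEL = "topology.kubernetes.io/zone"  # assumed label key

def set_node_zone(node_name, zone):
    """Label a node so Longhorn treats it as belonging to `zone`."""
    config.load_kube_config()
    core_v1 = client.CoreV1Api()
    body = {"metadata": {"labels": {K8S_ZONE_LABEL: zone}}}
    core_v1.patch_node(node_name, body)

# Usage mirroring the Given step of the scenario:
# set_node_zone("node-1", "zone-1")
# set_node_zone("node-2", "zone-2")
# set_node_zone("node-3", "zone-3")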
