Merge branch 'master' into enable-robot-qase-sync
khushboo-rancher authored Dec 18, 2024
2 parents 2523902 + a921646 commit 0121e4e
Showing 19 changed files with 398 additions and 117 deletions.
3 changes: 3 additions & 0 deletions e2e/libs/keywords/k8s_keywords.py
@@ -35,6 +35,9 @@ async def restart_kubelet_tasks():
                 )

             done, pending = await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
+            for task in done:
+                if task.exception():
+                    assert False, task.exception()
             logging(f"All kubelets on nodes {node_list} are restarted after downtime {downtime_in_sec} seconds")

         await restart_kubelet_tasks()
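For context: asyncio.wait does not re-raise exceptions that occurred inside the awaited tasks, so without the added loop a failed restart would pass silently. A minimal standalone sketch of the pattern used in this and the following keyword files (the task body and node names below are placeholders, not Longhorn code):

import asyncio

async def restart_kubelet(node_name, downtime_in_sec):
    # Placeholder task body; the real keyword restarts kubelet on the node.
    await asyncio.sleep(0.1)

async def restart_all(node_list, downtime_in_sec=10):
    tasks = [
        asyncio.create_task(restart_kubelet(n, downtime_in_sec), name=n)
        for n in node_list
    ]
    done, pending = await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
    # Surface any task exception so the keyword fails instead of passing silently.
    for task in done:
        if task.exception():
            assert False, task.exception()

asyncio.run(restart_all(["node-1", "node-2"]))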
3 changes: 3 additions & 0 deletions e2e/libs/keywords/network_keywords.py
@@ -35,6 +35,9 @@ async def disconnect_network_tasks():
                 )

             done, pending = await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
+            for task in done:
+                if task.exception():
+                    assert False, task.exception()
             logging(f"All networks on nodes {node_list} are recovered after disconnection time {disconnection_time_in_sec} seconds")

         await disconnect_network_tasks()
3 changes: 3 additions & 0 deletions e2e/libs/keywords/persistentvolumeclaim_keywords.py
@@ -1,4 +1,5 @@
 from persistentvolumeclaim import PersistentVolumeClaim
+from volume import Volume

 from utility.constant import ANNOT_EXPANDED_SIZE
 from utility.constant import LABEL_TEST
@@ -12,13 +13,15 @@ class persistentvolumeclaim_keywords:

     def __init__(self):
         self.claim = PersistentVolumeClaim()
+        self.volume = Volume()

     def cleanup_persistentvolumeclaims(self):
         claims = self.claim.list(label_selector=f"{LABEL_TEST}={LABEL_TEST_VALUE}")

         logging(f'Cleaning up {len(claims)} persistentvolumeclaims')
         for claim in claims:
             self.delete_persistentvolumeclaim(claim.metadata.name)
+            self.volume.wait_for_volume_deleted(claim.spec.volume_name)

     def create_persistentvolumeclaim(self, name, volume_type="RWO", sc_name="longhorn", storage_size="3GiB"):
         logging(f'Creating {volume_type} persistentvolumeclaim {name} with {sc_name} storageclass')
6 changes: 6 additions & 0 deletions e2e/libs/keywords/volume_keywords.py
@@ -200,6 +200,9 @@ async def wait_for_both_replica_rebuildings():
             ]

             done, pending = await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
+            for task in done:
+                if task.exception():
+                    assert False, task.exception()
             logging(f"Observed {done.pop().get_name()} and {done.pop().get_name()} started replica rebuilding first")

         await wait_for_both_replica_rebuildings()
@@ -215,6 +218,9 @@ async def wait_for_replica_rebuilding():
             ]

             done, pending = await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
+            for task in done:
+                if task.exception():
+                    assert False, task.exception()
             logging(f"Observed {done.pop().get_name()} started replica rebuilding")

         await wait_for_replica_rebuilding()
3 changes: 3 additions & 0 deletions e2e/libs/keywords/workload_keywords.py
@@ -138,6 +138,9 @@ async def wait_for_workloads_tasks():
                 )

             done, pending = await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
+            for task in done:
+                if task.exception():
+                    assert False, task.exception()
             logging(f"All workloads {workloads} pods are stably running now")

         await wait_for_workloads_tasks()
5 changes: 3 additions & 2 deletions e2e/libs/volume/crd.py
@@ -93,7 +93,7 @@ def delete(self, volume_name):
                 plural="volumes",
                 name=volume_name
             )
-            self.wait_for_volume_delete(volume_name)
+            self.wait_for_volume_deleted(volume_name)
         except Exception as e:
             logging(f"Deleting volume error: {e}")

@@ -208,8 +208,9 @@ def get_annotation_value(self, volume_name, annotation_key):
         volume = self.get(volume_name)
         return volume['metadata']['annotations'].get(annotation_key)

-    def wait_for_volume_delete(self, volume_name):
+    def wait_for_volume_deleted(self, volume_name):
         for i in range(self.retry_count):
+            logging(f"Waiting for volume {volume_name} deleted ... ({i})")
             try:
                 self.obj_api.get_namespaced_custom_object(
                     group="longhorn.io",
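The renamed wait_for_volume_deleted method is truncated by the hunk above. Below is a minimal sketch of how a delete-wait loop of this shape is typically completed; the API version, namespace, retry defaults, and 404 handling are assumptions for illustration, not the file's actual code:

import time
from kubernetes.client.rest import ApiException

def wait_for_volume_deleted(obj_api, volume_name, retry_count=150, retry_interval=1):
    for i in range(retry_count):
        print(f"Waiting for volume {volume_name} deleted ... ({i})")
        try:
            obj_api.get_namespaced_custom_object(
                group="longhorn.io",
                version="v1beta2",            # assumed CRD version
                namespace="longhorn-system",  # assumed namespace
                plural="volumes",
                name=volume_name,
            )
        except ApiException as e:
            if e.status == 404:
                # The custom resource is gone, so deletion has completed.
                return
        time.sleep(retry_interval)
    assert False, f"volume {volume_name} still exists after {retry_count} retries"

# Usage sketch (assuming kubeconfig is already loaded):
#   from kubernetes import client, config
#   config.load_kube_config()
#   wait_for_volume_deleted(client.CustomObjectsApi(), "test-volume")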
3 changes: 3 additions & 0 deletions e2e/libs/volume/volume.py
@@ -21,6 +21,9 @@ def create(self, volume_name, size, numberOfReplicas, frontend, migratable, data
     def delete(self, volume_name):
         return self.volume.delete(volume_name)

+    def wait_for_volume_deleted(self, volume_name):
+        return self.volume.wait_for_volume_deleted(volume_name)
+
     def attach(self, volume_name, node_name, disable_frontend):
         return self.volume.attach(volume_name, node_name, disable_frontend)

1 change: 1 addition & 0 deletions manager/integration/pytest.ini
@@ -18,3 +18,4 @@ markers =
     cluster_autoscaler
     long_running
     volume_backup_restore
+    v2_volume_test
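The marker registered above lets v2 runs be tagged and selected; a small usage sketch (the test name and body are illustrative only):

import pytest

@pytest.mark.v2_volume_test
def test_volume_basic_on_v2_engine():
    # Placeholder body; real tests create volumes with dataEngine="v2".
    assert True

# Run only the tagged tests:
#   pytest -m v2_volume_test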
67 changes: 57 additions & 10 deletions manager/integration/tests/common.py
@@ -86,6 +86,8 @@
 ISCSI_DEV_PATH = "/dev/disk/by-path"
 ISCSI_PROCESS = "iscsid"

+BLOCK_DEV_PATH = "/dev/xvdh"
+
 VOLUME_FIELD_STATE = "state"
 VOLUME_STATE_ATTACHED = "attached"
 VOLUME_STATE_DETACHED = "detached"
@@ -216,6 +218,7 @@
 SETTING_BACKUP_CONCURRENT_LIMIT = "backup-concurrent-limit"
 SETTING_RESTORE_CONCURRENT_LIMIT = "restore-concurrent-limit"
 SETTING_V1_DATA_ENGINE = "v1-data-engine"
+SETTING_V2_DATA_ENGINE = "v2-data-engine"
 SETTING_ALLOW_EMPTY_NODE_SELECTOR_VOLUME = \
     "allow-empty-node-selector-volume"
 SETTING_REPLICA_DISK_SOFT_ANTI_AFFINITY = "replica-disk-soft-anti-affinity"
@@ -320,6 +323,13 @@
 BACKINGIMAGE_FAILED_EVICT_MSG = \
     "since there is no other healthy backing image copy"

+# set default data engine for test
+enable_v2 = os.environ.get('RUN_V2_TEST')
+if enable_v2 == "true":
+    DATA_ENGINE = "v2"
+else:
+    DATA_ENGINE = "v1"
+
 # customize the timeout for HDD
 disktype = os.environ.get('LONGHORN_DISK_TYPE')
 if disktype == "hdd":
@@ -428,7 +438,8 @@ def cleanup_all_volumes(client):
 def create_volume_and_backup(client, vol_name, vol_size, backup_data_size):
     client.create_volume(name=vol_name,
                          numberOfReplicas=1,
-                         size=str(vol_size))
+                         size=str(vol_size),
+                         dataEngine=DATA_ENGINE)
     volume = wait_for_volume_detached(client, vol_name)
     volume.attach(hostId=get_self_host_id())
     volume = wait_for_volume_healthy(client, vol_name)
@@ -547,7 +558,7 @@ def create_and_check_volume(client, volume_name,
                                   numberOfReplicas=num_of_replicas,
                                   backingImage=backing_image, frontend=frontend,
                                   snapshotDataIntegrity=snapshot_data_integrity,
-                                  accessMode=access_mode)
+                                  accessMode=access_mode, dataEngine=DATA_ENGINE)
     volume = wait_for_volume_detached(client, volume_name)
     assert volume.name == volume_name
     assert volume.size == size
@@ -1566,6 +1577,9 @@ def storage_class(request):
         },
         'reclaimPolicy': 'Delete'
     }
+    if DATA_ENGINE == 'v2':
+        sc_manifest['parameters']['dataEngine'] = 'v2'
+        sc_manifest['parameters']['fsType'] = 'ext4'

     def finalizer():
         api = get_storage_api_client()
@@ -1789,6 +1803,10 @@ def cleanup_client():
     reset_engine_image(client)
     wait_for_all_instance_manager_running(client)

+    enable_v2 = os.environ.get('RUN_V2_TEST')
+    if enable_v2 == "true":
+        return
+
     # check replica subdirectory of default disk path
     if not os.path.exists(DEFAULT_REPLICA_DIRECTORY):
         subprocess.check_call(
@@ -3594,6 +3612,14 @@ def cleanup_test_disks(client):


 def reset_disks_for_all_nodes(client):  # NOQA
+    enable_v2 = os.environ.get('RUN_V2_TEST')
+    if enable_v2 == "true":
+        default_disk_path = BLOCK_DEV_PATH
+        disk_type = "block"
+    else:
+        default_disk_path = DEFAULT_DISK_PATH
+        disk_type = "filesystem"
+
     nodes = client.list_node()
     for node in nodes:
         # Reset default disk if there are more than 1 disk
@@ -3603,7 +3629,7 @@ def reset_disks_for_all_nodes(client):  # NOQA
             cleanup_required = True
         if len(node.disks) == 1:
             for _, disk in iter(node.disks.items()):
-                if disk.path != DEFAULT_DISK_PATH:
+                if disk.path != default_disk_path:
                     cleanup_required = True
         if cleanup_required:
             update_disks = get_update_disks(node.disks)
@@ -3618,7 +3644,8 @@ def reset_disks_for_all_nodes(client):  # NOQA
         node = wait_for_disk_update(client, node.name, 0)
         if len(node.disks) == 0:
             default_disk = {"default-disk":
-                            {"path": DEFAULT_DISK_PATH,
+                            {"path": default_disk_path,
+                             "diskType": disk_type,
                              "allowScheduling": True}}
             node = update_node_disks(client, node.name, disks=default_disk,
                                      retry=True)
@@ -3630,8 +3657,11 @@ def reset_disks_for_all_nodes(client):  # NOQA
         for name, disk in iter(disks.items()):
             update_disk = disk
             update_disk.allowScheduling = True
-            update_disk.storageReserved = \
-                int(update_disk.storageMaximum * 30 / 100)
+            if disk_type == "filesystem":
+                reserved_storage = int(update_disk.storageMaximum * 30 / 100)
+            else:
+                reserved_storage = 0
+            update_disk.storageReserved = reserved_storage
             update_disk.tags = []
             update_disks[name] = update_disk
         node = update_node_disks(client, node.name, disks=update_disks,
@@ -3644,7 +3674,7 @@ def reset_disks_for_all_nodes(client):  # NOQA
                                  "storageScheduled", 0)
             wait_for_disk_status(client, node.name, name,
                                  "storageReserved",
-                                 int(update_disk.storageMaximum * 30 / 100))
+                                 reserved_storage)


 def reset_settings(client):
@@ -3685,6 +3715,20 @@ def reset_settings(client):
         if setting_name == "registry-secret":
             continue

+        enable_v2 = os.environ.get('RUN_V2_TEST') == "true"
+        v1_setting_value = "false" if enable_v2 else "true"
+        v2_setting_value = "true" if enable_v2 else "false"
+
+        if setting_name == "v1-data-engine":
+            setting = client.by_id_setting(SETTING_V1_DATA_ENGINE)
+            client.update(setting, value=v1_setting_value)
+            continue
+
+        if setting_name == "v2-data-engine":
+            setting = client.by_id_setting(SETTING_V2_DATA_ENGINE)
+            client.update(setting, value=v2_setting_value)
+            continue
+
         s = client.by_id_setting(setting_name)
         if s.value != setting_default_value and not setting_readonly:
             try:
@@ -5827,7 +5871,8 @@ def restore_backup_and_get_data_checksum(client, core_api, backup, pod,
     data_checksum = {}

     client.create_volume(name=restore_volume_name, size=str(1 * Gi),
-                         fromBackup=backup.url)
+                         fromBackup=backup.url,
+                         dataEngine=DATA_ENGINE)
     volume = wait_for_volume_detached(client, restore_volume_name)
     create_pv_for_volume(client, core_api, volume, restore_pv_name)
     create_pvc_for_volume(client, core_api, volume, restore_pvc_name)
@@ -6099,7 +6144,8 @@ def create_rwx_volume_with_storageclass(client,

 def create_volume(client, vol_name, size, node_id, r_num):
     volume = client.create_volume(name=vol_name, size=size,
-                                  numberOfReplicas=r_num)
+                                  numberOfReplicas=r_num,
+                                  dataEngine=DATA_ENGINE)
     assert volume.numberOfReplicas == r_num
     assert volume.frontend == VOLUME_FRONTEND_BLOCKDEV

@@ -6367,7 +6413,8 @@ def create_deployment_and_write_data(client,  # NOQA
     apps_api = get_apps_api_client()
     volume = client.create_volume(name=volume_name,
                                   size=size,
-                                  numberOfReplicas=replica_count)
+                                  numberOfReplicas=replica_count,
+                                  dataEngine=DATA_ENGINE)
     volume = wait_for_volume_detached(client, volume_name)

     pvc_name = volume_name + "-pvc"
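Taken together, the common.py hunks key the suite off a single RUN_V2_TEST environment variable. A condensed, self-contained sketch of that selection logic (the filesystem disk path shown here is an assumption; only BLOCK_DEV_PATH, the disk types, and the 30% reservation come from the diff):

import os

RUN_V2 = os.environ.get("RUN_V2_TEST") == "true"

# Passed as dataEngine= to the client.create_volume() calls throughout the suite.
DATA_ENGINE = "v2" if RUN_V2 else "v1"

BLOCK_DEV_PATH = "/dev/xvdh"               # raw block device used for v2 disks
DEFAULT_DISK_PATH = "/var/lib/longhorn/"   # assumed filesystem default, not shown in this diff

def default_disk_entry(storage_maximum):
    # Mirrors reset_disks_for_all_nodes: block disks reserve no storage,
    # filesystem disks keep the 30% reservation.
    if RUN_V2:
        return {"path": BLOCK_DEV_PATH, "diskType": "block",
                "allowScheduling": True, "storageReserved": 0}
    return {"path": DEFAULT_DISK_PATH, "diskType": "filesystem",
            "allowScheduling": True,
            "storageReserved": int(storage_maximum * 30 / 100)}

print(DATA_ENGINE, default_disk_entry(100 * 1024**3))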