diff --git a/e2e/libs/keywords/k8s_keywords.py b/e2e/libs/keywords/k8s_keywords.py
index ad5f66b65..64e54f8bf 100644
--- a/e2e/libs/keywords/k8s_keywords.py
+++ b/e2e/libs/keywords/k8s_keywords.py
@@ -35,6 +35,9 @@ async def restart_kubelet_tasks():
                 )
 
             done, pending = await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
+            for task in done:
+                if task.exception():
+                    assert False, task.exception()
             logging(f"All kubelets on nodes {node_list} are restarted after downtime {downtime_in_sec} seconds")
 
         await restart_kubelet_tasks()
diff --git a/e2e/libs/keywords/network_keywords.py b/e2e/libs/keywords/network_keywords.py
index 9566fb070..0400c3720 100644
--- a/e2e/libs/keywords/network_keywords.py
+++ b/e2e/libs/keywords/network_keywords.py
@@ -35,6 +35,9 @@ async def disconnect_network_tasks():
                 )
 
             done, pending = await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
+            for task in done:
+                if task.exception():
+                    assert False, task.exception()
             logging(f"All networks on nodes {node_list} are recovered after disconnection time {disconnection_time_in_sec} seconds")
 
         await disconnect_network_tasks()
diff --git a/e2e/libs/keywords/persistentvolumeclaim_keywords.py b/e2e/libs/keywords/persistentvolumeclaim_keywords.py
index 947bc2c0d..73ab14d35 100644
--- a/e2e/libs/keywords/persistentvolumeclaim_keywords.py
+++ b/e2e/libs/keywords/persistentvolumeclaim_keywords.py
@@ -1,4 +1,5 @@
 from persistentvolumeclaim import PersistentVolumeClaim
+from volume import Volume
 
 from utility.constant import ANNOT_EXPANDED_SIZE
 from utility.constant import LABEL_TEST
@@ -12,6 +13,7 @@ class persistentvolumeclaim_keywords:
 
     def __init__(self):
         self.claim = PersistentVolumeClaim()
+        self.volume = Volume()
 
     def cleanup_persistentvolumeclaims(self):
         claims = self.claim.list(label_selector=f"{LABEL_TEST}={LABEL_TEST_VALUE}")
@@ -19,6 +21,7 @@ def cleanup_persistentvolumeclaims(self):
         logging(f'Cleaning up {len(claims)} persistentvolumeclaims')
         for claim in claims:
             self.delete_persistentvolumeclaim(claim.metadata.name)
+            self.volume.wait_for_volume_deleted(claim.spec.volume_name)
 
     def create_persistentvolumeclaim(self, name, volume_type="RWO", sc_name="longhorn", storage_size="3GiB"):
         logging(f'Creating {volume_type} persistentvolumeclaim {name} with {sc_name} storageclass')
diff --git a/e2e/libs/keywords/volume_keywords.py b/e2e/libs/keywords/volume_keywords.py
index 4a7aa6aa5..02d891ffa 100644
--- a/e2e/libs/keywords/volume_keywords.py
+++ b/e2e/libs/keywords/volume_keywords.py
@@ -200,6 +200,9 @@ async def wait_for_both_replica_rebuildings():
             ]
 
             done, pending = await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
+            for task in done:
+                if task.exception():
+                    assert False, task.exception()
             logging(f"Observed {done.pop().get_name()} and {done.pop().get_name()} started replica rebuilding first")
 
         await wait_for_both_replica_rebuildings()
@@ -215,6 +218,9 @@ async def wait_for_replica_rebuilding():
             ]
 
             done, pending = await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
+            for task in done:
+                if task.exception():
+                    assert False, task.exception()
             logging(f"Observed {done.pop().get_name()} started replica rebuilding")
 
         await wait_for_replica_rebuilding()
diff --git a/e2e/libs/keywords/workload_keywords.py b/e2e/libs/keywords/workload_keywords.py
index 1d7aae58f..bd5041c2b 100644
--- a/e2e/libs/keywords/workload_keywords.py
+++ b/e2e/libs/keywords/workload_keywords.py
@@ -138,6 +138,9 @@ async def wait_for_workloads_tasks():
                 )
 
             done, pending = await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
+            for task in
done: + if task.exception(): + assert False, task.exception() logging(f"All workloads {workloads} pods are stably running now") await wait_for_workloads_tasks() diff --git a/e2e/libs/volume/crd.py b/e2e/libs/volume/crd.py index 22edf3289..bfe78b069 100644 --- a/e2e/libs/volume/crd.py +++ b/e2e/libs/volume/crd.py @@ -93,7 +93,7 @@ def delete(self, volume_name): plural="volumes", name=volume_name ) - self.wait_for_volume_delete(volume_name) + self.wait_for_volume_deleted(volume_name) except Exception as e: logging(f"Deleting volume error: {e}") @@ -208,8 +208,9 @@ def get_annotation_value(self, volume_name, annotation_key): volume = self.get(volume_name) return volume['metadata']['annotations'].get(annotation_key) - def wait_for_volume_delete(self, volume_name): + def wait_for_volume_deleted(self, volume_name): for i in range(self.retry_count): + logging(f"Waiting for volume {volume_name} deleted ... ({i})") try: self.obj_api.get_namespaced_custom_object( group="longhorn.io", diff --git a/e2e/libs/volume/volume.py b/e2e/libs/volume/volume.py index d664b908f..09e7f0d79 100644 --- a/e2e/libs/volume/volume.py +++ b/e2e/libs/volume/volume.py @@ -21,6 +21,9 @@ def create(self, volume_name, size, numberOfReplicas, frontend, migratable, data def delete(self, volume_name): return self.volume.delete(volume_name) + def wait_for_volume_deleted(self, volume_name): + return self.volume.wait_for_volume_deleted(volume_name) + def attach(self, volume_name, node_name, disable_frontend): return self.volume.attach(volume_name, node_name, disable_frontend) diff --git a/manager/integration/pytest.ini b/manager/integration/pytest.ini index d7622d68c..e2e77de6d 100644 --- a/manager/integration/pytest.ini +++ b/manager/integration/pytest.ini @@ -18,3 +18,4 @@ markers = cluster_autoscaler long_running volume_backup_restore + v2_volume_test \ No newline at end of file diff --git a/manager/integration/tests/common.py b/manager/integration/tests/common.py index 45b2e6a26..9101b4936 100644 --- a/manager/integration/tests/common.py +++ b/manager/integration/tests/common.py @@ -86,6 +86,8 @@ ISCSI_DEV_PATH = "/dev/disk/by-path" ISCSI_PROCESS = "iscsid" +BLOCK_DEV_PATH = "/dev/xvdh" + VOLUME_FIELD_STATE = "state" VOLUME_STATE_ATTACHED = "attached" VOLUME_STATE_DETACHED = "detached" @@ -216,6 +218,7 @@ SETTING_BACKUP_CONCURRENT_LIMIT = "backup-concurrent-limit" SETTING_RESTORE_CONCURRENT_LIMIT = "restore-concurrent-limit" SETTING_V1_DATA_ENGINE = "v1-data-engine" +SETTING_V2_DATA_ENGINE = "v2-data-engine" SETTING_ALLOW_EMPTY_NODE_SELECTOR_VOLUME = \ "allow-empty-node-selector-volume" SETTING_REPLICA_DISK_SOFT_ANTI_AFFINITY = "replica-disk-soft-anti-affinity" @@ -320,6 +323,13 @@ BACKINGIMAGE_FAILED_EVICT_MSG = \ "since there is no other healthy backing image copy" +# set default data engine for test +enable_v2 = os.environ.get('RUN_V2_TEST') +if enable_v2 == "true": + DATA_ENGINE = "v2" +else: + DATA_ENGINE = "v1" + # customize the timeout for HDD disktype = os.environ.get('LONGHORN_DISK_TYPE') if disktype == "hdd": @@ -428,7 +438,8 @@ def cleanup_all_volumes(client): def create_volume_and_backup(client, vol_name, vol_size, backup_data_size): client.create_volume(name=vol_name, numberOfReplicas=1, - size=str(vol_size)) + size=str(vol_size), + dataEngine=DATA_ENGINE) volume = wait_for_volume_detached(client, vol_name) volume.attach(hostId=get_self_host_id()) volume = wait_for_volume_healthy(client, vol_name) @@ -547,7 +558,7 @@ def create_and_check_volume(client, volume_name, numberOfReplicas=num_of_replicas, 
backingImage=backing_image, frontend=frontend, snapshotDataIntegrity=snapshot_data_integrity, - accessMode=access_mode) + accessMode=access_mode, dataEngine=DATA_ENGINE) volume = wait_for_volume_detached(client, volume_name) assert volume.name == volume_name assert volume.size == size @@ -1566,6 +1577,9 @@ def storage_class(request): }, 'reclaimPolicy': 'Delete' } + if DATA_ENGINE == 'v2': + sc_manifest['parameters']['dataEngine'] = 'v2' + sc_manifest['parameters']['fsType'] = 'ext4' def finalizer(): api = get_storage_api_client() @@ -1789,6 +1803,10 @@ def cleanup_client(): reset_engine_image(client) wait_for_all_instance_manager_running(client) + enable_v2 = os.environ.get('RUN_V2_TEST') + if enable_v2 == "true": + return + # check replica subdirectory of default disk path if not os.path.exists(DEFAULT_REPLICA_DIRECTORY): subprocess.check_call( @@ -3594,6 +3612,14 @@ def cleanup_test_disks(client): def reset_disks_for_all_nodes(client): # NOQA + enable_v2 = os.environ.get('RUN_V2_TEST') + if enable_v2 == "true": + default_disk_path = BLOCK_DEV_PATH + disk_type = "block" + else: + default_disk_path = DEFAULT_DISK_PATH + disk_type = "filesystem" + nodes = client.list_node() for node in nodes: # Reset default disk if there are more than 1 disk @@ -3603,7 +3629,7 @@ def reset_disks_for_all_nodes(client): # NOQA cleanup_required = True if len(node.disks) == 1: for _, disk in iter(node.disks.items()): - if disk.path != DEFAULT_DISK_PATH: + if disk.path != default_disk_path: cleanup_required = True if cleanup_required: update_disks = get_update_disks(node.disks) @@ -3618,7 +3644,8 @@ def reset_disks_for_all_nodes(client): # NOQA node = wait_for_disk_update(client, node.name, 0) if len(node.disks) == 0: default_disk = {"default-disk": - {"path": DEFAULT_DISK_PATH, + {"path": default_disk_path, + "diskType": disk_type, "allowScheduling": True}} node = update_node_disks(client, node.name, disks=default_disk, retry=True) @@ -3630,8 +3657,11 @@ def reset_disks_for_all_nodes(client): # NOQA for name, disk in iter(disks.items()): update_disk = disk update_disk.allowScheduling = True - update_disk.storageReserved = \ - int(update_disk.storageMaximum * 30 / 100) + if disk_type == "filesystem": + reserved_storage = int(update_disk.storageMaximum * 30 / 100) + else: + reserved_storage = 0 + update_disk.storageReserved = reserved_storage update_disk.tags = [] update_disks[name] = update_disk node = update_node_disks(client, node.name, disks=update_disks, @@ -3644,7 +3674,7 @@ def reset_disks_for_all_nodes(client): # NOQA "storageScheduled", 0) wait_for_disk_status(client, node.name, name, "storageReserved", - int(update_disk.storageMaximum * 30 / 100)) + reserved_storage) def reset_settings(client): @@ -3685,6 +3715,20 @@ def reset_settings(client): if setting_name == "registry-secret": continue + enable_v2 = os.environ.get('RUN_V2_TEST') == "true" + v1_setting_value = "false" if enable_v2 else "true" + v2_setting_value = "true" if enable_v2 else "false" + + if setting_name == "v1-data-engine": + setting = client.by_id_setting(SETTING_V1_DATA_ENGINE) + client.update(setting, value=v1_setting_value) + continue + + if setting_name == "v2-data-engine": + setting = client.by_id_setting(SETTING_V2_DATA_ENGINE) + client.update(setting, value=v2_setting_value) + continue + s = client.by_id_setting(setting_name) if s.value != setting_default_value and not setting_readonly: try: @@ -5827,7 +5871,8 @@ def restore_backup_and_get_data_checksum(client, core_api, backup, pod, data_checksum = {} 
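The e2e keyword hunks at the top of this diff all add the same guard after `asyncio.wait(...)`: an exception raised inside a gathered task is stored on the task object rather than propagated, so without the loop a failed kubelet restart or rebuild check would pass silently. A minimal, self-contained sketch of that pattern (the coroutine and node names are illustrative, not from the diff):

    import asyncio

    async def restart_kubelet(node_name):
        # Stand-in for the real keyword coroutine; raises to simulate a failure.
        if node_name == "worker-2":
            raise RuntimeError(f"kubelet on {node_name} did not come back")

    async def restart_all(node_list):
        tasks = [asyncio.create_task(restart_kubelet(n), name=n) for n in node_list]
        done, pending = await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
        for task in done:
            # task.exception() returns the stored exception (or None); failing the
            # assertion is what turns a swallowed error into a test failure.
            if task.exception():
                assert False, task.exception()

    # asyncio.run(restart_all(["worker-1", "worker-2"]))  # would fail on worker-2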
client.create_volume(name=restore_volume_name, size=str(1 * Gi), - fromBackup=backup.url) + fromBackup=backup.url, + dataEngine=DATA_ENGINE) volume = wait_for_volume_detached(client, restore_volume_name) create_pv_for_volume(client, core_api, volume, restore_pv_name) create_pvc_for_volume(client, core_api, volume, restore_pvc_name) @@ -6099,7 +6144,8 @@ def create_rwx_volume_with_storageclass(client, def create_volume(client, vol_name, size, node_id, r_num): volume = client.create_volume(name=vol_name, size=size, - numberOfReplicas=r_num) + numberOfReplicas=r_num, + dataEngine=DATA_ENGINE) assert volume.numberOfReplicas == r_num assert volume.frontend == VOLUME_FRONTEND_BLOCKDEV @@ -6367,7 +6413,8 @@ def create_deployment_and_write_data(client, # NOQA apps_api = get_apps_api_client() volume = client.create_volume(name=volume_name, size=size, - numberOfReplicas=replica_count) + numberOfReplicas=replica_count, + dataEngine=DATA_ENGINE) volume = wait_for_volume_detached(client, volume_name) pvc_name = volume_name + "-pvc" diff --git a/manager/integration/tests/test_basic.py b/manager/integration/tests/test_basic.py index 6c1f75607..1c316713b 100644 --- a/manager/integration/tests/test_basic.py +++ b/manager/integration/tests/test_basic.py @@ -104,6 +104,7 @@ from common import BACKUP_TARGET_MESSAGE_EMPTY_URL from common import BACKUP_TARGET_MESSAGES_INVALID from common import wait_scheduling_failure +from common import DATA_ENGINE from backupstore import backupstore_delete_volume_cfg_file from backupstore import backupstore_cleanup @@ -123,7 +124,7 @@ BACKUPSTORE = get_backupstores() - +@pytest.mark.v2_volume_test # NOQA @pytest.mark.coretest # NOQA def test_hosts(client): # NOQA """ @@ -149,6 +150,7 @@ def test_hosts(client): # NOQA client.by_id_node(host_id[0]).address +@pytest.mark.v2_volume_test # NOQA @pytest.mark.coretest # NOQA def test_settings(client): # NOQA """ @@ -248,6 +250,7 @@ def volume_rw_test(dev): check_device_data(dev, data) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.coretest # NOQA def test_volume_basic(client, volume_name): # NOQA """ @@ -264,15 +267,17 @@ def test_volume_basic(client, volume_name): # NOQA def volume_basic_test(client, volume_name, backing_image=""): # NOQA num_hosts = len(client.list_node()) num_replicas = 3 - with pytest.raises(Exception): volume = client.create_volume(name="wrong_volume-name-1.0", size=SIZE, - numberOfReplicas=2) + numberOfReplicas=2, + dataEngine=DATA_ENGINE) volume = client.create_volume(name="wrong_volume-name", size=SIZE, - numberOfReplicas=2) + numberOfReplicas=2, + dataEngine=DATA_ENGINE) volume = client.create_volume(name="wrong_volume-name", size=SIZE, numberOfReplicas=2, - frontend="invalid_frontend") + frontend="invalid_frontend", + dataEngine=DATA_ENGINE) volume = create_and_check_volume(client, volume_name, num_of_replicas=num_replicas, @@ -634,6 +639,7 @@ def backup_failure_predicate(b): assert volume.lastBackupAt == "" +@pytest.mark.v2_volume_test # NOQA def test_backup_block_deletion(set_random_backupstore, client, core_api, volume_name): # NOQA """ Test backup block deletion @@ -773,7 +779,8 @@ def test_dr_volume_activated_with_failed_replica(set_random_backupstore, client, dr_vol_name = "dr-" + volume_name dr_vol = client.create_volume(name=dr_vol_name, size=SIZE, numberOfReplicas=2, fromBackup=backup.url, - frontend="", standby=True) + frontend="", standby=True, + dataEngine=DATA_ENGINE) check_volume_last_backup(client, dr_vol_name, backup.name) wait_for_backup_restore_completed(client, dr_vol_name, 
backup.name) @@ -801,6 +808,7 @@ def test_dr_volume_activated_with_failed_replica(set_random_backupstore, client, check_volume_data(dr_vol, data, False) +@pytest.mark.v2_volume_test # NOQA def test_dr_volume_with_backup_block_deletion(set_random_backupstore, client, core_api, volume_name): # NOQA """ Test DR volume last backup after block deletion. @@ -861,7 +869,8 @@ def test_dr_volume_with_backup_block_deletion(set_random_backupstore, client, co dr_vol_name = "dr-" + volume_name client.create_volume(name=dr_vol_name, size=SIZE, numberOfReplicas=2, fromBackup=backup1.url, - frontend="", standby=True) + frontend="", standby=True, + dataEngine=DATA_ENGINE) check_volume_last_backup(client, dr_vol_name, backup1.name) wait_for_backup_restore_completed(client, dr_vol_name, backup1.name) @@ -893,6 +902,7 @@ def test_dr_volume_with_backup_block_deletion(set_random_backupstore, client, co check_volume_data(dr_vol, final_data, False) +@pytest.mark.v2_volume_test # NOQA def test_dr_volume_with_backup_block_deletion_abort_during_backup_in_progress(set_random_backupstore, client, core_api, volume_name): # NOQA """ Test DR volume last backup after block deletion aborted. This will set the @@ -959,7 +969,8 @@ def test_dr_volume_with_backup_block_deletion_abort_during_backup_in_progress(se dr_vol_name = "dr-" + volume_name client.create_volume(name=dr_vol_name, size=SIZE, numberOfReplicas=2, fromBackup=backup1.url, - frontend="", standby=True) + frontend="", standby=True, + dataEngine=DATA_ENGINE) check_volume_last_backup(client, dr_vol_name, backup1.name) wait_for_backup_restore_completed(client, dr_vol_name, backup1.name) @@ -992,6 +1003,7 @@ def test_dr_volume_with_backup_block_deletion_abort_during_backup_in_progress(se check_volume_data(dr_vol, final_data, False) +@pytest.mark.v2_volume_test # NOQA def test_dr_volume_with_backup_and_backup_volume_deleted(set_random_backupstore, client, core_api, volume_name): # NOQA """ Test DR volume can be activated after delete all backups. 
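The hunks around here, together with the common.py changes earlier in the diff, thread a module-level DATA_ENGINE value, derived from the RUN_V2_TEST environment variable, into every client.create_volume call, including the standby/DR restores. A condensed sketch of that pattern, assuming a Longhorn API client with the same create_volume keyword arguments the tests use:

    import os

    # Same selection rule common.py adds: anything other than "true" means v1.
    DATA_ENGINE = "v2" if os.environ.get("RUN_V2_TEST") == "true" else "v1"

    def create_dr_volume(client, name, size, backup_url, replicas=2):
        # frontend="" plus standby=True makes this a DR (standby) volume;
        # dataEngine selects the v1 or v2 engine that backs it.
        return client.create_volume(name=name, size=size,
                                    numberOfReplicas=replicas,
                                    fromBackup=backup_url,
                                    frontend="", standby=True,
                                    dataEngine=DATA_ENGINE)

A v2 run would then be selected with something like RUN_V2_TEST=true pytest -m v2_volume_test against the marker registered in pytest.ini; the exact invocation is an assumption, not part of this diff.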
@@ -1047,10 +1059,12 @@ def test_dr_volume_with_backup_and_backup_volume_deleted(set_random_backupstore, dr_vol_name2 = "dr-" + volume_name + "2" client.create_volume(name=dr_vol_name1, size=SIZE, numberOfReplicas=2, fromBackup=backup1.url, - frontend="", standby=True) + frontend="", standby=True, + dataEngine=DATA_ENGINE) client.create_volume(name=dr_vol_name2, size=SIZE, numberOfReplicas=2, fromBackup=backup1.url, - frontend="", standby=True) + frontend="", standby=True, + dataEngine=DATA_ENGINE) check_volume_last_backup(client, dr_vol_name1, backup1.name) wait_for_backup_restore_completed(client, dr_vol_name1, backup1.name) check_volume_last_backup(client, dr_vol_name2, backup1.name) @@ -1086,6 +1100,7 @@ def test_dr_volume_with_backup_and_backup_volume_deleted(set_random_backupstore, check_volume_data(dr_vol2, data0, False) +@pytest.mark.v2_volume_test # NOQA def test_backup_volume_list(set_random_backupstore, client, core_api): # NOQA """ Test backup volume list @@ -1172,6 +1187,7 @@ def verify_no_err(): backupstore_cleanup(client) +@pytest.mark.v2_volume_test # NOQA def test_backup_metadata_deletion(set_random_backupstore, client, core_api, volume_name): # NOQA """ Test backup metadata deletion @@ -1361,6 +1377,7 @@ def test_backup_metadata_deletion(set_random_backupstore, client, core_api, volu assert (not found1) & (not found2) & (not found3) & (not found4) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.coretest # NOQA def test_backup(set_random_backupstore, client, volume_name): # NOQA """ @@ -1405,7 +1422,8 @@ def backupstore_test(client, host_id, volname, size, compression_method): # NOQ restore_name = generate_volume_name() volume = client.create_volume(name=restore_name, size=size, numberOfReplicas=2, - fromBackup=b.url) + fromBackup=b.url, + dataEngine=DATA_ENGINE) volume = common.wait_for_volume_restoration_completed(client, restore_name) volume = common.wait_for_volume_detached(client, restore_name) @@ -1431,6 +1449,7 @@ def backupstore_test(client, host_id, volname, size, compression_method): # NOQ volume = wait_for_volume_delete(client, restore_name) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.coretest # NOQA def test_backup_labels(set_random_backupstore, client, random_labels, volume_name): # NOQA """ @@ -1521,13 +1540,16 @@ def restore_inc_test(client, core_api, volume_name, pod): # NOQA sb_volume2_name = "sb-2-" + volume_name client.create_volume(name=sb_volume0_name, size=SIZE, numberOfReplicas=2, fromBackup=backup0.url, - frontend="", standby=True) + frontend="", standby=True, + dataEngine=DATA_ENGINE) client.create_volume(name=sb_volume1_name, size=SIZE, numberOfReplicas=2, fromBackup=backup0.url, - frontend="", standby=True) + frontend="", standby=True, + dataEngine=DATA_ENGINE) client.create_volume(name=sb_volume2_name, size=SIZE, numberOfReplicas=2, fromBackup=backup0.url, - frontend="", standby=True) + frontend="", standby=True, + dataEngine=DATA_ENGINE) wait_for_backup_restore_completed(client, sb_volume0_name, backup0.name) wait_for_backup_restore_completed(client, sb_volume1_name, backup0.name) wait_for_backup_restore_completed(client, sb_volume2_name, backup0.name) @@ -1699,6 +1721,7 @@ def restore_inc_test(client, core_api, volume_name, pod): # NOQA delete_and_wait_pv(core_api, sb_volume2_name) +@pytest.mark.v2_volume_test # NOQA def test_deleting_backup_volume(set_random_backupstore, client, volume_name): # NOQA """ Test deleting backup volumes @@ -1719,6 +1742,7 @@ def test_deleting_backup_volume(set_random_backupstore, client, volume_name): # 
cleanup_volume(client, volume) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.coretest # NOQA @pytest.mark.skipif('nfs' not in BACKUPSTORE, reason='This test is only applicable for nfs') # NOQA def test_listing_backup_volume(client, backing_image=""): # NOQA @@ -1885,6 +1909,7 @@ def test_listing_backup_volume(client, backing_image=""): # NOQA assert len(volumes) == 0 +@pytest.mark.v2_volume_test # NOQA @pytest.mark.coretest # NOQA def test_volume_multinode(client, volume_name): # NOQA """ @@ -1897,7 +1922,8 @@ def test_volume_multinode(client, volume_name): # NOQA volume = client.create_volume(name=volume_name, size=SIZE, - numberOfReplicas=2) + numberOfReplicas=2, + dataEngine=DATA_ENGINE) volume = common.wait_for_volume_detached(client, volume_name) @@ -1918,6 +1944,7 @@ def test_volume_multinode(client, volume_name): # NOQA assert len(volumes) == 0 +@pytest.mark.v2_volume_test # NOQA def test_pvc_storage_class_name_from_backup_volume(set_random_backupstore, # NOQA core_api, client, volume_name, # NOQA pvc_name, pvc, pod_make, # NOQA @@ -2013,7 +2040,8 @@ def test_pvc_storage_class_name_from_backup_volume(set_random_backupstore, # NOQ restore_name = generate_volume_name() volume = client.create_volume(name=restore_name, size=volume_size, numberOfReplicas=3, - fromBackup=b.url) + fromBackup=b.url, + dataEngine=DATA_ENGINE) volume = common.wait_for_volume_restoration_completed(client, restore_name) volume = common.wait_for_volume_detached(client, restore_name) @@ -2046,6 +2074,7 @@ def test_pvc_storage_class_name_from_backup_volume(set_random_backupstore, # NOQ assert resp == test_data +@pytest.mark.v2_volume_test # NOQA @pytest.mark.coretest # NOQA def test_volume_scheduling_failure(client, volume_name): # NOQA ''' @@ -2071,7 +2100,8 @@ def test_volume_scheduling_failure(client, volume_name): # NOQA "allowScheduling", False) volume = client.create_volume(name=volume_name, size=SIZE, - numberOfReplicas=3) + numberOfReplicas=3, + dataEngine=DATA_ENGINE) volume = common.wait_for_volume_condition_scheduled(client, volume_name, "status", @@ -2104,6 +2134,7 @@ def test_volume_scheduling_failure(client, volume_name): # NOQA wait_for_volume_delete(client, volume_name) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.coretest # NOQA def test_setting_default_replica_count(client, volume_name): # NOQA """ @@ -2117,7 +2148,8 @@ def test_setting_default_replica_count(client, volume_name): # NOQA old_value = setting.value setting = client.update(setting, value="5") - volume = client.create_volume(name=volume_name, size=SIZE) + volume = client.create_volume(name=volume_name, size=SIZE, + dataEngine=DATA_ENGINE) volume = common.wait_for_volume_detached(client, volume_name) assert len(volume.replicas) == int(setting.value) @@ -2127,6 +2159,7 @@ def test_setting_default_replica_count(client, volume_name): # NOQA setting = client.update(setting, value=old_value) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.coretest # NOQA def test_volume_update_replica_count(client, volume_name): # NOQA """ @@ -2175,6 +2208,7 @@ def test_volume_update_replica_count(client, volume_name): # NOQA wait_for_volume_delete(client, volume_name) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.coretest # NOQA def test_attach_without_frontend(client, volume_name): # NOQA """ @@ -2235,6 +2269,7 @@ def test_attach_without_frontend(client, volume_name): # NOQA wait_for_volume_delete(client, volume_name) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.coretest # NOQA def test_storage_class_from_backup(set_random_backupstore, 
volume_name, pvc_name, storage_class, client, core_api, pod_make): # NOQA """ @@ -2555,13 +2590,16 @@ def test_restore_inc_with_offline_expansion(set_random_backupstore, client, core dr_volume2_name = "dr-expand-2-" + volume_name client.create_volume(name=dr_volume0_name, size=SIZE, numberOfReplicas=2, fromBackup=backup0.url, - frontend="", standby=True) + frontend="", standby=True, + dataEngine=DATA_ENGINE) client.create_volume(name=dr_volume1_name, size=SIZE, numberOfReplicas=2, fromBackup=backup0.url, - frontend="", standby=True) + frontend="", standby=True, + dataEngine=DATA_ENGINE) client.create_volume(name=dr_volume2_name, size=SIZE, numberOfReplicas=2, fromBackup=backup0.url, - frontend="", standby=True) + frontend="", standby=True, + dataEngine=DATA_ENGINE) wait_for_backup_restore_completed(client, dr_volume0_name, backup0.name) wait_for_backup_restore_completed(client, dr_volume1_name, backup0.name) wait_for_backup_restore_completed(client, dr_volume2_name, backup0.name) @@ -2721,6 +2759,7 @@ def test_restore_inc_with_offline_expansion(set_random_backupstore, client, core assert len(volumes) == 0 +@pytest.mark.v2_volume_test # NOQA def test_engine_image_daemonset_restart(client, apps_api, volume_name): # NOQA """ Test restarting engine image daemonset @@ -3162,6 +3201,7 @@ def test_expansion_with_scheduling_failure( delete_and_wait_pv(core_api, test_pv_name) +@pytest.mark.v2_volume_test # NOQA def test_backup_lock_deletion_during_restoration(set_random_backupstore, client, core_api, volume_name, csi_pv, pvc, pod_make): # NOQA """ Test backup locks @@ -3200,7 +3240,8 @@ def test_backup_lock_deletion_during_restoration(set_random_backupstore, client, _, b = common.find_backup(client, std_volume_name, snap1.name) client.create_volume(name=restore_volume_name, fromBackup=b.url, - numberOfReplicas=3) + numberOfReplicas=3, + dataEngine=DATA_ENGINE) wait_for_volume_restoration_start(client, restore_volume_name, b.name) backup_volume = client.by_id_backupVolume(std_volume_name) @@ -3233,6 +3274,7 @@ def test_backup_lock_deletion_during_restoration(set_random_backupstore, client, assert b is None +@pytest.mark.v2_volume_test # NOQA def test_backup_lock_deletion_during_backup(set_random_backupstore, client, core_api, volume_name, csi_pv, pvc, pod_make): # NOQA """ Test backup locks @@ -3296,7 +3338,7 @@ def test_backup_lock_deletion_during_backup(set_random_backupstore, client, core assert b1 is None client.create_volume(name=restore_volume_name_1, fromBackup=b2.url, - numberOfReplicas=3) + numberOfReplicas=3, dataEngine=DATA_ENGINE) wait_for_volume_restoration_completed(client, restore_volume_name_1) restore_volume_1 = wait_for_volume_detached(client, restore_volume_name_1) @@ -3426,7 +3468,8 @@ def test_backup_lock_restoration_during_deletion(set_random_backupstore, client, backup_volume.backupDelete(name=b2.name) - client.create_volume(name=restore_volume_name, fromBackup=b1.url) + client.create_volume(name=restore_volume_name, fromBackup=b1.url, + dataEngine=DATA_ENGINE) wait_for_volume_detached(client, restore_volume_name) restore_volume = client.by_id_volume(restore_volume_name) assert restore_volume[VOLUME_FIELD_ROBUSTNESS] == VOLUME_ROBUSTNESS_FAULTED @@ -3443,6 +3486,7 @@ def test_backup_lock_restoration_during_deletion(set_random_backupstore, client, assert b2 is None +@pytest.mark.v2_volume_test # NOQA @pytest.mark.coretest # NOQA def test_allow_volume_creation_with_degraded_availability(client, volume_name): # NOQA """ @@ -3554,6 +3598,7 @@ def 
test_allow_volume_creation_with_degraded_availability(client, volume_name): check_volume_data(volume, data) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.coretest # NOQA def test_allow_volume_creation_with_degraded_availability_error(client, volume_name): # NOQA """ @@ -3633,6 +3678,7 @@ def test_allow_volume_creation_with_degraded_availability_error(client, volume_n check_volume_data(volume, data) +@pytest.mark.v2_volume_test # NOQA def test_multiple_volumes_creation_with_degraded_availability(set_random_backupstore, client, core_api, apps_api, storage_class, statefulset): # NOQA """ Scenario: verify multiple volumes with degraded availability can be @@ -3760,7 +3806,8 @@ def test_allow_volume_creation_with_degraded_availability_restore(set_random_bac # restore volume dst_vol_name = generate_volume_name() client.create_volume(name=dst_vol_name, size=str(1*Gi), - numberOfReplicas=3, fromBackup=backup.url) + numberOfReplicas=3, fromBackup=backup.url, + dataEngine=DATA_ENGINE) common.wait_for_volume_replica_count(client, dst_vol_name, 3) common.wait_for_volume_restoration_start(client, dst_vol_name, backup.name) common.wait_for_volume_degraded(client, dst_vol_name) @@ -3877,7 +3924,8 @@ def test_allow_volume_creation_with_degraded_availability_dr(set_random_backupst numberOfReplicas=3, fromBackup=backup.url, frontend="", - standby=True) + standby=True, + dataEngine=DATA_ENGINE) common.wait_for_volume_replica_count(client, dst_vol_name, 3) wait_for_volume_restoration_start(client, dst_vol_name, backup.name) wait_for_volume_condition_scheduled(client, dst_vol_name, @@ -4099,6 +4147,7 @@ def check_volume_and_snapshot_after_corrupting_volume_metadata_file(client, core wait_for_snapshot_count(volume, 2) +@pytest.mark.v2_volume_test # NOQA def test_volume_metafile_deleted(client, core_api, volume_name, csi_pv, pvc, pod, pod_make): # NOQA """ Scenario: @@ -4192,6 +4241,7 @@ def test_volume_metafile_empty(client, core_api, volume_name, csi_pv, pvc, pod, ) +@pytest.mark.v2_volume_test # NOQA def test_volume_metafile_deleted_when_writing_data(client, core_api, volume_name, csi_pv, pvc, pod, pod_make): # NOQA """ Scenario: @@ -4333,7 +4383,8 @@ def test_expand_pvc_with_size_round_up(client, core_api, volume_name): # NOQA wait_for_volume_delete(client, volume_name) -def test_workload_with_fsgroup(core_api, statefulset): # NOQA +@pytest.mark.v2_volume_test # NOQA +def test_workload_with_fsgroup(core_api, statefulset, storage_class): # NOQA """ 1. 
Deploy a StatefulSet workload that uses Longhorn volume and has securityContext set: @@ -4355,6 +4406,7 @@ def test_workload_with_fsgroup(core_api, statefulset): # NOQA """ statefulset_name = 'statefulset-non-root-access' pod_name = statefulset_name + '-0' + create_storage_class(storage_class) statefulset['metadata']['name'] = \ statefulset['spec']['selector']['matchLabels']['app'] = \ @@ -4363,7 +4415,7 @@ def test_workload_with_fsgroup(core_api, statefulset): # NOQA statefulset_name statefulset['spec']['replicas'] = 1 statefulset['spec']['volumeClaimTemplates'][0]['spec']['storageClassName']\ - = 'longhorn' + = storage_class['metadata']['name'] statefulset['spec']['template']['spec']['securityContext'] = { 'runAsUser': 1000, 'runAsGroup': 1000, @@ -4377,6 +4429,7 @@ def test_workload_with_fsgroup(core_api, statefulset): # NOQA get_pod_data_md5sum(core_api, pod_name, "/data/test") +@pytest.mark.v2_volume_test # NOQA def test_backuptarget_available_during_engine_image_not_ready(client, apps_api): # NOQA """ Test backup target available during engine image not ready @@ -4451,6 +4504,7 @@ def test_backuptarget_available_during_engine_image_not_ready(client, apps_api): common.wait_for_backup_target_available(client, False) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.skipif('s3' not in BACKUPSTORE, reason='This test is only applicable for s3') # NOQA def test_aws_iam_role_arn(client, core_api): # NOQA """ @@ -4518,6 +4572,7 @@ def test_aws_iam_role_arn(client, core_api): # NOQA core_api, im_label, anno_key, None) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.coretest # NOQA def test_restore_basic(set_random_backupstore, client, core_api, volume_name, pod): # NOQA """ @@ -4616,6 +4671,7 @@ def test_restore_basic(set_random_backupstore, client, core_api, volume_name, po assert output == '' +@pytest.mark.v2_volume_test # NOQA @pytest.mark.coretest # NOQA def test_default_storage_class_syncup(core_api, request): # NOQA """ @@ -5213,7 +5269,7 @@ def backup_failure_predicate(b): @pytest.mark.coretest # NOQA def test_backup_failed_enable_auto_cleanup(set_random_backupstore, # NOQA - client, core_api, volume_name): # NOQA + client, core_api, volume_name): # NOQA """ Test the failed backup would be automatically deleted. @@ -5263,6 +5319,7 @@ def test_backup_failed_disable_auto_cleanup(set_random_backupstore, # NOQA pass +@pytest.mark.v2_volume_test # NOQA @pytest.mark.parametrize( "access_mode,overridden_restored_access_mode", [ @@ -5274,7 +5331,7 @@ def test_backup_volume_restore_with_access_mode(core_api, # NOQA set_random_backupstore, # NOQA client, # NOQA access_mode, # NOQA - overridden_restored_access_mode): # NOQA + overridden_restored_access_mode): # NOQA """ Test the backup w/ the volume access mode, then restore a volume w/ the original access mode or being overridden. 
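test_workload_with_fsgroup above now takes the storage_class fixture, creates it, and points the StatefulSet's volumeClaimTemplate at it instead of the pre-installed longhorn class, so a v2 run provisions through a class that carries dataEngine=v2. A small sketch of that wiring, assuming the fixture manifests (dicts) used throughout these tests:

    def point_statefulset_at_test_class(statefulset, storage_class):
        # Previously the template hard-coded storageClassName: longhorn.
        template = statefulset['spec']['volumeClaimTemplates'][0]
        template['spec']['storageClassName'] = storage_class['metadata']['name']
        return statefulset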
@@ -5292,7 +5349,8 @@ def test_backup_volume_restore_with_access_mode(core_api, # NOQA size=str(DEFAULT_VOLUME_SIZE * Gi), numberOfReplicas=2, accessMode=access_mode, - migratable=True if access_mode == "rwx" else False) + migratable=True if access_mode == "rwx" else False, + dataEngine=DATA_ENGINE) wait_for_volume_creation(client, test_volume_name) volume = wait_for_volume_detached(client, test_volume_name) volume.attach(hostId=common.get_self_host_id()) @@ -5306,7 +5364,8 @@ def test_backup_volume_restore_with_access_mode(core_api, # NOQA client.create_volume(name=volume_name_ori_access_mode, size=str(DEFAULT_VOLUME_SIZE * Gi), numberOfReplicas=2, - fromBackup=b.url) + fromBackup=b.url, + dataEngine=DATA_ENGINE) volume_ori_access_mode = client.by_id_volume(volume_name_ori_access_mode) assert volume_ori_access_mode.accessMode == access_mode @@ -5316,11 +5375,13 @@ def test_backup_volume_restore_with_access_mode(core_api, # NOQA size=str(DEFAULT_VOLUME_SIZE * Gi), numberOfReplicas=2, accessMode=overridden_restored_access_mode, - fromBackup=b.url) + fromBackup=b.url, + dataEngine=DATA_ENGINE) volume_sp_access_mode = client.by_id_volume(volume_name_sp_access_mode) assert volume_sp_access_mode.accessMode == overridden_restored_access_mode +@pytest.mark.v2_volume_test # NOQA def test_delete_backup_during_restoring_volume(set_random_backupstore, client): # NOQA """ Test delete backup during restoring volume @@ -5356,7 +5417,8 @@ def test_delete_backup_during_restoring_volume(set_random_backupstore, client): client.create_volume(name=vol_v2_name, size=str(512 * Mi), numberOfReplicas=3, - fromBackup=b.url) + fromBackup=b.url, + dataEngine=DATA_ENGINE) delete_backup(client, bv.name, b.name) volume = wait_for_volume_status(client, vol_v1_name, @@ -5406,7 +5468,8 @@ def test_filesystem_trim(client, fs_type): # NOQA client.create_volume(name=test_volume_name, size=str(DEFAULT_VOLUME_SIZE * Gi), numberOfReplicas=2, - unmapMarkSnapChainRemoved="enabled") + unmapMarkSnapChainRemoved="enabled", + dataEngine=DATA_ENGINE) wait_for_volume_creation(client, test_volume_name) volume = wait_for_volume_detached(client, test_volume_name) @@ -5647,6 +5710,7 @@ def test_filesystem_trim(client, fs_type): # NOQA wait_for_volume_delete(client, test_volume_name) +@pytest.mark.v2_volume_test # NOQA def test_backuptarget_invalid(apps_api, # NOQA client, # NOQA core_api, # NOQA @@ -5719,6 +5783,7 @@ def test_backuptarget_invalid(apps_api, # NOQA assert any(message in backup_targets[0]["message"] for message in BACKUP_TARGET_MESSAGES_INVALID) + @pytest.mark.volume_backup_restore # NOQA def test_volume_backup_and_restore_with_lz4_compression_method(client, set_random_backupstore, volume_name): # NOQA """ diff --git a/manager/integration/tests/test_csi.py b/manager/integration/tests/test_csi.py index 967c6fb75..58e474357 100644 --- a/manager/integration/tests/test_csi.py +++ b/manager/integration/tests/test_csi.py @@ -37,6 +37,7 @@ from common import fail_replica_expansion from common import get_volume_name, get_volume_dev_mb_data_md5sum # NOQA from common import exec_command_in_pod +from common import DATA_ENGINE from backupstore import set_random_backupstore # NOQA from kubernetes.stream import stream from kubernetes import client as k8sclient @@ -55,7 +56,8 @@ def create_pv_storage(api, cli, pv, claim, backing_image, from_backup): name=pv['metadata']['name'], size=pv['spec']['capacity']['storage'], numberOfReplicas=int(pv['spec']['csi']['volumeAttributes'] ['numberOfReplicas']), - backingImage=backing_image, 
fromBackup=from_backup) + backingImage=backing_image, fromBackup=from_backup, + dataEngine=DATA_ENGINE) if from_backup: common.wait_for_volume_restoration_completed(cli, pv['metadata']['name']) @@ -101,6 +103,7 @@ def create_and_wait_csi_pod_named_pv(pv_name, pod_name, client, core_api, csi_pv create_and_wait_pod(core_api, pod) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.coretest # NOQA @pytest.mark.csi # NOQA def test_csi_mount(client, core_api, csi_pv, pvc, pod_make): # NOQA @@ -141,6 +144,7 @@ def csi_mount_test(client, core_api, csi_pv, pvc, pod_make, # NOQA delete_and_wait_pv(core_api, csi_pv['metadata']['name']) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.csi # NOQA def test_csi_io(client, core_api, csi_pv, pvc, pod_make): # NOQA """ @@ -192,6 +196,7 @@ def csi_io_test(client, core_api, csi_pv, pvc, pod_make, backing_image=""): # N delete_and_wait_pv(core_api, pv_name) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.csi # NOQA def test_csi_backup(set_random_backupstore, client, core_api, csi_pv, pvc, pod_make): # NOQA """ @@ -246,6 +251,7 @@ def backupstore_test(client, core_api, csi_pv, pvc, pod_make, pod_name, vol_name client.delete(volume2) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.csi # NOQA def test_csi_block_volume(client, core_api, storage_class, pvc, pod_manifest): # NOQA """ @@ -636,6 +642,7 @@ def test_csi_mount_volume_online_expansion(client, core_api, storage_class, pvc, assert md5_after_expanding == md5_before_expanding +@pytest.mark.v2_volume_test # NOQA def test_xfs_pv(client, core_api, pod_manifest): # NOQA """ Test create PV with new XFS filesystem @@ -674,6 +681,7 @@ def test_xfs_pv(client, core_api, pod_manifest): # NOQA assert resp == test_data +@pytest.mark.v2_volume_test # NOQA def test_xfs_pv_existing_volume(client, core_api, pod_manifest): # NOQA """ Test create PV with existing XFS filesystem @@ -791,6 +799,7 @@ def test_csi_expansion_with_replica_failure(client, core_api, storage_class, pvc assert resp == test_data +@pytest.mark.v2_volume_test # NOQA @pytest.mark.coretest # NOQA def test_allow_volume_creation_with_degraded_availability_csi( client, core_api, apps_api, make_deployment_with_pvc): # NOQA @@ -913,6 +922,7 @@ def test_allow_volume_creation_with_degraded_availability_csi( data_path) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.csi # NOQA def test_csi_minimal_volume_size( client, core_api, csi_pv, pvc, pod_make): # NOQA diff --git a/manager/integration/tests/test_csi_snapshotter.py b/manager/integration/tests/test_csi_snapshotter.py index a7131f646..df0a3f54e 100644 --- a/manager/integration/tests/test_csi_snapshotter.py +++ b/manager/integration/tests/test_csi_snapshotter.py @@ -39,6 +39,9 @@ from common import BACKING_IMAGE_SOURCE_TYPE_FROM_VOLUME from common import create_backing_image_with_matching_url from common import SETTING_MIN_NUMBER_OF_BACKING_IMAGE_COPIES +from common import storage_class # NOQA +from common import create_storage_class +from common import DEFAULT_STORAGECLASS_NAME CSI_SNAPSHOT_TYPE_SNAP = "snap" CSI_SNAPSHOT_TYPE_BAK = "bak" @@ -357,7 +360,8 @@ def wait_for_volumesnapshot_ready(volumesnapshot_name, namespace, ready_to_use=T return v -def restore_csi_volume_snapshot(core_api, client, csivolsnap, pvc_name, pvc_request_storage_size, wait_for_restore=True): # NOQA +def restore_csi_volume_snapshot(core_api, client, csivolsnap, pvc_name, pvc_request_storage_size, storage_class, wait_for_restore=True): # NOQA + create_storage_class(storage_class) restore_pvc = { 'apiVersion': 'v1', 'kind': 
'PersistentVolumeClaim', @@ -373,7 +377,7 @@ def restore_csi_volume_snapshot(core_api, client, csivolsnap, pvc_name, pvc_requ 'storage': pvc_request_storage_size } }, - 'storageClassName': 'longhorn', + 'storageClassName': DEFAULT_STORAGECLASS_NAME, 'dataSource': { 'kind': 'VolumeSnapshot', 'apiGroup': 'snapshot.storage.k8s.io', @@ -408,6 +412,7 @@ def restore_csi_volume_snapshot(core_api, client, csivolsnap, pvc_name, pvc_requ return restore_pvc +@pytest.mark.v2_volume_test # NOQA @pytest.mark.parametrize("volsnapshotclass_delete_policy,backup_is_deleted", [("Delete", True), ("Retain", False)]) # NOQA def test_csi_volumesnapshot_basic(set_random_backupstore, # NOQA volumesnapshotclass, # NOQA @@ -420,6 +425,7 @@ def test_csi_volumesnapshot_basic(set_random_backupstore, # NOQA pod_make, # NOQA volsnapshotclass_delete_policy, # NOQA backup_is_deleted, + storage_class, # NOQA csi_snapshot_type=None): # NOQA """ Test creation / restoration / deletion of a backup via the csi snapshotter @@ -525,7 +531,8 @@ def csi_volumesnapshot_deletion_test(deletionPolicy='Delete|Retain'): client, csivolsnap, restore_pvc_name, - restore_pvc_size) + restore_pvc_size, + storage_class) restore_pod = pod_make() restore_pod_name = restore_pod["metadata"]["name"] @@ -545,6 +552,7 @@ def csi_volumesnapshot_deletion_test(deletionPolicy='Delete|Retain'): wait_for_backup_delete(client, volume_name, b["name"]) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.parametrize("volsnapshotclass_delete_policy,backup_is_deleted", [("Delete", True), ("Retain", False)]) # NOQA def test_csi_volumesnapshot_restore_existing_backup(set_random_backupstore, # NOQA client, # NOQA @@ -557,6 +565,7 @@ def test_csi_volumesnapshot_restore_existing_backup(set_random_backupstore, # NO volumesnapshotcontent, volumesnapshot, # NOQA volsnapshotclass_delete_policy, # NOQA + storage_class, # NOQA backup_is_deleted): # NOQA """ Test retention of a backup while deleting the associated `VolumeSnapshot` @@ -625,7 +634,8 @@ def test_csi_volumesnapshot_restore_existing_backup(set_random_backupstore, # NO client, csivolsnap, restore_pvc_name, - restore_pvc_size) + restore_pvc_size, + storage_class) restore_pod = pod_make() restore_pod_name = restore_pod["metadata"]["name"] @@ -646,6 +656,7 @@ def test_csi_volumesnapshot_restore_existing_backup(set_random_backupstore, # NO wait_for_backup_delete(client, volume_name, b["name"]) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.parametrize("volsnapshotclass_delete_policy,backup_is_deleted", [("Delete", True)]) # NOQA def test_csi_snapshot_with_bak_param(set_random_backupstore, # NOQA volumesnapshotclass, # NOQA @@ -657,7 +668,8 @@ def test_csi_snapshot_with_bak_param(set_random_backupstore, # NOQA pvc, # NOQA pod_make, # NOQA volsnapshotclass_delete_policy, # NOQA - backup_is_deleted): # NOQA + backup_is_deleted, # NOQA + storage_class): # NOQA """ Context: @@ -697,7 +709,8 @@ def test_csi_snapshot_with_bak_param(set_random_backupstore, # NOQA pvc, # NOQA pod_make, # NOQA volsnapshotclass_delete_policy, # NOQA - backup_is_deleted, # NOQA + backup_is_deleted, # NOQA, + storage_class, # NOQA csi_snapshot_type='bak') @@ -754,6 +767,7 @@ def prepare_test_csi_snapshot(apps_api, # NOQA return vol, deployment, csisnapclass, expected_md5sum +@pytest.mark.v2_volume_test # NOQA @pytest.mark.parametrize("csi_snapshot_type", [CSI_SNAPSHOT_TYPE_SNAP, CSI_SNAPSHOT_TYPE_BAK]) # NOQA def test_csi_snapshot_create_csi_snapshot(set_random_backupstore, # NOQA apps_api, # NOQA @@ -1006,6 +1020,7 @@ def 
test_csi_snapshot_snap_create_volume_from_snapshot(apps_api, # NOQA new_pvc2['metadata']['name'], "Pending") +@pytest.mark.v2_volume_test # NOQA def test_csi_snapshot_snap_delete_csi_snapshot_snapshot_exist(apps_api, # NOQA client, # NOQA make_deployment_with_pvc, # NOQA @@ -1102,6 +1117,7 @@ def test_csi_snapshot_snap_delete_csi_snapshot_snapshot_not_exist(apps_api, # NO wait_volumesnapshot_deleted(csivolsnap["metadata"]["name"], "default") +@pytest.mark.v2_volume_test # NOQA @pytest.mark.parametrize("csi_snapshot_type", [CSI_SNAPSHOT_TYPE_SNAP, CSI_SNAPSHOT_TYPE_BAK]) # NOQA def test_csi_snapshot_delete_csi_snapshot_volume_detached(set_random_backupstore, # NOQA apps_api, # NOQA @@ -1156,6 +1172,7 @@ def test_csi_snapshot_delete_csi_snapshot_volume_detached(set_random_backupstore can_be_deleted=True) +@pytest.mark.v2_volume_test # NOQA def test_csi_snapshot_with_invalid_param( volumesnapshotclass, # NOQA volumesnapshot, # NOQA @@ -1220,6 +1237,7 @@ def finalizer(): request.addfinalizer(finalizer) + def test_csi_volumesnapshot_backing_image_with_selectors(client, # NOQA core_api, # NOQA csi_pv, # NOQA diff --git a/manager/integration/tests/test_migration.py b/manager/integration/tests/test_migration.py index 1b9b812b7..c7b653fa9 100644 --- a/manager/integration/tests/test_migration.py +++ b/manager/integration/tests/test_migration.py @@ -15,10 +15,13 @@ from common import get_self_host_id, create_and_check_volume, create_backup from common import create_storage_class, get_volume_engine from common import create_rwx_volume_with_storageclass +from common import DATA_ENGINE from backupstore import set_random_backupstore # NOQA REPLICA_COUNT = 2 + +@pytest.mark.v2_volume_test @pytest.mark.coretest # NOQA @pytest.mark.migration # NOQA def test_migration_confirm(clients, volume_name): # NOQA @@ -68,6 +71,7 @@ def migration_confirm_test(clients, volume_name, backing_image=""): # NOQA wait_for_volume_delete(client, volume_name) +@pytest.mark.v2_volume_test @pytest.mark.coretest # NOQA @pytest.mark.migration # NOQA def test_migration_rollback(clients, volume_name): # NOQA @@ -117,6 +121,7 @@ def migration_rollback_test(clients, volume_name, backing_image=""): # NOQA wait_for_volume_delete(client, volume_name) +@pytest.mark.v2_volume_test @pytest.mark.coretest # NOQA @pytest.mark.migration # NOQA def test_migration_with_unscheduled_replica(clients, volume_name): # NOQA @@ -157,7 +162,8 @@ def test_migration_with_unscheduled_replica(clients, volume_name): # NOQA volume = client.create_volume(name=volume_name, size=SIZE, numberOfReplicas=3, backingImage="", - accessMode="rwx", migratable=True) + accessMode="rwx", migratable=True, + dataEngine=DATA_ENGINE) volume = common.wait_for_volume_detached(client, volume_name) attachment_id = common.generate_attachment_ticket_id() volume.attach(attachmentID=attachment_id, hostId=local_node) @@ -199,15 +205,20 @@ def test_migration_with_unscheduled_replica(clients, volume_name): # NOQA if r.name not in old_replicas: new_replicas.append(r.name) - assert len(old_replicas) == len(new_replicas) + if DATA_ENGINE == "v1": + assert len(old_replicas) == len(new_replicas) # Step 9 volume.detach(attachmentID=attachment_id_1) # Step 10 + if DATA_ENGINE == "v1": + replica_cnt = len(new_replicas) + elif DATA_ENGINE == "v2": + replica_cnt = len(old_replicas) volume = common.wait_for_volume_migration_node( client, volume_name, local_node, - expected_replica_count=len(new_replicas) + expected_replica_count=replica_cnt ) # Step 11 @@ -295,6 +306,7 @@ def finalizer(): 
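restore_csi_volume_snapshot in test_csi_snapshotter.py now receives the storage_class fixture, creates it, and binds the restore PVC to DEFAULT_STORAGECLASS_NAME rather than the stock longhorn class. A hedged sketch of the PVC it builds (the helper name and the accessModes value are illustrative):

    def build_restore_pvc(name, size, storage_class_name, snapshot_name):
        return {
            'apiVersion': 'v1',
            'kind': 'PersistentVolumeClaim',
            'metadata': {'name': name},
            'spec': {
                'accessModes': ['ReadWriteOnce'],
                'resources': {'requests': {'storage': size}},
                # DEFAULT_STORAGECLASS_NAME in the diff, i.e. the class the test
                # just created (and which carries dataEngine=v2 on a v2 run).
                'storageClassName': storage_class_name,
                'dataSource': {
                    'kind': 'VolumeSnapshot',
                    'apiGroup': 'snapshot.storage.k8s.io',
                    'name': snapshot_name,
                },
            },
        }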
check_detached_volume_data(client, volume_name, data) +@pytest.mark.v2_volume_test @pytest.mark.coretest # NOQA @pytest.mark.migration # NOQA def test_migration_with_rebuilding_replica(clients, volume_name): # NOQA @@ -326,7 +338,8 @@ def test_migration_with_rebuilding_replica(clients, volume_name): # NOQA volume = client.create_volume(name=volume_name, size=str(2 * Gi), numberOfReplicas=3, backingImage="", - accessMode="rwx", migratable=True) + accessMode="rwx", migratable=True, + dataEngine=DATA_ENGINE) volume = common.wait_for_volume_detached(client, volume_name) attachment_id_1 = common.generate_attachment_ticket_id() volume.attach(attachmentID=attachment_id_1, hostId=current_host, @@ -357,7 +370,8 @@ def test_migration_with_rebuilding_replica(clients, volume_name): # NOQA # Step 5 volume = common.wait_for_volume_migration_ready(client, volume_name) new_replicas = volume.replicas - assert len(old_replicas) == (len(new_replicas) - len(old_replicas)) + if DATA_ENGINE == "v1": + assert len(old_replicas) == (len(new_replicas) - len(old_replicas)) # Step 6 volume.detach(attachmentID=attachment_id_1) @@ -508,7 +522,8 @@ def setup_migration_test(clients, volume_name, backing_image="", replica_cnt=REP volume = client.create_volume(name=volume_name, size=SIZE, numberOfReplicas=replica_cnt, backingImage=backing_image, - accessMode="rwx", migratable=True) + accessMode="rwx", migratable=True, + dataEngine=DATA_ENGINE) volume = common.wait_for_volume_detached(client, volume_name) attachment_id = common.generate_attachment_ticket_id() volume.attach(attachmentID=attachment_id, hostId=common.get_self_host_id()) diff --git a/manager/integration/tests/test_recurring_job.py b/manager/integration/tests/test_recurring_job.py index b26f8eef1..676b5cc52 100644 --- a/manager/integration/tests/test_recurring_job.py +++ b/manager/integration/tests/test_recurring_job.py @@ -81,6 +81,7 @@ from common import SIZE, Mi, Gi from common import SETTING_RESTORE_RECURRING_JOBS from common import VOLUME_HEAD_NAME +from common import DATA_ENGINE RECURRING_JOB_LABEL = "RecurringJob" @@ -154,6 +155,7 @@ def wait_for_recurring_backup_to_start(client, core_api, volume_name, expected_s return snapshot_name +@pytest.mark.v2_volume_test # NOQA @pytest.mark.recurring_job # NOQA def test_recurring_job(set_random_backupstore, client, volume_name): # NOQA """ @@ -230,7 +232,8 @@ def test_recurring_job(set_random_backupstore, client, volume_name): # NOQA check_recurring_jobs(client, recurring_jobs) volume = client.create_volume(name=volume_name, size=SIZE, - numberOfReplicas=2) + numberOfReplicas=2, + dataEngine=DATA_ENGINE) volume = wait_for_volume_detached(client, volume_name) volume = volume.attach(hostId=get_self_host_id()) volume = wait_for_volume_healthy(client, volume_name) @@ -284,6 +287,7 @@ def test_recurring_job(set_random_backupstore, client, volume_name): # NOQA f"backupStatus = {client.by_id_volume(volume_name).backupStatus}" +@pytest.mark.v2_volume_test # NOQA @pytest.mark.recurring_job # NOQA def test_recurring_job_in_volume_creation(client, volume_name): # NOQA """ @@ -322,7 +326,8 @@ def test_recurring_job_in_volume_creation(client, volume_name): # NOQA check_recurring_jobs(client, recurring_jobs) client.create_volume(name=volume_name, size=SIZE, - numberOfReplicas=2) + numberOfReplicas=2, + dataEngine=DATA_ENGINE) volume = wait_for_volume_detached(client, volume_name) volume.attach(hostId=get_self_host_id()) volume = wait_for_volume_healthy(client, volume_name) @@ -346,6 +351,7 @@ def 
test_recurring_job_in_volume_creation(client, volume_name): # NOQA wait_for_snapshot_count(volume, 4) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.recurring_job # NOQA def test_recurring_job_duplicated(client): # NOQA """ @@ -373,6 +379,7 @@ def test_recurring_job_duplicated(client): # NOQA assert "already exists" in str(e.value) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.recurring_job # NOQA def test_recurring_job_in_storageclass(set_random_backupstore, client, core_api, storage_class, statefulset): # NOQA """ @@ -447,6 +454,7 @@ def test_recurring_job_in_storageclass(set_random_backupstore, client, core_api, wait_for_snapshot_count(volume, 4) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.recurring_job # NOQA def test_recurring_job_labels(set_random_backupstore, client, random_labels, volume_name): # NOQA """ @@ -483,7 +491,8 @@ def recurring_job_labels_test(client, labels, volume_name, size=SIZE, backing_im check_recurring_jobs(client, recurring_jobs) client.create_volume(name=volume_name, size=size, - numberOfReplicas=2, backingImage=backing_image) + numberOfReplicas=2, backingImage=backing_image, + dataEngine=DATA_ENGINE) volume = wait_for_volume_detached(client, volume_name) volume.attach(hostId=get_self_host_id()) volume = wait_for_volume_healthy(client, volume_name) @@ -517,6 +526,7 @@ def recurring_job_labels_test(client, labels, volume_name, size=SIZE, backing_im wait_for_backup_volume(client, volume_name, backing_image) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.csi # NOQA @pytest.mark.recurring_job def test_recurring_job_kubernetes_status(set_random_backupstore, client, core_api, volume_name): # NOQA @@ -535,7 +545,8 @@ def test_recurring_job_kubernetes_status(set_random_backupstore, client, core_ap volume have 1 backup. And backup have the Kubernetes Status labels. """ - client.create_volume(name=volume_name, size=SIZE, numberOfReplicas=2) + client.create_volume(name=volume_name, size=SIZE, numberOfReplicas=2, + dataEngine=DATA_ENGINE) volume = wait_for_volume_detached(client, volume_name) pv_name = "pv-" + volume_name @@ -595,6 +606,7 @@ def test_recurring_job_kubernetes_status(set_random_backupstore, client, core_ap assert len(b.labels) == 3 +@pytest.mark.v2_volume_test # NOQA def test_recurring_jobs_maximum_retain(client, core_api, volume_name): # NOQA """ Scenario: test recurring jobs' maximum retain @@ -640,6 +652,7 @@ def test_recurring_jobs_maximum_retain(client, core_api, volume_name): # NOQA assert validator_error.upper() in str(e.value).upper() +@pytest.mark.v2_volume_test # NOQA @pytest.mark.recurring_job # NOQA def test_recurring_job_detached_volume(client, batch_v1_api, volume_name): # NOQA """ @@ -661,7 +674,8 @@ def test_recurring_job_detached_volume(client, batch_v1_api, volume_name): # NO When wait for 2 minute. Then then volume should have only 2 snapshots. 
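The test_migration.py hunks above make the expected replica count after migration engine-dependent: with v1 a fresh replica set is rebuilt for the migration target, so the old and new counts must match, while with v2 the diff keeps the original count (presumably because v2 migration does not build a separate replica set). A small sketch of that branch, mirroring the test's locals:

    def expected_migration_replica_count(data_engine, old_replicas, new_replicas):
        # v1: migration builds brand-new replicas, so the counts should line up.
        if data_engine == "v1":
            assert len(old_replicas) == len(new_replicas)
            return len(new_replicas)
        # v2: expect the original replica count instead.
        return len(old_replicas)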
""" - client.create_volume(name=volume_name, size=SIZE) + client.create_volume(name=volume_name, size=SIZE, + dataEngine=DATA_ENGINE) volume = wait_for_volume_detached(client, volume_name) self_host = get_self_host_id() @@ -694,6 +708,7 @@ def test_recurring_job_detached_volume(client, batch_v1_api, volume_name): # NO wait_for_snapshot_count(volume, 2) +@pytest.mark.v2_volume_test # NOQA def test_recurring_jobs_allow_detached_volume(set_random_backupstore, client, core_api, apps_api, volume_name, make_deployment_with_pvc): # NOQA """ Scenario: test recurring jobs for detached volume with @@ -824,6 +839,7 @@ def test_recurring_jobs_allow_detached_volume(set_random_backupstore, client, co common.wait_for_pod_phase(core_api, pod_names[0], pod_phase="Running") +@pytest.mark.v2_volume_test # NOQA def test_recurring_jobs_when_volume_detached_unexpectedly(set_random_backupstore, client, core_api, apps_api, volume_name, make_deployment_with_pvc): # NOQA """ Scenario: test recurring jobs when volume detached unexpectedly @@ -973,6 +989,7 @@ def test_recurring_jobs_on_nodes_with_taints(): # NOQA pass +@pytest.mark.v2_volume_test # NOQA @pytest.mark.recurring_job # NOQA def test_recurring_job_groups(set_random_backupstore, client, batch_v1_api): # NOQA """ @@ -1003,8 +1020,10 @@ def test_recurring_job_groups(set_random_backupstore, client, batch_v1_api): # """ volume1_name = "test-job-1" volume2_name = "test-job-2" - client.create_volume(name=volume1_name, size=SIZE) - client.create_volume(name=volume2_name, size=SIZE) + client.create_volume(name=volume1_name, size=SIZE, + dataEngine=DATA_ENGINE) + client.create_volume(name=volume2_name, size=SIZE, + dataEngine=DATA_ENGINE) volume1 = wait_for_volume_detached(client, volume1_name) volume2 = wait_for_volume_detached(client, volume2_name) @@ -1061,6 +1080,7 @@ def test_recurring_job_groups(set_random_backupstore, client, batch_v1_api): # assert not backup_created +@pytest.mark.v2_volume_test # NOQA @pytest.mark.recurring_job # NOQA def test_recurring_job_default(client, batch_v1_api, volume_name): # NOQA """ @@ -1083,7 +1103,8 @@ def test_recurring_job_default(client, batch_v1_api, volume_name): # NOQA Then volume should not have `snapshot` job in job label. volume should have `default` group in job label. """ - client.create_volume(name=volume_name, size=SIZE) + client.create_volume(name=volume_name, size=SIZE, + dataEngine=DATA_ENGINE) volume = wait_for_volume_detached(client, volume_name) volume.attach(hostId=get_self_host_id()) volume = wait_for_volume_healthy(client, volume_name) @@ -1104,6 +1125,7 @@ def test_recurring_job_default(client, batch_v1_api, volume_name): # NOQA jobs=[], groups=[DEFAULT]) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.recurring_job # NOQA def test_recurring_job_delete(client, batch_v1_api, volume_name): # NOQA """ @@ -1144,7 +1166,8 @@ def test_recurring_job_delete(client, batch_v1_api, volume_name): # NOQA default `backup2` cron job should not exist. `backup3` cron job should not exist. 
""" - client.create_volume(name=volume_name, size=SIZE) + client.create_volume(name=volume_name, size=SIZE, + dataEngine=DATA_ENGINE) volume = wait_for_volume_detached(client, volume_name) volume.attach(hostId=get_self_host_id()) volume = wait_for_volume_healthy(client, volume_name) @@ -1250,6 +1273,7 @@ def test_recurring_job_delete(client, batch_v1_api, volume_name): # NOQA wait_for_cron_job_delete(batch_v1_api, JOB_LABEL+"="+back3) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.recurring_job # NOQA def test_recurring_job_delete_should_remove_volume_label(client, batch_v1_api, volume_name): # NOQA """ @@ -1296,7 +1320,8 @@ def test_recurring_job_delete_should_remove_volume_label(client, batch_v1_api, v When delete `back2` recurring job. Then should not remove `default` job-group in volume. """ - client.create_volume(name=volume_name, size=SIZE) + client.create_volume(name=volume_name, size=SIZE, + dataEngine=DATA_ENGINE) volume = wait_for_volume_detached(client, volume_name) snap1 = SNAPSHOT + "1" @@ -1396,6 +1421,7 @@ def test_recurring_job_delete_should_remove_volume_label(client, batch_v1_api, v jobs=[], groups=[DEFAULT]) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.recurring_job # NOQA def test_recurring_job_volume_label_when_job_and_group_use_same_name(client, volume_name): # NOQA """ @@ -1426,9 +1452,12 @@ def test_recurring_job_volume_label_when_job_and_group_use_same_name(client, vol volume1_name = volume_name + "-1" volume2_name = volume_name + "-2" volume3_name = volume_name + "-3" - client.create_volume(name=volume1_name, size=SIZE) - client.create_volume(name=volume2_name, size=SIZE) - client.create_volume(name=volume3_name, size=SIZE) + client.create_volume(name=volume1_name, size=SIZE, + dataEngine=DATA_ENGINE) + client.create_volume(name=volume2_name, size=SIZE, + dataEngine=DATA_ENGINE) + client.create_volume(name=volume3_name, size=SIZE, + dataEngine=DATA_ENGINE) volume1 = wait_for_volume_detached(client, volume1_name) volume2 = wait_for_volume_detached(client, volume2_name) volume3 = wait_for_volume_detached(client, volume3_name) @@ -1477,6 +1506,7 @@ def test_recurring_job_volume_label_when_job_and_group_use_same_name(client, vol jobs=[], groups=[DEFAULT]) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.recurring_job # NOQA def test_recurring_job_multiple_volumes(set_random_backupstore, client, batch_v1_api): # NOQA """ @@ -1510,7 +1540,8 @@ def test_recurring_job_multiple_volumes(set_random_backupstore, client, batch_v1 1 backup exist in `test-job-1` volume. 
""" volume1_name = "test-job-1" - client.create_volume(name=volume1_name, size=SIZE) + client.create_volume(name=volume1_name, size=SIZE, + dataEngine=DATA_ENGINE) volume1 = wait_for_volume_detached(client, volume1_name) volume1.attach(hostId=get_self_host_id()) volume1 = wait_for_volume_healthy(client, volume1_name) @@ -1547,7 +1578,8 @@ def test_recurring_job_multiple_volumes(set_random_backupstore, client, batch_v1 wait_for_backup_count(client.by_id_backupVolume(volume1_name), 1) volume2_name = "test-job-2" - client.create_volume(name=volume2_name, size=SIZE) + client.create_volume(name=volume2_name, size=SIZE, + dataEngine=DATA_ENGINE) volume2 = wait_for_volume_detached(client, volume2_name) volume2.attach(hostId=get_self_host_id()) volume2 = wait_for_volume_healthy(client, volume2_name) @@ -1570,6 +1602,7 @@ def test_recurring_job_multiple_volumes(set_random_backupstore, client, batch_v1 wait_for_backup_count(client.by_id_backupVolume(volume1_name), 1) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.recurring_job # NOQA def test_recurring_job_snapshot(client, batch_v1_api): # NOQA """ @@ -1597,8 +1630,10 @@ def test_recurring_job_snapshot(client, batch_v1_api): # NOQA """ volume1_name = "test-job-1" volume2_name = "test-job-2" - client.create_volume(name=volume1_name, size=SIZE) - client.create_volume(name=volume2_name, size=SIZE) + client.create_volume(name=volume1_name, size=SIZE, + dataEngine=DATA_ENGINE) + client.create_volume(name=volume2_name, size=SIZE, + dataEngine=DATA_ENGINE) volume1 = wait_for_volume_detached(client, volume1_name) volume2 = wait_for_volume_detached(client, volume2_name) @@ -1643,6 +1678,7 @@ def test_recurring_job_snapshot(client, batch_v1_api): # NOQA wait_for_snapshot_count(volume2, 3) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.recurring_job # NOQA def test_recurring_job_snapshot_delete(set_random_backupstore, client, batch_v1_api, volume_name): # NOQA """ @@ -1678,7 +1714,8 @@ def test_recurring_job_snapshot_delete(set_random_backupstore, client, batch_v1_ - 3 snapshots retained - 1 volume-head """ - client.create_volume(name=volume_name, size=SIZE) + client.create_volume(name=volume_name, size=SIZE, + dataEngine=DATA_ENGINE) volume = wait_for_volume_detached(client, volume_name) self_host = get_self_host_id() @@ -1771,7 +1808,8 @@ def test_recurring_job_snapshot_delete_retain_0(set_random_backupstore, client, - 0 snapshot retained - 1 volume-head """ - client.create_volume(name=volume_name, size=SIZE) + client.create_volume(name=volume_name, size=SIZE, + dataEngine=DATA_ENGINE) volume = wait_for_volume_detached(client, volume_name) self_host = get_self_host_id() @@ -1839,7 +1877,8 @@ def test_recurring_job_snapshot_cleanup(set_random_backupstore, client, batch_v1 - 1 user-created - 1 volume-head """ - client.create_volume(name=volume_name, size=SIZE) + client.create_volume(name=volume_name, size=SIZE, + dataEngine=DATA_ENGINE) volume = wait_for_volume_detached(client, volume_name) self_host = get_self_host_id() @@ -1901,6 +1940,7 @@ def test_recurring_job_snapshot_cleanup(set_random_backupstore, client, batch_v1 assert system_created_count == 0 +@pytest.mark.v2_volume_test # NOQA @pytest.mark.recurring_job # NOQA def test_recurring_job_backup(set_random_backupstore, client, batch_v1_api): # NOQA """ @@ -1926,8 +1966,10 @@ def test_recurring_job_backup(set_random_backupstore, client, batch_v1_api): # """ volume1_name = "test-job-1" volume2_name = "test-job-2" - client.create_volume(name=volume1_name, size=SIZE) - 
client.create_volume(name=volume2_name, size=SIZE) + client.create_volume(name=volume1_name, size=SIZE, + dataEngine=DATA_ENGINE) + client.create_volume(name=volume2_name, size=SIZE, + dataEngine=DATA_ENGINE) volume1 = wait_for_volume_detached(client, volume1_name) volume2 = wait_for_volume_detached(client, volume2_name) @@ -1966,6 +2008,7 @@ def test_recurring_job_backup(set_random_backupstore, client, batch_v1_api): # wait_for_backup_count(client.by_id_backupVolume(volume2_name), 2) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.recurring_job # NOQA def test_recurring_job_restored_from_backup_target(set_random_backupstore, client, batch_v1_api): # NOQA """ @@ -2035,7 +2078,8 @@ def test_recurring_job_restored_from_backup_target(set_random_backupstore, clien check_recurring_jobs(client, recurring_jobs) volume = client.create_volume(name=volume_name1, size=SIZE, - numberOfReplicas=2) + numberOfReplicas=2, + dataEngine=DATA_ENGINE) volume = wait_for_volume_detached(client, volume_name1) volume = volume.attach(hostId=get_self_host_id()) volume = wait_for_volume_healthy(client, volume_name1) @@ -2069,7 +2113,8 @@ def test_recurring_job_restored_from_backup_target(set_random_backupstore, clien _, backup = find_backup(client, volume_name1, restore_snapshot_name) client.create_volume(name=rvolume_name1, size=SIZE, - fromBackup=backup.url) + fromBackup=backup.url, + dataEngine=DATA_ENGINE) rvolume1 = wait_for_volume_detached(client, rvolume_name1) wait_for_volume_recurring_job_update(rvolume1, jobs=[snap1, back1], @@ -2079,13 +2124,15 @@ def test_recurring_job_restored_from_backup_target(set_random_backupstore, clien cleanup_all_recurring_jobs(client) client.create_volume(name=rvolume_name2, size=SIZE, - fromBackup=backup.url) + fromBackup=backup.url, + dataEngine=DATA_ENGINE) rvolume2 = wait_for_volume_detached(client, rvolume_name2) wait_for_volume_recurring_job_update(rvolume2, jobs=[snap1, back1], groups=[DEFAULT, group1]) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.recurring_job # NOQA @pytest.mark.parametrize("access_mode", [ACCESS_MODE_RWO, ACCESS_MODE_RWX]) # NOQA def test_recurring_job_filesystem_trim(client, core_api, batch_v1_api, volume_name, csi_pv, pvc, pod_make, access_mode): # NOQA @@ -2163,6 +2210,7 @@ def test_recurring_job_filesystem_trim(client, core_api, batch_v1_api, volume_na assert size_trimmed == test_size +@pytest.mark.v2_volume_test # NOQA @pytest.mark.recurring_job def test_recurring_job_label_on_pvc(client, core_api, volume_name): # NOQA """ @@ -2273,6 +2321,7 @@ def test_recurring_job_label_on_pvc(client, core_api, volume_name): # NOQA assert unexpected_count == 0 +@pytest.mark.v2_volume_test # NOQA @pytest.mark.recurring_job def test_recurring_job_source_label(client, core_api, volume_name): # NOQA """ diff --git a/manager/integration/tests/test_rwx.py b/manager/integration/tests/test_rwx.py index 4f4705dad..961cc9466 100644 --- a/manager/integration/tests/test_rwx.py +++ b/manager/integration/tests/test_rwx.py @@ -24,6 +24,7 @@ from common import wait_deployment_replica_ready, wait_for_volume_healthy from common import crypto_secret, storage_class # NOQA from common import create_crypto_secret, create_storage_class +from common import DATA_ENGINE from backupstore import set_random_backupstore # NOQA from multiprocessing import Pool @@ -38,7 +39,8 @@ def write_data_into_pod(pod_name_and_data_path): DATA_SIZE_IN_MB_3) -def test_rwx_with_statefulset_multi_pods(core_api, statefulset): # NOQA +@pytest.mark.v2_volume_test # NOQA +def 
test_rwx_with_statefulset_multi_pods(core_api, statefulset, storage_class): # NOQA """ Test creation of share manager pod and rwx volumes from 2 pods. @@ -51,7 +53,7 @@ def test_rwx_with_statefulset_multi_pods(core_api, statefulset): # NOQA 4. Write data in both pods and compute md5sum. 5. Compare md5sum of the data with the data written the share manager. """ - + create_storage_class(storage_class) statefulset_name = 'statefulset-rwx-multi-pods-test' share_manager_name = [] volumes_name = [] @@ -62,7 +64,7 @@ def test_rwx_with_statefulset_multi_pods(core_api, statefulset): # NOQA statefulset['spec']['template']['metadata']['labels']['app'] = \ statefulset_name statefulset['spec']['volumeClaimTemplates'][0]['spec']['storageClassName']\ - = 'longhorn' + = storage_class['metadata']['name'] statefulset['spec']['volumeClaimTemplates'][0]['spec']['accessModes'] \ = ['ReadWriteMany'] @@ -104,7 +106,8 @@ def test_rwx_with_statefulset_multi_pods(core_api, statefulset): # NOQA assert pod_data == md5sum_pod[i] -def test_rwx_multi_statefulset_with_same_pvc(core_api, pvc, statefulset, pod): # NOQA +@pytest.mark.v2_volume_test # NOQA +def test_rwx_multi_statefulset_with_same_pvc(core_api, pvc, statefulset, pod, storage_class): # NOQA """ Test writing of data into a volume from multiple pods using same PVC @@ -118,12 +121,13 @@ def test_rwx_multi_statefulset_with_same_pvc(core_api, pvc, statefulset, pod): 7. Write data all three pods and compute md5sum. 8. Check the data md5sum in the share manager pod. """ + create_storage_class(storage_class) pvc_name = 'pvc-multi-pods-test' statefulset_name = 'statefulset-rwx-same-pvc-test' pod_name = 'pod-rwx-same-pvc-test' pvc['metadata']['name'] = pvc_name - pvc['spec']['storageClassName'] = 'longhorn' + pvc['spec']['storageClassName'] = storage_class['metadata']['name'] pvc['spec']['accessModes'] = ['ReadWriteMany'] core_api.create_namespaced_persistent_volume_claim( @@ -167,7 +171,8 @@ def test_rwx_multi_statefulset_with_same_pvc(core_api, pvc, statefulset, pod): core_api, command2, share_manager_name, LONGHORN_NAMESPACE) -def test_rwx_parallel_writing(core_api, statefulset, pod): # NOQA +@pytest.mark.v2_volume_test # NOQA +def test_rwx_parallel_writing(core_api, statefulset, pod, storage_class): # NOQA """ Test parallel writing of data @@ -182,7 +187,7 @@ def test_rwx_parallel_writing(core_api, statefulset, pod): # NOQA 6. Compute md5sum. 7. Check the data md5sum in share manager pod volume """ - + create_storage_class(storage_class) statefulset_name = 'statefulset-rwx-parallel-writing-test' statefulset['metadata']['name'] = \ @@ -192,7 +197,7 @@ def test_rwx_parallel_writing(core_api, statefulset, pod): # NOQA statefulset_name statefulset['spec']['replicas'] = 1 statefulset['spec']['volumeClaimTemplates'][0]['spec']['storageClassName']\ - = 'longhorn' + = storage_class['metadata']['name'] statefulset['spec']['volumeClaimTemplates'][0]['spec']['accessModes'] \ = ['ReadWriteMany'] @@ -232,7 +237,8 @@ def test_rwx_parallel_writing(core_api, statefulset, pod): # NOQA assert md5sum2 == share_manager_data2 -def test_rwx_statefulset_scale_down_up(core_api, statefulset): # NOQA +@pytest.mark.v2_volume_test # NOQA +def test_rwx_statefulset_scale_down_up(core_api, statefulset, storage_class): # NOQA """ Test Scaling up and down of pods attached to rwx volume. @@ -248,7 +254,7 @@ def test_rwx_statefulset_scale_down_up(core_api, statefulset): # NOQA 7. Wait for new pods to come up. 8. Check the data md5sum in new pods. 
""" - + create_storage_class(storage_class) statefulset_name = 'statefulset-rwx-scale-down-up-test' share_manager_name = [] @@ -258,7 +264,7 @@ def test_rwx_statefulset_scale_down_up(core_api, statefulset): # NOQA statefulset['spec']['template']['metadata']['labels']['app'] = \ statefulset_name statefulset['spec']['volumeClaimTemplates'][0]['spec']['storageClassName']\ - = 'longhorn' + = storage_class['metadata']['name'] statefulset['spec']['volumeClaimTemplates'][0]['spec']['accessModes'] \ = ['ReadWriteMany'] @@ -339,7 +345,8 @@ def test_rwx_statefulset_scale_down_up(core_api, statefulset): # NOQA assert pod_data == md5sum_pod[i] -def test_rwx_delete_share_manager_pod(core_api, statefulset): # NOQA +@pytest.mark.v2_volume_test # NOQA +def test_rwx_delete_share_manager_pod(core_api, statefulset, storage_class): # NOQA """ Test moving of Share manager pod from one node to another. @@ -354,7 +361,7 @@ def test_rwx_delete_share_manager_pod(core_api, statefulset): # NOQA 7. Write more data to it and compute md5sum. 8. Check the data md5sum in share manager volume. """ - + create_storage_class(storage_class) statefulset_name = 'statefulset-delete-share-manager-pods-test' statefulset['metadata']['name'] = \ @@ -364,7 +371,7 @@ def test_rwx_delete_share_manager_pod(core_api, statefulset): # NOQA statefulset_name statefulset['spec']['replicas'] = 1 statefulset['spec']['volumeClaimTemplates'][0]['spec']['storageClassName']\ - = 'longhorn' + = storage_class['metadata']['name'] statefulset['spec']['volumeClaimTemplates'][0]['spec']['accessModes'] \ = ['ReadWriteMany'] @@ -399,7 +406,8 @@ def test_rwx_delete_share_manager_pod(core_api, statefulset): # NOQA assert test_data_2 == share_manager_data_2 -def test_rwx_deployment_with_multi_pods(core_api, pvc, make_deployment_with_pvc): # NOQA +@pytest.mark.v2_volume_test # NOQA +def test_rwx_deployment_with_multi_pods(core_api, pvc, make_deployment_with_pvc, storage_class): # NOQA """ Test deployment of 2 pods with same PVC. @@ -411,10 +419,10 @@ def test_rwx_deployment_with_multi_pods(core_api, pvc, make_deployment_with_pvc) 5. Write data in both pods and compute md5sum. 6. Check the data md5sum in the share manager pod. """ - + create_storage_class(storage_class) pvc_name = 'pvc-deployment-multi-pods-test' pvc['metadata']['name'] = pvc_name - pvc['spec']['storageClassName'] = 'longhorn' + pvc['spec']['storageClassName'] = storage_class['metadata']['name'] pvc['spec']['accessModes'] = ['ReadWriteMany'] core_api.create_namespaced_persistent_volume_claim( @@ -461,6 +469,7 @@ def test_rwx_deployment_with_multi_pods(core_api, pvc, make_deployment_with_pvc) assert test_data_2 == share_manager_data_2 +@pytest.mark.v2_volume_test # NOQA def test_restore_rwo_volume_to_rwx(set_random_backupstore, client, core_api, volume_name, pvc, csi_pv, pod_make, make_deployment_with_pvc): # NOQA """ Test restoring a rwo to a rwx volume. 
@@ -496,7 +505,8 @@ def test_restore_rwo_volume_to_rwx(set_random_backupstore, client, core_api, vol client.create_volume(name=restore_volume_name, size=str(1 * Gi), numberOfReplicas=3, fromBackup=b1.url, - accessMode='rwx') + accessMode='rwx', + dataEngine=DATA_ENGINE) wait_for_volume_creation(client, restore_volume_name) restore_volume = wait_for_volume_detached(client, restore_volume_name) create_pv_for_volume(client, core_api, restore_volume, restore_pv_name) @@ -551,7 +561,7 @@ def test_rwx_online_expansion(): # NOQA pass -def test_rwx_offline_expansion(client, core_api, pvc, make_deployment_with_pvc): # NOQA +def test_rwx_offline_expansion(client, core_api, pvc, make_deployment_with_pvc, storage_class): # NOQA """ Related issue : https://github.com/longhorn/longhorn/issues/2181 @@ -579,9 +589,10 @@ def test_rwx_offline_expansion(client, core_api, pvc, make_deployment_with_pvc): And - 1.5 Gi of data is successfully written to the expanded volume """ + create_storage_class(storage_class) pvc_name = 'pvc-deployment-rwx-expand-test' pvc['metadata']['name'] = pvc_name - pvc['spec']['storageClassName'] = 'longhorn' + pvc['spec']['storageClassName'] = storage_class['metadata']['name'] pvc['spec']['accessModes'] = ['ReadWriteMany'] pvc['spec']['resources']['requests']['storage'] = str(1 * Gi) @@ -641,6 +652,7 @@ def test_rwx_offline_expansion(client, core_api, pvc, make_deployment_with_pvc): assert int(data_size_in_pod)/1024/1024 == data_size_in_mb +@pytest.mark.v2_volume_test # NOQA def test_encrypted_rwx_volume(core_api, statefulset, storage_class, crypto_secret, pvc, make_deployment_with_pvc): # NOQA """ Test creating encrypted rwx volume and use the secret in @@ -686,6 +698,7 @@ def test_encrypted_rwx_volume(core_api, statefulset, storage_class, crypto_secre delete_and_wait_pvc(core_api, pvc_name) +@pytest.mark.v2_volume_test # NOQA def test_rwx_volume_mount_options(core_api, storage_class, pvc, make_deployment_with_pvc): # NOQA """ Test creating rwx volume with custom mount options diff --git a/manager/integration/tests/test_scheduling.py b/manager/integration/tests/test_scheduling.py index a676d38a3..fd0636905 100644 --- a/manager/integration/tests/test_scheduling.py +++ b/manager/integration/tests/test_scheduling.py @@ -89,6 +89,7 @@ from common import SETTING_REPLICA_ZONE_SOFT_ANTI_AFFINITY from common import SETTING_REPLICA_DISK_SOFT_ANTI_AFFINITY from common import SETTING_ALLOW_EMPTY_DISK_SELECTOR_VOLUME +from common import DATA_ENGINE from time import sleep @@ -146,6 +147,7 @@ def wait_new_replica_ready(client, volume_name, replica_names): # NOQA assert new_replica_ready +@pytest.mark.v2_volume_test # NOQA def test_soft_anti_affinity_scheduling(client, volume_name): # NOQA """ Test that volumes with Soft Anti-Affinity work as expected. @@ -185,6 +187,7 @@ def test_soft_anti_affinity_scheduling(client, volume_name): # NOQA cleanup_volume(client, volume) +@pytest.mark.v2_volume_test # NOQA def test_soft_anti_affinity_detach(client, volume_name): # NOQA """ Test that volumes with Soft Anti-Affinity can detach and reattach to a @@ -230,6 +233,7 @@ def test_soft_anti_affinity_detach(client, volume_name): # NOQA cleanup_volume(client, volume) +@pytest.mark.v2_volume_test # NOQA def test_hard_anti_affinity_scheduling(client, volume_name): # NOQA """ Test that volumes with Hard Anti-Affinity work as expected. 
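
Across test_restore_rwo_volume_to_rwx, test_rwx_offline_expansion and the test_scheduling.py hunks, the recurring change is threading dataEngine=DATA_ENGINE (imported from common) into every direct client.create_volume call. The sketch below shows one way that pattern could be kept consistent; the volume_kwargs helper and FakeClient are mine, while the keyword names (size, numberOfReplicas, fromBackup, accessMode, dataEngine) come from the calls in this diff.

# Sketch only: helper and FakeClient are illustrative, not suite code.
DATA_ENGINE = "v1"   # the suite imports this from common; value assumed here


def volume_kwargs(name, size="1Gi", **overrides):
    """Build create_volume keyword arguments with the suite-wide
    data engine applied unless explicitly overridden."""
    kwargs = {"name": name, "size": size, "dataEngine": DATA_ENGINE}
    kwargs.update(overrides)
    return kwargs


class FakeClient:
    """Stand-in for the Longhorn API client, for demonstration only."""
    def create_volume(self, **kwargs):
        return kwargs


if __name__ == "__main__":
    client = FakeClient()
    print(client.create_volume(**volume_kwargs(
        "restore-rwx-demo", numberOfReplicas=3, accessMode="rwx")))
    # {'name': 'restore-rwx-demo', 'size': '1Gi', 'dataEngine': 'v1',
    #  'numberOfReplicas': 3, 'accessMode': 'rwx'}
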
@@ -279,6 +283,7 @@ def test_hard_anti_affinity_scheduling(client, volume_name): # NOQA cleanup_volume(client, volume) +@pytest.mark.v2_volume_test # NOQA def test_hard_anti_affinity_detach(client, volume_name): # NOQA """ Test that volumes with Hard Anti-Affinity are still able to detach and @@ -332,6 +337,7 @@ def test_hard_anti_affinity_detach(client, volume_name): # NOQA cleanup_volume(client, volume) +@pytest.mark.v2_volume_test # NOQA def test_hard_anti_affinity_live_rebuild(client, volume_name): # NOQA """ Test that volumes with Hard Anti-Affinity can build new replicas live once @@ -380,6 +386,7 @@ def test_hard_anti_affinity_live_rebuild(client, volume_name): # NOQA cleanup_volume(client, volume) +@pytest.mark.v2_volume_test # NOQA def test_hard_anti_affinity_offline_rebuild(client, volume_name): # NOQA """ Test that volumes with Hard Anti-Affinity can build new replicas during @@ -430,6 +437,7 @@ def test_hard_anti_affinity_offline_rebuild(client, volume_name): # NOQA cleanup_volume(client, volume) +@pytest.mark.v2_volume_test # NOQA def test_replica_rebuild_per_volume_limit(client, core_api, storage_class, sts_name, statefulset): # NOQA """ Test the volume always only have one replica scheduled for rebuild @@ -477,6 +485,7 @@ def test_replica_rebuild_per_volume_limit(client, core_api, storage_class, sts_n assert md5sum == common.get_pod_data_md5sum(core_api, pod_name, data_path) +@pytest.mark.v2_volume_test # NOQA def test_replica_auto_balance_node_least_effort(client, volume_name): # NOQA """ Scenario: replica auto-balance nodes with `least_effort`. @@ -608,6 +617,7 @@ def test_replica_auto_balance_node_least_effort(client, volume_name): # NOQA check_volume_data(volume, data) +@pytest.mark.v2_volume_test # NOQA def test_replica_auto_balance_node_best_effort(client, volume_name): # NOQA """ Scenario: replica auto-balance nodes with `best_effort`. 
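
The scheduling hunks above and below mostly add the @pytest.mark.v2_volume_test marker; how that marker is consumed is not shown in this diff. Assuming it flags cases verified against the v2 data engine, and given that the CI scripts further down export RUN_V2_TEST into the test pod, a conftest-style hook like the one below is one plausible gating scheme. It is a sketch under those assumptions, not the suite's actual mechanism; only standard pytest APIs are used.

# Assumption: this diff does not show how v2_volume_test is consumed.
# One plausible scheme: when RUN_V2_TEST=true, keep only flagged cases.
import os

import pytest


def pytest_collection_modifyitems(config, items):
    """When RUN_V2_TEST is enabled, skip tests not marked v2_volume_test."""
    if os.environ.get("RUN_V2_TEST", "false").lower() != "true":
        return
    skip_unmarked = pytest.mark.skip(
        reason="not verified against the v2 data engine")
    for item in items:
        if item.get_closest_marker("v2_volume_test") is None:
            item.add_marker(skip_unmarked)
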
@@ -1094,7 +1104,8 @@ def replica_auto_balance_with_data_locality_test(client, volume_name): # NOQA volume = client.create_volume(name=volume_name, size=str(200 * Mi), numberOfReplicas=number_of_replicas, - dataLocality="best-effort") + dataLocality="best-effort", + dataEngine=DATA_ENGINE) volume = common.wait_for_volume_detached(client, volume_name) volume.attach(hostId=self_node) @@ -1278,6 +1289,7 @@ def test_replica_auto_balance_disabled_volume_spec_enabled(client, volume_name): check_volume_data(v2, d2) +@pytest.mark.v2_volume_test # NOQA def test_data_locality_basic(client, core_api, volume_name, pod, settings_reset): # NOQA """ Test data locality basic feature @@ -1512,7 +1524,8 @@ def test_data_locality_basic(client, core_api, volume_name, pod, settings_reset) size=volume2_size, numberOfReplicas=3, nodeSelector=["AVAIL"], - dataLocality="best-effort") + dataLocality="best-effort", + dataEngine=DATA_ENGINE) volume2 = wait_for_volume_detached(client, volume2_name) volume2 = client.by_id_volume(volume2_name) @@ -1584,7 +1597,8 @@ def test_data_locality_basic(client, core_api, volume_name, pod, settings_reset) volume3 = client.create_volume(name=volume3_name, size=volume3_size, - numberOfReplicas=1) + numberOfReplicas=1, + dataEngine=DATA_ENGINE) volume3 = wait_for_volume_detached(client, volume3_name) volume3 = client.by_id_volume(volume3_name) @@ -1670,7 +1684,8 @@ def test_data_locality_basic(client, core_api, volume_name, pod, settings_reset) volume4 = client.create_volume(name=volume4_name, size=volume4_size, numberOfReplicas=1, - dataLocality="best-effort") + dataLocality="best-effort", + dataEngine=DATA_ENGINE) volume4 = wait_for_volume_detached(client, volume4_name) volume4 = client.by_id_volume(volume4_name) @@ -1785,6 +1800,7 @@ def test_data_locality_basic(client, core_api, volume_name, pod, settings_reset) assert v4_node2_replica_count == 1 assert v4_node3_replica_count == 0 + def test_replica_schedule_to_disk_with_most_usable_storage(client, volume_name, request): # NOQA """ Scenario : test replica schedule to disk with the most usable storage @@ -1885,7 +1901,8 @@ def test_soft_anti_affinity_scheduling_volume_enable(client, volume_name): # NOQ backingImage="", frontend=VOLUME_FRONTEND_BLOCKDEV, snapshotDataIntegrity=SNAPSHOT_DATA_INTEGRITY_IGNORED, - replicaSoftAntiAffinity="enabled") + replicaSoftAntiAffinity="enabled", + dataEngine=DATA_ENGINE) volume = wait_for_volume_detached(client, volume_name) volume.attach(hostId=host_id) @@ -1937,7 +1954,8 @@ def test_soft_anti_affinity_scheduling_volume_disable(client, volume_name): # NO backingImage="", frontend=VOLUME_FRONTEND_BLOCKDEV, snapshotDataIntegrity=SNAPSHOT_DATA_INTEGRITY_IGNORED, - replicaSoftAntiAffinity="disabled") + replicaSoftAntiAffinity="disabled", + dataEngine=DATA_ENGINE) volume = wait_for_volume_detached(client, volume_name) volume.attach(hostId=host_id) @@ -1967,6 +1985,7 @@ def test_soft_anti_affinity_scheduling_volume_disable(client, volume_name): # NO check_volume_data(volume, data) +@pytest.mark.v2_volume_test # NOQA def test_data_locality_strict_local_node_affinity(client, core_api, apps_api, storage_class, statefulset, request): # NOQA """ Scenario: data-locality (strict-local) should schedule Pod to the same node @@ -2037,6 +2056,7 @@ def finalizer(): wait_for_statefulset_pods_healthy(statefulset) +@pytest.mark.v2_volume_test # NOQA def test_allow_empty_node_selector_volume_setting(client, volume_name): # NOQA """ Test the global setting allow-empty-node-selector-volume @@ -2075,7 +2095,8 @@ def 
test_allow_empty_node_selector_volume_setting(client, volume_name): # NOQA update_setting(client, SETTING_ALLOW_EMPTY_NODE_SELECTOR_VOLUME, "false") # Check volume can not be scehduled - client.create_volume(name=volume_name, size=SIZE) + client.create_volume(name=volume_name, size=SIZE, + dataEngine=DATA_ENGINE) volume = wait_for_volume_detached(client, volume_name) volume = client.by_id_volume(volume.name) @@ -2148,6 +2169,7 @@ def finalizer(): return disk_path1, disk_path2 + def test_global_disk_soft_anti_affinity(client, volume_name, request): # NOQA """ 1. When Replica Disk Soft Anti-Affinity is false, it should be impossible @@ -2205,7 +2227,8 @@ def test_global_disk_soft_anti_affinity(client, volume_name, request): # NOQA update_setting(client, SETTING_REPLICA_DISK_SOFT_ANTI_AFFINITY, "false") lht_hostId = get_self_host_id() - client.create_volume(name=volume_name, size=str(500*Mi)) + client.create_volume(name=volume_name, size=str(500*Mi), + dataEngine=DATA_ENGINE) volume = wait_for_volume_detached(client, volume_name) volume.attach(hostId=lht_hostId) volume = wait_for_volume_degraded(client, volume_name) @@ -2252,7 +2275,7 @@ def test_global_disk_soft_anti_affinity(client, volume_name, request): # NOQA assert replica.diskID not in disk_id disk_id.append(replica.diskID) - +@pytest.mark.v2_volume_test # NOQA def test_allow_empty_disk_selector_volume_setting(client, volume_name): # NOQA """ Test the global setting allow-empty-disk-selector-volume @@ -2292,7 +2315,8 @@ def test_allow_empty_disk_selector_volume_setting(client, volume_name): # NOQA update_setting(client, SETTING_ALLOW_EMPTY_DISK_SELECTOR_VOLUME, "false") # Check volume can not be scehduled - client.create_volume(name=volume_name, size=SIZE) + client.create_volume(name=volume_name, size=SIZE, + dataEngine=DATA_ENGINE) volume = wait_for_volume_detached(client, volume_name) volume = client.by_id_volume(volume.name) @@ -2374,7 +2398,8 @@ def test_volume_disk_soft_anti_affinity(client, volume_name, request): # NOQA lht_hostId = get_self_host_id() client.create_volume(name=volume_name, size=str(500*Mi), - replicaDiskSoftAntiAffinity="disabled") + replicaDiskSoftAntiAffinity="disabled", + dataEngine=DATA_ENGINE) volume = wait_for_volume_detached(client, volume_name) assert volume.replicaDiskSoftAntiAffinity == "disabled" diff --git a/manager/integration/tests/test_system_backup_restore.py b/manager/integration/tests/test_system_backup_restore.py index 9703ed4b5..1299363ad 100644 --- a/manager/integration/tests/test_system_backup_restore.py +++ b/manager/integration/tests/test_system_backup_restore.py @@ -35,6 +35,7 @@ from common import SETTING_BACKUPSTORE_POLL_INTERVAL from common import SIZE +from common import DATA_ENGINE from backupstore import set_random_backupstore # NOQA @@ -44,6 +45,7 @@ IF_NOT_PRESENT = "if-not-present" +@pytest.mark.v2_volume_test # NOQA @pytest.mark.system_backup_restore # NOQA def test_system_backup_and_restore(client, set_random_backupstore): # NOQA """ @@ -71,6 +73,7 @@ def test_system_backup_and_restore(client, set_random_backupstore): # NOQA system_restore_wait_for_state("Completed", system_restore_name, client) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.system_backup_restore # NOQA def test_system_backup_and_restore_volume_with_data(client, volume_name, set_random_backupstore): # NOQA """ @@ -204,6 +207,7 @@ def test_system_backup_and_restore_volume_with_backingimage(client, core_api, vo restored_volume = wait_for_volume_healthy(client, volume_name) +@pytest.mark.v2_volume_test # NOQA 
@pytest.mark.system_backup_restore # NOQA def test_system_backup_with_volume_backup_policy_if_not_present(client, volume_name, set_random_backupstore): # NOQA """ @@ -260,6 +264,7 @@ def create_system_backup_and_assert_volume_backup_count(count): create_system_backup_and_assert_volume_backup_count(2) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.system_backup_restore # NOQA def test_system_backup_with_volume_backup_policy_always(client, volume_name, set_random_backupstore): # NOQA """ @@ -296,7 +301,8 @@ def test_system_backup_with_volume_backup_policy_always(client, volume_name, set dr_volume_name = volume_name + "-dr" client.create_volume(name=dr_volume_name, size=SIZE, numberOfReplicas=1, fromBackup=backup.url, - frontend="", standby=True) + frontend="", standby=True, + dataEngine=DATA_ENGINE) wait_for_backup_restore_completed(client, dr_volume_name, backup.name) system_backup_name = system_backup_random_name() @@ -319,6 +325,7 @@ def test_system_backup_with_volume_backup_policy_always(client, volume_name, set wait_for_backup_count(backup_volume, 3) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.system_backup_restore # NOQA def test_system_backup_with_volume_backup_policy_disabled(client, volume_name, set_random_backupstore): # NOQA """ @@ -350,6 +357,7 @@ def test_system_backup_with_volume_backup_policy_disabled(client, volume_name, s wait_for_backup_count(backup_volume, 0) +@pytest.mark.v2_volume_test # NOQA @pytest.mark.system_backup_restore # NOQA def test_system_backup_delete_when_other_system_backup_using_name_as_prefix(client, set_random_backupstore): # NOQA """ diff --git a/test_framework/Jenkinsfile b/test_framework/Jenkinsfile index 9d8d6be98..16b8cb48f 100644 --- a/test_framework/Jenkinsfile +++ b/test_framework/Jenkinsfile @@ -117,6 +117,8 @@ node { --env LONGHORN_TRANSIENT_VERSION=${LONGHORN_TRANSIENT_VERSION} \ --env LONGHORN_TEST_CLOUDPROVIDER=${LONGHORN_TEST_CLOUDPROVIDER} \ --env LONGHORN_UPGRADE_TEST=${LONGHORN_UPGRADE_TEST} \ + --env TF_VAR_extra_block_device=${RUN_V2_TEST} \ + --env RUN_V2_TEST=${RUN_V2_TEST} \ --env PYTEST_CUSTOM_OPTIONS="${PYTEST_CUSTOM_OPTIONS}" \ --env BACKUP_STORE_TYPE="${BACKUP_STORE_TYPE}" \ --env TF_VAR_use_hdd=${USE_HDD} \ diff --git a/test_framework/scripts/longhorn-setup.sh b/test_framework/scripts/longhorn-setup.sh index fbfb87ca4..61bab76ea 100755 --- a/test_framework/scripts/longhorn-setup.sh +++ b/test_framework/scripts/longhorn-setup.sh @@ -450,6 +450,9 @@ run_longhorn_upgrade_test(){ RESOURCE_SUFFIX=$(terraform -chdir=${TF_VAR_tf_workspace}/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} output -raw resource_suffix) yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[7].value="'${RESOURCE_SUFFIX}'"' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} + ## for v2 volume test + yq e -i 'select(.spec.containers[0].env != null).spec.containers[0].env += {"name": "RUN_V2_TEST", "value": "'${TF_VAR_extra_block_device}'"}' "${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH}" + kubectl apply -f ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} # wait upgrade test pod to start running @@ -521,6 +524,9 @@ run_longhorn_tests(){ yq e -i 'select(.spec.containers[0].env != null).spec.containers[0].env += {"name": "AWS_DEFAULT_REGION", "valueFrom": {"secretKeyRef": {"name": "aws-cred-secret", "key": "AWS_DEFAULT_REGION"}}}' "${LONGHORN_TESTS_MANIFEST_FILE_PATH}" set -x + ## for v2 volume test + yq e -i 'select(.spec.containers[0].env != null).spec.containers[0].env += {"name": "RUN_V2_TEST", "value": "'${TF_VAR_extra_block_device}'"}' 
"${LONGHORN_TESTS_MANIFEST_FILE_PATH}" + LONGHORN_TEST_POD_NAME=`yq e 'select(.spec.containers[0] != null).metadata.name' ${LONGHORN_TESTS_MANIFEST_FILE_PATH}` RESOURCE_SUFFIX=$(terraform -chdir=${TF_VAR_tf_workspace}/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} output -raw resource_suffix)