diff --git a/tests/api/test_acl_counter.py b/tests/api/test_acl_counter.py index fb65afe3..994c8f26 100644 --- a/tests/api/test_acl_counter.py +++ b/tests/api/test_acl_counter.py @@ -1,6 +1,9 @@ from pprint import pprint +import pytest + +@pytest.mark.npu class TestSaiAclCounter: # object with parent SAI_OBJECT_TYPE_ACL_TABLE @@ -19,23 +22,33 @@ def test_acl_counter_create(self, npu): 'attributes': ['SAI_ACL_COUNTER_ATTR_TABLE_ID', '$acl_table_1'], }, ] - + npu.objects_discovery() results = [*npu.process_commands(commands)] print('======= SAI commands RETURN values create =======') pprint(results) - def test_acl_counter_remove(self, npu): + + @pytest.mark.dependency(name='test_sai_acl_counter_attr_label_set') + def test_sai_acl_counter_attr_label_set(self, npu): commands = [ { 'name': 'acl_counter_1', - 'op': 'remove', - }, - { - 'name': 'acl_table_1', - 'op': 'remove', - }, + 'op': 'set', + 'attributes': ['SAI_ACL_COUNTER_ATTR_LABEL', '""'], + } ] + npu.objects_discovery() + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + def test_acl_counter_remove(self, npu): + commands = [ + {'name': 'acl_counter_1', 'op': 'remove'}, + {'name': 'acl_table_1', 'op': 'remove'}, + ] + npu.objects_discovery() results = [*npu.process_commands(commands)] print('======= SAI commands RETURN values remove =======') pprint(results) diff --git a/tests/api/test_acl_range.py b/tests/api/test_acl_range.py index 3bdc5779..ec68d808 100644 --- a/tests/api/test_acl_range.py +++ b/tests/api/test_acl_range.py @@ -24,12 +24,7 @@ def test_acl_range_create(self, npu): pprint(results) def test_acl_range_remove(self, npu): - commands = [ - { - 'name': 'acl_range_1', - 'op': 'remove', - } - ] + commands = [{'name': 'acl_range_1', 'op': 'remove'}] results = [*npu.process_commands(commands)] print('======= SAI commands RETURN values remove =======') diff --git a/tests/api/test_hostif_trap_group.py b/tests/api/test_hostif_trap_group.py index ce6db62f..2892f889 100644 --- a/tests/api/test_hostif_trap_group.py +++ b/tests/api/test_hostif_trap_group.py @@ -1,5 +1,7 @@ from pprint import pprint +import pytest + class TestSaiHostifTrapGroup: # object with no attributes @@ -18,13 +20,70 @@ def test_hostif_trap_group_create(self, npu): print('======= SAI commands RETURN values create =======') pprint(results) - def test_hostif_trap_group_remove(self, npu): + @pytest.mark.dependency(name='test_sai_hostif_trap_group_attr_admin_state_set') + def test_sai_hostif_trap_group_attr_admin_state_set(self, npu): + commands = [ + { + 'name': 'hostif_trap_group_1', + 'op': 'set', + 'attributes': ['SAI_HOSTIF_TRAP_GROUP_ATTR_ADMIN_STATE', 'true'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_hostif_trap_group_attr_admin_state_set']) + def test_sai_hostif_trap_group_attr_admin_state_get(self, npu): + commands = [ + { + 'name': 'hostif_trap_group_1', + 'op': 'get', + 'attributes': ['SAI_HOSTIF_TRAP_GROUP_ATTR_ADMIN_STATE'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == 'true', 'Get error, expected true but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_hostif_trap_group_attr_queue_set') + def 
test_sai_hostif_trap_group_attr_queue_set(self, npu): + commands = [ + { + 'name': 'hostif_trap_group_1', + 'op': 'set', + 'attributes': ['SAI_HOSTIF_TRAP_GROUP_ATTR_QUEUE', '0'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_hostif_trap_group_attr_queue_set']) + def test_sai_hostif_trap_group_attr_queue_get(self, npu): commands = [ { 'name': 'hostif_trap_group_1', - 'op': 'remove', + 'op': 'get', + 'attributes': ['SAI_HOSTIF_TRAP_GROUP_ATTR_QUEUE'], } ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '0', 'Get error, expected 0 but got %s' % r_value + + def test_hostif_trap_group_remove(self, npu): + commands = [{'name': 'hostif_trap_group_1', 'op': 'remove'}] results = [*npu.process_commands(commands)] print('======= SAI commands RETURN values remove =======') diff --git a/tests/api/test_policer.py b/tests/api/test_policer.py index 4c6f3352..54fb769e 100644 --- a/tests/api/test_policer.py +++ b/tests/api/test_policer.py @@ -1,5 +1,7 @@ from pprint import pprint +import pytest + class TestSaiPolicer: # object with no parents @@ -23,13 +25,225 @@ def test_policer_create(self, npu): print('======= SAI commands RETURN values create =======') pprint(results) - def test_policer_remove(self, npu): + @pytest.mark.dependency(name='test_sai_policer_attr_cbs_set') + def test_sai_policer_attr_cbs_set(self, npu): + commands = [ + { + 'name': 'policer_1', + 'op': 'set', + 'attributes': ['SAI_POLICER_ATTR_CBS', '0'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_policer_attr_cbs_set']) + def test_sai_policer_attr_cbs_get(self, npu): + commands = [ + {'name': 'policer_1', 'op': 'get', 'attributes': ['SAI_POLICER_ATTR_CBS']} + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '0', 'Get error, expected 0 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_policer_attr_cir_set') + def test_sai_policer_attr_cir_set(self, npu): + commands = [ + { + 'name': 'policer_1', + 'op': 'set', + 'attributes': ['SAI_POLICER_ATTR_CIR', '0'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_policer_attr_cir_set']) + def test_sai_policer_attr_cir_get(self, npu): + commands = [ + {'name': 'policer_1', 'op': 'get', 'attributes': ['SAI_POLICER_ATTR_CIR']} + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '0', 'Get error, expected 0 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_policer_attr_pbs_set') + def test_sai_policer_attr_pbs_set(self, npu): + commands = [ + { + 'name': 'policer_1', + 'op': 'set', + 'attributes': ['SAI_POLICER_ATTR_PBS', '0'], + } + ] + results = 
[*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_policer_attr_pbs_set']) + def test_sai_policer_attr_pbs_get(self, npu): + commands = [ + {'name': 'policer_1', 'op': 'get', 'attributes': ['SAI_POLICER_ATTR_PBS']} + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '0', 'Get error, expected 0 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_policer_attr_pir_set') + def test_sai_policer_attr_pir_set(self, npu): + commands = [ + { + 'name': 'policer_1', + 'op': 'set', + 'attributes': ['SAI_POLICER_ATTR_PIR', '0'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_policer_attr_pir_set']) + def test_sai_policer_attr_pir_get(self, npu): + commands = [ + {'name': 'policer_1', 'op': 'get', 'attributes': ['SAI_POLICER_ATTR_PIR']} + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '0', 'Get error, expected 0 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_policer_attr_green_packet_action_set') + def test_sai_policer_attr_green_packet_action_set(self, npu): commands = [ { 'name': 'policer_1', - 'op': 'remove', + 'op': 'set', + 'attributes': [ + 'SAI_POLICER_ATTR_GREEN_PACKET_ACTION', + 'SAI_PACKET_ACTION_FORWARD', + ], } ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_policer_attr_green_packet_action_set']) + def test_sai_policer_attr_green_packet_action_get(self, npu): + commands = [ + { + 'name': 'policer_1', + 'op': 'get', + 'attributes': ['SAI_POLICER_ATTR_GREEN_PACKET_ACTION'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == 'SAI_PACKET_ACTION_FORWARD', ( + 'Get error, expected SAI_PACKET_ACTION_FORWARD but got %s' % r_value + ) + + @pytest.mark.dependency(name='test_sai_policer_attr_yellow_packet_action_set') + def test_sai_policer_attr_yellow_packet_action_set(self, npu): + commands = [ + { + 'name': 'policer_1', + 'op': 'set', + 'attributes': [ + 'SAI_POLICER_ATTR_YELLOW_PACKET_ACTION', + 'SAI_PACKET_ACTION_FORWARD', + ], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_policer_attr_yellow_packet_action_set']) + def test_sai_policer_attr_yellow_packet_action_get(self, npu): + commands = [ + { + 'name': 'policer_1', + 'op': 'get', + 'attributes': ['SAI_POLICER_ATTR_YELLOW_PACKET_ACTION'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == 
'SAI_PACKET_ACTION_FORWARD', ( + 'Get error, expected SAI_PACKET_ACTION_FORWARD but got %s' % r_value + ) + + @pytest.mark.dependency(name='test_sai_policer_attr_red_packet_action_set') + def test_sai_policer_attr_red_packet_action_set(self, npu): + commands = [ + { + 'name': 'policer_1', + 'op': 'set', + 'attributes': [ + 'SAI_POLICER_ATTR_RED_PACKET_ACTION', + 'SAI_PACKET_ACTION_FORWARD', + ], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_policer_attr_red_packet_action_set']) + def test_sai_policer_attr_red_packet_action_get(self, npu): + commands = [ + { + 'name': 'policer_1', + 'op': 'get', + 'attributes': ['SAI_POLICER_ATTR_RED_PACKET_ACTION'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == 'SAI_PACKET_ACTION_FORWARD', ( + 'Get error, expected SAI_PACKET_ACTION_FORWARD but got %s' % r_value + ) + + + def test_policer_remove(self, npu): + commands = [{'name': 'policer_1', 'op': 'remove'}] results = [*npu.process_commands(commands)] print('======= SAI commands RETURN values remove =======') diff --git a/tests/api/test_scheduler.py b/tests/api/test_scheduler.py index 34fe9acc..6be36c3a 100644 --- a/tests/api/test_scheduler.py +++ b/tests/api/test_scheduler.py @@ -1,5 +1,7 @@ from pprint import pprint +import pytest + class TestSaiScheduler: # object with no attributes @@ -18,13 +20,236 @@ def test_scheduler_create(self, npu): print('======= SAI commands RETURN values create =======') pprint(results) - def test_scheduler_remove(self, npu): + @pytest.mark.dependency(name='test_sai_scheduler_attr_scheduling_type_set') + def test_sai_scheduler_attr_scheduling_type_set(self, npu): + commands = [ + { + 'name': 'scheduler_1', + 'op': 'set', + 'attributes': [ + 'SAI_SCHEDULER_ATTR_SCHEDULING_TYPE', + 'SAI_SCHEDULING_TYPE_WRR', + ], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_scheduler_attr_scheduling_type_set']) + def test_sai_scheduler_attr_scheduling_type_get(self, npu): + commands = [ + { + 'name': 'scheduler_1', + 'op': 'get', + 'attributes': ['SAI_SCHEDULER_ATTR_SCHEDULING_TYPE'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == 'SAI_SCHEDULING_TYPE_WRR', ( + 'Get error, expected SAI_SCHEDULING_TYPE_WRR but got %s' % r_value + ) + + @pytest.mark.dependency(name='test_sai_scheduler_attr_scheduling_weight_set') + def test_sai_scheduler_attr_scheduling_weight_set(self, npu): + commands = [ + { + 'name': 'scheduler_1', + 'op': 'set', + 'attributes': ['SAI_SCHEDULER_ATTR_SCHEDULING_WEIGHT', '1'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_scheduler_attr_scheduling_weight_set']) + def test_sai_scheduler_attr_scheduling_weight_get(self, npu): + commands = [ + { + 'name': 'scheduler_1', + 'op': 'get', + 'attributes': ['SAI_SCHEDULER_ATTR_SCHEDULING_WEIGHT'], + } + ] + results = 
[*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '1', 'Get error, expected 1 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_scheduler_attr_meter_type_set') + def test_sai_scheduler_attr_meter_type_set(self, npu): + commands = [ + { + 'name': 'scheduler_1', + 'op': 'set', + 'attributes': ['SAI_SCHEDULER_ATTR_METER_TYPE', 'SAI_METER_TYPE_BYTES'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_scheduler_attr_meter_type_set']) + def test_sai_scheduler_attr_meter_type_get(self, npu): commands = [ { 'name': 'scheduler_1', - 'op': 'remove', + 'op': 'get', + 'attributes': ['SAI_SCHEDULER_ATTR_METER_TYPE'], } ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == 'SAI_METER_TYPE_BYTES', ( + 'Get error, expected SAI_METER_TYPE_BYTES but got %s' % r_value + ) + + @pytest.mark.dependency(name='test_sai_scheduler_attr_min_bandwidth_rate_set') + def test_sai_scheduler_attr_min_bandwidth_rate_set(self, npu): + commands = [ + { + 'name': 'scheduler_1', + 'op': 'set', + 'attributes': ['SAI_SCHEDULER_ATTR_MIN_BANDWIDTH_RATE', '0'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_scheduler_attr_min_bandwidth_rate_set']) + def test_sai_scheduler_attr_min_bandwidth_rate_get(self, npu): + commands = [ + { + 'name': 'scheduler_1', + 'op': 'get', + 'attributes': ['SAI_SCHEDULER_ATTR_MIN_BANDWIDTH_RATE'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '0', 'Get error, expected 0 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_scheduler_attr_min_bandwidth_burst_rate_set') + def test_sai_scheduler_attr_min_bandwidth_burst_rate_set(self, npu): + commands = [ + { + 'name': 'scheduler_1', + 'op': 'set', + 'attributes': ['SAI_SCHEDULER_ATTR_MIN_BANDWIDTH_BURST_RATE', '0'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency( + depends=['test_sai_scheduler_attr_min_bandwidth_burst_rate_set'] + ) + def test_sai_scheduler_attr_min_bandwidth_burst_rate_get(self, npu): + commands = [ + { + 'name': 'scheduler_1', + 'op': 'get', + 'attributes': ['SAI_SCHEDULER_ATTR_MIN_BANDWIDTH_BURST_RATE'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '0', 'Get error, expected 0 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_scheduler_attr_max_bandwidth_rate_set') + def test_sai_scheduler_attr_max_bandwidth_rate_set(self, npu): + commands = [ + { + 'name': 'scheduler_1', + 'op': 'set', + 'attributes': 
['SAI_SCHEDULER_ATTR_MAX_BANDWIDTH_RATE', '0'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_scheduler_attr_max_bandwidth_rate_set']) + def test_sai_scheduler_attr_max_bandwidth_rate_get(self, npu): + commands = [ + { + 'name': 'scheduler_1', + 'op': 'get', + 'attributes': ['SAI_SCHEDULER_ATTR_MAX_BANDWIDTH_RATE'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '0', 'Get error, expected 0 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_scheduler_attr_max_bandwidth_burst_rate_set') + def test_sai_scheduler_attr_max_bandwidth_burst_rate_set(self, npu): + commands = [ + { + 'name': 'scheduler_1', + 'op': 'set', + 'attributes': ['SAI_SCHEDULER_ATTR_MAX_BANDWIDTH_BURST_RATE', '0'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency( + depends=['test_sai_scheduler_attr_max_bandwidth_burst_rate_set'] + ) + def test_sai_scheduler_attr_max_bandwidth_burst_rate_get(self, npu): + commands = [ + { + 'name': 'scheduler_1', + 'op': 'get', + 'attributes': ['SAI_SCHEDULER_ATTR_MAX_BANDWIDTH_BURST_RATE'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '0', 'Get error, expected 0 but got %s' % r_value + + def test_scheduler_remove(self, npu): + commands = [{'name': 'scheduler_1', 'op': 'remove'}] results = [*npu.process_commands(commands)] print('======= SAI commands RETURN values remove =======') diff --git a/tests/api/test_wred.py b/tests/api/test_wred.py index 5a809f14..1a6ebc9b 100644 --- a/tests/api/test_wred.py +++ b/tests/api/test_wred.py @@ -1,5 +1,7 @@ from pprint import pprint +import pytest + class TestSaiWred: # object with no attributes @@ -18,13 +20,715 @@ def test_wred_create(self, npu): print('======= SAI commands RETURN values create =======') pprint(results) - def test_wred_remove(self, npu): + @pytest.mark.dependency(name='test_sai_wred_attr_green_enable_set') + def test_sai_wred_attr_green_enable_set(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'set', + 'attributes': ['SAI_WRED_ATTR_GREEN_ENABLE', 'false'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_wred_attr_green_enable_set']) + def test_sai_wred_attr_green_enable_get(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'get', + 'attributes': ['SAI_WRED_ATTR_GREEN_ENABLE'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == 'false', 'Get error, expected false but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_wred_attr_green_min_threshold_set') + def test_sai_wred_attr_green_min_threshold_set(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'set', + 'attributes': 
['SAI_WRED_ATTR_GREEN_MIN_THRESHOLD', '0'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_wred_attr_green_min_threshold_set']) + def test_sai_wred_attr_green_min_threshold_get(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'get', + 'attributes': ['SAI_WRED_ATTR_GREEN_MIN_THRESHOLD'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '0', 'Get error, expected 0 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_wred_attr_green_max_threshold_set') + def test_sai_wred_attr_green_max_threshold_set(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'set', + 'attributes': ['SAI_WRED_ATTR_GREEN_MAX_THRESHOLD', '0'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_wred_attr_green_max_threshold_set']) + def test_sai_wred_attr_green_max_threshold_get(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'get', + 'attributes': ['SAI_WRED_ATTR_GREEN_MAX_THRESHOLD'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '0', 'Get error, expected 0 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_wred_attr_green_drop_probability_set') + def test_sai_wred_attr_green_drop_probability_set(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'set', + 'attributes': ['SAI_WRED_ATTR_GREEN_DROP_PROBABILITY', '100'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_wred_attr_green_drop_probability_set']) + def test_sai_wred_attr_green_drop_probability_get(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'get', + 'attributes': ['SAI_WRED_ATTR_GREEN_DROP_PROBABILITY'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '100', 'Get error, expected 100 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_wred_attr_yellow_enable_set') + def test_sai_wred_attr_yellow_enable_set(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'set', + 'attributes': ['SAI_WRED_ATTR_YELLOW_ENABLE', 'false'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_wred_attr_yellow_enable_set']) + def test_sai_wred_attr_yellow_enable_get(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'get', + 'attributes': ['SAI_WRED_ATTR_YELLOW_ENABLE'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == 'false', 'Get 
error, expected false but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_wred_attr_yellow_min_threshold_set') + def test_sai_wred_attr_yellow_min_threshold_set(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'set', + 'attributes': ['SAI_WRED_ATTR_YELLOW_MIN_THRESHOLD', '0'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_wred_attr_yellow_min_threshold_set']) + def test_sai_wred_attr_yellow_min_threshold_get(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'get', + 'attributes': ['SAI_WRED_ATTR_YELLOW_MIN_THRESHOLD'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '0', 'Get error, expected 0 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_wred_attr_yellow_max_threshold_set') + def test_sai_wred_attr_yellow_max_threshold_set(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'set', + 'attributes': ['SAI_WRED_ATTR_YELLOW_MAX_THRESHOLD', '0'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_wred_attr_yellow_max_threshold_set']) + def test_sai_wred_attr_yellow_max_threshold_get(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'get', + 'attributes': ['SAI_WRED_ATTR_YELLOW_MAX_THRESHOLD'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '0', 'Get error, expected 0 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_wred_attr_yellow_drop_probability_set') + def test_sai_wred_attr_yellow_drop_probability_set(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'set', + 'attributes': ['SAI_WRED_ATTR_YELLOW_DROP_PROBABILITY', '100'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_wred_attr_yellow_drop_probability_set']) + def test_sai_wred_attr_yellow_drop_probability_get(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'get', + 'attributes': ['SAI_WRED_ATTR_YELLOW_DROP_PROBABILITY'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '100', 'Get error, expected 100 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_wred_attr_red_enable_set') + def test_sai_wred_attr_red_enable_set(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'set', + 'attributes': ['SAI_WRED_ATTR_RED_ENABLE', 'false'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_wred_attr_red_enable_set']) + def test_sai_wred_attr_red_enable_get(self, npu): + commands = [ + {'name': 'wred_1', 'op': 'get', 'attributes': ['SAI_WRED_ATTR_RED_ENABLE']} + ] + results = 
[*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == 'false', 'Get error, expected false but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_wred_attr_red_min_threshold_set') + def test_sai_wred_attr_red_min_threshold_set(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'set', + 'attributes': ['SAI_WRED_ATTR_RED_MIN_THRESHOLD', '0'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_wred_attr_red_min_threshold_set']) + def test_sai_wred_attr_red_min_threshold_get(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'get', + 'attributes': ['SAI_WRED_ATTR_RED_MIN_THRESHOLD'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '0', 'Get error, expected 0 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_wred_attr_red_max_threshold_set') + def test_sai_wred_attr_red_max_threshold_set(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'set', + 'attributes': ['SAI_WRED_ATTR_RED_MAX_THRESHOLD', '0'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_wred_attr_red_max_threshold_set']) + def test_sai_wred_attr_red_max_threshold_get(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'get', + 'attributes': ['SAI_WRED_ATTR_RED_MAX_THRESHOLD'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '0', 'Get error, expected 0 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_wred_attr_red_drop_probability_set') + def test_sai_wred_attr_red_drop_probability_set(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'set', + 'attributes': ['SAI_WRED_ATTR_RED_DROP_PROBABILITY', '100'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_wred_attr_red_drop_probability_set']) + def test_sai_wred_attr_red_drop_probability_get(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'get', + 'attributes': ['SAI_WRED_ATTR_RED_DROP_PROBABILITY'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '100', 'Get error, expected 100 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_wred_attr_weight_set') + def test_sai_wred_attr_weight_set(self, npu): + commands = [ + {'name': 'wred_1', 'op': 'set', 'attributes': ['SAI_WRED_ATTR_WEIGHT', '0']} + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_wred_attr_weight_set']) + def 
test_sai_wred_attr_weight_get(self, npu): + commands = [ + {'name': 'wred_1', 'op': 'get', 'attributes': ['SAI_WRED_ATTR_WEIGHT']} + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '0', 'Get error, expected 0 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_wred_attr_ecn_mark_mode_set') + def test_sai_wred_attr_ecn_mark_mode_set(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'set', + 'attributes': ['SAI_WRED_ATTR_ECN_MARK_MODE', 'SAI_ECN_MARK_MODE_NONE'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_wred_attr_ecn_mark_mode_set']) + def test_sai_wred_attr_ecn_mark_mode_get(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'get', + 'attributes': ['SAI_WRED_ATTR_ECN_MARK_MODE'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == 'SAI_ECN_MARK_MODE_NONE', ( + 'Get error, expected SAI_ECN_MARK_MODE_NONE but got %s' % r_value + ) + + @pytest.mark.dependency(name='test_sai_wred_attr_ecn_green_min_threshold_set') + def test_sai_wred_attr_ecn_green_min_threshold_set(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'set', + 'attributes': ['SAI_WRED_ATTR_ECN_GREEN_MIN_THRESHOLD', '0'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_wred_attr_ecn_green_min_threshold_set']) + def test_sai_wred_attr_ecn_green_min_threshold_get(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'get', + 'attributes': ['SAI_WRED_ATTR_ECN_GREEN_MIN_THRESHOLD'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '0', 'Get error, expected 0 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_wred_attr_ecn_green_max_threshold_set') + def test_sai_wred_attr_ecn_green_max_threshold_set(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'set', + 'attributes': ['SAI_WRED_ATTR_ECN_GREEN_MAX_THRESHOLD', '0'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_wred_attr_ecn_green_max_threshold_set']) + def test_sai_wred_attr_ecn_green_max_threshold_get(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'get', + 'attributes': ['SAI_WRED_ATTR_ECN_GREEN_MAX_THRESHOLD'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '0', 'Get error, expected 0 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_wred_attr_ecn_green_mark_probability_set') + def test_sai_wred_attr_ecn_green_mark_probability_set(self, npu): + commands = [ + { + 
'name': 'wred_1', + 'op': 'set', + 'attributes': ['SAI_WRED_ATTR_ECN_GREEN_MARK_PROBABILITY', '100'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency( + depends=['test_sai_wred_attr_ecn_green_mark_probability_set'] + ) + def test_sai_wred_attr_ecn_green_mark_probability_get(self, npu): commands = [ { 'name': 'wred_1', - 'op': 'remove', + 'op': 'get', + 'attributes': ['SAI_WRED_ATTR_ECN_GREEN_MARK_PROBABILITY'], } ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '100', 'Get error, expected 100 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_wred_attr_ecn_yellow_min_threshold_set') + def test_sai_wred_attr_ecn_yellow_min_threshold_set(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'set', + 'attributes': ['SAI_WRED_ATTR_ECN_YELLOW_MIN_THRESHOLD', '0'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_wred_attr_ecn_yellow_min_threshold_set']) + def test_sai_wred_attr_ecn_yellow_min_threshold_get(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'get', + 'attributes': ['SAI_WRED_ATTR_ECN_YELLOW_MIN_THRESHOLD'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '0', 'Get error, expected 0 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_wred_attr_ecn_yellow_max_threshold_set') + def test_sai_wred_attr_ecn_yellow_max_threshold_set(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'set', + 'attributes': ['SAI_WRED_ATTR_ECN_YELLOW_MAX_THRESHOLD', '0'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_wred_attr_ecn_yellow_max_threshold_set']) + def test_sai_wred_attr_ecn_yellow_max_threshold_get(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'get', + 'attributes': ['SAI_WRED_ATTR_ECN_YELLOW_MAX_THRESHOLD'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '0', 'Get error, expected 0 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_wred_attr_ecn_yellow_mark_probability_set') + def test_sai_wred_attr_ecn_yellow_mark_probability_set(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'set', + 'attributes': ['SAI_WRED_ATTR_ECN_YELLOW_MARK_PROBABILITY', '100'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency( + depends=['test_sai_wred_attr_ecn_yellow_mark_probability_set'] + ) + def test_sai_wred_attr_ecn_yellow_mark_probability_get(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'get', + 'attributes': ['SAI_WRED_ATTR_ECN_YELLOW_MARK_PROBABILITY'], + } + ] + results = [*npu.process_commands(commands)] + 
print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '100', 'Get error, expected 100 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_wred_attr_ecn_red_min_threshold_set') + def test_sai_wred_attr_ecn_red_min_threshold_set(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'set', + 'attributes': ['SAI_WRED_ATTR_ECN_RED_MIN_THRESHOLD', '0'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_wred_attr_ecn_red_min_threshold_set']) + def test_sai_wred_attr_ecn_red_min_threshold_get(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'get', + 'attributes': ['SAI_WRED_ATTR_ECN_RED_MIN_THRESHOLD'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '0', 'Get error, expected 0 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_wred_attr_ecn_red_max_threshold_set') + def test_sai_wred_attr_ecn_red_max_threshold_set(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'set', + 'attributes': ['SAI_WRED_ATTR_ECN_RED_MAX_THRESHOLD', '0'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_wred_attr_ecn_red_max_threshold_set']) + def test_sai_wred_attr_ecn_red_max_threshold_get(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'get', + 'attributes': ['SAI_WRED_ATTR_ECN_RED_MAX_THRESHOLD'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '0', 'Get error, expected 0 but got %s' % r_value + + @pytest.mark.dependency(name='test_sai_wred_attr_ecn_red_mark_probability_set') + def test_sai_wred_attr_ecn_red_mark_probability_set(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'set', + 'attributes': ['SAI_WRED_ATTR_ECN_RED_MARK_PROBABILITY', '100'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + pprint(results) + + @pytest.mark.dependency(depends=['test_sai_wred_attr_ecn_red_mark_probability_set']) + def test_sai_wred_attr_ecn_red_mark_probability_get(self, npu): + commands = [ + { + 'name': 'wred_1', + 'op': 'get', + 'attributes': ['SAI_WRED_ATTR_ECN_RED_MARK_PROBABILITY'], + } + ] + results = [*npu.process_commands(commands)] + print('======= SAI commands RETURN values get =======') + for command in results: + for attribute in command: + pprint(attribute.raw()) + r_value = results[0][0].value() + print(r_value) + assert r_value == '100', 'Get error, expected 100 but got %s' % r_value + + def test_wred_remove(self, npu): + commands = [{'name': 'wred_1', 'op': 'remove'}] results = [*npu.process_commands(commands)] print('======= SAI commands RETURN values remove =======')
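
The set/get cases added above all repeat one pattern: set an attribute, read it back with a get, and assert on `results[0][0].value()`. Below is a minimal sketch of how that pattern could be parameterized with `pytest.mark.parametrize`; the helper name `set_and_get` and the test name are hypothetical, and the sketch assumes the same `npu` fixture and `process_commands()` result API used in the tests in this diff.

import pytest
from pprint import pprint


def set_and_get(npu, obj_name, attr, value):
    # Hypothetical helper: set `attr` to `value` on `obj_name`, then read it back.
    # Consume the generator, matching how the tests above drive process_commands().
    [*npu.process_commands(
        [{'name': obj_name, 'op': 'set', 'attributes': [attr, value]}]
    )]
    results = [*npu.process_commands(
        [{'name': obj_name, 'op': 'get', 'attributes': [attr]}]
    )]
    pprint(results)
    return results[0][0].value()


@pytest.mark.parametrize(
    'attr,value',
    [
        # Attribute/value pairs taken from the WRED cases in this diff.
        ('SAI_WRED_ATTR_GREEN_MIN_THRESHOLD', '0'),
        ('SAI_WRED_ATTR_GREEN_DROP_PROBABILITY', '100'),
    ],
)
def test_wred_set_get(npu, attr, value):
    r_value = set_and_get(npu, 'wred_1', attr, value)
    assert r_value == value, 'Get error, expected %s but got %s' % (value, r_value)

One trade-off of this shape: the explicit per-attribute dependency markers used in the diff (`@pytest.mark.dependency`) are replaced by a single parametrized test per attribute, so set/get ordering is guaranteed within each case rather than across separate test functions.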