Create CODEOWNERS #408
10 fail, 143 pass in 20m 32s
Annotations
test_kernel_names (tests.test_profile_general) failed
tests/test_profile_misc.xml [took 28s]
Raw output
@pytest.mark.misc
def test_kernel_names():
    options = baseline_opts + ["--roof-only", "--kernel-names"]
    workload_dir = test_utils.get_output_dir()
    e = test_utils.launch_omniperf(config, options, workload_dir, check_success=False)
    if soc == "MI100":
        # assert that it did not run
        assert e.value.code >= 1
        # Do not continue testing
        return
    # assert successful run
    assert e.value.code == 0
    file_dict = test_utils.check_csv_files(workload_dir, num_devices, num_kernels)
    if soc == "MI200":
>       assert sorted(list(file_dict.keys())) == [
            "empirRoof_gpu-0_fp32.pdf",
            "empirRoof_gpu-0_int8_fp16.pdf",
            "kernelName_legend.pdf",
            "pmc_perf.csv",
            "pmc_perf_0.csv",
            "pmc_perf_1.csv",
            "pmc_perf_2.csv",
            "roofline.csv",
            "sysinfo.csv",
            "timestamps.csv",
        ]
E AssertionError: assert ['empirRoof_g...f_1.csv', ...] == ['empirRoof_g...f_1.csv', ...]
E At index 0 diff: 'empirRoof_gpu-0_fp32_fp64.pdf' != 'empirRoof_gpu-0_fp32.pdf'
E Full diff:
E [
E - 'empirRoof_gpu-0_fp32.pdf',
E + 'empirRoof_gpu-0_fp32_fp64.pdf',
E ? +++++
E 'empirRoof_gpu-0_int8_fp16.pdf',
E 'kernelName_legend.pdf',
E 'pmc_perf.csv',
E 'pmc_perf_0.csv',
E 'pmc_perf_1.csv',
E 'pmc_perf_2.csv',
E 'roofline.csv',
E 'sysinfo.csv',
E 'timestamps.csv',
E ]
tests/test_profile_general.py:537: AssertionError
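All ten failures in this run reduce to the same diff: on MI200 the profiler now writes a combined empirRoof_gpu-0_fp32_fp64.pdf where the tests still expect empirRoof_gpu-0_fp32.pdf. Assuming the combined fp32/fp64 roofline plot is the intended new behavior rather than a regression, a minimal fix sketch for this test is to rename the one entry in its inline expected list (the remaining nine tests compare against the shared ROOF_ONLY_FILES constant instead; see the sketch after the last failure):

# Sketch only, not the confirmed fix: rename the expected PDF in
# test_kernel_names. All other filenames are copied from the diff above.
if soc == "MI200":
    assert sorted(list(file_dict.keys())) == [
        "empirRoof_gpu-0_fp32_fp64.pdf",  # was "empirRoof_gpu-0_fp32.pdf"
        "empirRoof_gpu-0_int8_fp16.pdf",
        "kernelName_legend.pdf",
        "pmc_perf.csv",
        "pmc_perf_0.csv",
        "pmc_perf_1.csv",
        "pmc_perf_2.csv",
        "roofline.csv",
        "sysinfo.csv",
        "timestamps.csv",
    ]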
test_mem_levels_HBM (tests.test_profile_general) failed
tests/test_profile_mem.xml [took 28s]
Raw output
@pytest.mark.mem
def test_mem_levels_HBM():
    options = baseline_opts + ["--roof-only", "--mem-level", "HBM"]
    workload_dir = test_utils.get_output_dir()
    e = test_utils.launch_omniperf(config, options, workload_dir, check_success=False)
    if soc == "MI100":
        # assert that it did not run
        assert e.value.code >= 1
        # Do not continue testing
        return
    # assert successful run
    assert e.value.code == 0
    file_dict = test_utils.check_csv_files(workload_dir, num_devices, num_kernels)
    if soc == "MI200":
        print(sorted(list(file_dict.keys())))
>       assert sorted(list(file_dict.keys())) == ROOF_ONLY_FILES
E AssertionError: assert ['empirRoof_g...f_2.csv', ...] == ['empirRoof_g...f_2.csv', ...]
E At index 0 diff: 'empirRoof_gpu-0_fp32_fp64.pdf' != 'empirRoof_gpu-0_fp32.pdf'
E Full diff:
E [
E - 'empirRoof_gpu-0_fp32.pdf',
E + 'empirRoof_gpu-0_fp32_fp64.pdf',
E ? +++++
E 'empirRoof_gpu-0_int8_fp16.pdf',
E 'pmc_perf.csv',
E 'pmc_perf_0.csv',
E 'pmc_perf_1.csv',
E 'pmc_perf_2.csv',
E 'roofline.csv',
E 'sysinfo.csv',
E 'timestamps.csv',
E ]
tests/test_profile_general.py:1473: AssertionError
test_mem_levels_L2 (tests.test_profile_general) failed
tests/test_profile_mem.xml [took 27s]
Raw output
@pytest.mark.mem
def test_mem_levels_L2():
    options = baseline_opts + ["--roof-only", "--mem-level", "L2"]
    workload_dir = test_utils.get_output_dir()
    e = test_utils.launch_omniperf(config, options, workload_dir, check_success=False)
    if soc == "MI100":
        # assert that it did not run
        assert e.value.code >= 1
        # Do not continue testing
        return
    # assert successful run
    assert e.value.code == 0
    file_dict = test_utils.check_csv_files(workload_dir, num_devices, num_kernels)
    if soc == "MI200":
        print(sorted(list(file_dict.keys())))
>       assert sorted(list(file_dict.keys())) == ROOF_ONLY_FILES
E AssertionError: assert ['empirRoof_g...f_2.csv', ...] == ['empirRoof_g...f_2.csv', ...]
E At index 0 diff: 'empirRoof_gpu-0_fp32_fp64.pdf' != 'empirRoof_gpu-0_fp32.pdf'
E Full diff:
E [
E - 'empirRoof_gpu-0_fp32.pdf',
E + 'empirRoof_gpu-0_fp32_fp64.pdf',
E ? +++++
E 'empirRoof_gpu-0_int8_fp16.pdf',
E 'pmc_perf.csv',
E 'pmc_perf_0.csv',
E 'pmc_perf_1.csv',
E 'pmc_perf_2.csv',
E 'roofline.csv',
E 'sysinfo.csv',
E 'timestamps.csv',
E ]
tests/test_profile_general.py:1504: AssertionError
test_mem_levels_vL1D (tests.test_profile_general) failed
tests/test_profile_mem.xml [took 28s]
Raw output
@pytest.mark.mem
def test_mem_levels_vL1D():
    options = baseline_opts + ["--roof-only", "--mem-level", "vL1D"]
    workload_dir = test_utils.get_output_dir()
    e = test_utils.launch_omniperf(config, options, workload_dir, check_success=False)
    if soc == "MI100":
        # assert that it did not run
        assert e.value.code >= 1
        # Do not continue testing
        return
    # assert successful run
    assert e.value.code == 0
    file_dict = test_utils.check_csv_files(workload_dir, num_devices, num_kernels)
    if soc == "MI200":
        print(sorted(list(file_dict.keys())))
>       assert sorted(list(file_dict.keys())) == ROOF_ONLY_FILES
E AssertionError: assert ['empirRoof_g...f_2.csv', ...] == ['empirRoof_g...f_2.csv', ...]
E At index 0 diff: 'empirRoof_gpu-0_fp32_fp64.pdf' != 'empirRoof_gpu-0_fp32.pdf'
E Full diff:
E [
E - 'empirRoof_gpu-0_fp32.pdf',
E + 'empirRoof_gpu-0_fp32_fp64.pdf',
E ? +++++
E 'empirRoof_gpu-0_int8_fp16.pdf',
E 'pmc_perf.csv',
E 'pmc_perf_0.csv',
E 'pmc_perf_1.csv',
E 'pmc_perf_2.csv',
E 'roofline.csv',
E 'sysinfo.csv',
E 'timestamps.csv',
E ]
tests/test_profile_general.py:1535: AssertionError
test_mem_levels_LDS (tests.test_profile_general) failed
tests/test_profile_mem.xml [took 27s]
Raw output
@pytest.mark.mem
def test_mem_levels_LDS():
    options = baseline_opts + ["--roof-only", "--mem-level", "LDS"]
    workload_dir = test_utils.get_output_dir()
    e = test_utils.launch_omniperf(config, options, workload_dir, check_success=False)
    if soc == "MI100":
        # assert that it did not run
        assert e.value.code >= 1
        # Do not continue testing
        return
    # assert successful run
    assert e.value.code == 0
    file_dict = test_utils.check_csv_files(workload_dir, num_devices, num_kernels)
    if soc == "MI200":
        print(sorted(list(file_dict.keys())))
>       assert sorted(list(file_dict.keys())) == ROOF_ONLY_FILES
E AssertionError: assert ['empirRoof_g...f_2.csv', ...] == ['empirRoof_g...f_2.csv', ...]
E At index 0 diff: 'empirRoof_gpu-0_fp32_fp64.pdf' != 'empirRoof_gpu-0_fp32.pdf'
E Full diff:
E [
E - 'empirRoof_gpu-0_fp32.pdf',
E + 'empirRoof_gpu-0_fp32_fp64.pdf',
E ? +++++
E 'empirRoof_gpu-0_int8_fp16.pdf',
E 'pmc_perf.csv',
E 'pmc_perf_0.csv',
E 'pmc_perf_1.csv',
E 'pmc_perf_2.csv',
E 'roofline.csv',
E 'sysinfo.csv',
E 'timestamps.csv',
E ]
tests/test_profile_general.py:1566: AssertionError
test_mem_levels_HBM_LDS (tests.test_profile_general) failed
tests/test_profile_mem.xml [took 27s]
Raw output
@pytest.mark.mem
def test_mem_levels_HBM_LDS():
    options = baseline_opts + ["--roof-only", "--mem-level", "HBM", "LDS"]
    workload_dir = test_utils.get_output_dir()
    e = test_utils.launch_omniperf(config, options, workload_dir, check_success=False)
    if soc == "MI100":
        # assert that it did not run
        assert e.value.code >= 1
        # Do not continue testing
        return
    # assert successful run
    assert e.value.code == 0
    file_dict = test_utils.check_csv_files(workload_dir, num_devices, num_kernels)
    if soc == "MI200":
        print(sorted(list(file_dict.keys())))
>       assert sorted(list(file_dict.keys())) == ROOF_ONLY_FILES
E AssertionError: assert ['empirRoof_g...f_2.csv', ...] == ['empirRoof_g...f_2.csv', ...]
E At index 0 diff: 'empirRoof_gpu-0_fp32_fp64.pdf' != 'empirRoof_gpu-0_fp32.pdf'
E Full diff:
E [
E - 'empirRoof_gpu-0_fp32.pdf',
E + 'empirRoof_gpu-0_fp32_fp64.pdf',
E ? +++++
E 'empirRoof_gpu-0_int8_fp16.pdf',
E 'pmc_perf.csv',
E 'pmc_perf_0.csv',
E 'pmc_perf_1.csv',
E 'pmc_perf_2.csv',
E 'roofline.csv',
E 'sysinfo.csv',
E 'timestamps.csv',
E ]
tests/test_profile_general.py:1597: AssertionError
test_mem_levels_vL1D_LDS (tests.test_profile_general) failed
tests/test_profile_mem.xml [took 27s]
Raw output
@pytest.mark.mem
def test_mem_levels_vL1D_LDS():
    options = baseline_opts + ["--roof-only", "--mem-level", "vL1D", "LDS"]
    workload_dir = test_utils.get_output_dir()
    e = test_utils.launch_omniperf(config, options, workload_dir, check_success=False)
    if soc == "MI100":
        # assert that it did not run
        assert e.value.code >= 1
        # Do not continue testing
        return
    # assert successful run
    assert e.value.code == 0
    file_dict = test_utils.check_csv_files(workload_dir, num_devices, num_kernels)
    if soc == "MI200":
        print(sorted(list(file_dict.keys())))
>       assert sorted(list(file_dict.keys())) == ROOF_ONLY_FILES
E AssertionError: assert ['empirRoof_g...f_2.csv', ...] == ['empirRoof_g...f_2.csv', ...]
E At index 0 diff: 'empirRoof_gpu-0_fp32_fp64.pdf' != 'empirRoof_gpu-0_fp32.pdf'
E Full diff:
E [
E - 'empirRoof_gpu-0_fp32.pdf',
E + 'empirRoof_gpu-0_fp32_fp64.pdf',
E ? +++++
E 'empirRoof_gpu-0_int8_fp16.pdf',
E 'pmc_perf.csv',
E 'pmc_perf_0.csv',
E 'pmc_perf_1.csv',
E 'pmc_perf_2.csv',
E 'roofline.csv',
E 'sysinfo.csv',
E 'timestamps.csv',
E ]
tests/test_profile_general.py:1628: AssertionError
test_mem_levels_L2_vL1D_LDS (tests.test_profile_general) failed
tests/test_profile_mem.xml [took 27s]
Raw output
@pytest.mark.mem
def test_mem_levels_L2_vL1D_LDS():
    options = baseline_opts + ["--roof-only", "--mem-level", "L2", "vL1D", "LDS"]
    workload_dir = test_utils.get_output_dir()
    e = test_utils.launch_omniperf(config, options, workload_dir, check_success=False)
    if soc == "MI100":
        # assert that it did not run
        assert e.value.code >= 1
        # Do not continue testing
        return
    # assert successful run
    assert e.value.code == 0
    file_dict = test_utils.check_csv_files(workload_dir, num_devices, num_kernels)
    if soc == "MI200":
        print(sorted(list(file_dict.keys())))
>       assert sorted(list(file_dict.keys())) == ROOF_ONLY_FILES
E AssertionError: assert ['empirRoof_g...f_2.csv', ...] == ['empirRoof_g...f_2.csv', ...]
E At index 0 diff: 'empirRoof_gpu-0_fp32_fp64.pdf' != 'empirRoof_gpu-0_fp32.pdf'
E Full diff:
E [
E - 'empirRoof_gpu-0_fp32.pdf',
E + 'empirRoof_gpu-0_fp32_fp64.pdf',
E ? +++++
E 'empirRoof_gpu-0_int8_fp16.pdf',
E 'pmc_perf.csv',
E 'pmc_perf_0.csv',
E 'pmc_perf_1.csv',
E 'pmc_perf_2.csv',
E 'roofline.csv',
E 'sysinfo.csv',
E 'timestamps.csv',
E ]
tests/test_profile_general.py:1658: AssertionError
test_sort_dispatches (tests.test_profile_general) failed
tests/test_profile_sort.xml [took 28s]
Raw output
@pytest.mark.sort
def test_sort_dispatches():
    options = baseline_opts + ["--roof-only", "--sort", "dispatches"]
    workload_dir = test_utils.get_output_dir()
    e = test_utils.launch_omniperf(config, options, workload_dir, check_success=False)
    if soc == "MI100":
        # assert that it did not run
        assert e.value.code >= 1
        # Do not continue testing
        return
    # assert successful run
    assert e.value.code == 0
    file_dict = test_utils.check_csv_files(workload_dir, num_devices, num_kernels)
    if soc == "MI200":
>       assert sorted(list(file_dict.keys())) == ROOF_ONLY_FILES
E AssertionError: assert ['empirRoof_g...f_2.csv', ...] == ['empirRoof_g...f_2.csv', ...]
E At index 0 diff: 'empirRoof_gpu-0_fp32_fp64.pdf' != 'empirRoof_gpu-0_fp32.pdf'
E Full diff:
E [
E - 'empirRoof_gpu-0_fp32.pdf',
E + 'empirRoof_gpu-0_fp32_fp64.pdf',
E ? +++++
E 'empirRoof_gpu-0_int8_fp16.pdf',
E 'pmc_perf.csv',
E 'pmc_perf_0.csv',
E 'pmc_perf_1.csv',
E 'pmc_perf_2.csv',
E 'roofline.csv',
E 'sysinfo.csv',
E 'timestamps.csv',
E ]
tests/test_profile_general.py:1412: AssertionError
test_sort_kernels (tests.test_profile_general) failed
tests/test_profile_sort.xml [took 27s]
Raw output
@pytest.mark.sort
def test_sort_kernels():
    options = baseline_opts + ["--roof-only", "--sort", "kernels"]
    workload_dir = test_utils.get_output_dir()
    e = test_utils.launch_omniperf(config, options, workload_dir, check_success=False)
    if soc == "MI100":
        # assert that it did not run
        assert e.value.code >= 1
        # Do not continue testing
        return
    # assert successful run
    assert e.value.code == 0
    file_dict = test_utils.check_csv_files(workload_dir, num_devices, num_kernels)
    if soc == "MI200":
>       assert sorted(list(file_dict.keys())) == ROOF_ONLY_FILES
E AssertionError: assert ['empirRoof_g...f_2.csv', ...] == ['empirRoof_g...f_2.csv', ...]
E At index 0 diff: 'empirRoof_gpu-0_fp32_fp64.pdf' != 'empirRoof_gpu-0_fp32.pdf'
E Full diff:
E [
E - 'empirRoof_gpu-0_fp32.pdf',
E + 'empirRoof_gpu-0_fp32_fp64.pdf',
E ? +++++
E 'empirRoof_gpu-0_int8_fp16.pdf',
E 'pmc_perf.csv',
E 'pmc_perf_0.csv',
E 'pmc_perf_1.csv',
E 'pmc_perf_2.csv',
E 'roofline.csv',
E 'sysinfo.csv',
E 'timestamps.csv',
E ]
tests/test_profile_general.py:1442: AssertionError
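The nine failures that compare against ROOF_ONLY_FILES would all be fixed by the same rename applied once to that shared constant. Its definition is not shown in this log, so the reconstruction below is an assumption pieced together from the printed diffs:

# Hypothetical definition of ROOF_ONLY_FILES in tests/test_profile_general.py,
# reconstructed from the diffs above with the single rename applied.
ROOF_ONLY_FILES = [
    "empirRoof_gpu-0_fp32_fp64.pdf",  # was "empirRoof_gpu-0_fp32.pdf"
    "empirRoof_gpu-0_int8_fp16.pdf",
    "pmc_perf.csv",
    "pmc_perf_0.csv",
    "pmc_perf_1.csv",
    "pmc_perf_2.csv",
    "roofline.csv",
    "sysinfo.csv",
    "timestamps.csv",
]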
153 tests found
There are 153 tests; see "Raw output" for the full list.
Raw output
tests.test_analyze_commands ‑ test_baseline
tests.test_analyze_commands ‑ test_col_1
tests.test_analyze_commands ‑ test_col_2
tests.test_analyze_commands ‑ test_col_3
tests.test_analyze_commands ‑ test_decimal_1
tests.test_analyze_commands ‑ test_decimal_2
tests.test_analyze_commands ‑ test_decimal_3
tests.test_analyze_commands ‑ test_dependency_MI100
tests.test_analyze_commands ‑ test_dispatch_1
tests.test_analyze_commands ‑ test_dispatch_2
tests.test_analyze_commands ‑ test_dispatch_3
tests.test_analyze_commands ‑ test_dispatch_4
tests.test_analyze_commands ‑ test_dispatch_5
tests.test_analyze_commands ‑ test_filter_block_1
tests.test_analyze_commands ‑ test_filter_block_2
tests.test_analyze_commands ‑ test_filter_block_3
tests.test_analyze_commands ‑ test_filter_block_4
tests.test_analyze_commands ‑ test_filter_block_5
tests.test_analyze_commands ‑ test_filter_block_6
tests.test_analyze_commands ‑ test_filter_kernel_1
tests.test_analyze_commands ‑ test_filter_kernel_2
tests.test_analyze_commands ‑ test_filter_kernel_3
tests.test_analyze_commands ‑ test_g
tests.test_analyze_commands ‑ test_gpu_ids
tests.test_analyze_commands ‑ test_kernel_verbose_0
tests.test_analyze_commands ‑ test_kernel_verbose_1
tests.test_analyze_commands ‑ test_kernel_verbose_2
tests.test_analyze_commands ‑ test_kernel_verbose_3
tests.test_analyze_commands ‑ test_kernel_verbose_4
tests.test_analyze_commands ‑ test_kernel_verbose_5
tests.test_analyze_commands ‑ test_kernel_verbose_6
tests.test_analyze_commands ‑ test_list_kernels
tests.test_analyze_commands ‑ test_list_metrics_gfx906
tests.test_analyze_commands ‑ test_list_metrics_gfx908
tests.test_analyze_commands ‑ test_list_metrics_gfx90a
tests.test_analyze_commands ‑ test_max_stat_num_1
tests.test_analyze_commands ‑ test_max_stat_num_2
tests.test_analyze_commands ‑ test_max_stat_num_3
tests.test_analyze_commands ‑ test_max_stat_num_4
tests.test_analyze_commands ‑ test_normal_unit_per_cycle
tests.test_analyze_commands ‑ test_normal_unit_per_kernel
tests.test_analyze_commands ‑ test_normal_unit_per_second
tests.test_analyze_commands ‑ test_normal_unit_per_wave
tests.test_analyze_commands ‑ test_save_dfs
tests.test_analyze_commands ‑ test_time_unit_ms
tests.test_analyze_commands ‑ test_time_unit_ns
tests.test_analyze_commands ‑ test_time_unit_s
tests.test_analyze_commands ‑ test_time_unit_us
tests.test_analyze_commands ‑ test_valid_path
tests.test_analyze_workloads ‑ test_analyze_device_filter_MI100
tests.test_analyze_workloads ‑ test_analyze_device_filter_MI200
tests.test_analyze_workloads ‑ test_analyze_device_inv_int_MI100
tests.test_analyze_workloads ‑ test_analyze_device_inv_int_MI200
tests.test_analyze_workloads ‑ test_analyze_dispatch_0_1_MI100
tests.test_analyze_workloads ‑ test_analyze_dispatch_0_1_MI200
tests.test_analyze_workloads ‑ test_analyze_dispatch_0_MI100
tests.test_analyze_workloads ‑ test_analyze_dispatch_0_MI200
tests.test_analyze_workloads ‑ test_analyze_dispatch_2_MI100
tests.test_analyze_workloads ‑ test_analyze_dispatch_2_MI200
tests.test_analyze_workloads ‑ test_analyze_dispatch_6_8_MI100
tests.test_analyze_workloads ‑ test_analyze_dispatch_6_8_MI200
tests.test_analyze_workloads ‑ test_analyze_dispatch_7_MI100
tests.test_analyze_workloads ‑ test_analyze_dispatch_7_MI200
tests.test_analyze_workloads ‑ test_analyze_dispatch_inv_MI100
tests.test_analyze_workloads ‑ test_analyze_dispatch_inv_MI200
tests.test_analyze_workloads ‑ test_analyze_ipblocks_CPC_MI100
tests.test_analyze_workloads ‑ test_analyze_ipblocks_CPC_MI200
tests.test_analyze_workloads ‑ test_analyze_ipblocks_CPF_MI100
tests.test_analyze_workloads ‑ test_analyze_ipblocks_CPF_MI200
tests.test_analyze_workloads ‑ test_analyze_ipblocks_SPI_MI100
tests.test_analyze_workloads ‑ test_analyze_ipblocks_SPI_MI200
tests.test_analyze_workloads ‑ test_analyze_ipblocks_SQC_MI100
tests.test_analyze_workloads ‑ test_analyze_ipblocks_SQC_MI200
tests.test_analyze_workloads ‑ test_analyze_ipblocks_SQ_CPC_MI100
tests.test_analyze_workloads ‑ test_analyze_ipblocks_SQ_CPC_MI200
tests.test_analyze_workloads ‑ test_analyze_ipblocks_SQ_MI100
tests.test_analyze_workloads ‑ test_analyze_ipblocks_SQ_MI200
tests.test_analyze_workloads ‑ test_analyze_ipblocks_SQ_SPI_MI100
tests.test_analyze_workloads ‑ test_analyze_ipblocks_SQ_SPI_MI200
tests.test_analyze_workloads ‑ test_analyze_ipblocks_SQ_SPI_TA_TCC_CPF_MI100
tests.test_analyze_workloads ‑ test_analyze_ipblocks_SQ_SPI_TA_TCC_CPF_MI200
tests.test_analyze_workloads ‑ test_analyze_ipblocks_SQ_SQC_TCP_CPC_MI100
tests.test_analyze_workloads ‑ test_analyze_ipblocks_SQ_SQC_TCP_CPC_MI200
tests.test_analyze_workloads ‑ test_analyze_ipblocks_SQ_TA_MI100
tests.test_analyze_workloads ‑ test_analyze_ipblocks_SQ_TA_MI200
tests.test_analyze_workloads ‑ test_analyze_ipblocks_TA_MI100
tests.test_analyze_workloads ‑ test_analyze_ipblocks_TA_MI200
tests.test_analyze_workloads ‑ test_analyze_ipblocks_TCC_MI100
tests.test_analyze_workloads ‑ test_analyze_ipblocks_TCC_MI200
tests.test_analyze_workloads ‑ test_analyze_ipblocks_TCP_MI100
tests.test_analyze_workloads ‑ test_analyze_ipblocks_TCP_MI200
tests.test_analyze_workloads ‑ test_analyze_ipblocks_TD_MI100
tests.test_analyze_workloads ‑ test_analyze_ipblocks_TD_MI200
tests.test_analyze_workloads ‑ test_analyze_join_type_grid_MI100
tests.test_analyze_workloads ‑ test_analyze_join_type_grid_MI200
tests.test_analyze_workloads ‑ test_analyze_join_type_kernel_MI100
tests.test_analyze_workloads ‑ test_analyze_join_type_kernel_MI200
tests.test_analyze_workloads ‑ test_analyze_kernel_MI100
tests.test_analyze_workloads ‑ test_analyze_kernel_MI200
tests.test_analyze_workloads ‑ test_analyze_kernel_inv_int_MI100
tests.test_analyze_workloads ‑ test_analyze_kernel_inv_int_MI200
tests.test_analyze_workloads ‑ test_analyze_kernel_inv_str_MI100
tests.test_analyze_workloads ‑ test_analyze_kernel_inv_str_MI200
tests.test_analyze_workloads ‑ test_analyze_kernel_names_MI200
tests.test_analyze_workloads ‑ test_analyze_kernel_substr_MI100
tests.test_analyze_workloads ‑ test_analyze_kernel_substr_MI200
tests.test_analyze_workloads ‑ test_analyze_mem_levels_HBM_LDS_MI200
tests.test_analyze_workloads ‑ test_analyze_mem_levels_HBM_MI200
tests.test_analyze_workloads ‑ test_analyze_mem_levels_L2_MI200
tests.test_analyze_workloads ‑ test_analyze_mem_levels_L2_vL1d_LDS_MI200
tests.test_analyze_workloads ‑ test_analyze_mem_levels_LDS_MI200
tests.test_analyze_workloads ‑ test_analyze_mem_levels_vL1D_MI200
tests.test_analyze_workloads ‑ test_analyze_mem_levels_vL1d_LDS_MI200
tests.test_analyze_workloads ‑ test_analyze_no_roof_MI100
tests.test_analyze_workloads ‑ test_analyze_no_roof_MI200
tests.test_analyze_workloads ‑ test_analyze_path_MI100
tests.test_analyze_workloads ‑ test_analyze_path_MI200
tests.test_analyze_workloads ‑ test_analyze_sort_dispatches_MI200
tests.test_analyze_workloads ‑ test_analyze_sort_kernels_MI200
tests.test_profile_general ‑ test_block_CPC
tests.test_profile_general ‑ test_block_CPF
tests.test_profile_general ‑ test_block_SPI
tests.test_profile_general ‑ test_block_SQ
tests.test_profile_general ‑ test_block_SQC
tests.test_profile_general ‑ test_block_SQ_CPC
tests.test_profile_general ‑ test_block_SQ_SPI
tests.test_profile_general ‑ test_block_SQ_SPI_TA_TCC_CPF
tests.test_profile_general ‑ test_block_SQ_SQC_TCP_CPC
tests.test_profile_general ‑ test_block_SQ_TA
tests.test_profile_general ‑ test_block_TA
tests.test_profile_general ‑ test_block_TCC
tests.test_profile_general ‑ test_block_TCP
tests.test_profile_general ‑ test_block_TD
tests.test_profile_general ‑ test_device_filter
tests.test_profile_general ‑ test_dispatch_0
tests.test_profile_general ‑ test_dispatch_0_1
tests.test_profile_general ‑ test_dispatch_2
tests.test_profile_general ‑ test_join_type_grid
tests.test_profile_general ‑ test_join_type_kernel
tests.test_profile_general ‑ test_kernel
tests.test_profile_general ‑ test_kernel_names
tests.test_profile_general ‑ test_kernel_summaries
tests.test_profile_general ‑ test_mem_levels_HBM
tests.test_profile_general ‑ test_mem_levels_HBM_LDS
tests.test_profile_general ‑ test_mem_levels_L2
tests.test_profile_general ‑ test_mem_levels_L2_vL1D_LDS
tests.test_profile_general ‑ test_mem_levels_LDS
tests.test_profile_general ‑ test_mem_levels_vL1D
tests.test_profile_general ‑ test_mem_levels_vL1D_LDS
tests.test_profile_general ‑ test_no_roof
tests.test_profile_general ‑ test_path
tests.test_profile_general ‑ test_sort_dispatches
tests.test_profile_general ‑ test_sort_kernels