From 88b6df66bd0cc13e6f0f8fc00ed8016b4af9c131 Mon Sep 17 00:00:00 2001
From: Samuel Moors
Date: Sun, 1 Dec 2024 13:44:06 +0100
Subject: [PATCH] set compute_unit in child classes

---
 .../tests/apps/PyTorch/PyTorch_torchvision.py | 18 +++---------------
 1 file changed, 3 insertions(+), 15 deletions(-)

diff --git a/eessi/testsuite/tests/apps/PyTorch/PyTorch_torchvision.py b/eessi/testsuite/tests/apps/PyTorch/PyTorch_torchvision.py
index 391be095..1fd1e5c4 100644
--- a/eessi/testsuite/tests/apps/PyTorch/PyTorch_torchvision.py
+++ b/eessi/testsuite/tests/apps/PyTorch/PyTorch_torchvision.py
@@ -26,7 +26,6 @@ def required_mem_per_node(self):
 
     @run_after('init')
     def prepare_test(self):
-        # Set nn_model as executable option
         self.executable_opts = ['pytorch_synthetic_benchmark.py --model %s' % self.nn_model]
         self.bench_name = self.nn_model
 
@@ -35,19 +34,6 @@ def prepare_test(self):
         if self.device_type != DEVICE_TYPES[GPU]:
             self.executable_opts += ['--no-cuda']
 
-    @run_after('init')
-    def set_compute_unit(self):
-        """
-        Set the compute unit to which tasks will be assigned:
-        one task per NUMA node for CPU runs, and one task per GPU for GPU runs.
-        """
-        device_to_compute_unit = {
-            # Hybrid execution with one task per NUMA_NODE is typically the most efficient
-            DEVICE_TYPES[CPU]: COMPUTE_UNIT[NUMA_NODE],
-            DEVICE_TYPES[GPU]: COMPUTE_UNIT[GPU],
-        }
-        self.compute_unit = device_to_compute_unit.get(self.device_type)
-
     @run_after('setup')
     def set_ddp_options(self):
         "Set environment variables for PyTorch DDP"
@@ -86,7 +72,7 @@ def total_throughput(self):
 
     @performance_function('img/sec')
     def througput_per_CPU(self):
-        '''Training througput per CPU'''
+        '''Training througput per device type'''
         if self.device_type == DEVICE_TYPES[CPU]:
             return sn.extractsingle(r'Img/sec per CPU:\s+(?P<perf_per_cpu>\S+)', self.stdout, 'perf_per_cpu', float)
         else:
@@ -96,11 +82,13 @@ def througput_per_CPU(self):
 @rfm.simple_test
 class EESSI_PyTorch_torchvision_CPU(EESSI_PyTorch_torchvision):
     device_type = DEVICE_TYPES[CPU]
+    compute_unit = COMPUTE_UNIT[NUMA_NODE]
 
 
 @rfm.simple_test
 class EESSI_PyTorch_torchvision_GPU(EESSI_PyTorch_torchvision):
     device_type = DEVICE_TYPES[GPU]
+    compute_unit = COMPUTE_UNIT[GPU]
     precision = parameter(['default', 'mixed'])
 
     @run_after('init')