diff --git a/.github/workflows/userbenchmark-a100-bisection.yml b/.github/workflows/userbenchmark-a100-bisection.yml
index aa39a997de..dbc14f13bd 100644
--- a/.github/workflows/userbenchmark-a100-bisection.yml
+++ b/.github/workflows/userbenchmark-a100-bisection.yml
@@ -64,7 +64,6 @@ jobs:
         run: |
           CONDA_ENV=${BASE_CONDA_ENV} . "${SETUP_SCRIPT}"
           cd benchmark
-          python install.py
           mkdir -p "${BISECT_WORKDIR}"
           PYTORCH_GIT_HASH=$(python -c 'import torch; print(torch.version.git_version)')
           python run_benchmark.py ${{ github.event.inputs.userbenchmark }} ${{ github.event.inputs.userbenchmark_args }} --dryrun \
diff --git a/userbenchmark/test_bench/run.py b/userbenchmark/test_bench/run.py
index 31cf177146..ef77063fad 100644
--- a/userbenchmark/test_bench/run.py
+++ b/userbenchmark/test_bench/run.py
@@ -198,7 +198,7 @@ def run_config(
     return dict.fromkeys(metrics, str(e))
 
 
-def run_config_memleak(config: TorchBenchModelConfig) -> Dict[str, str]:
+def run_config_memleak(config: TorchBenchModelConfig, dryrun: bool=False) -> Dict[str, str]:
     def assertEqual(x, y):
         assert x == y, f"{x} != {y}"
     model_name = config.name
@@ -210,6 +210,9 @@ def assertEqual(x, y):
     )
     # to speedup test, use batch size 1 if possible
     batch_size = 1 if allow_customize_batch_size else None
+    if dryrun:
+        print(" [skip_by_dryrun] ", flush=True)
+        return {"memleak": "skip_by_dryrun"}
     try:
         with task.watch_cuda_memory(
             skip=False,
@@ -327,7 +330,7 @@ def run(args: List[str]):
         if "accuracy" in metrics:
            metrics_dict = run_config_accuracy(config, metrics, dryrun=args.dryrun)
         elif "memleak" in metrics:
-            metrics_dict = run_config_memleak(config)
+            metrics_dict = run_config_memleak(config, dryrun=args.dryrun)
         else:
             metrics_dict = run_config(config, metrics, dryrun=args.dryrun)
         config_str = config_to_str(config)
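
Note on the userbenchmark/test_bench/run.py change: run_config_memleak previously ignored --dryrun, so a dryrun invocation (which the bisection workflow above relies on) still built the model task and ran the full CUDA-memory check. The patch threads args.dryrun through and returns a "skip_by_dryrun" sentinel instead, matching what run_config and run_config_accuracy already do. Below is a minimal, self-contained sketch of that short-circuit pattern; SimpleConfig and run_memleak_check are simplified stand-ins for illustration only, not the real TorchBenchModelConfig / run_config_memleak:

    # Sketch of the dryrun short-circuit added by this patch.
    # SimpleConfig / run_memleak_check are hypothetical stand-ins,
    # not actual torchbench APIs.
    from dataclasses import dataclass
    from typing import Dict

    @dataclass
    class SimpleConfig:
        name: str
        batch_size: int = 1

    def run_memleak_check(config: SimpleConfig, dryrun: bool = False) -> Dict[str, str]:
        if dryrun:
            # Mirror the patch: emit a marker and return a sentinel metric
            # without loading the model or watching CUDA memory.
            print(" [skip_by_dryrun] ", flush=True)
            return {"memleak": "skip_by_dryrun"}
        # Real path (elided here): create the model task, snapshot CUDA memory
        # around the run, and report whether the snapshots match.
        return {"memleak": "False"}

    if __name__ == "__main__":
        print(run_memleak_check(SimpleConfig(name="resnet50"), dryrun=True))
        # prints " [skip_by_dryrun] " followed by {'memleak': 'skip_by_dryrun'}

With this in place, the workflow's dryrun pass (python run_benchmark.py ... --dryrun) only validates that each selected config resolves, without paying for a full memleak run.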