From 371ef6e8b9ae81d842098368e337c8cb17bf76d9 Mon Sep 17 00:00:00 2001
From: Bangtian Liu
Date: Thu, 9 Jan 2025 21:37:22 -0500
Subject: [PATCH] [tuner] reduce log file size (#809)

This PR aims to reduce the log file size, as detailed in:
https://github.com/nod-ai/shark-ai/issues/806

- Avoid printing the stdout and stderr of the executed command
- Reduce the precision of floating-point constants that are printed

Signed-off-by: Bangtian Liu
---
 tuner/tuner/candidate_gen.py | 5 -----
 tuner/tuner/libtuner.py      | 4 ++--
 2 files changed, 2 insertions(+), 7 deletions(-)

diff --git a/tuner/tuner/candidate_gen.py b/tuner/tuner/candidate_gen.py
index a1ee421d0..b3cd31079 100644
--- a/tuner/tuner/candidate_gen.py
+++ b/tuner/tuner/candidate_gen.py
@@ -237,11 +237,6 @@ def run_command(run_pack: RunPack) -> RunResult:
             text=True,
             timeout=timeout_seconds,
         )
-
-        if result.stdout:
-            logging.debug(f"stdout: {result.stdout}")
-        if result.stderr:
-            logging.debug(f"stderr: {result.stderr}")
     except subprocess.TimeoutExpired as e:
         logging.warning(
             f"Command '{command_str}' timed out after {timeout_seconds} seconds."
diff --git a/tuner/tuner/libtuner.py b/tuner/tuner/libtuner.py
index 9942187ec..cdd589022 100644
--- a/tuner/tuner/libtuner.py
+++ b/tuner/tuner/libtuner.py
@@ -803,7 +803,7 @@ def compile(
     compiled_candidates = [c for c in compiled_candidates if c is not None]
     success_rate = float(len(compiled_candidates)) / float(len(candidates))
     logging.info(
-        f"Successfully compiled [{len(compiled_candidates)}] candidates. Success rate: {success_rate}"
+        f"Successfully compiled [{len(compiled_candidates)}] candidates. Success rate: {success_rate:.2f}"
     )
 
     # Remove duplicate vmfbs from the candidate list.
@@ -875,7 +875,7 @@ def get_speedup(result: BenchmarkResult) -> float:
             speedup = f"{round(get_speedup(r) * 100, 2)}% of baseline"
         else:
             speedup = "baseline unavailable"
-        logging.info(f"Candidate {r.candidate_id} time: {r.time} ({speedup})")
+        logging.info(f"Candidate {r.candidate_id} time: {r.time:.2f} ({speedup})")
 
     return best_results
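
Note on the formatting change, separate from the patch itself: the size reduction comes from dropping the per-command stdout/stderr debug logging and from the ':.2f' format specifiers, which round floats to two decimal places before they reach the log. A minimal sketch of the formatting effect follows; it reuses the success_rate name from libtuner.py for illustration, but the logging setup and the example value are assumptions, not taken from the tuner code.

    import logging

    # Illustrative setup only; the tuner configures its own loggers.
    logging.basicConfig(level=logging.INFO, format="%(message)s")

    success_rate = 2.0 / 3.0  # full repr is 0.6666666666666666

    # Without a format spec, the f-string embeds the full float repr,
    # which lengthens every such line in the log file.
    logging.info(f"Success rate: {success_rate}")      # Success rate: 0.6666666666666666

    # With ':.2f', the value is rounded to two decimal places.
    logging.info(f"Success rate: {success_rate:.2f}")  # Success rate: 0.67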