From 31cc1a499786be5d6e309334de8263c3834b2749 Mon Sep 17 00:00:00 2001
From: "Oguz Ulgen (Meta Employee)"
Date: Wed, 8 Jan 2025 09:46:34 -0800
Subject: [PATCH] Introduce cache hot loading APIs (a.k.a. "Mega-cache") (#143341)

Summary:
This PR introduces two new APIs:

* torch.compiler.save_cache_artifacts
* torch.compiler.load_cache_artifacts

Together they aim to provide a "mega-cache" experience: the user lets a run
collect cache artifacts and then calls the save API to fetch them as a single
bundle. On a subsequent run, the user can "hot load" the cache artifacts via
the load function. This bundling approach removes the need to port individual
cache files one by one or to make many network requests.

Note that these APIs CANNOT write to structured logging, because they are
called before and after compilation rather than during it. Due to this
limitation, the save API returns a struct that the user can log with.

X-link: https://github.com/pytorch/pytorch/pull/143341
Approved by: https://github.com/jansel

Reviewed By: clee2000

Differential Revision: D67927135

Pulled By: oulgen

fbshipit-source-id: 00c4f3955bd098a61b40760a9a29cdf58caf04cf
---
 userbenchmark/dynamo/dynamobench/_dynamo/testing.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/userbenchmark/dynamo/dynamobench/_dynamo/testing.py b/userbenchmark/dynamo/dynamobench/_dynamo/testing.py
index d401c83f0..3f5dd0255 100644
--- a/userbenchmark/dynamo/dynamobench/_dynamo/testing.py
+++ b/userbenchmark/dynamo/dynamobench/_dynamo/testing.py
@@ -255,6 +255,11 @@ def __call__(
         self.graphs.append(gm)
         return lookup_backend(self.backend)(gm, example_inputs)
 
+    def clear(self) -> None:
+        self.frame_count = 0
+        self.op_count = 0
+        self.graphs = []
+
 
 # Equivalent to backend="eager", but also records graphs that
 # we can assert on
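
A minimal usage sketch of the save/load workflow described in the summary above, under stated assumptions: the return shape of save_cache_artifacts (a serialized blob plus a loggable info struct) is inferred from the summary rather than confirmed by this patch, and `fn` and `mega_cache.bin` are placeholder names.

```python
import torch


@torch.compile
def fn(x: torch.Tensor) -> torch.Tensor:
    return x.sin() + x.cos()


# Warm run: compile once so the caches have something in them.
fn(torch.randn(8))

# Bundle everything cached so far into one portable blob.
# Assumed return: (serialized_bytes, cache_info), or None if nothing was cached.
artifacts = torch.compiler.save_cache_artifacts()
if artifacts is not None:
    artifact_bytes, cache_info = artifacts
    # cache_info is the struct the caller can log with, since these APIs
    # run outside of compilation and cannot use structured logging.
    with open("mega_cache.bin", "wb") as f:
        f.write(artifact_bytes)

# In a later process, "hot load" the bundle before compiling again,
# instead of porting cache files one by one or making many network requests.
with open("mega_cache.bin", "rb") as f:
    torch.compiler.load_cache_artifacts(f.read())
```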