From 441d74fe89dfda9ce5d295055c9cf47d01eed593 Mon Sep 17 00:00:00 2001
From: Muhammed Fatih Balin
Date: Sat, 3 Feb 2024 06:18:10 +0000
Subject: [PATCH] extend GPUCachedFeature tests.

---
 .../graphbolt/impl/test_gpu_cached_feature.py | 25 ++++++++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)

diff --git a/tests/python/pytorch/graphbolt/impl/test_gpu_cached_feature.py b/tests/python/pytorch/graphbolt/impl/test_gpu_cached_feature.py
index d251701cdaf9..eb9a62babff1 100644
--- a/tests/python/pytorch/graphbolt/impl/test_gpu_cached_feature.py
+++ b/tests/python/pytorch/graphbolt/impl/test_gpu_cached_feature.py
@@ -28,14 +28,16 @@
         torch.float64,
     ],
 )
-def test_gpu_cached_feature(dtype):
+@pytest.mark.parametrize("cache_size_a", [1, 1024])
+@pytest.mark.parametrize("cache_size_b", [1, 1024])
+def test_gpu_cached_feature(dtype, cache_size_a, cache_size_b):
     a = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=dtype, pin_memory=True)
     b = torch.tensor(
         [[[1, 2], [3, 4]], [[4, 5], [6, 7]]], dtype=dtype, pin_memory=True
     )
 
-    feat_store_a = gb.GPUCachedFeature(gb.TorchBasedFeature(a), 2)
-    feat_store_b = gb.GPUCachedFeature(gb.TorchBasedFeature(b), 1)
+    feat_store_a = gb.GPUCachedFeature(gb.TorchBasedFeature(a), cache_size_a)
+    feat_store_b = gb.GPUCachedFeature(gb.TorchBasedFeature(b), cache_size_b)
 
     # Test read the entire feature.
     assert torch.equal(feat_store_a.read(), a.to("cuda"))
@@ -52,6 +54,23 @@ def test_gpu_cached_feature(dtype):
             "cuda"
         ),
     )
+    assert torch.equal(
+        feat_store_a.read(torch.tensor([1, 1]).to("cuda")),
+        torch.tensor([[4, 5, 6], [4, 5, 6]], dtype=dtype).to("cuda"),
+    )
+    assert torch.equal(
+        feat_store_b.read(torch.tensor([0]).to("cuda")),
+        torch.tensor([[[1, 2], [3, 4]]], dtype=dtype).to("cuda"),
+    )
+    # The cache should be full now for the large cache sizes, 100% hit rate expected.
+    if cache_size_a >= 1024:
+        total_miss = feat_store_a._feature.total_miss
+        feat_store_a.read(torch.tensor([0, 1]).to("cuda"))
+        assert total_miss == feat_store_a._feature.total_miss
+    if cache_size_b >= 1024:
+        total_miss = feat_store_b._feature.total_miss
+        feat_store_b.read(torch.tensor([0, 1]).to("cuda"))
+        assert total_miss == feat_store_b._feature.total_miss
 
     # Test get the size of the entire feature with ids.
     assert feat_store_a.size() == torch.Size([3])