Skip to content

Commit

Permalink
amend
Browse files Browse the repository at this point in the history
  • Loading branch information
vmoens committed Nov 13, 2023
1 parent a4d6176 commit 3b9eb36
Show file tree
Hide file tree
Showing 2 changed files with 3 additions and 1 deletion.
2 changes: 2 additions & 0 deletions test/_utils_internal.py
Original file line number Diff line number Diff line change
Expand Up @@ -175,13 +175,15 @@ def memmap_td(self, device):
# MemmapTensor allows a 'cuda' device, which means that the data will
# be sent to cuda when accessed.
# When deprecating MemmapTensor, we'll also deprecate this behaviour.
device = torch.device(device)
if device.type == "cpu":
return self.td(device).memmap_(backend="Tensor")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return self.td(device).memmap_(backend="MemmapTensor")

def memmap_td_file(self, device):
    """Return a memory-mapped copy of ``self.td(device)`` persisted under
    ``global_dir_prefix``.

    For CPU devices the ``"Tensor"`` backend is used; for any other device
    the ``"MemmapTensor"`` backend is used instead (presumably because it
    tolerates non-CPU devices — see the sibling ``memmap_td``; confirm
    against its comment).
    """
    device = torch.device(device)
    if device.type == "cpu":
        return self.td(device).memmap_(backend="Tensor", prefix=global_dir_prefix)
    # Consistency fix: memmap_td silences the warning emitted by the
    # "MemmapTensor" backend; do the same on the identical code path here.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        return self.td(device).memmap_(
            backend="MemmapTensor", prefix=global_dir_prefix
        )
Expand Down
2 changes: 1 addition & 1 deletion test/test_tensordict.py
Original file line number Diff line number Diff line change
Expand Up @@ -5249,7 +5249,7 @@ def test_memmap_as_tensor(device):
if device.type == "cuda":
td = td.pin_memory()
td_memmap = td.clone().memmap_(backend="Tensor")
td_memmap_pm = td_memmap.apply(lambda x: x.as_tensor()).pin_memory()
td_memmap_pm = td_memmap.as_tensor().pin_memory()
assert (td.pin_memory().to(device) == td_memmap_pm.to(device)).all()


Expand Down

0 comments on commit 3b9eb36

Please sign in to comment.