-
Notifications
You must be signed in to change notification settings - Fork 1
/
transforms.py
56 lines (47 loc) · 1.91 KB
/
transforms.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import numpy as np
import torch
from torchvision import transforms as T
from torchvision.transforms import functional as F
class Compose(object):
    """Chain joint image/target transforms into a single callable.

    Unlike ``torchvision.transforms.Compose``, every transform here takes
    and returns the pair ``(image, target)`` so label masks stay in sync
    with the image.
    """

    def __init__(self, transforms):
        # Sequence of callables, each mapping (image, target) -> (image, target).
        self.transforms = transforms

    def __call__(self, image, target):
        """Apply each transform in order, threading the pair through."""
        for transform in self.transforms:
            image, target = transform(image, target)
        return image, target
class Resize(object):
    """Resize the image to exactly (h, w); targets get nearest-neighbor resize.

    A (h, w) pair is passed to ``F.resize`` so both dimensions are matched
    exactly — an int size would only constrain the smaller edge and keep the
    aspect ratio. Targets (label masks) use NEAREST interpolation so class
    ids are never blended. With ``eval_mode=True`` the target is returned
    untouched.
    """

    def __init__(self, h, w, eval_mode=False):
        self.h = h
        self.w = w
        # When True, only the image is resized (evaluate against full-size target).
        self.eval_mode = eval_mode

    def __call__(self, image, target):
        size = (self.h, self.w)
        image = F.resize(image, size)
        if self.eval_mode:
            return image, target

        def _resize_mask(mask):
            # NEAREST keeps label values discrete.
            return F.resize(mask, size, interpolation=F.InterpolationMode.NEAREST)

        if isinstance(target, list):
            target = [_resize_mask(mask) for mask in target]
        else:
            target = _resize_mask(target)
        return image, target
class ToTensor(object):
    """Convert the image to a float tensor and target mask(s) to int64 tensors."""

    def __call__(self, image, target):
        image = F.to_tensor(image)

        def _to_label_tensor(mask):
            # .copy() guards against non-writable arrays (e.g. from PIL images),
            # which torch.as_tensor would otherwise warn about or share memory with.
            return torch.as_tensor(np.asarray(mask).copy(), dtype=torch.int64)

        if isinstance(target, list):
            target = [_to_label_tensor(mask) for mask in target]
        else:
            target = _to_label_tensor(target)
        return image, target
class Normalize(object):
    """Normalize the image tensor channel-wise; the target passes through unchanged."""

    def __init__(self, mean, std):
        # Per-channel mean/std applied by F.normalize as (x - mean) / std.
        self.mean = mean
        self.std = std

    def __call__(self, image, target):
        normalized = F.normalize(image, mean=self.mean, std=self.std)
        return normalized, target