# dataloader.py
import os
import glob
from collections import OrderedDict

import cv2
import numpy as np
import torch.utils.data as data
import torchvision.transforms as transforms

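# Expected data layout (inferred from the glob patterns and the path
# rewriting in __getitem__; the exact directory names are an assumption):
#
#   <video_folder>/<video_name>/<frame_number>.jpg    # input frames
#
# The matching per-frame optical-flow .npy files are assumed to live under a
# sibling tree whose name replaces "frames" with "flows" ("<dir>_flows" for
# the "shanghai" dataset).
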
def np_load_frame(filename, resize_height, resize_width):
    """
    Load an image from disk and convert it to a numpy.ndarray. Note that the
    color channels are BGR and the pixel values are normalized from [0, 255]
    to [-1, 1].

    :param filename: full path of the image
    :param resize_height: target height after resizing
    :param resize_width: target width after resizing
    :return: numpy.ndarray of shape (resize_height, resize_width, 3), float32
    """
    image_decoded = cv2.imread(filename)
    image_resized = cv2.resize(image_decoded, (resize_width, resize_height))
    image_resized = image_resized.astype(dtype=np.float32)
    image_resized = (image_resized / 127.5) - 1.0
    return image_resized


class DataLoader(data.Dataset):
    """Dataset that yields a clip of consecutive frames plus the matching optical-flow maps."""

    def __init__(self, video_folder, transform, resize_height, resize_width, time_step=4, num_pred=1):
        self.dir = video_folder
        self.transform = transform
        self.videos = OrderedDict()
        self._resize_height = resize_height
        self._resize_width = resize_width
        self._time_step = time_step
        self._num_pred = num_pred
        self.setup()
        self.samples = self.get_all_samples()

    def setup(self):
        # Index every video directory: its path, sorted frame list, and frame count.
        videos = glob.glob(os.path.join(self.dir, '*'))
        for video in sorted(videos):
            video_name = video.split('/')[-1]
            self.videos[video_name] = {}
            self.videos[video_name]['path'] = video
            self.videos[video_name]['frame'] = glob.glob(os.path.join(video, '*.jpg'))
            self.videos[video_name]['frame'].sort()
            self.videos[video_name]['length'] = len(self.videos[video_name]['frame'])

    def get_all_samples(self):
        # Each sample is the first frame of a sliding window of
        # time_step + num_pred consecutive frames; the loop bound below is
        # only correct for the default num_pred == 1.
        frames = []
        videos = glob.glob(os.path.join(self.dir, '*'))
        for video in sorted(videos):
            video_name = video.split('/')[-1]
            for i in range(len(self.videos[video_name]['frame']) - self._time_step):
                frames.append(self.videos[video_name]['frame'][i])
        return frames

    def __getitem__(self, index):
        video_name = self.samples[index].split('/')[-2]
        # Frame files are assumed to be named by their 1-based frame number.
        frame_name = int(self.samples[index].split('/')[-1].split('.')[-2])
        batch = []
        flow_batch = []
        for i in range(self._time_step + self._num_pred):
            frame_path = self.videos[video_name]['frame'][frame_name + i - 1]
            image = np_load_frame(frame_path, self._resize_height, self._resize_width)  # (256, 256, 3)
            if self.transform is not None:
                image = self.transform(image)  # (3, 256, 256)
            batch.append(image)
            # The flow file mirrors the frame path, with the frames directory
            # replaced by a flows directory ("<dir>_flows" for ShanghaiTech).
            if self.dir.split('/')[-2] == "shanghai":
                b = frame_path.split('/')[-3]
                flow_path = frame_path.replace(b, b + "_flows").replace("jpg", "npy")
            else:
                flow_path = frame_path.replace("frames", "flows").replace("jpg", "npy")
            flow = np.load(flow_path)  # (256, 256, 2)
            flow_batch.append(flow)
        # Frames are stacked along the channel axis: (time_step + num_pred) * 3
        # channels, e.g. 15x256x256; the batch dimension is added by the
        # DataLoader iterator. Flows are stacked the same way along axis 2.
        return np.concatenate(batch, axis=0), np.concatenate(flow_batch, axis=2)

    def __len__(self):
        return len(self.samples)


if __name__ == '__main__':
    train_folder = "/home/huyt/DATASET/ped2/training/frames"
    train_dataset = DataLoader(train_folder, transforms.Compose([transforms.ToTensor()]),
                               256, 256, 4)
    train_batch = data.DataLoader(train_dataset, 1, shuffle=False, drop_last=True)
    # __getitem__ returns a (frames, flows) pair, so unpack both here.
    for j, (imgs, flows) in enumerate(train_batch):
        print(imgs.shape, flows.shape)
        break
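
    # Minimal visualization sketch: undo the [-1, 1] normalization applied in
    # np_load_frame to recover a displayable BGR frame (the output filename
    # "first_frame.png" is an arbitrary choice for this example).
    first_frame = imgs[0, :3].permute(1, 2, 0).numpy()  # (256, 256, 3), BGR, in [-1, 1]
    first_frame = ((first_frame + 1.0) * 127.5).astype(np.uint8)
    cv2.imwrite("first_frame.png", first_frame)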