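'''Dataset provider for item-pair duplicate detection.

Builds (description, image-embedding) pairs on the fly from the pairs CSV,
the description HDF5 file, and the per-shard image-feature HDF5 files.
'''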
import pickle
import warnings

import h5py
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset

# Ignore warnings
warnings.filterwarnings("ignore")


class DatasetProvider(Dataset):

    def __init__(self, pair_file_path, data_file_path, images_dir, transform=None, start_id=-1,
                 id_to_data_map='dataset/id_to_desc_map.pickle', id_to_img_map='dataset/img_id_map.pickle'):
        '''
        Dataset that builds the pairs online, using the ids from the pairs dataset.

        :param pair_file_path: path to the pairs CSV
        :param data_file_path: path to the description HDF5 dataset
        :param images_dir: path to the directory where the image embeddings are stored
        :param transform: PyTorch transform used to turn an input into a tensor
        :param start_id: index of the fixed-size chunk of the pairs file to load;
                         -1 loads the whole file
        :param id_to_data_map: pickle mapping item id -> row in the description dataset
        :param id_to_img_map: pickle mapping image id -> row in its image-feature shard
        '''
        # Read the pairs and, if requested, keep only one fixed-size chunk.
        self.pairs = pd.read_csv(pair_file_path, encoding='utf-8')
        if start_id != -1:
            chunk_size = 207551  # hard-coded number of pairs per chunk
            start_index = start_id * chunk_size
            end_index = min((start_id + 1) * chunk_size, len(self.pairs))
            self.pairs = self.pairs[start_index:end_index]
        self.images_dir = images_dir
        with open(id_to_data_map, 'rb') as f:
            self.id_to_data = pickle.load(f)
        with open(id_to_img_map, 'rb') as f:
            self.id_to_img = pickle.load(f)
        self.transform = transform
        # Load the description data once; image features are read lazily per item.
        with h5py.File(data_file_path, 'r') as data:
            # self.item_idx = data['itemID'][()]
            self.image_ids = data['image_id'][()]
            self.descriptions = data['descriptions'][()]

    def get_image_embedding(self, image_id):
        # Image features are sharded into 100 HDF5 files keyed by image_id % 100.
        folder_id = image_id % 100
        with h5py.File(self.images_dir + "/image_features_" + str(folder_id) + ".hdf5", 'r') as img_data:
            # For the baseline these need to change: read from a single file
            # and look the id up directly:
            # with h5py.File(self.images_dir, 'r') as img_data:
            #     ids = img_data['img_id'][()]
            #     position_item = np.argwhere(ids == image_id)[0][0]
            #     return img_data['img_embed'][position_item]
            position_item = self.id_to_img[image_id]
            return img_data['image_features'][position_item]

    def __len__(self):
        return self.pairs.shape[0]

    def __getitem__(self, idx):
        pair = self.pairs.iloc[idx]
        y = int(pair['isDuplicate'])
        item_1_id = int(pair['itemID_1'])
        item_2_id = int(pair['itemID_2'])
        # Map item ids to rows in the description dataset.
        position_item_1 = self.id_to_data[item_1_id]
        position_item_2 = self.id_to_data[item_2_id]
        item_1_desc = self.descriptions[position_item_1]
        item_2_desc = self.descriptions[position_item_2]
        item_1_img = self.image_ids[position_item_1]
        item_2_img = self.image_ids[position_item_2]
        img_1 = self.get_image_embedding(int(item_1_img))
        img_2 = self.get_image_embedding(int(item_2_img))
        return {'desc1': item_1_desc, 'image_1': img_1,
                'desc2': item_2_desc, 'image_2': img_2, 'target': y}

    def collater(self, samples):
        if len(samples) == 0:
            return {}

        def merge(values):
            # Zero-pad variable-length sequences to the longest one in the
            # batch. Kept for variable-length descriptions; unused while the
            # descriptions have a fixed length.
            max_length = max(v.size(0) for v in values)
            result = values[0].new(len(values), max_length).fill_(0)
            for i, v in enumerate(values):
                result[i, :len(v)].copy_(v)
            return result

        images_1 = torch.from_numpy(np.stack([s['image_1'] for s in samples])).float()
        images_2 = torch.from_numpy(np.stack([s['image_2'] for s in samples])).float()
        # desc_1 = merge([s['desc1'] for s in samples])
        # desc_2 = merge([s['desc2'] for s in samples])
        desc_1 = torch.LongTensor([s['desc1'] for s in samples])
        desc_2 = torch.LongTensor([s['desc2'] for s in samples])
        target = torch.FloatTensor([s['target'] for s in samples])
        return {
            'desc1': desc_1,
            'image_1': images_1,
            'desc2': desc_2,
            'image_2': images_2,
            'target': target,
        }
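

# ---------------------------------------------------------------------------
# Usage sketch: a minimal example of plugging the provider into a torch
# DataLoader via its custom collater. The file paths below are hypothetical
# placeholders; the real locations depend on the preprocessing pipeline.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from torch.utils.data import DataLoader

    provider = DatasetProvider(
        pair_file_path='dataset/pairs.csv',          # hypothetical path
        data_file_path='dataset/descriptions.hdf5',  # hypothetical path
        images_dir='dataset/image_features',         # hypothetical path
    )
    loader = DataLoader(provider, batch_size=32, shuffle=True,
                        collate_fn=provider.collater)
    batch = next(iter(loader))
    print(batch['desc1'].shape, batch['image_1'].shape, batch['target'].shape)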