forked from cookielee77/RankGan-NIPS2017
-
Notifications
You must be signed in to change notification settings - Fork 0
/
dataloader.py
executable file
·91 lines (74 loc) · 3.23 KB
/
dataloader.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
import numpy as np
import random
class Rank_Data_loader():
    """Batched data loader for the ranker (discriminator).

    Mixes positive (real) and negative (generated) token-id sequences with
    one-hot labels, shuffles them, splits them into whole batches, and can
    sample reference sequences from the positive pool.
    """

    def __init__(self, batch_size, ref_size):
        self.batch_size = batch_size  # sequences per training batch
        self.ref_size = ref_size      # sequences returned by get_ref()
        self.sentences = np.array([])
        self.labels = np.array([])

    def _read_sequences(self, path, seq_length=None):
        """Parse one whitespace-separated token-id sequence per line of *path*.

        If *seq_length* is given, lines with any other token count are
        silently dropped (generated files may contain malformed lines).
        Returns a list of lists of ints.
        """
        sequences = []
        with open(path) as fin:
            for line in fin:
                parse_line = [int(x) for x in line.strip().split()]
                if seq_length is None or len(parse_line) == seq_length:
                    sequences.append(parse_line)
        return sequences

    def load_train_data(self, positive_file, negative_file, seq_length=20):
        """Load, label, shuffle and batch the ranker training data.

        positive_file / negative_file: text files with one token-id
            sequence per line.
        seq_length: negative lines whose length differs are skipped
            (defaults to 20, the original hard-coded value).
            NOTE(review): positive lines are intentionally NOT filtered,
            matching the original behavior -- they are assumed well-formed.

        Leftover examples that do not fill a whole batch are dropped.
        """
        positive_examples = self._read_sequences(positive_file)
        negative_examples = self._read_sequences(negative_file, seq_length)
        self.positive_examples = np.array(positive_examples)
        self.sentences = np.array(positive_examples + negative_examples)
        # One-hot labels: [0, 1] = real, [1, 0] = generated.
        positive_labels = [[0, 1] for _ in positive_examples]
        negative_labels = [[1, 0] for _ in negative_examples]
        self.labels = np.concatenate([positive_labels, negative_labels], 0)
        # Shuffle sentences and labels with the same permutation.
        shuffle_indices = np.random.permutation(np.arange(len(self.labels)))
        self.sentences = self.sentences[shuffle_indices]
        self.labels = self.labels[shuffle_indices]
        # Integer division (was int(a / b)): drop the remainder so the
        # arrays split evenly into num_batch batches.
        self.num_batch = len(self.labels) // self.batch_size
        self.sentences = self.sentences[:self.num_batch * self.batch_size]
        self.labels = self.labels[:self.num_batch * self.batch_size]
        self.sentences_batches = np.split(self.sentences, self.num_batch, 0)
        self.labels_batches = np.split(self.labels, self.num_batch, 0)
        self.pointer = 0

    def get_ref(self):
        """Return ref_size positive sequences sampled with replacement."""
        index = np.random.randint(0, self.positive_examples.shape[0], size=self.ref_size)
        return self.positive_examples[index]

    def next_batch(self):
        """Return (sentences, labels, references) for the current position
        and advance the pointer, wrapping around after the last batch."""
        ret = self.sentences_batches[self.pointer], self.labels_batches[self.pointer], self.get_ref()
        self.pointer = (self.pointer + 1) % self.num_batch
        return ret

    def reset_pointer(self):
        """Rewind iteration to the first batch."""
        self.pointer = 0
class Gen_Data_loader():
    """Batched data loader for generator pre-training.

    Reads fixed-length token-id sequences from a text file and serves
    them as numpy batches with wrap-around iteration.
    """

    def __init__(self, batch_size):
        self.batch_size = batch_size  # sequences per batch
        self.token_stream = []

    def create_batches(self, data_file, seq_length=20):
        """Read *data_file* and split its sequences into whole batches.

        data_file: text file with one whitespace-separated token-id
            sequence per line.
        seq_length: only lines with exactly this many tokens are kept
            (defaults to 20, the original hard-coded value).

        Leftover sequences that do not fill a whole batch are dropped.
        """
        self.token_stream = []
        with open(data_file, 'r') as f:
            for line in f:
                parse_line = [int(x) for x in line.strip().split()]
                if len(parse_line) == seq_length:
                    self.token_stream.append(parse_line)
        # Integer division (was int(a / b)): drop the remainder so
        # np.split gets an exact multiple of batch_size.
        self.num_batch = len(self.token_stream) // self.batch_size
        self.token_stream = self.token_stream[:self.num_batch * self.batch_size]
        self.sequence_batch = np.split(np.array(self.token_stream), self.num_batch, 0)
        self.pointer = 0

    def next_batch(self):
        """Return the current batch and advance the pointer, wrapping
        around after the last batch."""
        ret = self.sequence_batch[self.pointer]
        self.pointer = (self.pointer + 1) % self.num_batch
        return ret

    def reset_pointer(self):
        """Rewind iteration to the first batch."""
        self.pointer = 0