RegularDecoder.py
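"""Standard attentional LSTM decoder with a copy mechanism.

RegularDecoder embeds target code tokens, runs them through an (optionally
input-fed) LSTM, and computes two global-attention distributions over the
encoder context: one for generating vocabulary tokens and one for copying
source tokens. ``predict`` wraps the decoder in beam search for inference.
"""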
import torch
from torch import nn
from GlobalAttention import GlobalAttention
from torch.autograd import Variable
from Beam import Beam
from decoders import DecoderState, Prediction
class RegularDecoder(nn.Module):
    def __init__(self, vocabs, opt):
        super(RegularDecoder, self).__init__()
        self.opt = opt
        self.vocabs = vocabs
        # Target-side (code token) embeddings.
        self.decoder_embedding = nn.Embedding(
            len(vocabs['code']),
            opt.tgt_word_vec_size,
            padding_idx=vocabs['code'].stoi['<blank>'])
        # Separate attention modules for the generation and copy distributions.
        self.attn = GlobalAttention(
            opt.rnn_size,
            attn_type='general',
            bias=opt.attbias)
        self.copy_attn = GlobalAttention(
            opt.rnn_size,
            attn_type='general',
            bias=opt.attbias)
        # With input feeding, the previous attention output is concatenated
        # onto the token embedding at every step, so the LSTM input is wider.
        self.decoder_rnn = nn.LSTM(
            input_size=(opt.tgt_word_vec_size + opt.rnn_size) if self.opt.input_feed else opt.tgt_word_vec_size,
            hidden_size=opt.rnn_size,
            num_layers=opt.dec_layers,
            dropout=opt.dropout,
            batch_first=True)
        self.decoder_dropout = nn.Dropout(opt.dropout)
    def forward(self, batch, context, context_lengths, decState):
        inp = Variable(batch['code'].cuda(), requires_grad=False)
        tgt_embeddings = self.decoder_embedding(inp)
        if self.opt.input_feed:
            attn_outputs, attn_scores, copy_attn_scores = [], [], []
            # Decode one target position at a time so the previous attention
            # output can be fed back into the LSTM input (input feeding).
            for code in tgt_embeddings.split(1, 1):
                rnn_output, prev_hidden = self.decoder_rnn(torch.cat((code, decState.input_feed), 2), decState.hidden)
                attn_output, attn_score = self.attn(rnn_output, context, context_lengths)
                attn_output = self.decoder_dropout(attn_output)
                # decState encapsulates the hidden state and the input feed.
                decState.update_state(prev_hidden, attn_output)
                attn_outputs.append(attn_output)
                attn_scores.append(attn_score)
                _, copy_attn_score = self.copy_attn(attn_output, context, context_lengths)
                copy_attn_scores.append(copy_attn_score)
            output = torch.cat(attn_outputs, 1)
            attn_scores = torch.cat(attn_scores, 1)
            copy_attn_scores = torch.cat(copy_attn_scores, 1)
        else:
            # Without input feeding the whole target sequence can be run
            # through the LSTM in a single call.
            rnn_output, prev_hidden = self.decoder_rnn(tgt_embeddings, decState.hidden)
            rnn_output = rnn_output.contiguous()
            attn_output, attn_scores = self.attn(rnn_output, context, context_lengths)
            attn_output = self.decoder_dropout(attn_output)
            decState.update_state(prev_hidden, attn_output)
            _, copy_attn_scores = self.copy_attn(attn_output, context, context_lengths)
            output = attn_output
        return output, attn_scores, copy_attn_scores
    def predict(self, enc_hidden, context, context_lengths, batch, beam_size, max_code_length, generator, replace_unk):
        decState = DecoderState(
            enc_hidden,
            Variable(torch.zeros(1, 1, self.opt.rnn_size).cuda(), requires_grad=False)
        )

        # Repeat everything beam_size times.
        def rvar(a, beam_size):
            return Variable(a.repeat(beam_size, 1, 1), volatile=True)

        context = rvar(context.data, beam_size)
        context_lengths = context_lengths.repeat(beam_size)
        decState.repeat_beam_size_times(beam_size)
        beam = Beam(beam_size,
                    cuda=True,
                    vocab=self.vocabs['code'])
        for i in range(max_code_length):
            if beam.done():
                break
            # Construct batch x beam_size next words: get all the pending
            # current beam words and arrange them for forward.
            # Uses the start symbol at the first step.
            inp = beam.getCurrentState()  # returns a batch holding the beam frontier
            # Turn any copied (out-of-vocabulary) words into UNKs.
            inp['code'] = inp['code'].masked_fill_(inp['code'].gt(len(self.vocabs["code"]) - 1), self.vocabs["code"].stoi['<unk>'])
            # Run one step; decState is updated automatically.
            decOut, attn, copy_attn = self.forward(inp, context, context_lengths, decState)
            # decOut: beam x rnn_size
            decOut = decOut.squeeze(1)
            out = generator(decOut, copy_attn.squeeze(1), batch['src_map'], inp).data
            out = out.unsqueeze(1)
            out = generator.collapseCopyScores(out, batch)
            out = out.log()
            # beam x tgt_vocab
            beam.advance(out[:, 0], attn.data[:, 0])
            decState.beam_update(beam.getCurrentOrigin(), beam_size)
        score, times, k = beam.getFinal()  # times is the length of the prediction
        hyp, att = beam.getHyp(times, k)
        goldNl = self.vocabs['seq2seq'].addStartOrEnd(batch['raw_seq2seq'][0])
        goldCode = self.vocabs['code'].addStartOrEnd(batch['raw_code'][0])
        predSent = self.buildTargetTokens(
            hyp,
            self.vocabs,
            goldNl,
            att,
            batch['seq2seq_vocab'][0],
            replace_unk
        )
        return Prediction(goldNl, ['None'], goldCode, predSent, score, att, self.vocabs['dataset'])
    def buildTargetTokens(self, pred, vocabs, src, attn, copy_vocab, replace_unk):
        vocab = vocabs['code']
        tokens = []
        for tok in pred:
            # Indices beyond the code vocabulary refer to copied source tokens
            # from the batch-specific copy vocabulary.
            if tok < len(vocab):
                tokens.append(vocab.itos[tok])
            else:
                tokens.append(copy_vocab.itos[tok - len(vocab)])
            if tokens[-1] == '</s>':
                tokens = tokens[:-1]
                break
        if replace_unk and attn is not None:
            # Replace <unk> tokens with the source token that received the
            # highest attention weight at that decoding step.
            for i in range(len(tokens)):
                if tokens[i] == '<unk>':
                    _, maxIndex = attn[i].max(0)
                    tokens[i] = src[maxIndex.item()]
        return tokens
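
# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original file). It only shows
# which fields this module reads from `opt`; the values are made-up
# placeholders, and construction of the decoder is left commented out because
# the real `vocabs` objects come from the project's data pipeline.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from argparse import Namespace

    opt = Namespace(
        tgt_word_vec_size=256,  # decoder embedding size
        rnn_size=512,           # LSTM hidden size; must match the encoder context size
        dec_layers=1,           # number of LSTM layers
        dropout=0.3,            # dropout inside the LSTM and after attention
        attbias=False,          # bias flag passed to GlobalAttention
        input_feed=1,           # feed the previous attention output back into the LSTM input
    )
    # vocabs['code'] is expected to behave like a torchtext-style vocab: len(), .stoi, .itos.
    # decoder = RegularDecoder(vocabs, opt).cuda()
    # output, attn_scores, copy_attn_scores = decoder(batch, context, context_lengths, dec_state)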