test_lightweight.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""A lightweight end-to-end training test for the weight-light Transformer."""
import os
import time
import unittest

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # suppress TensorFlow C++ log noise

import numpy as np
import tensorflow as tf
import tensorlayer as tl
from sklearn.utils import shuffle
from tqdm import tqdm

# from models.transformer_v2 import Transformer
from weightLightModels.transformer import Transformer
from weightLightModels.model_params import TINY_PARAMS
# from models import model_params
from tests.utils import CustomTestCase
from utils import metrics
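# NOTE: TINY_PARAMS is assumed here to expose at least a `vocab_size`
# attribute (it is read in the test below); all other Transformer
# hyper-parameters are taken from weightLightModels.model_params as-is.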


class Model_SEQ2SEQ_Test(CustomTestCase):

    @classmethod
    def setUpClass(cls):
        cls.batch_size = 16
        cls.embedding_size = 32
        cls.dec_seq_length = 5
        # Synthetic parallel data: random token ids in [2, 50), with the
        # last position of every sequence set to 1 as the end-of-sequence id.
        cls.trainX = np.random.randint(low=2, high=50, size=(50, 10))
        cls.trainY = np.random.randint(low=2, high=50, size=(50, 11))
        cls.trainX[:, -1] = 1
        cls.trainY[:, -1] = 1
        # Parameters
        cls.src_len = len(cls.trainX)
        cls.tgt_len = len(cls.trainY)
        assert cls.src_len == cls.tgt_len
        cls.num_epochs = 1000
        cls.n_step = cls.src_len // cls.batch_size

    @classmethod
    def tearDownClass(cls):
        pass
    def test_basic_simpleSeq2Seq(self):
        model_ = Transformer(TINY_PARAMS)
        self.vocab_size = TINY_PARAMS.vocab_size
        optimizer = tf.optimizers.Adam(learning_rate=0.01)
        for epoch in range(self.num_epochs):
            model_.train()
            t = time.time()
            trainX, trainY = shuffle(self.trainX, self.trainY)
            total_loss, n_iter = 0, 0
            for X, Y in tqdm(tl.iterate.minibatches(inputs=trainX, targets=trainY,
                                                    batch_size=self.batch_size, shuffle=False),
                             total=self.n_step,
                             desc='Epoch[{}/{}]'.format(epoch + 1, self.num_epochs),
                             leave=False):
                with tf.GradientTape() as tape:
                    targets = Y
                    logits = model_(inputs=X, targets=Y)
                    # Collect token-level metrics, then compute the
                    # label-smoothed (factor 0.1) cross-entropy loss.
                    logits = metrics.MetricLayer(self.vocab_size)([logits, targets])
                    logits, loss = metrics.LossLayer(self.vocab_size, 0.1)([logits, targets])
                grad = tape.gradient(loss, model_.all_weights)
                optimizer.apply_gradients(zip(grad, model_.all_weights))
                total_loss += float(loss)
                n_iter += 1
            print(time.time() - t)  # seconds per epoch

            # Run the model in inference mode on two training samples
            # as a quick qualitative check.
            model_.eval()
            test_sample = trainX[0:2, :]
            prediction = model_(inputs=test_sample)
            print("Prediction: >>>>> ", prediction["outputs"],
                  "\n Target: >>>>> ", trainY[0:2, :], "\n\n")

            print('Epoch [{}/{}]: loss {:.4f}'.format(
                epoch + 1, self.num_epochs, total_loss / n_iter))


if __name__ == '__main__':
    unittest.main()
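
# Assuming this file lives at tests/test_lightweight.py and the imports above
# resolve from the repository root, the test can be run with either of:
#
#   python -m unittest tests.test_lightweight
#   python tests/test_lightweight.py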