# models.py
import torch
from torch import nn
class BaselineDNN(nn.Module):
"""
1. We embed the words in the input texts using an embedding layer
2. We compute the min, mean, max of the word embeddings in each sample
and use it as the feature representation of the sequence.
4. We project with a linear layer the representation
to the number of classes.ngth)
"""
    def __init__(self, output_size, embeddings, trainable_emb=False):
        """
        Args:
            output_size (int): the number of classes
            embeddings (np.ndarray): the 2D matrix with the pretrained
                word embeddings
            trainable_emb (bool): train (finetune) or freeze the weights
                of the embedding layer
        """
        super(BaselineDNN, self).__init__()

        # 1 - define the embedding layer
        ...  # EX4

        # 2 - initialize the weights of our Embedding layer
        # from the pretrained word embeddings
        ...  # EX4

        # 3 - define if the embedding layer will be frozen or finetuned
        ...  # EX4

        # 4 - define a non-linear transformation of the representations
        ...  # EX5

        # 5 - define the final Linear layer which maps
        # the representations to the classes
        ...  # EX5
    def forward(self, x, lengths):
        """
        This is the heart of the model. This function defines how the data
        passes through the network.

        Returns: the logits for each class
        """
        # 1 - embed the words, using the embedding layer
        embeddings = ...  # EX6

        # 2 - construct a sentence representation out of the word embeddings
        representations = ...  # EX6

        # 3 - transform the representations to new ones
        representations = ...  # EX6

        # 4 - project the representations to classes using a linear layer
        logits = ...  # EX6

        return logits
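

# ---------------------------------------------------------------------------
# The class below is a minimal, illustrative sketch of how the EX4-EX6
# placeholders above could be completed. It assumes `embeddings` is a numpy
# array of shape (vocab_size, emb_dim) and that the sentence representation
# is the concatenation of the min, mean and max of the unpadded word
# embeddings, as the docstring describes. It is not the official solution;
# the class name `BaselineDNNExample` and the layer names are introduced
# here purely for illustration.
# ---------------------------------------------------------------------------
class BaselineDNNExample(nn.Module):
    def __init__(self, output_size, embeddings, trainable_emb=False):
        super().__init__()
        num_embeddings, dim = embeddings.shape

        # 1-3: embedding layer initialized from the pretrained matrix and
        # optionally frozen (requires_grad=False keeps the weights fixed)
        self.embedding = nn.Embedding(num_embeddings, dim)
        self.embedding.weight.data.copy_(torch.from_numpy(embeddings))
        self.embedding.weight.requires_grad = trainable_emb

        # 4: non-linear transformation of the pooled representation
        # (min + mean + max pooling -> input size is 3 * dim)
        self.hidden = nn.Linear(3 * dim, dim)
        self.relu = nn.ReLU()

        # 5: final projection to the output classes
        self.output = nn.Linear(dim, output_size)

    def forward(self, x, lengths):
        # 1: (batch, seq_len) -> (batch, seq_len, dim)
        embeddings = self.embedding(x)

        # mask out the padded positions so they do not affect the pooling
        mask = (torch.arange(x.size(1), device=x.device)[None, :]
                < lengths[:, None]).unsqueeze(-1)

        # 2: min / mean / max pooling over the real tokens of each sample
        mean_pool = (embeddings * mask).sum(dim=1) / lengths.float().unsqueeze(-1)
        max_pool = embeddings.masked_fill(~mask, float("-inf")).max(dim=1).values
        min_pool = embeddings.masked_fill(~mask, float("inf")).min(dim=1).values
        representations = torch.cat([min_pool, mean_pool, max_pool], dim=1)

        # 3: non-linear transformation
        representations = self.relu(self.hidden(representations))

        # 4: project to class logits
        logits = self.output(representations)
        return logits


# Hypothetical usage (shapes chosen arbitrarily for illustration):
#   emb = np.random.rand(5000, 50).astype("float32")
#   model = BaselineDNNExample(output_size=3, embeddings=emb)
#   x = torch.randint(0, 5000, (8, 20))      # batch of 8 padded sequences
#   lengths = torch.randint(1, 21, (8,))     # true length of each sequence
#   logits = model(x, lengths)               # -> shape (8, 3)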