"""From built-in optimizer classes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import copy
from six.moves import zip
from keras import backend as K
from keras.utils.generic_utils import serialize_keras_object
from keras.utils.generic_utils import deserialize_keras_object
from keras.legacy import interfaces
from keras.optimizers import Optimizer
class AdamW(Optimizer):
"""AdamW optimizer.
Default parameters follow those provided in the original paper.
# Arguments
lr: float >= 0. Learning rate.
beta_1: float, 0 < beta < 1. Generally close to 1.
beta_2: float, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
weight_decay: float >= 0. Weight decay (L2 penalty) (default: 0.025).
batch_size: integer >= 1. Batch size used during training.
samples_per_epoch: integer >= 1. Number of samples (training points) per epoch.
epochs: integer >= 1. Total number of epochs for training.
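
    # Example
        A minimal usage sketch; `model` is assumed to be an already-built Keras
        model and the hyperparameter values are illustrative only. `batch_size`,
        `samples_per_epoch` and `epochs` should match the values used for
        training, since they only scale the normalized weight decay.

        ```python
        optimizer = AdamW(lr=0.001, weight_decay=0.025,
                          batch_size=128, samples_per_epoch=50000, epochs=10)
        model.compile(loss='categorical_crossentropy', optimizer=optimizer)
        ```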

    # References
        - [Adam - A Method for Stochastic Optimization](http://arxiv.org/abs/1412.6980v8)
        - [Fixing Weight Decay Regularization in Adam](https://arxiv.org/abs/1711.05101)
    """

    def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
                 epsilon=None, decay=0., weight_decay=0.025,
                 batch_size=1, samples_per_epoch=1,
                 epochs=1, **kwargs):
        super(AdamW, self).__init__(**kwargs)
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.lr = K.variable(lr, name='lr')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
            self.decay = K.variable(decay, name='decay')
            self.weight_decay = K.variable(weight_decay, name='weight_decay')
            self.batch_size = K.variable(batch_size, name='batch_size')
            self.samples_per_epoch = K.variable(samples_per_epoch, name='samples_per_epoch')
            self.epochs = K.variable(epochs, name='epochs')
        if epsilon is None:
            epsilon = K.epsilon()
        self.epsilon = epsilon
        self.initial_decay = decay
        # Keep the plain Python value (mirroring `initial_decay`) so that
        # `get_updates` can skip the decay term entirely when it is disabled.
        self.initial_weight_decay = weight_decay

    @interfaces.legacy_get_updates_support
    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr
        if self.initial_decay > 0:
            lr = lr * (1. / (1. + self.decay * K.cast(self.iterations,
                                                      K.dtype(self.decay))))

        t = K.cast(self.iterations, K.floatx()) + 1
        # Bias correction of the moment estimates, folded into the learning
        # rate as in the original Adam paper.
        lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                     (1. - K.pow(self.beta_1, t)))

        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        self.weights = [self.iterations] + ms + vs

        for p, g, m, v in zip(params, grads, ms, vs):
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)

            # Schedule multiplier eta_t = 1 for plain AdamW.
            # According to the AdamW paper, eta_t can be fixed, decay, or
            # also be used for warm restarts (AdamWR to come).
            eta_t = 1.
            p_t = p - eta_t * (lr_t * m_t / (K.sqrt(v_t) + self.epsilon))

            if self.initial_weight_decay > 0:
                # Normalized weight decay according to the AdamW paper:
                # w_d = weight_decay * sqrt(batch_size / (samples_per_epoch * epochs)).
                # (A small numeric illustration is given at the bottom of this file.)
                w_d = self.weight_decay * K.sqrt(
                    self.batch_size / (self.samples_per_epoch * self.epochs))
                p_t = p_t - eta_t * (w_d * p)

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))
            new_p = p_t

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(K.update(p, new_p))
        return self.updates

    def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'beta_1': float(K.get_value(self.beta_1)),
                  'beta_2': float(K.get_value(self.beta_2)),
                  'decay': float(K.get_value(self.decay)),
                  'weight_decay': float(K.get_value(self.weight_decay)),
                  'batch_size': int(K.get_value(self.batch_size)),
                  'samples_per_epoch': int(K.get_value(self.samples_per_epoch)),
                  'epochs': int(K.get_value(self.epochs)),
                  'epsilon': self.epsilon}
        base_config = super(AdamW, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
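

# A small worked illustration of the normalized weight decay used above.
# The hyperparameter values are illustrative assumptions, not recommendations:
# the same `weight_decay` setting yields a smaller per-update decay factor as
# the total number of updates (samples_per_epoch * epochs / batch_size) grows.
if __name__ == '__main__':
    import math

    weight_decay = 0.025
    batch_size = 128
    samples_per_epoch = 50000
    epochs = 10
    # w_d = weight_decay * sqrt(batch_size / (samples_per_epoch * epochs))
    w_d = weight_decay * math.sqrt(batch_size / (samples_per_epoch * epochs))
    print('per-update weight decay factor: %.2e' % w_d)  # ~4.0e-04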