release Binary Nets, release 1.8.1 (#423)
* update bnn cnn, htanh

* bconv example

* release binary

* release 1.8.2
zsdonghao authored and wagamamaz committed Mar 16, 2018
1 parent 46df7a7 commit ba71d18
Showing 8 changed files with 98 additions and 21 deletions.
6 changes: 3 additions & 3 deletions docs/conf.py
@@ -67,9 +67,9 @@
# built documents.
#
# The short X.Y version.
-version = '1.8.1'
+version = '1.8.2'
# The full version, including alpha/beta/rc tags.
-release = '1.8.1'
+release = '1.8.2'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -143,7 +143,7 @@
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
-# html_title = 'TensorLayer v1.8.1'
+# html_title = 'TensorLayer v1.8.2'

# A shorter title for the navigation bar. Default is the same as html_title.
#
5 changes: 5 additions & 0 deletions docs/modules/activation.rst
@@ -31,6 +31,7 @@ For more complex activations, the TensorFlow API will be required.
leaky_relu
swish
sign
+hard_tanh
pixel_wise_softmax

Identity
@@ -53,6 +54,10 @@ Sign
---------------------
.. autofunction:: sign

+Hard Tanh
+---------------------
+.. autofunction:: hard_tanh

Pixel-wise softmax
--------------------
.. autofunction:: pixel_wise_softmax
44 changes: 38 additions & 6 deletions docs/modules/layers.rst
Expand Up @@ -152,7 +152,7 @@ At the end, for a layer with parameters, we also append the parameters into ``al
name ='simple_dense',
):
# check layer name (fixed)
Layer.__init__(self, name=name)
Layer.__init__(self, layer=layer, name=name)
# the input of this layer is the output of previous layer (fixed)
self.inputs = layer.outputs
Expand All @@ -169,11 +169,6 @@ At the end, for a layer with parameters, we also append the parameters into ``al
# tensor operation
self.outputs = act(tf.matmul(self.inputs, W) + b)
# get stuff from previous layer (fixed)
self.all_layers = list(layer.all_layers)
self.all_params = list(layer.all_params)
self.all_drop = dict(layer.all_drop)
# update layer (customized)
self.all_layers.extend( [self.outputs] )
self.all_params.extend( [W, b] )
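
Putting the two hunks together, a complete custom layer under the new convention would look roughly like this (an illustrative sketch assuming TensorLayer 1.8.x; the class name and defaults are hypothetical — the base class now copies ``all_layers``/``all_params``/``all_drop`` from the previous layer, which is why the copy block above was deleted):

    import tensorflow as tf
    from tensorlayer.layers import Layer

    class MyDenseLayer(Layer):
        def __init__(self, layer=None, n_units=100, act=tf.nn.relu, name='simple_dense'):
            # check layer name (fixed); copying from the previous layer is
            # now handled by the base class via layer=layer
            Layer.__init__(self, layer=layer, name=name)
            # the input of this layer is the output of the previous layer (fixed)
            self.inputs = layer.outputs
            # operation (customized)
            n_in = int(self.inputs.get_shape()[-1])
            with tf.variable_scope(name):
                W = tf.get_variable(name='W', shape=(n_in, n_units))
                b = tf.get_variable(name='b', shape=(n_units,))
                self.outputs = act(tf.matmul(self.inputs, W) + b)
            # update layer (customized)
            self.all_layers.extend([self.outputs])
            self.all_params.extend([W, b])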
@@ -336,6 +331,11 @@ Layer list

SlimNetsLayer

+BinaryDenseLayer
+BinaryConv2d
+SignLayer
+ScaleLayer

PReluLayer

MultiplexerLayer
@@ -799,6 +799,38 @@ see `Slim-model <https://github.com/tensorflow/models/tree/master/research/slim>
.. autoclass:: KerasLayer


+Binary Nets
+------------------
+
+Read Me
+^^^^^^^^^^^^^^
+
+This is an experimental API package for building binary nets.
+At the moment we use matrix multiplication rather than add-and-subtract and bit-count operations, so these APIs will not speed up inference.
+For production, you can train the model with TensorLayer and deploy it with a customized C/C++ implementation (we may provide an extra C/C++ binary-net framework that can load models from TensorLayer).
+
+Note that these experimental APIs may change at any time.
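
As a quick orientation, here is a minimal sketch of how these layers compose, adapted from this commit's example/tutorial_binarynet_mnist_cnn.py (assumes TensorFlow 1.x and a placeholder ``x`` of shape [batch, 28, 28, 1]):

    import tensorflow as tf
    import tensorlayer as tl

    x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])

    net = tl.layers.InputLayer(x, name='input')
    # the first layer sees real-valued inputs; only its weights are binarized
    net = tl.layers.BinaryConv2d(net, 32, (5, 5), (1, 1), padding='SAME', name='bcnn1')
    net = tl.layers.MaxPool2d(net, (2, 2), (2, 2), padding='SAME', name='pool1')
    net = tl.layers.BatchNormLayer(net, act=tl.act.htanh, is_train=True, name='bn1')
    net = tl.layers.SignLayer(net)  # binarize activations for the next layer
    net = tl.layers.FlattenLayer(net, name='flatten')
    net = tl.layers.BinaryDenseLayer(net, 10, name='bout')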

+Binarized Dense
+^^^^^^^^^^^^^^^^^
+.. autoclass:: BinaryDenseLayer
+
+
+Binarized Conv2d
+^^^^^^^^^^^^^^^^^^
+.. autoclass:: BinaryConv2d
+
+
+Sign
+^^^^^^^^^^^^^^
+.. autoclass:: SignLayer
+
+
+Scale
+^^^^^^^^^^^^^^
+.. autoclass:: ScaleLayer


Parametric activation layer
---------------------------

23 changes: 14 additions & 9 deletions example/tutorial_binarynet_mnist_cnn.py
@@ -7,6 +7,7 @@

X_train, y_train, X_val, y_val, X_test, y_test = \
tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))
+# X_train, y_train, X_test, y_test = tl.files.load_cropped_svhn(include_extra=False)

sess = tf.InteractiveSession()

@@ -17,25 +18,29 @@


def model(x, is_train=True, reuse=False):
+    # In a BNN, the inputs of all layers are binary, except for the first layer.
+    # ref: https://github.com/itayhubara/BinaryNet.tf/blob/master/models/BNN_cifar10.py
    with tf.variable_scope("binarynet", reuse=reuse):
        net = tl.layers.InputLayer(x, name='input')
        net = tl.layers.BinaryConv2d(net, 32, (5, 5), (1, 1), padding='SAME', name='bcnn1')
        net = tl.layers.MaxPool2d(net, (2, 2), (2, 2), padding='SAME', name='pool1')
+        net = tl.layers.BatchNormLayer(net, act=tl.act.htanh, is_train=is_train, name='bn1')

-        net = tl.layers.BatchNormLayer(net, is_train=is_train, name='bn')
-        net = tl.layers.SignLayer(net, name='sign2')
+        net = tl.layers.SignLayer(net)
        net = tl.layers.BinaryConv2d(net, 64, (5, 5), (1, 1), padding='SAME', name='bcnn2')
        net = tl.layers.MaxPool2d(net, (2, 2), (2, 2), padding='SAME', name='pool2')
+        net = tl.layers.BatchNormLayer(net, act=tl.act.htanh, is_train=is_train, name='bn2')

-        net = tl.layers.SignLayer(net, name='sign2')
        net = tl.layers.FlattenLayer(net, name='flatten')
-        net = tl.layers.DropoutLayer(net, 0.5, True, is_train, name='drop1')
-        # net = tl.layers.DenseLayer(net, 256, act=tf.nn.relu, name='dense')
+        net = tl.layers.DropoutLayer(net, 0.8, True, is_train, name='drop1')
+        net = tl.layers.SignLayer(net)
        net = tl.layers.BinaryDenseLayer(net, 256, name='dense')
-        net = tl.layers.DropoutLayer(net, 0.5, True, is_train, name='drop2')
-        # net = tl.layers.DenseLayer(net, 10, act=tf.identity, name='output')
+        net = tl.layers.BatchNormLayer(net, act=tl.act.htanh, is_train=is_train, name='bn3')

+        net = tl.layers.DropoutLayer(net, 0.8, True, is_train, name='drop2')
+        net = tl.layers.SignLayer(net)
        net = tl.layers.BinaryDenseLayer(net, 10, name='bout')
        # net = tl.layers.ScaleLayer(net, name='scale')
        net = tl.layers.BatchNormLayer(net, is_train=is_train, name='bno')
    return net


@@ -66,7 +71,7 @@ def model(x, is_train=True, reuse=False):
n_epoch = 200
print_freq = 5

-# print(sess.run(net_test.all_params)) # print real value of parameters
+# print(sess.run(net_test.all_params)) # print real values of parameters

for epoch in range(n_epoch):
start_time = time.time()
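
The elided epoch body follows the usual TensorLayer pattern; a minimal sketch (an assumption, using the placeholders ``x`` and ``y_``, the ``train_op``, and the ``batch_size`` defined earlier in the file):

    for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
        # DropoutLayer was built with is_fix=True, so dropout is already baked
        # into the training graph; no all_drop feeds are required here.
        sess.run(train_op, feed_dict={x: X_train_a, y_: y_train_a})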
2 changes: 1 addition & 1 deletion setup.py
@@ -10,7 +10,7 @@

setup(
name="tensorlayer",
version="1.8.1",
version="1.8.2",
include_package_data=True,
author='TensorLayer Contributors',
author_email='[email protected]',
2 changes: 1 addition & 1 deletion tensorlayer/__init__.py
@@ -23,7 +23,7 @@
act = activation
vis = visualize

__version__ = "1.8.1"
__version__ = "1.8.2"

global_flag = {}
global_dict = {}
23 changes: 23 additions & 0 deletions tensorlayer/activation.py
@@ -168,6 +168,28 @@ def sign(x): # https://github.com/AngusG/tensorflow-xnor-bnn/blob/master/models
# return tf.sign(x), grad


+def hard_tanh(x, name='htanh'):
+    """Hard tanh activation function.
+
+    A ramp function with a lower bound of -1 and an upper bound of 1; the shortcut is ``htanh``.
+
+    Parameters
+    ----------
+    x : Tensor
+        input.
+    name : str
+        The function name (optional).
+
+    Returns
+    -------
+    Tensor
+        A ``Tensor`` in the same type as ``x``.
+
+    """
+    # with tf.variable_scope("hard_tanh"):
+    return tf.clip_by_value(x, -1, 1, name=name)
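
A quick sanity check of the new function (a minimal sketch assuming TensorFlow 1.x; ``htanh`` is the alias added at the bottom of this file):

    import tensorflow as tf
    import tensorlayer as tl

    x = tf.constant([-2.0, -0.5, 0.0, 0.5, 2.0])
    y = tl.act.htanh(x)  # equivalent to tf.clip_by_value(x, -1, 1)

    with tf.Session() as sess:
        print(sess.run(y))  # [-1.  -0.5  0.   0.5  1. ]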


@deprecated("2018-06-30", "This API will be deprecated soon as tf.nn.softmax can do the same thing.")
def pixel_wise_softmax(x, name='pixel_wise_softmax'):
"""Return the softmax outputs of images, every pixels have multiple label, the sum of a pixel is 1.
@@ -204,3 +226,4 @@ def pixel_wise_softmax(x, name='pixel_wise_softmax'):
# Alias
linear = identity
lrelu = leaky_relu
+htanh = hard_tanh
14 changes: 13 additions & 1 deletion tensorlayer/layers/binary.py
@@ -5,9 +5,9 @@

__all__ = [
    'BinaryDenseLayer',
+   'BinaryConv2d',
    'SignLayer',
    'ScaleLayer',
-   'BinaryConv2d',
]


@@ -142,6 +142,18 @@ class BinaryConv2d(Layer):
name : str
A unique layer name.
+    Examples
+    ---------
+    >>> net = tl.layers.InputLayer(x, name='input')
+    >>> net = tl.layers.BinaryConv2d(net, 32, (5, 5), (1, 1), padding='SAME', name='bcnn1')
+    >>> net = tl.layers.MaxPool2d(net, (2, 2), (2, 2), padding='SAME', name='pool1')
+    >>> net = tl.layers.BatchNormLayer(net, act=tl.act.htanh, is_train=is_train, name='bn1')
+    ...
+    >>> net = tl.layers.SignLayer(net)
+    >>> net = tl.layers.BinaryConv2d(net, 64, (5, 5), (1, 1), padding='SAME', name='bcnn2')
+    >>> net = tl.layers.MaxPool2d(net, (2, 2), (2, 2), padding='SAME', name='pool2')
+    >>> net = tl.layers.BatchNormLayer(net, act=tl.act.htanh, is_train=is_train, name='bn2')
"""

def __init__(
