diff --git a/.travis.yml b/.travis.yml index 533d90437..f5675dc2c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,6 +20,7 @@ env: # Backward Compatibility is ensured for releases less than 1 year old. # https://pypi.org/project/tensorflow/#history matrix: + - _TF_VERSION=1.9.0rc0 # Remove on Apr 28, 2019 - _TF_VERSION=1.8.0 # Remove on Apr 28, 2019 - _TF_VERSION=1.7.1 # Remove on May 08, 2019 - _TF_VERSION=1.7.0 # Remove on Mar 29, 2019 diff --git a/CHANGELOG.md b/CHANGELOG.md index 2af219c40..d4bdbe61f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -82,6 +82,7 @@ To release a new version, please update the changelog as followed: - Decorator: - `tl.decorators` API created including `deprecated_alias` and `private_method` (PR #660) - `tl.decorators` API enriched with `protected_method` (PR #675) + - `tl.decorators` API enriched with `deprecated` directly raising warning and modifying documentation (PR #691) - Docker: - Containers for each release and for each PR merged on master built (PR #648) - Containers built in the following configurations (PR #648): - py2 + cpu - py2 + gpu - py3 + cpu - py3 + gpu - Documentation: - - Clean README (PR #677) + - Clean README.md (PR #677) - Release semantic version added on index page (PR #633) - Optimizers page added (PR #636) - `AMSGrad` added on Optimizers page added (PR #636) @@ -123,7 +124,7 @@ To release a new version, please update the changelog as followed: - The document of LambdaLayer for linking it with ElementwiseLambdaLayer (PR #587) - RTD links point to stable documentation instead of latest used for development (PR #633) - TF versions older than 1.6.0 are officially unsupported and raise an exception (PR #644) -- Readme Badges Updated with Support Python and Tensorflow Versions (PR #644) +- README.md Badges Updated with Support Python and Tensorflow Versions (PR #644) - TL logging API has been consistent with TF logging API and thread-safe (PR #645) - Relative Imports changed for absolute imports (PR #657) - `tl.files` refactored into a directory with numerous files (PR #657) @@ -140,6 +141,7 @@ To release a new version, please update the changelog as followed: - Input Layers have been removed from `tl.layers.core` and added to `tl.layers.inputs` (PR #675) - Input Layers are now considered as true layers in the graph (they represent a placeholder), unittests have been updated (PR #675) - Layer API is simplified, with automatic feeding `prev_layer` into `self.inputs` (PR #675) +- Complete Documentation Refactoring and Reorganization (namely Layer APIs) (PR #691) ### Deprecated - `tl.layers.TimeDistributedLayer` argument `args` is deprecated in favor of `layer_args` (PR #667) @@ -177,7 +179,7 @@ To release a new version, please update the changelog as followed: ### Contributors - @lgarithm: #563 -- @DEKHTIARJonathan: #573 #574 #575 #580 #633 #635 #636 #639 #644 #645 #648 #657 #667 #658 #659 #660 #661 #666 #667 #672 #675 #683 #686 #687 #690 #692 +- @DEKHTIARJonathan: #573 #574 #575 #580 #633 #635 #636 #639 #644 #645 #648 #657 #667 #658 #659 #660 #661 #666 #667 #672 #675 #683 #686 #687 #690 #691 #692 - @2wins: #560 #566 #662 - @One-sixth: #579 - @zsdonghao: #587 #588 #639 #685 #697 @@ -208,7 +210,7 @@ To release a new version, please update the changelog as followed: - py3 + cpu - py3 + gpu - Documentation: - - Clean README (PR #677) + - Clean README.md (PR #677) - Release semantic version added on index page (PR #633) - Optimizers page added (PR #636) - `AMSGrad` added on 
Optimizers page added (PR #636) @@ -236,7 +238,7 @@ To release a new version, please update the changelog as followed: - The document of LambdaLayer for linking it with ElementwiseLambdaLayer (PR #587) - RTD links point to stable documentation instead of latest used for development (PR #633) - TF versions older than 1.6.0 are officially unsupported and raise an exception (PR #644) -- Readme Badges Updated with Support Python and Tensorflow Versions (PR #644) +- README.md Badges Updated with Support Python and Tensorflow Versions (PR #644) - TL logging API has been consistent with TF logging API and thread-safe (PR #645) - Relative Imports changed for absolute imports (PR #657) - `tl.files` refactored into a directory with numerous files (PR #657) @@ -337,5 +339,5 @@ To release a new version, please update the changelog as followed: @zsdonghao @luomai @DEKHTIARJonathan [Unreleased]: https://github.com/tensorlayer/tensorlayer/compare/1.8.5...master -[1.8.6]: https://github.com/tensorlayer/tensorlayer/compare/1.8.6rc5...1.8.5 +[1.8.6]: https://github.com/tensorlayer/tensorlayer/compare/1.8.6rc6...1.8.5 [1.8.5]: https://github.com/tensorlayer/tensorlayer/compare/1.8.4...1.8.5 \ No newline at end of file diff --git a/README.md b/README.md index 8a4ba3e1c..e9c812520 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ ![PyPI Stable Version](http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/github/release/tensorlayer/tensorlayer.svg?label=PyPI%20-%20Release) ![PyPI RC Version](http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/github/release/tensorlayer/tensorlayer/all.svg?label=PyPI%20-%20Pre-Release) -[![Github commits (since latest release)](http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/github/commits-since/tensorlayer/tensorlayer/latest.svg)](https://github.com/tensorlayer/tensorlayer/compare/1.8.6rc5...master) +[![Github commits (since latest release)](http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/github/commits-since/tensorlayer/tensorlayer/latest.svg)](https://github.com/tensorlayer/tensorlayer/compare/1.8.6rc6...master) [![PyPI - Python Version](http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/pypi/pyversions/tensorlayer.svg)](https://pypi.org/project/tensorlayer/) [![Supported TF Version](http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/badge/tensorflow-1.6.0+-blue.svg)](https://github.com/tensorflow/tensorflow/releases) diff --git a/README.rst b/README.rst index 840a2ad96..051a398da 100644 --- a/README.rst +++ b/README.rst @@ -1,95 +1,14 @@ -.. raw:: html +|TENSORLAYER-LOGO| - -
- -
-
+|Awesome| |Documentation-EN| |Documentation-CN| |Book-CN| |Downloads| -.. image:: https://awesome.re/mentioned-badge.svg - :target: https://github.com/tensorlayer/awesome-tensorlayer +|PyPI| |PyPI-Prerelease| |Commits-Since| |Python| |TensorFlow| -.. image:: https://img.shields.io/badge/documentation-english-blue.svg - :target: https://tensorlayer.readthedocs.io/ +|Travis| |Docker| |RTD-EN| |RTD-CN| |PyUP| |Docker-Pulls| |Code-Quality| -.. image:: https://img.shields.io/badge/documentation-中文-blue.svg - :target: https://tensorlayercn.readthedocs.io/ -.. image:: https://img.shields.io/badge/book-中文-blue.svg - :target: http://www.broadview.com.cn/book/5059/ - -.. image:: http://pepy.tech/badge/tensorlayer - :target: http://pepy.tech/project/tensorlayer - - -.. raw:: html - -
- -.. raw:: html - - -.. image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/github/release/tensorlayer/tensorlayer.svg?label=PyPI%20-%20Release - :target: https://pypi.org/project/tensorlayer/ - -.. image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/github/release/tensorlayer/tensorlayer/all.svg?label=PyPI%20-%20Pre-Release - :target: https://pypi.org/project/tensorlayer/ - -.. image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/github/commits-since/tensorlayer/tensorlayer/latest.svg - :target: https://github.com/tensorlayer/tensorlayer/compare/1.8.6rc5...master - -.. image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/pypi/pyversions/tensorlayer.svg - :target: https://pypi.org/project/tensorlayer/ - -.. image:: https://img.shields.io/badge/tensorflow-1.6.0+-blue.svg - :target: https://github.com/tensorflow/tensorflow/releases - - -.. raw:: html - -
- -.. raw:: html - - - -.. image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/travis/tensorlayer/tensorlayer/master.svg?label=Travis - :target: https://travis-ci.org/tensorlayer/tensorlayer - -.. image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/circleci/project/github/tensorlayer/tensorlayer/master.svg?label=Docker%20Build - :target: https://circleci.com/gh/tensorlayer/tensorlayer/tree/master - -.. image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/readthedocs/tensorlayer/latest.svg?label=ReadTheDocs-EN - :target: https://tensorlayer.readthedocs.io/ - -.. image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/readthedocs/tensorlayercn/latest.svg?label=ReadTheDocs-CN - :target: https://tensorlayercn.readthedocs.io/ - -.. image:: https://pyup.io/repos/github/tensorlayer/tensorlayer/shield.svg - :target: https://pyup.io/repos/github/tensorlayer/tensorlayer/ - -.. image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/docker/pulls/tensorlayer/tensorlayer.svg - :target: https://hub.docker.com/r/tensorlayer/tensorlayer/ - -.. image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/codacy/grade/ca2a29ddcf7445588beff50bee5406d9.svg - :target: https://app.codacy.com/app/tensorlayer/tensorlayer - - - -.. raw:: html - -

- - -
- -
-
- -
- -.. raw:: html +|JOIN-SLACK-LOGO| TensorLayer is a novel TensorFlow-based deep learning and reinforcement learning library designed for researchers and engineers. It provides a @@ -232,3 +151,47 @@ License ======= TensorLayer is released under the Apache 2.0 license. + + +.. |TENSORLAYER-LOGO| image:: https://raw.githubusercontent.com/tensorlayer/tensorlayer/master/img/tl_transparent_logo.png + :target: https://tensorlayer.readthedocs.io/ +.. |JOIN-SLACK-LOGO| image:: https://raw.githubusercontent.com/tensorlayer/tensorlayer/master/img/join_slack.png + :target: https://join.slack.com/t/tensorlayer/shared_invite/enQtMjUyMjczMzU2Njg4LWI0MWU0MDFkOWY2YjQ4YjVhMzI5M2VlZmE4YTNhNGY1NjZhMzUwMmQ2MTc0YWRjMjQzMjdjMTg2MWQ2ZWJhYzc + +.. |Awesome| image:: https://awesome.re/mentioned-badge.svg + :target: https://github.com/tensorlayer/awesome-tensorlayer +.. |Documentation-EN| image:: https://img.shields.io/badge/documentation-english-blue.svg + :target: https://tensorlayer.readthedocs.io/ +.. |Documentation-CN| image:: https://img.shields.io/badge/documentation-中文-blue.svg + :target: https://tensorlayercn.readthedocs.io/ +.. |Book-CN| image:: https://img.shields.io/badge/book-中文-blue.svg + :target: http://www.broadview.com.cn/book/5059/ +.. |Downloads| image:: http://pepy.tech/badge/tensorlayer + :target: http://pepy.tech/project/tensorlayer + + +.. |PyPI| image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/github/release/tensorlayer/tensorlayer.svg?label=PyPI%20-%20Release + :target: https://pypi.org/project/tensorlayer/ +.. |PyPI-Prerelease| image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/github/release/tensorlayer/tensorlayer/all.svg?label=PyPI%20-%20Pre-Release + :target: https://pypi.org/project/tensorlayer/ +.. |Commits-Since| image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/github/commits-since/tensorlayer/tensorlayer/latest.svg + :target: https://github.com/tensorlayer/tensorlayer/compare/1.8.6rc5...master +.. |Python| image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/pypi/pyversions/tensorlayer.svg + :target: https://pypi.org/project/tensorlayer/ +.. |TensorFlow| image:: https://img.shields.io/badge/tensorflow-1.6.0+-blue.svg + :target: https://github.com/tensorflow/tensorflow/releases + +.. |Travis| image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/travis/tensorlayer/tensorlayer/master.svg?label=Travis + :target: https://travis-ci.org/tensorlayer/tensorlayer +.. |Docker| image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/circleci/project/github/tensorlayer/tensorlayer/master.svg?label=Docker%20Build + :target: https://circleci.com/gh/tensorlayer/tensorlayer/tree/master +.. |RTD-EN| image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/readthedocs/tensorlayer/latest.svg?label=ReadTheDocs-EN + :target: https://tensorlayer.readthedocs.io/ +.. |RTD-CN| image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/readthedocs/tensorlayercn/latest.svg?label=ReadTheDocs-CN + :target: https://tensorlayercn.readthedocs.io/ +.. |PyUP| image:: https://pyup.io/repos/github/tensorlayer/tensorlayer/shield.svg + :target: https://pyup.io/repos/github/tensorlayer/tensorlayer/ +.. |Docker-Pulls| image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/docker/pulls/tensorlayer/tensorlayer.svg + :target: https://hub.docker.com/r/tensorlayer/tensorlayer/ +.. 
|Code-Quality| image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/codacy/grade/ca2a29ddcf7445588beff50bee5406d9.svg + :target: https://app.codacy.com/app/tensorlayer/tensorlayer diff --git a/docs/index.rst b/docs/index.rst index 1fabe087a..06919e582 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -12,7 +12,7 @@ Welcome to TensorLayer **Good News:** We won the **Best Open Source Software Award** `@ACM Multimedia (MM) 2017 `_. `TensorLayer`_ is a Deep Learning (DL) and Reinforcement Learning (RL) library extended from `Google TensorFlow `_. It provides popular DL and RL modules that can be easily customized and assembled for tackling real-world machine learning problems. -More details can be found `here `_. +More details can be found `here `_. .. note:: If you have problems reading the docs online, you can download the repository @@ -28,12 +28,14 @@ to the library as a developer. .. toctree:: :maxdepth: 2 + :caption: Starting with TensorLayer user/installation user/tutorial user/example - user/development - user/more + user/contributing + user/get_involved + user/faq API Reference ------------- @@ -43,6 +45,7 @@ method, this part of the documentation is for you. .. toctree:: :maxdepth: 2 + :caption: Stable Functionalities modules/activation modules/array_ops @@ -50,14 +53,19 @@ method, this part of the documentation is for you. modules/distributed modules/files modules/iterate + modules/layers modules/models modules/nlp - modules/layers modules/optimizers modules/prepro modules/rein modules/utils modules/visualize + +.. toctree:: + :maxdepth: 2 + :caption: Unstable Functionalities + modules/db @@ -68,6 +76,7 @@ TensorLayer provides a handy command-line tool `tl` to perform some common tasks .. toctree:: :maxdepth: 2 + :caption: Command Line Interface modules/cli @@ -80,5 +89,5 @@ Indices and tables * :ref:`search` -.. _GitHub: https://github.com/zsdonghao/tensorlayer -.. _TensorLayer: https://github.com/zsdonghao/tensorlayer/ +.. _GitHub: https://github.com/tensorlayer/tensorlayer +.. _TensorLayer: https://github.com/tensorlayer/tensorlayer/ diff --git a/docs/modules/distributed.rst b/docs/modules/distributed.rst index c3d96be5d..fd3917d9b 100644 --- a/docs/modules/distributed.rst +++ b/docs/modules/distributed.rst @@ -4,7 +4,7 @@ API - Distributed Training (Alpha release - usage might change later) Helper sessions and methods to run distributed training. -Check this `minst example `_. +Check this `mnist example `_. .. automodule:: tensorlayer.distributed diff --git a/docs/modules/layers.rst b/docs/modules/layers.rst index 10b05635e..fcef8db9d 100644 --- a/docs/modules/layers.rst +++ b/docs/modules/layers.rst @@ -4,6 +4,33 @@ API - Layers .. automodule:: tensorlayer.layers +Name Scope and Sharing Parameters +--------------------------------- + +These functions help you to reuse parameters across different inference graphs and to get a +list of parameters by a given name. For more about sharing parameters in TensorFlow, click `here `__. + +Get variables with name +^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. autofunction:: get_variables_with_name + +Get layers with name +^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. autofunction:: get_layers_with_name + +Enable layer name reuse +^^^^^^^^^^^^^^^^^^^^^^^^^ +.. autofunction:: set_name_reuse + +Print variables +^^^^^^^^^^^^^^^^^^ +.. autofunction:: print_all_variables + +Initialize variables +^^^^^^^^^^^^^^^^^^^^^^ +.. 
autofunction:: initialize_global_variables + + Understanding the Basic Layer ----------------------------- @@ -95,129 +122,9 @@ In case for evaluating and testing, you can disable all dropout layers as follow For more details, please read the MNIST examples in the example folder. - -Customizing Layers ------------------- - -A Simple Layer -^^^^^^^^^^^^^^ - -To implement a custom layer in TensorLayer, you will have to write a Python class -that subclasses Layer and implement the ``outputs`` expression. - -The following is an example implementation of a layer that multiplies its input by 2: - -.. code-block:: python - - class DoubleLayer(Layer): - def __init__( - self, - layer = None, - name ='double_layer', - ): - # check layer name (fixed) - Layer.__init__(self, layer=layer, name=name) - - # the input of this layer is the output of previous layer (fixed) - self.inputs = layer.outputs - - # operation (customized) - self.outputs = self.inputs * 2 - - # update layer (customized) - self.all_layers.append(self.outputs) - - -Your Dense Layer -^^^^^^^^^^^^^^^^ - -Before creating your own TensorLayer layer, let's have a look at the Dense layer. -It creates a weight matrix and a bias vector if not exists, and then implements -the output expression. -At the end, for a layer with parameters, we also append the parameters into ``all_params``. - -.. code-block:: python - - class MyDenseLayer(Layer): - def __init__( - self, - layer = None, - n_units = 100, - act = tf.nn.relu, - name ='simple_dense', - ): - # check layer name (fixed) - Layer.__init__(self, layer=layer, name=name) - - # the input of this layer is the output of previous layer (fixed) - self.inputs = layer.outputs - - # print out info (customized) - print(" MyDenseLayer %s: %d, %s" % (self.name, n_units, act)) - - # operation (customized) - n_in = int(self.inputs._shape[-1]) - with tf.variable_scope(name) as vs: - # create new parameters - W = tf.get_variable(name='W', shape=(n_in, n_units)) - b = tf.get_variable(name='b', shape=(n_units)) - # tensor operation - self.outputs = act(tf.matmul(self.inputs, W) + b) - - # update layer (customized) - self.all_layers.extend( [self.outputs] ) - self.all_params.extend( [W, b] ) - - -Modifying Pre-train Behaviour -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Greedy layer-wise pretraining is an important task for deep neural network -initialization, while there are many kinds of pre-training methods according -to different network architectures and applications. - -For example, the pre-train process of `Vanilla Sparse Autoencoder `_ -can be implemented by using KL divergence (for sigmoid) as the following code, -but for `Deep Rectifier Network `_, -the sparsity can be implemented by using the L1 regularization of activation output. - -.. code-block:: python - - # Vanilla Sparse Autoencoder - beta = 4 - rho = 0.15 - p_hat = tf.reduce_mean(activation_out, reduction_indices = 0) - KLD = beta * tf.reduce_sum( rho * tf.log(tf.div(rho, p_hat)) - + (1- rho) * tf.log((1- rho)/ (tf.sub(float(1), p_hat))) ) - - -There are many pre-train methods, for this reason, TensorLayer provides a simple way to modify or design your -own pre-train method. For Autoencoder, TensorLayer uses ``ReconLayer.__init__()`` -to define the reconstruction layer and cost function, to define your own cost -function, just simply modify the ``self.cost`` in ``ReconLayer.__init__()``. -To creat your own cost expression please read `Tensorflow Math `_. 
-By default, ``ReconLayer`` only updates the weights and biases of previous 1 -layer by using ``self.train_params = self.all _params[-4:]``, where the 4 -parameters are ``[W_encoder, b_encoder, W_decoder, b_decoder]``, where -``W_encoder, b_encoder`` belong to previous DenseLayer, ``W_decoder, b_decoder`` -belong to this ReconLayer. -In addition, if you want to update the parameters of previous 2 layers at the same time, simply modify ``[-4:]`` to ``[-6:]``. - - -.. code-block:: python - - ReconLayer.__init__(...): - ... - self.train_params = self.all_params[-4:] - ... - self.cost = mse + L1_a + L2_w - - - - - - - +.. ----------------------------------------------------------- +.. Layer List +.. ----------------------------------------------------------- Layer list ---------- @@ -350,225 +257,532 @@ Layer list MultiplexerLayer - flatten_reshape clear_layers_name initialize_rnn_state list_remove_repeat merge_networks +.. ----------------------------------------------------------- +.. Customizing Layers +.. ----------------------------------------------------------- -Name Scope and Sharing Parameters ---------------------------------- - -These functions help you to reuse parameters for different inference (graph), and get a -list of parameters by given name. About TensorFlow parameters sharing click `here `__. - -Get variables with name -^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autofunction:: get_variables_with_name +Customizing Layers +------------------ -Get layers with name -^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autofunction:: get_layers_with_name +A Simple Layer +^^^^^^^^^^^^^^ -Enable layer name reuse -^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autofunction:: set_name_reuse +To implement a custom layer in TensorLayer, you will have to write a Python class +that subclasses Layer and implement the ``outputs`` expression. -Print variables -^^^^^^^^^^^^^^^^^^ -.. autofunction:: print_all_variables +The following is an example implementation of a layer that multiplies its input by 2: -Initialize variables -^^^^^^^^^^^^^^^^^^^^^^ -.. autofunction:: initialize_global_variables +.. code-block:: python -Basic layer ----------- -.. autoclass:: Layer + class DoubleLayer(Layer): + def __init__( + self, + layer = None, + name ='double_layer', + ): + # check layer name (fixed) + Layer.__init__(self, layer=layer, name=name) -Input layer ------------- -.. autoclass:: InputLayer - :members: + # the input of this layer is the output of previous layer (fixed) + self.inputs = layer.outputs -One-hot layer ----------------- -.. autoclass:: OneHotInputLayer + # operation (customized) + self.outputs = self.inputs * 2 -Word Embedding Input layer ----------------------------- -Word2vec layer for training -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: Word2vecEmbeddingInputlayer + # update layer (customized) + self.all_layers.append(self.outputs) -Embedding Input layer -^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: EmbeddingInputlayer +Your Dense Layer +^^^^^^^^^^^^^^^^ -Average Embedding Input layer -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: AverageEmbeddingInputlayer +Before creating your own TensorLayer layer, let's have a look at the Dense layer. +It creates a weight matrix and a bias vector if they do not exist, and then implements +the output expression. +At the end, for a layer with parameters, we also append the parameters into ``all_params``. .. 
code-block:: python + class MyDenseLayer(Layer): + def __init__( + self, + layer = None, + n_units = 100, + act = tf.nn.relu, + name ='simple_dense', + ): + # check layer name (fixed) + Layer.__init__(self, layer=layer, name=name) -Dense layer ------------ + # the input of this layer is the output of previous layer (fixed) + self.inputs = layer.outputs -Dense layer ^^^^^^^^^^^^^ -.. autoclass:: DenseLayer + # print out info (customized) + print(" MyDenseLayer %s: %d, %s" % (self.name, n_units, act)) -Reconstruction layer for Autoencoder ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: ReconLayer - :members: + # operation (customized) + n_in = int(self.inputs._shape[-1]) + with tf.variable_scope(name) as vs: + # create new parameters + W = tf.get_variable(name='W', shape=(n_in, n_units)) + b = tf.get_variable(name='b', shape=(n_units)) + # tensor operation + self.outputs = act(tf.matmul(self.inputs, W) + b) -Noise layer ------------ + # update layer (customized) + self.all_layers.extend( [self.outputs] ) + self.all_params.extend( [W, b] ) -Dropout layer ^^^^^^^^^^^^^^^^ -.. autoclass:: DropoutLayer -Gaussian noise layer ^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: GaussianNoiseLayer -Dropconnect + Dense layer ^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: DropconnectDenseLayer +Modifying Pre-train Behaviour ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Convolutional layer (Pro) -------------------------- +Greedy layer-wise pretraining is an important task for deep neural network +initialization, and there are many kinds of pre-training methods for +different network architectures and applications. -1D Convolution ^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: Conv1dLayer +For example, the pre-train process of `Vanilla Sparse Autoencoder `_ +can be implemented by using KL divergence (for sigmoid) as the following code, +but for `Deep Rectifier Network `_, +the sparsity can be implemented by using the L1 regularization of activation output. -2D Convolution ^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: Conv2dLayer +.. code-block:: python -2D Deconvolution ^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: DeConv2dLayer + # Vanilla Sparse Autoencoder + beta = 4 + rho = 0.15 + p_hat = tf.reduce_mean(activation_out, reduction_indices = 0) + KLD = beta * tf.reduce_sum( rho * tf.log(tf.div(rho, p_hat)) + + (1- rho) * tf.log((1- rho)/ (tf.sub(float(1), p_hat))) ) -3D Convolution ^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: Conv3dLayer -3D Deconvolution ^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: DeConv3dLayer +There are many pre-train methods; for this reason, TensorLayer provides a simple way to modify or design your +own pre-train method. For Autoencoder, TensorLayer uses ``ReconLayer.__init__()`` +to define the reconstruction layer and cost function; to define your own cost +function, simply modify the ``self.cost`` in ``ReconLayer.__init__()``. +To create your own cost expression, please read `Tensorflow Math `_. +By default, ``ReconLayer`` only updates the weights and biases of the previous +layer by using ``self.train_params = self.all_params[-4:]``, where the 4 +parameters are ``[W_encoder, b_encoder, W_decoder, b_decoder]``, where +``W_encoder, b_encoder`` belong to the previous DenseLayer and ``W_decoder, b_decoder`` +belong to this ReconLayer. +In addition, if you want to update the parameters of the previous 2 layers at the same time, simply modify ``[-4:]`` to ``[-6:]``. -2D UpSampling ^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: UpSampling2dLayer -2D DownSampling .. 
code-block:: python + + ReconLayer.__init__(...): + ... + self.train_params = self.all_params[-4:] + ... + self.cost = mse + L1_a + L2_w + + +.. ----------------------------------------------------------- +.. Basic Layers +.. ----------------------------------------------------------- + +Basic layer +----------- + +.. autoclass:: Layer + +.. ----------------------------------------------------------- +.. Input Layers +.. ----------------------------------------------------------- + +Input Layers +--------------- + +Input Layer +^^^^^^^^^^^^^^^^ +.. autoclass:: InputLayer + +One-hot Input Layer +^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: OneHotInputLayer + +Word2Vec Embedding Layer +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: Word2vecEmbeddingInputlayer + +Embedding Input layer ^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: DownSampling2dLayer +.. autoclass:: EmbeddingInputlayer -1D Atrous convolution ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autofunction:: AtrousConv1dLayer +Average Embedding Input layer +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -2D Atrous convolution ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: AtrousConv2dLayer +.. autoclass:: AverageEmbeddingInputlayer -2D Atrous transposed convolution ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: AtrousDeConv2dLayer +.. ----------------------------------------------------------- +.. Activation Layers +.. ----------------------------------------------------------- Activation Layers --------------------------- PReLU Layer ^^^^^^^^^^^^^^^^^ .. autoclass:: PReluLayer PReLU6 Layer ^^^^^^^^^^^^^^^^^^ .. autoclass:: PRelu6Layer PTReLU6 Layer ^^^^^^^^^^^^^^^^^^^ .. autoclass:: PTRelu6Layer -Convolutional layer (Simplified) ----------------------------------- +.. ----------------------------------------------------------- +.. Convolutional Layers +.. ----------------------------------------------------------- + +Convolutional Layers +--------------------- + +Simplified Convolutions +^^^^^^^^^^^^^^^^^^^^^^^^^^ For users not familiar with TensorFlow, the following simplified functions may be easier for you. We will provide more simplified functions later, but if you are good at TensorFlow, the professional APIs may be better for you. -1D Convolution ^^^^^^^^^^^^^^^^^^^^^^^ +Conv1d """"""""""""""""""""" .. autoclass:: Conv1d -2D Convolution ^^^^^^^^^^^^^^^^^^^^^^^ +Conv2d """"""""""""""""""""" .. autoclass:: Conv2d -2D Deconvolution + +Simplified Deconvolutions ^^^^^^^^^^^^^^^^^^^^^^^^^^ + +For users not familiar with TensorFlow, the following simplified functions may be easier for you. +We will provide more simplified functions later, but if you are good at TensorFlow, the professional +APIs may be better for you. + +DeConv2d """"""""""""""""""""" .. autoclass:: DeConv2d -3D Deconvolution ^^^^^^^^^^^^^^^^^^^^^^^^^^ +DeConv3d """"""""""""""""""""" .. autoclass:: DeConv3d -2D Depthwise Conv ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Expert Convolutions +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Conv1dLayer """"""""""""""""""""" +.. autoclass:: Conv1dLayer + +Conv2dLayer """"""""""""""""""""" +.. autoclass:: Conv2dLayer + +Conv3dLayer """"""""""""""""""""" +.. autoclass:: Conv3dLayer + + +Expert Deconvolutions +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +DeConv2dLayer """"""""""""""""""""" +.. autoclass:: DeConv2dLayer + +DeConv3dLayer """"""""""""""""""""" +.. autoclass:: DeConv3dLayer + + +Atrous (De)Convolutions +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +AtrousConv1dLayer """"""""""""""""""""" +.. autofunction:: AtrousConv1dLayer + +AtrousConv2dLayer """"""""""""""""""""" +.. 
autoclass:: AtrousConv2dLayer + +AtrousDeConv2dLayer +""""""""""""""""""""" +.. autoclass:: AtrousDeConv2dLayer + + +Binary (De)Convolutions +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +BinaryConv2d +""""""""""""""""""""" +.. autoclass:: BinaryConv2d + + +Deformable Convolutions +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +DeformableConv2d +""""""""""""""""""""" +.. autoclass:: DeformableConv2d + + +Depthwise Convolutions +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +DepthwiseConv2d +""""""""""""""""""""" .. autoclass:: DepthwiseConv2d -1D Depthwise Separable Conv ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +DoReFa Convolutions +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +DorefaConv2d +""""""""""""""""""""" +.. autoclass:: DorefaConv2d + + +Group Convolutions +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +GroupConv2d +""""""""""""""""""""" +.. autoclass:: GroupConv2d + + +Separable Convolutions +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +SeparableConv1d +""""""""""""""""""""" .. autoclass:: SeparableConv1d -2D Depthwise Separable Conv ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +SeparableConv2d +""""""""""""""""""""" .. autoclass:: SeparableConv2d -2D Deformable Conv + +SubPixel Convolutions +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +SubpixelConv1d +""""""""""""""""""""" +.. autoclass:: SubpixelConv1d + +SubpixelConv2d +""""""""""""""""""""" +.. autoclass:: SubpixelConv2d + + +Ternary Convolutions +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +TernaryConv2d +""""""""""""""""""""" +.. autoclass:: TernaryConv2d + + + +.. ----------------------------------------------------------- +.. Dense Layers +.. ----------------------------------------------------------- + +Dense layer +------------ + +Binary Dense Layer +^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: BinaryDenseLayer + +Dense Layer +^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: DenseLayer + +DoReFa Dense Layer +^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: DorefaDenseLayer + +Drop Connect Dense Layer +^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: DropconnectDenseLayer + +Ternary Dense Layer +^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: TernaryDenseLayer + +.. ----------------------------------------------------------- +.. Dropout Layer +.. ----------------------------------------------------------- + +Dropout Layers +------------------- +.. autoclass:: DropoutLayer + +.. ----------------------------------------------------------- +.. Extend Layers +.. ----------------------------------------------------------- + +Extend Layers +------------------- + +Expand Dims Layer +^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: ExpandDimsLayer + + +Tile layer +^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: TileLayer + + +.. ----------------------------------------------------------- +.. External Libraries Layers +.. ----------------------------------------------------------- + +External Libraries Layers +------------------------------ + +TF-Slim Layer +^^^^^^^^^^^^^^^^^^^ +TF-Slim models can be connected into TensorLayer. All of Google's pre-trained models can be used easily; +see `Slim-model `__. + +.. autoclass:: SlimNetsLayer + +Keras Layer +^^^^^^^^^^^^^^^^^^^ +Yes! Keras models can be connected into TensorLayer! +See `tutorial_keras.py `_. + +.. autoclass:: KerasLayer + +Estimator Layer +^^^^^^^^^^^^^^^^^^^ +.. autoclass:: EstimatorLayer + +.. ----------------------------------------------------------- +.. Flow Control Layer +.. ----------------------------------------------------------- + +Flow Control Layer +---------------------- +.. autoclass:: MultiplexerLayer + +.. ----------------------------------------------------------- +.. Image Resampling Layers +.. 
----------------------------------------------------------- + +Image Resampling Layers +------------------------- + +2D UpSampling ^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: DeformableConv2d +.. autoclass:: UpSampling2dLayer -2D Grouped Conv +2D DownSampling ^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: GroupConv2d +.. autoclass:: DownSampling2dLayer -Super-Resolution layer ------------------------- +.. ----------------------------------------------------------- +.. Lambda Layer +.. ----------------------------------------------------------- -1D Subpixel Convolution -^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: SubpixelConv1d +Lambda Layers +--------------- + +Lambda Layer +^^^^^^^^^^^^^^^^^^^ +.. autoclass:: LambdaLayer -2D Subpixel Convolution +ElementWise Lambda Layer ^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: SubpixelConv2d +.. autoclass:: ElementwiseLambdaLayer +.. ----------------------------------------------------------- +.. Merge Layer +.. ----------------------------------------------------------- -Spatial Transformer ------------------------ +Merge Layers +--------------- -2D Affine Transformation +Concat Layer +^^^^^^^^^^^^^^^^^^^ +.. autoclass:: ConcatLayer + +ElementWise Layer +^^^^^^^^^^^^^^^^^^^ +.. autoclass:: ElementwiseLayer + +.. ----------------------------------------------------------- +.. Noise Layers +.. ----------------------------------------------------------- + +Noise Layer +--------------- +.. autoclass:: GaussianNoiseLayer + +.. ----------------------------------------------------------- +.. Normalization Layers +.. ----------------------------------------------------------- + +Normalization layer +-------------------- + +For local response normalization as it does not have any weights and arguments, +you can also apply ``tf.nn.lrn`` on ``network.outputs``. + +Batch Normalization +^^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: BatchNormLayer + +Local Response Normalization ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: SpatialTransformer2dAffineLayer +.. autoclass:: LocalResponseNormLayer -2D Affine Transformation function -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autofunction:: transformer +Instance Normalization +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: InstanceNormLayer -Batch 2D Affine Transformation function -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autofunction:: batch_transformer +Layer Normalization +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: LayerNormLayer +.. ----------------------------------------------------------- +.. Object Detection Layers +.. ----------------------------------------------------------- -Pooling and Padding layers ---------------------------- +Object Detection Layer +------------------------ +.. autoclass:: ROIPoolingLayer -Padding (Pro) -^^^^^^^^^^^^^^ -Padding layer for any modes. +.. ----------------------------------------------------------- +.. Padding Layers +.. ----------------------------------------------------------- -.. autoclass:: PadLayer +Padding Layers +------------------------ -Pooling (Pro) -^^^^^^^^^^^^^^ -Pooling layer for any dimensions and any pooling functions. +Pad Layer (Expert API) +^^^^^^^^^^^^^^^^^^^^^^^^^ +Padding layer for any modes. -.. autoclass:: PoolLayer +.. autoclass:: PadLayer 1D Zero padding ^^^^^^^^^^^^^^^^^^^ @@ -582,6 +796,19 @@ Pooling layer for any dimensions and any pooling functions. ^^^^^^^^^^^^^^^^^^^ .. autoclass:: ZeroPad3d +.. ----------------------------------------------------------- +.. Pooling Layers +.. 
----------------------------------------------------------- + +Pooling Layers +------------------------ + +Pool Layer (Expert API) +^^^^^^^^^^^^^^^^^^^^^^^^^ +Pooling layer for any dimensions and any pooling functions. + +.. autoclass:: PoolLayer + 1D Max pooling ^^^^^^^^^^^^^^^^^^^ .. autoclass:: MaxPool1d @@ -630,132 +857,111 @@ Pooling layer for any dimensions and any pooling functions. ^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: GlobalMeanPool3d +.. ----------------------------------------------------------- +.. Quantized Layers +.. ----------------------------------------------------------- -Normalization layer -------------------- - -For local response normalization as it does not have any weights and arguments, -you can also apply ``tf.nn.lrn`` on ``network.outputs``. - -Batch Normalization -^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: BatchNormLayer - -Local Response Normalization -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: LocalResponseNormLayer - -Instance Normalization -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: InstanceNormLayer - -Layer Normalization -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: LayerNormLayer +Quantized Nets +------------------ -Object Detection ------------------- +This is an experimental API package for building Quantized Neural Networks. We are using matrix multiplication rather than add-minus and bit-count operations at the moment. Therefore, these APIs will not speed up inference; for production, you can train the model via TensorLayer and deploy it in a customized C/C++ implementation (we will probably provide an extra C/C++ binary net framework that can load models from TensorLayer). -ROI layer -^^^^^^^^^^^ -.. autoclass:: ROIPoolingLayer +Note that these experimental APIs may be changed in the future. -Time distributed layer ------------------------- +Sign +^^^^^^^^^^^^^^ +.. autoclass:: SignLayer -.. autoclass:: TimeDistributedLayer +Scale +^^^^^^^^^^^^^^ +.. autoclass:: ScaleLayer +.. ----------------------------------------------------------- +.. Recurrent Layers +.. ----------------------------------------------------------- Recurrent Layers --------------------- Fixed Length Recurrent layer -------------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ All recurrent layers can implement any type of RNN cell by feeding different cell functions (LSTM, GRU, etc.). RNN layer -^^^^^^^^^^^^^^^^^^^^^^^^^^ +"""""""""""""""""""""""""" .. autoclass:: RNNLayer Bidirectional layer -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +""""""""""""""""""""""""""""""""" .. autoclass:: BiRNNLayer - Recurrent Convolutional layer -------------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Conv RNN Cell -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +""""""""""""""""""""""""""""""""" .. autoclass:: ConvRNNCell Basic Conv LSTM Cell -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +""""""""""""""""""""""""""""""""" .. autoclass:: BasicConvLSTMCell Conv LSTM layer -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +""""""""""""""""""""""""""""""""" .. autoclass:: ConvLSTMLayer - Advanced Ops for Dynamic RNN -------------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ These operations are usually used inside the Dynamic RNN layer; they can compute the sequence lengths for different situations and get the last RNN outputs by indexing. Output indexing -^^^^^^^^^^^^^^^^^^^^^^^^^ +""""""""""""""""""""""""" .. autofunction:: advanced_indexing_op Compute Sequence length 1 -^^^^^^^^^^^^^^^^^^^^^^^^^^ +"""""""""""""""""""""""""" .. 
autofunction:: retrieve_seq_length_op Compute Sequence length 2 -^^^^^^^^^^^^^^^^^^^^^^^^^^ +"""""""""""""""""""""""""" .. autofunction:: retrieve_seq_length_op2 Compute Sequence length 3 -^^^^^^^^^^^^^^^^^^^^^^^^^^ +"""""""""""""""""""""""""" .. autofunction:: retrieve_seq_length_op3 Get Mask -^^^^^^^^^^^^^^^^^^^^^^^^^^ +"""""""""""""""""""""""""" .. autofunction:: target_mask_op Dynamic RNN layer ----------------------- +^^^^^^^^^^^^^^^^^^^^^^ RNN layer -^^^^^^^^^^^^^^^^^^^^^^^^^^ +"""""""""""""""""""""""""" .. autoclass:: DynamicRNNLayer Bidirectional layer -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +""""""""""""""""""""""""""""""""" .. autoclass:: BiDynamicRNNLayer - Sequence to Sequence ----------------------- +^^^^^^^^^^^^^^^^^^^^^^ Simple Seq2Seq -^^^^^^^^^^^^^^^^^ +""""""""""""""""" .. autoclass:: Seq2Seq -.. - PeekySeq2Seq - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - .. autoclass:: PeekySeq2Seq - - AttentionSeq2Seq - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - .. autoclass:: AttentionSeq2Seq - - +.. ----------------------------------------------------------- +.. Shape Layers +.. ----------------------------------------------------------- Shape layer ------------ @@ -772,41 +978,28 @@ Transpose layer ^^^^^^^^^^^^^^^^^ .. autoclass:: TransposeLayer +.. ----------------------------------------------------------- +.. Spatial Transformer Layers +.. ----------------------------------------------------------- -Lambda layer ---------------- - -.. autoclass:: LambdaLayer - -Merge layer -------------- - -Concat layer -^^^^^^^^^^^^^^ -.. autoclass:: ConcatLayer - - -Element-wise layer -^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: ElementwiseLayer - - -Element-wise lambda layer -^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: ElementwiseLambdaLayer - +Spatial Transformer +----------------------- -Extend layer -------------- +2D Affine Transformation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: SpatialTransformer2dAffineLayer -Expand dims layer -^^^^^^^^^^^^^^^^^^^ -.. autoclass:: ExpandDimsLayer +2D Affine Transformation function +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. autofunction:: transformer -Tile layer -^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: TileLayer +Batch 2D Affine Transformation function +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. autofunction:: batch_transformer +.. ----------------------------------------------------------- +.. Stack Layers +.. ----------------------------------------------------------- Stack layer ------------- @@ -819,108 +1012,20 @@ Unstack layer ^^^^^^^^^^^^^^^ .. autoclass:: UnStackLayer -.. - Estimator layer - ------------------ - .. autoclass:: EstimatorLayer - - -Connect TF-Slim ------------------- - -TF-Slim models can be connected into TensorLayer. All Google's Pre-trained model can be used easily , -see `Slim-model `__. - -.. autoclass:: SlimNetsLayer - -.. - Connect Keras - ------------------ - - Yes ! Keras models can be connected into TensorLayer! see `tutorial_keras.py `_ . - - .. autoclass:: KerasLayer - - -Quantized Nets ------------------- - -Read Me -^^^^^^^^^^^^^^ - -This is an experimental API package for building Quantized Neural Networks. We are using matrix multiplication rather than add-minus and bit-count operation at the moment. Therefore, these APIs would not speed up the inferencing, for production, you can train model via TensorLayer and deploy the model into other customized C/C++ implementation (We probably provide users an extra C/C++ binary net framework that can load model from TensorLayer). 
- -Note that, these experimental APIs can be changed in the future -Binarized Dense -^^^^^^^^^^^^^^^^^ -.. autoclass:: BinaryDenseLayer -Binarized Conv2d -^^^^^^^^^^^^^^^^^^ -.. autoclass:: BinaryConv2d -Ternary Dense -^^^^^^^^^^^^^^^^^^ -.. autoclass:: TernaryDenseLayer -Ternary Conv2d -^^^^^^^^^^^^^^^^^^ -.. autoclass:: TernaryConv2d -Dorefa Dense -^^^^^^^^^^^^^^^^^^ -.. autoclass:: DorefaDenseLayer -Dorefa Conv2d -^^^^^^^^^^^^^^^^^^ -.. autoclass:: DorefaConv2d -Sign -^^^^^^^^^^^^^^ -.. autoclass:: SignLayer -Scale -^^^^^^^^^^^^^^ -.. autoclass:: ScaleLayer - -Parametric activation layer --------------------------- -PReLU Layer -^^^^^^^^^^^ -.. autoclass:: PReluLayer - -PReLU6 Layer -^^^^^^^^^^^^ -.. autoclass:: PRelu6Layer - -PTReLU6 Layer -^^^^^^^^^^^^^ -.. autoclass:: PTRelu6Layer - -Flow control layer ---------------------- -.. autoclass:: MultiplexerLayer -.. Wrapper --------- Embedding + Attention + Seq2seq ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: EmbeddingAttentionSeq2seqWrapper :members: +Time Distributed Layer +------------------------ +.. autoclass:: TimeDistributedLayer +.. ----------------------------------------------------------- +.. Helper Functions +.. ----------------------------------------------------------- -Helper functions +Helper Functions ------------------------ Flatten tensor diff --git a/docs/user/development.rst b/docs/user/contributing.rst similarity index 71% rename from docs/user/development.rst rename to docs/user/contributing.rst index dc51cab32..ea0ca41c5 100644 --- a/docs/user/development.rst +++ b/docs/user/contributing.rst @@ -1,29 +1,41 @@ -Development -=========== - -.. - The TensorLayer project was started by Hao Dong, Imperial College London in Jun - 2016. It is developed by a core team (in alphabetical order: - `Akara Supratak `_, - `Hao Dong `_, - `Simiao Yu `_) - and numerous additional contributors on `Release`_. - -TensorLayer is a major ongoing research project in Data Science Institute, Imperial College London. +.. _contributing: + +=============== +Contributing +=============== + +TensorLayer is a major ongoing research project at the Data Science Institute, Imperial College London. The goal of the project is to develop a compositional language while complex learning systems can be built through composition of neural network modules. -The whole development is now participated by numerous contributors on `Release`_. -As an open-source project by we highly welcome contributions! -Every bit helps and will be credited. .. TensorLayer was initially developed as a part of project in Imperial College London. Now it also maintained by numerous contributors on `GitHub`_. +Numerous contributors come from various horizons, such as Tsinghua University, Carnegie Mellon University, the University of Technology of Compiegne, +Google, Microsoft and Bloomberg. + +There are many functions that need to be contributed, such as Maxout, Neural Turing Machine, Attention and TensorLayer Mobile. + +You can easily open a Pull Request (PR) on `GitHub`_; every little step counts and will be credited. +As an open-source project, we highly welcome and value contributions! + +**If you are interested in working with us, please contact us at:** `tensorlayer@gmail.com `_. 
+ + +Project Maintainers +-------------------------- + + +The TensorLayer project was started by `Hao Dong `_ at Imperial College London in June 2016. -.. - As an open-source project by Researchers for Researchers and Engineers, +It is actively developed and maintained by the following people *(in alphabetical order)*: +- **Akara Supratak** (`@akaraspt `_) - ``_ +- **Fangde Liu** (`@fangde `_) - ``_ +- **Guo Li** (`@lgarithm `_) - ``_ +- **Hao Dong** (`@zsdonghao `_) - ``_ +- **Jonathan Dekhtiar** (`@DEKHTIARJonathan `_) - ``_ +- **Luo Mai** (`@luomai `_) - ``_ +- **Simiao Yu** (`@nebulaV `_) - ``_ +Numerous other contributors can be found in the `Github Contribution Graph `_. What to contribute @@ -161,6 +173,6 @@ help us reviewing it. If it is fixing an open issue, say, issue #123, add GitHub will close it when your request is merged. -.. _Release: https://github.com/zsdonghao/tensorlayer/releases -.. _GitHub: https://github.com/zsdonghao/tensorlayer +.. _Release: https://github.com/tensorlayer/tensorlayer/releases +.. _GitHub: https://github.com/tensorlayer/tensorlayer .. _our mailing list: hao.dong11@imperial.ac.uk diff --git a/docs/user/example.rst b/docs/user/example.rst index 642cddb87..bac5c3e77 100644 --- a/docs/user/example.rst +++ b/docs/user/example.rst @@ -7,19 +7,19 @@ Examples Basics ============ - - Multi-layer perceptron (MNIST). Classification task, see `tutorial_mnist_simple.py `__. - - Multi-layer perceptron (MNIST). Classification with dropout using iterator, see `method1 `__ (**use placeholder**) and `method2 `__ (**use reuse**). - - Denoising Autoencoder (MNIST). Classification task, see `tutorial_mnist.py `__. - - Stacked Denoising Autoencoder and Fine-Tuning (MNIST). A MLP classification task, see `tutorial_mnist.py `__. - - Convolutional Network (MNIST). Classification task, see `tutorial_mnist.py `__. - - Convolutional Network (CIFAR-10). Classification task, see `tutorial_cifar10.py `_ and `tutorial_cifar10_tfrecord.py `__. + - Multi-layer perceptron (MNIST). Classification task, see `tutorial_mnist_simple.py `__. + - Multi-layer perceptron (MNIST). Classification with dropout using iterator, see `method1 `__ (**use placeholder**) and `method2 `__ (**use reuse**). + - Denoising Autoencoder (MNIST). Classification task, see `tutorial_mnist.py `__. + - Stacked Denoising Autoencoder and Fine-Tuning (MNIST). A MLP classification task, see `tutorial_mnist.py `__. + - Convolutional Network (MNIST). Classification task, see `tutorial_mnist.py `__. + - Convolutional Network (CIFAR-10). Classification task, see `tutorial_cifar10.py `_ and `tutorial_cifar10_tfrecord.py `__. - TensorFlow dataset API for object detection see `here `__. - - Merge TF-Slim into TensorLayer. `tutorial_inceptionV3_tfslim.py `__. - - Merge Keras into TensorLayer. `tutorial_keras.py `__. - - Data augmentation with TFRecord. Effective way to load and pre-process data, see `tutorial_tfrecord*.py `__ and `tutorial_cifar10_tfrecord.py `__. - - Data augmentation with TensorLayer, see `tutorial_image_preprocess.py `__. - - Float 16 half-precision model, see `tutorial_mnist_float16.py `__. - - Distributed Training. `mnist `__ and `imagenet `__ by `jorgemf `__. + - Merge TF-Slim into TensorLayer. `tutorial_inceptionV3_tfslim.py `__. + - Merge Keras into TensorLayer. `tutorial_keras.py `__. + - Data augmentation with TFRecord. Effective way to load and pre-process data, see `tutorial_tfrecord*.py `__ and `tutorial_cifar10_tfrecord.py `__. 
+ - Data augmentation with TensorLayer, see `tutorial_image_preprocess.py `__. + - Float 16 half-precision model, see `tutorial_mnist_float16.py `__. + - Distributed Training. `mnist `__ and `imagenet `__ by `jorgemf `__. Vision ================== @@ -37,44 +37,44 @@ Vision Adversarial Learning ======================== - - DCGAN (CelebA). Generating images by `Deep Convolutional Generative Adversarial Networks `__ by `zsdonghao `__. + - DCGAN (CelebA). Generating images by `Deep Convolutional Generative Adversarial Networks `__ by `zsdonghao `__. - `Generative Adversarial Text to Image Synthesis `__ by `zsdonghao `__. - `Unsupervised Image to Image Translation with Generative Adversarial Networks `__ by `zsdonghao `__. - `Improved CycleGAN `__ with resize-convolution by `luoxier `__. - - `Super Resolution GAN `__ by `zsdonghao `__. + - `Super Resolution GAN `__ by `zsdonghao `__. - `BEGAN: Boundary Equilibrium Generative Adversarial Networks `__ by `2wins `__. - `DAGAN: Fast Compressed Sensing MRI Reconstruction `__ by `nebulaV `__. Natural Language Processing ============================== - - Recurrent Neural Network (LSTM). Apply multiple LSTM to PTB dataset for language modeling, see `tutorial_ptb_lstm_state_is_tuple.py `__. - - Word Embedding (Word2vec). Train a word embedding matrix, see `tutorial_word2vec_basic.py `__. - - Restore Embedding matrix. Restore a pre-train embedding matrix, see `tutorial_generate_text.py `__. - - Text Generation. Generates new text scripts, using LSTM network, see `tutorial_generate_text.py `__. + - Recurrent Neural Network (LSTM). Apply multiple LSTM to PTB dataset for language modeling, see `tutorial_ptb_lstm_state_is_tuple.py `__. + - Word Embedding (Word2vec). Train a word embedding matrix, see `tutorial_word2vec_basic.py `__. + - Restore Embedding matrix. Restore a pre-train embedding matrix, see `tutorial_generate_text.py `__. + - Text Generation. Generates new text scripts, using LSTM network, see `tutorial_generate_text.py `__. - Chinese Text Anti-Spam by `pakrchen `__. - - `Chatbot in 200 lines of code `__ for `Seq2Seq `__. - - FastText Sentence Classification (IMDB), see `tutorial_imdb_fasttext.py `__ by `tomtung `__. + - `Chatbot in 200 lines of code `__ for `Seq2Seq `__. + - FastText Sentence Classification (IMDB), see `tutorial_imdb_fasttext.py `__ by `tomtung `__. Reinforcement Learning ============================== - - Policy Gradient / Network (Atari Ping Pong), see `tutorial_atari_pong.py `__. - - Deep Q-Network (Frozen lake), see `tutorial_frozenlake_dqn.py `__. - - Q-Table learning algorithm (Frozen lake), see `tutorial_frozenlake_q_table.py `__. + - Policy Gradient / Network (Atari Ping Pong), see `tutorial_atari_pong.py `__. + - Deep Q-Network (Frozen lake), see `tutorial_frozenlake_dqn.py `__. + - Q-Table learning algorithm (Frozen lake), see `tutorial_frozenlake_q_table.py `__. - Asynchronous Policy Gradient using TensorDB (Atari Ping Pong) by `nebulaV `__. - - AC for discrete action space (Cartpole), see `tutorial_cartpole_ac.py `__. - - A3C for continuous action space (Bipedal Walker), see `tutorial_bipedalwalker_a3c*.py `__. + - AC for discrete action space (Cartpole), see `tutorial_cartpole_ac.py `__. + - A3C for continuous action space (Bipedal Walker), see `tutorial_bipedalwalker_a3c*.py `__. - `DAGGER `__ for (`Gym Torcs `__) by `zsdonghao `__. - `TRPO `__ for continuous and discrete action space by `jjkke88 `__. Pretrained Models ================== - - VGG 16 (ImageNet). 
Classification task, see `tl.models.VGG16 `__ or `tutorial_vgg16.py `__. - - VGG 19 (ImageNet). Classification task, see `tutorial_vgg19.py `__. - - InceptionV3 (ImageNet). Classification task, see `tutorial_inceptionV3_tfslim.py `__. - - SqueezeNet (ImageNet). Model compression, see `tl.models.SqueezeNetV1 `__ or `tutorial_squeezenet.py `__. + - VGG 16 (ImageNet). Classification task, see `tl.models.VGG16 `__ or `tutorial_vgg16.py `__. + - VGG 19 (ImageNet). Classification task, see `tutorial_vgg19.py `__. + - InceptionV3 (ImageNet). Classification task, see `tutorial_inceptionV3_tfslim.py `__. + - SqueezeNet (ImageNet). Model compression, see `tl.models.SqueezeNetV1 `__ or `tutorial_squeezenet.py `__. - MobileNet (ImageNet). Model compression, see `tl.models.MobileNetV1 `__ or `tutorial_mobilenet.py `__. - More CNN implementations of `TF-Slim `__ can be connected to TensorLayer via SlimNetsLayer. - All pretrained models in `pretrained-models `__. @@ -111,7 +111,7 @@ Miscellaneous -.. _GitHub: https://github.com/zsdonghao/tensorlayer +.. _GitHub: https://github.com/tensorlayer/tensorlayer .. _Deeplearning Tutorial: http://deeplearning.stanford.edu/tutorial/ .. _Convolutional Neural Networks for Visual Recognition: http://cs231n.github.io/ .. _Neural Networks and Deep Learning: http://neuralnetworksanddeeplearning.com/ diff --git a/docs/user/more.rst b/docs/user/faq.rst similarity index 67% rename from docs/user/more.rst rename to docs/user/faq.rst index b9ded5a58..8b181322b 100644 --- a/docs/user/more.rst +++ b/docs/user/faq.rst @@ -1,21 +1,13 @@ -.. _more: +.. _faq: ============ -More +FAQ ============ -.. - Competitions - ============ - - Coming soon - -FAQ -=========== How to effectively learn TensorLayer ------------------------------------------- +===================================== + No matter what stage you are in, we recommend you spend just 10 minutes reading the source code of TensorLayer and the `Understand layer / Your layer `__ page on this website; you will find that the abstract methods are very simple for everyone. Reading the source codes helps you to better understand TensorFlow and allows you to implement your own methods easily. For discussion, we recommend `Gitter `__, `Help Wanted Issues `__, -`QQ group `__ +`QQ group `__ and `Wechat group `__. Beginner -^^^^^^^^^^^^^^ +----------- For people who are new to deep learning, the contributors have provided a number of tutorials on this website; these tutorials will guide you through autoencoders, convolutional neural networks, recurrent neural networks, word embedding, deep reinforcement learning and more. If you already understand the basics of deep learning, we recommend you skip the tutorials and read the example code on `Github `__, then implement an example from scratch. Engineer -^^^^^^^^^^^^^ +------------ For people from industry, the contributors have provided many format-consistent examples covering computer vision, natural language processing and reinforcement learning. Besides, many TensorFlow users have already implemented product-level examples, including image captioning, semantic/instance segmentation, machine translation and chatbots, which can be found online. It is worth noting that `Tf-Slim `__, a wrapper especially for computer vision, can be connected with TensorLayer seamlessly. Therefore, you may be able to find examples that can be used in your project. 
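To make the Tf-Slim point concrete, here is a minimal, illustrative sketch of wrapping a slim-defined network with ``tl.layers.SlimNetsLayer`` (the layer documented in the Layers API); the ``my_slim_net`` function, its shapes and argument values are invented for this example, and keyword names may vary between TensorLayer versions:

.. code-block:: python

    import tensorflow as tf
    import tensorlayer as tl

    slim = tf.contrib.slim

    def my_slim_net(inputs):
        # Any TF-Slim-defined network can go here. Standard slim model
        # functions return (outputs, end_points), which is the pair that
        # SlimNetsLayer expects the wrapped function to return.
        net = slim.conv2d(inputs, 64, [3, 3], scope='conv1')
        net = slim.flatten(net)
        logits = slim.fully_connected(net, 10, activation_fn=None, scope='fc')
        return logits, {'logits': logits}

    x = tf.placeholder(tf.float32, [None, 28, 28, 1])
    net = tl.layers.InputLayer(x, name='input')
    # The wrapped slim network now behaves like a native TensorLayer layer:
    # its variables and outputs are tracked by the surrounding network.
    net = tl.layers.SlimNetsLayer(net, slim_layer=my_slim_net, slim_args={}, name='slim')

This is the same pattern used to reuse Google's pre-trained networks in the ``tutorial_inceptionV3_tfslim.py`` example mentioned above.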
Researcher -^^^^^^^^^^^^^ +------------- For people from academia, TensorLayer was originally developed by PhD students who faced issues with other libraries when implementing novel algorithms. Installing TensorLayer in editable mode is recommended, so you can extend your methods in TensorLayer. For research related to images, such as image captioning and visual QA, you may find it very helpful to use the existing `Tf-Slim pre-trained models `__ with TensorLayer (a special layer for connecting Tf-Slim is provided). Exclude some layers from training ------------------------------------ +====================================== You may need to get the list of variables you want to update; TensorLayer provides two ways to get this list. @@ -70,7 +62,7 @@ After you get the variable list, you can define your optimizer like that so as t Logging -------- +========== TensorLayer adopts the `Python logging module `__ to log running information. @@ -79,10 +71,10 @@ If you want to configure the logging module, you should follow its `manual `__. Visualization --------------- +=============== Cannot Save Image -^^^^^^^^^^^^^^^^^^^^^^^ +----------------------- If you run the script via SSH, you may sometimes encounter the following error. @@ -101,7 +93,7 @@ Alternatively, add the following code into the top of ``visualize.py`` or in you Install Master Version ------------------------ +======================== To use all new features of TensorLayer, you need to install the master version from Github. Before that, you need to make sure you have already installed git. @@ -109,10 +101,10 @@ .. code-block:: bash [stable version] pip install tensorlayer - [master version] pip install git+https://github.com/zsdonghao/tensorlayer.git + [master version] pip install git+https://github.com/tensorlayer/tensorlayer.git Editable Mode ---------------- +=============== - 1. Download the TensorLayer folder from Github. - 2. Before editing the TensorLayer ``.py`` file. @@ -126,7 +118,7 @@ Load Model --------------- +=========== Note that ``tl.files.load_npz()`` can only load npz models saved by ``tl.files.save_npz()``. If you have a model you want to load into your TensorLayer network, you can first assign your parameters to a list in order, then use ``tl.files.assign_params()`` to load the parameters into your TensorLayer network. - - -Recruitment -=========== - -TensorLayer Contributors -------------------------- - -TensorLayer contributors are from Imperial College, Tsinghua University, Carnegie Mellon University, Google, Microsoft, Bloomberg and etc. -There are many functions need to be contributed such as -Maxout, Neural Turing Machine, Attention, TensorLayer Mobile and etc. -Please push on `GitHub`_, every bit helps and will be credited. -If you are interested in working with us, please -`contact us `__. - - -Data Science Institute, Imperial College London ------------------------------------------------- - -Data science is therefore by nature at the core of all modern transdisciplinary scientific activities, as it involves the whole life cycle of data, from acquisition and exploration to analysis and communication of the results. Data science is not only concerned with the tools and methods to obtain, manage and analyse data: it is also about extracting value from data and translating it from asset to product.
- -Launched on 1st April 2014, the Data Science Institute at Imperial College London aims to enhance Imperial's excellence in data-driven research across its faculties by fulfilling the following objectives. - -The Data Science Institute is housed in purpose built facilities in the heart of the Imperial College campus in South Kensington. Such a central location provides excellent access to collabroators across the College and across London. - - - To act as a focal point for coordinating data science research at Imperial College by facilitating access to funding, engaging with global partners, and stimulating cross-disciplinary collaboration. - - To develop data management and analysis technologies and services for supporting data driven research in the College. - - To promote the training and education of the new generation of data scientist by developing and coordinating new degree courses, and conducting public outreach programmes on data science. - - To advise College on data strategy and policy by providing world-class data science expertise. - - To enable the translation of data science innovation by close collaboration with industry and supporting commercialization. - -If you are interested in working with us, please check our -`vacancies `__ -and other ways to -`get involved `__ -, or feel free to -`contact us `__. - - - - -.. _GitHub: https://github.com/zsdonghao/tensorlayer +.. _GitHub: https://github.com/tensorlayer/tensorlayer .. _Deeplearning Tutorial: http://deeplearning.stanford.edu/tutorial/ .. _Convolutional Neural Networks for Visual Recognition: http://cs231n.github.io/ .. _Neural Networks and Deep Learning: http://neuralnetworksanddeeplearning.com/ diff --git a/docs/user/get_involved.rst b/docs/user/get_involved.rst new file mode 100644 index 000000000..2cf2d8b0c --- /dev/null +++ b/docs/user/get_involved.rst @@ -0,0 +1,28 @@ + + +========================= +Get Involved in Research +========================= + + +Data Science Institute, Imperial College London +================================================== + +Data science is by nature at the core of all modern transdisciplinary scientific activities, as it involves the whole life cycle of data, from acquisition and exploration to analysis and communication of the results. Data science is not only concerned with the tools and methods to obtain, manage and analyse data: it is also about extracting value from data and translating it from asset to product. + +Launched on 1st April 2014, the Data Science Institute at Imperial College London aims to enhance Imperial's excellence in data-driven research across its faculties by fulfilling the following objectives. + +The Data Science Institute is housed in purpose-built facilities in the heart of the Imperial College campus in South Kensington. Such a central location provides excellent access to collaborators across the College and across London. + + - To act as a focal point for coordinating data science research at Imperial College by facilitating access to funding, engaging with global partners, and stimulating cross-disciplinary collaboration. + - To develop data management and analysis technologies and services for supporting data-driven research in the College. + - To promote the training and education of the new generation of data scientists by developing and coordinating new degree courses, and conducting public outreach programmes on data science. + - To advise College on data strategy and policy by providing world-class data science expertise.
+ - To enable the translation of data science innovation by close collaboration with industry and supporting commercialization. + +If you are interested in working with us, please check our +`vacancies `__ +and other ways to +`get involved `__ +, or feel free to +`contact us `__. \ No newline at end of file diff --git a/docs/user/installation.rst b/docs/user/installation.rst index f8aed8e6b..757191e36 100644 --- a/docs/user/installation.rst +++ b/docs/user/installation.rst @@ -12,7 +12,7 @@ If you run into any trouble, please check the `TensorFlow installation instructions `_ which cover installing TensorFlow for a range of operating systems including Mac OS X, Linux and Windows, or ask for help on `tensorlayer@gmail.com `_ -or `FQA `_. +or `FAQ `_. @@ -85,7 +85,7 @@ The simplest way to install TensorLayer is as follow, it will also install the n .. code-block:: bash [stable version] pip install tensorlayer - [master version] pip install git+https://github.com/zsdonghao/tensorlayer.git + [master version] pip install git+https://github.com/tensorlayer/tensorlayer.git However, if you want to modify or extend TensorLayer, you can download the repository from `Github`_ and install it as follows. @@ -149,8 +149,8 @@ To install it, copy the ``*.h`` files to ``/usr/local/cuda/include`` and the ``lib*`` files to ``/usr/local/cuda/lib64``. .. _TensorFlow: https://www.tensorflow.org/versions/master/get_started/os_setup.html -.. _GitHub: https://github.com/zsdonghao/tensorlayer -.. _TensorLayer: https://github.com/zsdonghao/tensorlayer/ +.. _GitHub: https://github.com/tensorlayer/tensorlayer +.. _TensorLayer: https://github.com/tensorlayer/tensorlayer/ diff --git a/docs/user/tutorial.rst b/docs/user/tutorial.rst index 2663ca138..9d69e1deb 100644 --- a/docs/user/tutorial.rst +++ b/docs/user/tutorial.rst @@ -1971,7 +1971,7 @@ preprocessing functions (:mod:`tensorlayer.prepro`), command line interface (:mod:`tensorlayer.prepro`), -.. _TensorLayer: https://github.com/zsdonghao/tensorlayer/ +.. _TensorLayer: https://github.com/tensorlayer/tensorlayer/ .. _Deeplearning Tutorial: http://deeplearning.stanford.edu/tutorial/ .. _Convolutional Neural Networks for Visual Recognition: http://cs231n.github.io/ .. _Neural Networks and Deep Learning: http://neuralnetworksanddeeplearning.com/ diff --git a/example/tutorial_generate_text.py b/example/tutorial_generate_text.py index af634d1f7..811be87da 100644 --- a/example/tutorial_generate_text.py +++ b/example/tutorial_generate_text.py @@ -18,7 +18,7 @@ Generate text using LSTM.
-Data: https://github.com/zsdonghao/tensorlayer/tree/master/example/data/ +Data: https://github.com/tensorlayer/tensorlayer/tree/master/example/data/ """ diff --git a/example/tutorial_inceptionV3_tfslim.py b/example/tutorial_inceptionV3_tfslim.py index 631ce5571..57e92d68a 100644 --- a/example/tutorial_inceptionV3_tfslim.py +++ b/example/tutorial_inceptionV3_tfslim.py @@ -43,7 +43,7 @@ from tensorlayer.models.imagenet_classes import * except Exception as e: raise Exception( - "{} / download the file from: https://github.com/zsdonghao/tensorlayer/tree/master/example/data".format(e) + "{} / download the file from: https://github.com/tensorlayer/tensorlayer/tree/master/example/data".format(e) ) MODEL_PATH = os.path.join("models", 'inception_v3.ckpt') @@ -136,7 +136,7 @@ def print_prob(prob): y = network.outputs probs = tf.nn.softmax(y) -# test data in github: https://github.com/zsdonghao/tensorlayer/tree/master/example/data +# test data in github: https://github.com/tensorlayer/tensorlayer/tree/master/example/data img1 = load_image("data/puzzle.jpeg") img1 = img1.reshape((1, 299, 299, 3)) diff --git a/example/tutorial_mnist.py b/example/tutorial_mnist.py index 7d06cd8d0..87b290f38 100644 --- a/example/tutorial_mnist.py +++ b/example/tutorial_mnist.py @@ -3,11 +3,11 @@ """Examples of Stacked Denoising Autoencoder, Dropout, Dropconnect and CNN. - Multi-layer perceptron (MNIST) - Classification task, see tutorial_mnist_simple.py - https://github.com/zsdonghao/tensorlayer/blob/master/example/tutorial_mnist_simple.py + https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_mnist_simple.py - Multi-layer perceptron (MNIST) - Classification using Iterator, see: - method1 : https://github.com/zsdonghao/tensorlayer/blob/master/example/tutorial_mlp_dropout1.py - method2 : https://github.com/zsdonghao/tensorlayer/blob/master/example/tutorial_mlp_dropout2.py + method1 : https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_mlp_dropout1.py + method2 : https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_mlp_dropout2.py """ diff --git a/example/tutorial_mobilenet.py b/example/tutorial_mobilenet.py index e2f6755a0..5082d41e5 100644 --- a/example/tutorial_mobilenet.py +++ b/example/tutorial_mobilenet.py @@ -41,7 +41,7 @@ def decode_predictions(preds, top=5): # keras.applications.resnet50 fpath = os.path.join("data", "imagenet_class_index.json") if tl.files.file_exists(fpath) is False: raise Exception( - "{} / download imagenet_class_index.json from: https://github.com/zsdonghao/tensorlayer/tree/master/example/data" + "{} / download imagenet_class_index.json from: https://github.com/tensorlayer/tensorlayer/tree/master/example/data" ) if isinstance(preds, np.ndarray) is False: preds = np.asarray(preds) diff --git a/example/tutorial_ptb_lstm.py b/example/tutorial_ptb_lstm.py index 11949376b..45e51668b 100644 --- a/example/tutorial_ptb_lstm.py +++ b/example/tutorial_ptb_lstm.py @@ -375,7 +375,7 @@ def loss_fn(outputs, targets): #, batch_size, num_steps): print("Test Perplexity: %.3f took %.2fs" % (test_perplexity, time.time() - start_time)) print( - "More example: Text generation using Trump's speech data: https://github.com/zsdonghao/tensorlayer/blob/master/example/tutorial_generate_text.py -- def main_lstm_generate_text():" + "More example: Text generation using Trump's speech data: https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_generate_text.py -- def main_lstm_generate_text():" ) diff --git 
a/example/tutorial_ptb_lstm_state_is_tuple.py b/example/tutorial_ptb_lstm_state_is_tuple.py index 370c66f85..ae253dea6 100644 --- a/example/tutorial_ptb_lstm_state_is_tuple.py +++ b/example/tutorial_ptb_lstm_state_is_tuple.py @@ -404,7 +404,7 @@ def loss_fn(outputs, targets, batch_size): print("Test Perplexity: %.3f took %.2fs" % (test_perplexity, time.time() - start_time)) print( - "More example: Text generation using Trump's speech data: https://github.com/zsdonghao/tensorlayer/blob/master/example/tutorial_generate_text.py -- def main_lstm_generate_text():" + "More example: Text generation using Trump's speech data: https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_generate_text.py -- def main_lstm_generate_text():" ) diff --git a/example/tutorial_squeezenet.py b/example/tutorial_squeezenet.py index 0a1d20372..bd98d3e4b 100644 --- a/example/tutorial_squeezenet.py +++ b/example/tutorial_squeezenet.py @@ -19,7 +19,7 @@ def decode_predictions(preds, top=5): # keras.applications.resnet50 fpath = os.path.join("data", "imagenet_class_index.json") if tl.files.file_exists(fpath) is False: raise Exception( - "{} / download imagenet_class_index.json from: https://github.com/zsdonghao/tensorlayer/tree/master/example/data" + "{} / download imagenet_class_index.json from: https://github.com/tensorlayer/tensorlayer/tree/master/example/data" ) if isinstance(preds, np.ndarray) is False: preds = np.asarray(preds) diff --git a/example/tutorial_vgg16.py b/example/tutorial_vgg16.py index 738eee500..236373d53 100644 --- a/example/tutorial_vgg16.py +++ b/example/tutorial_vgg16.py @@ -51,7 +51,7 @@ from tensorlayer.models.imagenet_classes import * except Exception as e: raise Exception( - "{} / download the file from: https://github.com/zsdonghao/tensorlayer/tree/master/example/data".format(e) + "{} / download the file from: https://github.com/tensorlayer/tensorlayer/tree/master/example/data".format(e) ) diff --git a/example/tutorial_vgg19.py b/example/tutorial_vgg19.py index aba010768..73e1c94ae 100755 --- a/example/tutorial_vgg19.py +++ b/example/tutorial_vgg19.py @@ -29,7 +29,7 @@ from tensorlayer.models.imagenet_classes import * except Exception as e: raise Exception( - "{} / download the file from: https://github.com/zsdonghao/tensorlayer/tree/master/example/data".format(e) + "{} / download the file from: https://github.com/tensorlayer/tensorlayer/tree/master/example/data".format(e) ) VGG_MEAN = [103.939, 116.779, 123.68] diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 4f92070b7..5b3d049e9 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -8,3 +8,4 @@ scikit-learn>=0.19,<0.20 scikit-image>=0.14,<0.15 scipy>=1.1,<1.2 tqdm>=4.23,<4.24 +wrapt>=1.10,<1.11 diff --git a/setup.py b/setup.py index 507d05de0..7b5d65e93 100755 --- a/setup.py +++ b/setup.py @@ -160,5 +160,4 @@ def req_file(filename, folder="requirements"): scripts=[ 'tl', ], - platform=['any'], ) diff --git a/tensorlayer/__init__.py b/tensorlayer/__init__.py index c38c044b4..942b25513 100644 --- a/tensorlayer/__init__.py +++ b/tensorlayer/__init__.py @@ -57,7 +57,7 @@ global_dict = {} # Use the following formating: (major, minor, patch, prerelease) -VERSION = (1, 8, 6, "rc5") +VERSION = (1, 8, 6, "rc6") __shortversion__ = '.'.join(map(str, VERSION[:3])) __version__ = '.'.join(map(str, VERSION[:3])) + "".join(VERSION[3:]) diff --git a/tensorlayer/activation.py b/tensorlayer/activation.py index 5f33534f5..1b13d2760 100644 --- a/tensorlayer/activation.py +++ 
b/tensorlayer/activation.py @@ -3,7 +3,8 @@ """A file containing various activation functions.""" import tensorflow as tf -from tensorflow.python.util.deprecation import deprecated + +from tensorlayer.decorators import deprecated __all__ = [ 'leaky_relu', @@ -44,7 +45,7 @@ def ramp(x, v_min=0, v_max=1, name=None): return tf.clip_by_value(x, clip_value_min=v_min, clip_value_max=v_max, name=name) -@deprecated("2018-09-30", "This API is deprecated. Please use as `tf.nn.leaky_relu`.") +@deprecated(date="2018-09-30", instructions="This API is deprecated. Please use as `tf.nn.leaky_relu`") def leaky_relu(x, alpha=0.2, name="leaky_relu"): """leaky_relu can be used through its shortcut: :func:`tl.act.lrelu`. @@ -301,7 +302,7 @@ def hard_tanh(x, name='htanh'): return tf.clip_by_value(x, -1, 1, name=name) -@deprecated("2018-06-30", "This API will be deprecated soon as tf.nn.softmax can do the same thing.") +@deprecated(date="2018-06-30", instructions="This API will be deprecated soon as tf.nn.softmax can do the same thing") def pixel_wise_softmax(x, name='pixel_wise_softmax'): """Return the softmax outputs of images, every pixel has multiple labels, and the sum for each pixel is 1. diff --git a/tensorlayer/cost.py b/tensorlayer/cost.py index 8b3ece8a6..dabcc4d4b 100644 --- a/tensorlayer/cost.py +++ b/tensorlayer/cost.py @@ -370,7 +370,7 @@ def iou_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5): def cross_entropy_seq(logits, target_seqs, batch_size=None): #, batch_size=1, num_steps=None): """Returns the expression of cross-entropy of two sequences, implementing - softmax internally. Normally be used for fixed length RNN outputs, see `PTB example `__. + softmax internally. Normally used for fixed-length RNN outputs, see `PTB example `__. Parameters ---------- @@ -385,7 +385,7 @@ Examples -------- - >>> see `PTB example `__.for more details + >>> see `PTB example `__ for more details >>> input_data = tf.placeholder(tf.int32, [batch_size, n_steps]) >>> targets = tf.placeholder(tf.int32, [batch_size, n_steps]) >>> # build the network diff --git a/tensorlayer/decorators/__init__.py b/tensorlayer/decorators/__init__.py index b0880344f..9d4eeaa17 100644 --- a/tensorlayer/decorators/__init__.py +++ b/tensorlayer/decorators/__init__.py @@ -9,8 +9,9 @@ More functions can be found in `TensorFlow API `__. """ -from .deprecation import deprecated_alias +from .deprecated import deprecated +from .deprecated_alias import deprecated_alias from .method_decorator import private_method from .method_decorator import protected_method -__all__ = ['deprecated_alias', 'private_method', 'protected_method'] +__all__ = ['deprecated', 'deprecated_alias', 'private_method', 'protected_method'] diff --git a/tensorlayer/decorators/deprecated.py b/tensorlayer/decorators/deprecated.py new file mode 100644 index 000000000..36e6b8fda --- /dev/null +++ b/tensorlayer/decorators/deprecated.py @@ -0,0 +1,103 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +import sys +import functools + +from tensorflow.python.util import decorator_utils + +from tensorflow.python.util.deprecation import _call_location +from tensorflow.python.util.deprecation import _validate_deprecation_args + +from tensorlayer import tl_logging as logging + +import wrapt + +__all__ = ['deprecated'] + +# Allow deprecation warnings to be silenced temporarily with a context manager. +_PRINT_DEPRECATION_WARNINGS = True + +# Remember which deprecation warnings have been printed already.
+_PRINTED_WARNING = {} + + +def add_notice_to_docstring(doc, no_doc_str, notice): + """Adds a deprecation notice to a docstring.""" + if not doc: + lines = [no_doc_str] + + else: + lines = decorator_utils._normalize_docstring(doc).splitlines() + + notice = [''] + notice + + if len(lines) > 1: + # Make sure that we keep our distance from the main body + if lines[1].strip(): + notice.append('') + + lines[1:1] = notice + else: + lines += notice + + return '\n'.join(lines) + + +def _add_deprecated_function_notice_to_docstring(doc, date, instructions): + """Adds a deprecation notice to a docstring for deprecated functions.""" + + if instructions: + deprecation_message = """ + .. warning:: + **THIS FUNCTION IS DEPRECATED:** It will be removed after %s. + *Instructions for updating:* %s. + """ % (('in a future version' if date is None else ('after %s' % date)), instructions) + + else: + deprecation_message = """ + .. warning:: + **THIS FUNCTION IS DEPRECATED:** It will be removed after %s. + """ % (('in a future version' if date is None else ('after %s' % date))) + + main_text = [deprecation_message] + + return add_notice_to_docstring(doc, 'DEPRECATED FUNCTION', main_text) + + +def deprecated(wrapped=None, date='', instructions='', warn_once=True): + + if wrapped is None: + return functools.partial(deprecated, date=date, instructions=instructions, warn_once=warn_once) + + @wrapt.decorator + def deprecated_wrapper(wrapped, instance, args, kwargs): + + _validate_deprecation_args(date, instructions) + + if _PRINT_DEPRECATION_WARNINGS: + + class_or_func_name = decorator_utils.get_qualified_name(wrapped) + + if class_or_func_name not in _PRINTED_WARNING: + if warn_once: + _PRINTED_WARNING[class_or_func_name] = True + + logging.warning( + 'From %s: %s (from %s) is deprecated and will be removed %s.\n' + 'Instructions for updating: %s\n' % ( + _call_location(), class_or_func_name, wrapped.__module__, 'in a future version' + if date is None else ('after %s' % date), instructions + ) + ) + + return wrapped(*args, **kwargs) + + decorated = deprecated_wrapper(wrapped) + + if (sys.version_info > (3, 0)): # docstring can only be edited with Python 3 + wrapt.FunctionWrapper.__setattr__( + decorated, "__doc__", _add_deprecated_function_notice_to_docstring(wrapped.__doc__, date, instructions) + ) + + return decorated diff --git a/tensorlayer/decorators/deprecation.py b/tensorlayer/decorators/deprecated_alias.py similarity index 100% rename from tensorlayer/decorators/deprecation.py rename to tensorlayer/decorators/deprecated_alias.py diff --git a/tensorlayer/layers/__init__.py b/tensorlayer/layers/__init__.py index f7172d0ee..c45498810 100644 --- a/tensorlayer/layers/__init__.py +++ b/tensorlayer/layers/__init__.py @@ -9,28 +9,29 @@ More functions can be found in `TensorFlow API `__. 
""" -from .binary import * +from .activation import * from .convolution import * from .core import * from .dense import * from .dropout import * from .extend import * from .flow_control import * -from .image_resize import * +from .image_resampling import * from .importer import * from .inputs import * +from .lambda_layers import * from .merge import * from .noise import * from .normalization import * from .object_detection import * from .padding import * from .pooling import * +from .quantize import * from .reconstruction import * from .recurrent import * +from .scale import * from .shape import * from .spatial_transformer import * -from .special_activation import * from .stack import * -from .super_resolution import * from .time_distribution import * from .utils import * diff --git a/tensorlayer/layers/special_activation.py b/tensorlayer/layers/activation.py similarity index 100% rename from tensorlayer/layers/special_activation.py rename to tensorlayer/layers/activation.py diff --git a/tensorlayer/layers/convolution/__init__.py b/tensorlayer/layers/convolution/__init__.py index fede0d27a..1463af7be 100644 --- a/tensorlayer/layers/convolution/__init__.py +++ b/tensorlayer/layers/convolution/__init__.py @@ -20,6 +20,7 @@ from .separable_conv import * from .simplified_conv import * from .simplified_deconv import * +from .super_resolution import * from .ternary_conv import * __all__ = [ @@ -65,6 +66,10 @@ 'SeparableConv1d', 'SeparableConv2d', + # subpixel + 'SubpixelConv1d', + 'SubpixelConv2d', + # ternary 'TernaryConv2d', ] diff --git a/tensorlayer/layers/super_resolution.py b/tensorlayer/layers/convolution/super_resolution.py similarity index 99% rename from tensorlayer/layers/super_resolution.py rename to tensorlayer/layers/convolution/super_resolution.py index 3c69ccffd..2903b4e78 100644 --- a/tensorlayer/layers/super_resolution.py +++ b/tensorlayer/layers/convolution/super_resolution.py @@ -18,7 +18,7 @@ class SubpixelConv2d(Layer): """It is a 2D sub-pixel up-sampling layer, usually be used - for Super-Resolution applications, see `SRGAN `__ for example. + for Super-Resolution applications, see `SRGAN `__ for example. Parameters ------------ diff --git a/tensorlayer/layers/image_resize.py b/tensorlayer/layers/image_resampling.py similarity index 100% rename from tensorlayer/layers/image_resize.py rename to tensorlayer/layers/image_resampling.py diff --git a/tensorlayer/layers/importer.py b/tensorlayer/layers/importer.py index 868c7c716..c70b83379 100644 --- a/tensorlayer/layers/importer.py +++ b/tensorlayer/layers/importer.py @@ -2,93 +2,27 @@ # -*- coding: utf-8 -*- import tensorflow as tf -from tensorflow.python.util.deprecation import deprecated from tensorlayer.layers.core import Layer from tensorlayer.layers.core import TF_GRAPHKEYS_VARIABLES from tensorlayer import tl_logging as logging +from tensorlayer.decorators import deprecated from tensorlayer.decorators import deprecated_alias __all__ = [ - 'LambdaLayer', 'SlimNetsLayer', 'KerasLayer', 'EstimatorLayer', ] -class LambdaLayer(Layer): - """A layer that takes a user-defined function using TensorFlow Lambda, for multiple inputs see :class:`ElementwiseLambdaLayer`. - - Parameters - ---------- - prev_layer : :class:`Layer` - Previous layer. - fn : function - The function that applies to the outputs of previous layer. - fn_args : dictionary or None - The arguments for the function (option). - name : str - A unique layer name. 
- - Examples - --------- - Non-parametric case - - >>> import tensorflow as tf - >>> import tensorlayer as tl - >>> x = tf.placeholder(tf.float32, shape=[None, 1], name='x') - >>> net = tl.layers.InputLayer(x, name='input') - >>> net = tl.layers.LambdaLayer(net, lambda x: 2*x, name='lambda') - - Parametric case, merge other wrappers into TensorLayer - - >>> from keras.layers import * - >>> from tensorlayer.layers import * - >>> def keras_block(x): - >>> x = Dropout(0.8)(x) - >>> x = Dense(800, activation='relu')(x) - >>> x = Dropout(0.5)(x) - >>> x = Dense(800, activation='relu')(x) - >>> x = Dropout(0.5)(x) - >>> logits = Dense(10, activation='linear')(x) - >>> return logits - >>> net = InputLayer(x, name='input') - >>> net = LambdaLayer(net, fn=keras_block, name='keras') - - """ - - @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release - def __init__( - self, - prev_layer, - fn, - fn_args=None, - name='lambda_layer', - ): - - super(LambdaLayer, self).__init__(prev_layer=prev_layer, fn_args=fn_args, name=name) - - logging.info("LambdaLayer %s" % self.name) - - if fn is None: - raise AssertionError("The `fn` argument cannot be None") - - with tf.variable_scope(name) as vs: - self.outputs = fn(self.inputs, **self.fn_args) - variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) - - self._add_layers(self.outputs) - self._add_params(variables) - - class SlimNetsLayer(Layer): """A layer that merges TF-Slim models into TensorLayer. Models can be found in `slim-model `__, - see Inception V3 example on `Github `__. + see the Inception V3 example on `Github `__. Parameters ---------- @@ -147,11 +81,13 @@ def __init__( self._add_params(slim_variables) -@deprecated("2018-06-30", "This layer will be deprecated soon as :class:`LambdaLayer` can do the same thing.") +@deprecated( + date="2018-06-30", instructions="This layer will be deprecated soon as :class:`LambdaLayer` can do the same thing" +) class KerasLayer(Layer): """A layer to import Keras layers into TensorLayer. - Example can be found here `tutorial_keras.py `__. + An example can be found in `tutorial_keras.py `__. Parameters ---------- @@ -189,11 +125,13 @@ def __init__( self._add_params(variables) -@deprecated("2018-06-30", "This layer will be deprecated soon as :class:`LambdaLayer` can do the same thing.") +@deprecated( + date="2018-06-30", instructions="This layer will be deprecated soon as :class:`LambdaLayer` can do the same thing" +) class EstimatorLayer(Layer): """A layer that accepts a user-defined model. - It is similar with :class:`KerasLayer`, see `tutorial_keras.py `__. + It is similar to :class:`KerasLayer`; see `tutorial_keras.py `__. Parameters ---------- diff --git a/tensorlayer/layers/lambda_layers.py b/tensorlayer/layers/lambda_layers.py new file mode 100644 index 000000000..c37c5d036 --- /dev/null +++ b/tensorlayer/layers/lambda_layers.py @@ -0,0 +1,138 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +import tensorflow as tf + +from tensorlayer.layers.core import Layer +from tensorlayer.layers.core import TF_GRAPHKEYS_VARIABLES + +from tensorlayer import tl_logging as logging + +from tensorlayer.decorators import deprecated_alias + +__all__ = [ + 'LambdaLayer', + 'ElementwiseLambdaLayer', +] + + +class LambdaLayer(Layer): + """A layer that takes a user-defined function using TensorFlow Lambda, for multiple inputs see :class:`ElementwiseLambdaLayer`. + + Parameters + ---------- + prev_layer : :class:`Layer` + Previous layer.
+ fn : function + The function applied to the outputs of the previous layer. + fn_args : dictionary or None + The arguments for the function (optional). + name : str + A unique layer name. + + Examples + --------- + Non-parametric case + + >>> import tensorflow as tf + >>> import tensorlayer as tl + >>> x = tf.placeholder(tf.float32, shape=[None, 1], name='x') + >>> net = tl.layers.InputLayer(x, name='input') + >>> net = tl.layers.LambdaLayer(net, lambda x: 2*x, name='lambda') + + Parametric case, merge other wrappers into TensorLayer + + >>> from keras.layers import * + >>> from tensorlayer.layers import * + >>> def keras_block(x): + >>> x = Dropout(0.8)(x) + >>> x = Dense(800, activation='relu')(x) + >>> x = Dropout(0.5)(x) + >>> x = Dense(800, activation='relu')(x) + >>> x = Dropout(0.5)(x) + >>> logits = Dense(10, activation='linear')(x) + >>> return logits + >>> net = InputLayer(x, name='input') + >>> net = LambdaLayer(net, fn=keras_block, name='keras') + + """ + + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__( + self, + prev_layer, + fn, + fn_args=None, + name='lambda_layer', + ): + + super(LambdaLayer, self).__init__(prev_layer=prev_layer, fn_args=fn_args, name=name) + + logging.info("LambdaLayer %s" % self.name) + + if fn is None: + raise AssertionError("The `fn` argument cannot be None") + + with tf.variable_scope(name) as vs: + self.outputs = fn(self.inputs, **self.fn_args) + variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) + + self._add_layers(self.outputs) + self._add_params(variables) + + +class ElementwiseLambdaLayer(Layer): + """A layer that uses a custom function to combine multiple :class:`Layer` inputs. + + Parameters + ---------- + layers : list of :class:`Layer` + The list of layers to combine. + fn : function + The function applied to the outputs of the previous layers. + fn_args : dictionary or None + The arguments for the function (optional). + act : activation function + The activation function of this layer. + name : str + A unique layer name.
+ + Examples + -------- + z = mean + noise * tf.exp(std * 0.5) + + >>> import tensorflow as tf + >>> import tensorlayer as tl + + >>> def func(noise, mean, std): + >>> return mean + noise * tf.exp(std * 0.5) + + >>> x = tf.placeholder(tf.float32, [None, 200]) + >>> noise_tensor = tf.random_normal(tf.stack([tf.shape(x)[0], 200])) + >>> noise = tl.layers.InputLayer(noise_tensor) + >>> net = tl.layers.InputLayer(x) + >>> net = tl.layers.DenseLayer(net, n_units=200, act=tf.nn.relu, name='dense1') + >>> mean = tl.layers.DenseLayer(net, n_units=200, name='mean') + >>> std = tl.layers.DenseLayer(net, n_units=200, name='std') + >>> z = tl.layers.ElementwiseLambdaLayer([noise, mean, std], fn=func, name='z') + """ + + def __init__( + self, + layers, + fn, + fn_args=None, + act=None, + name='elementwiselambda_layer', + ): + + super(ElementwiseLambdaLayer, self).__init__(prev_layer=layers, act=act, fn_args=fn_args, name=name) + logging.info("ElementwiseLambdaLayer %s" % self.name) + + with tf.variable_scope(name) as vs: + self.outputs = self._apply_activation(fn(*self.inputs, **self.fn_args)) + + variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) + + self._add_layers(self.outputs) + self._add_params(variables) diff --git a/tensorlayer/layers/merge.py b/tensorlayer/layers/merge.py index ecdc653e7..5b3cc33f7 100644 --- a/tensorlayer/layers/merge.py +++ b/tensorlayer/layers/merge.py @@ -4,14 +4,12 @@ import tensorflow as tf from tensorlayer.layers.core import Layer -from tensorlayer.layers.core import TF_GRAPHKEYS_VARIABLES from tensorlayer import tl_logging as logging __all__ = [ 'ConcatLayer', 'ElementwiseLayer', - 'ElementwiseLambdaLayer', ] @@ -133,60 +131,3 @@ def __init__( # self.all_drop.update(dict(layers[i].all_drop)) self._add_layers(self.outputs) - - -class ElementwiseLambdaLayer(Layer): - """A layer that use a custom function to combine multiple :class:`Layer` inputs. - - Parameters - ---------- - layers : list of :class:`Layer` - The list of layers to combine. - fn : function - The function that applies to the outputs of previous layer. - fn_args : dictionary or None - The arguments for the function (option). - act : activation function - The activation function of this layer. - name : str - A unique layer name. 
- - Examples - -------- - z = mean + noise * tf.exp(std * 0.5) - - >>> import tensorflow as tf - >>> import tensorlayer as tl - - >>> def func(noise, mean, std): - >>> return mean + noise * tf.exp(std * 0.5) - - >>> x = tf.placeholder(tf.float32, [None, 200]) - >>> noise_tensor = tf.random_normal(tf.stack([tf.shape(x)[0], 200])) - >>> noise = tl.layers.InputLayer(noise_tensor) - >>> net = tl.layers.InputLayer(x) - >>> net = tl.layers.DenseLayer(net, n_units=200, act=tf.nn.relu, name='dense1') - >>> mean = tl.layers.DenseLayer(net, n_units=200, name='mean') - >>> std = tl.layers.DenseLayer(net, n_units=200, name='std') - >>> z = tl.layers.ElementwiseLambdaLayer([noise, mean, std], fn=func, name='z') - """ - - def __init__( - self, - layers, - fn, - fn_args=None, - act=None, - name='elementwiselambda_layer', - ): - - super(ElementwiseLambdaLayer, self).__init__(prev_layer=layers, act=act, fn_args=fn_args, name=name) - logging.info("ElementwiseLambdaLayer %s" % self.name) - - with tf.variable_scope(name) as vs: - self.outputs = self._apply_activation(fn(*self.inputs, **self.fn_args)) - - variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) - - self._add_layers(self.outputs) - self._add_params(variables) diff --git a/tensorlayer/layers/normalization.py b/tensorlayer/layers/normalization.py index da54114fd..dd5b289a4 100644 --- a/tensorlayer/layers/normalization.py +++ b/tensorlayer/layers/normalization.py @@ -111,6 +111,7 @@ def __init__( is_train=False, beta_init=tf.zeros_initializer, gamma_init=tf.random_normal_initializer(mean=1.0, stddev=0.002), + moving_mean_init=tf.zeros_initializer(), name='batchnorm_layer', ): super(BatchNormLayer, self).__init__(prev_layer=prev_layer, act=act, name=name) @@ -155,7 +156,6 @@ def __init__( gamma = None # 2. - moving_mean_init = tf.zeros_initializer() moving_mean = tf.get_variable( 'moving_mean', params_shape, initializer=moving_mean_init, dtype=LayersConfig.tf_dtype, trainable=False diff --git a/tensorlayer/layers/object_detection.py b/tensorlayer/layers/object_detection.py index 6b1f30f21..78575c263 100644 --- a/tensorlayer/layers/object_detection.py +++ b/tensorlayer/layers/object_detection.py @@ -40,7 +40,7 @@ class ROIPoolingLayer(Layer): Notes ----------- - This implementation is imported from `Deepsense-AI `__ . - - Please install it by the instruction `HERE `__. + - Please install it following the instructions `HERE `__. """ diff --git a/tensorlayer/layers/quantize.py b/tensorlayer/layers/quantize.py new file mode 100644 index 000000000..bbbb3c9ff --- /dev/null +++ b/tensorlayer/layers/quantize.py @@ -0,0 +1,45 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +import tensorflow as tf + +from tensorlayer.layers.core import Layer + +from tensorlayer.layers.utils import quantize + +from tensorlayer import tl_logging as logging + +from tensorlayer.decorators import deprecated_alias + +__all__ = [ + 'SignLayer', +] + + +class SignLayer(Layer): + """The :class:`SignLayer` class is for quantizing the layer outputs to -1 or 1 at inference time. + + Parameters + ---------- + prev_layer : :class:`Layer` + Previous layer. + name : str + A unique layer name.
+ + """ + + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__( + self, + prev_layer, + name='sign', + ): + super(SignLayer, self).__init__(prev_layer=prev_layer, name=name) + + logging.info("SignLayer %s" % self.name) + + with tf.variable_scope(name): + # self.outputs = tl.act.sign(self.inputs) + self.outputs = quantize(self.inputs) + + self._add_layers(self.outputs) diff --git a/tensorlayer/layers/recurrent.py b/tensorlayer/layers/recurrent.py index 339fb6d18..8aa1f8c27 100644 --- a/tensorlayer/layers/recurrent.py +++ b/tensorlayer/layers/recurrent.py @@ -1473,7 +1473,7 @@ class Seq2Seq(Layer): See `Model `__ and `Sequence to Sequence Learning with Neural Networks `__. - - Please check this example `Chatbot in 200 lines of code `__. + - Please check this example `Chatbot in 200 lines of code `__. - The Author recommends users to read the source code of :class:`DynamicRNNLayer` and :class:`Seq2Seq`. Parameters diff --git a/tensorlayer/layers/binary.py b/tensorlayer/layers/scale.py similarity index 61% rename from tensorlayer/layers/binary.py rename to tensorlayer/layers/scale.py index e9d590e01..6417b2037 100644 --- a/tensorlayer/layers/binary.py +++ b/tensorlayer/layers/scale.py @@ -5,15 +5,12 @@ from tensorlayer.layers.core import Layer -from tensorlayer.layers.utils import quantize - from tensorlayer import tl_logging as logging from tensorlayer.decorators import deprecated_alias __all__ = [ 'ScaleLayer', - 'SignLayer', ] @@ -49,32 +46,3 @@ def __init__( self._add_layers(self.outputs) self._add_params(scale) - - -class SignLayer(Layer): - """The :class:`SignLayer` class is for quantizing the layer outputs to -1 or 1 while inferencing. - - Parameters - ---------- - prev_layer : :class:`Layer` - Previous layer. - name : a str - A unique layer name. 
- - """ - - @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release - def __init__( - self, - prev_layer, - name='sign', - ): - super(SignLayer, self).__init__(prev_layer=prev_layer, name=name) - - logging.info("SignLayer %s" % (self.name)) - - with tf.variable_scope(name): - # self.outputs = tl.act.sign(self.inputs) - self.outputs = quantize(self.inputs) - - self._add_layers(self.outputs) diff --git a/tensorlayer/layers/utils.py b/tensorlayer/layers/utils.py index 4f8c0ec9b..00706f435 100644 --- a/tensorlayer/layers/utils.py +++ b/tensorlayer/layers/utils.py @@ -3,11 +3,11 @@ import tensorflow as tf -from tensorflow.python.util.deprecation import deprecated from tensorflow.python.ops.rnn_cell import LSTMStateTuple from tensorlayer import tl_logging as logging +from tensorlayer.decorators import deprecated from tensorlayer.decorators import deprecated_alias __all__ = [ @@ -37,7 +37,7 @@ def cabs(x): return tf.minimum(1.0, tf.abs(x), name='cabs') -@deprecated("2018-06-30", "TensorLayer relies on TensorFlow to check naming.") +@deprecated(date="2018-06-30", instructions="TensorLayer relies on TensorFlow to check naming") def clear_layers_name(): logging.warning('this method is DEPRECATED and has no effect, please remove it from your code.') @@ -195,7 +195,7 @@ def get_variables_with_name(name=None, train_only=True, verbose=False): return d_vars -@deprecated("2018-09-30", "This API is deprecated in favor of `tf.global_variables_initializer`.") +@deprecated(date="2018-09-30", instructions="This API is deprecated in favor of `tf.global_variables_initializer`") def initialize_global_variables(sess): """Initialize the global variables of TensorFlow. @@ -365,7 +365,7 @@ def quantize_weight(x, bitW, force_quantization=False): return 2 * _quantize_dorefa(x, bitW) - 1 -@deprecated("2018-06-30", "TensorLayer relies on TensorFlow to check name reusing.") +@deprecated(date="2018-06-30", instructions="TensorLayer relies on TensorFlow to check name reusing") def set_name_reuse(enable=True): logging.warning('this method is DEPRECATED and has no effect, please remove it from your code.') diff --git a/tensorlayer/visualize.py b/tensorlayer/visualize.py index 73be9119d..547170860 100644 --- a/tensorlayer/visualize.py +++ b/tensorlayer/visualize.py @@ -115,6 +115,8 @@ def save_images(images, size, image_path='_temp.png'): Examples --------- + >>> import numpy as np + >>> import tensorlayer as tl >>> images = np.random.rand(64, 100, 100, 3) >>> tl.visualize.save_images(images, [8, 8], 'temp.png') @@ -132,6 +134,11 @@ def merge(images, size): return img def imsave(images, size, path): + if np.max(images) <= 1 and (-1 <= np.min(images) < 0): + images = ((images + 1) * 127.5).astype(np.uint8) + elif np.max(images) <= 1 and np.min(images) >= 0: + images = (images * 255).astype(np.uint8) + return imageio.imwrite(path, merge(images, size)) if len(images) > size[0] * size[1]: diff --git a/tests/test_layers_special_activation.py b/tests/test_layers_activation.py similarity index 100% rename from tests/test_layers_special_activation.py rename to tests/test_layers_activation.py