From 3a44af12618e3dc7fc0a649e25ca8ae41bfddb25 Mon Sep 17 00:00:00 2001
From: zsdonghao
Date: Sat, 29 Oct 2016 17:39:37 +0100
Subject: [PATCH] [TEST] TensorFlow 10, 11 Python 2, 3

---
 README.md                      |  2 +-
 data/__init__.py               |  6 ++++++
 tensorlayer/layers.py          | 15 +++++++++++----
 tensorlayer/visualize.py       |  6 +++---
 tutorial_generate_text.py      | 11 ++++++++---
 tutorial_inceptionV3_tfslim.py |  2 +-
 tutorial_mnist.py              |  4 ++--
 tutorial_tfrecord.py           |  5 +++--
 tutorial_tfrecord2.py          |  2 +-
 tutorial_tfrecord3.py          | 12 +++++++++---
 10 files changed, 45 insertions(+), 20 deletions(-)
 create mode 100644 data/__init__.py

diff --git a/README.md b/README.md
index 2e4d61345..1072deae0 100644
--- a/README.md
+++ b/README.md
@@ -38,7 +38,7 @@ If you already had the pre-requisites ready, the simplest way to install TensorL
 
 ```bash
-[for stable version] pip install tensorlayer==1.2.2
+[for stable version] pip install tensorlayer==1.2.3
 [for master version] pip install git+https://github.com/zsdonghao/tensorlayer.git
 ```
 
diff --git a/data/__init__.py b/data/__init__.py
new file mode 100644
index 000000000..89a3bda8e
--- /dev/null
+++ b/data/__init__.py
@@ -0,0 +1,6 @@
+
+from __future__ import absolute_import
+
+
+from . import imagenet_classes
+# from . import
diff --git a/tensorlayer/layers.py b/tensorlayer/layers.py
index 40abe2b8e..baaeea023 100755
--- a/tensorlayer/layers.py
+++ b/tensorlayer/layers.py
@@ -2671,15 +2671,22 @@ def __init__(
         layer = None,
         slim_layer = None,
         slim_args = {},
-        name ='slim_layer',
+        name ='InceptionV3',
     ):
         Layer.__init__(self, name=name)
         self.inputs = layer.outputs
         print("  tensorlayer:Instantiate SlimNetsLayer %s: %s" % (self.name, slim_layer.__name__))
 
-        with tf.variable_scope(name) as vs:
-            net, end_points = slim_layer(self.inputs, **slim_args)
-            slim_variables = tf.get_collection(tf.GraphKeys.VARIABLES, scope=vs.name)
+        # with tf.variable_scope(name) as vs:
+        #     net, end_points = slim_layer(self.inputs, **slim_args)
+        #     slim_variables = tf.get_collection(tf.GraphKeys.VARIABLES, scope=vs.name)
+
+        net, end_points = slim_layer(self.inputs, **slim_args)
+
+        slim_variables = tf.get_collection(tf.GraphKeys.VARIABLES, scope=name)
+        if slim_variables == []:
+            print("No variables found under %s : the name of SlimNetsLayer should match the beginning of the ckpt file, see tutorial_inceptionV3_tfslim.py for more details" % name)
+
         self.outputs = net
diff --git a/tensorlayer/visualize.py b/tensorlayer/visualize.py
index 7ad153d3a..908cf9fec 100644
--- a/tensorlayer/visualize.py
+++ b/tensorlayer/visualize.py
@@ -58,7 +58,7 @@ def W(W=None, second=10, saveable=True, shape=[28,28], name='mnist', fig_idx=239
             # feature = np.zeros_like(feature)
             plt.imshow(np.reshape(feature ,(shape[0],shape[1])), cmap='gray', interpolation="nearest")#, vmin=np.min(feature), vmax=np.max(feature))
-            plt.title(name)
+            # plt.title(name)
             # ------------------------------------------------------------
             # plt.imshow(np.reshape(W[:,count-1] ,(np.sqrt(size),np.sqrt(size))), cmap='gray', interpolation="nearest")
             plt.gca().xaxis.set_major_locator(plt.NullLocator())    # distable tick
@@ -223,11 +223,11 @@ def images2d(images=None, second=10, saveable=True, name='images', dtype=None,
                 plt.imshow(
                         np.reshape(images[count-1,:,:], (n_row, n_col)),
                         cmap='gray', interpolation="nearest")
-                plt.title(name)
+                # plt.title(name)
             elif n_color == 3:
                 plt.imshow(images[count-1,:,:], cmap='gray', interpolation="nearest")
-                plt.title(name)
+                # plt.title(name)
             else:
                 raise Exception("Unknown n_color")
             plt.gca().xaxis.set_major_locator(plt.NullLocator())    # distable tick
diff --git a/tutorial_generate_text.py b/tutorial_generate_text.py
index 3a4f9c465..8626594ea 100755
--- a/tutorial_generate_text.py
+++ b/tutorial_generate_text.py
@@ -1,4 +1,9 @@
- # Copyright 2016 TensorLayer. All Rights Reserved.
+#! /usr/bin/python
+# -*- coding: utf8 -*-
+
+
+
+# Copyright 2016 TensorLayer. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -264,7 +269,7 @@ def inference(x, is_training, num_steps, reuse=None):
         network = tl.layers.DropoutLayer(network, keep=keep_prob, name='drop1')
         network = tl.layers.RNNLayer(network,
                     cell_fn=tf.nn.rnn_cell.BasicLSTMCell,
-                    cell_init_args={'forget_bias': 0.0},# 'state_is_tuple': True},
+                    cell_init_args={'forget_bias': 0.0, 'state_is_tuple': True},
                     n_hidden=hidden_size,
                     initializer=tf.random_uniform_initializer(-init_scale, init_scale),
                     n_steps=num_steps,
@@ -275,7 +280,7 @@ def inference(x, is_training, num_steps, reuse=None):
         network = tl.layers.DropoutLayer(network, keep=keep_prob, name='drop2')
         network = tl.layers.RNNLayer(network,
                     cell_fn=tf.nn.rnn_cell.BasicLSTMCell,
-                    cell_init_args={'forget_bias': 0.0}, # 'state_is_tuple': True},
+                    cell_init_args={'forget_bias': 0.0, 'state_is_tuple': True},
                     n_hidden=hidden_size,
                     initializer=tf.random_uniform_initializer(-init_scale, init_scale),
                     n_steps=num_steps,
diff --git a/tutorial_inceptionV3_tfslim.py b/tutorial_inceptionV3_tfslim.py
index 764654183..89b02ae00 100644
--- a/tutorial_inceptionV3_tfslim.py
+++ b/tutorial_inceptionV3_tfslim.py
@@ -100,7 +100,7 @@ def print_prob(prob):
             # 'reuse' : None,
             # 'scope' : 'InceptionV3'
             },
-            name=''
+            name='InceptionV3'  # <-- the name should be the same as the ckpt model
         )
 
 saver = tf.train.Saver()
diff --git a/tutorial_mnist.py b/tutorial_mnist.py
index 976e9a7d0..146ec1d58 100644
--- a/tutorial_mnist.py
+++ b/tutorial_mnist.py
@@ -103,10 +103,10 @@ def main_test_layers(model='relu'):
     params = network.all_params
 
     # train
-    n_epoch = 1
+    n_epoch = 100
     batch_size = 128
     learning_rate = 0.0001
-    print_freq = 10
+    print_freq = 5
 
     train_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999,
                             epsilon=1e-08, use_locking=False).minimize(cost)
diff --git a/tutorial_tfrecord.py b/tutorial_tfrecord.py
index 959f4c7ce..1ac02750c 100644
--- a/tutorial_tfrecord.py
+++ b/tutorial_tfrecord.py
@@ -4,9 +4,10 @@
 
 import tensorflow as tf
 import tensorlayer as tl
-import os
+import numpy as np
 from PIL import Image
 import io
+import os
 
 
 """
@@ -67,7 +68,7 @@
    label = example.features.feature['label'].int64_list.value
    ## converts a image from bytes
    image = Image.frombytes('RGB', (224, 224), img_raw[0])
-   tl.visualize.frame(image, second=0.5, saveable=False, name='frame', fig_idx=1283)
+   tl.visualize.frame(np.asarray(image), second=0.5, saveable=False, name='frame', fig_idx=1283)
    print(label)
 
diff --git a/tutorial_tfrecord2.py b/tutorial_tfrecord2.py
index e85baa4c6..e8f43e6d4 100755
--- a/tutorial_tfrecord2.py
+++ b/tutorial_tfrecord2.py
@@ -45,7 +45,7 @@
     ## Visualize a image
     # tl.visualize.frame(np.asarray(img, dtype=np.uint8), second=1, saveable=False, name='frame', fig_idx=1236)
     label = int(y_train[index])
-    print(label)
+    # print(label)
     ## Convert the bytes back to image as follow:
     # image = Image.frombytes('RGB', (32, 32), img_raw)
     # image = np.fromstring(img_raw, np.float32)
diff --git a/tutorial_tfrecord3.py b/tutorial_tfrecord3.py
index 45b16e38c..1734f9736 100644
--- a/tutorial_tfrecord3.py
+++ b/tutorial_tfrecord3.py
@@ -117,7 +117,7 @@ def _bytes_feature_list(values):
 c = tf.contrib.learn.run_n(features, n=1, feed_dict=None)
 from PIL import Image
 im = Image.frombytes('RGB', (299, 299), c[0]['image/img_raw'])
-tl.visualize.frame(im, second=1, saveable=False, name='frame', fig_idx=1236)
+tl.visualize.frame(np.asarray(im), second=1, saveable=False, name='frame', fig_idx=1236)
 c = tf.contrib.learn.run_n(sequence_features, n=1, feed_dict=None)
 print(c[0])
 
@@ -334,10 +334,16 @@ def prefetch_input_data(reader,
 img = tf.decode_raw(context["image/img_raw"], tf.uint8)
 img = tf.reshape(img, [height, width, 3])
 img = tf.image.convert_image_dtype(img, dtype=tf.float32)
+# for TensorFlow 0.10
+# img = tf.image.resize_images(img,
+#                              new_height=resize_height,
+#                              new_width=resize_width,
+#                              method=tf.image.ResizeMethod.BILINEAR)
+# for TensorFlow 0.11
 img = tf.image.resize_images(img,
-                             new_height=resize_height,
-                             new_width=resize_width,
+                             size=(resize_height, resize_width),
                              method=tf.image.ResizeMethod.BILINEAR)
+
 # Crop to final dimensions.
 if is_training:
     img = tf.random_crop(img, [height, width, 3])
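
Note: the last hunk above handles the TensorFlow 0.10 -> 0.11 change to tf.image.resize_images, whose new_height/new_width keywords were replaced by a single size argument. A minimal sketch of supporting both releases from one script is shown below; the helper name resize_compat is hypothetical, and only tf.__version__, tf.image.resize_images, and tf.image.ResizeMethod.BILINEAR (all already used in the patch) are assumed.

    import tensorflow as tf

    def resize_compat(img, resize_height, resize_width):
        """Resize an image tensor under either the 0.10 or the 0.11 API."""
        if tf.__version__.startswith('0.10'):
            # TensorFlow 0.10 signature: separate new_height / new_width keywords.
            return tf.image.resize_images(img,
                                          new_height=resize_height,
                                          new_width=resize_width,
                                          method=tf.image.ResizeMethod.BILINEAR)
        # TensorFlow 0.11 and later: a single `size` argument.
        return tf.image.resize_images(img,
                                      size=(resize_height, resize_width),
                                      method=tf.image.ResizeMethod.BILINEAR)

With such a guard, the tutorial line could read img = resize_compat(img, resize_height, resize_width) instead of hard-coding one signature and keeping the other commented out.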