diff --git a/.travis.yml b/.travis.yml
index b76d596..d390d20 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -7,10 +7,12 @@ before_install:
- sudo apt-get install -y software-properties-common
- sudo add-apt-repository -y "deb http://us.archive.ubuntu.com/ubuntu/ trusty universe multiverse restricted"
- sudo apt-get update -qq
- - sudo apt-get install -y opencl-headers fglrx
+ - sudo apt-get install -y opencl-headers fglrx ocl-icd-opencl-dev
env:
- TOX_ENV=py3
- TOX_ENV=py27
+ - TOX_ENV=py27-tf
+ - TOX_ENV=py3-tf
- TOX_ENV=docs
install:
- pip install --upgrade pip
diff --git a/docs/backends.rst b/docs/backends.rst
index 6f5208b..42992ef 100644
--- a/docs/backends.rst
+++ b/docs/backends.rst
@@ -1,10 +1,12 @@
Multiple Backend Support
========================
-The ``dtcwt`` library currently provides two backends for computing the wavelet
-transform: a `NumPy <http://www.numpy.org/>`_ based implementation and an OpenCL
+
+The ``dtcwt`` library currently provides three backends for computing the wavelet
+transform: a `NumPy <http://www.numpy.org/>`_ based implementation, an OpenCL
implementation which uses the `PyOpenCL <http://mathema.tician.de/software/pyopencl/>`_
-bindings for Python.
+bindings for Python, and a Tensorflow implementation which uses the
+`Tensorflow <http://www.tensorflow.org>`_ bindings for Python.
NumPy
'''''
@@ -26,27 +28,59 @@ may not be full-featured.
OpenCL support depends on the `PyOpenCL
<http://mathema.tician.de/software/pyopencl/>`_ package being installed and an
OpenCL implementation being installed on your machine. Attempting to use an
-OpenCL backen without both of these being present will result in a runtime (but
+OpenCL backend without both of these being present will result in a runtime (but
not import-time) exception.
+Tensorflow
+''''''''''
+
+If you want to take advantage of having a GPU on your machine,
+some transforms and algorithms have been implemented with a Tensorflow backend.
+This backend provides an API identical to the NumPy backend: NumPy-based input
+may be passed to the Tensorflow backend in the same manner as it is passed to
+the NumPy backend, in which case it will be converted to a Tensorflow variable,
+the transform performed, and the result converted back to a NumPy array
+afterwards. This conversion between types can be avoided by passing a
+Tensorflow variable directly to the dtcwt Transforms.
+
+The real speedup from using GPUs comes from parallel processing. For this
+reason, when using the Tensorflow backend, the Transforms can accept batches of
+images via the `forward_channels` and `inverse_channels` methods, as in the
+sketch below. More information is in the :ref:`tensorflowbackend` section.
+
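+A minimal sketch of batch usage (assuming Tensorflow is installed; ``imgs`` is
+a hypothetical NumPy array of shape ``[batch, height, width]``):
+
+.. code-block:: python
+
+    import dtcwt
+
+    dtcwt.push_backend('tf')
+    xfm = dtcwt.Transform2d()
+    # "nhw": batch dimension first, then rows and columns
+    p = xfm.forward_channels(imgs, data_format="nhw", nlevels=3)
+    lowpass = p.lowpass  # evaluated back to a NumPy array
+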
+Tensorflow support depends on the
+`Tensorflow <http://www.tensorflow.org>`_ python package being installed in the
+current python environment, as well as the necessary CUDA and CUDNN libraries.
+Attempting to use the Tensorflow backend without the python package available
+will result in a runtime (but not import-time) exception. Attempting to use the
+Tensorflow backend without the CUDA and CUDNN libraries properly installed and
+linked will still select the Tensorflow backend, but operations will be run on
+the CPU rather than the GPU.
+
+If you do not have a GPU, some speedup can still be seen when using Tensorflow
+on the CPU compared to the plain NumPy backend, as Tensorflow will naturally
+use multiple processor cores.
+
Which backend should I use?
'''''''''''''''''''''''''''
-The top-level transform routines, such as :py:class`dtcwt.Transform2d`, will
+The top-level transform routines, such as :py:class:`dtcwt.Transform2d`, will
automatically use the NumPy backend. If you are not primarily focussed on
speed, this is the correct choice since the NumPy backend has the fullest
feature support, is the best tested and behaves correctly given single- and
double-precision input.
If you care about speed and need only single-precision calculations, the OpenCL
-backend can provide significant speed-up. On the author's system, the 2D
-transform sees around a times 10 speed improvement.
+or Tensorflow backends can provide significant speed-up.
+On the author's system, the 2D transform sees around a 10 times speed
+improvement for the OpenCL backend, and an 8-10 times speed-up for the
+Tensorflow backend.
Using a backend
'''''''''''''''
-The NumPy and OpenCL backends live in the :py:mod:`dtcwt.numpy`
-and :py:mod:`dtcwt.opencl` modules respectively. Both provide
+The NumPy, OpenCL and Tensorflow backends live in the :py:mod:`dtcwt.numpy`,
+:py:mod:`dtcwt.opencl`, and :py:mod:`dtcwt.tf` modules respectively. All provide
implementations of some subset of the DTCWT library functionality.
Access to the 2D transform is via a :py:class:`dtcwt.Transform2d` instance. For
@@ -72,10 +106,19 @@ switch to the OpenCL backend
.. code-block:: python
dtcwt.push_backend('opencl')
+ xfm = Transform2d()
# ... Transform2d, etc now use OpenCL ...
-As is suggested by the name, changing the backend manipulates a stack behind
-the scenes and so one can temporarily switch backend using
+and to switch to the Tensorflow backend
+
+.. code-block:: python
+
+ dtcwt.push_backend('tf')
+ xfm = Transform2d()
+ # ... Transform2d, etc now use Tensorflow ...
+
+As is suggested by the name, changing the backend manipulates a stack behind the
+scenes and so one can temporarily switch backend using
:py:func:`dtcwt.push_backend` and :py:func:`dtcwt.pop_backend`
.. code-block:: python
diff --git a/docs/reference.rst b/docs/reference.rst
index 483f835..7ef1ebd 100644
--- a/docs/reference.rst
+++ b/docs/reference.rst
@@ -74,3 +74,23 @@ OpenCL
.. automodule:: dtcwt.opencl.lowlevel
:members:
+.. _tensorflowbackend:
+
+Tensorflow
+''''''''''
+Currently the Tensorflow backend only supports single precision operations, and
+only has functionality for the Transform1d() and Transform2d() classes (i.e.
+changing the backend to 'tf' will still use the NumPy Transform3d() class).
+
+To preserve functionality, the Transform1d() and Transform2d() classes have
+a `forward` method which behaves identically to the NumPy backend. However, to
+get speedups with Tensorflow, we want to feed the transform batches of images.
+For this reason, the 1-D and 2-D transforms also have `forward_channels` and
+`inverse_channels` methods. See the documentation below for how to use these.
+
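+A short sketch of the batch API (``batch`` is a hypothetical NumPy array of
+shape ``[n, h, w]``; Tensorflow is assumed to be installed):
+
+.. code-block:: python
+
+    import dtcwt.tf
+
+    xfm = dtcwt.tf.Transform1d()
+    # transform the columns of every matrix in the batch at once
+    p = xfm.forward_channels(batch, nlevels=4)
+    # and invert the whole batch again
+    y = xfm.inverse_channels(p)
+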
+.. automodule:: dtcwt.tf
+ :members:
+ :inherited-members:
+
+.. automodule:: dtcwt.tf.lowlevel
+ :members:
diff --git a/dtcwt/__init__.py b/dtcwt/__init__.py
index 1971d59..58b8050 100644
--- a/dtcwt/__init__.py
+++ b/dtcwt/__init__.py
@@ -19,6 +19,7 @@
import dtcwt.numpy
import dtcwt.opencl
+import dtcwt.tf
# An array of dictionaries. Each dictionary stores the top-level module
# variables for that backend.
@@ -38,6 +39,12 @@
'Transform3d': dtcwt.numpy.Transform3d,
'Pyramid': dtcwt.opencl.Pyramid,
},
+ 'tf': {
+ 'Transform1d': dtcwt.tf.Transform1d,
+ 'Transform2d': dtcwt.tf.Transform2d,
+ 'Transform3d': dtcwt.numpy.Transform3d,
+ 'Pyramid': dtcwt.tf.Pyramid,
+ },
}
def _update_from_current_backend():
diff --git a/dtcwt/_version.py b/dtcwt/_version.py
index 39f7286..fa470ba 100644
--- a/dtcwt/_version.py
+++ b/dtcwt/_version.py
@@ -1,2 +1,2 @@
# IMPORTANT: before release, remove the 'devN' tag from the release name
-__version__ = '0.12.0dev1'
+__version__ = '0.13.0dev1'
diff --git a/dtcwt/numpy/transform2d.py b/dtcwt/numpy/transform2d.py
index a3da89f..683e4cc 100644
--- a/dtcwt/numpy/transform2d.py
+++ b/dtcwt/numpy/transform2d.py
@@ -74,8 +74,10 @@ def forward(self, X, nlevels=3, include_scale=False):
original_size = X.shape
if len(X.shape) >= 3:
- raise ValueError('The entered image is {0}, please enter each image slice separately.'.
- format('x'.join(list(str(s) for s in X.shape))))
+ raise ValueError('The entered image is {0}, which is invalid '.
+ format('x'.join(list(str(s) for s in X.shape))) +
+ 'for the 2D transform in a numpy backend. ' +
+ 'Please enter each image slice separately.')
# The next few lines of code check to see if the image is odd in size, if so an extra ...
# row/column will be added to the bottom/right of the image
@@ -150,9 +152,9 @@ def forward(self, X, nlevels=3, include_scale=False):
Yh[level][:,:,0:6:5] = q2c(coldfilt(Hi,h0b,h0a).T) # Horizontal
Yh[level][:,:,2:4:1] = q2c(coldfilt(Lo,h1b,h1a).T) # Vertical
if len(self.qshift) >= 12:
- Yh[level][:,:,1:5:3] = q2c(coldfilt(Ba,h2b,h2a).T) # Diagonal
+ Yh[level][:,:,1:5:3] = q2c(coldfilt(Ba,h2b,h2a).T) # Diagonal
else:
- Yh[level][:,:,1:5:3] = q2c(coldfilt(Hi,h1b,h1a).T) # Diagonal
+ Yh[level][:,:,1:5:3] = q2c(coldfilt(Hi,h1b,h1a).T) # Diagonal
if include_scale:
Yscale[level] = LoLo
@@ -267,7 +269,7 @@ def inverse(self, pyramid, gain_mask=None):
if np.any(np.array(Z.shape) != S[:2]):
raise ValueError('Sizes of highpasses are not valid for DTWAVEIFM2')
-
+
current_level = current_level - 1
if current_level == 1:
@@ -300,7 +302,7 @@ def q2c(y):
"""
Convert from quads in y to complex numbers in z.
"""
-
+
j2 = (np.sqrt(0.5) * np.array([1, 1j])).astype(appropriate_complex_type_for(y))
# Arrange pixels from the corners of the quads into
@@ -310,7 +312,7 @@ def q2c(y):
# | |
# c----d
- # Combine (a,b) and (d,c) to form two complex subimages.
+ # Combine (a,b) and (d,c) to form two complex subimages.
p = y[0::2, 0::2]*j2[0] + y[0::2, 1::2]*j2[1] # p = (a + jb) / sqrt(2)
q = y[1::2, 1::2]*j2[0] - y[1::2, 0::2]*j2[1] # q = (d - jc) / sqrt(2)
diff --git a/dtcwt/tf/__init__.py b/dtcwt/tf/__init__.py
new file mode 100644
index 0000000..0ce195e
--- /dev/null
+++ b/dtcwt/tf/__init__.py
@@ -0,0 +1,16 @@
+"""
+Provide low-level Tensorflow accelerated operations. This backend requires that
+Tensorflow be installed. Works best with a GPU but still offers good
+improvements with a CPU.
+
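+A hedged usage sketch (assumes Tensorflow is installed):
+
+.. code-block:: python
+
+    import dtcwt
+    dtcwt.push_backend('tf')  # Transform1d/2d now use the tf backend
+    xfm = dtcwt.Transform2d()
+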
+"""
+
+from .common import Pyramid
+from .transform1d import Transform1d
+from .transform2d import Transform2d
+
+__all__ = [
+ 'Pyramid',
+ 'Transform1d',
+ 'Transform2d',
+]
diff --git a/dtcwt/tf/common.py b/dtcwt/tf/common.py
new file mode 100644
index 0000000..409656c
--- /dev/null
+++ b/dtcwt/tf/common.py
@@ -0,0 +1,75 @@
+from __future__ import absolute_import
+
+try:
+ import tensorflow as tf
+except ImportError:
+ # The lack of tensorflow will be caught by the low-level routines.
+ pass
+
+
+class Pyramid(object):
+ """A tensorflow representation of a transform domain signal.
+
+ An interface-compatible version of
+ :py:class:`dtcwt.Pyramid` where the initialiser
+ arguments are assumed to be :py:class:`tf.Variable` instances.
+
+ The attributes defined in :py:class:`dtcwt.Pyramid`
+ are implemented via properties. The original tf arrays may be accessed
+ via the ``..._op(s)`` attributes.
+
+ .. py:attribute:: lowpass_op
+
+ A tensorflow tensor that can be evaluated in a session to return
+ the coarsest scale lowpass signal for the input, X.
+
+ .. py:attribute:: highpasses_op
+
+ A tuple of tensorflow tensors, where each element is the complex
+ subband coefficients for corresponding scales finest to coarsest.
+
+ .. py:attribute:: scales_ops
+
+ *(optional)* A tuple where each element is a tensorflow tensor
+ containing the lowpass signal for corresponding scales finest to
+ coarsest. This is not required for the inverse and may be *None*.
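+
+ A hedged sketch of evaluating the ops directly (assumes ``p`` is a
+ Pyramid built from a tf placeholder ``X_p``, and ``img`` is a NumPy
+ array to feed it with):
+
+ .. code-block:: python
+
+     with tf.Session() as sess:
+         sess.run(tf.global_variables_initializer())
+         lowpass = sess.run(p.lowpass_op, feed_dict={X_p: img})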
+ """
+ def __init__(self, lowpass, highpasses, scales=None, numpy=False):
+ self.lowpass_op = lowpass
+ self.highpasses_ops = highpasses
+ self.scales_ops = scales
+ self.numpy = numpy
+
+ @property
+ def lowpass(self):
+ if not hasattr(self, '_lowpass'):
+ if self.lowpass_op is None:
+ self._lowpass = None
+ else:
+ with tf.Session() as sess:
+ sess.run(tf.global_variables_initializer())
+ self._lowpass = sess.run(self.lowpass_op)
+ return self._lowpass
+
+ @property
+ def highpasses(self):
+ if not hasattr(self, '_highpasses'):
+ if self.highpasses_ops is None:
+ self._highpasses = None
+ else:
+ with tf.Session() as sess:
+ sess.run(tf.global_variables_initializer())
+ self._highpasses = \
+ tuple(sess.run(x) for x in self.highpasses_ops)
+ return self._highpasses
+
+ @property
+ def scales(self):
+ if not hasattr(self, '_scales'):
+ if self.scales_ops is None:
+ self._scales = None
+ else:
+ with tf.Session() as sess:
+ sess.run(tf.global_variables_initializer())
+ self._scales = tuple(sess.run(x) for x in self.scales_ops)
+ return self._scales
diff --git a/dtcwt/tf/lowlevel.py b/dtcwt/tf/lowlevel.py
new file mode 100644
index 0000000..732b546
--- /dev/null
+++ b/dtcwt/tf/lowlevel.py
@@ -0,0 +1,475 @@
+from __future__ import absolute_import
+
+try:
+ import tensorflow as tf
+ _HAVE_TF = True
+except ImportError:
+ _HAVE_TF = False
+
+from dtcwt.utils import as_column_vector
+import numpy as np
+
+
+def _as_row_tensor(h):
+ if isinstance(h, tf.Tensor):
+ h = tf.reshape(h, [1, -1])
+ else:
+ h = as_column_vector(h).T
+ h = tf.constant(h, tf.float32)
+ return h
+
+
+def _as_col_tensor(h):
+ if isinstance(h, tf.Tensor):
+ h = tf.reshape(h, [-1, 1])
+ else:
+ h = as_column_vector(h)
+ h = tf.constant(h, tf.float32)
+ return h
+
+
+def _conv_2d(X, h, strides=[1,1,1,1]):
+ """
+ Perform 2d convolution in tensorflow.
+
+ X will be manipulated to be of shape [batch, height, width, ch],
+ and h to be of shape [height, width, ch, num]. This function does the
+ necessary reshaping before calling the conv2d function, and does the
+ reshaping on the output, returning Y of shape [batch, height, width]
+ """
+
+ # Check the shape of X is what we expect
+ if len(X.shape) != 3:
+ raise ValueError('X needs to be of shape [batch, height, width] ' +
+ 'for conv_2d')
+
+ # Check the shape of h is what we expect
+ if len(h.shape) != 2:
+ raise ValueError('Filter inputs must only have height and width ' +
+ 'for conv_2d')
+
+ # Add in the unit dimensions for conv
+ X = tf.expand_dims(X, axis=-1)
+ h = tf.expand_dims(tf.expand_dims(h, axis=-1),axis=-1)
+
+ # Have to reverse h as tensorflow 2d conv is actually cross-correlation
+ h = tf.reverse(h, axis=[0,1])
+ Y = tf.nn.conv2d(X, h, strides=strides, padding='VALID')
+
+ # Remove the final dimension, returning a result of shape
+ # [batch, height, width]
+ Y = tf.squeeze(Y, axis=-1)
+
+ return Y
+
+
+def _conv_2d_transpose(X, h, out_shape, strides=[1,1,1,1]):
+ """
+ Perform 2d transpose convolution in tensorflow.
+
+ X will be manipulated to be of shape [batch, height, width, ch], and h to
+ be of shape [height, width, ch, num]. This function does the necessary
+ reshaping before calling the conv2d_transpose function, and does the reshaping on the
+ output, returning Y of shape [batch, height, width]
+ """
+
+ # Check the shape of X is what we expect
+ if len(X.shape) != 3:
+ raise ValueError('X needs to be of shape [batch, height, width] ' +
+ 'for conv_2d')
+ # Check the shape of h is what we expect
+ if len(h.shape) != 2:
+ raise ValueError('Filter inputs must only have height and width ' +
+ 'for conv_2d')
+
+ # Add in the unit dimensions for conv
+ X = tf.expand_dims(X, axis=-1)
+ h = tf.expand_dims(tf.expand_dims(h, axis=-1),axis=-1)
+
+ # Have to reverse h as tensorflow 2d conv is actually cross-correlation
+ h = tf.reverse(h, axis=[0,1])
+ # Transpose h as we will be using the transpose convolution
+ h = tf.transpose(h, perm=[1, 0, 2, 3])
+
+ Y = tf.nn.conv2d_transpose(X, h, output_shape=out_shape, strides=strides,
+ padding='VALID')
+
+ # Remove the final dimension, returning a result of shape
+ # [batch, height, width]
+ Y = tf.squeeze(Y, axis=-1)
+
+ return Y
+
+
+def _tf_pad(x, szs, padding='SYMMETRIC'):
+ """
+ Tensorflow can't handle padding by more than the dimension of the image.
+ This wrapper allows us to build padding up successively.
+ """
+ def get_size(x):
+ # Often the batch will be None. Convert these to 0s
+ x_szs = x.get_shape().as_list()
+ x_szs = [0 if val is None else val for val in x_szs]
+ return x_szs
+
+ x_szs = get_size(x)
+ gt = [[sz[0] > x_sz, sz[1] > x_sz] for sz,x_sz in zip(szs, x_szs)]
+ while np.any(gt):
+ # This creates an intermediate padding amount that will bring in
+ # dimensions that are too big by the size of x.
+ szs_step = np.int32(gt) * np.stack([x_szs, x_szs], axis=-1)
+ x = tf.pad(x, szs_step, padding)
+ szs = szs - szs_step
+ x_szs = get_size(x)
+ gt = [[sz[0] > x_sz, sz[1] > x_sz] for sz,x_sz in zip(szs, x_szs)]
+
+ # Pad by the remaining amount
+ x = tf.pad(x, szs, 'SYMMETRIC')
+ return x
+
+
+def colfilter(X, h, align=False):
+ """
+ Filter the columns of image *X* using filter vector *h*, without decimation.
+
+ :param X: an image whose columns are to be filtered
+ :param h: the filter coefficients.
+ :param align: If true, then will have Y keep the same output shape as X,
+ even if h has even length. Makes no difference if len(h) is odd.
+
+ :returns Y: the filtered image.
+
+ If len(h) is odd, each output sample is aligned with each input sample
+ and *Y* is the same size as *X*.
+ If len(h) is even, each output sample is aligned with the mid point of
+ each pair of input samples, and Y.shape = X.shape + [1 0].
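+
+ A hedged shape sketch (``X_p`` is an assumed placeholder and ``h`` an
+ assumed odd-length filter vector):
+
+ .. code-block:: python
+
+     X_p = tf.placeholder(tf.float32, [None, 512, 512])
+     Y = colfilter(X_p, h)  # len(h) odd, so Y is also [None, 512, 512]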
+
+ .. codeauthor:: Fergal Cotter , Feb 2017
+ .. codeauthor:: Rich Wareham , August 2013
+ .. codeauthor:: Cian Shaffrey, Cambridge University, August 2000
+ .. codeauthor:: Nick Kingsbury, Cambridge University, August 2000
+ """
+ # Make the function flexible to accepting h in multiple forms
+ h_t = _as_col_tensor(h)
+ m = h_t.get_shape().as_list()[0]
+ m2 = m // 2
+
+ # Symmetrically extend with repeat of end samples.
+ # Pad only the second dimension of the tensor X (the columns)
+ if m % 2 == 0 and align:
+ X = _tf_pad(X, [[0, 0], [m2 - 1, m2], [0, 0]], 'SYMMETRIC')
+ else:
+ X = _tf_pad(X, [[0, 0], [m2, m2], [0, 0]], 'SYMMETRIC')
+
+ Y = _conv_2d(X, h_t, strides=[1,1,1,1])
+
+ return Y
+
+
+def rowfilter(X, h, align=False):
+ """
+ Filter the rows of image *X* using filter vector *h*, without decimation.
+
+ :param X: a tensor of images whose rows are to be filtered
+ :param h: the filter coefficients.
+ :param align: If true, then will have Y keep the same output shape as X,
+ even if h has even length. Makes no difference if len(h) is odd.
+
+ :returns Y: the filtered image.
+
+ If len(h) is odd, each output sample is aligned with each input sample
+ and *Y* is the same size as *X*.
+ If len(h) is even, each output sample is aligned with the mid point of each
+ pair of input samples, and Y.shape = X.shape + [0 1].
+
+ .. codeauthor:: Fergal Cotter , Feb 2017
+ .. codeauthor:: Rich Wareham , August 2013
+ .. codeauthor:: Cian Shaffrey, Cambridge University, August 2000
+ .. codeauthor:: Nick Kingsbury, Cambridge University, August 2000
+ """
+ # Make the function flexible to accepting h in multiple forms
+ h_t = _as_row_tensor(h)
+ m = h_t.get_shape().as_list()[1]
+ m2 = m // 2
+
+ # Symmetrically extend with repeat of end samples.
+ # Pad only the second dimension of the tensor X (the columns)
+ if m % 2 == 0 and align:
+ X = _tf_pad(X, [[0, 0], [0, 0], [m2 - 1, m2]], 'SYMMETRIC')
+ else:
+ X = _tf_pad(X, [[0, 0], [0, 0], [m2, m2]], 'SYMMETRIC')
+
+ Y = _conv_2d(X, h_t, strides=[1,1,1,1])
+
+ return Y
+
+
+def coldfilt(X, ha, hb, no_decimate=False):
+ """
+ Filter the columns of image X using the two filters ha and hb =
+ reverse(ha).
+
+ :param X: The input, of size [batch, h, w]
+ :param ha: Filter to be used on the odd samples of x.
+ :param hb: Filter to bue used on the even samples of x.
+ :param no_decimate: If true, keep the same input size (currently
+ unimplemented; the output is always decimated)
+
+ Both filters should be even length, and h should be approx linear
+ phase with a quarter sample (i.e. an :math:`e^{j \pi/4}`) advance from
+ its mid pt (i.e. :math:`|h(m/2)| > |h(m/2 + 1)|`)::
+
+ ext top edge bottom edge ext
+ Level 1: ! | ! | !
+ odd filt on . b b b b a a a a a a a a b b b b
+ odd filt on . a a a a b b b b b b b b a a a a
+ Level 2: ! | ! | !
+ +q filt on x b b a a a a b b
+ -q filt on o a a b b b b a a
+
+ The output is decimated by two from the input sample rate and the results
+ from the two filters, Ya and Yb, are interleaved to give Y.
+ Symmetric extension with repeated end samples is used on the composite X
+ columns before each filter is applied.
+
+ :raises ValueError: if the number of rows in X is not a multiple of 4, the
+ length of ha does not match hb, or the lengths of ha or hb are non-even.
+
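+ A hedged shape sketch (``X_p``, ``ha`` and ``hb`` are assumed inputs):
+
+ .. code-block:: python
+
+     X_p = tf.placeholder(tf.float32, [None, 512, 512])
+     Y = coldfilt(X_p, ha, hb)  # rows are decimated: Y is [None, 256, 512]
+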
+ .. codeauthor:: Fergal Cotter , Feb 2017
+ .. codeauthor:: Rich Wareham , August 2013
+ .. codeauthor:: Cian Shaffrey, Cambridge University, August 2000
+ .. codeauthor:: Nick Kingsbury, Cambridge University, August 2000
+ """
+
+ r, c = X.get_shape().as_list()[1:]
+ r2 = r // 2
+ if r % 4 != 0:
+ raise ValueError('No. of rows in X must be a multiple of 4\n' +
+ 'X was {}'.format(X.get_shape().as_list()))
+
+ ha_t = _as_col_tensor(ha)
+ hb_t = _as_col_tensor(hb)
+ if ha_t.shape != hb_t.shape:
+ raise ValueError('Shapes of ha and hb must be the same\n' +
+ 'ha was {}, hb was {}'.format(ha_t.shape, hb_t.shape))
+
+ m = ha_t.get_shape().as_list()[0]
+ if m % 2 != 0:
+ raise ValueError('Lengths of ha and hb must be even\n' +
+ 'ha was {}, hb was {}'.format(ha_t.shape, hb_t.shape))
+
+ # Do the 2d convolution, but only evaluated at every second sample
+ # for both X_odd and X_even
+ rows = r2
+ if no_decimate:
+ # no_decimate is not currently implemented in the tf backend;
+ # the output is always decimated by two
+ pass
+
+ # Symmetrically extend with repeat of end samples.
+ # Pad only the second dimension of the tensor X (the columns).
+ X = _tf_pad(X, [[0, 0], [m, m], [0, 0]], 'SYMMETRIC')
+
+ # Take the odd and even rows of X
+ X_odd = X[:, 2:r + 2 * m - 2:2, :]
+ X_even = X[:, 3:r + 2 * m - 2:2, :]
+
+ a_rows = _conv_2d(X_odd, ha_t, strides=[1,2,1,1])
+ b_rows = _conv_2d(X_even, hb_t, strides=[1,2,1,1])
+
+ # Stack a_rows and b_rows (both of shape [Batch, r/4, c]) along the third
+ # dimension to make a tensor of shape [Batch, r/4, 2, c].
+ Y = tf.cond(tf.reduce_sum(ha_t * hb_t) > 0,
+ lambda: tf.stack([a_rows, b_rows],axis=2),
+ lambda: tf.stack([b_rows, a_rows],axis=2))
+
+ # Reshape result to be shape [Batch, r/2, c]. This reshaping interleaves
+ # the columns
+ Y = tf.reshape(Y, [-1, rows, c])
+
+ return Y
+
+
+def rowdfilt(X, ha, hb, no_decimate=False):
+ """
+ Filter the rows of image X using the two filters ha and hb = reverse(ha).
+
+ :param X: The input, of size [batch, h, w]
+ :param ha: Filter to be used on the odd samples of x.
+ :param hb: Filter to bue used on the even samples of x.
+ :param no_decimate: If true, keep the same input size (currently
+ unimplemented; the output is always decimated)
+
+ Both filters should be even length, and h should be approx linear
+ phase with a quarter sample advance from its mid pt (i.e. :math:`|h(m/2)| >
+ |h(m/2 + 1)|`)::
+
+ ext top edge bottom edge ext
+ Level 1: ! | ! | !
+ odd filt on . b b b b a a a a a a a a b b b b
+ odd filt on . a a a a b b b b b b b b a a a a
+ Level 2: ! | ! | !
+ +q filt on x b b a a a a b b
+ -q filt on o a a b b b b a a
+
+ The output is decimated by two from the input sample rate and the results
+ from the two filters, Ya and Yb, are interleaved to give Y. Symmetric
+ extension with repeated end samples is used on the composite X rows
+ before each filter is applied.
+
+ :raises ValueError: if the number of columns in X is not a multiple of 4,
+ the length of ha does not match hb, or the lengths of ha or hb are non-even.
+
+ .. codeauthor:: Fergal Cotter , Feb 2017
+ .. codeauthor:: Rich Wareham , August 2013
+ .. codeauthor:: Cian Shaffrey, Cambridge University, August 2000
+ .. codeauthor:: Nick Kingsbury, Cambridge University, August 2000
+ """
+
+ r, c = X.get_shape().as_list()[1:]
+ c2 = c // 2
+ if c % 4 != 0:
+ raise ValueError('No. of cols in X must be a multiple of 4\n' +
+ 'X was {}'.format(X.get_shape().as_list()))
+
+ ha_t = _as_row_tensor(ha)
+ hb_t = _as_row_tensor(hb)
+ if ha_t.shape != hb_t.shape:
+ raise ValueError('Shapes of ha and hb must be the same\n' +
+ 'ha was {}, hb was {}'.format(ha_t.shape, hb_t.shape))
+
+ m = ha_t.get_shape().as_list()[1]
+ if m % 2 != 0:
+ raise ValueError('Lengths of ha and hb must be even\n' +
+ 'ha was {}, hb was {}'.format(ha_t.shape, hb_t.shape))
+
+ # Symmetrically extend with repeat of end samples.
+ # Pad only the second dimension of the tensor X (the rows).
+ # SYMMETRIC extension means the edge sample is repeated twice, whereas
+ # REFLECT only has the edge sample once
+ X = _tf_pad(X, [[0, 0], [0, 0], [m, m]], 'SYMMETRIC')
+
+ # Take the odd and even columns of X
+ X_odd = X[:,:,2:c + 2 * m - 2:2]
+ X_even = X[:,:,3:c + 2 * m - 2:2]
+
+ # Do the 2d convolution, but only evaluated at every second sample
+ # for both X_odd and X_even
+ cols = c2
+ if no_decimate:
+ # no_decimate is not currently implemented in the tf backend;
+ # the output is always decimated by two
+ pass
+
+ a_cols = _conv_2d(X_odd, ha_t, strides=[1,1,2,1])
+ b_cols = _conv_2d(X_even, hb_t, strides=[1,1,2,1])
+
+ # Stack a_cols and b_cols (both of shape [Batch, r, c/4]) along the fourth
+ # dimension to make a tensor of shape [Batch, r, c/4, 2].
+ Y = tf.cond(tf.reduce_sum(ha_t * hb_t) > 0,
+ lambda: tf.stack([a_cols, b_cols], axis=3),
+ lambda: tf.stack([b_cols, a_cols], axis=3))
+
+ # Reshape result to be shape [Batch, r, c/2]. This reshaping interleaves
+ # the columns
+ Y = tf.reshape(Y, [-1, r, cols])
+
+ return Y
+
+
+def colifilt(X, ha, hb, no_decimate=False):
+ """
+ Filter the columns of image X using the two filters ha and hb =
+ reverse(ha).
+
+ :param X: The input, of size [batch, h, w]
+ :param ha: Filter to be used on the odd samples of x.
+ :param hb: Filter to bue used on the even samples of x.
+ :param no_decimate: Not implemented yet
+
+ Both filters should be even length, and h should be approx linear
+ phase with a quarter sample advance from its mid pt (i.e. :math:`|h(m/2)| >
+ |h(m/2 + 1)|`).
+
+ .. code-block:: text
+
+ ext left edge right edge ext
+ Level 2: ! | ! | !
+ +q filt on x b b a a a a b b
+ -q filt on o a a b b b b a a
+ Level 1: ! | ! | !
+ odd filt on . b b b b a a a a a a a a b b b b
+ odd filt on . a a a a b b b b b b b b a a a a
+
+ The output is interpolated by two from the input sample rate and the
+ results from the two filters, Ya and Yb, are interleaved to give Y.
+ Symmetric extension with repeated end samples is used on the composite X
+ columns before each filter is applied.
+
+ .. codeauthor:: Fergal Cotter , Feb 2017
+ .. codeauthor:: Rich Wareham , August 2013
+ .. codeauthor:: Cian Shaffrey, Cambridge University, August 2000
+ .. codeauthor:: Nick Kingsbury, Cambridge University, August 2000
+ """
+
+ # A quick hack to handle undecimated inputs. Simply take every second sample
+ # as if it had been decimated.
+ r, c = X.get_shape().as_list()[1:]
+ if r % 2 != 0:
+ raise ValueError('No. of rows in X must be a multiple of 2.\n' +
+ 'X was {}'.format(X.get_shape().as_list()))
+
+ ha_t = _as_col_tensor(ha)
+ hb_t = _as_col_tensor(hb)
+ if ha_t.shape != hb_t.shape:
+ raise ValueError('Shapes of ha and hb must be the same.\n' +
+ 'ha was {}, hb was {}'.format(ha_t.shape, hb_t.shape))
+
+ m = ha_t.get_shape().as_list()[0]
+ m2 = m // 2
+ if ha_t.get_shape().as_list()[0] % 2 != 0:
+ raise ValueError('Lengths of ha and hb must be even.\n' +
+ 'ha was {}, hb was {}'.format(ha_t.shape, hb_t.shape))
+
+ X = _tf_pad(X, [[0, 0], [m2, m2], [0, 0]], 'SYMMETRIC')
+
+ ha_odd_t = ha_t[::2,:]
+ ha_even_t = ha_t[1::2,:]
+ hb_odd_t = hb_t[::2,:]
+ hb_even_t = hb_t[1::2,:]
+
+ if m2 % 2 == 0:
+ # m/2 is even, so set up t to start on d samples.
+ # Set up vector for symmetric extension of X with repeated end samples.
+
+ # Take the odd and even columns of X
+ X1, X2 = tf.cond(
+ tf.reduce_sum(ha_t * hb_t) > 0,
+ lambda: (X[:, 1:r + m - 2:2, :], X[:, 0:r + m - 3:2, :]),
+ lambda: (X[:, 0:r + m - 3:2, :], X[:, 1:r + m - 2:2, :]))
+ X3, X4 = tf.cond(
+ tf.reduce_sum(ha_t * hb_t) > 0,
+ lambda: (X[:, 3:r + m:2, :], X[:, 2:r + m - 1:2, :]),
+ lambda: (X[:, 2:r + m - 1:2, :], X[:, 3:r + m:2, :]))
+
+ y1 = _conv_2d(X2, ha_even_t)
+ y2 = _conv_2d(X1, hb_even_t)
+ y3 = _conv_2d(X4, ha_odd_t)
+ y4 = _conv_2d(X3, hb_odd_t)
+
+ else:
+ # m/2 is odd, so set up t to start on b samples.
+ # Set up vector for symmetric extension of X with repeated end samples.
+
+ # Take the odd and even columns of X
+ X1, X2 = tf.cond(
+ tf.reduce_sum(ha_t * hb_t) > 0,
+ lambda: (X[:, 2:r + m - 1:2, :], X[:, 1:r + m - 2:2, :]),
+ lambda: (X[:, 1:r + m - 2:2, :], X[:, 2:r + m - 1:2, :]))
+
+ y1 = _conv_2d(X2, ha_odd_t)
+ y2 = _conv_2d(X1, hb_odd_t)
+ y3 = _conv_2d(X2, ha_even_t)
+ y4 = _conv_2d(X1, hb_even_t)
+
+ # Stack 4 tensors of shape [batch, r/2, c] into one tensor [batch, r/2, 4, c]
+ Y = tf.stack([y1,y2,y3,y4], axis=2)
+
+ # Reshape to be [batch, r * 2, c]. This interleaves the rows
+ Y = tf.reshape(Y, [-1,2*r,c])
+
+ return Y
diff --git a/dtcwt/tf/transform1d.py b/dtcwt/tf/transform1d.py
new file mode 100644
index 0000000..d9336b5
--- /dev/null
+++ b/dtcwt/tf/transform1d.py
@@ -0,0 +1,550 @@
+from __future__ import absolute_import
+
+import numpy as np
+
+from six.moves import xrange
+
+from dtcwt.coeffs import biort as _biort, qshift as _qshift
+from dtcwt.defaults import DEFAULT_BIORT, DEFAULT_QSHIFT
+from dtcwt.numpy.common import Pyramid as Pyramid_np
+from dtcwt.utils import asfarray
+from dtcwt.tf import Pyramid
+from dtcwt.tf.lowlevel import coldfilt, colfilter, colifilt
+
+try:
+ import tensorflow as tf
+ from tensorflow.python.framework import dtypes
+ tf_dtypes = frozenset(
+ [dtypes.float32, dtypes.float64, dtypes.int8, dtypes.int16,
+ dtypes.int32, dtypes.int64, dtypes.uint8, dtypes.qint8, dtypes.qint32,
+ dtypes.quint8, dtypes.complex64, dtypes.complex128,
+ dtypes.float32_ref, dtypes.float64_ref, dtypes.int8_ref,
+ dtypes.int16_ref, dtypes.int32_ref, dtypes.int64_ref, dtypes.uint8_ref,
+ dtypes.qint8_ref, dtypes.qint32_ref, dtypes.quint8_ref,
+ dtypes.complex64_ref, dtypes.complex128_ref]
+ )
+except ImportError:
+ # The lack of tensorflow will be caught by the low-level routines.
+ pass
+
+np_dtypes = frozenset(
+ [np.dtype('float16'), np.dtype('float32'), np.dtype('float64'),
+ np.dtype('int8'), np.dtype('int16'), np.dtype('int32'),
+ np.dtype('int64'), np.dtype('uint8'), np.dtype('uint16'),
+ np.dtype('uint32'), np.dtype('complex64'), np.dtype('complex128')]
+)
+
+
+class Transform1d(object):
+ """
+ An implementation of the 1D DT-CWT in Tensorflow.
+
+ :param biort: Level 1 wavelets to use. See :py:func:`dtcwt.coeffs.biort`.
+ :param qshift: Level >= 2 wavelets to use. See
+ :py:func:`dtcwt.coeffs.qshift`.
+
+ .. note::
+
+ Calling the methods in this class with different input types will
+ slightly vary the behaviour. If you call the
+ :py:meth:`~dtcwt.tf.Transform1d.forward` or
+ :py:meth:`~dtcwt.tf.Transform1d.forward_channels` methods with a numpy
+ array, they load this array into a :py:class:`tf.Variable` and create
+ the graph. Subsequent calls to :py:attr:`dtcwt.tf.Pyramid.lowpass` or
+ other attributes in the pyramid will create a session and evaluate these
+ parameters. If the above methods are called with a tensorflow variable
+ or placeholder, these will be used to create the graph. As such, to
+ evaluate the results, you will need to look at the
+ :py:attr:`dtcwt.tf.Pyramid.lowpass_op` attribute (calling the `lowpass`
+ attribute will try to evaluate the graph with no initialized variables
+ and likely result in a runtime error).
+
+ The behaviour is similar for the
+ :py:meth:`~dtcwt.tf.Transform1d.inverse` and
+ :py:meth:`~dtcwt.tf.Transform1d.inverse_channels` methods, except these
+ return an array, rather than a Pyramid style class. If a
+ :py:class:`dtcwt.tf.Pyramid` was created by calling the forward methods
+ with a numpy array, providing this pyramid to the inverse methods will
+ return a numpy array. If however a :py:class:`dtcwt.tf.Pyramid` was
+ created by calling the forward methods with a tensorflow variable, the
+ result from calling the inverse methods will also be a tensorflow
+ variable.
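+
+ A hedged sketch of the two usage styles (``signal`` is an assumed NumPy
+ array):
+
+ .. code-block:: python
+
+     xfm = Transform1d()
+     # numpy usage: pyramid attributes evaluate to numpy arrays
+     p = xfm.forward(signal, nlevels=4)
+     lo = p.lowpass
+     # tf usage: attributes ending in _op are graph tensors
+     X_p = tf.placeholder(tf.float32, [512])
+     p_tf = xfm.forward(X_p, nlevels=4)
+     lo_op = p_tf.lowpass_op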
+ """
+ def __init__(self, biort=DEFAULT_BIORT, qshift=DEFAULT_QSHIFT):
+ self.biort = biort
+ self.qshift = qshift
+
+ def forward(self, X, nlevels=3, include_scale=False):
+ """Perform a *n*-level DTCWT decompostion on a 1D column vector *X* (or on
+ the columns of a matrix *X*).
+
+ Can provide the forward transform with either an np array (naive usage),
+ or a tensorflow variable or placeholder (designed usage). To transform
+ batches of vectors, use the :py:meth:`forward_channels` method.
+
+ :param X: 1D real array or 2D real array whose columns are to be
+ transformed.
+ :param nlevels: Number of levels of wavelet decomposition
+
+ :returns: A :py:class:`dtcwt.tf.Pyramid` object representing the
+ transform result.
+
+ If *biort* or *qshift* are strings, they are used as an argument to the
+ :py:func:`biort` or :py:func:`qshift` functions. Otherwise, they are
+ interpreted as tuples of vectors giving filter coefficients. In the
+ *biort* case, this should be (h0o, g0o, h1o, g1o). In the *qshift* case,
+ this should be (h0a, h0b, g0a, g0b, h1a, h1b, g1a, g1b).
+
+ .. codeauthor:: Fergal Cotter , Sep 2017
+ .. codeauthor:: Rich Wareham , Aug 2013
+ .. codeauthor:: Nick Kingsbury, Cambridge University, May 2002
+ .. codeauthor:: Cian Shaffrey, Cambridge University, May 2002
+
+ """
+ # Check if a numpy array was provided
+ numpy = False
+ try:
+ dtype = X.dtype
+ except AttributeError:
+ X = asfarray(X)
+ dtype = X.dtype
+
+ if dtype in np_dtypes:
+ numpy = True
+ # Need this because colfilter and friends assumes input is 2d
+ if len(X.shape) == 1:
+ X = np.atleast_2d(X).T
+ X = tf.Variable(X, dtype=tf.float32, trainable=False)
+ elif dtype in tf_dtypes:
+ if len(X.get_shape().as_list()) == 1:
+ X = tf.expand_dims(X, axis=-1)
+ else:
+ raise ValueError('I cannot handle the variable you have ' +
+ 'provided of type ' + str(X.dtype) + '. ' +
+ 'Inputs should be a numpy or tf array')
+
+ X_shape = tuple(X.get_shape().as_list())
+ size = '{}'.format(X_shape[0])
+ name = 'dtcwt_fwd_{}'.format(size)
+ if len(X_shape) == 2:
+ # Need to make it a batch for tensorflow
+ X = tf.expand_dims(X, axis=0)
+ elif len(X_shape) >= 3:
+ raise ValueError(
+ 'The entered variable has too many ' +
+ 'dimensions - ' + str(X_shape) + '.')
+
+ # Do the forward transform
+ with tf.variable_scope(name):
+ Yl, Yh, Yscale = self._forward_ops(X, nlevels)
+
+ Yl = Yl[0]
+ Yh = tuple(x[0] for x in Yh)
+ Yscale = tuple(x[0] for x in Yscale)
+
+ if include_scale:
+ return Pyramid(Yl, Yh, Yscale, numpy)
+ else:
+ return Pyramid(Yl, Yh, None, numpy)
+
+ def forward_channels(self, X, nlevels=3, include_scale=False):
+ """Perform a *n*-level DTCWT decompostion on a 3D array *X*.
+
+ Can provide the forward transform with either an np array (naive usage),
+ or a tensorflow variable or placeholder (designed usage).
+
+ :param X: 3D real array. Batch of matrices whose columns are to be
+ transformed (i.e. the second dimension).
+ :param nlevels: Number of levels of wavelet decomposition
+
+ :returns: A :py:class:`dtcwt.tf.Pyramid` object representing the
+ transform result.
+
+ If *biort* or *qshift* are strings, they are used as an argument to the
+ :py:func:`biort` or :py:func:`qshift` functions. Otherwise, they are
+ interpreted as tuples of vectors giving filter coefficients. In the
+ *biort* case, this should be (h0o, g0o, h1o, g1o). In the *qshift* case,
+ this should be (h0a, h0b, g0a, g0b, h1a, h1b, g1a, g1b).
+
+ .. codeauthor:: Fergal Cotter , Sep 2017
+ .. codeauthor:: Rich Wareham , Aug 2013
+ .. codeauthor:: Nick Kingsbury, Cambridge University, May 2002
+ .. codeauthor:: Cian Shaffrey, Cambridge University, May 2002
+
+ """
+ # Check if a numpy array was provided
+ numpy = False
+ try:
+ dtype = X.dtype
+ except AttributeError:
+ X = asfarray(X)
+ dtype = X.dtype
+
+ if dtype in np_dtypes:
+ numpy = True
+ if len(X.shape) != 3:
+ raise ValueError(
+ 'Incorrect input shape for the forward_channels ' +
+ 'method ' + str(X.shape) + '. For inputs of 1 or 2 ' +
+ 'dimensions, use the forward method.')
+ # Need this because colfilter and friends assumes input is 2d
+ X = tf.Variable(X, dtype=tf.float32, trainable=False)
+ elif dtype in tf_dtypes:
+ X_shape = X.get_shape().as_list()
+ if len(X.get_shape().as_list()) != 3:
+ raise ValueError(
+ 'Incorrect input shape for the forward_channels ' +
+ 'method ' + str(X_shape) + '. For inputs of 1 or 2 ' +
+ 'dimensions, use the forward method.')
+ else:
+ raise ValueError('I cannot handle the variable you have ' +
+ 'provided of type ' + str(X.dtype) + '. ' +
+ 'Inputs should be a numpy or tf array')
+
+ X_shape = tuple(X.get_shape().as_list())
+ size = '{}'.format(X_shape[1])
+ name = 'dtcwt_fwd_{}'.format(size)
+
+ # Do the forward transform
+ with tf.variable_scope(name):
+ Yl, Yh, Yscale = self._forward_ops(X, nlevels)
+
+ if include_scale:
+ return Pyramid(Yl, Yh, Yscale, numpy)
+ else:
+ return Pyramid(Yl, Yh, None, numpy)
+
+ def inverse(self, pyramid, gain_mask=None):
+ """Perform an *n*-level dual-tree complex wavelet (DTCWT) 1D
+ reconstruction.
+
+ :param pyramid: A :py:class:`dtcwt.Pyramid`-like object containing
+ the transformed signal.
+ :param gain_mask: Gain to be applied to each subband.
+
+ :returns: Reconstructed real array. Will be a tf Variable if the Pyramid
+ was made with tf inputs, otherwise a numpy array.
+
+
+ The *l*-th element of *gain_mask* is gain for wavelet subband at level
+ l. If gain_mask[l] == 0, no computation is performed for band *l*.
+ Default *gain_mask* is all ones. Note that *l* is 0-indexed.
+
+ .. codeauthor:: Rich Wareham , Aug 2013
+ .. codeauthor:: Nick Kingsbury, Cambridge University, May 2002
+ .. codeauthor:: Cian Shaffrey, Cambridge University, May 2002
+
+ """
+ # A tensorflow object was provided
+ numpy = False
+ if isinstance(pyramid, Pyramid):
+ Yl = pyramid.lowpass_op
+ Yh = pyramid.highpasses_ops
+ numpy = pyramid.numpy
+
+ # Check if a numpy pyramid was provided
+ elif isinstance(pyramid, Pyramid_np) or \
+ hasattr(pyramid, 'lowpass') and hasattr(pyramid, 'highpasses'):
+ numpy = True
+ Yl, Yh = pyramid.lowpass, pyramid.highpasses
+ Yl = tf.Variable(Yl, trainable=False, dtype=tf.float32)
+ Yh = tuple(
+ tf.Variable(level, trainable=False, dtype=tf.complex64)
+ for level in Yh)
+ else:
+ raise ValueError(
+ 'Unknown pyramid provided to inverse transform')
+
+ # Need to make sure it has at least 3 dimensions for tensorflow
+ Yl_shape = tuple(Yl.get_shape().as_list())
+ if len(Yl_shape) == 2:
+ Yl = tf.expand_dims(Yl, axis=0)
+ Yh = tuple(tf.expand_dims(x, axis=0) for x in Yh)
+ elif len(Yl_shape) >= 3:
+ raise ValueError(
+ 'The entered variables have too many ' +
+ 'dimensions - ' + str(Yl_shape) + '. For batches of ' +
+ 'images with multiple channels (i.e. 3 or 4 dimensions), ' +
+ 'please either enter each channel separately, or use ' +
+ 'the inverse_channels method.')
+
+ # Do the inverse transform
+ s = Yl.get_shape().as_list()[1]
+ nlevels = len(Yh)
+ size = '{}_up_{}'.format(s, nlevels)
+ name = 'dtcwt_inv_{}'.format(size)
+ with tf.variable_scope(name):
+ X = self._inverse_ops(Yl, Yh, gain_mask)
+
+ # Chop off the first dimension
+ X = X[0]
+
+ # Return a 1d vector or a column vector
+ if X.get_shape().as_list()[1] == 1:
+ X = X[:,0]
+
+ if numpy:
+ with tf.Session() as sess:
+ sess.run(tf.global_variables_initializer())
+ X = sess.run(X)
+
+ return X
+
+ def inverse_channels(self, pyramid, gain_mask=None):
+ """Perform an *n*-level dual-tree complex wavelet (DTCWT) 1D
+ reconstruction on a 3D array of signals. The inverse is done on the
+ second dimension of these.
+
+ This is designed to work after calling the
+ :py:meth:`~dtcwt.tf.Transform1d.forward_channels` method.
+
+ :param pyramid: A :py:class:`dtcwt.Pyramid`-like object containing
+ the transformed signal. The lowpass signal in the pyramid should be
+ a 3D array to use this method.
+ :param gain_mask: Gain to be applied to each subband.
+
+ :returns: Reconstructed array. Will be a tf Variable if the Pyramid was
+ made with tf inputs, otherwise a numpy array.
+
+ The *l*-th element of *gain_mask* is gain for wavelet subband at level
+ l. If gain_mask[l] == 0, no computation is performed for band *l*.
+ Default *gain_mask* is all ones. Note that *l* is 0-indexed.
+
+ .. codeauthor:: Rich Wareham , Aug 2013
+ .. codeauthor:: Nick Kingsbury, Cambridge University, May 2002
+ .. codeauthor:: Cian Shaffrey, Cambridge University, May 2002
+
+ """
+ # A tensorflow object was provided
+ numpy = False
+ if isinstance(pyramid, Pyramid):
+ Yl = pyramid.lowpass_op
+ Yl_shape = Yl.get_shape().as_list()
+ if len(Yl_shape) != 3:
+ raise ValueError(
+ 'Incorrect input shape for the inverse_channels ' +
+ 'method ' + str(Yl_shape) + '. For inputs of 1 or 2 ' +
+ 'dimensions, use the inverse method.')
+ Yh = pyramid.highpasses_ops
+ numpy = pyramid.numpy
+
+ # Check if a numpy pyramid was provided
+ elif isinstance(pyramid, Pyramid_np) or \
+ hasattr(pyramid, 'lowpass') and hasattr(pyramid, 'highpasses'):
+ numpy = True
+ Yl, Yh = pyramid.lowpass, pyramid.highpasses
+ if len(Yl.shape) != 3:
+ raise ValueError(
+ 'Incorrect input shape for the inverse_channels ' +
+ 'method ' + str(Yl.shape) + '. For inputs of 1 or 2 ' +
+ 'dimensions, use the inverse method.')
+
+ Yl = tf.Variable(Yl, trainable=False, dtype=tf.float32)
+ Yh = tuple(
+ tf.Variable(level, trainable=False, dtype=tf.complex64)
+ for level in Yh)
+ else:
+ raise ValueError(
+ 'Unknown pyramid provided to inverse transform')
+
+ # Do the inverse transform
+ s = Yl.get_shape().as_list()[1]
+ nlevels = len(Yh)
+ size = '{}_up_{}'.format(s, nlevels)
+ name = 'dtcwt_inv_{}'.format(size)
+ with tf.variable_scope(name):
+ X = self._inverse_ops(Yl, Yh, gain_mask)
+
+ if numpy:
+ with tf.Session() as sess:
+ sess.run(tf.global_variables_initializer())
+ X = sess.run(X)
+
+ return X
+
+ def _forward_ops(self, X, nlevels=3):
+ """ Perform a *n*-level DTCWT-2D decompostion on a 2D matrix *X*.
+
+ For column inputs, we still need the input shape to be 3D, but with 1 as
+ the last dimension.
+
+ :param X: 3D real array of size [batch, h, w]
+ :param nlevels: Number of levels of wavelet decomposition
+
+ :returns: A tuple of Yl, Yh, Yscale
+ """
+ biort = self.biort
+ qshift = self.qshift
+
+ # Try to load coefficients if biort is a string parameter
+ try:
+ h0o, g0o, h1o, g1o = _biort(biort)
+ except TypeError:
+ h0o, g0o, h1o, g1o = biort
+
+ # Try to load coefficients if qshift is a string parameter
+ try:
+ h0a, h0b, g0a, g0b, h1a, h1b, g1a, g1b = _qshift(qshift)
+ except TypeError:
+ h0a, h0b, g0a, g0b, h1a, h1b, g1a, g1b = qshift
+
+ # Check the shape and form of the input
+ if X.dtype not in tf_dtypes:
+ raise ValueError('X needs to be a tf variable or placeholder')
+
+ original_size = X.get_shape().as_list()[1:]
+
+ # ############################ Resize #################################
+ # The next few lines of code check to see if the signal is odd in
+ # size; unlike the numpy backend, the tf backend cannot extend the
+ # input, so odd-sized inputs are rejected
+ # initial_row_extend = 0
+ # initial_col_extend = 0
+ # If the row count of X is not divisible by 2, raise an error rather
+ # than extending X (the numpy backend would pad here)
+ if original_size[0] % 2 != 0:
+ # X = tf.pad(X, [[0, 0], [0, 1], [0, 0]], 'SYMMETRIC')
+ raise ValueError('Size of input X must be a multiple of 2')
+
+ # extended_size = X.get_shape().as_list()[1:]
+
+ if nlevels == 0:
+ return X, (), ()
+
+ # ########################### Initialise ###############################
+ Yh = [None, ] * nlevels
+ # This is only required if the user specifies a third output
+ # component.
+ Yscale = [None, ] * nlevels
+
+ # ############################ Level 1 #################################
+ # Uses the biorthogonal filters
+ if nlevels >= 1:
+ # Do odd top-level filters on cols.
+ Hi = colfilter(X, h1o)
+ Lo = colfilter(X, h0o)
+
+ # Convert Hi to complex form by taking alternate rows
+ Yh[0] = tf.cast(Hi[:,::2,:], tf.complex64) + \
+ 1j*tf.cast(Hi[:,1::2,:], tf.complex64)
+ Yscale[0] = Lo
+
+ # ############################ Level 2+ ################################
+ # Uses the qshift filters
+ for level in xrange(1, nlevels):
+ # If the row count of Lo is not divisible by 4 (it will be
+ # divisible by 2), add 2 extra rows to make it so
+ if Lo.get_shape().as_list()[1] % 4 != 0:
+ Lo = tf.pad(Lo, [[0, 0], [1, 1], [0, 0]], 'SYMMETRIC')
+
+ # Do even Qshift filters on cols.
+ Hi = coldfilt(Lo, h1b, h1a)
+ Lo = coldfilt(Lo, h0b, h0a)
+
+ # Convert Hi to complex form by taking alternate rows
+ Yh[level] = tf.cast(Hi[:,::2,:], tf.complex64) + \
+ 1j * tf.cast(Hi[:,1::2,:], tf.complex64)
+ Yscale[level] = Lo
+
+ Yl = Lo
+
+ return Yl, tuple(Yh), tuple(Yscale)
+
+ def _inverse_ops(self, Yl, Yh, gain_mask=None):
+ """Perform an *n*-level dual-tree complex wavelet (DTCWT) 1D
+ reconstruction.
+
+ :param Yl: The lowpass output from a forward transform. Should be a
+ tensorflow variable.
+ :param Yh: The tuple of highpass outputs from a forward transform.
+ Should be tensorflow variables.
+ :param gain_mask: Gain to be applied to each subband.
+
+ :returns: A tf.Variable holding the output
+
+ The *l*-th element of *gain_mask* is gain for wavelet subband at level
+ l. If gain_mask[l] == 0, no computation is performed for band *l*.
+ Default *gain_mask* is all ones. Note that *l* is 0-indexed.
+
+ .. codeauthor:: Fergal Cotter , Sep 2017
+ .. codeauthor:: Rich Wareham , Aug 2013
+ .. codeauthor:: Nick Kingsbury, Cambridge University, May 2002
+ .. codeauthor:: Cian Shaffrey, Cambridge University, May 2002
+
+ """
+ # Which wavelets are to be used?
+ biort = self.biort
+ qshift = self.qshift
+ a = len(Yh) # No of levels.
+
+ if gain_mask is None:
+ gain_mask = np.ones(a) # Default gain_mask.
+ gain_mask = np.array(gain_mask)
+
+ # Try to load coefficients if biort is a string parameter
+ try:
+ h0o, g0o, h1o, g1o = _biort(biort)
+ except TypeError:
+ h0o, g0o, h1o, g1o = biort
+
+ # Try to load coefficients if qshift is a string parameter
+ try:
+ h0a, h0b, g0a, g0b, h1a, h1b, g1a, g1b = _qshift(qshift)
+ except TypeError:
+ h0a, h0b, g0a, g0b, h1a, h1b, g1a, g1b = qshift
+
+ level = a-1 # No of levels = no of rows in L.
+ if level < 0:
+ # if there are no levels in the input, just return the Yl value
+ return Yl
+
+ # Reconstruct levels 2 and above in reverse order.
+ Lo = Yl
+ while level >= 1:
+ Hi = c2q1d(Yh[level]*gain_mask[level])
+ Lo = colifilt(Lo, g0b, g0a) + colifilt(Hi, g1b, g1a)
+
+ # If Lo is not the same height as the next Yh, the forward transform
+ # must have extended it, so clip Lo to match Yh
+ Lo_shape = Lo.get_shape().as_list()
+ next_shape = Yh[level-1].get_shape().as_list()
+ if Lo_shape[1] != 2 * next_shape[1]:
+ Lo = Lo[:,1:-1]
+ Lo_shape = Lo.get_shape().as_list()
+
+ # Check the row shapes across the entire matrix
+ if (np.any(np.asanyarray(Lo_shape[1:]) !=
+ np.asanyarray(next_shape[1:] * np.array((2,1))))):
+ raise ValueError('Yh sizes are not valid for DTWAVEIFM')
+
+ level -= 1
+
+ # Reconstruct level 1.
+ if level == 0:
+ Hi = c2q1d(Yh[level]*gain_mask[level])
+ Z = colfilter(Lo,g0o) + colfilter(Hi,g1o)
+
+ return Z
+
+
+# =============================================================================
+# ********** INTERNAL FUNCTION **********
+# =============================================================================
+def c2q1d(x):
+ """ An internal function to convert a 1D Complex vector back to a real
+ array, which is twice the height of x.
+ """
+ # Input is complex with shape [batch, r, c]
+ r, c = x.get_shape().as_list()[1:3]
+ x1 = tf.real(x)
+ x2 = tf.imag(x)
+ # Stack 2 inputs of shape [batch, r, c] to [batch, r, 2, c]
+ y = tf.stack([x1, x2], axis=-2)
+ # Reshaping interleaves the results
+ y = tf.reshape(y, [-1, 2 * r, c])
+
+ return y
+
+# vim:sw=4:sts=4:et
diff --git a/dtcwt/tf/transform2d.py b/dtcwt/tf/transform2d.py
new file mode 100644
index 0000000..418a31d
--- /dev/null
+++ b/dtcwt/tf/transform2d.py
@@ -0,0 +1,969 @@
+from __future__ import absolute_import
+
+import numpy as np
+import logging
+
+from six.moves import xrange
+
+from dtcwt.coeffs import biort as _biort, qshift as _qshift
+from dtcwt.defaults import DEFAULT_BIORT, DEFAULT_QSHIFT
+from dtcwt.utils import asfarray
+from dtcwt.tf import Pyramid
+from dtcwt.numpy import Pyramid as Pyramid_np
+
+from dtcwt.tf.lowlevel import coldfilt, rowdfilt, rowfilter, colfilter, colifilt
+
+try:
+ import tensorflow as tf
+ from tensorflow.python.framework import dtypes
+ tf_dtypes = frozenset(
+ [dtypes.float32, dtypes.float64, dtypes.int8, dtypes.int16,
+ dtypes.int32, dtypes.int64, dtypes.uint8, dtypes.qint8, dtypes.qint32,
+ dtypes.quint8, dtypes.complex64, dtypes.complex128,
+ dtypes.float32_ref, dtypes.float64_ref, dtypes.int8_ref,
+ dtypes.int16_ref, dtypes.int32_ref, dtypes.int64_ref, dtypes.uint8_ref,
+ dtypes.qint8_ref, dtypes.qint32_ref, dtypes.quint8_ref,
+ dtypes.complex64_ref, dtypes.complex128_ref]
+ )
+except ImportError:
+ # The lack of tensorflow will be caught by the low-level routines.
+ pass
+
+np_dtypes = frozenset(
+ [np.dtype('float16'), np.dtype('float32'), np.dtype('float64'),
+ np.dtype('int8'), np.dtype('int16'), np.dtype('int32'),
+ np.dtype('int64'), np.dtype('uint8'), np.dtype('uint16'),
+ np.dtype('uint32'), np.dtype('complex64'), np.dtype('complex128')]
+)
+
+
+class Transform2d(object):
+ """
+ An implementation of the 2D DT-CWT via Tensorflow.
+
+ :param biort: The biorthogonal wavelet family to use.
+ :param qshift: The quarter shift wavelet family to use.
+
+ .. note::
+
+ *biort* and *qshift* are the wavelets which parameterise the transform.
+ If *biort* or *qshift* are strings, they are used as an argument to the
+ :py:func:`dtcwt.coeffs.biort` or :py:func:`dtcwt.coeffs.qshift`
+ functions. Otherwise, they are interpreted as tuples of vectors giving
+ filter coefficients. In the *biort* case, this should be (h0o, g0o, h1o,
+ g1o). In the *qshift* case, this should be (h0a, h0b, g0a, g0b, h1a,
+ h1b, g1a, g1b).
+
+ .. note::
+
+ Calling the methods in this class with different input types will
+ slightly vary the behaviour. If you call the
+ :py:meth:`~dtcwt.tf.Transform2d.forward` or
+ :py:meth:`~dtcwt.tf.Transform2d.forward_channels` methods with a numpy
+ array, they load this array into a :py:class:`tf.Variable` and create
+ the graph. Subsequent calls to :py:attr:`dtcwt.tf.Pyramid.lowpass` or
+ other attributes in the pyramid will create a session and evaluate these
+ parameters. If the above methods are called with a tensorflow variable
+ or placeholder, these will be used to create the graph. As such, to
+ evaluate the results, you will need to look at the
+ :py:attr:`dtcwt.tf.Pyramid.lowpass_op` attribute (calling the `lowpass`
+ attribute will try to evaluate the graph with no initialized variables
+ and likely result in a runtime error).
+
+ The behaviour is similar for the inverse methods, except these return an
+ array, rather than a Pyramid style class. If a
+ :py:class:`dtcwt.tf.Pyramid` was created by calling the forward methods
+ with a numpy array, providing this pyramid to the inverse methods will
+ return a numpy array. If however a :py:class:`dtcwt.tf.Pyramid` was
+ created by calling the forward methods with a tensorflow variable, the
+ result from calling the inverse methods will also be a tensorflow
+ variable.
+
+ .. codeauthor:: Fergal Cotter , Feb 2017
+ .. codeauthor:: Rich Wareham , Aug 2013
+ .. codeauthor:: Nick Kingsbury, Cambridge University, Sept 2001
+ .. codeauthor:: Cian Shaffrey, Cambridge University, Sept 2001
+ """
+
+ def __init__(self, biort=DEFAULT_BIORT, qshift=DEFAULT_QSHIFT):
+ try:
+ self.biort = _biort(biort)
+ except TypeError:
+ self.biort = biort
+
+ # Load quarter sample shift wavelets
+ try:
+ self.qshift = _qshift(qshift)
+ except TypeError:
+ self.qshift = qshift
+
+ def forward(self, X, nlevels=3, include_scale=False):
+ """ Perform a forward transform on an image.
+
+ Can provide the forward transform with either an np array (naive
+ usage), or a tensorflow variable or placeholder (designed usage). To
+ transform batches of images, use the :py:meth:`forward_channels` method.
+
+ :param ndarray X: Input image which you wish to transform. Can be a
+ numpy array, tensorflow Variable or tensorflow placeholder. See
+ comments below.
+ :param int nlevels: Number of levels of the dtcwt transform to
+ calculate.
+ :param bool include_scale: Whether or not to return the lowpass results
+ at each scale of the transform, or only at the highest scale (as is
+ custom for multi-resolution analysis)
+
+ :returns: A :py:class:`dtcwt.tf.Pyramid` object
+
+ .. note::
+
+ If a numpy array is provided, the forward function will create a
+ tensorflow variable to hold the input image, and then create the
+ graph of the right size to match the input, and then feed the
+ input into the graph and evaluate it. This operation will
+ return a :py:class:`Pyramid` object similar to how running
+ the numpy version would.
+
+ .. codeauthor:: Fergal Cotter , Feb 2017
+ .. codeauthor:: Rich Wareham , Aug 2013
+ .. codeauthor:: Nick Kingsbury, Cambridge University, Sept 2001
+ .. codeauthor:: Cian Shaffrey, Cambridge University, Sept 2001
+ """
+
+ # Check if a numpy array was provided
+ numpy = False
+ try:
+ dtype = X.dtype
+ except AttributeError:
+ X = asfarray(X)
+ dtype = X.dtype
+
+ if dtype in np_dtypes:
+ numpy = True
+ X = np.atleast_2d(X)
+ X = tf.Variable(X, dtype=tf.float32, trainable=False)
+
+ if X.dtype not in tf_dtypes:
+ raise ValueError('I cannot handle the variable you have ' +
+ 'provided of type ' + str(X.dtype) + '. ' +
+ 'Inputs should be a numpy or tf array')
+
+ X_shape = tuple(X.get_shape().as_list())
+ if len(X_shape) == 2:
+ # Need to make it a batch for tensorflow
+ X = tf.expand_dims(X, axis=0)
+ elif len(X_shape) >= 3:
+ raise ValueError(
+ 'The entered variable has too many ' +
+ 'dimensions - ' + str(X_shape) + '. For batches of ' +
+ 'images with multiple channels (i.e. 3 or 4 dimensions), ' +
+ 'please either enter each channel separately, or use ' +
+ 'the forward_channels method.')
+
+ X_shape = tuple(X.get_shape().as_list())
+ original_size = X_shape[1:]
+ size = '{}x{}'.format(original_size[0], original_size[1])
+ name = 'dtcwt_fwd_{}'.format(size)
+ with tf.variable_scope(name):
+ Yl, Yh, Yscale = self._forward_ops(X, nlevels)
+
+ Yl = Yl[0]
+ Yh = tuple(x[0] for x in Yh)
+ Yscale = tuple(x[0] for x in Yscale)
+
+ if include_scale:
+ return Pyramid(Yl, Yh, Yscale, numpy)
+ else:
+ return Pyramid(Yl, Yh, None, numpy)
+
+ def forward_channels(self, X, data_format, nlevels=3,
+ include_scale=False):
+ """ Perform a forward transform on an image with multiple channels.
+
+ Will perform the DTCWT independently on each channel.
+
+ :param X: Input image which you wish to transform.
+ :param int nlevels: Number of levels of the dtcwt transform to
+ calculate.
+ :param bool include_scale: Whether or not to return the lowpass results
+ at each scale of the transform, or only at the highest scale (as is
+ custom for multiresolution analysis)
+ :param str data_format: An optional string of the form:
+ "nhw" (or "chw"), "hwn" (or "hwc"), "nchw" or "nhwc". Note that for
+ these strings, 'n' is used to indicate where the batch dimension is,
+ 'c' is used to indicate where the image channels are, 'h' is used to
+ indicate where the row dimension is, and 'w' is used to indicate
+ where the columns are. If the data_format is:
+
+ - "nhw" : the input will be interpreted as a batch of 2D images,
+ with the batch dimension as the first.
+ - "chw" : will function exactly the same as "nhw" but is offered
+ to indicate the input is a 2D image with channels.
+ - "hwn" : the input will be interpreted as a batch of 2D images
+ with the batch dimension as the last.
+ - "hwc" : will function exatly the same as "hwc" but is offered
+ to indicate the input is a 2D image with channels.
+ - "nchw" : the input is a batch of images with channel dimension
+ as the second dimension. Batch dimension is first.
+ - "nhwc" : the input is a batch of images with channel dimension
+ as the last dimension. Batch dimension is first.
+
+ :returns: A :py:class:`dtcwt.tf.Pyramid` object
+
+ .. codeauthor:: Fergal Cotter , Feb 2017
+ .. codeauthor:: Rich Wareham , Aug 2013
+ .. codeauthor:: Nick Kingsbury, Cambridge University, Sept 2001
+ .. codeauthor:: Cian Shaffrey, Cambridge University, Sept 2001
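+
+ A hedged sketch (``batch`` is an assumed NumPy array of shape
+ ``[n, h, w, c]``, i.e. "nhwc"):
+
+ .. code-block:: python
+
+     xfm = Transform2d()
+     p = xfm.forward_channels(batch, data_format="nhwc", nlevels=3)
+     # the channel dimension stays last in p.lowpass and p.highpasses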
+ """
+ data_format = data_format.lower()
+ formats_3d = ("nhw", "chw", "hwn", "hwc")
+ formats_4d = ("nchw", "nhwc")
+ formats = formats_3d + formats_4d
+ if data_format not in formats:
+ raise ValueError('The data format must be one of: {}'.
+ format(formats))
+
+ try:
+ dtype = X.dtype
+ except AttributeError:
+ X = asfarray(X)
+ dtype = X.dtype
+
+ numpy = False
+ if dtype in np_dtypes:
+ numpy = True
+ X = np.atleast_2d(X)
+ X = tf.Variable(X, dtype=tf.float32, trainable=False)
+
+ if X.dtype not in tf_dtypes:
+ raise ValueError('I cannot handle the variable you have ' +
+ 'provided of type ' + str(X.dtype) + '. ' +
+ 'Inputs should be a numpy or tf array.')
+
+ X_shape = X.get_shape().as_list()
+ if not ((len(X_shape) == 3 and data_format in formats_3d) or
+ (len(X_shape) == 4 and data_format in formats_4d)):
+ raise ValueError(
+ 'The entered variable has incorrect shape - ' +
+ str(X_shape) + ' for the specified data_format ' +
+ data_format + '.')
+
+ # Reshape the inputs to all be 3d inputs of shape (batch, h, w)
+ if data_format in formats_4d:
+ # Move all of the channels into the batch dimension for the
+ # input. This may involve transposing, depending on the data
+ # format
+ with tf.variable_scope('ch_to_batch'):
+ s = X.get_shape().as_list()[1:]
+ size = '{}x{}'.format(s[0], s[1])
+ name = 'dtcwt_fwd_{}'.format(size)
+ if data_format == 'nhwc':
+ nch = s[2]
+ X = tf.transpose(X, perm=[0, 3, 1, 2])
+ X = tf.reshape(X, [-1, s[0], s[1]])
+ else:
+ nch = s[0]
+ X = tf.reshape(X, [-1, s[1], s[2]])
+ elif data_format == "hwn" or data_format == "hwc":
+ s = X.get_shape().as_list()[:2]
+ size = '{}x{}'.format(s[0], s[1])
+ name = 'dtcwt_fwd_{}'.format(size)
+ with tf.variable_scope('ch_to_start'):
+ X = tf.transpose(X, perm=[2,0,1])
+ else:
+ s = X.get_shape().as_list()[1:3]
+ size = '{}x{}'.format(s[0], s[1])
+ name = 'dtcwt_fwd_{}'.format(size)
+
+ # Do the dtcwt, now with a 3 dimensional input
+ with tf.variable_scope(name):
+ Yl, Yh, Yscale = self._forward_ops(X, nlevels)
+
+ # Reshape it all again to match the input
+ if data_format in formats_4d:
+ # Put the channels back into their correct positions
+ with tf.variable_scope('batch_to_ch'):
+ # Reshape Yl
+ s = Yl.get_shape().as_list()[1:]
+ Yl = tf.reshape(Yl, [-1, nch, s[0], s[1]], name='Yl_reshape')
+ if data_format == 'nhwc':
+ Yl = tf.transpose(Yl, [0, 2, 3, 1], name='Yl_ch_to_end')
+
+ # Reshape Yh
+ with tf.variable_scope('Yh'):
+ Yh_new = [None,] * nlevels
+ for i in range(nlevels):
+ s = Yh[i].get_shape().as_list()[1:]
+ Yh_new[i] = tf.reshape(
+ Yh[i], [-1, nch, s[0], s[1], s[2]],
+ name='scale{}_reshape'.format(i))
+ if data_format == 'nhwc':
+ Yh_new[i] = tf.transpose(
+ Yh_new[i], [0, 2, 3, 1, 4],
+ name='scale{}_ch_to_end'.format(i))
+ Yh = tuple(Yh_new)
+
+ # Reshape Yscale
+ if include_scale:
+ with tf.variable_scope('Yscale'):
+ Yscale_new = [None,] * nlevels
+ for i in range(nlevels):
+ s = Yscale[i].get_shape().as_list()[1:]
+ Yscale_new[i] = tf.reshape(
+ Yscale[i], [-1, nch, s[0], s[1]],
+ name='scale{}_reshape'.format(i))
+ if data_format == 'nhwc':
+ Yscale_new[i] = tf.transpose(
+ Yscale_new[i], [0, 2, 3, 1],
+ name='scale{}_ch_to_end'.format(i))
+ Yscale = tuple(Yscale_new)
+
+ elif data_format == "hwn" or data_format == "hwc":
+ with tf.variable_scope('ch_to_end'):
+ Yl = tf.transpose(Yl, perm=[1,2,0], name='Yl')
+ Yh = tuple(
+ tf.transpose(x, [1, 2, 0, 3], name='Yh{}'.format(i))
+ for i,x in enumerate(Yh))
+ if include_scale:
+ Yscale = tuple(
+ tf.transpose(x, [1, 2, 0], name='Yscale{}'.format(i))
+ for i,x in enumerate(Yscale))
+
+ # Return the pyramid
+ if include_scale:
+ return Pyramid(Yl, Yh, Yscale, numpy)
+ else:
+ return Pyramid(Yl, Yh, None, numpy)
+
+ def inverse(self, pyramid, gain_mask=None):
+ """ Perform an inverse transform on an image.
+
+ Can be provided with either an np array (naive usage) or a tensorflow
+ variable or placeholder (designed usage).
+
+ :param pyramid: A :py:class:`dtcwt.tf.Pyramid` like class holding
+ the transform domain representation to invert
+ :param gain_mask: Gain to be applied to each sub-band. Should have shape
+ (6, nlevels) or be None.
+
+ :returns: An array, X, compatible with the reconstruction. Will be a tf
+ Variable if the Pyramid was made with tf inputs, otherwise a numpy
+ array.
+
+ .. note::
+
+ A tf.Variable is returned if the input pyramid was a tf Pyramid class.
+ If it was not, a numpy array is returned (note that this is
+ inefficient, as in both cases we have to construct the graph - in
+ the second case, we then execute it and discard it).
+
+ The (*d*, *l*)-th element of *gain_mask* is gain for subband with
+ direction *d* at level *l*. If gain_mask[d,l] == 0, no computation is
+ performed for band (d,l). Default *gain_mask* is all ones. Note that
+ both *d* and *l* are zero-indexed.
+
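+ For example (an illustrative sketch, assuming *xfm* is a
+ :py:class:`dtcwt.tf.Transform2d` and *pyramid* came from its forward
+ method), discarding the two diagonal subbands of a 3-level pyramid::
+
+ gain_mask = np.ones((6, 3))
+ gain_mask[1] = 0 # 45 degree subband
+ gain_mask[4] = 0 # 135 degree subband
+ X = xfm.inverse(pyramid, gain_mask=gain_mask)
+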
+ .. codeauthor:: Fergal Cotter , Feb 2017
+ .. codeauthor:: Rich Wareham , Aug 2013
+ .. codeauthor:: Nick Kingsbury, Cambridge University, Sept 2001
+ .. codeauthor:: Cian Shaffrey, Cambridge University, Sept 2001
+ """
+
+ # A tensorflow object was provided
+ numpy = False
+ if isinstance(pyramid, Pyramid):
+ Yl = pyramid.lowpass_op
+ Yh = pyramid.highpasses_ops
+ numpy = pyramid.numpy
+
+ # Check if a numpy pyramid was provided
+ elif isinstance(pyramid, Pyramid_np) or \
+ hasattr(pyramid, 'lowpass') and hasattr(pyramid, 'highpasses'):
+ numpy = True
+ Yl, Yh = pyramid.lowpass, pyramid.highpasses
+ Yl = tf.Variable(Yl, trainable=False, dtype=tf.float32)
+ Yh = tuple(
+ tf.Variable(level, trainable=False, dtype=tf.complex64)
+ for level in Yh)
+ else:
+ raise ValueError(
+ 'Unknown pyramid provided to inverse transform')
+
+ # Need to make sure it has at least 3 dimensions for tensorflow
+ Yl_shape = tuple(Yl.get_shape().as_list())
+ if len(Yl_shape) == 2:
+ Yl = tf.expand_dims(Yl, axis=0)
+ Yh = tuple(tf.expand_dims(x, axis=0) for x in Yh)
+ elif len(Yl_shape) >= 3:
+ raise ValueError(
+ 'The entered variables have too many ' +
+ 'dimensions - ' + str(Yl_shape) + '. For batches of ' +
+ 'images with multiple channels (i.e. 3 or 4 dimensions), ' +
+ 'please either enter each channel separately, or use ' +
+ 'the inverse_channels method.')
+
+ # Do the inverse transform
+ s = Yl.get_shape().as_list()[1:]
+ nlevels = len(Yh)
+ size = '{}x{}_up_{}'.format(s[0], s[1], nlevels)
+ name = 'dtcwt_inv_{}'.format(size)
+ with tf.variable_scope(name):
+ X = self._inverse_ops(Yl, Yh, gain_mask)
+
+ # Chop off the first dimension
+ X = X[0]
+
+ if numpy:
+ with tf.Session() as sess:
+ sess.run(tf.global_variables_initializer())
+ X = sess.run(X)
+
+ return X
+
+ def inverse_channels(self, pyramid, data_format, gain_mask=None):
+ """
+ Perform an inverse transform on an image with multiple channels.
+
+ Must be provided with a tensorflow variable or placeholder (unlike the
+ more general :py:meth:`~dtcwt.tf.Transform2d.inverse`).
+
+ This is designed to work after calling the
+ :py:meth:`~dtcwt.tf.Transform2d.forward_channels` method. You must use
+ the same data_format for the inverse_channels as the one used for the
+ forward_channels (unless you have explicitly reshaped the output).
+
+ :param pyramid: A :py:class:`dtcwt.tf.Pyramid` like class holding
+ the transform domain representation to invert
+ :param str data_format: A string of the form:
+ "nhw" (or "chw"), "hwn" (or "hwc"), "nchw" or "nhwc". Note that for
+ these strings, 'n' is used to indicate where the batch dimension is,
+ 'c' is used to indicate where the image channels are, 'h' is used to
+ indicate where the row dimension is, and 'w' is used to indicate
+ where the columns are. If the data_format is:
+
+ - "nhw" : the input will be interpreted as a batch of 2D images,
+ with the batch dimension as the first.
+ - "chw" : will function exactly the same as "nhw" but is offered
+ to indicate the input is a 2D image with channels.
+ - "hwn" : the input will be interpreted as a batch of 2D images
+ with the batch dimension as the last.
+ - "hwc" : will function exactly the same as "hwn" but is offered
+ to indicate the input is a 2D image with channels.
+ - "nchw" : the input is a batch of images with channel dimension
+ as the second dimension. Batch dimension is first.
+ - "nhwc" : the input is a batch of images with channel dimension
+ as the last dimension. Batch dimension is first.
+
+ :param gain_mask: Gain to be applied to each subband. Should have shape
+ (6, nlevels) or be None.
+
+ :returns: An array, X, compatible with the reconstruction. Will be a tf
+ Variable if the Pyramid was made with tf inputs, otherwise a numpy
+ array.
+
+ The (*d*, *l*)-th element of *gain_mask* is gain for subband with
+ direction *d* at level *l*. If gain_mask[d,l] == 0, no computation is
+ performed for band (d,l). Default *gain_mask* is all ones. Note that
+ both *d* and *l* are zero-indexed.
+
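+ A sketch of the intended round trip (illustrative only)::
+
+ xfm = dtcwt.tf.Transform2d()
+ X = tf.placeholder(tf.float32, [None, 128, 128, 3])
+ p = xfm.forward_channels(X, data_format='nhwc', nlevels=3)
+ X_hat = xfm.inverse_channels(p, data_format='nhwc')
+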
+ .. codeauthor:: Fergal Cotter , Feb 2017
+ .. codeauthor:: Rich Wareham , Aug 2013
+ .. codeauthor:: Nick Kingsbury, Cambridge University, Sept 2001
+ .. codeauthor:: Cian Shaffrey, Cambridge University, Sept 2001
+ """
+ # Input checking
+ data_format = data_format.lower()
+ formats_3d = ("nhw", "chw", "hwn", "hwc")
+ formats_4d = ("nchw", "nhwc")
+ formats = formats_3d + formats_4d
+ if data_format not in formats:
+ raise ValueError('The data format must be one of: {}'.
+ format(formats))
+
+ # A tensorflow object was provided
+ numpy = False
+ if isinstance(pyramid, Pyramid):
+ Yl = pyramid.lowpass_op
+ Yh = pyramid.highpasses_ops
+ numpy = pyramid.numpy
+
+ # Check if a numpy pyramid was provided
+ elif isinstance(pyramid, Pyramid_np) or \
+ hasattr(pyramid, 'lowpass') and hasattr(pyramid, 'highpasses'):
+ numpy = True
+ Yl, Yh = pyramid.lowpass, pyramid.highpasses
+ Yl = tf.Variable(Yl, trainable=False, dtype=tf.float32)
+ Yh = tuple(
+ tf.Variable(level, trainable=False, dtype=tf.complex64)
+ for level in Yh)
+ else:
+ raise ValueError(
+ 'Unknown pyramid provided to inverse transform')
+
+ # Check the shape was correct
+ Yl_shape = Yl.get_shape().as_list()
+ if not ((len(Yl_shape) == 3 and data_format in formats_3d) or
+ (len(Yl_shape) == 4 and data_format in formats_4d)):
+ raise ValueError(
+ 'The entered variable has incorrect shape - ' +
+ str(Yl_shape) + ' for the specified data_format ' +
+ data_format + '.')
+
+ # Reshape the inputs to all be 3d inputs of shape (batch, h, w)
+ if data_format in formats_4d:
+ if data_format == "nhwc":
+ channel_ax = 3
+ else:
+ channel_ax = 1
+ # Move all of the channels into the batch dimension for the lowpass
+ # input. This may involve transposing, depending on the data format
+ with tf.variable_scope('ch_to_batch'):
+ s = Yl.get_shape().as_list()
+ num_channels = s[channel_ax]
+ nlevels = len(Yh)
+ if data_format == "nhwc":
+ size = '{}x{}_up_{}'.format(s[1], s[2], nlevels)
+ Yl = tf.transpose(Yl, [0, 3, 1, 2])
+ Yl = tf.reshape(Yl, [-1, s[1], s[2]])
+ else:
+ size = '{}x{}_up_{}'.format(s[2], s[3], nlevels)
+ Yl = tf.reshape(Yl, [-1, s[2], s[3]])
+
+ # Move all of the channels into the batch dimension for the
+ # highpass input. This may involve transposing, depending on the
+ # data format
+ Yh_new = []
+ for scale in Yh:
+ s = scale.get_shape().as_list()
+ if s[channel_ax] != num_channels:
+ raise ValueError(
+ """The number of channels has to be consistent for all
+ inputs across the channel axis {}. You fed in Yl: {}
+ and Yh: {}""".format(channel_ax, Yl, Yh))
+ if data_format == "nhwc":
+ scale = tf.transpose(scale, [0, 3, 1, 2, 4])
+ Yh_new.append(tf.reshape(scale, [-1, s[1], s[2], s[4]]))
+ else:
+ Yh_new.append(tf.reshape(scale, [-1, s[2], s[3], s[4]]))
+ Yh = Yh_new
+
+ elif data_format == "hwn" or data_format == "hwc":
+ s = Yl.get_shape().as_list()
+ num_channels = s[2]
+ size = '{}x{}'.format(s[0], s[1])
+ with tf.variable_scope('ch_to_start'):
+ Yl = tf.transpose(Yl, perm=[2,0,1], name='Yl')
+ Yh = tuple(
+ tf.transpose(x, [2, 0, 1, 3], name='Yh{}'.format(i))
+ for i,x in enumerate(Yh))
+
+ else:
+ s = Yl.get_shape().as_list()
+ size = '{}x{}'.format(s[1], s[2])
+ num_channels = s[0]
+
+ # Do the inverse dtcwt, now with the same shape input
+ name = 'dtcwt_inv_{}_{}channels'.format(size, num_channels)
+ with tf.variable_scope(name):
+ X = self._inverse_ops(Yl, Yh, gain_mask)
+
+ # Reshape the output to match the input shape.
+ if data_format in formats_4d:
+ with tf.variable_scope('batch_to_ch'):
+ s = X.get_shape().as_list()
+ X = tf.reshape(X, [-1, num_channels, s[1], s[2]])
+ if data_format == "nhwc":
+ X = tf.transpose(X, [0, 2, 3, 1], name='X')
+ else:
+ if data_format == "hwn" or data_format == "hwc":
+ with tf.variable_scope('ch_to_end'):
+ X = tf.transpose(X, [1, 2, 0], name="X")
+
+ # If the user expects numpy back, evaluate the data.
+ if numpy:
+ with tf.Session() as sess:
+ sess.run(tf.global_variables_initializer())
+ X = sess.run(X)
+
+ return X
+
+ def _forward_ops(self, X, nlevels=3):
+ """ Perform a *n*-level DTCWT-2D decompostion on a 2D matrix *X*.
+
+ :param X: 3D real array of size [batch, h, w]
+ :param nlevels: Number of levels of wavelet decomposition
+
+ :returns: A tuple of Yl, Yh, Yscale
+ """
+
+ # If biort has 6 elements instead of 4, then it's a modified
+ # rotationally symmetric wavelet
+ # FIXME: there's probably a nicer way to do this
+ if len(self.biort) == 4:
+ h0o, g0o, h1o, g1o = self.biort
+ elif len(self.biort) == 6:
+ h0o, g0o, h1o, g1o, h2o, g2o = self.biort
+ else:
+ raise ValueError('Biort wavelet must have 6 or 4 components.')
+
+ # If qshift has 12 elements instead of 8, then it's a modified
+ # rotationally symmetric wavelet
+ # FIXME: there's probably a nicer way to do this
+ if len(self.qshift) == 8:
+ h0a, h0b, g0a, g0b, h1a, h1b, g1a, g1b = self.qshift
+ elif len(self.qshift) == 12:
+ h0a, h0b, g0a, g0b, h1a, h1b, g1a, g1b, h2a, h2b = self.qshift[:10]
+ else:
+ raise ValueError('Qshift wavelet must have 12 or 8 components.')
+
+ # Check the shape and form of the input
+ if X.dtype not in tf_dtypes:
+ raise ValueError('X needs to be a tf variable or placeholder')
+
+ original_size = X.get_shape().as_list()[1:]
+
+ if len(original_size) >= 3:
+ raise ValueError(
+ """The entered variable has too many dimensions {}. If
+ the final dimension are colour channels, please enter each
+ channel separately.""".format(original_size))
+
+ # ############################ Resize #################################
+ # The next few lines of code check to see if the image is odd in size.
+ # If so, an extra row/column will be added to the bottom/right of the
+ # image.
+ initial_row_extend = 0
+ initial_col_extend = 0
+ # If the row count of X is not divisible by 2 then we need to
+ # extend X by adding a row at the bottom
+ if original_size[0] % 2 != 0:
+ bottom_row = tf.slice(X, [0, original_size[0] - 1, 0], [-1, 1, -1])
+ X = tf.concat([X, bottom_row], axis=1)
+ initial_row_extend = 1
+
+ # If the col count of X is not divisible by 2 then we need to
+ # extend X by adding a col to the right
+ if original_size[1] % 2 != 0:
+ right_col = tf.slice(X, [0, 0, original_size[1] - 1], [-1, -1, 1])
+ X = tf.concat([X, right_col], axis=2)
+ initial_col_extend = 1
+
+ extended_size = X.get_shape().as_list()[1:3]
+
+ if nlevels == 0:
+ return X, (), ()
+
+ # ########################### Initialise ###############################
+ Yh = [None, ] * nlevels
+ # This is only required if the user specifies a third output
+ # component.
+ Yscale = [None, ] * nlevels
+
+ # ############################ Level 1 #################################
+ # Uses the biorthogonal filters
+ if nlevels >= 1:
+ # Do odd top-level filters on cols.
+ Lo = colfilter(X, h0o)
+ Hi = colfilter(X, h1o)
+ if len(self.biort) >= 6:
+ Ba = colfilter(X, h2o)
+
+ # Do odd top-level filters on rows.
+ LoLo = rowfilter(Lo, h0o)
+ LoLo_shape = LoLo.get_shape().as_list()[1:]
+
+ # Horizontal wavelet pair (15 & 165 degrees)
+ horiz = q2c(rowfilter(Hi, h0o))
+
+ # Vertical wavelet pair (75 & 105 degrees)
+ vertic = q2c(rowfilter(Lo, h1o))
+
+ # Diagonal wavelet pair (45 & 135 degrees)
+ if len(self.biort) >= 6:
+ diag = q2c(rowfilter(Ba, h2o))
+ else:
+ diag = q2c(rowfilter(Hi, h1o))
+
+ # Pack all 6 tensors into one
+ Yh[0] = tf.stack(
+ [horiz[0], diag[0], vertic[0], vertic[1], diag[1], horiz[1]],
+ axis=3)
+
+ Yscale[0] = LoLo
+
+ # ############################ Level 2+ ################################
+ # Uses the qshift filters
+ for level in xrange(1, nlevels):
+ row_size, col_size = LoLo_shape[0], LoLo_shape[1]
+ # If the row count of LoLo is not divisible by 4 (it will be
+ # divisible by 2), add 2 extra rows to make it so
+ if row_size % 4 != 0:
+ LoLo = tf.pad(LoLo, [[0, 0], [1, 1], [0, 0]], 'SYMMETRIC')
+
+ # If the col count of LoLo is not divisible by 4 (it will be
+ # divisible by 2), add 2 extra cols to make it so
+ if col_size % 4 != 0:
+ LoLo = tf.pad(LoLo, [[0, 0], [0, 0], [1, 1]], 'SYMMETRIC')
+
+ # Do even Qshift filters on cols.
+ Lo = coldfilt(LoLo, h0b, h0a)
+ Hi = coldfilt(LoLo, h1b, h1a)
+ if len(self.qshift) >= 12:
+ Ba = coldfilt(LoLo, h2b, h2a)
+
+ # Do even Qshift filters on rows.
+ LoLo = rowdfilt(Lo, h0b, h0a)
+ LoLo_shape = LoLo.get_shape().as_list()[1:3]
+
+ # Horizontal wavelet pair (15 & 165 degrees)
+ horiz = q2c(rowdfilt(Hi, h0b, h0a))
+
+ # Vertical wavelet pair (75 & 105 degrees)
+ vertic = q2c(rowdfilt(Lo, h1b, h1a))
+
+ # Diagonal wavelet pair (45 & 135 degrees)
+ if len(self.qshift) >= 12:
+ diag = q2c(rowdfilt(Ba, h2b, h2a))
+ else:
+ diag = q2c(rowdfilt(Hi, h1b, h1a))
+
+ # Pack all 6 tensors into one
+ Yh[level] = tf.stack(
+ [horiz[0], diag[0], vertic[0], vertic[1], diag[1], horiz[1]],
+ axis=3)
+
+ Yscale[level] = LoLo
+
+ Yl = LoLo
+
+ if initial_row_extend == 1 and initial_col_extend == 1:
+ logging.warn('The image entered is now a {0} NOT a {1}.'.format(
+ 'x'.join(list(str(s) for s in extended_size)),
+ 'x'.join(list(str(s) for s in original_size))))
+ logging.warn(
+ """The bottom row and rightmost column have been duplicated,
+ prior to decomposition.""")
+
+ if initial_row_extend == 1 and initial_col_extend == 0:
+ logging.warn('The image entered is now a {0} NOT a {1}.'.format(
+ 'x'.join(list(str(s) for s in extended_size)),
+ 'x'.join(list(str(s) for s in original_size))))
+ logging.warn(
+ 'The bottom row has been duplicated, prior to decomposition.')
+
+ if initial_row_extend == 0 and initial_col_extend == 1:
+ logging.warn('The image entered is now a {0} NOT a {1}.'.format(
+ 'x'.join(list(str(s) for s in extended_size)),
+ 'x'.join(list(str(s) for s in original_size))))
+ logging.warn(
+ """The rightmost column has been duplicated, prior to
+ decomposition.""")
+
+ return Yl, tuple(Yh), tuple(Yscale)
+
+ def _inverse_ops(self, Yl, Yh, gain_mask=None):
+ """Perform an *n*-level dual-tree complex wavelet (DTCWT) 2D
+ reconstruction.
+
+ :param Yl: The lowpass output from a forward transform. Should be a
+ tensorflow variable.
+ :param Yh: The tuple of highpass outputs from a forward transform.
+ Should be tensorflow variables.
+ :param gain_mask: Gain to be applied to each subband.
+
+ :returns: A tf.Variable holding the output
+
+ The (*d*, *l*)-th element of *gain_mask* is gain for subband with
+ direction *d* at level *l*. If gain_mask[d,l] == 0, no computation is
+ performed for band (d,l). Default *gain_mask* is all ones. Note that
+ both *d* and *l* are zero-indexed.
+
+ .. codeauthor:: Fergal Cotter , Feb 2017
+ .. codeauthor:: Rich Wareham , Aug 2013
+ .. codeauthor:: Nick Kingsbury, Cambridge University, May 2002
+ .. codeauthor:: Cian Shaffrey, Cambridge University, May 2002
+
+ """
+ a = len(Yh) # No of levels.
+
+ if gain_mask is None:
+ gain_mask = np.ones((6, a)) # Default gain_mask.
+
+ gain_mask = np.array(gain_mask)
+
+ # If biort has 6 elements instead of 4, then it's a modified
+ # rotationally symmetric wavelet
+ # FIXME: there's probably a nicer way to do this
+ if len(self.biort) == 4:
+ h0o, g0o, h1o, g1o = self.biort
+ elif len(self.biort) == 6:
+ h0o, g0o, h1o, g1o, h2o, g2o = self.biort
+ else:
+ raise ValueError('Biort wavelet must have 6 or 4 components.')
+
+ # If qshift has 12 elements instead of 8, then it's a modified
+ # rotationally symmetric wavelet
+ # FIXME: there's probably a nicer way to do this
+ if len(self.qshift) == 8:
+ h0a, h0b, g0a, g0b, h1a, h1b, g1a, g1b = self.qshift
+ elif len(self.qshift) == 12:
+ h0a, h0b, g0a, g0b, h1a, h1b, \
+ g1a, g1b, h2a, h2b, g2a, g2b = self.qshift
+ else:
+ raise ValueError('Qshift wavelet must have 12 or 8 components.')
+
+ current_level = a
+ Z = Yl
+
+ # Only levels 2 and above are handled here with the qshift filters;
+ # level 1 uses the biorthogonal filters below
+ while current_level >= 2:
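+ # Yh subbands are stacked in the order [15, 45, 75, 105, 135, 165]
+ # degrees (see _forward_ops), so 0:6:5 picks out the horizontal
+ # pair, 2:4:1 the vertical pair and 1:5:3 the diagonal pair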
+ lh = c2q(Yh[current_level - 1][:, :, :, 0:6:5],
+ gain_mask[[0, 5],
+ current_level - 1])
+ hl = c2q(Yh[current_level - 1][:, :, :, 2:4:1],
+ gain_mask[[2, 3],
+ current_level - 1])
+ hh = c2q(Yh[current_level - 1][:, :, :, 1:5:3],
+ gain_mask[[1, 4],
+ current_level - 1])
+
+ # Do even Qshift filters on columns.
+ y1 = colifilt(Z, g0b, g0a) + colifilt(lh, g1b, g1a)
+
+ if len(self.qshift) >= 12:
+ y2 = colifilt(hl, g0b, g0a)
+ y2bp = colifilt(hh, g2b, g2a)
+
+ # Do even Qshift filters on rows.
+ y1T = tf.transpose(y1, perm=[0, 2, 1])
+ y2T = tf.transpose(y2, perm=[0, 2, 1])
+ y2bpT = tf.transpose(y2bp, perm=[0, 2, 1])
+ Z = tf.transpose(
+ colifilt(y1T, g0b, g0a) +
+ colifilt(y2T, g1b, g1a) +
+ colifilt(y2bpT, g2b, g2a),
+ perm=[0, 2, 1])
+ else:
+ y2 = colifilt(hl, g0b, g0a) + colifilt(hh, g1b, g1a)
+
+ # Do even Qshift filters on rows.
+ y1T = tf.transpose(y1, perm=[0, 2, 1])
+ y2T = tf.transpose(y2, perm=[0, 2, 1])
+ Z = tf.transpose(
+ colifilt(y1T, g0b, g0a) +
+ colifilt(y2T, g1b, g1a),
+ perm=[0, 2, 1])
+
+ # Check size of Z and crop as required
+ Z_r, Z_c = Z.get_shape().as_list()[1:3]
+ S_r, S_c = Yh[current_level - 2].get_shape().as_list()[1:3]
+ # check to see if this result needs to be cropped for the rows
+ if Z_r != S_r * 2:
+ Z = Z[:, 1:-1, :]
+ # check to see if this result needs to be cropped for the cols
+ if Z_c != S_c * 2:
+ Z = Z[:, :, 1:-1]
+
+ # Assert that the size matches at this stage
+ Z_r, Z_c = Z.get_shape().as_list()[1:3]
+ if Z_r != S_r * 2 or Z_c != S_c * 2:
+ raise ValueError(
+ 'Sizes of highpasses {}x{} are not '.format(Z_r, Z_c) +
+ 'compatible with {}x{} from next level'.format(S_r, S_c))
+
+ current_level = current_level - 1
+
+ if current_level == 1:
+ lh = c2q(Yh[current_level - 1][:, :, :, 0:6:5],
+ gain_mask[[0, 5],
+ current_level - 1])
+ hl = c2q(Yh[current_level - 1][:, :, :, 2:4:1],
+ gain_mask[[2, 3],
+ current_level - 1])
+ hh = c2q(Yh[current_level - 1][:, :, :, 1:5:3],
+ gain_mask[[1, 4],
+ current_level - 1])
+
+ # Do odd top-level filters on columns.
+ y1 = colfilter(Z, g0o) + colfilter(lh, g1o)
+
+ if len(self.biort) >= 6:
+ y2 = colfilter(hl, g0o)
+ y2bp = colfilter(hh, g2o)
+
+ # Do odd top-level filters on rows.
+ Z = rowfilter(y1, g0o) + rowfilter(y2, g1o) + \
+ rowfilter(y2bp, g2o)
+ else:
+ y2 = colfilter(hl, g0o) + colfilter(hh, g1o)
+
+ # Do odd top-level filters on rows.
+ Z = rowfilter(y1, g0o) + rowfilter(y2, g1o)
+
+ return Z
+
+
+def q2c(y):
+ """
+ Convert from quads in y to complex numbers in z.
+ """
+
+ # Arrange pixels from the corners of the quads into
+ # 2 subimages of alternate real and imag pixels.
+ # a----b
+ # | |
+ # | |
+ # c----d
+ # Combine (a,b) and (d,c) to form two complex subimages.
+ a, b = y[:, 0::2, 0::2], y[:, 0::2, 1::2]
+ c, d = y[:, 1::2, 0::2], y[:, 1::2, 1::2]
+
+ p = tf.complex(a / np.sqrt(2), b / np.sqrt(2)) # p = (a + jb) / sqrt(2)
+ q = tf.complex(d / np.sqrt(2), -c / np.sqrt(2)) # q = (d - jc) / sqrt(2)
+
+ # Form the 2 highpasses in z.
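+ # z1 = p - q = ((a - d) + j(b + c)) / sqrt(2)
+ # z2 = p + q = ((a + d) + j(b - c)) / sqrt(2)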
+ return (p - q, p + q)
+
+
+def c2q(w, gain):
+ """
+ Scale by gain and convert from complex w(:,:,1:2) to real quad-numbers
+ in z.
+
+ Arrange pixels from the real and imag parts of the 2 highpasses
+ into 4 separate subimages.
+ A----B Re Im of w(:,:,1)
+ | |
+ | |
+ C----D Re Im of w(:,:,2)
+
+ """
+
+ # Input has shape [batch, r, c, 2]
+ r, c = w.get_shape().as_list()[1:3]
+
+ sc = np.sqrt(0.5) * gain
+ P = w[:, :, :, 0] * sc[0] + w[:, :, :, 1] * sc[1]
+ Q = w[:, :, :, 0] * sc[0] - w[:, :, :, 1] * sc[1]
+
+ # Recover each of the 4 corners of the quads.
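+ # With unit gain this inverts q2c: P = sqrt(2)*p = a + jb and
+ # Q = -sqrt(2)*q = -d + jc, so (x1, x2, x3, x4) = (a, b, c, d)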
+ x1 = tf.real(P)
+ x2 = tf.imag(P)
+ x3 = tf.imag(Q)
+ x4 = -tf.real(Q)
+
+ # Stack 2 inputs of shape [batch, r, c] to [batch, r, 2, c]
+ x_rows1 = tf.stack([x1, x3], axis=-2)
+ # Reshaping interleaves the results
+ x_rows1 = tf.reshape(x_rows1, [-1, 2 * r, c])
+ # Do the same for the even columns
+ x_rows2 = tf.stack([x2, x4], axis=-2)
+ x_rows2 = tf.reshape(x_rows2, [-1, 2 * r, c])
+
+ # Stack the two [batch, 2*r, c] tensors to [batch, 2*r, c, 2]
+ x_cols = tf.stack([x_rows1, x_rows2], axis=-1)
+ y = tf.reshape(x_cols, [-1, 2 * r, 2 * c])
+
+ return y
diff --git a/dtcwt/utils.py b/dtcwt/utils.py
index ca0ed32..f28baa5 100644
--- a/dtcwt/utils.py
+++ b/dtcwt/utils.py
@@ -5,52 +5,89 @@
import functools
import numpy as np
+
+def unpack(pyramid, backend='numpy'):
+ """ Unpacks a pyramid give back the constituent parts.
+
+ :param pyramid: The Pyramid of DTCWT transforms you wish to unpack
+ :param str backend: A string from 'numpy', 'opencl', or 'tf' indicating
+ which attributes you want to unpack from the pyramid.
+
+ :returns: A generator which can be unpacked into the Yl, Yh and
+ Yscale components of the pyramid. The generator will only yield 2
+ values if the pyramid was created with the include_scale parameter
+ set to false.
+
+ .. note::
+
+ You can still unpack a tf or opencl pyramid as if it were created by
+ the numpy backend. In this case it will return a numpy array, rather
+ than the backend-specific array type.
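+
+ A short usage sketch (illustrative only; *image* is any 2D array)::
+
+ t = dtcwt.Transform2d()
+ pyramid = t.forward(image, nlevels=3, include_scale=True)
+ Yl, Yh, Yscale = unpack(pyramid, 'numpy')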
+ """
+ backend = backend.lower()
+ if backend == 'numpy':
+ yield pyramid.lowpass
+ yield pyramid.highpasses
+ if pyramid.scales is not None:
+ yield pyramid.scales
+ elif backend == 'opencl':
+ yield pyramid.cl_lowpass
+ yield pyramid.cl_highpasses
+ if pyramid.cl_scales is not None:
+ yield pyramid.cl_scales
+ elif backend == 'tf':
+ yield pyramid.lowpass_op
+ yield pyramid.highpasses_ops
+ if pyramid.scales_ops is not None:
+ yield pyramid.scales_ops
+ else:
+ raise ValueError('Unknown backend: {}'.format(backend))
+
+
def drawedge(theta,r,w,N):
"""Generate an image of size N * N pels, of an edge going from 0 to 1
in height at theta degrees to the horizontal (top of image = 1 if angle = 0).
r is a two-element vector, it is a coordinate in ij coords through
which the step should pass.
The shape of the intensity step is half a raised cosine w pels wide (w>=1).
-
- T. E . Gale's enhancement to drawedge() for MATLAB, transliterated
+
+ T. E. Gale's enhancement to drawedge() for MATLAB, transliterated
to Python by S. C. Forshaw, Nov. 2013. """
-
+
# convert theta from degrees to radians
- thetar = np.array(theta * np.pi / 180)
-
+ thetar = np.array(theta * np.pi / 180)
+
# Calculate image centre from given width
- imCentre = (np.array([N,N]).T - 1) / 2 + 1
-
+ imCentre = (np.array([N,N]).T - 1) / 2 + 1
+
# Calculate values to subtract from the plane
- r = np.array([np.cos(thetar), np.sin(thetar)])*(-1) * (r - imCentre)
+ r = np.array([np.cos(thetar), np.sin(thetar)])*(-1) * (r - imCentre)
# check width of raised cosine section
w = np.maximum(1,w)
-
-
+
+
ramp = np.arange(0,N) - (N+1)/2
hgrad = np.sin(thetar)*(-1) * np.ones([N,1])
vgrad = np.cos(thetar)*(-1) * np.ones([1,N])
plane = ((hgrad * ramp) - r[0]) + ((ramp * vgrad).T - r[1])
x = 0.5 + 0.5 * np.sin(np.minimum(np.maximum(plane*(np.pi/w), np.pi/(-2)), np.pi/2))
-
+
return x
def drawcirc(r,w,du,dv,N):
-
- """Generate an image of size N*N pels, containing a circle
+
+ """Generate an image of size N*N pels, containing a circle
radius r pels and centred at du,dv relative
- to the centre of the image. The edge of the circle is a cosine shaped
+ to the centre of the image. The edge of the circle is a cosine shaped
edge of width w (from 10 to 90% points).
-
+
Python implementation by S. C. Forshaw, November 2013."""
-
+
# check value of w to avoid dividing by zero
w = np.maximum(w,1)
-
+
#x plane
x = np.ones([N,1]) * ((np.arange(0,N,1, dtype='float') - (N+1) / 2 - dv) / r)
-
+
# y vector
y = (((np.arange(0,N,1, dtype='float') - (N+1) / 2 - du) / r) * np.ones([1,N])).T
@@ -75,7 +112,7 @@ def appropriate_complex_type_for(X):
"""
X = asfarray(X)
-
+
if np.issubsctype(X.dtype, np.complex64) or np.issubsctype(X.dtype, np.complex128):
return X.dtype
elif np.issubsctype(X.dtype, np.float32):
@@ -88,7 +125,7 @@ def appropriate_complex_type_for(X):
def as_column_vector(v):
"""Return *v* as a column vector with shape (N,1).
-
+
"""
v = np.atleast_2d(v)
if v.shape[0] == 1:
diff --git a/tests/Speed Tests.ipynb b/tests/Speed Tests.ipynb
new file mode 100644
index 0000000..be67482
--- /dev/null
+++ b/tests/Speed Tests.ipynb
@@ -0,0 +1,997 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Speed Comparisons between the Numpy an TF implementations of the DTCWT\n",
+ "Operations were performed on a system with a GTX 1080 GPU and Intel Xeon CPU E5-2660 CPU"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-10T22:55:10.246398Z",
+ "start_time": "2017-08-10T22:55:08.676078Z"
+ },
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "%matplotlib inline\n",
+ "import matplotlib.pyplot as plt\n",
+ "import datasets\n",
+ "import dtcwt\n",
+ "import dtcwt.tf\n",
+ "import tensorflow as tf\n",
+ "from time import time\n",
+ "import numpy as np\n",
+ "import os\n",
+ "import py3nvml\n",
+ "plt.style.use('seaborn')\n",
+ "py3nvml.grab_gpus(1);"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Analysis of Small Images"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "collapsed": true
+ },
+ "source": [
+ "## DTCWT on a single small image (64x64)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T11:30:51.360177Z",
+ "start_time": "2017-08-07T11:30:49.828104Z"
+ },
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "# Create the input\n",
+ "h, w = 64, 64\n",
+ "in_ = np.random.randn(1,h,w)\n",
+ "\n",
+ "# Set up the transforms\n",
+ "nlevels = 3\n",
+ "tf.reset_default_graph()\n",
+ "fwd = dtcwt.Transform2d() # Numpy Transform\n",
+ "fwd_tf = dtcwt.tf.Transform2d() # Tensorflow Transform\n",
+ "\n",
+ "in_placeholder = tf.placeholder(tf.float32, [None, h, w])\n",
+ "out_tf = fwd_tf.forward(in_placeholder, nlevels=nlevels)\n",
+ "out_fft = tf.fft2d(tf.cast(in_placeholder, tf.complex64))\n",
+ "\n",
+ "sess = tf.Session()\n",
+ "sess.run(tf.global_variables_initializer())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Numpy Implementation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T11:30:56.101356Z",
+ "start_time": "2017-08-07T11:30:51.362870Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "5.63 ms ± 592 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "small_np = %timeit -o for i in in_: fwd.forward(i, nlevels=nlevels)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### TF implementation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T11:30:56.989200Z",
+ "start_time": "2017-08-07T11:30:56.103634Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "2.26 ms ± 444 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "small_tf = %timeit -o sess.run(out_tf.lowpass_op, {in_placeholder: in_})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### (Comparison) Using an FFT in Tensorflow\n",
+ "We can safely assume that something like the FFT is an optimized, fast operation to do. This is a good yardstick to gauge the overheads with working on a GPU"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T11:30:57.470088Z",
+ "start_time": "2017-08-07T11:30:56.992097Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "3.82 ms ± 68.2 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "%timeit sess.run(out_fft, {in_placeholder: in_})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## DTCWT on a batch of small images (100x64x64)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T11:30:57.510947Z",
+ "start_time": "2017-08-07T11:30:57.473103Z"
+ },
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "in_ = np.random.randn(100,h,w)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Numpy Implementation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T11:31:02.709545Z",
+ "start_time": "2017-08-07T11:30:57.513113Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "642 ms ± 68.7 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "small_np_batch = %timeit -o for i in in_: fwd.forward(i, nlevels=nlevels)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### TF implementation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T11:31:04.972253Z",
+ "start_time": "2017-08-07T11:31:02.713718Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "2.77 ms ± 126 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "small_tf_batch = %timeit -o sess.run(out_tf.lowpass_op, {in_placeholder: in_})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### (Comparison) Using an FFT in Tensorflow"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T11:31:10.279027Z",
+ "start_time": "2017-08-07T11:31:04.975413Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "6.53 ms ± 439 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "%timeit sess.run(out_fft, {in_placeholder: in_})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Small Image Conclusion"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T11:31:10.504659Z",
+ "start_time": "2017-08-07T11:31:10.282293Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAfQAAAHiCAYAAAAeWT4MAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3XlcVPX+x/H3yAgqooIhaFpuZeWuoOE1KRQ1laLMtAXT\nW6npzWvmbpmiYotdvdqi/OzR7pIbZlbuYrmbkmXZpl5NBQxBEJTN8/uj29xIxREdBr68no9Hj0ee\nmTnzmTlzeM2cmQGbZVmWAABAqVbO3QMAAICrR9ABADAAQQcAwAAEHQAAAxB0AAAMQNABADAAQYfL\nzJkzR+PHj3f3GCXKwYMHde+996ply5Z677333D0OXGzHjh3q0KGD499hYWHaunXrRc87YcIEvf76\n68U1Ggxkd/cAKL1atmzp+P+zZ8/K09NTHh4ekqRJkyZp0KBB7hrNISoqSvfcc4969erl7lEkSfPm\nzVPbtm21YsUKt1x/VFSUEhISZLfbZbPZVLduXXXt2lX9+vWTp6enJkyYoJUrV0qScnNzZVmWPD09\nJUmtW7fWvHnzlJOTo7lz52rlypVKTk6Wn5+f2rZtqyFDhighIUGvv/66PvvsM8d19u/fX4mJiRcs\nCwkJ0ZtvvulYdrHH0D333FMcd0uJEB0d7e4RUMoRdBTZ3r17Hf8fFhamKVOmqF27dm6cqOTKy8uT\n3W7X8ePH1b1796tax9WaMGGCevXqpaysLH3zzTeKiYnRli1b9M477yg6OtoRltmzZ+s///mPpk+f\nXuDyQ4cOVVJSkqZPn67bbrtNZ8+e1ccff6xt27apQ4cOOnjwoE6dOiU/Pz/l5eXpwIEDqlixYoFl\nCQkJevrpp3kMAdcQh9zhMrNnz9aIESMkSb/++qsaNWqkpUuXKjQ0VMHBwVqwYIH27duniIgIBQUF\nXfAKZcmSJbr77rsVHBysxx9/XMeOHZMkWZalmJgYhYSEqFWrVoqIiNCPP/54wfXPmDFDu3fvVnR0\ntFq2bOlY/549e9SzZ0+1bt1aPXv21J49ey55G8LCwjR37lx169ZNwcHBGjt2rLKzsx2nb9y4Uffe\ne6+CgoLUp08fHThwoMBlY2NjFRERoRYtWqhv377asWOHY55Dhw4pIyNDo0aN0u2336677rpLb7zx\nhs6fPy9JWrZsmfr06aOYmBi1bdtWs2fPLrAsKChIHTt21J49e7Rs2TKFhoYqJCREy5cvd2r7VKpU\nSW3bttWbb76phIQEbdq06bKX2bp1q7Zu3ao33nhDzZo1k91ul4+Pjx555BH16tVLAQEBqlOnjnbt\n2iVJ+u6779SwYUMFBwcXWHb+/Hk1bdrUqTn/7HL310MPPaSXXnpJwcHBCgsLU3x8/CXXFRsbqzvu\nuEMtW7ZUly5dtG3bNkm/P26HDh2qESNGqGXLloqIiNChQ4c0d+5chYSEKDQ0VF9++aVjPUuXLtXd\nd9+tli1bqmPHjlq4cOEV3y5JGjNmjGbMmCHpf4fq/+///k8hISFq37691q1bp/j4eHXp0kVt2rTR\nnDlzHJfdt2+fevfuraCgILVv317R0dHKyclxnP7ll1+qS5cuat26tSZOnKhHH31Uixcvdpx+tfsa\nSgaCjmL19ddfa82aNZoxY4ZiYmI0Z84cvfPOO1q1apU+++wz7dy5U5K0bt06zZ07V6+99pq2bdum\n1q1b69lnn5X0+w+n3bt3a/Xq1frqq680c+ZMVatW7YLreuaZZxQUFKQJEyZo7969mjBhgtLS0jRw\n4EBFRUVpx44d6t+/vwYOHKjU1NRLzrxy5Uq99dZbWrt2rQ4dOqQ33nhD0u9hGjdunKKjo7Vjxw71\n7t1bgwcPLvCDdNWqVYqNjdXu3bv13nvvFZinXr16mjx5sjIyMrRu3Tq9//77WrFihZYuXeq4/L59\n+1SnTh1t2bJFTz31lGNZo0aNtGPHDvXo0UPDhw/XN998o7Vr1+qVV15RdHS0MjMznd4mtWrVUpMm\nTbR79+7Lnnfr1q1q1qyZatasecnz/Dneu3btUlBQkFq3bl1gWfPmzVW+fHmnZ/yDM/dXvXr1tH37\ndj3xxBMaP368LvbbrQ8ePKgPP/xQS5Ys0d69e/XWW2/p+uuvd5z+xxO1Xbt26dZbb9Xjjz+u8+fP\na/PmzRoyZIgmTJjgOG/16tU1d+5c7dmzR9OmTdO0adO0f//+K75tf/Xbb78pOztbmzdv1tChQ/Xc\nc8/p448/1tKlS/Xhhx/qjTfe0NGjRyVJ5cqV09ixY7V9+3YtXLhQ27Zt0/z58yVJp06d0tChQ/Xs\ns89qx44dqlevXoEjI9diX0PJQNBRrIYMGSIvLy+1b99elSpVUo8ePVS9enUFBAQoKChI3333nSRp\n4cKFGjBggBo0aCC73a5Bgwbp+++/17Fjx2S325WZmamDBw/Ksiw1aNBANWrUcOr6N23apBtvvFGR\nkZGy2+3q0aOH6tevr40bN17yMo888ohq1qypatWq6amnntKqVaskSYsWLVLv3r3VvHlzeXh46L77\n7lP58uWVkJDguGxUVJRq1qypChUqXLDe/Px8ffrpp3r22WdVuXJl1a5dW/3799fHH3/sOE+NGjUU\nFRUlu93uWEft2rXVs2dPeXh4qFu3bjpx4oSGDBkiT09PtW/fXp6enjpy5IhT98efr+f06dOXPV9a\nWpr8/f0LPU9wcLDjycHu3bsdQf/zsjZt2lzRfJJz91etWrX04IMPOrbHyZMn9dtvv12wLg8PD+Xk\n5OiXX35Rbm6uateurRtuuMFxelBQkO644w7Z7XZ17dpVqampGjBggMqXL69u3brp2LFjSk9PlyTd\neeeduuGGG2Sz2dSmTRv97W9/c+rJ0eXY7XY99dRTjutMTU1V3759VblyZd10001q2LChfvjhB0lS\nkyZN1KJFC9ntdtWuXVu9e/d2PIHavHmzbrrpJnXu3Fl2u119+/bVdddd57geV+1rKH68h45iVb16\ndcf/e3l5XfDvrKwsSdLx48cVExOjl156yXG6ZVlKSkpSSEiIHnnkEUVHR+vYsWPq3LmzRo8ercqV\nK1/2+pOTk1WrVq0Cy2rVqqWkpKRLXubPr0Zr1aql5ORkx4xxcXH64IMPHKfn5uY6Tv/rZf8qNTVV\nubm5Beb56yyBgYEXXO7P99kfkf/zD2gvL68reoUuSUlJSQU+5Hgp1apV0+HDhws9T3BwsMaPH6/T\np0/r66+/1vTp0+Xt7a2TJ0/q9OnT2rNnjx577LErmk9y7v768/1QsWJFSXI8pv7sxhtv1Lhx4zR7\n9mz9/PPPat++vcaMGaOAgABJF97Hvr6+jg/r/XGfZ2VlqUqVKoqPj9frr7+uw4cP6/z58zp37pxu\nvvnmK759f1WtWrULrvOv+8sf2/nQoUN68cUX9e233+rs2bPKz89X48aNJf3+mP/z48hmsxX4t6v2\nNRQ/XqGjRKpZs6YmTZqk3bt3O/7bt
2+fWrVqJUnq27evli1bpk8//VSHDx/WvHnznFpvjRo1dPz4\n8QLLTpw44fhBfjEnTpxw/P/x48cdr1Bq1qypQYMGFZjx66+/Vo8ePRznt9lsl1yvr6+vypcvX2Ce\nv85S2OWvlRMnTmj//v0KCgq67HnbtWunffv2KTEx8ZLnqVOnjmrUqKFFixapZs2a8vb2liS1aNFC\nixYtUmZmplq0aHHFczpzf12JiIgILViwQBs3bpTNZrvgw3/OyMnJ0dChQ/X3v/9dW7Zs0e7du9Wh\nQ4eLHuZ3pYkTJ6p+/fpavXq19uzZo2eeecYxg7+/f4EnPZZlFdh+rtrXUPwIOkqkPn36KDY2Vj/9\n9JOk3z8M9cfXnvbt26evv/5aubm5qlixojw9PVWu3MUfytddd53jfUZJCg0N1eHDh7Vy5Url5eXp\n008/1c8//6w777zzkrPMnz9fiYmJSktL05w5c9StWzdJUq9evbRw4UJ9/fXXsixLWVlZ2rRpk86c\nOePUbfTw8FDXrl01Y8YMnTlzRseOHdPbb79dbF/VOnv2rHbu3KnBgwerWbNmCg0Nvexl2rVrp3bt\n2mnIkCH69ttvlZeXpzNnzmjBggVasmSJ43xBQUF65513CjxJaN26td555x01adLkom9BXM61vL8O\nHjyobdu2KScnR56envLy8rrkY6gwOTk5ysnJkZ+fn+x2u+Lj47Vly5YrXs/VyszMlLe3t7y9vfXL\nL79owYIFjtNCQ0P1ww8/aN26dcrLy9OHH35Y4G2Ia7Wvwf3YMiiRwsPD9cQTT2j48OFq1aqVevTo\noc2bN0v6/YfXc889pzZt2uiuu+5StWrV9Pjjj190PX379tXq1asVHBysKVOmyNfXV3PmzNHbb7+t\ntm3bat68eZozZ478/PwuOUuPHj3097//XZ06ddINN9zg+HBa06ZNNXnyZEVHRys4OFidO3fWsmXL\nruh2Pv/886pYsaI6deqkhx9+WD169FDPnj2vaB1X6o9P2bdr104xMTHq3Lmz5s2b5/QP6lmzZik0\nNNTxocOIiAh9++23Bb5uFhwcrJSUFLVu3dqxLCgoSCkpKQoODi7y7Nfq/srJydGrr76qtm3bqn37\n9jp16pSGDx9+xeupXLmynnvuOQ0bNkzBwcH65JNPFBYWdsXruVqjR4/WJ598olatWun55593POmU\nJD8/P/373//WK6+8orZt2+rnn39WkyZNHB9KvFb7GtzPZhX3sSGgFOG70TDN+fPn1aFDB02fPl23\n3367u8fBNcQrdAAw3BdffKH09HTl5OQ4vr9elM8xoGTjU+4AYLiEhASNGDFCOTk5atiwoV5//fUi\nfY4BJRuH3AEAMACH3AEAMABBBwDAAKX6PfSTJzPcPYLb+PpWUmrqhb8BCyUP26r0YFuVDmV5O/n7\n+1zyNF6hl1J2u4e7R4CT2FalB9uqdGA7XRxBBwDAAAQdAAADEHQAAAxA0AEAMABBBwDAAAQdAAAD\nEHQAAAxA0AEAMABBBwDAAAQdAAADEHQAAAxA0AEAMABBBwDAAAQdAAADEHQAAAxA0AEAMABBBwDA\nAAQdAAADEHQAAAxgd/cAAMxW6eUYd4/gPG8vVcrMdvcUl5U1apy7R0AJxCt0AAAMQNABADAAQQcA\nwAAEHQAAAxB0AAAMQNABADAAQQcAwAAEHQAAAxB0AAAMQNABADAAQQcAwAAEHQAAAxB0AAAMQNAB\nADAAQQcAwAAEHQAAAxB0AAAMQNABADAAQQcAwAAEHQAAAxB0AAAMQNABADCAS4Oenp6uoUOHqmvX\nrrr77ru1d+9epaWlqX///urcubP69++v06dPS5Isy9KUKVMUHh6uiIgI7d+/35WjAQBgFJcGferU\nqbrjjjv0+eefa8WKFWrQoIFiY2MVEhKiNWvWKCQkRLGxsZKkzZs36/Dhw1qzZo0mT56siRMnunI0\nAACM4rKgZ2RkaNeuXXrggQckSZ6enqpSpYrWr1+vyMhISVJkZKTWrVsnSY7lNptNLVq0UHp6upKT\nk101HgAARnFZ0H/99Vf5+flp7NixioyM1Pjx45WVlaWUlBTVqFFDkuTv76+UlBRJUlJSkgIDAx2X\nDwwMVFJSkqvGAwDAKHZXrTgvL0/fffednn/+eTVv3lxTpkxxHF7/g81mk81mK/J1+PpWkt3ucbWj\nllr+/j7uHgFOKtPbytvL3RNcEe9SMK93WX48/VeZ3qcuwWVBDwwMVGBgoJo3by5J6tq1q2JjY1W9\nenUlJyerRo0aSk5Olp+fnyQpICBAiYmJjssnJiYqICCg0OtITc1y1fglnr+/j06ezHD3GHBCWd9W\nlTKz3T2C07y9vZRZCubNKsOPJ6ls71OFPZFx2SF3f39/BQYG6uDBg5Kkbdu2qUGDBgoLC1NcXJwk\nKS4uTh07dpQkx3LLspSQkCAfHx/HoXkAAFA4l71Cl6Tnn39eI0aMUG5ururUqaNp06bp/PnzGjZs\nmJYsWaJatWpp5syZkqTQ0FDFx8crPDxcFStWVExMjCtHAwDAKDbLsix3D1FUZfWQi1S2DzmVNmV9\nW1V6ufQ8OS81h9xHjXP3CG5VlvcptxxyBwAAxYegAwBgAIIOAIABCDoAAAYg6AAAGICgAwBgAIIO\nAIABCDoAAAYg6AAAGICgAwBgAIIOAIABCDoAAAYg6AAAGICgAwBgAIIOAIABCDoAAAYg6AAAGICg\nAwBgAIIOAIABCDoAAAYg6AAAGICgAwBgAIIOAIABCDoAAAYg6AAAGICgAwBgAIIOAIABCDoAAAYg\n6AAAGICgAwBgAIIOAIABCDoAAAYg6AAAGICgAwBgAIIOAIABCDoAAAYg6AAAGICgAwBgAIIOAIAB\nCDoAAAYg6AAAGICgAwBgAIIOAIABCDoAAAYg6AAAGICgAwBgAIIOAIABCDoAAAYg6AAAGICgAwBg\nAIIOAIABCDoAAAYg6AAAGICgAwBgAIIOAIABCDoAAAawu3LlYWFh8vb2Vrly5eTh4aFly5YpLS1N\nzzzzjI4dO6brr79eM2fOVNWqVWVZlqZOnar4+HhVqFBBL774oho3buzK8QAAMIbLX6G/++67WrFi\nhZYtWyZJio2NVUhIiNasWaOQkBDFxsZKkjZv3qzDhw9rzZo1mjx5siZOnOjq0QAAMEaxH3Jfv369\nIiMjJUmRkZFat25dgeU2m00tWrRQenq6kpOTi3s8AABKJZcecpekxx9/XDabTb1791bv3r2VkpKi\nGjVqSJL8/f2VkpIiSUpKSlJgYKDjcoGBgUpKSnKc92J8fSvJbvdw7Q0owfz9fdw9ApxUpreVt5e7\nJ7gi3qVgXu+y/Hj6rzK9T12CS4O+YMECBQQEKCUlRf3791f9+vULnG6z2WSz2Yq8/tTUrKsdsdTy\n9/fRyZMZ7h4DTijr26pSZra7R3Cat7eXMkvBvFll+PEkle19qrAnMi495B4QECBJql69usLDw7
Vv\n3z5Vr17dcSg9OTlZfn5+jvMmJiY6LpuYmOi4PAAAKJzLgp6VlaUzZ844/n/Lli266aabFBYWpri4\nOElSXFycOnbsKEmO5ZZlKSEhQT4+PoUebgcAAP/jskPuKSkpGjJkiCQpPz9fPXr0UIcOHdS0aVMN\nGzZMS5YsUa1atTRz5kxJUmhoqOLj4xUeHq6KFSsqJibGVaMBAGAcm2VZlruHKKqy+h6KVLbfQypt\nyvq2qvRy6XlyXmreQx81zt0juFVZ3qfc9h46AAAoHgQdAAADEHQAAAxA0AEAMABBBwDAAAQdAAAD\nEHQAAAxA0AEAMABBBwDAAAQdAAADEHQAAAxA0AEAMABBBwDAAAQdAAADEHQAAAxA0AEAMABBBwDA\nAAQdAAADEHQAAAxA0AEAMABBBwDAAAQdAAADEHQAAAxA0AEAMABBBwDAAAQdAAADEHQAAAxA0AEA\nMABBBwDAAAQdAAADEHQAAAxA0AEAMABBBwDAAAQdAAADEHQAAAxA0AEAMABBBwDAAAQdAAADEHQA\nAAxA0AEAMABBBwDAAAQdAAADEHQAAAxA0AEAMABBBwDAAAQdAAADEHQAAAxA0AEAMABBBwDAAAQd\nAAADEHQAAAxA0AEAMABBBwDAAAQdAAADEHQAAAzg8qDn5+crMjJSAwcOlCQdPXpUvXr1Unh4uIYN\nG6acnBxJUk5OjoYNG6bw8HD16tVLv/76q6tHAwDAGC4P+nvvvacGDRo4/j19+nT169dPa9euVZUq\nVbRkyRJJ0uLFi1WlShWtXbtW/fr10/Tp0109GgAAxnBp0BMTE7Vp0yY98MADkiTLsrR9+3Z16dJF\nknTfffdp/fr1kqQNGzbovvvukyR16dJF27Ztk2VZrhwPAABjuDToMTExGjlypMqV+/1qUlNTVaVK\nFdntdklSYGCgkpKSJElJSUmqWbOmJMlut8vHx0epqamuHA8AAGPYXbXijRs3ys/PT02aNNGOHTtc\nch2+vpVkt3u4ZN2lgb+/j7tHgJPK9Lby9nL3BFfEuxTM612WH0//Vab3qUtwWdD37NmjDRs2aPPm\nzcrOztaZM2c0depUpaenKy8vT3a7XYmJiQoICJAkBQQE6MSJEwoMDFReXp4yMjLk6+tb6HWkpma5\navwSz9/fRydPZrh7DDihrG+rSpnZ7h7Bad7eXsosBfNmleHHk1S296nCnsi47JD7s88+q82bN2vD\nhg3617/+pdtvv12vvvqq2rZtq9WrV0uSli9frrCwMElSWFiYli9fLklavXq1br/9dtlsNleNBwCA\nUYr9e+gjR47U22+/rfDwcKWlpalXr16SpAceeEBpaWkKDw/X22+/rREjRhT3aAAAlFo2qxR/lLys\nHnKRyvYhp9KmrG+rSi/HuHsEp5WaQ+6jxrl7BLcqy/uUWw65AwCA4kPQAQAwAEEHAMAABB0AAAMQ\ndAAADEDQAQAwAEEHAMAABB0AAAMQdAAADOD0H2c5d+6cTp48KS8vL9WoUcOVMwEAgCtUaNDPnz+v\nuLg4LV68WAcOHFDlypWVk5Mju92uTp06qV+/fqpXr15xzQoAAC6h0KD36dNHLVu21NixY9W4cWN5\nePz+t8dTUlL0xRdfaMKECerTp4+6d+9eLMMCAICLKzToc+bMkZ+f3wXLq1evrsjISEVGRurUqVMu\nGw4AADin0A/FXSzmKSkpSkhIKPQ8AACgeDn1KfeHH35YGRkZSk9PV2RkpMaPH6+XXnrJ1bMBAAAn\nORX0rKws+fj4aOPGjYqIiNDKlSv15Zdfuno2AADgJKeCnpOTI0nasWOH/va3v6lcuXKOD8gBAAD3\ncyrobdq0Ubdu3fTVV1+pTZs2Sk9PV7ly/E4aAABKCqd+scwLL7ygAwcOqE6dOipfvrwyMjI0ZcoU\nV88GAACc5FTQbTab6tatq8TERCUmJkqSPD09XToYAABwnlNBf++99zRjxgxVrVrVcajdZrNp/fr1\nLh0OAAA4x6mgv/vuu/r8888VEBDg6nkAAEAROPXJtsDAQGIOAEAJ5tQr9Kefflrjx49XaGiovLy8\nHMtDQ0NdNhgAAHCeU0HfuHGjNm7cqMOHDxd4D52gAwBQMjgV9LVr12rDhg2qUKGCq+cBAABF4NR7\n6HXq1JHd7lT7AQCAGzhV6RtvvFGPPfaYOnXqVOD754888ojLBgMAAM5zKui5ubm64YYb9OOPP7p6\nHgAAUAROBX3atGmungMAAFyFQt9D//bbbwu9cE5Ojn755ZdrOhAAALhyhb5Cj42NVVZWlnr06KHm\nzZvruuuuU3Z2tg4dOqQvvvhC8fHxGjNmjBo0aFBc8wIAgIsoNOizZs3Svn37tGjRIr3++utKTExU\nxYoVdfPNN6tTp0768MMPVbly5eKaFQAAXMJl30Nv1qyZmjVrVhyzAACAInLqe+gAAKBkI+gAABiA\noAMAYACCDgCAAZwKekpKikaMGOH4Va8HDhzQggULXDoYAABwnlNBf+6559S6dWulp6dLkurXr6/5\n8+e7dDAAAOA8p4KelJSkhx56SB4eHpIkT09Px99FBwAA7udUlf/6p1PT09NlWZZLBgIAAFfOqT/O\nEh4ergkTJigzM1PLli3T/Pnz1bNnT1fPBgAAnORU0J988kl9/PHHSk9PV3x8vKKionTvvfe6ejYA\nAOAkp4IuSffcc4/uueceV84CAACKyKmgp6Sk6IMPPtCRI0eUl5fnWP7vf//bZYMBAADnORX0wYMH\n67bbblNISIjjk+4AAKDkcCroZ8+e1QsvvODqWQAAQBE59bW15s2b64cffnD1LAAAoIiceoXep08f\nPfroowoMDJSXl5dj+ZIlS1w2GAAAcJ5TQR85cqQGDRqk2267jffQAQAogZwKupeXlx5//HFXzwIA\nAIrIqffQ77jjDm3evNnVswAAgCJy6hX6Rx99pNjYWHl7e8vT01OWZclms2nbtm2ung8AADjBqaAv\nXbrU1XMAAICr4FTQr7/+elfPAQAArkKhQR85cqReeeUV9ezZUzab7YLTC/vaWnZ2th555BHl5OQo\nPz9fXbp00dChQ3X06FENHz5caWlpaty4sV5++WV5enoqJydHo0aN0v79+1WtWjXNmDFDtWvXvvpb\nCABAGVBo0B977DFJ0ujRo694xZ6ennr33Xfl7e2t3NxcPfzww+rQoYPefvtt9evXT927d9eECRO0\nZMkSPfzww1q8eLGqVKmitWvXatWqVZo+fbpmzpxZtFsFAEAZU+in3OfPny9JatOmzUX/K4zNZpO3\nt7ckKS8vT3l5ebLZbNq+fbu6dOkiSbrvvvu0fv16SdKGDRt03333SZK6dOmibdu2ybKsq7t1AACU\nEYW+Qv/++++vauX5+fm6//77deTIET388MOqU6eOqlSpIrv996sNDAxUUlKSJCkpKUk1a9b8fSi7\nXT4+PkpNTZWfn98l1+/rW0l2e9n9RTf+/j7uHgFOK
tPbytvr8ucpQbxLwbzeZfnx9F9lep+6BKf/\nHnpReHh4aMWKFUpPT9eQIUN08ODBa7r+1NSsa7q+0sTf30cnT2a4eww4oaxvq0qZ2e4ewWne3l7K\nLAXzZpXhx5NUtvepwp7IFBr0H3/8USEhIRcsv9LvoVepUkVt27ZVQkKC0tPTlZeXJ7vdrsTERAUE\nBEiSAgICdOLECQUGBiovL08ZGRny9fV1av0AAJR1hQa9bt26io2NLdKKT506JbvdripVqujcuXPa\nunWrnnzySbVt21arV69W9+7dtXz5coWFhUmSwsLCtHz5crVs2VKrV6/W7bffftFP1gMAgAsVGnRP\nT88ifwc9OTlZY8aMUX5+vizLUteuXXXXXXepYcOGeuaZZzRz5kzdeuut6tWrlyTpgQce0MiRIxUe\nHq6qVatqxowZRbpeAADKokKDXr58+SKv+JZbblFcXNwFy+vUqXPR7697eXlp1qxZRb4+AADKskK/\ntvbRRx8V1xwAAOAqOPXX1gAAQMlG0AEAMABBBwDAAAQdAAADEHQAAAxA0AEAMABBBwDAAAQdAAAD\nEHQAAAxA0AEAMABBBwDAAAQdAAADEHQAAAxA0AEAMABBBwDAAAQdAAADEHQAAAxA0AEAMABBBwDA\nAAQdAACVHaXPAAAR0ElEQVQDEHQAAAxA0AEAMABBBwDAAAQdAAADEHQAAAxA0AEAMABBBwDAAAQd\nAAADEHQAAAxA0AEAMABBBwDAAAQdAAADEHQAAAxA0AEAMABBBwDAAAQdAAADEHQAAAxA0AEAMABB\nBwDAAAQdAAADEHQAAAxA0AEAMABBBwDAAAQdAAADEHQAAAxA0AEAMABBBwDAAAQdAAADEHQAAAxA\n0AEAMABBBwDAAAQdAAADEHQAAAxA0AEAMIDLgn7ixAlFRUWpW7du6t69u959911JUlpamvr376/O\nnTurf//+On36tCTJsixNmTJF4eHhioiI0P79+101GgAAxnFZ0D08PDRmzBh9+umnWrRokebPn6+f\nf/5ZsbGxCgkJ0Zo1axQSEqLY2FhJ0ubNm3X48GGtWbNGkydP1sSJE101GgAAxnFZ0GvUqKHGjRtL\nkipXrqz69esrKSlJ69evV2RkpCQpMjJS69atkyTHcpvNphYtWig9PV3JycmuGg8AAKMUy3vov/76\nq77//ns1b95cKSkpqlGjhiTJ399fKSkpkqSkpCQFBgY6LhMYGKikpKTiGA8AgFLP7uoryMzM1NCh\nQzVu3DhVrly5wGk2m002m63I6/b1rSS73eNqRyy1/P193D0CnFSmt5W3l7snuCLepWBe77L8ePqv\nMr1PXYJLg56bm6uhQ4cqIiJCnTt3liRVr15dycnJqlGjhpKTk+Xn5ydJCggIUGJiouOyiYmJCggI\nKHT9qalZrhu+hPP399HJkxnuHgNOKOvbqlJmtrtHcJq3t5cyS8G8WWX48SSV7X2qsCcyLjvkblmW\nxo8fr/r166t///6O5WFhYYqLi5MkxcXFqWPHjgWWW5alhIQE+fj4OA7NAwCAwrnsFfpXX32lFStW\n6Oabb9a9994rSRo+fLgGDBigYcOGacmSJapVq5ZmzpwpSQoNDVV8fLzCw8NVsWJFxcTEuGo0AACM\n47KgBwUF6YcffrjoaX98J/3PbDabXnjhBVeNAwCA0fhNcQAAGICgAwBgAIIOAIABCDoAAAYg6AAA\nGICgAwBgAIIOAIABCDoAAAYg6AAAGICgAwBgAIIOAIABCDoAAAYg6AAAGICgAwBgAIIOAIABCDoA\nAAYg6AAAGICgAwBgAIIOAIABCDoAAAYg6AAAGICgAwBgAIIOAIABCDoAAAYg6AAAGICgAwBgAIIO\nAIABCDoAAAYg6AAAGICgAwBgAIIOAIABCDoAAAYg6AAAGICgAwBgAIIOAIABCDoAAAYg6AAAGICg\nAwBgAIIOAIABCDoAAAYg6AAAGICgAwBgAIIOAIABCDoAAAYg6AAAGICgAwBgAIIOAIABCDoAAAYg\n6AAAGICgAwBgAIIOAIABCDoAAAYg6AAAGICgAwBgAIIOAIABCDoAAAZwWdDHjh2rkJAQ9ejRw7Es\nLS1N/fv3V+fOndW/f3+dPn1akmRZlqZMmaLw8HBFRERo//79rhoLAAAjuSzo999/v+bNm1dgWWxs\nrEJCQrRmzRqFhIQoNjZWkrR582YdPnxYa9as0eTJkzVx4kRXjQUAgJFcFvTg4GBVrVq1wLL169cr\nMjJSkhQZGal169YVWG6z2dSiRQulp6crOTnZVaMBAGAce3FeWUpKimrUqCFJ8vf3V0pKiiQpKSlJ\ngYGBjvMFBgYqKSnJcd5L8fWtJLvdw3UDl3D+/j7uHgFOKtPbytvL3RNcEe9SMK93WX48/VeZ3qcu\noViD/mc2m002m+2q1pGamnWNpil9/P19dPJkhrvHgBPK+raqlJnt7hGc5u3tpcxSMG9WGX48SWV7\nnyrsiUyxfsq9evXqjkPpycnJ8vPzkyQFBAQoMTHRcb7ExEQFBAQU52gAAJRqxRr0sLAwxcXFSZLi\n4uLUsWPHAssty1JCQoJ8fHwue7gdAAD8j8sOuQ8fPlw7d+5UamqqOnTooKeffloDBgzQsGHDtGTJ\nEtWqVUszZ86UJIWGhio+Pl7h4eGqWLGiYmJiXDUWAABGslmWZbl7iKIqq++hSGX7PaTSpqxvq0ov\nl54n6KXmPfRR49w9gluV5X2qxLyHDgAAXIOgAwBgAIIOAIABCDoAAAYg6AAAGICgAwBgAIIOAIAB\nCDoAAAYg6AAAGICgAwBgAIIOAIABCDoAAAYg6AAAGICgAwBgAIIOAIABCDoAAAYg6AAAGICgAwBg\nAIIOAIABCDoAAAYg6AAAGICgAwBgAIIOAIABCDoAAAYg6AAAGICgAwBgAIIOAIABCDoAAAYg6AAA\nGICgAwBgAIIOAIABCDoAAAYg6AAAGICgAwBgAIIOAIABCDoAAAYg6AAAGICgAwBgAIIOAIABCDoA\nAAYg6AAAGICgAwBgAIIOAIABCDoAAAYg6AAAGICgAwBgAIIOAIABCDoAAAYg6AAAGICgAwBgAIIO\nAIABCDoAAAYg6AAAGICgAwBgAIIOAIAB7O4e4M82b96sqVOn6vz58+rVq5cGDBhQrNdf6eWYYr2+\nq+LtpUqZ2e6ewilZo8a5ewQAMF6JeYWen5+v6OhozZs3T6tWrdInn3yin3/+2d1jAQBQKpSYV+j7\n9u3TjTfeqDp16kiSunfvrvXr16thw4Zungwl0csve7p7BKd5e0uZmaVj3lGjctw9AtyotOxX7FMX\nV2JeoSclJSkwMNDx74CAACUlJblxIgAASo8S8wq9KPz9fa7tCl+Zdm3X52Le7h7ASa6Y85VXXLBS\nl/Jy9wBOcsGc7FfXnKtmLF37VRnepy6hxLxCDwgIUGJiouPfSUlJCggIcONEAACUHiUm6E2bNtXh\nw4d19OhR
5eTkaNWqVQoLC3P3WAAAlAol5pC73W7XhAkT9MQTTyg/P189e/bUTTfd5O6xAAAoFWyW\nZVnuHgIAAFydEnPIHQAAFB1BBwDAAAS9FOrTp0+RL7ts2TJFR0dfw2nMk56erg8//LDAspdeeknd\nu3fXSy+9dNnLJyUlaejQoUW+/tmzZ+utt94q8uVxeUXZD+bMmXPZ84wZM0aff/55Uccy2pXsV1e6\nD1xs3RcTFRWlb775xun1ljYEvRRauHChu0cwWnp6uhYsWFBg2UcffaSPP/5Yo0ePvuzlAwICNGvW\nLFeNBzeZO3euu0co1a52v7rSdZdFJeZT7ib69ddf9eSTT6p169bau3evAgIC9MYbb6hChQqKiopS\no0aNtGvXLuXn5ysmJkbNmjUrcPmffvpJY8eOVW5urs6fP6/Zs2erbt26atmypfbu3asdO3botdde\nk6+vr3788Uc1btxY06dPl81mU3x8vKZNm6ZKlSqpVatWOnr06AU/kE6dOqUXXnhBx48flySNGzdO\nrVu3Lrb7p6R69dVXdeTIEd17771q166dDh06pKysLN1///0aOHCgunXr5jjvzp07NXXqVEmSzWbT\nBx98oLS0NA0aNEiffPKJli1bpg0bNujs2bM6evSoOnXqpFGjRkmSFi9erHnz5snHx0e33HKLPD09\nNWHChAKzHDlyRJMmTVJqaqoqVKigyZMnq0GDBsV3Z5RQV7tvSdKJEycUFRWlpKQk3XPPPfrHP/4h\nSRo8eLASExOVnZ2tvn37qnfv3po+fbrOnTune++9Vw0bNtSrr76quLg4vfXWW7LZbGrUqJFe+e9v\nZdm9e7feeecdnTx5UiNHjlTXrl2L9b4pqa5kv5KkAwcOqHfv3kpNTdUTTzyhBx98UJmZmRo8eLDS\n09OVl5enf/7zn+rUqdMF6x49erRiY2O1cuVK2Ww2dejQQSNGjJAkff7555o0aZIyMjI0depUBQUF\nuePucA0LLnP06FHr1ltvtb777jvLsixr6NChVlxcnGVZlvXoo49a48ePtyzLsnbu3Gl17979gstH\nR0dbK1assCzLsrKzs62zZ89almVZLVq0sCzLsrZv3261atXKOnHihJWfn289+OCD1q5du6xz585Z\nHTp0sI4cOWJZlmU988wz1oABAyzLsqylS5dakyZNsizLsoYPH27t2rXLsizLOnbsmNW1a1eX3A+l\nzdGjRy/YHn/c5381cOBAa/fu3ZZlWdaZM2es3NzcApdfunSpFRYWZqWnp1vnzp2z7rzzTuv48eNW\nYmKiddddd1mpqalWTk6O9dBDDzm2y6xZs6x58+ZZlmVZffv2tQ4dOmRZlmUlJCRYUVFRrrjJpc7V\n7ltLly61/va3v1mnTp2yzp49a3Xv3t3at2+fZVmWlZqaalmW5Vh+6tQpy7IKPgZ+/PFHq3PnzlZK\nSkqBy4wePdp6+umnrfz8fOunn36yOnXq5IqbXypdyX41a9YsKyIiwjp79qyVkpJidejQwUpMTLRy\nc3OtjIwMy7IsKyUlxerUqZN1/vz5C9a9adMmq3fv3lZWVpZlWf/bPo8++qg1bdo0x3kee+yxa30z\n3YpX6C5Wu3Zt3XrrrZKkxo0b69ixY47TunfvLkkKDg7WmTNnlJ6eripVqjhOb9GihebMmaPExER1\n7txZdevWvWD9zZo1c/wO/FtuuUXHjh2Tt7e36tSpU+AP3Xz00UcXXHbr1q0F/qLdmTNnlJmZKW/v\n0vDLL0uGVq1a6cUXX1RERIQ6d+580fsuJCREPj6//5riBg0a6NixY0pLS1NwcLCqVasmSeratasO\nHz5c4HKZmZnau3ev/vnPfzqW5eTwx1P+cDX7liS1a9dOvr6+kqTw8HB99dVXatq0qd5//32tXbtW\n0u+v4v/zn/84zveH7du3q2vXrvLz85Mkx3aUpE6dOqlcuXJq2LChfvvtt2t8q8uOjh07qkKFCqpQ\noYLatm2rb775RqGhofrXv/6lXbt2qVy5ckpKSrrofbxt2zbdf//9qlixoqSC2yc8PFzShY8ZExB0\nF/P0/N9fBPLw8FB29v/+hrnNZitw3r/+OyIiQs2bN9emTZs0YMAATZo0SSEhIYWuPz8/3+nZzp8/\nr48++kheXqXldyKXPAMGDFBoaKji4+P10EMPad68eRfcn0XdRpZlqUqVKlqxYsU1ndkUV7NvXeo8\nO3bs0NatW7Vo0SJVrFhRUVFRBdZ7pXOh6C62zVauXKlTp05p2bJlKl++vMLCwoq8fcqVK3dFPy9L\nAz4U50affvqppN/fc/Px8XG8ivvD0aNHVadOHfXt21cdO3bUDz/84NR669Wrp6NHj+rXX38tcD1/\n1b59e73//vuOf3///fdFuRnG8fb2VmZmplPnPXLkiBo1aqQBAwaoadOmOnTokFOXa9q0qXbt2qXT\np08rLy9Pa9asueA8lStXVu3atfXZZ59J+j3wBw4ccP6GlGGX27ckacuWLUpLS9O5c+e0bt06tWrV\nShkZGapataoqVqyoX375RQkJCY7z2+125ebmSpJuv/12ff7550pNTZUkpaWlFcOtKt2uZL+SpPXr\n1ys7O1upqanauXOnmjZtqoyMDFWvXl3ly5fX9u3bHa+w/7rudu3aadmyZTp79qyksrN9eIXuRl5e\nXoqMjFReXp5iYmIuOP2zzz7TihUrZLfbdd1112ngwIFOrbdChQp64YUX9MQTT6hSpUpq0qTJRc83\nfvx4RUdHKyIiQvn5+QoKCuIrbZJ8fX3VqlUr9ejRQ3fccUehn8B99913tWPHDtlsNt10003q0KGD\nkpOTL3sdAQEBGjhwoHr16qWqVauqfv36F43OK6+8ookTJ+rNN99UXl6eunXrpltuueWqbl9ZcLl9\nS/r97aqnn37a8aG4pk2bqlGjRlq4cKHuvvtu1atXTy1atHCc/8EHH9Q999yj2267Ta+++qoGDRqk\nqKgolStXTrfddptefPHF4rp5pdKV7FeS1KhRI/Xt21epqakaPHiwAgICFBERoaeeekoRERFq0qSJ\n6tevf8l1HzhwQD179lT58uUVGhqq4cOHF8fNdCt+9aubREVFadSoUWratKlL1v/He+GWZWnSpEmq\nW7eu+vXr55LrQtH8sY3y8vL0j3/8Qz179nS8v4eic/W+BZRUvEI31OLFi7V8+XLl5ubq1ltvVe/e\nvd09Ev7itdde09atW5Wdna327durU6dO7h4JQCnGK3QAAAzAh+IAADAAQQcAwAAEHQAAAxB0AAAM\nQNABADAAQQcAwAD/DzOv+htPD7E0AAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "objects = ('np single', 'tf single', 'np batch', 'tf batch')\n",
+ "y_pos = np.arange(len(objects))\n",
+ "performance = [small_np.average, small_tf.average, small_np_batch.average, small_tf_batch.average]\n",
+ "performance = [i*1000 for i in performance]\n",
+ "fig, ax = plt.subplots(1, figsize=(8,8))\n",
+ "ax.bar(y_pos, performance, align='center', alpha=0.5, color=['red', 'blue'])\n",
+ "plt.xticks(y_pos, objects)\n",
+ "plt.ylabel('Time (ms)')\n",
+ "plt.title('Times to perform DTCWT on small images')\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Analysis of Large Images"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## DTCWT on a single large image (512x512)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T11:31:11.373219Z",
+ "start_time": "2017-08-07T11:31:10.506792Z"
+ },
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "# Create the input\n",
+ "h, w = 512, 512\n",
+ "in_ = np.random.randn(1,h,w)\n",
+ "\n",
+ "# Set up the transforms\n",
+ "nlevels = 3\n",
+ "tf.reset_default_graph()\n",
+ "fwd = dtcwt.Transform2d() # Numpy Transform\n",
+ "fwd_tf = dtcwt.tf.Transform2d() # Tensorflow Transform\n",
+ "\n",
+ "in_placeholder = tf.placeholder(tf.float32, [None, h, w])\n",
+ "out_tf = fwd_tf.forward(in_placeholder, nlevels=nlevels)\n",
+ "out_fft = tf.fft2d(tf.cast(in_placeholder, tf.complex64))\n",
+ "\n",
+ "sess = tf.Session()\n",
+ "sess.run(tf.global_variables_initializer())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Numpy Implementation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T11:31:16.031492Z",
+ "start_time": "2017-08-07T11:31:11.375442Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "57.3 ms ± 5.27 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "large_np = %timeit -o for i in in_: fwd.forward(i, nlevels=nlevels)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### TF implementation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T11:31:18.058548Z",
+ "start_time": "2017-08-07T11:31:16.033601Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "2.39 ms ± 109 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "large_tf = %timeit -o sess.run(out_tf.lowpass_op, {in_placeholder: in_})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### (Comparison) Using an FFT in Tensorflow"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T11:31:23.107977Z",
+ "start_time": "2017-08-07T11:31:18.061652Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "6.19 ms ± 325 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "%timeit sess.run(out_fft, {in_placeholder: in_})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## DTCWT on a batch of large images (100x512x512)\n",
+ "Batches are something tensorflow naturally handles. This should widen the gap as we only have to copy data to the GPU once for multiple images"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T11:31:24.473872Z",
+ "start_time": "2017-08-07T11:31:23.112698Z"
+ },
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "in_ = np.random.randn(100,512,512)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Numpy Implementation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T11:32:08.472588Z",
+ "start_time": "2017-08-07T11:31:24.477445Z"
+ },
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "5.44 s ± 189 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "large_np_batch = %timeit -o for i in in_: fwd.forward(i, nlevels=nlevels).lowpass"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### TF implementation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T11:32:09.632071Z",
+ "start_time": "2017-08-07T11:32:08.474983Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "124 ms ± 4.32 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "large_tf_batch = %timeit -o sess.run(out_tf.lowpass_op, {in_placeholder: in_})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### (Comparison) Using an FFT in Tensorflow"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T11:32:10.702518Z",
+ "start_time": "2017-08-07T11:32:09.634609Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "113 ms ± 2.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "%timeit sess.run(out_fft, {in_placeholder: in_})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## DTCWT on a batch of large images with a convolution afterwards\n",
+ "This again should widen the gap, as having already calculated something on the GPU, we don't need to transfer the data there again"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T11:32:12.112465Z",
+ "start_time": "2017-08-07T11:32:10.706380Z"
+ },
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "h, w = 512, 512\n",
+ "in_ = np.random.randn(100, h, w)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Numpy Implementation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T11:32:12.156396Z",
+ "start_time": "2017-08-07T11:32:12.114544Z"
+ },
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "fwd = dtcwt.Transform2d()\n",
+ "tf.reset_default_graph()\n",
+ "sess = tf.Session()\n",
+ "highs = tf.placeholder(tf.float32, [None, h>>3, w>>3, 6])\n",
+ "weights = tf.get_variable('weights', shape=(5,5,6,64))\n",
+ "step = tf.nn.conv2d(highs, weights, strides=[1,1,1,1], padding='SAME')\n",
+ "sess.run(tf.global_variables_initializer())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T11:32:57.965489Z",
+ "start_time": "2017-08-07T11:32:12.159253Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "5.7 s ± 139 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "large_np_conv = %timeit -o sess.run(step, {highs: [abs(fwd.forward(i, nlevels=3).highpasses[2]) for i in in_]})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### TF Implementation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T11:32:58.860531Z",
+ "start_time": "2017-08-07T11:32:57.968404Z"
+ },
+ "collapsed": true,
+ "scrolled": false
+ },
+ "outputs": [],
+ "source": [
+ "tf.reset_default_graph()\n",
+ "in_placeholder = tf.placeholder(tf.float32, [None, h, w])\n",
+ "fwd_tf = dtcwt.tf.Transform2d() \n",
+ "out_tf = fwd_tf.forward(in_placeholder, nlevels=3)\n",
+ "p = tf.abs(out_tf.highpasses_ops[2])\n",
+ "weights = tf.get_variable('weights', shape=(5,5,6,64))\n",
+ "out = tf.nn.conv2d(p, weights, strides=[1,1,1,1], padding='SAME')\n",
+ "sess = tf.Session()\n",
+ "sess.run(tf.global_variables_initializer())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T11:33:10.477651Z",
+ "start_time": "2017-08-07T11:32:58.863436Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "143 ms ± 1.61 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "large_tf_conv = %timeit -o sess.run(out, {in_placeholder: in_})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Running DTCWT with TF backend on a CPU\n",
+ "Perhaps there is still a speed-up when using a CPU and tensorflow?"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 45,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T12:03:40.293069Z",
+ "start_time": "2017-08-07T12:03:28.964177Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "12.8 ms ± 389 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Create the input\n",
+ "h, w = 512, 512\n",
+ "in_ = np.random.randn(1,h,w)\n",
+ "\n",
+ "# Set up the transforms\n",
+ "nlevels = 3\n",
+ "tf.reset_default_graph()\n",
+ "with tf.device(\"/cpu:0\"):\n",
+ " fwd_tf = dtcwt.tf.Transform2d() # Tensorflow Transform\n",
+ " in_placeholder = tf.placeholder(tf.float32, [None, h, w])\n",
+ " out_tf = fwd_tf.forward(in_placeholder, nlevels=nlevels)\n",
+ "\n",
+ "sess = tf.Session()\n",
+ "sess.run(tf.global_variables_initializer())\n",
+ "large_tf_cpu = %timeit -o sess.run(out_tf.lowpass_op, {in_placeholder: in_})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 46,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T12:03:47.646900Z",
+ "start_time": "2017-08-07T12:03:42.065636Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "516 ms ± 17.8 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "in_ = np.random.randn(100,h,w)\n",
+ "large_tf_batch_cpu = %timeit -o sess.run(out_tf.lowpass_op, {in_placeholder: in_})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 47,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T12:04:18.946481Z",
+ "start_time": "2017-08-07T12:04:13.202643Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "610 ms ± 27.5 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "tf.reset_default_graph()\n",
+ "with tf.device(\"/cpu:0\"):\n",
+ " in_placeholder = tf.placeholder(tf.float32, [None, h, w])\n",
+ " fwd_tf = dtcwt.tf.Transform2d() \n",
+ " out_tf = fwd_tf.forward(in_placeholder, nlevels=3)\n",
+ " p = tf.abs(out_tf.highpasses_ops[2])\n",
+ " weights = tf.get_variable('weights', shape=(5,5,6,64))\n",
+ " out = tf.nn.conv2d(p, weights, strides=[1,1,1,1], padding='SAME')\n",
+ "sess = tf.Session()\n",
+ "sess.run(tf.global_variables_initializer())\n",
+ "large_tf_conv_cpu = %timeit -o sess.run(out, {in_placeholder: in_})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Large Image Conclusion"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 50,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-07T12:05:41.444267Z",
+ "start_time": "2017-08-07T12:05:40.930019Z"
+ },
+ "scrolled": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAtoAAALSCAYAAADjvXZeAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzs/X2cV3WB//8/B0ZQkUsXBi9YNy/Xi5RREFCSjyiYIIoB\nq3lVZLmSlWSRtpaaGlurqantGurH7LNpXiRYN+3jdSKVVymr9dH8mrkpODOGCAgKDJ7fH67vnwjS\nqLwG0Pv9L+e83+d9Xu/XnDM+5nDmvOuqqqoCAACsVR3W9QAAAOCDSGgDAEABQhsAAAoQ2gAAUIDQ\nBgCAAoQ2AAAUILThfbjsssty+umnr+thrFeeeeaZHHbYYWlsbMyPf/zjdT2cXHLJJfnqV7+aJJk7\nd24aGxuzYsWKJMlf//rXHH300WlsbMx3vvOdVFWVr3/96xk4cGDGjx+/LofNOnDGGWfkBz/4wboe\nxnty00035ZOf/OS6HkaSDXseYW2rX9cDgPVZY2Nj7b9fffXVdOrUKR07dkySfOtb38qJJ564roZW\nc+yxx+bQQw/NhAkT1vVQkiRXXHFFBg0alJtvvnldD2UVW265ZR599NHa19ddd1169uyZRx55JHV1\ndXn44Yfz61//Ovfee2823XTTdh/f8OHDc+6552afffZZ7eMPPPBAPvWpT2WTTTZJknTt2jWNjY05\n/vjjs/vuu2fu3LkZPXp07flLlizJJptskrq6uiTJ5ZdfngEDBuSxxx7LJZdckkcffTQdOnTI3//9\n3+eTn/xkxo0bl4MOOignn3xyRo0alST53e9+l6OOOioXXnjhSss++9nP5vjjj8+VV16ZJGltbU1r\na2s23njjJG/M9S233FJmotaCm266KTfccEOuvfba2rKzzz57HY7og8M8wv+f0IY1eGuU/a0I+rBr\nbW1NfX39KrH3Xl6jvcydOzfbbbddLUTnzJmTrbba6j1FdnuNvU+fPpk5c2aqqkpzc3Ouu+66HH30\n0Zk2bVqGDBmy0j6700475eabb84222xTW/boo4/mM5/5TCZNmpTvfve76dmzZ/7whz/k8ssvz7hx\n4zJw4MA89NBDtah++OGHs+22266yrLGxMV/4whfyhS98Icnqw3Vdae/9aENnvqAcl47A+/DWyxKe\nf/757LTTTvnZz36WYcOGZeDAgbn22mvz2GOPZcyYMRkwYMAqZ3puvPHGHHzwwRk4cGCOP/74zJkz\nJ0lSVVWmTp2aIUOGZM8998yYMWPy1FNPrbL9Cy+8MA8//HDOPvvsNDY21l7/kUceybhx47LXXntl\n3LhxeeSRR97xPQwfPjw//OEPM2rUqAwcODBf//rXs3Tp0trj99xzTw477LAMGDAgRx55ZJ588smV\n1p02bVrGjBmT/v3757jjjssDDzxQG8+f//znLFq0KF/72tcyePDg7L///vn3f//3vP7660neiLMj\njzwyU6dOzaBBg3LJJZestGzAgAE54IAD8sgjj+Smm27KsGHDMmTIkEyfPv0d389zzz2XY445Jo2N\njZk4cWLmz59fe+zN71Fra2tOO+20zJgxI1deeWUaGxvz05/+NN/4xjcye/bsNDY25uKLL37X77+1\ntTXNzc354he/mMGDB2f48OErXT5zySWX5OSTT87Xvva1NDY2ZvTo0Xn88ceTJFOmTMncuXNz4okn\nprGxMZdffvk7vsckqaurS9++fXPyySdnwoQJOe+889b4/Df927/9W8aOHZsTTjghvXr1Sl1dXXbb\nbbd8//vfT5IMGDAgDz/8cO35Dz/8cD73uc+tsmzAgAFt2t7b3XXXXRk9enQGDBiQY489Nn/6059q\nj63NfbG1tTXTpk3LgQcemMbGxowaNSp33HFHkuRPf/pTzjzzzNr3+s33ctppp+XCCy+sveb111+f\nESNGZO+9986JJ56Y5ubm2mM77bRTrr322owcOTIDBgzIt771rbz5Qcv//d//nWOOOSZ77bVXBg0a\nlMmTJ7/n+bjyyiszZsyY7LXXXpk8efJK87Em5557boYNG5Y999wzn/jEJ1b6/l1yySX50pe+lK9+\n9avZc889M3369Lz22ms59dRTM3DgwBx88MG5/PLLs99++9XWWdN+/XZvnccHHngg++23Xy6//PIM\nGTIkQ4cOzZ133pl77703Bx10UPbee+9cdtlltXUfe+yxHHHEERkwYECGDh2as88+O8uWLas9PmvW\nrBx00EHZa6+9ctZZZ+WYY47JDTfcUHv8/f5MhbWuAtpk//33r37961+vtOziiy+uvvKVr1RVVVXP\nPfdcteOOO1bf/OY3q9dee6267777qt12262aNGlS9de//rVqamqqBg8eXD3wwANVVVXVHXfcUR14\n4IHV008/XS1fvrz6wQ9+UB1xxBFVVVXVzJkzq8MPP7xasGBB9frrr1dPP/101dzcvNpxHXPMMdX1\n119f+3r+/PnVgAEDqunTp1fLly+vfvGLX1QDBgyoXnrppXd8X6NHj67mzp1bzZ8/vzriiCOqCy64\noKqqqvrDH/5QDR48uJo9e3bV2tpa3XTTTdX+++9fLV26tLbuoYceWs2dO7d69dVXVzueKVOmVCee\neGK1aNGi6rnnnqtGjhxZe/xnP/tZtfPOO1c//vGPq+XLl1evvvpqbdmNN95Ytba2VhdccEE1bNiw\n6qyzzqqWLl1a3XfffVX//v2rV155ZbXv55/+6Z+qqVOnVkuXLq0efPDBqn///qt8j5YvX15VVVWd\neuqptff65niOPPLI2tfv9v2vWLGiOvzww6tLLrmkWrp0afWXv/ylGj58eDVz5sza/rLbbrtVv/rV\nr6rW1tbq/PPPryZMmLDS9+Lt+9hb3X///dXHPvaxVZb/5je/qXbaaadq8eLFKy3fcccdq2effbb2\n9ZIlS6p//Md/rH7729++4zaef/75aqeddqrmz59frVixoho8eHD16quvVvvtt19t2Z577lk9+OCD\nK6339rlbnWeeeabaY489qlmzZlXLli2rpk2bVh144IErzefa3BdvvfXWqqmpqVqxYkV1yy23VHvs\nsUftOFrdeN+6P/zmN7+p9t577+r3v/99tXTp0urss8+ujjrqqJXm9oQTTqgWLFhQzZkzpxo0aFB1\n7733VlVVVV/+8perf//3f69WrFhRvfbaa9VDDz30nudj3LhxVVNTUzV//vzq4x//eHXNNdes9rXe\n/n5mzJhRvfTSS9Xy5curK6+8stpnn32q1157raqqN/bDXXbZpbrjjjuqFStWVK+++mp13nnnVUcf\nfXT18ssvVy+88EJ1yCGH1Pa1v7Vfv91b5/H++++vdt555+qSSy6pli1bVl133XXVoEGDqlNOOaVa\ntGhR9dRTT1Uf/ehHq7/85S9VVVXV448/Xj366KPV8uXLq+eee676+Mc/Xl111VVVVVXVvHnzqsbG\nxuq2226rli9fXv3oRz+qdtlll9rPk
7X1MxXWJme0YS076aST0rlz5wwdOjSbbrppDjnkkGy++eZp\naGjIgAED8v/+3/9Lkvz0pz/NCSeckO222y719fU58cQT88QTT2TOnDmpr6/P4sWL88wzz6Sqqmy3\n3Xbp06dPm7b/q1/9Kttss03Gjh2b+vr6HHLIIdl2221zzz33vOM6Rx99dLbYYov06NEjkyZNql1b\ne9111+WII47IHnvskY4dO+bwww/PRhttlNmzZ9fWPfbYY7PFFlvUrs19qxUrVuTWW2/NV77ylWy2\n2WbZeuutM3HixPz85z+vPadPnz459thjU19fX3uNrbfeOuPGjUvHjh0zatSovPDCCznppJPSqVOn\nDB06NJ06dcpf/vKXVbY3d+7cPP744zn55JPTqVOnDBw4MMOHD2/TvK3Ou33/jz/+eF566aV84Qtf\nSKdOndKvX7/80z/9U2699dba8/faa68MGzYsHTt2zGGHHbbSWdn3qk+fPqmqKosWLVrj8xYuXJjX\nX389vXv3fsfnbLXVVtlyyy3z8MMP58knn8w222yTjTfeOHvuuWdt2fLly7PHHnu863HeeuutGTZs\nWPbdd99stNFGOf744/Paa6+tdLnL2twXDz744DQ0NKRDhw4ZNWpUttlmmzz22GNtGusvfvGLjBs3\nLrvuums6deqUU045JbNnz87zzz9fe87nPve5dOvWLVtuuWUGDRpU+16+eQlVS0tLOnfu/I5n/9sy\nH8cee2waGhrSo0eP7L///nniiSfaNP7DDjssPXv2TH19fT7zmc9k2bJl+fOf/1x7vH///jnwwAPT\noUOHbLzxxvnlL3+Zf/7nf0737t3Tt2/fHHfccbXntmW/XpP6+vpMmjQpG220UUaNGpX58+fnuOOO\ny2abbZYddtgh22+/ff74xz8mSXbbbbf0798/9fX12XrrrXPEEUfkoYceSpLMnDkzO+ywQ0aOHJn6\n+vocd9xx+bu/+7vadkr9TIX3w0VZsJZtvvnmtf/u3LnzKl8vWbIkyRtROHXq1Hz3u9+tPV79z3W3\nQ4YMydFHH52zzz47c+bMyciRI3Pqqadms802+5vbb2lpyZZbbrnSsi233HKlf/Z+uy222GKl57a0\ntNTGOGPGjPznf/5n7fHly5fXHn/7um83f/78LF++fKXxvH0sffv2XWW9t87Zm9H01v+hdu7cOYsX\nL15lvZaWlnTr1m2la6y33HLLvPDCC+84xjV5t+9/zpw5aWlpWSmsVqxYsdLXb30fG2+8cZYuXfq+\nr5FtaWlJXV1dunbtusbndevWLR06dMiLL76Y7bbb7h2f9+blI1tssUVt7HvttVdt2e67755OnTq9\np3G+dV/o0KFDtthii5X2h7W5L86YMSNXXXVV7fKBJUuWrHQp0d8a66677lr7ukuXLunRo0eam5uz\n9dZbJ8lKv7BssskmtX1yypQp+f73v5/x48ene/fumThx4mrvYtOW+Xj7Nt76ftfkyiuvzI033ljb\nN1555ZWV3vvbj7uWlpaV5u+tj7dlv16THj161P6I/M3j+e0/F9+cuz//+c/5zne+k9///vd59dVX\ns2LFitr3oaWlZaVxvXn51JtK/UyF90NowzqyxRZb5MQTT8yhhx662sePO+64HHfccZk3b14mT56c\nK664Yo3Xer6pT58+mTt37krLXnjhhXzsYx97x3XeGqJz586tnel5c4yTJk16x3Xf/EPC1enZs2c2\n2mijzJ07N9tvv31tWw0NDW1a/93q3bt3Fi5cmCVLltRie+7cue95G+/2/W+xxRbZeuutc/vtt7+n\n7b1Xd9xxR3bZZZe/+Uecm2yySfr375/bb789gwcPfsfnDRw4MD/96U+z1VZb5ROf+ESSN+J7+vTp\n2Wqrrd7z9dl9+vRZ6brYqqpW2R/W1r44Z86cfOMb38iPfvSjNDY21v4FYXXPfaexvhnoyRuR/vLL\nL6801nfSu3fvnHvuuUneuJ594sSJGThw4Ep/lPrmNv7WfLwXDz/8cK644or86Ec/yg477JAOHTpk\n4MCBtWvIk1Xff+/evdPU1FQ7TpuammqPted+fdZZZ2WXXXbJ9773vWy22Wb50Y9+lNtuu602xrf+\nElJV1SrjLPEzFd4Pl47AOnLkkUdm2rRp+f/+v/8vSbJo0aL88pe/TPLGHwT913/9V5YvX55NNtkk\nnTp1SocOqz9c/+7v/i7PPfdc7ethw4bl2WefzS9+8Yu0trbm1ltvzdNPP53/9b/+1zuO5ZprrklT\nU1NefvnlXHbZZbW7S0yYMCE//elP81//9V+pqipLlizJr371q7zyyitteo8dO3bMxz/+8Vx44YV5\n5ZVXMmfOnFx11VXv+D/C92urrbbKbrvtlksuuSTLli3Lww8/vMZLZv6Wd/v+d99993Tp0iXTpk3L\na6+9lhUrVuSpp55q8+UKb/9ersmbZ+ouvfTS3HDDDTnllFPatN6UKVMyffr0XHHFFbUznE8++WS+\n/OUv154zYMCAPPHEE3nooYey5557Jkl23HHHPP/883nggQcycODANm3r7Q4++ODce++9+e1vf5vl\ny5fnf//v/51OnTqtdBvNtbUvvvrqq6mrq0uvXr2SJD/72c9qx1ryxhnV5ubmlf7Q7q0OOeSQ3HTT\nTXniiSeybNmyXHDBBdl9991rZ7PX5Je//GUtALt37566urrVHr9tmY/3YvHixenYsWN69eqV1tbW\nXHrppX/zmD344IPzwx/+MAsWLEhzc/NK/3Lwfvfrdzv2Ll26pEuXLvnTn/600l1shg0blj/+8Y+5\n884709ramp/85Cf561//Wnt8bf1MhbXJGW1YR0aMGJHFixfnlFNOyZw5c9K1a9fss88+Ofjgg7N4\n8eJMnTo1zz//fO265OOPP361r3PcccfltNNOy7XXXpvDDjss3/jGN3LZZZdl6tSpOeuss7LNNtvk\nsssuqwXH6hxyyCH5zGc+k5aWlhxwwAG1s4Yf/ehHc8455+Tss8/Of//3f9eu1X03ZzS/+c1v5pxz\nzsmBBx6Yzp07Z8KECRk3bty7m6x34Xvf+15OPfXUDBo0KP3798/YsWOzcOHC9/Ra7/b9d+zYMZdd\ndlm++93v5oADDsiyZcvykY98pM1nzU444YSce+65Oe+88zJp0qTVfs9bWlrS2NiYqqqy2WabZc89\n98z/+T//J/3792/TNvbcc89cffXVufjii/Mf//Ef6dixY7bZZpscffTRted85CMfSa9evdKzZ890\n69YtyRuXNey+++75zW9+855DcNttt815552Xc845J83Nzdl5551z2WWXrXQZytraF7fffvt85jOf\nyZFHHpm6urqMHTu29ktDkgwePDjbb799hg4dmrq6ujzwwAMrrb/PPvvk5JNPzhe/+MUsXLgwjY2N\nK92RZE0ef/zxTJ06Na+88ko233zznH766enXr997mo/3YujQofnYxz6Wgw46KJtuumk+9alPrf
ES\nr+SNvy0588wzc8ABB6R3794ZM2ZMbrrppiTvf79+N0499dR885vfzJVXXpmdd945o0aNyv33358k\n6dWrV77//e/n29/+dk499dSMGTMmu+22WzbaaKMka+9nKqxNddVb/y0J+NBxf3DWF/bF9cc111yT\nW2+9daUz2+ub119/Pfvtt1/OP//8NV4KBeuSfzcBgA+5lpaW/O53v8vrr7+eZ555JldddVUOPPDA\ndT2sVdx3331ZuHBhli1bVrv/dlv/NQfWBZeOAMCH3PLly3PmmWfm+eefT9euXTN69OgcddRR63pY\nq5g9e3a++tWvZtmyZdl+++3zgx/8YLW3FoX1hUtHAACgAJeOAABAAR/IS0defHHNn472QdGz56aZ\nP3/Juh7Gese8rMqcrMqcrMqcrMqcrMqcrJ55WdWHZU56937nDwtzRnsDVl/fcV0PYb1kXlZlTlZl\nTlZlTlZlTlZlTlbPvKzKnAhtAAAoQmgDAEABQhsAAAoQ2gAAUIDQBgCAAoQ2AAAUILQBAKAAoQ0A\nAAUIbQAAKEBoAwBAAUIbAAAKENoAAFCA0AYAgAKENgAAFCC0AQCgAKENAAAFCG0AAChAaAMAQAFC\nGwAAChDaAABQgNAGAIAChDYAABQgtAEAoAChDQAABQhtAAAoQGgDAEABQhsAAAoQ2gAAUIDQBgCA\nAurX9QAAgPdn03+b2n4b69I5my5e2m6bW/K1f2m3bcHa5ow2AAAUILQBAKAAoQ0AAAUIbQAAKEBo\nAwBAAUIbAAAKENoAAFCA0AYAgAKENgAAFCC0AQCgAKENAAAFCG0AAChAaAMAQAFCGwAAChDaAABQ\ngNAGAIAChDYAABQgtAEAoAChDQAABQhtAAAoQGgDAEABQhsAAAoQ2gAAUIDQBgCAAoQ2AAAUILQB\nAKAAoQ0AAAXUr+sBACTJpv82tX032KVzNl28tN02t+Rr/9Ju2wJg/eCMNgAAFCC0AQCgAKENAAAF\nCG0AAChAaAMAQAFCGwAAChDaAABQgNAGAIAChDYAABQgtAEAoAChDQAABQhtAAAoQGgDAEABQhsA\nAAoQ2gAAUIDQBgCAAoQ2AAAUILQBAKAAoQ0AAAUIbQAAKEBoAwBAAUIbAAAKENoAAFCA0AYAgAKE\nNgAAFCC0AQCgAKENAAAFCG0AAChAaAMAQAFCGwAAChDaAABQgNAGAIAChDYAABQgtAEAoAChDQAA\nBQhtAAAoQGgDAEABQhsAAAoQ2gAAUIDQBgCAAoQ2AAAUILQBAKAAoQ0AAAUIbQAAKKC+5IsPHz48\nXbp0SYcOHdKxY8fcdNNNefnll/PlL385c+bMyVZbbZWLLroo3bt3T1VV+fa3v5177703G2+8cb7z\nne9k1113TZJMnz49//Ef/5EkmTRpUg4//PCSwwYAgPet+Bntq6++OjfffHNuuummJMm0adMyZMiQ\n3H777RkyZEimTZuWJJk5c2aeffbZ3H777TnnnHNy1llnJUlefvnlXHrppbn++utzww035NJLL82C\nBQtKDxsAAN6Xdr905K677srYsWOTJGPHjs2dd9650vK6urr0798/CxcuTEtLS2bNmpV99903PXr0\nSPfu3bPvvvvmvvvua+9hAwDAu1I8tI8//vh84hOfyHXXXZckmTdvXvr06ZMk6d27d+bNm5ckaW5u\nTt++fWvr9e3bN83Nzassb2hoSHNzc+lhAwDA+1L0Gu1rr702DQ0NmTdvXiZOnJhtt912pcfr6upS\nV1e31rfbs+emqa/vuNZfd33Uu3fXdT2E9ZJ5WdV6PyddOrf/Jttxm13W9/n/H+v9frIObBBz0s7H\nj2Nn9TaIfaWdfdjnpGhoNzQ0JEk233zzjBgxIo899lg233zztLS0pE+fPmlpaUmvXr1qz21qaqqt\n29TUlIaGhjQ0NOTBBx+sLW9ubs7ee++9xu3On7+kwLtZ//Tu3TUvvrhoXQ9jvWNeVrUhzMmmi5e2\n6/a6dOmcxe24zSXr+fwnG8Z+0t42lDlpz+PHsbN6G8q+0p4+LHOypl8mil06smTJkrzyyiu1//71\nr3+dHXbYIcOHD8+MGTOSJDNmzMgBBxyQJLXlVVVl9uzZ6dq1a/r06ZOhQ4dm1qxZWbBgQRYsWJBZ\ns2Zl6NChpYYNAABrRbEz2vPmzctJJ52UJFmxYkUOOeSQ7LfffvnoRz+ayZMn58Ybb8yWW26Ziy66\nKEkybNiw3HvvvRkxYkQ22WSTTJ06NUnSo0ePfP7zn8/48eOTJCeddFJ69OhRatgAALBWFAvtfv36\n5ec///kqy3v27Jmrr756leV1dXU588wzV/ta48ePr4U2AABsCHwyJAAAFCC0AQCgAKENAAAFCG0A\nAChAaAMAQAFCGwAAChDaAABQgNAGAIAChDYAABQgtAEAoAChDQAABQhtAAAoQGgDAEABQhsAAAoQ\n2gAAUIDQBgCAAoQ2AAAUILQBAKAAoQ0AAAUIbQAAKEBoAwBAAUIbAAAKENoAAFCA0AYAgAKENgAA\nFCC0AQCgAKENAAAFCG0AAChAaAMAQAFCGwAAChDaAABQgNAGAIAChDYAABQgtAEAoAChDQAABQht\nAAAoQGgDAEABQhsAAAoQ2gAAUIDQBgCAAoQ2AAAUILQBAKAAoQ0AAAUIbQAAKEBoAwBAAUIbAAAK\nENoAAFCA0AYAgAKENgAAFCC0AQCgAKENAAAFCG0AAChAaAMAQAFCGwAAChDaAABQgNAGAIAChDYA\nABQgtAEAoAChDQAABQhtAAAoQGgDAEABQhsAAAoQ2gAAUIDQBgCAAoQ2AAAUILQBAKAAoQ0AAAUI\nbQAAKEBoAwBAAUIbAAAKENoAAFCA0AYAgAKENgAAFCC0AQCgAKENAAAFCG0AAChAaAMAQAFCGwAA\nChDaAABQgNAGAIAChDYAABQgtAEAoAChDQAABQhtAAAoQGgDAEABQhsAAAoQ2gAAUIDQBgCAAoQ2\nAAAUILQBAKAAoQ0AAAUIbQAAKEBoAwBAAUIbAAAKENoAAFCA0AYAgAKENgAAFCC0AQCgAKENAAAF\nCG0AACigeGivWLEiY8eOzT//8z8nSZ577rlMmDAhI0aMyOTJk7Ns2bIkybJlyzJ58uSMGDEiEyZM\nyPPPP197jR/+8IcZMWJEDjrooNx3332lhwwAAO9b8dD+8Y9/nO2226729fnnn59Pf/rTueOOO9Kt\nW7fceOONSZIbbrgh3bp1yx133JFPf/rTOf/885MkTz/9dG655ZbccsstueKKK/Ktb30rK1asKD1s\nAAB4X4qGdlNTU371q19l/PjxSZKqqnL//ffnoIMOSpIcfvjhueuuu5Ikd999dw4//PAkyUEHHZTf\n/va3qaoqd911V0aPHp1OnTqlX79+2WabbfLYY4+VHDYAALxvRUN76tSpmTJlSjp0eGMz8+fPT7du\n3VJfX58k6du3b5qbm5Mkzc3N2WKLLZIk9fX16dq1a
+bPn5/m5ub07du39poNDQ21dQAAYH1VX+qF\n77nnnvTq1Su77bZbHnjggVKbWa2ePTdNfX3Hdt3mutK7d9d1PYT1knlZ1Xo/J106t/8m23GbXdb3\n+f8f6/1+sg5sEHPSzsePY2f1Noh9pZ192OekWGg/8sgjufvuuzNz5swsXbo0r7zySr797W9n4cKF\naW1tTX19fZqamtLQ0JDkjTPVL7zwQvr27ZvW1tYsWrQoPXv2TENDQ5qammqv29zcXFvnncyfv6TU\n21qv9O7dNS++uGhdD2O9Y15WtSHMyaaLl7br9rp06ZzF7bjNJev5/Ccbxn7S3jaUOWnP48exs3ob\nyr7Snj4sc7KmXyaKXTryla98JTNnzszdd9+dCy64IIMHD873vve9DBo0KLfddluSZPr06Rk+fHiS\nZPjw4Zk+fXqS5LbbbsvgwYNTV1eX4cOH55ZbbsmyZcvy3HPP5dlnn83uu+9eatgAALBWtPt9tKdM\nmZKrrroqI0aMyMsvv5wJEyYkScaPH5+XX345I0aMyFVXXZWvfvWrSZIddtghBx98cEaNGpXPfvaz\nOeOMM9Kx44fjshAAADZcxS4deatBgwZl0KBBSZJ+/frVbun3Vp07d87FF1+82vUnTZqUSZMmFR0j\nAACsTT4b9NcLAAAgAElEQVQZEgAAChDaAABQgNAGAIAChDYAABQgtAEAoAChDQAABQhtAAAoQGgD\nAEABQhsAAAoQ2gAAUIDQBgCAAoQ2AAAUILQBAKAAoQ0AAAUIbQAAKEBoAwBAAUIbAAAKENoAAFCA\n0AYAgAKENgAAFCC0AQCgAKENAAAFCG0AAChAaAMAQAFCGwAAChDaAABQgNAGAIAChDYAABQgtAEA\noAChDQAABQhtAAAoQGgDAEABQhsAAAoQ2gAAUIDQBgCAAoQ2AAAUILQBAKAAoQ0AAAUIbQAAKEBo\nAwBAAUIbAAAKENoAAFCA0AYAgAKENgAAFCC0AQCgAKENAAAFCG0AAChAaAMAQAFCGwAAChDaAABQ\ngNAGAIAChDYAABQgtAEAoAChDQAABQhtAAAoQGgDAEABQhsAAAoQ2gAAUIDQBgCAAoQ2AAAUILQB\nAKAAoQ0AAAUIbQAAKEBoAwBAAUIbAAAKENoAAFCA0AYAgAKENgAAFCC0AQCgAKENAAAFCG0AAChA\naAMAQAFCGwAAChDaAABQgNAGAIAChDYAABQgtAEAoAChDQAABdS39YmvvfZaXnzxxXTu3Dl9+vQp\nOSYAANjgrTG0X3/99cyYMSM33HBDnnzyyWy22WZZtmxZ6uvrc+CBB+bTn/50PvKRj7TXWAEAYIOx\nxtA+8sgj09jYmK9//evZdddd07FjxyTJvHnzct999+WMM87IkUcemdGjR7fLYAEAYEOxxtC+7LLL\n0qtXr1WWb7755hk7dmzGjh2bl156qdjgAABgQ7XGP4ZcXWTPmzcvs2fPXuNzAADgw65Ndx056qij\nsmjRoixcuDBjx47N6aefnu9+97ulxwYAABusNoX2kiVL0rVr19xzzz0ZM2ZMfvGLX2TWrFmlxwYA\nABusNoX2smXLkiQPPPBA9t1333To0KH2h5EAAMCq2hTae++9d0aNGpXf/e532XvvvbNw4cJ06OCz\nbgAA4J206QNrzjzzzDz55JPp169fNtpooyxatCjnnntu6bEBAMAGq02hXVdXl3/4h39IU1NTmpqa\nkiSdOnUqOjAAANiQtSm0f/zjH+fCCy9M9+7da5eM1NXV5a677io6OAAA2FC1KbSvvvrq/N//+3/T\n0NBQejwAAPCB0Ka/aOzbt6/IBgCAd6FNZ7S/+MUv5vTTT8+wYcPSuXPn2vJhw4YVGxgAAGzI2hTa\n99xzT+655548++yzK12jLbQBAGD12hTad9xxR+6+++5svPHGpccDAAAfCG26Rrtfv36pr29TkwMA\nAGnjGe1tttkmn/rUp3LggQeudP/so48+utjAAABgQ9am0F6+fHn+/u//Pk899VTp8QAAwAdCm0L7\nX//1X0uPAwAAPlDWeI3273//+zWuvGzZsvzpT39aqwMCAIAPgjWG9rRp0/LZz342M2bMyJ///Ocs\nWrQof/3rX/PQQw/lggsuyIQJE9LS0rLadZcuXZrx48fn0EMPzejRo3PxxRcnSZ577rlMmDAhI0aM\nyOTJk7Ns2bIkb0T75MmTM2LEiEyYMCHPP/987bV++MMfZsSIETnooINy3333ra33DgAAxazx0pGL\nL744jz32WK677rr84Ac/SFNTUzbZZJPsuOOOOfDAA/OTn/wkm2222WrX7dSpU66++up06dIly5cv\nz1FHHZX99tsvV111VT796U9n9OjROeOMM3LjjTfmqKOOyg033JBu3brljjvuyC233JLzzz8/F110\nUZ5++unccsstueWWW9Lc3JyJEyfmtttuS8eOHYtMCAAArA1/8xrt3XffPbvvvvu7fuG6urp06dIl\nSdLa2prW1tbU1dXl/vvvz/e+970kyeGHH55LL700Rx11VO6+++584QtfSJIcdNBBOfvss1NVVe66\n666MHj06nTp1Sr9+/bLNNtvkscceS2Nj47seEwAAtJc23Uf7vVqxYkUOO+yw7LPPPtlnn33Sr1+/\ndOvWrXZP7r59+6a5uTlJ0tzcnC222CJJUl9fn65du2b+/Plpbm5O3759a6/Z0NBQWwcAANZXRT+F\npmPHjrn55puzcOHCnHTSSXnmmWdKbq6mZ89NU1//4bi0pHfvrut6COsl87Kq9X5OunRu/0224za7\nrO/z/z/W+/1kHdgg5qSdjx/HzuptEPtKO/uwz0m7fNxjt27dMmjQoMyePTsLFy5Ma2tr6uvr09TU\nlIaGhiRvnKl+4YUX0rdv37S2tmbRokXp2bNnGhoa0tTUVHut5ubm2jrvZP78JUXfz/qid++uefHF\nRet6GOsd87KqDWFONl28tF2316VL5yxux20uWc/nP9kw9pP2tqHMSXseP46d1dtQ9pX29GGZkzX9\nMlHs0pGXXnopCxcuTJK89tpr+c1vfpPtttsugwYNym233ZYkmT59eoYPH54kGT58eKZPn54kue22\n2zJ48ODU1dVl+PDhueWWW7Js2bI899xzefbZZ9/TNeMAANCe2nRGe968efnXf/3XvPDCC/nJT36S\nJ598Mo8++mg++clPvuM6LS0tOe2007JixYpUVZWPf/zj2X///bP99tvny1/+ci666KLsvPPOmTBh\nQpJk/PjxmTJlSkaMGJHu3bvnwgsvTJLssMMOOfjggzNq1Kh07NgxZ5xxhjuOAACw3mtTaH/jG9/I\nfvvtl2uuuSZJsu2222bKlClrDO1//Md/zIwZM1ZZ3q9fv9x4442rLO/cuXPtXttvN2nSpEyaNKkt\nQwUAgPVCmy4daW5uzic/+cnameROnTqlQ4eiNywBAIANWptq+c3b8b1p4cKFqaqqyIAAAOCDoE2X\njowYMSJn
nHFGFi9enJtuuinXXHNNxo0bV3psAACwwWpTaH/uc5/Lz3/+8yxcuDD33ntvjj322Bx2\n2GGlxwYAABusNt9H+9BDD82hhx5aciwAAPCB0ebb+/3nf/5n/vKXv6S1tbW2/Pvf/36xgQEAwIas\nTaH9+c9/PrvsskuGDBniHtYAANAGbQrtV199NWeeeWbpsQAAwAdGm27vt8cee+SPf/xj6bEAAMAH\nRpvOaB955JE55phj0rdv33Tu3Lm2fHWf8AgAALQxtKdMmZITTzwxu+yyi2u0AQCgDdoU2p07d87x\nxx9feiwAAPCB0aZrtD/2sY9l5syZpccCAAAfGG06o3399ddn2rRp6dKlSzp16pSqqlJXV5ff/va3\npccHAAAbpDaF9s9+9rPS4wAAgA+UNoX2VlttVXocAADwgbLG0J4yZUrOO++8jBs3LnV1das87vZ+\nAACwemsM7U996lNJklNPPbVdBgMAAB8Uawzta665JlOnTs3ee+/dXuMBAIAPhDXe3u+JJ55or3EA\nAMAHSpvuow0AALw7a7x05KmnnsqQIUNWWe4+2gAAsGZrDO1/+Id/yLRp09prLAAA8IGxxtDu1KmT\ne2gDAMB7sMZrtDfaaKP2GgcAAHygrDG0r7/++vYaBwAAfKC46wgAABQgtAEAoAChDQAABQhtAAAo\nQGgDAEABQhsAAAoQ2gAAUIDQBgCAAoQ2AAAUILQBAKAAoQ0AAAUIbQAAKEBoAwBAAUIbAAAKENoA\nAFCA0AYAgAKENgAAFCC0AQCgAKENAAAFCG0AAChAaAMAQAFCGwAAChDaAABQgNAGAIAChDYAABQg\ntAEAoAChDQAABQhtAAAoQGgDAEABQhsAAAoQ2gAAUIDQBgCAAoQ2AAAUILQBAKAAoQ0AAAUIbQAA\nKEBoAwBAAUIbAAAKENoAAFCA0AYAgAKENgAAFCC0AQCgAKENAAAFCG0AAChAaAMAQAFCGwAAChDa\nAABQgNAGAIAChDYAABQgtAEAoAChDQAABQhtAAAoQGgDAEABQhsAAAoQ2gAAUIDQBgCAAoQ2AAAU\nILQBAKAAoQ0AAAUIbQAAKEBoAwBAAUIbAAAKENoAAFCA0AYAgAKENgAAFCC0AQCgAKENAAAFCG0A\nAChAaAMAQAFCGwAAChDaAABQgNAGAIAChDYAABQgtAEAoIBiof3CCy/k2GOPzahRozJ69OhcffXV\nSZKXX345EydOzMiRIzNx4sQsWLAgSVJVVc4999yMGDEiY8aMyR/+8Ifaa02fPj0jR47MyJEjM336\n9FJDBgCAtaZYaHfs2DGnnXZabr311lx33XW55ppr8vTTT2fatGkZMmRIbr/99gwZMiTTpk1Lksyc\nOTPPPvtsbr/99pxzzjk566yzkrwR5pdeemmuv/763HDDDbn00ktrcQ4AAOurYqHdp0+f7LrrrkmS\nzTbbLNtuu22am5tz1113ZezYsUmSsWPH5s4770yS2vK6urr0798/CxcuTEtLS2bNmpV99903PXr0\nSPfu3bPvvvvmvvvuKzVsAABYK9rlGu3nn38+TzzxRPbYY4/Mmzcvffr0SZL07t078+bNS5I0Nzen\nb9++tXX69u2b5ubmVZY3NDSkubm5PYYNAADvWX3pDSxevDhf+tKX8i//8i/ZbLPNVnqsrq4udXV1\na32bPXtumvr6jmv9dddHvXt3XddDWC+Zl1Wt93PSpXP7b7Idt9llfZ///7He7yfrwAYxJ+18/Dh2\nVm+D2Ffa2Yd9ToqG9vLly/OlL30pY8aMyciRI5Mkm2++eVpaWtKnT5+0tLSkV69eSd44U93U1FRb\nt6mpKQ0NDWloaMiDDz5YW97c3Jy99957jdudP39JgXez/undu2tefHHRuh7Gese8rGpDmJNNFy9t\n1+116dI5i9txm0vW8/lPNoz9pL1tKHPSnsePY2f1NpR9pT19WOZkTb9MFLt0pKqqnH766dl2220z\nceLE2vLhw4dnxowZSZIZM2bkgAMOWGl5VVWZPXt2unbtmj59+mTo0KGZNWtWFixYkAULFmTWrFkZ\nOnRoqWEDAMBaUeyM9u9+97vcfPPN2XHHHXPYYYclSU455ZSccMIJmTx5cm688cZsueWWueiii5Ik\nw4YNy7333psRI0Zkk002ydSpU5MkPXr0yOc///mMHz8+SXLSSSelR48epYYNAABrRbHQHjBgQP74\nxz+u9rE376n9VnV1dTnzzDNX+/zx48fXQhsAADYEPhkSAAAKENoAAFCA0AYAgAKENgAAFCC0AQCg\nAKENAAAFCG0AAChAaAMAQAFCGwAAChDaAABQgNAGAIAChDYAABQgtAEAoAChDQAABQhtAAAoQGgD\nAEABQhsAAAoQ2gAAUIDQBgCAAoQ2AAAUILQBAKAAoQ0AAAUIbQAAKEBoAwBAAUIbAAAKENoAAFCA\n0AYAgAKENgAAFCC0AQCgAKENAAAFCG0AAChAaAMAQAFCGwAAChDaAABQgNAGAIAChDYAABQgtAEA\noAChDQAABQhtAAAoQGgDAEABQhsAAAoQ2gAAUIDQBgCAAoQ2AAAUILQBAKAAoQ0AAAUIbQAAKEBo\nAwBAAUIbAAAKENoAAFCA0AYAgAKENgAAFCC0AQCgAKENAAAFCG0AAChAaAMAQAFCGwAAChDaAABQ\ngNAGAIAChDYAABQgtAEAoAChDQAABQhtAAAoQGgDAEABQhsAAAoQ2gAAUIDQBgCAAoQ2AAAUILQB\nAKAAoQ0AAAUIbQAAKEBoAwBAAUIbAAAKENoAAFCA0AYAgAKENgAAFCC0AQCgAKENAAAFCG0AAChA\naAMAQAFCGwAAChDaAABQgNAGAIAChDYAABQgtAEAoAChDQAABQhtAAAoQGgDAEABQhsAAAoQ2gAA\nUIDQBgCAAoQ2AAAUILQBAKAAoQ0AAAUIbQAAKEBoAwBAAUIbAAAKENoAAFCA0AYAgAKENgAAFCC0\nAQCggGKh/fWvfz1DhgzJIYccUlv28ssvZ+LEiRk5cmQmTpyYBQsWJEmqqsq5556bESNGZMyYMfnD\nH/5QW2f69OkZOXJkRo4cmenTp5caLgAArFXFQvsTn/hErrjiipWWTZs2LUOGDMntt9+eIUOGZNq0\naUmSmTNn5tlnn83tt9+ec845J2eddVaSN8L80ksvzfXXX58bbrghl156aS3OAQBgfVYstAcOHJju\n3buvtOyuu+7K2LFjkyRjx47NnXfeudLyurq69O/fPwsXLkxLS0tmzZqVfffdNz169Ej37t2z7777\n5r777is1ZAAAWGvq23Nj8+bNS58+fZIkvXv3zrx585Ikzc3N6du3b+15ffv2TXNz8yrLGxoa0tzc\n/De307Pnpqmv77iWR79+6t2767oewnrJvKxqvZ+TLp3bf5PtuM0u6/v8/4/1fj9ZBzaIOWnn48ex\ns3obxL7Szj7sc9Kuof1WdXV1qaurK/La8+cvKfK665vevbvmxRcXr
ethrHfMy6o2hDnZdPHSdt1e\nly6ds7gdt7lkPZ//ZMPYT9rbhjIn7Xn8OHZWb0PZV9rTh2VO1vTLRLvedWTzzTdPS0tLkqSlpSW9\nevVK8saZ6qamptrzmpqa0tDQsMry5ubmNDQ0tOeQAQDgPWnX0B4+fHhmzJiRJJkxY0YOOOCAlZZX\nVZXZs2ena9eu6dOnT4YOHZpZs2ZlwYIFWbBgQWbNmpWhQ4e255ABAOA9KXbpyCmnnJIHH3ww8+fP\nz3777ZcvfvGLOeGEEzJ58uTceOON2XLLLXPRRRclSYYNG5Z77703I0aMyCabbJKpU6cmSXr06JHP\nf/7zGT9+fJLkpJNOSo8ePUoNGQAA1ppioX3BBResdvnVV1+9yrK6urqceeaZq33++PHja6ENAAAb\nCp8MCQAABQhtAAAoQGgDAEABQhsAAAoQ2gAAUIDQBgCAAoQ2AAAUILQBAKAAoQ0AAAUIbQAAKEBo\nAwBAAUIbAAAKENoAAFCA0AYAgAKENgAAFCC0AQCgAKENAAAFCG0AAChAaAMAQAFCGwAAChDaAABQ\ngNAGAIAChDYAABQgtAEAoAChDQAABQhtAAAoQGgDAEABQhsAAAoQ2gAAUIDQBgCAAoQ2AAAUILQB\nAKAAoQ0AAAUIbQAAKEBoAwBAAUIbAAAKqF/XAwAAoLx/e3Bqu26vS5fOWbx4abts62t7/0u7bOfd\nckYbAAAKENoAAFCA0AYAgAKENgAAFCC0AQCgAKENAAAFCG0AAChAaAMAQAFCGwAACvDJkABsUNrz\n0+3a85PtkvX30+2A98YZbQAAKEBoAwBAAUIbAAAKENoAAFCA0AYAgAKENgAAFCC0AQCgAKENAAAF\nCG0AAChAaAMAQAFCGwAAChDaAABQgNAGAIAChDYAABQgtAEAoAChDQAABQhtAAAoQGgDAEABQhsA\nAAoQ2gAAUIDQBgCAAoQ2AAAUILQBAKAAoQ0AAAUIbQAAKEBoAwBAAUIbAAAKENoAAFCA0AYAgAKE\nNgAAFCC0AQCgAKENAAAFCG0AAChAaAMAQAFCGwAAChDaAABQgNAGAIAChDYAAP+/9u49KKrzjOP4\nV8iCqJRIiibVTmPqpWpwrIOx6ngJFyEsC15QGwWj0TSWGJPReIlp0XFirFSdMXY61anTTjuOja0X\npEFq1ca0VTHSZjBYm9rES2yiI6DIdWF5+wfjCbgIIrsLa36fv2T3nOe878N5nee8nHNe8QIV2iIi\nIiIiXqBCW0RERETECx7q6AaIiIiIeFpWVpBPj9e9O1RU+OaYy5c7fXIcaT/NaIuIiIiIeIFmtEVE\nOqkHeUYONCsnIg8+zWiLiIiIiHiBCm0RERERES9QoS0iIiIi4gUqtEVEREREvECFtoiIiIiIF6jQ\nFhERERHxAr3ez8O6Zb3lu4N1D6ZbRY3PDle5fJXPjiUiIiLi7zSjLSIiIiLiBSq0RURERES8QIW2\niIiIiIgX+E2h/f777xMfH09cXBzbt2/v6OaIiIiIiLTILx6GdLlcrF27ll/96lf07t2b1NRUoqOj\n6d+/f0c3TeS+ZJ3y4UOzQPfuwVT46MHZ5U/poVkRERHwk0K7sLCQb33rW3zzm98EwG63c+TIERXa\nfiIrK8inx+veHSoqfHPM5cudPjmOiIiI+J8uxhjT0Y1oTV5eHn/9619Zt24dAPv376ewsJDMzMwO\nbpmIiIiISPP85h5tERERERF/4heFdu/evfniiy+sn69evUrv3r07sEUiIiIiIi3zi0I7MjKSCxcu\ncPnyZZxOJ++++y7R0dEd3SwRERERkbvyi4chH3roITIzM1mwYAEul4tp06YxYMCAjm6WiIiIiMhd\n+cXDkCIiIiIi/sYvbh0REREREfE3KrRFRERERLxAhfZXwJYtWzh+/HhHN4OysjJ27tzZ5LMNGzZg\nt9vZsGGDT9rwxhtvcP78eZ8c614oJ963d+9e1q5d26Z9fvGLX7S6zcqVK8nLy7vfZnlMW86hrVu3\nsmPHjnbFbk56ejpnzpy557jeppx4zoM8fnSeuFNOPE+F9lfAK6+8wpgxYzq6GZSVlbFr164mn+3e\nvZsDBw6wYsUKn7Rh3bp1nWpFUeWkc9q2bVtHN+GeefMcai62P1BOOpa/jB+dJ+6UE89Toe1Bn332\nGc888ww/+tGPsNvtPP/881RXVwMNV2hvvvkmKSkpJCUlUVhY6LZ/TU0Nr7/+Og6Hg8mTJ3Py5Emg\nYUZh0aJFzJ8/n0mTJpGVlWXt87e//Y2ZM2cyZcoUFi9eTEVFhVvcxjMH0dHRbNq0iZSUFKZOnUpR\nURHz588nNjbWGgD19fWsWbOGhIQE5s2bxwsvvOCRmYdNmzZx6dIlUlJS2LBhAwsXLqSyspKpU6eS\nm5vbZNsbN26QkZGBw+FgxowZnDt3Dmi4gn799ddJT08nJiaG3/zmN9Y+2dnZpKamkpKSQmZmJi6X\ny60Nja+Uv/vd71pX6nPnzqWwsNCKe+TIEQCqqqp45ZVXSExM5KWXXmL69OkevdJWTlrX3nEF8Pnn\nn5Oens6kSZP42c9+Zn2ekZHB1KlTsdvtvPPOOwBs3LiR6upqUlJSWLp0KdCwGq3D4SA5OZlly5ZZ\n+58+fZrvf//7xMTEdNjsXFvOIYBz584xc+ZMJk2axO7duwGoqKjgueeeY8qUKTgcDg4fPtxsbIDt\n27dbudi4caMVNy8vj9TUVOLj4zl9+rQPen53ysmXNH7uTueJO+XEC4x4zOXLl83gwYPN2bNnjTHG\nLF682Ozfv98YY0xaWpp54403jDHGnDp1ytjtdrf9d+zYYVauXGmMMeb8+fNmwoQJprq62uzZs8dE\nR0ebsrIyU11dbSZOnGj+97//meLiYjNr1ixTUVFhjDFm27ZtZuvWrW5xV6xYYQ4ePGiMMebpp582\nO3fuNMYYs27dOpOUlGRu3bpliouLzejRo40xxhw8eNAsWLDAuFwuc+3aNRMVFWXt39783Nnv4cOH\nN7vt2rVrrb4cP37cJCcnG2OMefvtt83MmTNNTU2NKS4uNk899ZRxOp3m/Pnz5sUXXzROp9MYY8zq\n1avNvn373OKmpaWZwsJCY4wxAwcONO+9954xxpiMjAwzb94843Q6zb/+9S/reL/85S/Nj3/8Y2OM\nMf/+97/N4MGDrf09QTlpXXvH1Z49e8zYsWNNSUmJqaqqMna73WpvaWmpMcZYn5eUlBhjmv4OPv74\nYzNp0iRTXFzcZJ8VK1aYl19+2bhcLvOf//zHxMbGeqP7rWrLOfT2228bh8NhqqqqTHFxsRk/frz5\n4osvTG1trbl165Yxxpji4mITGxtr6uvr3WK/9957ZubMmaaystIY82Uu0tLSzPr1661tnnvuOU93\ns02Uky9p/NydzhN3yonn+cV7tP1J3759GTx4MABDhw7lypUr1nd2ux2AkSNHUl5eTllZGV/72tes\n7wsKCkhLSwPg29/+Nt/4
xjf49NNPARg9ejShoaHWd1euXOHWrVucP3+eZ599FoDa2lqGDx/eahtj\nYmIAGDhwIJWVlfTo0QOAoKAgysrKKCgoICEhgYCAACIiIhg1alS7cnI/CgoK2Lp1K9DQ9xs3blBe\nXg7AhAkTCAoKIjw8nPDwcIqLizlx4gQfffQRqampAFRXV/PII4+0eAybzcb48eOBhlwEBQVhs9kY\nOHCg9XsrKChgzpw51jaDBg3ySn/vxVc5J+0ZVwBjxoyhZ8+eAMTFxVFQUEBkZCS//e1v+fOf/ww0\nzNpdvHjR2u62kydPkpCQQHh4OAAPP/yw9V1sbCwBAQH079+f69eve7jX3hETE0PXrl3p2rUro0aN\n4syZM0yYMIHNmzfzwQcfEBAQwNWrV5vtz4kTJ5g6dSohISFA01zExcUB7r8ff/Cg50TjxzMe9PPk\nfignrVOh7WFBQUHWvwMDA6mpqbF+7tKlS5Nt7/y5LXFdLhfGGMaOHcvmzZvb1EabzQZAQEBAk7gB\nAQHU1dW1KVZHuDMXdXV1GGOYMmWK9afKe2Gz2azfQeNcBAQENHuLRWf2oOekveOquW3y8/M5fvw4\n77zzDiEhIaSnpzeJ29Z2+Yvm8pOTk0NJSQl79+7FZrMRHR1937no6HPlfjzoOdH48YwH/Ty5H8pJ\n63SPtg/dvr/p9OnThIaGWjPUt0VFRZGTkwPAp59+yueff84TTzxx13jDhw/nH//4BxcvXgSgsrLS\nmgFvjxEjRnDo0CHq6+u5fv06p06dandMgO7duzd7D3lzoqKiOHDgAAD5+fn07NnTmnlvzujRo/nT\nn/5EcXEx0HA/syeugkeMGMHBgwcBOH/+PB9//HG7YzamnLRfa+MK4O9//zs3btygurqaw4cPM2LE\nCG7dukVYWBghISH897//5cMPP7S2f+ihh6itrQXge9/7Hnl5eZSWlgINeexM2nIOARw5coSamhpK\nS0s5deoUkZGR3Lp1i0ceeQSbzcbJkyet8+TO2GPGjGHv3r1UVVUBnS8Xtykn9+6rPH50nrhTTjxP\nM9o+FBwczOTJk6mrq+Ott95y+37WrFmsWbMGh8NBYGAg69evb/GKPzw8nPXr17NkyRKcTicAr776\nKv369WtXO+Pj4zlx4gSJiYk89thjDBkypNn/fNuqZ8+ejBgxgqSkJMaNG9fiE8yLFi1i1apVOBwO\nQhnyD5sAAAo4SURBVEJC+MlPftJi7P79+/Pqq6/y/PPPU19fj81mIzMzkz59+rSrzbNmzWLlypUk\nJibyxBNP0L9/f4/k4jblpP1aG1cAw4YN4+WXX+bq1askJycTGRnJoEGD+N3vfsczzzxDv379mtx2\nNWPGDJKTkxkyZAibNm1i4cKFpKenExAQwJAhQ1rNvS+15RwCGDRoEHPmzKG0tJSMjAx69+6Nw+Hg\nhz/8IQ6HgyeffNK6wG8u9rlz55g2bRo2m40JEyawZMkSX3SzTZSTe/dVHj86T9wpJ56nJdh9JD09\nneXLlxMZGdnRTbknFRUVdO/endLSUqZPn86uXbuIiIjo6Gb5nMvloq6ujuDgYC5dusTcuXPJy8vz\nuz95elJnyom/jSuRzkTjR8T7NKMtzVq4cCFlZWXU1taSkZHxlSyyoeFVdnPmzLHueV69evVXusgG\n5UREROReaUZbRERERMQL9DCkdHrV1dWkpaV59MnjkpIS5s+f77F4HcHTeXE6ncyePdsv3jwj907j\nx53Gjoj4igpt6fT27NlDXFwcgYGBHosZHh5Or169KCgo8FhMX/N0XoKCghg9enSzq3+J/9L4caex\n86X8/HxWrlzZ0c3odJQXd8rJ/VGhLZ1eTk4OMTExLS4NHx0dTVZWFg6Hg9TUVOuVh42Xn4eGJcZv\ni4mJsV6n6I9u5wWaX8b2bssrb926lR07dlhxkpKS+Oyzz4CGBST8OSfiTuPHncZOyy5evMjcuXNJ\nTk5mypQpXLp0CWMMGzZsICkpCYfDYV1U5Ofnk56ezuLFi0lISGDp0qUYY3j//fdZvHixFTM/P58X\nX3yxo7rkEcqLO+WkdXoYUjo1p9PJ5cuX6du3L3l5eVy5coXc3FyKi4tJTExk2rRp1rahoaHk5OSw\nf/9+3nrrLbZt29Zi7MjISLZs2eLtLnhF47wcO3aMo0ePsnv3bkJCQpq8i7S6uprs7Gw++OADVq1a\nxR//+McW4w4YMIAzZ854u/niIxo/7jR2Wvfaa6/xgx/8gLi4OGpqaqivr+fQoUOcO3eO7OxsSktL\nSU1NJSoqCoCzZ8/y7rvv0qtXL5599lkKCgoYM2YMmZmZVFZW0q1bN3Jzc0lMTOzgnrWP8uJOOWmd\nCm3p1EpLS613NLe2NHxSUhLQsKTw+vXrW40dHh7OtWvXPN9oH2icl5aWsW1ueeWWBAYGYrPZKC8v\nb3ExHPEPGj/uNHYaTJ8+HafTSWVlJTdv3iQlJQXAel/27SWwg4ODgYbzx263ExgYyNe//nVGjhzJ\nmTNn6NGjB8OGDePRRx8F4Dvf+Q5XrlwhKiqKcePG8Ze//IX4+HiOHTvGsmXLOqazbaC8uFNO2keF\ntnRqXbt2tRbjuR+BgYHU19cDUF9fb61WBlBTU2P9x+Bv7jUvzS2d3DgngNvSuE6n02/zIk1p/LjT\n2Gnw+9//Hmj4M/2+ffusRWTKy8vbHOvOJd5vP2SamJjIzp07CQsL48knn/SLCxDlxZ1y0j66R1s6\ntbCwMFwuFzU1Na0uDX97WfDc3FzrXtI+ffpQVFQEwNGjR5sUChcuXGDAgAE+6olnNc5LS8vYNre8\ncp8+fTh79iwARUVF1j2m0DDb17NnT2w2mw97I96i8eNOY6dlPXr04NFHH+Xw4cNAw8VDVVUVUVFR\nHDx4EJfLRUlJCadPn2bYsGEtxnrqqac4e/Ysu3fv9vtbAZQXd8rJvdGMtnR6Y8eOpaCgoNWl4W/e\nvInD4SAoKIjNmzcDDUsBZ2RkkJyczLhx4+jWrZu1fX5+PhMnTvR1dzzmdl7Gjx9/12Vsm1teOT4+\nnuzsbOx2O8OGDePxxx+3Yvp7TsSdxo87jZ2WZWVlkZmZyZYtW7DZbGzZsoW4uDj++c9/kpKSQpcu\nXVi2bBkRERF88sknd40TGBjIxIkT2bdvHxs2bPBhD7xDeXGnnNwDI9LJffTRR+a1114zxhhTXl5u\njDGmpKTExMTEmGvXrhljjHn66adNcXFxm+LOmjXL3Lhxw7ON9aHGeWlOWlqaKSwsbFPMl156yXzy\nySftbZp0Iho/7jR2RMRXNKMtnd7QoUMZNWoULpfLY0vDl5SUMG/ePMLCwjzcWt9pnBdPvA/Y6XQS\nGxtLv379PNA66Sw0ftxp7IiIr2gJdhERERERL9DDkCIiIiIiXqBCW0REROQO1dXVpKWlWa+g84SS\nkhLmz5/vsXgdwdN5cTqdzJ49m7q6Oo/E62xUaIuIiIjcYc+ePcTFxXnkPv7bwsPD6
dWrFwUFBR6L\n6WuezktQUBCjR4+2Xqn5oFGhLSIiInKHnJwcYmJiqK+vZ82aNSQkJDBv3jxeeOEF8vLyAIiOjiYr\nKwuHw0FqaioXL14EYOXKldY2gPVueoCYmBhycnJ82xkPup0XgO3bt+NwOEhOTmbjxo0ApKen8+ab\nb5KSkkJSUhKFhYUAbN26lR07dlhxkpKSrHfRx8bG+nVOWqJCW0RERKQRp9PJ5cuX6du3L4cOHeLK\nlSvk5uaSlZXFhx9+2GTb0NBQcnJySEtLs9653pLIyEi/ndFunJdjx45x9OhRdu/ezYEDB1iwYIG1\nXXV1NdnZ2axevZpVq1a1GnfAgAGcOXPGm03vMCq0RURERBopLS21FnQqKCggISGBgIAAIiIiGDVq\nVJNtk5KSALDb7W5FeHPCw8O5du2a5xvtA43zcuLECaZOnUpISAgADz/8sLWd3W4HYOTIkZSXl1NW\nVtZi3MDAQGw2230t697ZqdAWERERaaRr1644nc773j8wMJD6+noA6uvrqa2ttb6rqakhODi43W3s\nCPealy5durj93Dgn0JCHxpxOp9/mpSUqtEVEREQaCQsLw+VyUVNTw4gRIzh06BD19fVcv36dU6dO\nNdn24MGDAOTm5lr3Yvfp04eioiIAjh492qTQvnDhAgMGDPBRTzyrcV7GjBnD3r17qaqqAuDGjRvW\ndrcfbDx9+jShoaGEhobSp08fzp49C0BRUZF1fzY0zJT37NkTm83mw974hlaGFBEREbnD2LFjKSgo\nID4+nhMnTpCYmMhjjz3GkCFDrNsnAG7evInD4SAoKIjNmzcDMGPGDDIyMkhOTmbcuHF069bN2j4/\nP5+JEyf6ujseczsv48eP59y5c0ybNg2bzcaECRNYsmQJAMHBwUyePJm6ujrrvvX4+Hiys7Ox2+0M\nGzaMxx9/3Irp7zlpiVaGFBEREblDUVERv/71r/npT39KRUUF3bt3p7S0lOnTp7Nr1y4iIiKIjo7m\nD3/4A+Hh4fccd/bs2fz85z8nLCzMi633nsZ5aU56ejrLly8nMjLynmMuWrSIpUuX0q9fP081s9PQ\njLaIiIjIHYYOHcqoUaNwuVwsXLiQsrIyamtrycjIICIi4r5ilpSUMG/ePL8tsqFpXjzxLm2n00ls\nbOwDWWSDZrRFRERERLxCD0OKiIiIiHiBCm0RERERES9QoS0iIiIi4gUqtEVEREREvECFtoiIiIiI\nF/wfAOvqbvm2/GkAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "objects = ('np one img', 'tf one img\\n(gpu)', 'tf one img\\n(cpu)', 'np batch', 'tf batch\\n(gpu)', \n",
+ " 'tf batch\\n(cpu)', 'np batch\\n+conv', 'tf batch\\n+conv\\n(gpu)', 'tf batch\\n+conv\\n(cpu)')\n",
+ "y_pos = np.arange(len(objects))\n",
+ "performance = [large_np.average, large_tf.average, large_tf_cpu.average, \n",
+ " large_np_batch.average, large_tf_batch.average, large_tf_batch_cpu.average,\n",
+ " large_np_conv.average, large_tf_conv.average, large_tf_conv_cpu.average]\n",
+ "performance = [i*1000 for i in performance]\n",
+ "fig, ax = plt.subplots(1, figsize=(12,12))\n",
+ "ax.bar(y_pos, performance, align='center', alpha=0.5, color=['red', 'blue', 'green'])\n",
+ "plt.xticks(y_pos, objects)\n",
+ "plt.ylabel('Time (ms)')\n",
+ "plt.title('Times to perform different DTCWT operations on large images')\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Comparison to Convolutions\n",
+ "One important distinguising feature that would be nice would be to see a speed-up in using the dtcwt over using convolutions."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## First we can compare execution time on a CPU"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-10T23:08:12.470902Z",
+ "start_time": "2017-08-10T23:08:12.467758Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "session_conf = tf.ConfigProto(\n",
+ " intra_op_parallelism_threads=1,\n",
+ " inter_op_parallelism_threads=1,\n",
+ " device_count={'CPU': 1})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-10T23:09:27.466591Z",
+ "start_time": "2017-08-10T23:09:20.670606Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "2.2268359661102295\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Create the input\n",
+ "h, w = 512, 512\n",
+ "in_ = np.random.randn(100,h,w,3)\n",
+ "\n",
+ "# Set up the transforms\n",
+ "nlevels = 3\n",
+ "tf.reset_default_graph()\n",
+ "with tf.device(\"/cpu:0\"):\n",
+ " fwd_tf = dtcwt.tf.Transform2d() # Tensorflow Transform\n",
+ " in_placeholder = tf.placeholder(tf.float32, [None, h, w, 3])\n",
+ " Yl, Yh = fwd_tf.forward_channels(in_placeholder, nlevels=nlevels)\n",
+ "\n",
+ "sess = tf.Session(config=session_conf)\n",
+ "sess.run(tf.global_variables_initializer())\n",
+ "\n",
+ "start = time.time()\n",
+ "#dtcwt_cpu = %timeit -o sess.run(Yl, {in_placeholder: in_})\n",
+ "sess.run(Yl, {in_placeholder: in_})\n",
+ "print(time.time()-start)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2017-08-10T23:07:47.429889Z",
+ "start_time": "2017-08-10T23:06:54.087493Z"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "6.17 s ± 150 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Hard to say what an equivalent is? Let us compare to a single layer with 5x5x3x64\n",
+ "h, w = 512, 512\n",
+ "in_ = np.random.randn(100,h,w,3)\n",
+ "\n",
+ "# Set up the transforms\n",
+ "nlevels = 3\n",
+ "tf.reset_default_graph()\n",
+ "with tf.device(\"/cpu:0\"):\n",
+ " in_placeholder = tf.placeholder(tf.float32, [None, h, w, 3])\n",
+ " weights = tf.get_variable('weights', shape=(10,10,3,64))\n",
+ " out = tf.nn.conv2d(in_placeholder, weights, strides=[1,1,1,1], padding='SAME')\n",
+ "\n",
+ "sess = tf.Session(config=session_conf)\n",
+ "sess.run(tf.global_variables_initializer())\n",
+ "dtcwt_cpu = %timeit -o sess.run(out, {in_placeholder: in_})"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.5.2"
+ },
+ "toc": {
+ "colors": {
+ "hover_highlight": "#DAA520",
+ "navigate_num": "#000000",
+ "navigate_text": "#333333",
+ "running_highlight": "#FF0000",
+ "selected_highlight": "#FFD700",
+ "sidebar_border": "#EEEEEE",
+ "wrapper_background": "#FFFFFF"
+ },
+ "moveMenuLeft": true,
+ "nav_menu": {
+ "height": "12px",
+ "width": "252px"
+ },
+ "navigate_menu": true,
+ "number_sections": true,
+ "sideBar": true,
+ "threshold": 4,
+ "toc_cell": false,
+ "toc_section_display": "block",
+ "toc_window_display": false,
+ "widenNotebook": false
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/tests/requirements.txt b/tests/requirements.txt
index d631253..7b87548 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -3,3 +3,4 @@ pytest
pytest-capturelog
pytest-cov
coverage
+scipy
diff --git a/tests/test_tfTransform1d.py b/tests/test_tfTransform1d.py
new file mode 100644
index 0000000..7839121
--- /dev/null
+++ b/tests/test_tfTransform1d.py
@@ -0,0 +1,375 @@
+import os
+import pytest
+
+from pytest import raises
+
+import numpy as np
+from importlib import import_module
+from dtcwt.numpy import Transform1d as Transform1d_np
+from dtcwt.coeffs import biort, qshift
+import tests.datasets as datasets
+from .util import skip_if_no_tf
+from scipy import stats
+from dtcwt.compat import dtwavexfm, dtwaveifm
+import dtcwt
+
+PRECISION_DECIMAL = 5
+TOLERANCE = 1e-6
+
+
+@skip_if_no_tf
+def setup():
+ global mandrill, in_p, pyramid_ops
+ global tf, Transform1d, dtwavexfm2, dtwaveifm2, Pyramid_tf
+ global np_dtypes, tf_dtypes, stats
+ # Import the tensorflow modules
+ tf = import_module('tensorflow')
+ dtcwt_tf = import_module('dtcwt.tf')
+ dtcwt_tf_xfm1 = import_module('dtcwt.tf.transform1d')
+ Transform1d = getattr(dtcwt_tf, 'Transform1d')
+ Pyramid_tf = getattr(dtcwt_tf, 'Pyramid')
+ np_dtypes = getattr(dtcwt_tf_xfm1, 'np_dtypes')
+ tf_dtypes = getattr(dtcwt_tf_xfm1, 'tf_dtypes')
+
+ mandrill = datasets.mandrill()
+ # Make sure we run tests on cpu rather than gpus
+ os.environ["CUDA_VISIBLE_DEVICES"] = ""
+ dtcwt.push_backend('tf')
+
+
+@skip_if_no_tf
+def test_simple():
+ vec = np.random.rand(630)
+ Yl, Yh = dtwavexfm(vec, 3)
+ assert len(Yh) == 3
+
+
+@skip_if_no_tf
+def test_simple_with_no_levels():
+ vec = np.random.rand(630)
+ Yl, Yh = dtwavexfm(vec, 0)
+ assert len(Yh) == 0
+
+
+@skip_if_no_tf
+def test_simple_with_scale():
+ vec = np.random.rand(630)
+ Yl, Yh, Yscale = dtwavexfm(vec, 3, include_scale=True)
+ assert len(Yh) == 3
+ assert len(Yscale) == 3
+
+
+@skip_if_no_tf
+def test_simple_with_scale_and_no_levels():
+ vec = np.random.rand(630)
+ Yl, Yh, Yscale = dtwavexfm(vec, 0, include_scale=True)
+ assert len(Yh) == 0
+ assert len(Yscale) == 0
+
+
+@skip_if_no_tf
+def test_perfect_recon():
+ vec = np.random.rand(630)
+ Yl, Yh = dtwavexfm(vec)
+ vec_recon = dtwaveifm(Yl, Yh)
+ assert np.max(np.abs(vec_recon - vec)) < TOLERANCE
+
+
+@skip_if_no_tf
+def test_simple_custom_filter():
+ vec = np.random.rand(630)
+ Yl, Yh = dtwavexfm(vec, 4, biort('legall'), qshift('qshift_06'))
+ vec_recon = dtwaveifm(Yl, Yh, biort('legall'), qshift('qshift_06'))
+ assert np.max(np.abs(vec_recon - vec)) < TOLERANCE
+
+
+@skip_if_no_tf
+def test_single_level():
+ vec = np.random.rand(630)
+ Yl, Yh = dtwavexfm(vec, 1)
+
+
+@skip_if_no_tf
+def test_non_multiple_of_two():
+ vec = np.random.rand(631)
+ with raises(ValueError):
+ Yl, Yh = dtwavexfm(vec, 1)
+
+
+@skip_if_no_tf
+def test_2d():
+ Yl, Yh = dtwavexfm(np.random.rand(10,10))
+
+
+@skip_if_no_tf
+def test_integer_input():
+ # Check that an integer input is correctly coerced into a floating point
+ # array
+ Yl, Yh = dtwavexfm([1,2,3,4])
+ assert np.any(Yl != 0)
+
+
+@skip_if_no_tf
+def test_integer_perfect_recon():
+ # Check that an integer input is correctly coerced into a floating point
+ # array and reconstructed
+ A = np.array([1,2,3,4], dtype=np.int32)
+ Yl, Yh = dtwavexfm(A)
+ B = dtwaveifm(Yl, Yh)
+ assert np.max(np.abs(A-B)) < 1e-12
+
+
+@skip_if_no_tf
+def test_float32_input():
+ # Check that an float32 input is correctly output as float32
+ Yl, Yh = dtwavexfm(np.array([1,2,3,4]).astype(np.float32))
+ assert np.issubsctype(Yl.dtype, np.float32)
+ assert np.all(list(np.issubsctype(x.dtype, np.complex64) for x in Yh))
+
+
+@skip_if_no_tf
+def test_reconstruct():
+ # Reconstruction up to tolerance
+ vec = np.random.rand(630)
+ Yl, Yh = dtwavexfm(vec)
+ vec_recon = dtwaveifm(Yl, Yh)
+ assert np.all(np.abs(vec_recon - vec) < TOLERANCE)
+
+
+@skip_if_no_tf
+def test_reconstruct_2d():
+ # Reconstruction up to tolerance
+ vec = np.random.rand(630, 20)
+ Yl, Yh = dtwavexfm(vec)
+ vec_recon = dtwaveifm(Yl, Yh)
+ assert np.all(np.abs(vec_recon - vec) < TOLERANCE)
+
+
+@skip_if_no_tf
+def test_float32_input_inv():
+ # Check that an float32 input is correctly output as float32
+ Yl, Yh = dtwavexfm(np.array([1, 2, 3, 4]).astype(np.float32))
+ assert np.issubsctype(Yl.dtype, np.float32)
+ assert np.all(list(np.issubsctype(x.dtype, np.complex64) for x in Yh))
+
+ recon = dtwaveifm(Yl, Yh)
+ assert np.issubsctype(recon.dtype, np.float32)
+
+
+@skip_if_no_tf
+def test_numpy_in():
+ X = np.random.randn(100,100)
+ f = Transform1d()
+ p = f.forward(X)
+ f1 = Transform1d_np()
+ p1 = f1.forward(X)
+ np.testing.assert_array_almost_equal(
+ p.lowpass, p1.lowpass, decimal=PRECISION_DECIMAL)
+ for x,y in zip(p.highpasses, p1.highpasses):
+ np.testing.assert_array_almost_equal(x,y,decimal=PRECISION_DECIMAL)
+
+ X = np.random.randn(100,100)
+ p = f.forward(X, include_scale=True)
+ p1 = f1.forward(X, include_scale=True)
+ np.testing.assert_array_almost_equal(
+ p.lowpass, p1.lowpass, decimal=PRECISION_DECIMAL)
+ for x,y in zip(p.highpasses, p1.highpasses):
+ np.testing.assert_array_almost_equal(x,y,decimal=PRECISION_DECIMAL)
+ for x,y in zip(p.scales, p1.scales):
+ np.testing.assert_array_almost_equal(x,y,decimal=PRECISION_DECIMAL)
+
+
+@skip_if_no_tf
+def test_numpy_in_batch():
+ X = np.random.randn(5,100,100)
+
+ f = Transform1d()
+ p = f.forward_channels(X, include_scale=True)
+ f1 = Transform1d_np()
+ for i in range(5):
+ p1 = f1.forward(X[i], include_scale=True)
+ np.testing.assert_array_almost_equal(
+ p.lowpass[i], p1.lowpass, decimal=PRECISION_DECIMAL)
+ for x,y in zip(p.highpasses, p1.highpasses):
+ np.testing.assert_array_almost_equal(
+ x[i], y, decimal=PRECISION_DECIMAL)
+ for x,y in zip(p.scales, p1.scales):
+ np.testing.assert_array_almost_equal(
+ x[i], y, decimal=PRECISION_DECIMAL)
+
+
+
+# Test end to end with numpy inputs
+@skip_if_no_tf
+def test_1d_input():
+ f = Transform1d()
+ X = np.random.randn(100,)
+ p = f.forward(X)
+ x = f.inverse(p)
+ np.testing.assert_array_almost_equal(X,x,decimal=PRECISION_DECIMAL)
+
+
+@skip_if_no_tf
+def test_2d_input():
+ f = Transform1d()
+ X = np.random.randn(100,100)
+
+ p = f.forward(X)
+ x = f.inverse(p)
+ np.testing.assert_array_almost_equal(X,x,decimal=PRECISION_DECIMAL)
+
+
+@skip_if_no_tf
+def test_3d_input():
+ f = Transform1d()
+ X = np.random.randn(5,100,100)
+
+ p = f.forward_channels(X)
+ x = f.inverse_channels(p)
+ np.testing.assert_array_almost_equal(X,x,decimal=PRECISION_DECIMAL)
+
+
+# Test end to end with tf inputs
+@skip_if_no_tf
+def test_2d_input_ph():
+ xfm = Transform1d()
+ X = np.random.randn(100,)
+ X_p = tf.placeholder(tf.float32, [100,])
+ p = xfm.forward(X_p)
+ x = xfm.inverse(p)
+ with tf.Session() as sess:
+ sess.run(tf.global_variables_initializer())
+ np.testing.assert_array_almost_equal(
+ X, sess.run(x, {X_p:X}), decimal=PRECISION_DECIMAL)
+
+ X = np.random.randn(100,1)
+ X_p = tf.placeholder(tf.float32, [None, 100,1])
+ p = xfm.forward_channels(X_p)
+ x = xfm.inverse_channels(p)
+ with tf.Session() as sess:
+ sess.run(tf.global_variables_initializer())
+ np.testing.assert_array_almost_equal(
+ X, sess.run(x, {X_p:[X]})[0], decimal=PRECISION_DECIMAL)
+
+
+@skip_if_no_tf
+def test_return_type():
+ xfm = Transform1d()
+ X = np.random.randn(100,100)
+ p = xfm.forward(X)
+ x = xfm.inverse(p)
+ assert x.dtype in np_dtypes
+ X = tf.placeholder(tf.float32, [100,100])
+ p = xfm.forward(X)
+ x = xfm.inverse(p)
+ assert x.dtype in tf_dtypes
+ X = np.random.randn(5,100,100)
+ p = xfm.forward_channels(X)
+ x = xfm.inverse_channels(p)
+ assert x.dtype in np_dtypes
+ X = tf.placeholder(tf.float32, [None, 100,100])
+ p = xfm.forward_channels(X)
+ x = xfm.inverse_channels(p)
+ assert x.dtype in tf_dtypes
+
+
+@skip_if_no_tf
+@pytest.mark.parametrize("test_input,biort,qshift", [
+ (datasets.mandrill(),'antonini','qshift_a'),
+ (datasets.mandrill()[100:400,40:450],'legall','qshift_a'),
+ (datasets.mandrill(),'near_sym_a','qshift_c'),
+ (datasets.mandrill()[100:374,30:322],'near_sym_b','qshift_d'),
+])
+def test_results_match(test_input, biort, qshift):
+ """
+ Compare forward transform with numpy forward transform for mandrill image
+ """
+ im = test_input
+ f_np = Transform1d_np(biort=biort,qshift=qshift)
+ p_np = f_np.forward(im, include_scale=True)
+
+ f_tf = Transform1d(biort=biort,qshift=qshift)
+ p_tf = f_tf.forward(im, include_scale=True)
+
+ np.testing.assert_array_almost_equal(
+ p_np.lowpass, p_tf.lowpass, decimal=PRECISION_DECIMAL)
+ [np.testing.assert_array_almost_equal(
+ h_np, h_tf, decimal=PRECISION_DECIMAL) for h_np, h_tf in
+ zip(p_np.highpasses, p_tf.highpasses)]
+ [np.testing.assert_array_almost_equal(
+ s_np, s_tf, decimal=PRECISION_DECIMAL) for s_np, s_tf in
+ zip(p_np.scales, p_tf.scales)]
+
+
+@skip_if_no_tf
+@pytest.mark.parametrize("test_input,biort,qshift", [
+ (datasets.mandrill(),'antonini','qshift_c'),
+ (datasets.mandrill()[99:411,44:460],'near_sym_a','qshift_a'),
+ (datasets.mandrill(),'legall','qshift_c'),
+ (datasets.mandrill()[100:378,20:322],'near_sym_b','qshift_06'),
+])
+def test_results_match_inverse(test_input,biort,qshift):
+ im = test_input
+ f_np = Transform1d_np(biort=biort, qshift=qshift)
+ p_np = f_np.forward(im, nlevels=4, include_scale=True)
+ X_np = f_np.inverse(p_np)
+
+ # Run the tf forward transform to get a pyramid of the right shape
+ f_tf = Transform1d(biort=biort, qshift=qshift)
+ p_tf = f_tf.forward(im, nlevels=4, include_scale=True)
+
+ # Create ops for the inverse transform
+ X_tf = f_tf.inverse(p_tf)
+
+ np.testing.assert_array_almost_equal(
+ X_np, X_tf, decimal=PRECISION_DECIMAL)
+
+
+@skip_if_no_tf
+@pytest.mark.parametrize("biort,qshift,gain_mask", [
+ ('antonini','qshift_c',stats.bernoulli(0.8).rvs(size=(4))),
+ ('near_sym_a','qshift_a',stats.bernoulli(0.8).rvs(size=(4))),
+ ('legall','qshift_c',stats.bernoulli(0.8).rvs(size=(4))),
+ ('near_sym_b','qshift_06',stats.bernoulli(0.8).rvs(size=(4))),
+])
+def test_results_match_invmask(biort,qshift,gain_mask):
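+ # gain_mask (drawn from a Bernoulli distribution) randomly zeros the
+ # gain of whole levels; the masked inverse should match numpy's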
+ im = mandrill
+
+ f_np = Transform1d_np(biort=biort, qshift=qshift)
+ p_np = f_np.forward(im, nlevels=4, include_scale=True)
+ X_np = f_np.inverse(p_np, gain_mask)
+
+ f_tf = Transform1d(biort=biort, qshift=qshift)
+ p_tf = f_tf.forward(im, nlevels=4, include_scale=True)
+ X_tf = f_tf.inverse(p_tf, gain_mask)
+
+ np.testing.assert_array_almost_equal(
+ X_np, X_tf, decimal=PRECISION_DECIMAL)
+
+
+@skip_if_no_tf
+@pytest.mark.parametrize("test_input, biort, qshift", [
+ (datasets.mandrill(), 'antonini', 'qshift_06'),
+ (datasets.mandrill()[99:411, 44:460], 'near_sym_b', 'qshift_a'),
+ (datasets.mandrill(), 'near_sym_b', 'qshift_c'),
+ (datasets.mandrill()[100:378, 20:322], 'near_sym_a', 'qshift_a'),
+])
+def test_results_match_endtoend(test_input, biort, qshift):
+ im = test_input
+ f_np = Transform1d_np(biort=biort, qshift=qshift)
+ p_np = f_np.forward(im, nlevels=4, include_scale=True)
+ X_np = f_np.inverse(p_np)
+
+ in_p = tf.placeholder(tf.float32, [im.shape[0], im.shape[1]])
+ f_tf = Transform1d(biort=biort, qshift=qshift)
+ p_tf = f_tf.forward(in_p, nlevels=4, include_scale=True)
+ X = f_tf.inverse(p_tf)
+ with tf.Session() as sess:
+ X_tf = sess.run(X, feed_dict={in_p: im})
+
+ np.testing.assert_array_almost_equal(
+ X_np, X_tf, decimal=PRECISION_DECIMAL)
+
+
+# vim:sw=4:sts=4:et
diff --git a/tests/test_tfTransform2d.py b/tests/test_tfTransform2d.py
new file mode 100644
index 0000000..9907e92
--- /dev/null
+++ b/tests/test_tfTransform2d.py
@@ -0,0 +1,603 @@
+import os
+import pytest
+
+from pytest import raises
+
+import numpy as np
+from importlib import import_module
+import dtcwt
+from dtcwt.numpy import Transform2d as Transform2d_np
+from dtcwt.coeffs import biort, qshift
+from dtcwt.utils import unpack
+from scipy import stats
+import tests.datasets as datasets
+from .util import skip_if_no_tf
+import time
+
+PRECISION_DECIMAL = 5
+
+
+@skip_if_no_tf
+def setup():
+ # Import some tf only dependencies
+ global mandrill, Transform2d, Pyramid
+ global tf, np_dtypes, tf_dtypes, dtwavexfm2, dtwaveifm2
+ # Import the tensorflow modules
+ tf = import_module('tensorflow')
+ dtcwt.push_backend('tf')
+ Transform2d = getattr(dtcwt, 'Transform2d')
+ Pyramid = getattr(dtcwt, 'Pyramid')
+ compat = import_module('dtcwt.compat')
+ dtwavexfm2 = getattr(compat, 'dtwavexfm2')
+ dtwaveifm2 = getattr(compat, 'dtwaveifm2')
+ import dtcwt.tf.transform2d as transform2d
+ np_dtypes = getattr(transform2d, 'np_dtypes')
+ tf_dtypes = getattr(transform2d, 'tf_dtypes')
+
+ mandrill = datasets.mandrill()
+ # Make sure we run tests on cpu rather than gpus
+ os.environ["CUDA_VISIBLE_DEVICES"] = ""
+
+
+@skip_if_no_tf
+def test_mandrill_loaded():
+ assert mandrill.shape == (512, 512)
+ assert mandrill.min() >= 0
+ assert mandrill.max() <= 1
+ assert mandrill.dtype == np.float32
+
+
+@skip_if_no_tf
+def test_simple():
+ Yl, Yh = dtwavexfm2(mandrill)
+
+
+@skip_if_no_tf
+def test_specific_wavelet():
+ Yl, Yh = dtwavexfm2(mandrill, biort=biort('antonini'),
+ qshift=qshift('qshift_06'))
+
+
+@skip_if_no_tf
+def test_1d():
+ Yl, Yh = dtwavexfm2(mandrill[0,:])
+
+
+@skip_if_no_tf
+@pytest.mark.skip(reason='Not currently implemented')
+def test_3d():
+ with raises(ValueError):
+ Yl, Yh = dtwavexfm2(np.dstack((mandrill, mandrill)))
+
+
+@skip_if_no_tf
+def test_simple_w_scale():
+ Yl, Yh, Yscale = dtwavexfm2(mandrill, include_scale=True)
+ assert len(Yscale) > 0
+ for x in Yscale:
+ assert x is not None
+
+
+@skip_if_no_tf
+def test_odd_rows():
+ Yl, Yh = dtwavexfm2(mandrill[:509,:])
+
+
+@skip_if_no_tf
+def test_odd_rows_w_scale():
+ Yl, Yh, Yscale = dtwavexfm2(mandrill[:509,:], include_scale=True)
+
+
+@skip_if_no_tf
+def test_odd_cols():
+ Yl, Yh = dtwavexfm2(mandrill[:,:509])
+
+
+@skip_if_no_tf
+def test_odd_cols_w_scale():
+ Yl, Yh, Yscale = dtwavexfm2(mandrill[:,:509], include_scale=True)
+
+
+@skip_if_no_tf
+def test_odd_rows_and_cols():
+ Yl, Yh = dtwavexfm2(mandrill[:509,:509])
+
+
+@skip_if_no_tf
+def test_odd_rows_and_cols_w_scale():
+ Yl, Yh, Yscale = dtwavexfm2(mandrill[:509,:509], include_scale=True)
+
+
+@skip_if_no_tf
+def test_rot_symm_modified():
+ # This test only checks there is no error running these functions,
+ # not that they work
+ Yl, Yh, Yscale = dtwavexfm2(mandrill, biort='near_sym_b_bp',
+ qshift='qshift_b_bp', include_scale=True)
+ dtwaveifm2(Yl, Yh, biort='near_sym_b_bp', qshift='qshift_b_bp')
+
+
+@skip_if_no_tf
+def test_0_levels():
+ Yl, Yh = dtwavexfm2(mandrill, nlevels=0)
+ np.testing.assert_array_almost_equal(Yl, mandrill, PRECISION_DECIMAL)
+ assert len(Yh) == 0
+
+
+@skip_if_no_tf
+def test_0_levels_w_scale():
+ Yl, Yh, Yscale = dtwavexfm2(mandrill, nlevels=0, include_scale=True)
+ np.testing.assert_array_almost_equal(Yl, mandrill, PRECISION_DECIMAL)
+ assert len(Yh) == 0
+ assert len(Yscale) == 0
+
+
+@skip_if_no_tf
+def test_integer_input():
+ # Check that an integer input is correctly coerced into a floating point
+ # array
+ Yl, Yh = dtwavexfm2([[1,2,3,4], [1,2,3,4]])
+ assert np.any(Yl != 0)
+
+
+@skip_if_no_tf
+def test_integer_perfect_recon():
+ # Check that an integer input is correctly coerced into a floating point
+ # array and reconstructed
+ A = np.array([[1,2,3,4], [5,6,7,8]], dtype=np.int32)
+ Yl, Yh = dtwavexfm2(A)
+ B = dtwaveifm2(Yl, Yh)
+ assert np.max(np.abs(A - B)) < 1e-5
+
+
+@skip_if_no_tf
+def test_mandrill_perfect_recon():
+ # Check that an integer input is correctly coerced into a floating point
+ # array and reconstructed
+ Yl, Yh = dtwavexfm2(mandrill)
+ B = dtwaveifm2(Yl, Yh)
+ assert np.max(np.abs(mandrill - B)) < 1e-5
+
+
+@skip_if_no_tf
+def test_float32_input():
+ # Check that an float32 input is correctly output as float32
+ Yl, Yh = dtwavexfm2(mandrill.astype(np.float32))
+ assert np.issubsctype(Yl.dtype, np.float32)
+ assert np.all(list(np.issubsctype(x.dtype, np.complex64) for x in Yh))
+
+
+@skip_if_no_tf
+def test_numpy_in():
+ X = np.random.randn(100,100)
+ f = Transform2d()
+ p = f.forward(X)
+ f1 = Transform2d_np()
+ p1 = f1.forward(X)
+ np.testing.assert_array_almost_equal(
+ p.lowpass, p1.lowpass, decimal=PRECISION_DECIMAL)
+ for x,y in zip(p.highpasses, p1.highpasses):
+ np.testing.assert_array_almost_equal(x,y,decimal=PRECISION_DECIMAL)
+
+ X = np.random.randn(100,100)
+ p = f.forward(X, include_scale=True)
+ p1 = f1.forward(X, include_scale=True)
+ np.testing.assert_array_almost_equal(
+ p.lowpass, p1.lowpass, decimal=PRECISION_DECIMAL)
+ for x,y in zip(p.highpasses, p1.highpasses):
+ np.testing.assert_array_almost_equal(x,y,decimal=PRECISION_DECIMAL)
+ for x,y in zip(p.scales, p1.scales):
+ np.testing.assert_array_almost_equal(x,y,decimal=PRECISION_DECIMAL)
+
+
+@skip_if_no_tf
+@pytest.mark.parametrize("data_format", [
+ ("nhw"), ("chw"), ("hwn"), ("hwc")
+])
+def test_numpy_in_batch(data_format):
+ if data_format == "nhw" or data_format == "chw":
+ X = np.random.randn(5,100,100)
+ else:
+ X = np.random.randn(100,100,5)
+
+ f = Transform2d()
+ p = f.forward_channels(X, data_format=data_format, include_scale=True)
+ f1 = Transform2d_np()
+ for i in range(5):
+ if data_format == "nhw" or data_format == "chw":
+ p1 = f1.forward(X[i], include_scale=True)
+ np.testing.assert_array_almost_equal(
+ p.lowpass[i], p1.lowpass, decimal=PRECISION_DECIMAL)
+ for x,y in zip(p.highpasses, p1.highpasses):
+ np.testing.assert_array_almost_equal(
+ x[i], y, decimal=PRECISION_DECIMAL)
+ for x,y in zip(p.scales, p1.scales):
+ np.testing.assert_array_almost_equal(
+ x[i], y, decimal=PRECISION_DECIMAL)
+ else:
+ p1 = f1.forward(X[:,:,i], include_scale=True)
+ np.testing.assert_array_almost_equal(
+ p.lowpass[:,:,i], p1.lowpass, decimal=PRECISION_DECIMAL)
+ for x,y in zip(p.highpasses, p1.highpasses):
+ np.testing.assert_array_almost_equal(
+ x[:,:,i], y, decimal=PRECISION_DECIMAL)
+ for x,y in zip(p.scales, p1.scales):
+ np.testing.assert_array_almost_equal(
+ x[:,:,i], y, decimal=PRECISION_DECIMAL)
+
+
+@skip_if_no_tf
+@pytest.mark.parametrize("data_format", [
+ ("nhwc"), ("nchw")
+])
+def test_numpy_batch_ch(data_format):
+ if data_format == "nhwc":
+ X = np.random.randn(5,100,100,4)
+ else:
+ X = np.random.randn(5,4,100,100)
+
+ f = Transform2d()
+ p = f.forward_channels(X, data_format=data_format, include_scale=True)
+ f1 = Transform2d_np()
+ for i in range(5):
+ for j in range(4):
+ if data_format == "nhwc":
+ p1 = f1.forward(X[i,:,:,j], include_scale=True)
+ np.testing.assert_array_almost_equal(
+ p.lowpass[i,:,:,j], p1.lowpass, decimal=PRECISION_DECIMAL)
+ for x,y in zip(p.highpasses, p1.highpasses):
+ np.testing.assert_array_almost_equal(
+ x[i,:,:,j], y, decimal=PRECISION_DECIMAL)
+ for x,y in zip(p.scales, p1.scales):
+ np.testing.assert_array_almost_equal(
+ x[i,:,:,j], y, decimal=PRECISION_DECIMAL)
+ else:
+ p1 = f1.forward(X[i,j], include_scale=True)
+ np.testing.assert_array_almost_equal(
+ p.lowpass[i,j], p1.lowpass, decimal=PRECISION_DECIMAL)
+ for x,y in zip(p.highpasses, p1.highpasses):
+ np.testing.assert_array_almost_equal(
+ x[i,j], y, decimal=PRECISION_DECIMAL)
+ for x,y in zip(p.scales, p1.scales):
+ np.testing.assert_array_almost_equal(
+ x[i,j], y, decimal=PRECISION_DECIMAL)
+
+
+# Test end to end with numpy inputs
+@skip_if_no_tf
+def test_2d_input():
+ f = Transform2d()
+ X = np.random.randn(100,100)
+ p = f.forward(X)
+ x = f.inverse(p)
+ np.testing.assert_array_almost_equal(X,x,decimal=PRECISION_DECIMAL)
+
+
+@skip_if_no_tf
+@pytest.mark.parametrize("data_format", [
+ ("nhw"), ("hwn")
+])
+def test_3d_input(data_format):
+ f = Transform2d()
+ if data_format == "nhw":
+ X = np.random.randn(5,100,100)
+ else:
+ X = np.random.randn(100,100,5)
+
+ p = f.forward_channels(X,data_format=data_format)
+ x = f.inverse_channels(p,data_format=data_format)
+ np.testing.assert_array_almost_equal(X,x,decimal=PRECISION_DECIMAL)
+
+
+@skip_if_no_tf
+@pytest.mark.parametrize("data_format", [
+ ("nhwc"), ("nchw")
+])
+def test_4d_input(data_format):
+ f = Transform2d()
+ if data_format == "nhwc":
+ X = np.random.randn(5,100,100,4)
+ else:
+ X = np.random.randn(5,4,100,100)
+ p = f.forward_channels(X,data_format=data_format)
+ x = f.inverse_channels(p,data_format=data_format)
+ np.testing.assert_array_almost_equal(X,x,decimal=PRECISION_DECIMAL)
+
+
+# Test end to end with tf inputs
+@skip_if_no_tf
+def test_2d_input_ph():
+ xfm = Transform2d()
+ X = np.random.randn(100,100)
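+    # A placeholder input keeps the whole transform as TF ops until sess.run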
+ X_p = tf.placeholder(tf.float32, [100,100])
+ p = xfm.forward(X_p)
+ x = xfm.inverse(p)
+ with tf.Session() as sess:
+ sess.run(tf.global_variables_initializer())
+ np.testing.assert_array_almost_equal(
+ X, sess.run(x, {X_p:X}), decimal=PRECISION_DECIMAL)
+
+ X_p = tf.placeholder(tf.float32, [None, 100,100])
+ p = xfm.forward_channels(X_p,data_format="nhw")
+ x = xfm.inverse_channels(p,data_format="nhw")
+ with tf.Session() as sess:
+ sess.run(tf.global_variables_initializer())
+ np.testing.assert_array_almost_equal(
+ X, sess.run(x, {X_p:[X]})[0], decimal=PRECISION_DECIMAL)
+
+
+@skip_if_no_tf
+def test_3d_input_ph():
+ xfm = Transform2d()
+ X = np.random.randn(5,100,100)
+ X_p = tf.placeholder(tf.float32, [None,100,100])
+ p = xfm.forward_channels(X_p,data_format="nhw")
+ x = xfm.inverse_channels(p,data_format="nhw")
+ with tf.Session() as sess:
+ sess.run(tf.global_variables_initializer())
+ np.testing.assert_array_almost_equal(
+ X, sess.run(x, {X_p:X}), decimal=PRECISION_DECIMAL)
+
+
+@skip_if_no_tf
+def test_4d_input_ph():
+ xfm = Transform2d()
+ X = np.random.randn(5,100,100,4)
+ X_p = tf.placeholder(tf.float32, [None,100,100,4])
+ p = xfm.forward_channels(X_p,data_format="nhwc")
+ x = xfm.inverse_channels(p,data_format="nhwc")
+ with tf.Session() as sess:
+ sess.run(tf.global_variables_initializer())
+ np.testing.assert_array_almost_equal(
+ X, sess.run(x, {X_p:X}), decimal=PRECISION_DECIMAL)
+
+
+@skip_if_no_tf
+def test_return_type():
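+    # NumPy input should come back as NumPy; a TF tensor input should stay TF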
+ xfm = Transform2d()
+ X = np.random.randn(100,100)
+ p = xfm.forward(X)
+ x = xfm.inverse(p)
+ assert x.dtype in np_dtypes
+ X = tf.placeholder(tf.float32, [100,100])
+ p = xfm.forward(X)
+ x = xfm.inverse(p)
+ assert x.dtype in tf_dtypes
+ xfm = Transform2d()
+ X = np.random.randn(5,100,100,4)
+ p = xfm.forward_channels(X,data_format="nhwc")
+ x = xfm.inverse_channels(p,data_format="nhwc")
+ assert x.dtype in np_dtypes
+ xfm = Transform2d()
+ X = tf.placeholder(tf.float32, [None, 100,100,4])
+ p = xfm.forward_channels(X,data_format="nhwc")
+ x = xfm.inverse_channels(p,data_format="nhwc")
+ assert x.dtype in tf_dtypes
+
+
+@skip_if_no_tf
+@pytest.mark.parametrize("test_input,biort,qshift", [
+ (datasets.mandrill(),'antonini','qshift_a'),
+ (datasets.mandrill()[100:400,40:450],'legall','qshift_a'),
+ (datasets.mandrill(),'near_sym_a','qshift_c'),
+ (datasets.mandrill()[100:375,30:322],'near_sym_b','qshift_d'),
+ (datasets.mandrill(),'near_sym_b_bp', 'qshift_b_bp')
+])
+def test_results_match(test_input, biort, qshift):
+ """
+ Compare forward transform with numpy forward transform for mandrill image
+ """
+ im = test_input
+ f_np = Transform2d_np(biort=biort,qshift=qshift)
+ p_np = f_np.forward(im, include_scale=True)
+
+ f_tf = Transform2d(biort=biort,qshift=qshift)
+ p_tf = f_tf.forward(im, include_scale=True)
+
+ np.testing.assert_array_almost_equal(
+ p_np.lowpass, p_tf.lowpass, decimal=PRECISION_DECIMAL)
+    for h_np, h_tf in zip(p_np.highpasses, p_tf.highpasses):
+        np.testing.assert_array_almost_equal(
+            h_np, h_tf, decimal=PRECISION_DECIMAL)
+    for s_np, s_tf in zip(p_np.scales, p_tf.scales):
+        np.testing.assert_array_almost_equal(
+            s_np, s_tf, decimal=PRECISION_DECIMAL)
+
+
+@skip_if_no_tf
+@pytest.mark.parametrize("test_input,biort,qshift", [
+ (datasets.mandrill(),'antonini','qshift_c'),
+ (datasets.mandrill()[100:411,44:460],'near_sym_a','qshift_a'),
+ (datasets.mandrill(),'legall','qshift_c'),
+ (datasets.mandrill()[100:378,20:322],'near_sym_b','qshift_06'),
+ (datasets.mandrill(),'near_sym_b_bp', 'qshift_b_bp')
+])
+def test_results_match_inverse(test_input,biort,qshift):
+ im = test_input
+ f_np = Transform2d_np(biort=biort, qshift=qshift)
+ p_np = f_np.forward(im, nlevels=4, include_scale=True)
+ X_np = f_np.inverse(p_np)
+
+    # Run the TF forward transform to build a pyramid of matching shape
+ f_tf = Transform2d(biort=biort, qshift=qshift)
+ p_tf = f_tf.forward(im, nlevels=4, include_scale=True)
+
+ # Create ops for the inverse transform
+ X_tf = f_tf.inverse(p_tf)
+
+ np.testing.assert_array_almost_equal(
+ X_np, X_tf, decimal=PRECISION_DECIMAL)
+
+
+@skip_if_no_tf
+@pytest.mark.parametrize("biort,qshift,gain_mask", [
+ ('antonini','qshift_c',stats.bernoulli(0.8).rvs(size=(6,4))),
+ ('near_sym_a','qshift_a',stats.bernoulli(0.8).rvs(size=(6,4))),
+ ('legall','qshift_c',stats.bernoulli(0.8).rvs(size=(6,4))),
+ ('near_sym_b','qshift_06',stats.bernoulli(0.8).rvs(size=(6,4))),
+ ('near_sym_b_bp', 'qshift_b_bp',stats.bernoulli(0.8).rvs(size=(6,4)))
+])
+def test_results_match_invmask(biort,qshift,gain_mask):
+ im = mandrill
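+    # A random binary gain_mask switches individual subbands on or off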
+
+ f_np = Transform2d_np(biort=biort, qshift=qshift)
+ p_np = f_np.forward(im, nlevels=4, include_scale=True)
+ X_np = f_np.inverse(p_np, gain_mask)
+
+ f_tf = Transform2d(biort=biort, qshift=qshift)
+ p_tf = f_tf.forward(im, nlevels=4, include_scale=True)
+ X_tf = f_tf.inverse(p_tf, gain_mask)
+
+ np.testing.assert_array_almost_equal(
+ X_np, X_tf, decimal=PRECISION_DECIMAL)
+
+
+@skip_if_no_tf
+@pytest.mark.parametrize("test_input, biort, qshift", [
+ (datasets.mandrill(), 'antonini', 'qshift_06'),
+ (datasets.mandrill()[100:411, 44:460], 'near_sym_b', 'qshift_a'),
+ (datasets.mandrill(), 'near_sym_b', 'qshift_c'),
+ (datasets.mandrill()[100:378, 20:322], 'near_sym_a', 'qshift_a'),
+ (datasets.mandrill(), 'near_sym_b_bp', 'qshift_b_bp')
+])
+def test_results_match_endtoend(test_input, biort, qshift):
+ im = test_input
+ f_np = Transform2d_np(biort=biort, qshift=qshift)
+ p_np = f_np.forward(im, nlevels=4, include_scale=True)
+ X_np = f_np.inverse(p_np)
+
+ in_p = tf.placeholder(tf.float32, [im.shape[0], im.shape[1]])
+ f_tf = Transform2d(biort=biort, qshift=qshift)
+ p_tf = f_tf.forward(in_p, nlevels=4, include_scale=True)
+ X = f_tf.inverse(p_tf)
+ with tf.Session() as sess:
+ X_tf = sess.run(X, feed_dict={in_p: im})
+
+ np.testing.assert_array_almost_equal(
+ X_np, X_tf, decimal=PRECISION_DECIMAL)
+
+
+@skip_if_no_tf
+@pytest.mark.parametrize("data_format", [
+ ("nhwc"),
+ ("nchw"),
+])
+def test_forward_channels(data_format):
+ batch = 5
+ c = 3
+ nlevels = 3
+ sess = tf.Session()
+
+ if data_format == "nhwc":
+ ims = np.random.randn(batch, 100, 100, c)
+ in_p = tf.placeholder(tf.float32, [None, 100, 100, c])
+ else:
+ ims = np.random.randn(batch, c, 100, 100)
+ in_p = tf.placeholder(tf.float32, [None, c, 100, 100])
+
+ # Transform a set of images with forward_channels
+ f_tf = Transform2d(biort='near_sym_b_bp', qshift='qshift_b_bp')
+ start = time.time()
+ Yl, Yh, Yscale = unpack(
+ f_tf.forward_channels(in_p, nlevels=nlevels, data_format=data_format,
+ include_scale=True), 'tf')
+
+ Yl, Yh, Yscale = sess.run([Yl, Yh, Yscale], {in_p: ims})
+ print("That took {:.2f}s".format(time.time() - start))
+
+ # Now do it channel by channel
+ in_p2 = tf.placeholder(tf.float32, [None, 100, 100])
+ p_tf = f_tf.forward_channels(in_p2, data_format="nhw", nlevels=nlevels,
+ include_scale=True)
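+    # Each channel transformed on its own must match its slice of the batch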
+ for i in range(c):
+ if data_format == "nhwc":
+ Yl1, Yh1, Yscale1 = sess.run([p_tf.lowpass_op,
+ p_tf.highpasses_ops,
+ p_tf.scales_ops],
+ {in_p2: ims[:,:,:,i]})
+ np.testing.assert_array_almost_equal(
+ Yl[:,:,:,i], Yl1, decimal=PRECISION_DECIMAL)
+ for j in range(nlevels):
+ np.testing.assert_array_almost_equal(
+ Yh[j][:,:,:,i,:], Yh1[j], decimal=PRECISION_DECIMAL)
+ np.testing.assert_array_almost_equal(
+ Yscale[j][:,:,:,i], Yscale1[j], decimal=PRECISION_DECIMAL)
+ else:
+ Yl1, Yh1, Yscale1 = sess.run([p_tf.lowpass_op,
+ p_tf.highpasses_ops,
+ p_tf.scales_ops],
+ {in_p2: ims[:,i]})
+ np.testing.assert_array_almost_equal(
+ Yl[:,i], Yl1, decimal=PRECISION_DECIMAL)
+ for j in range(nlevels):
+ np.testing.assert_array_almost_equal(
+ Yh[j][:,i], Yh1[j], decimal=PRECISION_DECIMAL)
+ np.testing.assert_array_almost_equal(
+ Yscale[j][:,i], Yscale1[j], decimal=PRECISION_DECIMAL)
+ sess.close()
+
+
+@skip_if_no_tf
+@pytest.mark.parametrize("data_format", [
+ ("nhwc"),
+ ("nchw"),
+])
+def test_inverse_channels(data_format):
+ batch = 5
+ c = 3
+ nlevels = 3
+ sess = tf.Session()
+
+ # Create the tensors of the right shape by calling the forward function
+ if data_format == "nhwc":
+ ims = np.random.randn(batch, 100, 100, c)
+ in_p = tf.placeholder(tf.float32, [None, 100, 100, c])
+ f_tf = Transform2d(biort='near_sym_b_bp', qshift='qshift_b_bp')
+ Yl, Yh = unpack(
+ f_tf.forward_channels(in_p, nlevels=nlevels,
+ data_format=data_format), 'tf')
+ else:
+ ims = np.random.randn(batch, c, 100, 100)
+ in_p = tf.placeholder(tf.float32, [None, c, 100, 100])
+ f_tf = Transform2d(biort='near_sym_b_bp', qshift='qshift_b_bp')
+ Yl, Yh = unpack(f_tf.forward_channels(
+ in_p, nlevels=nlevels, data_format=data_format), 'tf')
+
+ # Call the inverse_channels function
+ start = time.time()
+ X = f_tf.inverse_channels(Pyramid(Yl, Yh), data_format=data_format)
+ X, Yl, Yh = sess.run([X, Yl, Yh], {in_p: ims})
+ print("That took {:.2f}s".format(time.time() - start))
+
+ # Now do it channel by channel
+ in_p2 = tf.zeros((batch, 100, 100), tf.float32)
+ p_tf = f_tf.forward_channels(in_p2, nlevels=nlevels, data_format="nhw",
+ include_scale=False)
+ X_t = f_tf.inverse_channels(p_tf,data_format="nhw")
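+    # Invert each channel separately and compare with the batched inverse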
+ for i in range(c):
+ Yh1 = []
+ if data_format == "nhwc":
+ Yl1 = Yl[:,:,:,i]
+ for j in range(nlevels):
+ Yh1.append(Yh[j][:,:,:,i])
+ else:
+ Yl1 = Yl[:,i]
+ for j in range(nlevels):
+ Yh1.append(Yh[j][:,i])
+
+        # Feed the per-channel coefficients into the inverse graph
+ sess.run(tf.global_variables_initializer())
+ X1 = sess.run(X_t, {p_tf.lowpass_op: Yl1, p_tf.highpasses_ops: Yh1})
+
+ if data_format == "nhwc":
+ np.testing.assert_array_almost_equal(
+ X[:,:,:,i], X1, decimal=PRECISION_DECIMAL)
+ else:
+ np.testing.assert_array_almost_equal(
+ X[:,i], X1, decimal=PRECISION_DECIMAL)
+
+ sess.close()
+
+# vim:sw=4:sts=4:et
diff --git a/tests/test_tfcoldfilt.py b/tests/test_tfcoldfilt.py
new file mode 100644
index 0000000..096038c
--- /dev/null
+++ b/tests/test_tfcoldfilt.py
@@ -0,0 +1,104 @@
+from pytest import raises
+
+import numpy as np
+from dtcwt.coeffs import qshift
+from dtcwt.numpy.lowlevel import coldfilt as np_coldfilt
+from importlib import import_module
+
+from tests.util import skip_if_no_tf
+import tests.datasets as datasets
+
+
+@skip_if_no_tf
+def test_setup():
+ global mandrill, mandrill_t, tf, coldfilt
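+    # Import tensorflow lazily inside the test body so that collection
+    # still succeeds on machines without it installed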
+ tf = import_module('tensorflow')
+ lowlevel = import_module('dtcwt.tf.lowlevel')
+ coldfilt = getattr(lowlevel, 'coldfilt')
+
+ mandrill = datasets.mandrill()
+ mandrill_t = tf.expand_dims(tf.constant(mandrill, dtype=tf.float32),axis=0)
+
+
+@skip_if_no_tf
+def test_mandrill_loaded():
+ assert mandrill.shape == (512, 512)
+ assert mandrill.min() >= 0
+ assert mandrill.max() <= 1
+ assert mandrill.dtype == np.float32
+ assert mandrill_t.get_shape() == (1, 512, 512)
+
+
+@skip_if_no_tf
+def test_odd_filter():
+ with raises(ValueError):
+ coldfilt(mandrill_t, (-1,2,-1), (-1,2,1))
+
+
+@skip_if_no_tf
+def test_different_size():
+ with raises(ValueError):
+ coldfilt(mandrill_t, (-0.5,-1,2,1,0.5), (-1,2,-1))
+
+
+@skip_if_no_tf
+def test_bad_input_size():
+ with raises(ValueError):
+ coldfilt(mandrill_t[:,:511,:], (-1,1), (1,-1))
+
+
+@skip_if_no_tf
+def test_good_input_size():
+ coldfilt(mandrill_t[:,:,:511], (-1,1), (1,-1))
+
+
+@skip_if_no_tf
+def test_good_input_size_non_orthogonal():
+ coldfilt(mandrill_t[:,:,:511], (1,1), (1,1))
+
+
+@skip_if_no_tf
+def test_output_size():
+ y_op = coldfilt(mandrill_t, (-1,1), (1,-1))
+    assert y_op.shape[1:] == (mandrill.shape[0]//2, mandrill.shape[1])
+
+
+@skip_if_no_tf
+def test_equal_small_in():
+ ha = qshift('qshift_b')[0]
+ hb = qshift('qshift_b')[1]
+ im = mandrill[0:4,0:4]
+ im_t = tf.expand_dims(tf.constant(im, tf.float32), axis=0)
+ ref = np_coldfilt(im, ha, hb)
+ y_op = coldfilt(im_t, ha, hb)
+ with tf.Session() as sess:
+ y = sess.run(y_op)
+ np.testing.assert_array_almost_equal(y[0], ref, decimal=4)
+
+
+@skip_if_no_tf
+def test_equal_numpy_qshift1():
+ ha = qshift('qshift_c')[0]
+ hb = qshift('qshift_c')[1]
+ ref = np_coldfilt(mandrill, ha, hb)
+ y_op = coldfilt(mandrill_t, ha, hb)
+ with tf.Session() as sess:
+ y = sess.run(y_op)
+ np.testing.assert_array_almost_equal(y[0], ref, decimal=4)
+
+
+@skip_if_no_tf
+def test_equal_numpy_qshift2():
+ ha = qshift('qshift_c')[0]
+ hb = qshift('qshift_c')[1]
+ im = mandrill[:508, :502]
+ im_t = tf.expand_dims(tf.constant(im, tf.float32), axis=0)
+ ref = np_coldfilt(im, ha, hb)
+ y_op = coldfilt(im_t, ha, hb)
+ with tf.Session() as sess:
+ y = sess.run(y_op)
+ np.testing.assert_array_almost_equal(y[0], ref, decimal=4)
+
+# vim:sw=4:sts=4:et
diff --git a/tests/test_tfcolfilter.py b/tests/test_tfcolfilter.py
new file mode 100644
index 0000000..6a3a9f3
--- /dev/null
+++ b/tests/test_tfcolfilter.py
@@ -0,0 +1,122 @@
+import numpy as np
+from dtcwt.coeffs import biort, qshift
+from dtcwt.numpy.lowlevel import colfilter as np_colfilter
+from importlib import import_module
+
+from tests.util import skip_if_no_tf
+import tests.datasets as datasets
+
+
+@skip_if_no_tf
+def test_setup():
+ global mandrill, mandrill_t, tf, colfilter
+ tf = import_module('tensorflow')
+ lowlevel = import_module('dtcwt.tf.lowlevel')
+ colfilter = getattr(lowlevel, 'colfilter')
+
+ mandrill = datasets.mandrill()
+ mandrill_t = tf.expand_dims(tf.constant(mandrill, dtype=tf.float32),axis=0)
+
+
+@skip_if_no_tf
+def test_mandrill_loaded():
+ assert mandrill.shape == (512, 512)
+ assert mandrill.min() >= 0
+ assert mandrill.max() <= 1
+ assert mandrill.dtype == np.float32
+ assert mandrill_t.get_shape() == (1, 512, 512)
+
+
+@skip_if_no_tf
+def test_odd_size():
+ y_op = colfilter(mandrill_t, [-1,2,-1])
+ assert y_op.get_shape()[1:] == mandrill.shape
+
+
+@skip_if_no_tf
+def test_even_size():
+ y_op = colfilter(mandrill_t, [-1,-1])
+ assert y_op.get_shape()[1:] == (mandrill.shape[0]+1, mandrill.shape[1])
+
+
+@skip_if_no_tf
+def test_qshift():
+ h = qshift('qshift_a')[0]
+ y_op = colfilter(mandrill_t, h)
+ assert y_op.get_shape()[1:] == (mandrill.shape[0]+1, mandrill.shape[1])
+
+
+@skip_if_no_tf
+def test_biort():
+ h = biort('antonini')[0]
+ y_op = colfilter(mandrill_t, h)
+ assert y_op.get_shape()[1:] == mandrill.shape
+
+
+@skip_if_no_tf
+def test_even_size_batch():
+ zero_t = tf.zeros([1, mandrill.shape[0], mandrill.shape[1]], tf.float32)
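+    # An even-length filter extends the output by one row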
+ y_op = colfilter(zero_t, [-1,1])
+ assert y_op.get_shape()[1:] == (mandrill.shape[0]+1, mandrill.shape[1])
+ with tf.Session() as sess:
+ y = sess.run(y_op)
+        assert np.all(y == 0.0)
+
+
+@skip_if_no_tf
+def test_equal_small_in():
+ h = qshift('qshift_b')[0]
+ im = mandrill[0:4,0:4]
+ im_t = tf.expand_dims(tf.constant(im, tf.float32), axis=0)
+ ref = np_colfilter(im, h)
+ y_op = colfilter(im_t, h)
+ with tf.Session() as sess:
+ y = sess.run(y_op)
+ np.testing.assert_array_almost_equal(y[0], ref, decimal=4)
+
+
+@skip_if_no_tf
+def test_equal_numpy_biort1():
+ h = biort('near_sym_b')[0]
+ ref = np_colfilter(mandrill, h)
+ y_op = colfilter(mandrill_t, h)
+ with tf.Session() as sess:
+ y = sess.run(y_op)
+ np.testing.assert_array_almost_equal(y[0], ref, decimal=4)
+
+
+@skip_if_no_tf
+def test_equal_numpy_biort2():
+ h = biort('near_sym_b')[0]
+ im = mandrill[52:407, 30:401]
+ im_t = tf.expand_dims(tf.constant(im, tf.float32), axis=0)
+ ref = np_colfilter(im, h)
+ y_op = colfilter(im_t, h)
+ with tf.Session() as sess:
+ y = sess.run(y_op)
+ np.testing.assert_array_almost_equal(y[0], ref, decimal=4)
+
+
+@skip_if_no_tf
+def test_equal_numpy_qshift1():
+ h = qshift('qshift_c')[0]
+ ref = np_colfilter(mandrill, h)
+ y_op = colfilter(mandrill_t, h)
+ with tf.Session() as sess:
+ y = sess.run(y_op)
+ np.testing.assert_array_almost_equal(y[0], ref, decimal=4)
+
+
+@skip_if_no_tf
+def test_equal_numpy_qshift2():
+ h = qshift('qshift_c')[0]
+ im = mandrill[52:407, 30:401]
+ im_t = tf.expand_dims(tf.constant(im, tf.float32), axis=0)
+ ref = np_colfilter(im, h)
+ y_op = colfilter(im_t, h)
+ with tf.Session() as sess:
+ y = sess.run(y_op)
+ np.testing.assert_array_almost_equal(y[0], ref, decimal=4)
+
+# vim:sw=4:sts=4:et
diff --git a/tests/test_tfcolifilt.py b/tests/test_tfcolifilt.py
new file mode 100644
index 0000000..3cc57c8
--- /dev/null
+++ b/tests/test_tfcolifilt.py
@@ -0,0 +1,125 @@
+import pytest
+from pytest import raises
+
+import numpy as np
+from dtcwt.coeffs import qshift
+from dtcwt.numpy.lowlevel import colifilt as np_colifilt
+from importlib import import_module
+
+from tests.util import skip_if_no_tf
+import tests.datasets as datasets
+
+
+@skip_if_no_tf
+def test_setup():
+ global mandrill, mandrill_t, tf, colifilt
+ tf = import_module('tensorflow')
+ lowlevel = import_module('dtcwt.tf.lowlevel')
+ colifilt = getattr(lowlevel, 'colifilt')
+
+ mandrill = datasets.mandrill()
+ mandrill_t = tf.expand_dims(tf.constant(mandrill, dtype=tf.float32),axis=0)
+
+
+@skip_if_no_tf
+def test_mandrill_loaded():
+ assert mandrill.shape == (512, 512)
+ assert mandrill.min() >= 0
+ assert mandrill.max() <= 1
+ assert mandrill.dtype == np.float32
+ assert mandrill_t.get_shape() == (1, 512, 512)
+
+
+@skip_if_no_tf
+def test_odd_filter():
+ with raises(ValueError):
+ colifilt(mandrill_t, (-1,2,-1), (-1,2,1))
+
+
+@skip_if_no_tf
+def test_different_size_h():
+ with raises(ValueError):
+ colifilt(mandrill_t, (-1,2,1), (-0.5,-1,2,-1,0.5))
+
+
+@skip_if_no_tf
+def test_zero_input():
+ Y = colifilt(mandrill_t, (-1,1), (1,-1))
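+    # The feed_dict overrides the constant mandrill_t tensor with zeros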
+ with tf.Session() as sess:
+ y = sess.run(Y, {mandrill_t : [np.zeros_like(mandrill)]})[0]
+    assert np.all(y == 0)
+
+
+@skip_if_no_tf
+def test_bad_input_size():
+ with raises(ValueError):
+ colifilt(mandrill_t[:,:511,:], (-1,1), (1,-1))
+
+
+@skip_if_no_tf
+def test_good_input_size():
+ colifilt(mandrill_t[:,:,:511], (-1,1), (1,-1))
+
+
+@skip_if_no_tf
+def test_output_size():
+ Y = colifilt(mandrill_t, (-1,1), (1,-1))
+ assert Y.shape[1:] == (mandrill.shape[0]*2, mandrill.shape[1])
+
+
+@skip_if_no_tf
+def test_non_orthogonal_input():
+ Y = colifilt(mandrill_t, (1,1), (1,1))
+ assert Y.shape[1:] == (mandrill.shape[0]*2, mandrill.shape[1])
+
+
+@skip_if_no_tf
+def test_output_size_non_mult_4():
+ Y = colifilt(mandrill_t, (-1,0,0,1), (1,0,0,-1))
+ assert Y.shape[1:] == (mandrill.shape[0]*2, mandrill.shape[1])
+
+
+@skip_if_no_tf
+def test_non_orthogonal_input_non_mult_4():
+ Y = colifilt(mandrill_t, (1,0,0,1), (1,0,0,1))
+ assert Y.shape[1:] == (mandrill.shape[0]*2, mandrill.shape[1])
+
+
+@skip_if_no_tf
+def test_equal_small_in():
+ ha = qshift('qshift_b')[0]
+ hb = qshift('qshift_b')[1]
+ im = mandrill[0:4,0:4]
+ im_t = tf.expand_dims(tf.constant(im, tf.float32), axis=0)
+ ref = np_colifilt(im, ha, hb)
+ y_op = colifilt(im_t, ha, hb)
+ with tf.Session() as sess:
+ y = sess.run(y_op)
+ np.testing.assert_array_almost_equal(y[0], ref, decimal=4)
+
+
+@skip_if_no_tf
+def test_equal_numpy_qshift1():
+ ha = qshift('qshift_c')[0]
+ hb = qshift('qshift_c')[1]
+ ref = np_colifilt(mandrill, ha, hb)
+ y_op = colifilt(mandrill_t, ha, hb)
+ with tf.Session() as sess:
+ y = sess.run(y_op)
+ np.testing.assert_array_almost_equal(y[0], ref, decimal=4)
+
+
+@skip_if_no_tf
+def test_equal_numpy_qshift2():
+ ha = qshift('qshift_c')[0]
+ hb = qshift('qshift_c')[1]
+ im = mandrill[:508, :502]
+ im_t = tf.expand_dims(tf.constant(im, tf.float32), axis=0)
+ ref = np_colifilt(im, ha, hb)
+ y_op = colifilt(im_t, ha, hb)
+ with tf.Session() as sess:
+ y = sess.run(y_op)
+ np.testing.assert_array_almost_equal(y[0], ref, decimal=4)
+
+# vim:sw=4:sts=4:et
diff --git a/tests/test_tfinputshapes.py b/tests/test_tfinputshapes.py
new file mode 100644
index 0000000..54e11b0
--- /dev/null
+++ b/tests/test_tfinputshapes.py
@@ -0,0 +1,173 @@
+import os
+import pytest
+
+from importlib import import_module
+
+from .util import skip_if_no_tf
+from dtcwt.utils import unpack
+import dtcwt
+import dtcwt.compat
+
+PRECISION_DECIMAL = 5
+
+
+@skip_if_no_tf
+def setup():
+ global tf
+ tf = import_module('tensorflow')
+ dtcwt.push_backend('tf')
+
+    # Make sure we run tests on the CPU rather than on GPUs
+ os.environ["CUDA_VISIBLE_DEVICES"] = ""
+
+
+@skip_if_no_tf
+@pytest.mark.parametrize("nlevels, include_scale", [
+ (2,False),
+ (2,True),
+ (4,False),
+ (3,True)
+])
+def test_scales(nlevels, include_scale):
+ in_ = tf.placeholder(tf.float32, [512, 512])
+ t = dtcwt.Transform2d()
+
+ p = t.forward(in_, nlevels, include_scale)
+
+ # At level 1, the lowpass output will be the same size as the input. At
+ # levels above that, it will be half the size per level
+ extent = 512 * 2**(-(nlevels-1))
+ assert p.lowpass_op.get_shape().as_list() == [extent, extent]
+ assert p.lowpass_op.dtype == tf.float32
+
+ for i in range(nlevels):
+ extent = 512 * 2**(-(i+1))
+        assert (p.highpasses_ops[i].get_shape().as_list() ==
+                [extent, extent, 6])
+        assert p.highpasses_ops[i].dtype == tf.complex64
+ if include_scale:
+ assert (p.scales_ops[i].get_shape().as_list() ==
+ [2*extent, 2*extent])
+ assert p.scales_ops[i].dtype == tf.float32
+
+
+@skip_if_no_tf
+@pytest.mark.parametrize("nlevels, include_scale", [
+ (2,False),
+ (2,True),
+ (4,False),
+ (3,True)
+])
+def test_2d_input_tuple(nlevels, include_scale):
+ in_ = tf.placeholder(tf.float32, [512, 512])
+ t = dtcwt.Transform2d()
+ if include_scale:
+ Yl, Yh, Yscale = unpack(t.forward(in_, nlevels, include_scale), 'tf')
+ else:
+ Yl, Yh = unpack(t.forward(in_, nlevels, include_scale), 'tf')
+
+ # At level 1, the lowpass output will be the same size as the input. At
+ # levels above that, it will be half the size per level
+ extent = 512 * 2**(-(nlevels-1))
+ assert Yl.get_shape().as_list() == [extent, extent]
+ assert Yl.dtype == tf.float32
+
+ for i in range(nlevels):
+ extent = 512 * 2**(-(i+1))
+ assert Yh[i].get_shape().as_list() == [extent, extent, 6]
+ assert Yh[i].dtype == tf.complex64
+ if include_scale:
+ assert Yscale[i].get_shape().as_list() == [2*extent, 2*extent]
+ assert Yscale[i].dtype == tf.float32
+
+
+@skip_if_no_tf
+@pytest.mark.parametrize("nlevels, include_scale, batch_size", [
+ (2,False,None),
+ (2,True,10),
+ (4,False,None),
+ (3,True,2)
+])
+def test_batch_input(nlevels, include_scale, batch_size):
+ in_ = tf.placeholder(tf.float32, [batch_size, 512, 512])
+ t = dtcwt.Transform2d()
+ p = t.forward_channels(in_, "nhw", nlevels, include_scale)
+
+ # At level 1, the lowpass output will be the same size as the input. At
+ # levels above that, it will be half the size per level
+ extent = 512 * 2**(-(nlevels-1))
+ assert p.lowpass_op.get_shape().as_list() == [batch_size, extent, extent]
+ assert p.lowpass_op.dtype == tf.float32
+
+ for i in range(nlevels):
+ extent = 512 * 2**(-(i+1))
+ assert (p.highpasses_ops[i].get_shape().as_list() ==
+ [batch_size, extent, extent, 6])
+ assert p.highpasses_ops[i].dtype == tf.complex64
+ if include_scale:
+ assert (p.scales_ops[i].get_shape().as_list() ==
+ [batch_size, 2*extent, 2*extent])
+ assert p.scales_ops[i].dtype == tf.float32
+
+
+@skip_if_no_tf
+@pytest.mark.parametrize("nlevels, include_scale, batch_size", [
+ (2,False,None),
+ (2,True,10),
+ (4,False,None),
+ (3,True,2)
+])
+def test_batch_input_tuple(nlevels, include_scale, batch_size):
+ in_ = tf.placeholder(tf.float32, [batch_size, 512, 512])
+ t = dtcwt.Transform2d()
+
+ if include_scale:
+ Yl, Yh, Yscale = unpack(
+ t.forward_channels(in_, "nhw", nlevels, include_scale), "tf")
+ else:
+ Yl, Yh = unpack(
+ t.forward_channels(in_, "nhw", nlevels, include_scale), "tf")
+
+ # At level 1, the lowpass output will be the same size as the input. At
+ # levels above that, it will be half the size per level
+ extent = 512 * 2**(-(nlevels-1))
+ assert Yl.get_shape().as_list() == [batch_size, extent, extent]
+ assert Yl.dtype == tf.float32
+
+ for i in range(nlevels):
+ extent = 512 * 2**(-(i+1))
+ assert Yh[i].get_shape().as_list() == [batch_size, extent, extent, 6]
+ assert Yh[i].dtype == tf.complex64
+ if include_scale:
+ assert (Yscale[i].get_shape().as_list() ==
+ [batch_size, 2*extent, 2*extent])
+ assert Yscale[i].dtype == tf.float32
+
+
+@skip_if_no_tf
+@pytest.mark.parametrize("nlevels, channels", [
+ (2,5),
+ (2,2),
+ (4,10),
+ (3,6)
+])
+def test_multichannel(nlevels, channels):
+ in_ = tf.placeholder(tf.float32, [None, 512, 512, channels])
+ t = dtcwt.Transform2d()
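+    # Channels are preserved; highpasses gain a trailing axis of 6 orientations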
+ Yl, Yh, Yscale = unpack(
+ t.forward_channels(in_, "nhwc", nlevels, include_scale=True), "tf")
+ # At level 1, the lowpass output will be the same size as the input. At
+ # levels above that, it will be half the size per level
+ extent = 512 * 2**(-(nlevels-1))
+ assert Yl.get_shape().as_list() == [None, extent, extent, channels]
+ assert Yl.dtype == tf.float32
+
+ for i in range(nlevels):
+ extent = 512 * 2**(-(i+1))
+ assert (Yh[i].get_shape().as_list() ==
+ [None, extent, extent, channels, 6])
+ assert Yh[i].dtype == tf.complex64
+ assert Yscale[i].get_shape().as_list() == [
+ None, 2*extent, 2*extent, channels]
+ assert Yscale[i].dtype == tf.float32
diff --git a/tests/test_tfrowdfilt.py b/tests/test_tfrowdfilt.py
new file mode 100644
index 0000000..45b4592
--- /dev/null
+++ b/tests/test_tfrowdfilt.py
@@ -0,0 +1,102 @@
+from pytest import raises
+
+import numpy as np
+from importlib import import_module
+from dtcwt.coeffs import qshift
+from dtcwt.numpy.lowlevel import coldfilt as np_coldfilt
+
+from tests.util import skip_if_no_tf
+import tests.datasets as datasets
+
+
+@skip_if_no_tf
+def test_setup():
+ global mandrill, mandrill_t, rowdfilt, tf
+ tf = import_module('tensorflow')
+ lowlevel = import_module('dtcwt.tf.lowlevel')
+ rowdfilt = getattr(lowlevel, 'rowdfilt')
+ mandrill = datasets.mandrill()
+ mandrill_t = tf.expand_dims(tf.constant(mandrill, dtype=tf.float32),axis=0)
+
+
+@skip_if_no_tf
+def test_mandrill_loaded():
+ assert mandrill.shape == (512, 512)
+ assert mandrill.min() >= 0
+ assert mandrill.max() <= 1
+ assert mandrill.dtype == np.float32
+ assert mandrill_t.get_shape() == (1, 512, 512)
+
+
+@skip_if_no_tf
+def test_odd_filter():
+ with raises(ValueError):
+ rowdfilt(mandrill_t, (-1,2,-1), (-1,2,1))
+
+
+@skip_if_no_tf
+def test_different_size():
+ with raises(ValueError):
+ rowdfilt(mandrill_t, (-0.5,-1,2,1,0.5), (-1,2,-1))
+
+
+@skip_if_no_tf
+def test_bad_input_size():
+ with raises(ValueError):
+ rowdfilt(mandrill_t[:,:,:511], (-1,1), (1,-1))
+
+
+@skip_if_no_tf
+def test_good_input_size():
+ rowdfilt(mandrill_t[:,:511,:], (-1,1), (1,-1))
+
+
+@skip_if_no_tf
+def test_good_input_size_non_orthogonal():
+ rowdfilt(mandrill_t[:,:511,:], (1,1), (1,1))
+
+
+@skip_if_no_tf
+def test_output_size():
+ y_op = rowdfilt(mandrill_t, (-1,1), (1,-1))
+    assert y_op.shape[1:] == (mandrill.shape[0], mandrill.shape[1]//2)
+
+
+@skip_if_no_tf
+def test_equal_small_in():
+ ha = qshift('qshift_b')[0]
+ hb = qshift('qshift_b')[1]
+ im = mandrill[0:4,0:4]
+ im_t = tf.expand_dims(tf.constant(im, tf.float32), axis=0)
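+    # Row filtering should match column filtering of the transpose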
+ ref = np_coldfilt(im.T, ha, hb).T
+ y_op = rowdfilt(im_t, ha, hb)
+ with tf.Session() as sess:
+ y = sess.run(y_op)
+ np.testing.assert_array_almost_equal(y[0], ref, decimal=4)
+
+
+@skip_if_no_tf
+def test_equal_numpy_qshift1():
+ ha = qshift('qshift_c')[0]
+ hb = qshift('qshift_c')[1]
+ ref = np_coldfilt(mandrill.T, ha, hb).T
+ y_op = rowdfilt(mandrill_t, ha, hb)
+ with tf.Session() as sess:
+ y = sess.run(y_op)
+ np.testing.assert_array_almost_equal(y[0], ref, decimal=4)
+
+
+@skip_if_no_tf
+def test_equal_numpy_qshift2():
+ ha = qshift('qshift_c')[0]
+ hb = qshift('qshift_c')[1]
+ im = mandrill[:508, :504]
+ im_t = tf.expand_dims(tf.constant(im, tf.float32), axis=0)
+ ref = np_coldfilt(im.T, ha, hb).T
+ y_op = rowdfilt(im_t, ha, hb)
+ with tf.Session() as sess:
+ y = sess.run(y_op)
+ np.testing.assert_array_almost_equal(y[0], ref, decimal=4)
+
+# vim:sw=4:sts=4:et
diff --git a/tests/test_tfrowfilter.py b/tests/test_tfrowfilter.py
new file mode 100644
index 0000000..8980cfe
--- /dev/null
+++ b/tests/test_tfrowfilter.py
@@ -0,0 +1,123 @@
+import numpy as np
+from importlib import import_module
+from dtcwt.coeffs import biort, qshift
+from dtcwt.numpy.lowlevel import colfilter as np_colfilter
+
+from tests.util import skip_if_no_tf
+import tests.datasets as datasets
+
+
+@skip_if_no_tf
+def test_setup():
+ global mandrill, mandrill_t, rowfilter, tf
+ tf = import_module('tensorflow')
+ lowlevel = import_module('dtcwt.tf.lowlevel')
+ rowfilter = getattr(lowlevel, 'rowfilter')
+
+ mandrill = datasets.mandrill()
+ mandrill_t = tf.expand_dims(tf.constant(mandrill, dtype=tf.float32),axis=0)
+
+
+@skip_if_no_tf
+def test_mandrill_loaded():
+ assert mandrill.shape == (512, 512)
+ assert mandrill.min() >= 0
+ assert mandrill.max() <= 1
+ assert mandrill.dtype == np.float32
+ assert mandrill_t.get_shape() == (1, 512, 512)
+
+
+@skip_if_no_tf
+def test_odd_size():
+ y_op = rowfilter(mandrill_t, [-1, 2, -1])
+ assert y_op.get_shape()[1:] == mandrill.shape
+
+
+@skip_if_no_tf
+def test_even_size():
+ y_op = rowfilter(mandrill_t, [-1, -1])
+ assert y_op.get_shape()[1:] == (mandrill.shape[0], mandrill.shape[1]+1)
+
+
+@skip_if_no_tf
+def test_qshift():
+ h = qshift('qshift_a')[0]
+ y_op = rowfilter(mandrill_t, h)
+ assert y_op.get_shape()[1:] == (mandrill.shape[0], mandrill.shape[1]+1)
+
+
+@skip_if_no_tf
+def test_biort():
+ h = biort('antonini')[0]
+ y_op = rowfilter(mandrill_t, h)
+ assert y_op.get_shape()[1:] == mandrill.shape
+
+
+@skip_if_no_tf
+def test_even_size_batch():
+ h = tf.constant([-1,1], dtype=tf.float32)
+ zero_t = tf.zeros((1,) + mandrill.shape, tf.float32)
+ y_op = rowfilter(zero_t, h)
+ assert y_op.get_shape()[1:] == (mandrill.shape[0], mandrill.shape[1]+1)
+ with tf.Session() as sess:
+ y = sess.run(y_op)
+        assert np.all(y == 0.0)
+
+
+@skip_if_no_tf
+def test_equal_small_in():
+ h = qshift('qshift_b')[0]
+ im = mandrill[0:4,0:4]
+ im_t = tf.expand_dims(tf.constant(im, tf.float32), axis=0)
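+    # Row filtering should match column filtering of the transpose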
+ ref = np_colfilter(im.T, h).T
+ y_op = rowfilter(im_t, h)
+ with tf.Session() as sess:
+ y = sess.run(y_op)
+ np.testing.assert_array_almost_equal(y[0], ref, decimal=4)
+
+
+@skip_if_no_tf
+def test_equal_numpy_biort1():
+ h = biort('near_sym_b')[0]
+ ref = np_colfilter(mandrill.T, h).T
+ y_op = rowfilter(mandrill_t, h)
+ with tf.Session() as sess:
+ y = sess.run(y_op)
+ np.testing.assert_array_almost_equal(y[0], ref, decimal=4)
+
+
+@skip_if_no_tf
+def test_equal_numpy_biort2():
+ h = biort('near_sym_b')[0]
+ im = mandrill[15:307, 40:267]
+ im_t = tf.expand_dims(tf.constant(im, tf.float32), axis=0)
+ ref = np_colfilter(im.T, h).T
+ y_op = rowfilter(im_t, h)
+ with tf.Session() as sess:
+ y = sess.run(y_op)
+ np.testing.assert_array_almost_equal(y[0], ref, decimal=4)
+
+
+@skip_if_no_tf
+def test_equal_numpy_qshift1():
+ h = qshift('qshift_c')[0]
+ ref = np_colfilter(mandrill.T, h).T
+ y_op = rowfilter(mandrill_t, h)
+ with tf.Session() as sess:
+ y = sess.run(y_op)
+ np.testing.assert_array_almost_equal(y[0], ref, decimal=4)
+
+
+@skip_if_no_tf
+def test_equal_numpy_qshift2():
+ h = qshift('qshift_c')[0]
+ im = mandrill[15:307, 40:267]
+ im_t = tf.expand_dims(tf.constant(im, tf.float32), axis=0)
+ ref = np_colfilter(im.T, h).T
+ y_op = rowfilter(im_t, h)
+ with tf.Session() as sess:
+ y = sess.run(y_op)
+ np.testing.assert_array_almost_equal(y[0], ref, decimal=4)
+
+# vim:sw=4:sts=4:et
diff --git a/tests/tf-requirements.txt b/tests/tf-requirements.txt
new file mode 100644
index 0000000..340bc51
--- /dev/null
+++ b/tests/tf-requirements.txt
@@ -0,0 +1,2 @@
+# Tensorflow specific requirements
+tensorflow
diff --git a/tests/util.py b/tests/util.py
index 14bb01f..ce37aed 100644
--- a/tests/util.py
+++ b/tests/util.py
@@ -3,6 +3,7 @@
import pytest
from dtcwt.opencl.lowlevel import _HAVE_CL as HAVE_CL
+from dtcwt.tf.lowlevel import _HAVE_TF as HAVE_TF
from six.moves import xrange
@@ -65,4 +66,5 @@ def summarise_cube(M, apron=4):
)
skip_if_no_cl = pytest.mark.skipif(not HAVE_CL, reason="OpenCL not present")
+skip_if_no_tf = pytest.mark.skipif(not HAVE_TF, reason="Tensorflow not present")
diff --git a/tox.ini b/tox.ini
index fd9479f..437f386 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist=py{27,3}{,-opencl},docs
+envlist=py{27,3}{,-opencl,-tf},docs
[testenv:docs]
deps=
@@ -18,4 +18,5 @@ commands=
# We can't list these in deps since pyopencl moans if numpy is not
# fully installed at pip-install time.
py{27,3}-opencl: pip install -rtests/opencl-requirements.txt
+ py{27,3}-tf: pip install -rtests/tf-requirements.txt
py.test --cov=dtcwt/ --cov-report=term {posargs}