[WIP:] refactoring for tensorflow2 keras compat #229

Open · wants to merge 2 commits into master
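The commits replace each `keras.*` import with its `tensorflow.keras.*` counterpart, module by module. For reviewers weighing alternatives: a single compatibility shim would confine the switch to one file. The sketch below is hypothetical (no `innvestigate/compat.py` exists in this PR) and only illustrates the idea.

```python
# Hypothetical innvestigate/compat.py -- NOT part of this PR.
# Resolve the Keras implementation once; analyzer modules would then
# write `from innvestigate.compat import keras, K` instead of being
# edited individually.
try:
    import tensorflow.keras as keras          # TF2 bundles Keras here.
    import tensorflow.keras.backend as K
except ImportError:
    import keras                              # Standalone Keras fallback.
    import keras.backend as K
```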
22 changes: 11 additions & 11 deletions innvestigate/analyzer/base.py
@@ -9,9 +9,9 @@
###############################################################################


-import keras.backend as K
-import keras.layers
-import keras.models
+import tensorflow.keras.backend as K
+import tensorflow.keras.layers
+import tensorflow.keras.models
import numpy as np
import warnings

@@ -49,7 +49,7 @@ class AnalyzerBase(object):

This class defines the basic interface for analyzers:

>>> model = create_keras_model()
>>> a = Analyzer(model)
>>> a.fit(X_train) # If analyzer needs training.
>>> analysis = a.analyze(X_test)
@@ -190,7 +190,7 @@ def _state_to_kwargs(clazz, state):
disable_model_checks = state.pop("disable_model_checks")
assert len(state) == 0

-model = keras.models.model_from_json(model_json)
+model = tensorflow.keras.models.model_from_json(model_json)
model.set_weights(model_weights)
return {"model": model,
"disable_model_checks": disable_model_checks}
@@ -330,7 +330,7 @@ def __init__(self, model,
self._allow_lambda_layers = allow_lambda_layers
self._add_model_check(
lambda layer: (not self._allow_lambda_layers and
-isinstance(layer, keras.layers.core.Lambda)),
+isinstance(layer, tensorflow.keras.layers.Lambda)),
("Lamda layers are not allowed. "
"To force use set allow_lambda_layers parameter."),
check_type="exception",
@@ -368,18 +368,18 @@ def _prepare_model(self, model):

# Flatten to form (batch_size, other_dimensions):
if K.ndim(model_output[0]) > 2:
-model_output = keras.layers.Flatten()(model_output)
+model_output = tensorflow.keras.layers.Flatten()(model_output[0])

if neuron_selection_mode == "max_activation":
l = ilayers.Max(name="iNNvestigate_max")
model_output = l(model_output)
self._special_helper_layers.append(l)
elif neuron_selection_mode == "index":
-neuron_indexing = keras.layers.Input(
+neuron_indexing = tensorflow.keras.layers.Input(
batch_shape=[None, None], dtype=np.int32,
name='iNNvestigate_neuron_indexing')
self._special_helper_layers.append(
neuron_indexing._keras_history[0])
analysis_inputs.append(neuron_indexing)
# The indexing tensor should not be analyzed.
stop_analysis_at_tensors.append(neuron_indexing)
@@ -392,7 +392,7 @@ def _prepare_model(self, model):
else:
raise NotImplementedError()

-model = keras.models.Model(inputs=model_inputs+analysis_inputs,
+model = tensorflow.keras.models.Model(inputs=model_inputs+analysis_inputs,
outputs=model_output)
return model, analysis_inputs, stop_analysis_at_tensors

@@ -432,7 +432,7 @@ def create_analyzer_model(self):
self._n_constant_input = len(constant_inputs)
self._n_data_output = len(analysis_outputs)
self._n_debug_output = len(debug_outputs)
-self._analyzer_model = keras.models.Model(
+self._analyzer_model = tensorflow.keras.models.Model(
inputs=model_inputs+analysis_inputs+constant_inputs,
outputs=analysis_outputs+debug_outputs)

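A quick way to sanity-check the `_state_to_kwargs` hunk above is a serialize/restore round trip under `tensorflow.keras`; the toy model below is illustrative, not from the PR.

```python
import numpy as np
import tensorflow.keras.layers
import tensorflow.keras.models

# Serialize architecture (JSON) and weights separately, then restore both --
# the same save/load pattern AnalyzerBase uses.
model = tensorflow.keras.models.Sequential(
    [tensorflow.keras.layers.Dense(4, input_shape=(8,))])
model_json = model.to_json()
model_weights = model.get_weights()

restored = tensorflow.keras.models.model_from_json(model_json)
restored.set_weights(model_weights)
assert all(np.allclose(a, b)
           for a, b in zip(model.get_weights(), restored.get_weights()))
```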
30 changes: 15 additions & 15 deletions innvestigate/analyzer/deeplift.py
@@ -9,8 +9,8 @@


import importlib
-import keras.backend as K
-import keras.layers
+import tensorflow.keras.backend as K
+import tensorflow.keras.layers
import numpy as np
import tempfile
import warnings
@@ -47,7 +47,7 @@ def rescale_f(x):
return a*(dy/(dx + K.epsilon()))

grad = ilayers.GradientWRT(len(Xs))
-rescale = keras.layers.Lambda(rescale_f)
+rescale = tensorflow.keras.layers.Lambda(rescale_f)

Xs_references = [
reference_mapping.get(x, local_references.get(x, None))
@@ -58,9 +58,9 @@ def rescale_f(x):
for x in Ys
]

-Xs_differences = [keras.layers.Subtract()([x, r])
+Xs_differences = [tensorflow.keras.layers.Subtract()([x, r])
for x, r in zip(Xs, Xs_references)]
-Ys_differences = [keras.layers.Subtract()([x, r])
+Ys_differences = [tensorflow.keras.layers.Subtract()([x, r])
for x, r in zip(Ys, Ys_references)]
gradients = iutils.to_list(grad(Xs+Ys+As))

@@ -79,15 +79,15 @@ def switch_f(x):
return a

grad = ilayers.GradientWRT(len(Xs))
-switch = keras.layers.Lambda(switch_f)
+switch = tensorflow.keras.layers.Lambda(switch_f)

Xs_references = [reference_mapping[x] for x in Xs]

Ys_references = [reference_mapping[x] for x in Ys]

-Xs_differences = [keras.layers.Subtract()([x, r])
+Xs_differences = [tensorflow.keras.layers.Subtract()([x, r])
for x, r in zip(Xs, Xs_references)]
-Ys_differences = [keras.layers.Subtract()([x, r])
+Ys_differences = [tensorflow.keras.layers.Subtract()([x, r])
for x, r in zip(Ys, Ys_references)]

# Divide incoming relevance by the activations.
@@ -99,7 +99,7 @@ def switch_f(x):
tmp = iutils.to_list(grad(Xs+Ys+tmp))

# Re-weight relevance with the input values.
-tmp = [keras.layers.Multiply()([a, b])
+tmp = [tensorflow.keras.layers.Multiply()([a, b])
for a, b in zip(Xs_differences, tmp)]

# only the gradient
@@ -144,12 +144,12 @@ def _create_reference_activations(self, model):
self._reference_activations = {}

# Create references and graph inputs.
tmp = kutils.broadcast_np_tensors_to_keras_tensors(
model.inputs, self._reference_inputs)
tmp = [K.variable(x) for x in tmp]

constant_reference_inputs = [
-keras.layers.Input(tensor=x, shape=K.int_shape(x)[1:])
+tensorflow.keras.layers.Input(tensor=x, shape=K.int_shape(x)[1:])
for x in tmp
]

@@ -163,7 +163,7 @@ def _create_reference_activations(self, model):
for layer, Xs, Ys in execution_list:
activations = [self._reference_activations[x] for x in Xs]

-if isinstance(layer, keras.layers.InputLayer):
+if isinstance(layer, tensorflow.keras.layers.InputLayer):
# Special case. Do nothing.
next_activations = activations
else:
@@ -220,7 +220,7 @@ def _create_analysis(self, model, *args, **kwargs):
constant_inputs+constant_reference_inputs)

def _head_mapping(self, X):
-return keras.layers.Subtract()([X, self._reference_activations[X]])
+return tensorflow.keras.layers.Subtract()([X, self._reference_activations[X]])

def _reverse_model(self,
model,
@@ -314,12 +314,12 @@ def fix_name(s):
score_layer_names = [fix_name(l.name) for l in self._model.inputs]
if len(self._model.outputs) > 1:
raise ValueError("Only a single output layer is supported.")
tmp = self._model.outputs[0]._keras_history
target_layer_name = fix_name(tmp[0].name+"_%i" % tmp[1])
self._func = deeplift_model.get_target_contribs_func(
find_scores_layer_name=score_layer_names,
pre_activation_target_layer_name=target_layer_name)
self._references = kutils.broadcast_np_tensors_to_keras_tensors(
self._model.inputs, self._reference_inputs)

def _analyze_with_deeplift(self, X, neuron_idx, batch_size):
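The rescale-rule hunks above route `(y - y_ref)` and `(x - x_ref)` differences through `Subtract` layers and weight relevance by their ratio. A minimal standalone sketch of that ratio, mirroring `rescale_f` (tensor values are made up):

```python
import tensorflow.keras.backend as K

# DeepLIFT rescale: relevance * (delta_y / (delta_x + epsilon)),
# as computed by the rescale_f closure in the diff above.
x, x_ref = K.constant([[1.0, 2.0]]), K.constant([[0.0, 0.5]])
y, y_ref = K.constant([[0.8, 1.6]]), K.constant([[0.0, 0.4]])
relevance = K.constant([[1.0, 1.0]])

dx = x - x_ref
dy = y - y_ref
rescaled = relevance * (dy / (dx + K.epsilon()))
```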
50 changes: 25 additions & 25 deletions innvestigate/analyzer/deeptaylor.py
@@ -8,8 +8,8 @@
###############################################################################


-import keras.layers
-import keras.models
+import tensorflow.keras.layers
+import tensorflow.keras.models


from . import base
@@ -91,20 +91,20 @@ def do_nothing(Xs, Ys, As, reverse_state):
name="deep_taylor_average_pooling",
)
self._add_conditional_reverse_mapping(
-lambda l: isinstance(l, keras.layers.Add),
+lambda l: isinstance(l, tensorflow.keras.layers.Add),
# Ignore scaling with 0.5
self._gradient_reverse_mapping,
name="deep_taylor_add",
)
self._add_conditional_reverse_mapping(
lambda l: isinstance(l, (
-keras.layers.convolutional.UpSampling1D,
-keras.layers.convolutional.UpSampling2D,
-keras.layers.convolutional.UpSampling3D,
-keras.layers.core.Dropout,
-keras.layers.core.SpatialDropout1D,
-keras.layers.core.SpatialDropout2D,
-keras.layers.core.SpatialDropout3D,
+tensorflow.keras.layers.UpSampling1D,
+tensorflow.keras.layers.UpSampling2D,
+tensorflow.keras.layers.UpSampling3D,
+tensorflow.keras.layers.Dropout,
+tensorflow.keras.layers.SpatialDropout1D,
+tensorflow.keras.layers.SpatialDropout2D,
+tensorflow.keras.layers.SpatialDropout3D,
)),
self._gradient_reverse_mapping,
name="deep_taylor_special_layers",
@@ -113,19 +113,19 @@ def do_nothing(Xs, Ys, As, reverse_state):
# Layers w/o transformation
self._add_conditional_reverse_mapping(
lambda l: isinstance(l, (
-keras.engine.topology.InputLayer,
-keras.layers.convolutional.Cropping1D,
-keras.layers.convolutional.Cropping2D,
-keras.layers.convolutional.Cropping3D,
-keras.layers.convolutional.ZeroPadding1D,
-keras.layers.convolutional.ZeroPadding2D,
-keras.layers.convolutional.ZeroPadding3D,
-keras.layers.Concatenate,
-keras.layers.core.Flatten,
-keras.layers.core.Masking,
-keras.layers.core.Permute,
-keras.layers.core.RepeatVector,
-keras.layers.core.Reshape,
+tensorflow.keras.layers.InputLayer,
+tensorflow.keras.layers.Cropping1D,
+tensorflow.keras.layers.Cropping2D,
+tensorflow.keras.layers.Cropping3D,
+tensorflow.keras.layers.ZeroPadding1D,
+tensorflow.keras.layers.ZeroPadding2D,
+tensorflow.keras.layers.ZeroPadding3D,
+tensorflow.keras.layers.Concatenate,
+tensorflow.keras.layers.Flatten,
+tensorflow.keras.layers.Masking,
+tensorflow.keras.layers.Permute,
+tensorflow.keras.layers.RepeatVector,
+tensorflow.keras.layers.Reshape,
)),
self._gradient_reverse_mapping,
name="deep_taylor_no_transform",
@@ -146,8 +146,8 @@ def _prepare_model(self, model):
To be theoretically sound Deep-Taylor expects only positive outputs.
"""

-positive_outputs = [keras.layers.ReLU()(x) for x in model.outputs]
-model_with_positive_output = keras.models.Model(
+positive_outputs = [tensorflow.keras.layers.ReLU()(x) for x in model.outputs]
+model_with_positive_output = tensorflow.keras.models.Model(
inputs=model.inputs, outputs=positive_outputs)

return super(DeepTaylor, self)._prepare_model(
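`_prepare_model` above clamps every model output through a `ReLU` so that Deep Taylor only ever sees non-negative scores. A self-contained sketch of that wrapping on a toy model (layer sizes are made up):

```python
import tensorflow.keras.layers
import tensorflow.keras.models

# Wrap each output in ReLU before analysis, as DeepTaylor._prepare_model does.
inputs = tensorflow.keras.layers.Input(shape=(8,))
outputs = tensorflow.keras.layers.Dense(3)(inputs)
model = tensorflow.keras.models.Model(inputs=inputs, outputs=outputs)

positive_outputs = [tensorflow.keras.layers.ReLU()(x) for x in model.outputs]
model_with_positive_output = tensorflow.keras.models.Model(
    inputs=model.inputs, outputs=positive_outputs)
```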
10 changes: 5 additions & 5 deletions innvestigate/analyzer/gradient_based.py
@@ -8,8 +8,8 @@
###############################################################################


-import keras.models
-import keras
+import tensorflow.keras.models
+import tensorflow.keras


from . import base
@@ -159,7 +159,7 @@ def _create_analysis(self, model, stop_analysis_at_tensors=[]):
if x not in stop_analysis_at_tensors]
gradients = super(InputTimesGradient, self)._create_analysis(
model, stop_analysis_at_tensors=stop_analysis_at_tensors)
-return [keras.layers.Multiply()([i, g])
+return [tensorflow.keras.layers.Multiply()([i, g])
for i, g in zip(tensors_to_analyze, gradients)]


@@ -171,7 +171,7 @@ def _create_analysis(self, model, stop_analysis_at_tensors=[]):
class DeconvnetReverseReLULayer(kgraph.ReverseMappingBase):

def __init__(self, layer, state):
-self._activation = keras.layers.Activation("relu")
+self._activation = tensorflow.keras.layers.Activation("relu")
self._layer_wo_relu = kgraph.copy_layer_wo_activation(
layer,
name_template="reversed_%s",
@@ -217,7 +217,7 @@ def _create_analysis(self, *args, **kwargs):


def GuidedBackpropReverseReLULayer(Xs, Ys, reversed_Ys, reverse_state):
-activation = keras.layers.Activation("relu")
+activation = tensorflow.keras.layers.Activation("relu")
# Apply relus conditioned on backpropagated values.
reversed_Ys = kutils.apply(activation, reversed_Ys)

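The guided-backprop mapping above applies the ReLU to the backpropagated values rather than to the forward activations, zeroing relevance wherever the incoming gradient is negative. A minimal sketch of that masking step (tensor values are illustrative):

```python
import tensorflow.keras.backend as K
import tensorflow.keras.layers

# Guided backprop masking step: relu(reversed_Ys), as applied by
# GuidedBackpropReverseReLULayer via kutils.apply.
activation = tensorflow.keras.layers.Activation("relu")
reversed_Ys = K.constant([[-0.3, 0.7, -1.2, 2.0]])
masked = activation(reversed_Ys)  # [[0.0, 0.7, 0.0, 2.0]]
```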
45 changes: 21 additions & 24 deletions innvestigate/analyzer/pattern_based.py
@@ -7,13 +7,10 @@
###############################################################################
###############################################################################

-import keras.activations
-import keras.engine.topology
-import keras.layers
-import keras.layers.core
-import keras.layers.pooling
-import keras.models
-import keras
+import tensorflow.keras.activations
+import tensorflow.keras.layers
+import tensorflow.keras.models
+import tensorflow.keras
import numpy as np
import warnings

@@ -39,21 +39,21 @@


SUPPORTED_LAYER_PATTERNNET = (
-keras.engine.topology.InputLayer,
-keras.layers.convolutional.Conv2D,
-keras.layers.core.Dense,
-keras.layers.core.Dropout,
-keras.layers.core.Flatten,
-keras.layers.core.Masking,
-keras.layers.core.Permute,
-keras.layers.core.Reshape,
-keras.layers.Concatenate,
-keras.layers.pooling.GlobalMaxPooling1D,
-keras.layers.pooling.GlobalMaxPooling2D,
-keras.layers.pooling.GlobalMaxPooling3D,
-keras.layers.pooling.MaxPooling1D,
-keras.layers.pooling.MaxPooling2D,
-keras.layers.pooling.MaxPooling3D,
+tensorflow.keras.layers.InputLayer,
+tensorflow.keras.layers.Conv2D,
+tensorflow.keras.layers.Dense,
+tensorflow.keras.layers.Dropout,
+tensorflow.keras.layers.Flatten,
+tensorflow.keras.layers.Masking,
+tensorflow.keras.layers.Permute,
+tensorflow.keras.layers.Reshape,
+tensorflow.keras.layers.Concatenate,
+tensorflow.keras.layers.GlobalMaxPooling1D,
+tensorflow.keras.layers.GlobalMaxPooling2D,
+tensorflow.keras.layers.GlobalMaxPooling3D,
+tensorflow.keras.layers.MaxPooling1D,
+tensorflow.keras.layers.MaxPooling2D,
+tensorflow.keras.layers.MaxPooling3D,
)


@@ -75,7 +72,7 @@ def __init__(self, layer, state, pattern):
if "activation" in config:
activation = config["activation"]
config["activation"] = None
-self._act_layer = keras.layers.Activation(
+self._act_layer = tensorflow.keras.layers.Activation(
activation,
name="reversed_act_%s" % config["name"])
self._filter_layer = kgraph.copy_layer_wo_activation(
@@ -106,7 +103,7 @@ def apply(self, Xs, Ys, reversed_Ys, reverse_state):

# First step: propagate through the activation layer.
# Workaround for linear activations.
-linear_activations = [None, keras.activations.get("linear")]
+linear_activations = [None, tensorflow.keras.activations.get("linear")]
if self._act_layer.activation in linear_activations:
tmp = reversed_Ys
else:
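Because `tf.keras` exposes every layer class on the flat `tensorflow.keras.layers` namespace (the nested `keras.layers.core` / `keras.layers.pooling` module paths from standalone Keras are not public there), the support check stays a single `isinstance` against the tuple. A hedged sketch of that check with an abbreviated stand-in tuple:

```python
import tensorflow.keras.layers

# Abbreviated stand-in for SUPPORTED_LAYER_PATTERNNET; all classes live
# directly on the flat tensorflow.keras.layers namespace.
SUPPORTED = (
    tensorflow.keras.layers.InputLayer,
    tensorflow.keras.layers.Conv2D,
    tensorflow.keras.layers.Dense,
    tensorflow.keras.layers.MaxPooling2D,
)

def layer_is_supported(layer):
    # Mirrors the per-layer membership test PatternNet performs.
    return isinstance(layer, SUPPORTED)

print(layer_is_supported(tensorflow.keras.layers.Dense(10)))     # True
print(layer_is_supported(tensorflow.keras.layers.Dropout(0.5)))  # False
```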