Remove subpixel upscaling option (deepfakes#1024)
torzdf authored May 13, 2020
1 parent 92bc9af commit ac40b0f
Showing 4 changed files with 14 additions and 37 deletions.
20 changes: 5 additions & 15 deletions lib/model/nn_blocks.py
@@ -9,7 +9,7 @@
 from keras.layers.core import Activation
 from keras.initializers import he_uniform, VarianceScaling
 from .initializers import ICNR, ConvolutionAware
-from .layers import PixelShuffler, SubPixelUpscaling, ReflectionPadding2D
+from .layers import PixelShuffler, ReflectionPadding2D
 from .normalization import InstanceNormalization
 
 logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
@@ -26,10 +26,6 @@ class NNBlocks():
     Parameters
     ----------
-    use_subpixel: bool, Optional
-        ``True`` if sub-pixel up-scaling layer should be used instead of pixel shuffler for
-        up-scaling. This option is deprecated as sub-pixel up-scaling is Nvidia only, but is kept
-        for legacy models. Default: ``False``
     use_icnr_init: bool, Optional
         ``True`` if ICNR initialization should be used rather than the default. Default: ``False``
     use_convaware_init: bool, Optional
@@ -44,18 +40,16 @@ class NNBlocks():
         is being reloaded. Default: ``True``
     """
     def __init__(self,
-                 use_subpixel=False,
                  use_icnr_init=False,
                  use_convaware_init=False,
                  use_reflect_padding=False,
                  first_run=True):
-        logger.debug("Initializing %s: (use_subpixel: %s, use_icnr_init: %s, use_convaware_init: "
-                     "%s, use_reflect_padding: %s, first_run: %s)",
-                     self.__class__.__name__, use_subpixel, use_icnr_init, use_convaware_init,
+        logger.debug("Initializing %s: (use_icnr_init: %s, use_convaware_init: %s, "
+                     "use_reflect_padding: %s, first_run: %s)",
+                     self.__class__.__name__, use_icnr_init, use_convaware_init,
                      use_reflect_padding, first_run)
         self.names = dict()
         self.first_run = first_run
-        self.use_subpixel = use_subpixel
         self.use_icnr_init = use_icnr_init
         self.use_convaware_init = use_convaware_init
         self.use_reflect_padding = use_reflect_padding
@@ -311,11 +305,7 @@ def upscale(self, input_tensor, filters, kernel_size=3, padding="same",
             var_x = InstanceNormalization(name="{}_instancenorm".format(name))(var_x)
         if not res_block_follows:
             var_x = LeakyReLU(0.1, name="{}_leakyrelu".format(name))(var_x)
-        if self.use_subpixel:
-            var_x = SubPixelUpscaling(name="{}_subpixel".format(name),
-                                      scale_factor=scale_factor)(var_x)
-        else:
-            var_x = PixelShuffler(name="{}_pixelshuffler".format(name), size=scale_factor)(var_x)
+        var_x = PixelShuffler(name="{}_pixelshuffler".format(name), size=scale_factor)(var_x)
         return var_x
 
     # <<< DFaker Model Blocks >>> #
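Both up-scaling paths collapsed in the hunk above perform the same depth-to-space rearrangement from Shi et al. (https://arxiv.org/pdf/1609.05158.pdf), which is why PixelShuffler can stand in for the removed SubPixelUpscaling layer. A minimal NumPy sketch of that rearrangement, for reference only (not code from this repository):

import numpy as np

def pixel_shuffle(tensor, scale=2):
    """ Rearrange (batch, height, width, channels) to
    (batch, height * scale, width * scale, channels // scale ** 2). """
    batch, height, width, channels = tensor.shape
    new_channels = channels // (scale ** 2)
    out = tensor.reshape(batch, height, width, scale, scale, new_channels)
    out = out.transpose(0, 1, 3, 2, 4, 5)  # interleave the scale factors
    return out.reshape(batch, height * scale, width * scale, new_channels)

x = np.random.rand(2, 8, 8, 128).astype("float32")
print(pixel_shuffle(x).shape)  # (2, 16, 16, 32)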
7 changes: 0 additions & 7 deletions plugins/train/_config.py
@@ -135,13 +135,6 @@ def set_globals(self):
                       "\n\t Building the model will likely take several minutes as the calculations "
                       "for this initialization technique are expensive. This will only impact starting "
                       "a new model.")
-        self.add_item(
-            section=section, title="subpixel_upscaling", datatype=bool,
-            default=False, group="network",
-            info="Use subpixel upscaling rather than pixel shuffler. These techniques "
-                 "are both designed to produce better resolving upscaling than other "
-                 "methods. Each performs the same operations, but using different TF ops."
-                 "\n\t https://arxiv.org/pdf/1609.05158.pdf")
         self.add_item(
             section=section, title="reflect_padding", datatype=bool,
             default=False, group="network",
12 changes: 5 additions & 7 deletions plugins/train/model/_base.py
@@ -83,8 +83,7 @@ def __init__(self,
                               self.vram_savings.pingpong,
                               training_image_size)
 
-        self.blocks = NNBlocks(use_subpixel=self.config["subpixel_upscaling"],
-                               use_icnr_init=self.config["icnr_init"],
+        self.blocks = NNBlocks(use_icnr_init=self.config["icnr_init"],
                                use_convaware_init=self.config["conv_aware_init"],
                                use_reflect_padding=self.config["reflect_padding"],
                                first_run=self.state.first_run)
@@ -377,9 +376,9 @@ def get_optimizer(self, lr=5e-5, beta_1=0.5, beta_2=0.999):  # pylint: disable=i
         opt_kwargs = dict(lr=lr, beta_1=beta_1, beta_2=beta_2)
         if (self.config.get("clipnorm", False) and
                 keras.backend.backend() != "plaidml.keras.backend"):
-            # NB: Clipnorm is ballooning VRAM usage, which is not expected behavior
-            # and may be a bug in Keras/TF.
-            # PlaidML has a bug regarding the clipnorm parameter
+            # NB: Clip-norm is ballooning VRAM usage, which is not expected behavior
+            # and may be a bug in Keras/Tensorflow.
+            # PlaidML has a bug regarding the clip-norm parameter
             # See: https://github.com/plaidml/plaidml/issues/228
             # Workaround by simply removing it.
             # TODO: Remove this as soon as it is fixed in PlaidML.
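For context on the hunk above: the workaround those comments describe amounts to only passing clipnorm through to the optimizer when the backend is not PlaidML. A minimal standalone sketch of that guard (the config stand-in and the clip-norm value are assumptions for illustration, not the repository's exact code):

import keras

config = {"clipnorm": True}  # stand-in for the plugin's loaded config
opt_kwargs = dict(lr=5e-5, beta_1=0.5, beta_2=0.999)
if (config.get("clipnorm", False) and
        keras.backend.backend() != "plaidml.keras.backend"):
    # Only add clip-norm when not on PlaidML (see plaidml/plaidml#228)
    opt_kwargs["clipnorm"] = 1.0  # illustrative value only
optimizer = keras.optimizers.Adam(**opt_kwargs)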
@@ -581,7 +580,6 @@ def rename_legacy(self):
         self.state.inputs = {"face:0": [64, 64, 3]}
         self.state.training_size = 256
         self.state.config["coverage"] = 62.5
-        self.state.config["subpixel_upscaling"] = False
         self.state.config["reflect_padding"] = False
         self.state.config["mask_type"] = None
         self.state.config["mask_blur_kernel"] = 3
@@ -1014,7 +1012,7 @@ def _update_legacy_config(self):
             set it to `mae`. Remove old `dssim_loss` item
         * masks - If `learn_mask` does not exist then it is set to ``True`` if `mask_type` is
-          not ``None`` otherwised it is set to ``False``.
+          not ``None`` otherwise it is set to ``False``.
         * masks type - Replace removed masks 'dfl_full' and 'facehull' with `components` mask
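Related to rename_legacy and _update_legacy_config above: model states saved before this commit may still carry the retired key. A hypothetical sketch of the kind of clean-up such a migration step could perform (the helper name and behaviour are assumptions, not taken from this diff):

def drop_retired_options(state_config):
    """ Remove config keys that no longer exist, such as the
    'subpixel_upscaling' switch retired by this commit. """
    for key in ("subpixel_upscaling",):
        state_config.pop(key, None)
    return state_config

legacy = {"subpixel_upscaling": False, "reflect_padding": False}
print(drop_retired_options(legacy))  # {'reflect_padding': False}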
12 changes: 4 additions & 8 deletions tests/lib/model/nn_blocks_test.py
@@ -15,14 +15,14 @@
 from lib.model.nn_blocks import NNBlocks
 from lib.utils import get_backend
 
-_PARAMS = ["use_subpixel", "use_icnr_init", "use_convaware_init", "use_reflect_padding"]
+_PARAMS = ["use_icnr_init", "use_convaware_init", "use_reflect_padding"]
 _VALUES = list(product([True, False], repeat=len(_PARAMS)))
 _IDS = ["{}[{}]".format("|".join([_PARAMS[idx] for idx, b in enumerate(v) if b]),
                         get_backend().upper()) for v in _VALUES]
 
 
 def block_test(layer_func, kwargs={}, input_shape=None):
-    """Test routine for a faceswaps neural network blocks.
+    """Test routine for faceswap neural network blocks.
 
     Tests are simple and are to ensure that the blocks compile on both tensorflow
     and plaidml backends
@@ -62,13 +62,9 @@ def block_test(layer_func, kwargs={}, input_shape=None):
 
 
 @pytest.mark.parametrize(_PARAMS, _VALUES, ids=_IDS)
-def test_blocks(use_subpixel, use_icnr_init, use_convaware_init, use_reflect_padding):
+def test_blocks(use_icnr_init, use_convaware_init, use_reflect_padding):
     """ Test for all blocks contained within the NNBlocks Class """
-    if get_backend() == "amd" and use_subpixel:
-        # Subpixel upscaling does not work on plaidml so skip this test
-        pytest.skip("Subpixel upscaling not supported in plaidML")
-    cls_ = NNBlocks(use_subpixel=use_subpixel,
-                    use_icnr_init=use_icnr_init,
+    cls_ = NNBlocks(use_icnr_init=use_icnr_init,
                     use_convaware_init=use_convaware_init,
                     use_reflect_padding=use_reflect_padding)
     block_test(cls_.conv2d, input_shape=(2, 5, 5, 128), kwargs=dict(filters=1024, kernel_size=3))
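With use_subpixel gone, the parametrization above shrinks from 2**4 = 16 to 2**3 = 8 boolean combinations per backend. A standalone sketch of how _IDS now names each case (the "CPU" backend label is an assumption; in the tests it comes from get_backend() at runtime):

from itertools import product

_PARAMS = ["use_icnr_init", "use_convaware_init", "use_reflect_padding"]
_VALUES = list(product([True, False], repeat=len(_PARAMS)))
print(len(_VALUES))  # 8 combinations (previously 16 with four flags)

# id for the all-flags-True case, assuming a "cpu" backend:
print("{}[{}]".format(
    "|".join(p for p, b in zip(_PARAMS, _VALUES[0]) if b), "CPU"))
# -> use_icnr_init|use_convaware_init|use_reflect_padding[CPU]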
