diff --git a/lib/model/nn_blocks.py b/lib/model/nn_blocks.py
index b2d3aebe30..180d0649ee 100644
--- a/lib/model/nn_blocks.py
+++ b/lib/model/nn_blocks.py
@@ -9,7 +9,7 @@
 from keras.layers.core import Activation
 from keras.initializers import he_uniform, VarianceScaling
 from .initializers import ICNR, ConvolutionAware
-from .layers import PixelShuffler, SubPixelUpscaling, ReflectionPadding2D
+from .layers import PixelShuffler, ReflectionPadding2D
 from .normalization import InstanceNormalization
 
 logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
@@ -26,10 +26,6 @@ class NNBlocks():
 
     Parameters
     ----------
-    use_subpixel: bool, Optional
-        ``True`` if sub-pixel up-scaling layer should be used instead of pixel shuffler for
-        up-scaling. This option is deprecated as sub-pixel up-scaling is Nvidia only, but is kept
-        for legacy models. Default: ``False``
     use_icnr_init: bool, Optional
         ``True`` if ICNR initialization should be used rather than the default. Default: ``False``
     use_convaware_init: bool, Optional
@@ -44,18 +40,16 @@ class NNBlocks():
         is being reloaded. Default: ``True``
     """
     def __init__(self,
-                 use_subpixel=False,
                  use_icnr_init=False,
                  use_convaware_init=False,
                  use_reflect_padding=False,
                  first_run=True):
-        logger.debug("Initializing %s: (use_subpixel: %s, use_icnr_init: %s, use_convaware_init: "
-                     "%s, use_reflect_padding: %s, first_run: %s)",
-                     self.__class__.__name__, use_subpixel, use_icnr_init, use_convaware_init,
+        logger.debug("Initializing %s: (use_icnr_init: %s, use_convaware_init: %s, "
+                     "use_reflect_padding: %s, first_run: %s)",
+                     self.__class__.__name__, use_icnr_init, use_convaware_init,
                      use_reflect_padding, first_run)
         self.names = dict()
         self.first_run = first_run
-        self.use_subpixel = use_subpixel
         self.use_icnr_init = use_icnr_init
         self.use_convaware_init = use_convaware_init
         self.use_reflect_padding = use_reflect_padding
@@ -311,11 +305,7 @@ def upscale(self, input_tensor, filters, kernel_size=3, padding="same",
             var_x = InstanceNormalization(name="{}_instancenorm".format(name))(var_x)
         if not res_block_follows:
             var_x = LeakyReLU(0.1, name="{}_leakyrelu".format(name))(var_x)
-        if self.use_subpixel:
-            var_x = SubPixelUpscaling(name="{}_subpixel".format(name),
-                                      scale_factor=scale_factor)(var_x)
-        else:
-            var_x = PixelShuffler(name="{}_pixelshuffler".format(name), size=scale_factor)(var_x)
+        var_x = PixelShuffler(name="{}_pixelshuffler".format(name), size=scale_factor)(var_x)
         return var_x
 
     # <<< DFaker Model Blocks >>> #
diff --git a/plugins/train/_config.py b/plugins/train/_config.py
index 0326bc7160..7ebce93dd3 100644
--- a/plugins/train/_config.py
+++ b/plugins/train/_config.py
@@ -135,13 +135,6 @@ def set_globals(self):
                  "\n\t Building the model will likely take several minutes as the calculations "
                  "for this initialization technique are expensive. This will only impact starting "
                  "a new model.")
-        self.add_item(
-            section=section, title="subpixel_upscaling", datatype=bool,
-            default=False, group="network",
-            info="Use subpixel upscaling rather than pixel shuffler. These techniques "
-                 "are both designed to produce better resolving upscaling than other "
-                 "methods. Each perform the same operations, but using different TF opts."
- "\n\t https://arxiv.org/pdf/1609.05158.pdf") self.add_item( section=section, title="reflect_padding", datatype=bool, default=False, group="network", diff --git a/plugins/train/model/_base.py b/plugins/train/model/_base.py index 8fa9dc8241..2540448f0f 100644 --- a/plugins/train/model/_base.py +++ b/plugins/train/model/_base.py @@ -83,8 +83,7 @@ def __init__(self, self.vram_savings.pingpong, training_image_size) - self.blocks = NNBlocks(use_subpixel=self.config["subpixel_upscaling"], - use_icnr_init=self.config["icnr_init"], + self.blocks = NNBlocks(use_icnr_init=self.config["icnr_init"], use_convaware_init=self.config["conv_aware_init"], use_reflect_padding=self.config["reflect_padding"], first_run=self.state.first_run) @@ -377,9 +376,9 @@ def get_optimizer(self, lr=5e-5, beta_1=0.5, beta_2=0.999): # pylint: disable=i opt_kwargs = dict(lr=lr, beta_1=beta_1, beta_2=beta_2) if (self.config.get("clipnorm", False) and keras.backend.backend() != "plaidml.keras.backend"): - # NB: Clipnorm is ballooning VRAM usage, which is not expected behavior - # and may be a bug in Keras/TF. - # PlaidML has a bug regarding the clipnorm parameter + # NB: Clip-norm is ballooning VRAM usage, which is not expected behavior + # and may be a bug in Keras/Tensorflow. + # PlaidML has a bug regarding the clip-norm parameter # See: https://github.com/plaidml/plaidml/issues/228 # Workaround by simply removing it. # TODO: Remove this as soon it is fixed in PlaidML. @@ -581,7 +580,6 @@ def rename_legacy(self): self.state.inputs = {"face:0": [64, 64, 3]} self.state.training_size = 256 self.state.config["coverage"] = 62.5 - self.state.config["subpixel_upscaling"] = False self.state.config["reflect_padding"] = False self.state.config["mask_type"] = None self.state.config["mask_blur_kernel"] = 3 @@ -1014,7 +1012,7 @@ def _update_legacy_config(self): set it to `mae`. Remove old `dssim_loss` item * masks - If `learn_mask` does not exist then it is set to ``True`` if `mask_type` is - not ``None`` otherwised it is set to ``False``. + not ``None`` otherwise it is set to ``False``. * masks type - Replace removed masks 'dfl_full' and 'facehull' with `components` mask diff --git a/tests/lib/model/nn_blocks_test.py b/tests/lib/model/nn_blocks_test.py index 9c2c5c980a..9515a2dc9d 100644 --- a/tests/lib/model/nn_blocks_test.py +++ b/tests/lib/model/nn_blocks_test.py @@ -15,14 +15,14 @@ from lib.model.nn_blocks import NNBlocks from lib.utils import get_backend -_PARAMS = ["use_subpixel", "use_icnr_init", "use_convaware_init", "use_reflect_padding"] +_PARAMS = ["use_icnr_init", "use_convaware_init", "use_reflect_padding"] _VALUES = list(product([True, False], repeat=len(_PARAMS))) _IDS = ["{}[{}]".format("|".join([_PARAMS[idx] for idx, b in enumerate(v) if b]), get_backend().upper()) for v in _VALUES] def block_test(layer_func, kwargs={}, input_shape=None): - """Test routine for a faceswaps neural network blocks. + """Test routine for faceswap neural network blocks. 
 
     Tests are simple and are to ensure that the blocks compile on both tensorflow and plaidml
     backends
@@ -62,13 +62,9 @@ def block_test(layer_func, kwargs={}, input_shape=None):
 
 
 @pytest.mark.parametrize(_PARAMS, _VALUES, ids=_IDS)
-def test_blocks(use_subpixel, use_icnr_init, use_convaware_init, use_reflect_padding):
+def test_blocks(use_icnr_init, use_convaware_init, use_reflect_padding):
     """ Test for all blocks contained within the NNBlocks Class """
-    if get_backend() == "amd" and use_subpixel:
-        # Subpixel upscaling does not work on plaidml so skip this test
-        pytest.skip("Subpixel upscaling not supported in plaidML")
-    cls_ = NNBlocks(use_subpixel=use_subpixel,
-                    use_icnr_init=use_icnr_init,
+    cls_ = NNBlocks(use_icnr_init=use_icnr_init,
                     use_convaware_init=use_convaware_init,
                     use_reflect_padding=use_reflect_padding)
     block_test(cls_.conv2d, input_shape=(2, 5, 5, 128), kwargs=dict(filters=1024, kernel_size=3))
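
Note on the nn_blocks.py change: PixelShuffler and the removed SubPixelUpscaling both implement the sub-pixel convolution rearrangement from Shi et al. (https://arxiv.org/pdf/1609.05158.pdf, the paper linked in the removed config item). As the removed config text says, they perform the same operation with different backend ops, so keeping only the backend-agnostic PixelShuffler loses nothing. A minimal NumPy sketch of that rearrangement, for illustration only (the exact channel ordering of the library's PixelShuffler layer may differ):

import numpy as np

def pixel_shuffle(tensor, scale=2):
    """ Rearrange (batch, h, w, c * scale**2) into (batch, h * scale, w * scale, c). """
    batch, height, width, channels = tensor.shape
    out_channels = channels // (scale ** 2)
    # Split the channel axis into (scale, scale, out_channels) blocks...
    var_x = tensor.reshape(batch, height, width, scale, scale, out_channels)
    # ...then interleave the two scale axes with the height and width axes
    var_x = var_x.transpose(0, 1, 3, 2, 4, 5)
    return var_x.reshape(batch, height * scale, width * scale, out_channels)

dummy = np.random.rand(2, 5, 5, 4).astype("float32")  # 4 channels = 1 * 2**2
assert pixel_shuffle(dummy, scale=2).shape == (2, 10, 10, 1)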
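
On the test changes: dropping use_subpixel from _PARAMS halves the parametrized matrix, since itertools.product enumerates every boolean combination of the remaining flags. With three flags that is 2**3 = 8 cases per backend instead of 16, and the plaidML skip branch disappears because every remaining combination runs on both backends:

from itertools import product

_PARAMS = ["use_icnr_init", "use_convaware_init", "use_reflect_padding"]
_VALUES = list(product([True, False], repeat=len(_PARAMS)))
print(len(_VALUES))  # 8 combinations, down from 2**4 = 16 with use_subpixel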