diff --git a/.install/windows/install.nsi b/.install/windows/install.nsi
index 528d21689a..a81f04fa98 100644
--- a/.install/windows/install.nsi
+++ b/.install/windows/install.nsi
@@ -1,5 +1,6 @@
 !include MUI2.nsh
 !include nsDialogs.nsh
+!include winmessages.nsh
 !include LogicLib.nsh
 !include CPUFeatures.nsh
 !include MultiDetailPrint.nsi
@@ -46,7 +47,8 @@ Var InstallFailed
 Var lblPos
 Var hasAVX
 Var hasSSE4
-Var noNvidia
+Var setupType
+Var ctlRadio
 Var ctlCondaText
 Var ctlCondaButton
 Var Log
@@ -139,13 +141,26 @@ Function pgPrereqCreate
         intOp $lblPos $lblPos + 7
     ${EndIf}
     ${NSD_CreateLabel} 10% $lblPos% 80% 14u "Faceswap"
+    Pop $0
     StrCpy $lblPos 46

 # Info Custom Options
     ${NSD_CreateGroupBox} 5% 40% 90% 60% "Custom Items"
     Pop $0
-    ${NSD_CreateCheckBox} 10% $lblPos% 80% 11u " IMPORTANT! Check here if you do NOT have an NVIDIA graphics card"
-    Pop $noNvidia
+    ${NSD_CreateRadioButton} 10% $lblPos% 27% 11u "Setup for NVIDIA GPU"
+    Pop $ctlRadio
+    ${NSD_AddStyle} $ctlRadio ${WS_GROUP}
+    nsDialogs::SetUserData $ctlRadio "nvidia"
+    ${NSD_OnClick} $ctlRadio RadioClick
+    ${NSD_CreateRadioButton} 40% $lblPos% 25% 11u "Setup for AMD GPU"
+    Pop $ctlRadio
+    nsDialogs::SetUserData $ctlRadio "amd"
+    ${NSD_OnClick} $ctlRadio RadioClick
+    ${NSD_CreateRadioButton} 70% $lblPos% 20% 11u "Setup for CPU"
+    Pop $ctlRadio
+    nsDialogs::SetUserData $ctlRadio "cpu"
+    ${NSD_OnClick} $ctlRadio RadioClick
+
     intOp $lblPos $lblPos + 10
     ${NSD_CreateLabel} 10% $lblPos% 80% 10u "Environment Name (NB: Existing envs with this name will be deleted):"
@@ -172,6 +187,12 @@ Function pgPrereqCreate
     nsDialogs::Show
 FunctionEnd

+Function RadioClick
+    Pop $R0
+    nsDialogs::GetUserData $R0
+    Pop $setupType
+FunctionEnd
+
 Function fnc_hCtl_test_DirRequest1_Click
     Pop $R0
     ${If} $R0 == $ctlCondaButton
@@ -185,12 +206,21 @@ Function fnc_hCtl_test_DirRequest1_Click
 FunctionEnd

 Function pgPrereqLeave
+    Call CheckSetupType
     Call CheckCustomCondaPath
-    ${NSD_GetState} $noNvidia $noNvidia
     ${NSD_GetText} $envName $envName
 FunctionEnd

+Function CheckSetupType
+    ${If} $setupType == ""
+        MessageBox MB_OK "Please specify whether to set up for Nvidia, AMD or CPU."
+        Abort
+    ${EndIf}
+    StrCpy $Log "$log(check) Setting up for: $setupType$\n"
+FunctionEnd
+
+
 Function CheckCustomCondaPath
     ${NSD_GetText} $ctlCondaText $2
     ${If} $2 != ""
@@ -392,8 +422,8 @@ FunctionEnd
 Function SetupFaceSwap
     DetailPrint "Setting up FaceSwap Environment... This may take a while"
     StrCpy $0 "${flagsSetup}"
-    ${If} $noNvidia != 1
-        StrCpy $0 "$0 --gpu"
+    ${If} $setupType != "cpu"
+        StrCpy $0 "$0 --$setupType"
     ${EndIf}
     SetDetailsPrint listonly
diff --git a/INSTALL.md b/INSTALL.md
index 3f7136311f..50fa0cf76f 100755
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -1,32 +1,32 @@
 # Installing Faceswap
-- [Installing Faceswap](#installing-faceswap)
-- [Prerequisites](#prerequisites)
-  - [Hardware Requirements](#hardware-requirements)
-  - [Supported operating systems](#supported-operating-systems)
-- [Important before you proceed](#important-before-you-proceed)
-- [Windows Install Guide](#windows-install-guide)
-  - [Installer](#installer)
-  - [Manual Install](#manual-install)
-    - [Prerequisites](#prerequisites-1)
-      - [Anaconda](#anaconda)
-      - [Git](#git)
-    - [Setup](#setup)
-      - [Anaconda](#anaconda-1)
-      - [Set up a virtual environment](#set-up-a-virtual-environment)
-      - [Entering your virtual environment](#entering-your-virtual-environment)
-    - [Faceswap](#faceswap)
-      - [Easy install](#easy-install)
-      - [Manual install](#manual-install)
-  - [Running Faceswap](#running-faceswap)
-  - [Create a desktop shortcut](#create-a-desktop-shortcut)
-  - [Updating faceswap](#updating-faceswap)
-- [General Install Guide](#general-install-guide)
-  - [Installing dependencies](#installing-dependencies)
-  - [Getting the faceswap code](#getting-the-faceswap-code)
-  - [Setup](#setup-1)
-    - [About some of the options](#about-some-of-the-options)
-  - [Run the project](#run-the-project)
-  - [Notes](#notes)
+- [Installing Faceswap](#Installing-Faceswap)
+- [Prerequisites](#Prerequisites)
+  - [Hardware Requirements](#Hardware-Requirements)
+  - [Supported operating systems](#Supported-operating-systems)
+- [Important before you proceed](#Important-before-you-proceed)
+- [Windows Install Guide](#Windows-Install-Guide)
+  - [Installer](#Installer)
+  - [Manual Install](#Manual-Install)
+    - [Prerequisites](#Prerequisites-1)
+      - [Anaconda](#Anaconda)
+      - [Git](#Git)
+    - [Setup](#Setup)
+      - [Anaconda](#Anaconda-1)
+      - [Set up a virtual environment](#Set-up-a-virtual-environment)
+      - [Entering your virtual environment](#Entering-your-virtual-environment)
+    - [Faceswap](#Faceswap)
+      - [Easy install](#Easy-install)
+      - [Manual install](#Manual-install)
+  - [Running Faceswap](#Running-Faceswap)
+  - [Create a desktop shortcut](#Create-a-desktop-shortcut)
+  - [Updating faceswap](#Updating-faceswap)
+- [General Install Guide](#General-Install-Guide)
+  - [Installing dependencies](#Installing-dependencies)
+  - [Getting the faceswap code](#Getting-the-faceswap-code)
+  - [Setup](#Setup-1)
+    - [About some of the options](#About-some-of-the-options)
+  - [Run the project](#Run-the-project)
+  - [Notes](#Notes)

 # Prerequisites
 Machine learning essentially involves a ton of trial and error. You're letting a program try millions of different settings to land on an algorithm that sort of does what you want it to do. This process is really really slow unless you have the hardware required to speed this up.
@@ -39,9 +39,8 @@ The type of computations that the process does are well suited for graphics card
 - **A powerful CPU**
     - Laptop CPUs can often run the software, but will not be fast enough to train at reasonable speeds
 - **A powerful GPU**
-  - Currently, only Nvidia GPUs are supported. AMD graphics cards are not supported.
-    This is not something that we have control over. It is a requirement of the Tensorflow library.
-  - The GPU needs to support at least CUDA Compute Capability 3.0 or higher.
+  - Currently, Nvidia GPUs are fully supported, and AMD graphics cards are partially supported through PlaidML.
+  - If using an Nvidia GPU, it needs to support at least CUDA Compute Capability 3.0 or higher.
     To see which version your GPU supports, consult this list: https://developer.nvidia.com/cuda-gpus
     Desktop cards later than the 7xx series are most likely supported.
 - **A lot of patience**
@@ -112,11 +111,12 @@ To enter the virtual environment:
 - If you have issues/errors follow the Manual install steps below.

 #### Manual install
+Do not follow these steps if the Easy Install above completed successfully.
 - Install tkinter (required for the GUI) by typing: `conda install tk`
 - Install requirements: `pip install -r requirements.txt`
 - Install Tensorflow (either GPU or CPU version depending on your setup):
-  - GPU Version: `pip install tensorflow-gpu`
-  - Non GPU Version: `pip install tensorflow`
+  - GPU Version: `conda install tensorflow-gpu`
+  - Non GPU Version: `conda install tensorflow`

 ## Running Faceswap
 - If you are not already in your virtual environment follow [these steps](#entering-your-virtual-environment)
diff --git a/README.md b/README.md
index 0163ec46ef..8f1c13bf14 100755
--- a/README.md
+++ b/README.md
@@ -12,35 +12,35 @@ FaceSwap is a tool that utilizes deep learning to recognize and swap faces in pi
 Make sure you check out [INSTALL.md](INSTALL.md) before getting started.

 - [deepfakes_faceswap](#deepfakesfaceswap)
-- [Manifesto](#manifesto)
-  - [FaceSwap is not porn.](#faceswap-is-not-porn)
-- [How To setup and run the project](#how-to-setup-and-run-the-project)
-- [Overview](#overview)
-  - [Extract](#extract)
-  - [Train](#train)
-  - [Convert](#convert)
-  - [GUI](#gui)
-- [General notes:](#general-notes)
-- [Help I need support!](#help-i-need-support)
-  - [Discord Server](#discord-server)
-  - [FaceSwap-Playground](#faceswap-playground)
-- [Donate](#donate)
+- [Manifesto](#Manifesto)
+  - [FaceSwap is not porn.](#FaceSwap-is-not-porn)
+- [How To setup and run the project](#How-To-setup-and-run-the-project)
+- [Overview](#Overview)
+  - [Extract](#Extract)
+  - [Train](#Train)
+  - [Convert](#Convert)
+  - [GUI](#GUI)
+- [General notes:](#General-notes)
+- [Help I need support!](#Help-I-need-support)
+  - [Discord Server](#Discord-Server)
+  - [FaceSwap-Playground](#FaceSwap-Playground)
+- [Donate](#Donate)
   - [@torzdf](#torzdf)
   - [@andenixa](#andenixa)
   - [@kvrooman](#kvrooman)
-- [How to contribute](#how-to-contribute)
-  - [For people interested in the generative models](#for-people-interested-in-the-generative-models)
-  - [For devs](#for-devs)
-  - [For non-dev advanced users](#for-non-dev-advanced-users)
-  - [For end-users](#for-end-users)
-  - [For haters](#for-haters)
-- [About github.com/deepfakes](#about-githubcomdeepfakes)
-  - [What is this repo?](#what-is-this-repo)
-  - [Why this repo?](#why-this-repo)
-  - [Why is it named 'deepfakes' if it is not /u/deepfakes?](#why-is-it-named-deepfakes-if-it-is-not-udeepfakes)
-  - [What if /u/deepfakes feels bad about that?](#what-if-udeepfakes-feels-bad-about-that)
-- [About machine learning](#about-machine-learning)
-  - [How does a computer know how to recognize/shape faces? How does machine learning work? What is a neural network?](#how-does-a-computer-know-how-to-recognizeshape-faces-how-does-machine-learning-work-what-is-a-neural-network)
+- [How to contribute](#How-to-contribute)
+  - [For people interested in the generative models](#For-people-interested-in-the-generative-models)
+  - [For devs](#For-devs)
+  - [For non-dev advanced users](#For-non-dev-advanced-users)
+  - [For end-users](#For-end-users)
+  - [For haters](#For-haters)
+- [About github.com/deepfakes](#About-githubcomdeepfakes)
+  - [What is this repo?](#What-is-this-repo)
+  - [Why this repo?](#Why-this-repo)
+  - [Why is it named 'deepfakes' if it is not /u/deepfakes?](#Why-is-it-named-deepfakes-if-it-is-not-udeepfakes)
+  - [What if /u/deepfakes feels bad about that?](#What-if-udeepfakes-feels-bad-about-that)
+- [About machine learning](#About-machine-learning)
+  - [How does a computer know how to recognize/shape faces? How does machine learning work? What is a neural network?](#How-does-a-computer-know-how-to-recognizeshape-faces-How-does-machine-learning-work-What-is-a-neural-network)

 # Manifesto
@@ -64,7 +64,7 @@ We are very troubled by the fact that FaceSwap can be used for unethical and dis
 # How To setup and run the project
 FaceSwap is a Python program that will run on multiple Operating Systems including Windows, Linux, and MacOS.
-See [INSTALL.md](INSTALL.md) for full installation instructions. You will need a modern GPU with CUDA support for best performance.
+See [INSTALL.md](INSTALL.md) for full installation instructions. You will need a modern GPU with CUDA support for best performance. AMD GPUs are partially supported.

 # Overview
 The project has multiple entry points. You will have to:
@@ -135,7 +135,6 @@ Responsible for consolidating the converters, adding a lot of code to fix model
 ## For devs
  - Read this README entirely
  - Fork the repo
- - Download the data with the link provided above
  - Play with it
  - Check issues with the 'dev' tag
  - For devs more interested in computer vision and openCV, look at issues with the 'opencv' tag. Also feel free to add your own alternatives/improvements
@@ -143,7 +142,6 @@ Responsible for consolidating the converters, adding a lot of code to fix model
 ## For non-dev advanced users
  - Read this README entirely
  - Clone the repo
- - Download the data with the link provided above
  - Play with it
  - Check issues with the 'advuser' tag
  - Also go to the 'faceswap-playground' repo and help others.
diff --git a/lib/cli.py b/lib/cli.py
index 2c6c15bceb..c4d1ca3f0d 100644
--- a/lib/cli.py
+++ b/lib/cli.py
@@ -108,6 +108,9 @@ def execute_script(self, arguments):
         is_gui = hasattr(arguments, "redirect_gui") and arguments.redirect_gui
         log_setup(arguments.loglevel, arguments.logfile, self.command, is_gui)
         logger.debug("Executing: %s. PID: %s", self.command, os.getpid())
+        if hasattr(arguments, "amd") and arguments.amd:
+            from lib.plaidml_tools import setup_plaidml
+            setup_plaidml(arguments.loglevel)
         try:
             script = self.import_script()
             process = script(arguments)
@@ -356,6 +359,11 @@ def get_global_arguments():
         """ Arguments that are used in ALL parts of Faceswap
            DO NOT override this """
         global_args = list()
+        global_args.append({"opts": ("-amd", "--amd"),
+                            "action": "store_true",
+                            "dest": "amd",
+                            "default": False,
+                            "help": "AMD GPU users must enable this option for PlaidML support"})
         global_args.append({"opts": ("-C", "--configfile"),
                             "action": FileFullPaths,
                             "filetypes": "ini",
diff --git a/lib/gpu_stats.py b/lib/gpu_stats.py
index 31af29f38a..8cf2cbf080 100644
--- a/lib/gpu_stats.py
+++ b/lib/gpu_stats.py
@@ -5,6 +5,10 @@
 import os
 import platform

+from lib.utils import keras_backend_quiet
+
+K = keras_backend_quiet()
+
 if platform.system() == 'Darwin':
     import pynvx  # pylint: disable=import-error
     IS_MACOS = True
@@ -12,6 +16,12 @@
     import pynvml
     IS_MACOS = False

+# Limited PlaidML/AMD Stats
+try:
+    from lib.plaidml_tools import PlaidMLStats as plaidlib  # pylint:disable=ungrouped-imports
+except ImportError:
+    plaidlib = None
+

 class GPUStats():
     """ Holds information about system GPU(s) """
@@ -23,6 +33,7 @@ def __init__(self, log=True):
             self.logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
             self.logger.debug("Initializing %s", self.__class__.__name__)

+        self.plaid = None
         self.initialized = False
         self.device_count = 0
         self.active_devices = list()
@@ -31,7 +42,7 @@ def __init__(self, log=True):
         self.devices = None
         self.vram = None

-        self.initialize()
+        self.initialize(log)

         self.driver = self.get_driver()
         self.devices = self.get_devices()
@@ -45,10 +56,14 @@ def __init__(self, log=True):
         if self.logger:
             self.logger.debug("Initialized %s", self.__class__.__name__)

-    def initialize(self):
+    def initialize(self, log=False):
         """ Initialize pynvml """
         if not self.initialized:
-            if IS_MACOS:
+            if K.backend() == "plaidml.keras.backend":
+                if self.logger:
+                    self.logger.debug("plaidML Detected. Using plaidMLStats")
+                self.plaid = plaidlib(log=log)
+            elif IS_MACOS:
                 if self.logger:
                     self.logger.debug("macOS Detected. Using pynvx")
                 try:
@@ -63,9 +78,11 @@ def initialize(self):
                     pynvml.nvmlInit()
                 except (pynvml.NVMLError_LibraryNotFound,  # pylint: disable=no-member
                         pynvml.NVMLError_DriverNotLoaded,  # pylint: disable=no-member
-                        pynvml.NVMLError_NoPermission):  # pylint: disable=no-member
-                    self.initialized = True
-                    return
+                        pynvml.NVMLError_NoPermission) as err:  # pylint: disable=no-member
+                    if plaidlib is not None:
+                        self.plaid = plaidlib(log=log)
+                    else:
+                        raise err
             self.initialized = True
             self.get_device_count()
             self.get_active_devices()
@@ -75,13 +92,15 @@ def shutdown(self):
         """ Shutdown pynvml """
         if self.initialized:
             self.handles = None
-            if not IS_MACOS:
+            if not IS_MACOS and not self.plaid:
                 pynvml.nvmlShutdown()
             self.initialized = False

     def get_device_count(self):
         """ Return count of Nvidia devices """
-        if IS_MACOS:
+        if self.plaid is not None:
+            self.device_count = self.plaid.device_count
+        elif IS_MACOS:
             self.device_count = pynvx.cudaDeviceGetCount(ignore=True)
         else:
             try:
@@ -93,19 +112,24 @@ def get_device_count(self):

     def get_active_devices(self):
         """ Return list of active Nvidia devices """
-        devices = os.environ.get("CUDA_VISIBLE_DEVICES", None)
-        if self.device_count == 0:
-            self.active_devices = list()
-        elif devices is not None:
-            self.active_devices = [int(i) for i in devices.split(",") if devices]
+        if self.plaid is not None:
+            self.active_devices = self.plaid.active_devices
         else:
-            self.active_devices = list(range(self.device_count))
-        if self.logger:
-            self.logger.debug("Active GPU Devices: %s", self.active_devices)
+            devices = os.environ.get("CUDA_VISIBLE_DEVICES", None)
+            if self.device_count == 0:
+                self.active_devices = list()
+            elif devices is not None:
+                self.active_devices = [int(i) for i in devices.split(",") if devices]
+            else:
+                self.active_devices = list(range(self.device_count))
+        if self.logger:
+            self.logger.debug("Active GPU Devices: %s", self.active_devices)

     def get_handles(self):
         """ Return all listed Nvidia handles """
-        if IS_MACOS:
+        if self.plaid is not None:
+            self.handles = self.plaid.devices
+        elif IS_MACOS:
             self.handles = pynvx.cudaDeviceGetHandles(ignore=True)
         else:
             self.handles = [pynvml.nvmlDeviceGetHandleByIndex(i)
@@ -115,7 +139,9 @@ def get_handles(self):

     def get_driver(self):
         """ Get the driver version """
-        if IS_MACOS:
+        if self.plaid is not None:
+            driver = self.plaid.drivers
+        elif IS_MACOS:
             driver = pynvx.cudaSystemGetDriverVersion(ignore=True)
         else:
             try:
@@ -131,6 +157,8 @@ def get_devices(self):
             self.initialize()
         if self.device_count == 0:
             names = list()
+        if self.plaid is not None:
+            names = self.plaid.names
         elif IS_MACOS:
             names = [pynvx.cudaGetName(handle, ignore=True)
                      for handle in self.handles]
@@ -146,6 +174,8 @@ def get_vram(self):
             self.initialize()
         if self.device_count == 0:
             vram = list()
+        elif self.plaid:
+            vram = self.plaid.vram
         elif IS_MACOS:
             vram = [pynvx.cudaGetMemTotal(handle, ignore=True) / (1024 * 1024)
                     for handle in self.handles]
@@ -160,7 +190,14 @@ def get_vram(self):
     def get_used(self):
         """ Return the vram in use """
         self.initialize()
-        if IS_MACOS:
+        if self.plaid:
+            # NB There is no useful way to get allocated VRAM on PlaidML.
+            # OpenCL loads and unloads VRAM as required, so this returns the global memory size
+            # less the maximum allowed allocation size. It's not particularly useful
+            vram = [self.plaid.vram[idx] - self.plaid.max_alloc[idx]
+                    for idx in range(self.device_count)]
+
+        elif IS_MACOS:
             vram = [pynvx.cudaGetMemUsed(handle, ignore=True) / (1024 * 1024)
                     for handle in self.handles]
         else:
@@ -175,7 +212,12 @@ def get_used(self):
     def get_free(self):
         """ Return the vram available """
         self.initialize()
-        if IS_MACOS:
+        if self.plaid:
+            # NB There is no useful way to get free VRAM on PlaidML.
+            # OpenCL loads and unloads VRAM as required, so this returns the maximum allowed
+            # allocation size. It's not particularly useful
+            vram = self.plaid.max_alloc
+        elif IS_MACOS:
             vram = [pynvx.cudaGetMemFree(handle, ignore=True) / (1024 * 1024)
                     for handle in self.handles]
         else:
diff --git a/lib/plaidml_tools.py b/lib/plaidml_tools.py
new file mode 100644
index 0000000000..9cc178ed16
--- /dev/null
+++ b/lib/plaidml_tools.py
@@ -0,0 +1,206 @@
+#!/usr/bin/env python3
+
+""" PlaidML tools
+
+    Must be kept separate from keras as the keras backend needs to be set from this module
+"""
+
+import json
+import logging
+import os
+
+import plaidml
+
+_INIT = False
+_LOGGER = None
+
+
+class PlaidMLStats():
+    """ Stats for plaidML """
+    def __init__(self, loglevel="INFO", log=True):
+        if not _INIT and log:
+            # Logger is held internally, as we don't want to log
+            # when obtaining system stats on crash
+            global _LOGGER  # pylint:disable=global-statement
+            _LOGGER = logging.getLogger(__name__)  # pylint:disable=invalid-name
+            _LOGGER.debug("Initializing: %s: (loglevel: %s)", self.__class__.__name__, loglevel)
+        self.initialize(loglevel)
+        self.ctx = plaidml.Context()
+        self.supported_devices = self.get_supported_devices()
+        self.devices = self.get_all_devices()
+
+        self.device_details = [json.loads(device.details.decode()) for device in self.devices]
+        if _LOGGER:
+            _LOGGER.debug("Initialized: %s", self.__class__.__name__)
+
+    # PROPERTIES
+    @property
+    def active_devices(self):
+        """ Return the active device IDs """
+        return plaidml.settings.device_ids
+
+    @property
+    def device_count(self):
+        """ Return count of PlaidML Devices """
+        return len(self.devices)
+
+    @property
+    def drivers(self):
+        """ Return all PlaidML device drivers """
+        return [device.get("driverVersion", "No Driver Found") for device in self.device_details]
+
+    @property
+    def vram(self):
+        """ Return Total VRAM for all PlaidML Devices """
+        return [int(device.get("globalMemSize", 0)) / (1024 * 1024)
+                for device in self.device_details]
+
+    @property
+    def max_alloc(self):
+        """ Return Maximum allowed VRAM allocation for all PlaidML Devices """
+        return [int(device.get("maxMemAllocSize", 0)) / (1024 * 1024)
+                for device in self.device_details]
+
+    @property
+    def ids(self):
+        """ Return all PlaidML Device IDs """
+        return [device.id.decode() for device in self.devices]
+
+    @property
+    def names(self):
+        """ Return all PlaidML Device Names """
+        return ["{} - {}".format(device.get("vendor", "unknown"), device.get("name", "unknown"))
+                for device in self.device_details]
+
+    @property
+    def supported_indices(self):
+        """ Return the indices from self.devices of GPUs categorized as supported """
+        retval = [idx for idx, device in enumerate(self.devices)
+                  if device in self.supported_devices]
+        if _LOGGER:
+            _LOGGER.debug(retval)
+        return retval
+
+    @property
+    def experimental_indices(self):
+        """ Return the indices from self.devices of GPUs categorized as experimental """
+        retval = [idx for idx, device in enumerate(self.devices)
+                  if device not in self.supported_devices]
+        if _LOGGER:
+            _LOGGER.debug(retval)
+        return retval
+
+    # INITIALIZATION
+    def initialize(self, loglevel):
+        """ Initialize PlaidML """
+        global _INIT  # pylint:disable=global-statement
+        if _INIT:
+            if _LOGGER:
+                _LOGGER.debug("PlaidML already initialized")
+            return
+        if _LOGGER:
+            _LOGGER.debug("Initializing PlaidML")
+        self.set_plaidml_logger()
+        self.set_verbosity(loglevel)
+        _INIT = True
+        if _LOGGER:
+            _LOGGER.debug("Initialized PlaidML")
+
+    @staticmethod
+    def set_plaidml_logger():
+        """ Set PlaidMLs default logger to Faceswap Logger and prevent propagation """
+        if _LOGGER:
+            _LOGGER.debug("Setting PlaidML Default Logger")
+        plaidml.DEFAULT_LOG_HANDLER = logging.getLogger("plaidml_root")
+        plaidml.DEFAULT_LOG_HANDLER.propagate = 0
+        if _LOGGER:
+            _LOGGER.debug("Set PlaidML Default Logger")
+
+    @staticmethod
+    def set_verbosity(loglevel):
+        """ Set the PlaidML Verbosity """
+        if _LOGGER:
+            _LOGGER.debug("Setting PlaidML Loglevel: %s", loglevel)
+        numeric_level = getattr(logging, loglevel.upper(), None)
+        if numeric_level < 10:
+            # DEBUG Logging
+            plaidml._internal_set_vlog(1)  # pylint:disable=protected-access
+        elif numeric_level < 20:
+            # INFO Logging
+            plaidml._internal_set_vlog(0)  # pylint:disable=protected-access
+        else:
+            # WARNING Logging
+            plaidml.quiet()
+
+    def get_supported_devices(self):
+        """ Return a list of supported devices """
+        experimental_setting = plaidml.settings.experimental
+        plaidml.settings.experimental = False
+        devices, _ = plaidml.devices(self.ctx, limit=100, return_all=True)
+        plaidml.settings.experimental = experimental_setting
+
+        supported = [device for device in devices
+                     if json.loads(device.details.decode()).get("type", "cpu").lower() == "gpu"]
+        if _LOGGER:
+            _LOGGER.debug(supported)
+        return supported
+
+    def get_all_devices(self):
+        """ Return list of supported and experimental devices """
+        experimental_setting = plaidml.settings.experimental
+        plaidml.settings.experimental = True
+        devices, _ = plaidml.devices(self.ctx, limit=100, return_all=True)
+        plaidml.settings.experimental = experimental_setting
+
+        experimental = [device for device in devices
+                        if json.loads(device.details.decode()).get("type", "cpu").lower() == "gpu"]
+        if _LOGGER:
+            _LOGGER.debug("Experimental Devices: %s", experimental)
+        all_devices = experimental + self.supported_devices
+        if _LOGGER:
+            _LOGGER.debug(all_devices)
+        return all_devices
+
+    def load_active_devices(self):
+        """ Load settings from PlaidML.settings.usersettings or select biggest gpu """
+        if not os.path.exists(plaidml.settings.user_settings):  # pylint:disable=no-member
+            if _LOGGER:
+                _LOGGER.debug("Setting largest PlaidML device")
+            self.set_largest_gpu()
+        else:
+            if _LOGGER:
+                _LOGGER.debug("Setting PlaidML devices from user_settings")
+
+    def set_largest_gpu(self):
+        """ Get a supported GPU with largest VRAM. If no supported, get largest experimental """
+        category = "supported" if self.supported_devices else "experimental"
+        if _LOGGER:
+            _LOGGER.debug("Obtaining largest %s device", category)
+        indices = getattr(self, "{}_indices".format(category))
+        max_vram = max([self.vram[idx] for idx in indices])
+        if _LOGGER:
+            _LOGGER.debug("Max VRAM: %s", max_vram)
+        gpu_idx = min([idx for idx, vram in enumerate(self.vram)
+                       if vram == max_vram and idx in indices])
+        if _LOGGER:
+            _LOGGER.debug("GPU IDX: %s", gpu_idx)
+
+        selected_gpu = self.ids[gpu_idx]
+        if _LOGGER:
+            _LOGGER.info("Setting GPU to largest available %s device. If you want to override "
+                         "this selection, run `plaidml-setup` from the command line.", category)
+
+        plaidml.settings.experimental = category == "experimental"
+        plaidml.settings.device_ids = [selected_gpu]
+
+
+def setup_plaidml(loglevel):
+    """ Setup plaidml for AMD Cards """
+    logger = logging.getLogger(__name__)  # pylint:disable=invalid-name
+    logger.info("Setting up for PlaidML")
+    logger.verbose("Setting Keras Backend to PlaidML")
+    os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
+    plaid = PlaidMLStats(loglevel)
+    plaid.load_active_devices()
+    logger.info("Using GPU: %s", plaid.active_devices)
+    logger.info("Successfully set up for PlaidML")
diff --git a/lib/sysinfo.py b/lib/sysinfo.py
index 815560388a..bef8061d8a 100644
--- a/lib/sysinfo.py
+++ b/lib/sysinfo.py
@@ -194,7 +194,7 @@ def cudnn_version(self):
                 break

         if not cudnn_checkfile:
-            retval = "Not Found"
+            retval = "No global version found"
             if self.is_conda:
                 retval += ". Check Conda packages for Conda cuDNN"
             return retval
@@ -214,7 +214,7 @@ def cudnn_version(self):
             if found == 3:
                 break
         if found != 3:
-            retval = "Not Found"
+            retval = "No global version found"
             if self.is_conda:
                 retval += ". Check Conda packages for Conda cuDNN"
             return retval
@@ -286,7 +286,7 @@ def cuda_version_linux(self):
             if chk:
                 break
         if not chk:
-            retval = "Not Found"
+            retval = "No global version found"
             if self.is_conda:
                 retval += ". Check Conda packages for Conda Cuda"
             return retval
@@ -299,7 +299,7 @@ def cuda_version_windows(self):
                      for key in os.environ.keys()
                      if key.lower().startswith("cuda_path_v")]
         if not cuda_keys:
-            retval = "Not Found"
+            retval = "No global version found"
             if self.is_conda:
                 retval += ". Check Conda packages for Conda Cuda"
             return retval
diff --git a/lib/utils.py b/lib/utils.py
index 34c150b808..a4e2c0fc87 100644
--- a/lib/utils.py
+++ b/lib/utils.py
@@ -3,6 +3,7 @@

 import logging
 import os
+import sys
 import urllib
 import warnings
 import zipfile
@@ -18,11 +19,8 @@
 from tqdm import tqdm

 from lib.faces_detect import BoundingBox, DetectedFace
-from lib.logger import get_loglevel
-
-logger = logging.getLogger(__name__)  # pylint: disable=invalid-name

 # Global variables
 _image_extensions = [  # pylint: disable=invalid-name
     ".bmp", ".jpeg", ".jpg", ".png", ".tif", ".tiff"]
@@ -32,6 +30,7 @@

 def get_folder(path, make_folder=True):
     """ Return a path to a folder, creating it if it doesn't exist """
+    logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
     logger.debug("Requested path: '%s'", path)
     output_dir = Path(path)
     if not make_folder and not output_dir.exists():
@@ -44,6 +43,7 @@ def get_folder(path, make_folder=True):

 def get_image_paths(directory):
     """ Return a list of images that reside in a folder """
+    logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
     image_extensions = _image_extensions
     dir_contents = list()

@@ -67,6 +67,7 @@ def get_image_paths(directory):

 def full_path_split(path):
     """ Split a given path into all of it's separate components """
+    logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
     allparts = list()
     while True:
         parts = os.path.split(path)
@@ -88,6 +89,7 @@ def cv2_read_img(filename, raise_error=False):
     """ Logs an error if the image returned is None. or an error has occured.
         Pass raise_error=True if error should be raised """
+    logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
     logger.trace("Requested image: '%s'", filename)
     success = True
     image = None
@@ -120,6 +122,7 @@ def cv2_read_img(filename, raise_error=False):

 def hash_image_file(filename):
     """ Return an image file's sha1 hash """
+    logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
     img = cv2_read_img(filename, raise_error=True)
     img_hash = sha1(img).hexdigest()
     logger.trace("filename: '%s', hash: %s", filename, img_hash)
@@ -137,6 +140,7 @@ def hash_encode_image(image, extension):

 def backup_file(directory, filename):
     """ Backup a given file by appending .bk to the end """
+    logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
     logger.trace("Backing up: '%s'", filename)
     origfile = os.path.join(directory, filename)
     backupfile = origfile + '.bk'
@@ -148,6 +152,16 @@ def backup_file(directory, filename):
     os.rename(origfile, backupfile)


+def keras_backend_quiet():
+    """ Suppresses the "Using x backend" message when importing
+        backend from keras """
+    stderr = sys.stderr
+    sys.stderr = open(os.devnull, 'w')
+    from keras import backend as K
+    sys.stderr = stderr
+    return K
+
+
 def set_system_verbosity(loglevel):
     """ Set the verbosity level of tensorflow and suppresses
         future and deprecation warnings from any modules
@@ -159,6 +173,8 @@ def set_system_verbosity(loglevel):
         2 - filter out WARNING logs
         3 - filter out ERROR logs
     """
+    logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
+    from lib.logger import get_loglevel
     numeric_level = get_loglevel(loglevel)
     loglevel = "2" if numeric_level > 15 else "0"
     logger.debug("System Verbosity level: %s", loglevel)
@@ -173,6 +189,7 @@ def rotate_landmarks(face, rotation_matrix):
     """ Rotate the landmarks and bounding box for faces found in rotated images.
         Pass in a DetectedFace object, Alignments dict or BoundingBox"""
+    logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
     logger.trace("Rotating landmarks: (rotation_matrix: %s, type(face): %s",
                  rotation_matrix, type(face))
     if isinstance(face, DetectedFace):
@@ -259,6 +276,7 @@ def camel_case_split(identifier):

 def safe_shutdown():
     """ Close queues, threads and processes in event of crash """
+    logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
     logger.debug("Safely shutting down")
     from lib.queue_manager import queue_manager
     from lib.multithreading import terminate_processes
@@ -295,6 +313,7 @@ class GetModel():
     """

     def __init__(self, model_filename, cache_dir, git_model_id):
+        self.logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
         if not isinstance(model_filename, list):
             model_filename = [model_filename]
         self.model_filename = model_filename
@@ -311,21 +330,21 @@ def _model_full_name(self):
         """ Return the model full name from the filename(s) """
         common_prefix = os.path.commonprefix(self.model_filename)
         retval = os.path.splitext(common_prefix)[0]
-        logger.trace(retval)
+        self.logger.trace(retval)
         return retval

     @property
     def _model_name(self):
         """ Return the model name from the model full name """
         retval = self._model_full_name[:self._model_full_name.rfind("_")]
-        logger.trace(retval)
+        self.logger.trace(retval)
         return retval

     @property
     def _model_version(self):
         """ Return the model version from the model full name """
         retval = int(self._model_full_name[self._model_full_name.rfind("_") + 2:])
-        logger.trace(retval)
+        self.logger.trace(retval)
         return retval

     @property
@@ -333,14 +352,14 @@ def _model_path(self):
         """ Return the model path(s) in the cache folder """
         retval = [os.path.join(self.cache_dir, fname) for fname in self.model_filename]
         retval = retval[0] if len(retval) == 1 else retval
-        logger.trace(retval)
+        self.logger.trace(retval)
         return retval

     @property
     def _model_zip_path(self):
         """ Full path to downloaded zip file """
         retval = os.path.join(self.cache_dir, "{}.zip".format(self._model_full_name))
-        logger.trace(retval)
+        self.logger.trace(retval)
         return retval

     @property
@@ -350,7 +369,7 @@ def _model_exists(self):
             retval = all(os.path.exists(pth) for pth in self._model_path)
         else:
             retval = os.path.exists(self._model_path)
-        logger.trace(retval)
+        self.logger.trace(retval)
         return retval

     @property
@@ -359,7 +378,7 @@ def _plugin_section(self):
         path = os.path.normpath(self.cache_dir)
         split = path.split(os.sep)
         retval = split[split.index("plugins") + 1]
-        logger.trace(retval)
+        self.logger.trace(retval)
         return retval

     @property
@@ -367,7 +386,7 @@ def _url_section(self):
         """ Return the section ID in github for this plugin type """
         sections = dict(extract=1, train=2, convert=3)
         retval = sections[self._plugin_section]
-        logger.trace(retval)
+        self.logger.trace(retval)
         return retval

     @property
@@ -375,7 +394,7 @@ def _url_download(self):
         """ Base URL for models """
         tag = "v{}.{}.{}".format(self._url_section, self.git_model_id, self._model_version)
         retval = "{}/{}/{}.zip".format(self.url_base, tag, self._model_full_name)
-        logger.trace("Download url: %s", retval)
+        self.logger.trace("Download url: %s", retval)
         return retval

     @property
@@ -383,13 +402,13 @@ def _url_partial_size(self):
         """ Return how many bytes have already been downloaded """
         zip_file = self._model_zip_path
         retval = os.path.getsize(zip_file) if os.path.exists(zip_file) else 0
-        logger.trace(retval)
+        self.logger.trace(retval)
         return retval

     def get(self):
         """ Check the model exists, if not, download and unzip into location """
         if self._model_exists:
-            logger.debug("Model exists: %s", self._model_path)
+            self.logger.debug("Model exists: %s", self._model_path)
             return
         self.download_model()
         self.unzip_model()
@@ -397,7 +416,7 @@ def get(self):

     def download_model(self):
         """ Download model zip to cache dir """
-        logger.info("Downloading model: '%s' from: %s", self._model_name, self._url_download)
+        self.logger.info("Downloading model: '%s' from: %s", self._model_name, self._url_download)
         for attempt in range(self.retries):
             try:
                 downloaded_size = self._url_partial_size
@@ -405,28 +424,29 @@ def download_model(self):
                 if downloaded_size != 0:
                     req.add_header("Range", "bytes={}-".format(downloaded_size))
                 response = urllib.request.urlopen(req, timeout=10)
-                logger.debug("header info: {%s}", response.info())
-                logger.debug("Return Code: %s", response.getcode())
+                self.logger.debug("header info: {%s}", response.info())
+                self.logger.debug("Return Code: %s", response.getcode())
                 self.write_zipfile(response, downloaded_size)
                 break
             except (socket_error, socket_timeout,
                     urllib.error.HTTPError, urllib.error.URLError) as err:
                 if attempt + 1 < self.retries:
-                    logger.warning("Error downloading model (%s). Retrying %s of %s...",
-                                   str(err), attempt + 2, self.retries)
+                    self.logger.warning("Error downloading model (%s). Retrying %s of %s...",
+                                        str(err), attempt + 2, self.retries)
                 else:
-                    logger.error("Failed to download model. Exiting. (Error: '%s', URL: '%s')",
-                                 str(err), self._url_download)
-                    logger.info("You can try running again to resume the download.")
-                    logger.info("Alternatively, you can manually download the model from: %s and "
-                                "unzip the contents to: %s", self._url_download, self.cache_dir)
+                    self.logger.error("Failed to download model. Exiting. (Error: '%s', URL: "
+                                      "'%s')", str(err), self._url_download)
+                    self.logger.info("You can try running again to resume the download.")
+                    self.logger.info("Alternatively, you can manually download the model from: %s "
+                                     "and unzip the contents to: %s",
+                                     self._url_download, self.cache_dir)
                     exit(1)

     def write_zipfile(self, response, downloaded_size):
         """ Write the model zip file to disk """
         length = int(response.getheader("content-length")) + downloaded_size
         if length == downloaded_size:
-            logger.info("Zip already exists. Skipping download")
+            self.logger.info("Zip already exists. Skipping download")
             return
         write_type = "wb" if downloaded_size == 0 else "ab"
         with open(self._model_zip_path, write_type) as out_file:
@@ -446,23 +466,23 @@ def write_zipfile(self, response, downloaded_size):

     def unzip_model(self):
         """ Unzip the model file to the cachedir """
-        logger.info("Extracting: '%s'", self._model_name)
+        self.logger.info("Extracting: '%s'", self._model_name)
         try:
             zip_file = zipfile.ZipFile(self._model_zip_path, "r")
             self.write_model(zip_file)
         except Exception as err:  # pylint:disable=broad-except
-            logger.error("Unable to extract model file: %s", str(err))
+            self.logger.error("Unable to extract model file: %s", str(err))
             exit(1)

     def write_model(self, zip_file):
         """ Extract files from zipfile and write, with progress bar """
         length = sum(f.file_size for f in zip_file.infolist())
         fnames = zip_file.namelist()
-        logger.debug("Zipfile: Filenames: %s, Total Size: %s", fnames, length)
+        self.logger.debug("Zipfile: Filenames: %s, Total Size: %s", fnames, length)
         pbar = tqdm(desc="Extracting", unit="B", total=length, unit_scale=True, unit_divisor=1024)
         for fname in fnames:
             out_fname = os.path.join(self.cache_dir, fname)
-            logger.debug("Extracting from: '%s' to '%s'", self._model_zip_path, out_fname)
+            self.logger.debug("Extracting from: '%s' to '%s'", self._model_zip_path, out_fname)
             zipped = zip_file.open(fname)
             with open(out_fname, "wb") as out_file:
                 while True:
diff --git a/scripts/train.py b/scripts/train.py
index 50c0e42e97..cdef9d3214 100644
--- a/scripts/train.py
+++ b/scripts/train.py
@@ -241,13 +241,15 @@ def monitor(self, thread):
         """ Monitor the console, and generate + monitor preview if requested """
         is_preview = self.args.preview
         logger.debug("Launching Monitor")
-        logger.info("R|===============================================")
-        logger.info("R|- Starting -")
+        logger.info("R|===================================================")
+        logger.info("R| Starting")
         if is_preview:
-            logger.info("R|- Using live preview -")
-            logger.info("R|- Press 'ENTER' to save and quit -")
-            logger.info("R|- Press 'S' to save model weights immediately -")
-            logger.info("R|===============================================")
+            logger.info("R| Using live preview")
+            logger.info("R| Press '%s' to save and quit",
+                        "Terminate" if self.args.redirect_gui else "ENTER")
+            if not self.args.redirect_gui:
+                logger.info("R| Press 'S' to save model weights immediately")
+            logger.info("R|===================================================")

         keypress = KBHit(is_gui=self.args.redirect_gui)
         err = False
diff --git a/setup.py b/setup.py
index 7df42bdaf2..eb7d8c2526 100755
--- a/setup.py
+++ b/setup.py
@@ -28,6 +28,7 @@ def __init__(self):
         self.cuda_path = ""
         self.cuda_version = ""
         self.cudnn_version = ""
+        self.enable_amd = False
         self.enable_docker = False
         self.enable_cuda = False
         self.required_packages = self.get_required_packages()
@@ -101,8 +102,10 @@ def process_arguments(self):
         for arg in argv:
             if arg == "--installer":
                 self.is_installer = True
-            if arg == "--gpu":
+            if arg == "--nvidia":
                 self.enable_cuda = True
+            if arg == "--amd":
+                self.enable_amd = True

     @staticmethod
     def get_required_packages():
@@ -207,7 +210,7 @@ def update_tf_dep(self):
             return

         if not self.enable_cuda:
-            self.required_packages.append("tensorflow")
+            self.required_packages.append("tensorflow==1.13.1")
             return

         tf_ver = None
@@ -257,6 +260,11 @@ def update_tf_dep_conda(self):
         else:
             self.required_packages.append("tensorflow-gpu==1.13.1")

+    def update_amd_dep(self):
+        """ Update amd dependency for AMD cards """
+        if self.enable_amd:
+            self.required_packages.append("plaidml-keras")
+

 class Output():
     """ Format and display output """
@@ -313,11 +321,14 @@ def __init__(self, environment):
         # Checks not required for installer
         if self.env.is_installer:
             self.env.update_tf_dep()
+            self.env.update_amd_dep()
             return

-        # Ask Docker/Cuda
-        self.docker_ask_enable()
-        self.cuda_ask_enable()
+        # Ask AMD/Docker/Cuda
+        self.amd_ask_enable()
+        if not self.env.enable_amd:
+            self.docker_ask_enable()
+            self.cuda_ask_enable()
         if self.env.os_version[0] != "Linux" and self.env.enable_docker and self.env.enable_cuda:
             self.docker_confirm()
         if self.env.enable_docker:
@@ -336,9 +347,22 @@ def __init__(self, environment):
             self.env.cuda_version = input("Manually specify CUDA version: ")

         self.env.update_tf_dep()
+        self.env.update_amd_dep()
         if self.env.os_version[0] == "Windows":
             self.tips.pip()

+    def amd_ask_enable(self):
+        """ Enable or disable PlaidML for AMD """
+        self.output.info("AMD Support: AMD GPU support is currently limited.\r\n"
+                         "Nvidia Users MUST answer 'no' to this option.")
+        i = input("Enable AMD Support? [y/N] ")
+        if i in ("Y", "y"):
+            self.output.info("AMD Support Enabled")
+            self.env.enable_amd = True
+        else:
+            self.output.info("AMD Support Disabled")
+            self.env.enable_amd = False
+
     def docker_ask_enable(self):
         """ Enable or disable Docker """
         i = input("Enable Docker? [y/N] ")
@@ -584,7 +608,7 @@ def conda_installer(self, package, channel=None, verbose=False, conda_only=False
                 run(condaexe, stdout=devnull, stderr=devnull, check=True)
         except CalledProcessError:
             if not conda_only:
-                self.output.info("Couldn't install {} with Conda. Trying pip".format(package))
+                self.output.info("{} not available in Conda. Installing with pip".format(package))
             else:
                 self.output.warning("Couldn't install {} with Conda. "
                                     "Please install this package manually".format(package))
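
Reviewer note, not part of the patch: the early call to `setup_plaidml()` in `lib/cli.py` works because multi-backend Keras reads the `KERAS_BACKEND` environment variable exactly once, when the package is first imported. A minimal sketch of that contract (assumes PlaidML and Keras are installed):

```python
# Why setup_plaidml() must run before anything imports keras:
# Keras picks its backend from KERAS_BACKEND at first import, so the
# variable has to be set before the first `import keras` in the process.
import os

os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"

import keras  # noqa: E402  # from here on, ops compile through PlaidML

print(keras.backend.backend())  # -> "plaidml.keras.backend"
```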
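For anyone verifying that their card is detected, the PlaidML calls used by `PlaidMLStats` can be exercised standalone. A sketch using only the APIs that appear in this patch (`plaidml.Context()`, `plaidml.devices()` with `return_all=True`, and the JSON device details); the output formatting is illustrative:

```python
# List every OpenCL device PlaidML can see, mirroring get_all_devices().
import json

import plaidml

ctx = plaidml.Context()
plaidml.settings.experimental = True  # include experimental (most AMD) devices
devices, _ = plaidml.devices(ctx, limit=100, return_all=True)
for device in devices:
    details = json.loads(device.details.decode())
    print("{}: {} - {} ({:.0f} MB)".format(
        device.id.decode(),
        details.get("vendor", "unknown"),
        details.get("name", "unknown"),
        int(details.get("globalMemSize", 0)) / (1024 * 1024)))
```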
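One small nit on `keras_backend_quiet()` in `lib/utils.py`: the `os.devnull` handle is never closed, and an exception during the import would leave `sys.stderr` redirected. A possible tightening, offered as a suggestion rather than part of the patch:

```python
# Suggested variant: redirect_stderr restores stderr even if the import
# raises, and the with-block closes the devnull handle on exit.
import contextlib
import os


def keras_backend_quiet():
    """ Suppress the "Using x backend" banner when importing the keras backend """
    with open(os.devnull, "w") as devnull, contextlib.redirect_stderr(devnull):
        from keras import backend as K
    return K
```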