From 68281bb65f8388870a9c3ba43fcaf9a0ced0fe28 Mon Sep 17 00:00:00 2001 From: tazlin Date: Wed, 4 Oct 2023 15:35:22 -0400 Subject: [PATCH 1/4] fix: better signal to comfyui that lowvram mode is desired --- hordelib/comfy_horde.py | 1 + requirements.txt | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/hordelib/comfy_horde.py b/hordelib/comfy_horde.py index 0d568a02..40bf6e6c 100644 --- a/hordelib/comfy_horde.py +++ b/hordelib/comfy_horde.py @@ -146,6 +146,7 @@ def always_cpu(parameters, dtype): comfy.model_management.unet_inital_load_device = always_cpu comfy.model_management.DISABLE_SMART_MEMORY = True + comfy.model_management.lowvram_available = True total_vram = get_torch_total_vram_mb() total_ram = psutil.virtual_memory().total / (1024 * 1024) diff --git a/requirements.txt b/requirements.txt index 060d089a..593e6b34 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,7 +12,7 @@ torchsde einops open-clip-torch transformers>=4.25.1 -safetensors +safetensors>=0.3.0 pytorch_lightning pynvml aiohttp From ed3120a23d2986564bf9aff9605a0f9576d71e16 Mon Sep 17 00:00:00 2001 From: tazlin Date: Thu, 5 Oct 2023 12:29:51 -0400 Subject: [PATCH 2/4] fix: more relaxed memory management (allows high vram?) 
--- hordelib/comfy_horde.py | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/hordelib/comfy_horde.py b/hordelib/comfy_horde.py index 40bf6e6c..e8591047 100644 --- a/hordelib/comfy_horde.py +++ b/hordelib/comfy_horde.py @@ -138,15 +138,19 @@ def do_comfy_import(): import comfy.model_management - comfy.model_management.vram_state = comfy.model_management.VRAMState.LOW_VRAM - comfy.model_management.set_vram_to = comfy.model_management.VRAMState.LOW_VRAM + # comfy.model_management.vram_state = comfy.model_management.VRAMState.HIGH_VRAM + # comfy.model_management.set_vram_to = comfy.model_management.VRAMState.HIGH_VRAM - def always_cpu(parameters, dtype): - return torch.device("cpu") + logger.info("Comfy_Horde initialised") - comfy.model_management.unet_inital_load_device = always_cpu + # def always_cpu(parameters, dtype): + # return torch.device("cpu") + + # comfy.model_management.unet_inital_load_device = always_cpu comfy.model_management.DISABLE_SMART_MEMORY = True - comfy.model_management.lowvram_available = True + # comfy.model_management.lowvram_available = True + + # comfy.model_management.unet_offload_device = _unet_offload_device_hijack total_vram = get_torch_total_vram_mb() total_ram = psutil.virtual_memory().total / (1024 * 1024) @@ -163,27 +167,33 @@ def always_cpu(parameters, dtype): def cleanup(): - logger.debug(f"{len(_comfy_current_loaded_models)} models loaded in comfy") - _comfy_soft_empty_cache() - logger.debug(f"{len(_comfy_current_loaded_models)} models loaded in comfy") def unload_all_models_vram(): from hordelib.shared_model_manager import SharedModelManager + logger.debug("In unload_all_models_vram") + logger.debug(f"{len(_comfy_current_loaded_models)} models loaded in comfy") # _comfy_free_memory(_comfy_get_total_memory(), _comfy_get_torch_device()) with torch.no_grad(): - for model in _comfy_current_loaded_models: - model.model_unload() - _comfy_soft_empty_cache() + try: + for model in 
_comfy_current_loaded_models: + model.model_unload() + _comfy_soft_empty_cache() + except Exception as e: + logger.error(f"Exception during comfy unload: {e}") + _comfy_cleanup_models() + _comfy_soft_empty_cache() logger.debug(f"{len(_comfy_current_loaded_models)} models loaded in comfy") def unload_all_models_ram(): from hordelib.shared_model_manager import SharedModelManager + logger.debug("In unload_all_models_ram") + SharedModelManager.manager._models_in_ram = {} logger.debug(f"{len(_comfy_current_loaded_models)} models loaded in comfy") with torch.no_grad(): From d8961bb2bd20266e970c31ced8a008c218a05817 Mon Sep 17 00:00:00 2001 From: tazlin Date: Thu, 5 Oct 2023 12:30:19 -0400 Subject: [PATCH 3/4] chore: latest comfyui version --- hordelib/consts.py | 7 +------ hordelib/nodes/node_model_loader.py | 2 ++ 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/hordelib/consts.py b/hordelib/consts.py index 98fdb9eb..51f71ebe 100644 --- a/hordelib/consts.py +++ b/hordelib/consts.py @@ -6,7 +6,7 @@ from hordelib.config_path import get_hordelib_path -COMFYUI_VERSION = "099226015edcdf595d915f0f39359ef0d83fbba6" +COMFYUI_VERSION = "0e763e880f5e838e7a1e3914444cae6790c48627" """The exact version of ComfyUI version to load.""" REMOTE_PROXY = "" @@ -22,11 +22,6 @@ class HordeSupportedBackends(Enum): ComfyUI = auto() -# Models Excluded from hordelib (for now) # FIXME -# this could easily be a json file on the AI-Horde-image-model-reference repo -EXCLUDED_MODEL_NAMES = ["pix2pix"] - - class MODEL_CATEGORY_NAMES(StrEnum): """Look up str enum for the categories of models (compvis, controlnet, clip, etc...).""" diff --git a/hordelib/nodes/node_model_loader.py b/hordelib/nodes/node_model_loader.py index 588db649..2ece4707 100644 --- a/hordelib/nodes/node_model_loader.py +++ b/hordelib/nodes/node_model_loader.py @@ -59,6 +59,8 @@ def load_checkpoint( same_loaded_model[0][0].model.apply(make_regular) make_regular_vae(same_loaded_model[0][2]) + logger.debug("Model was 
previously loaded, returning it.") + return same_loaded_model[0] if not ckpt_name: From e6955eaf43ead5d02e4d074e74c3c686b8a34353 Mon Sep 17 00:00:00 2001 From: tazlin Date: Thu, 5 Oct 2023 12:30:50 -0400 Subject: [PATCH 4/4] feat: use torch 2.1 + CU121 --- requirements.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements.txt b/requirements.txt index 593e6b34..9e8951b5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,10 +1,10 @@ # Add this in for tox, comment out for build ---extra-index-url https://download.pytorch.org/whl/cu118 +--extra-index-url https://download.pytorch.org/whl/cu121 horde_sdk>=0.7.10 horde_model_reference>=0.5.2 pydantic -torch>=2.0.0 -xformers>=0.0.19 +torch>=2.1.0 +# xformers>=0.0.19 torchvision torchaudio torchdiffeq