Commit

Merge pull request #310 from Haidra-Org/main
fix: unload from RAM as intended; force empty cache
tazlin authored Aug 24, 2024
2 parents 9be80a1 + a6ec17d commit a05b763
Showing 1 changed file with 37 additions and 9 deletions.
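For orientation, the unload helpers touched here are module-level functions in the changed file; a hypothetical call pattern from a worker process (only the two function names appear in this diff, the usage itself is assumed) looks like:

# Hypothetical usage sketch; the import path follows the repo layout (hordelib/comfy_horde.py).
from hordelib.comfy_horde import unload_all_models_ram, unload_all_models_vram

unload_all_models_vram()  # free VRAM held by comfy's loaded models
unload_all_models_ram()   # also drop the shared model manager's RAM cache and force-empty the cache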
46 changes: 37 additions & 9 deletions hordelib/comfy_horde.py
@@ -247,35 +247,61 @@ def IsChangedCache_get_hijack(self, *args, **kwargs):


def unload_all_models_vram():
    log_free_ram()

    from hordelib.shared_model_manager import SharedModelManager

    logger.debug("In unload_all_models_vram")
    logger.debug(f"{len(SharedModelManager.manager._models_in_ram)} models cached in shared model manager")

    logger.debug(f"{len(_comfy_current_loaded_models)} models loaded in comfy")
    # _comfy_free_memory(_comfy_get_total_memory(), _comfy_get_torch_device())

    logger.debug("Freeing memory on all devices")
    _comfy_free_memory(1e30, _comfy_get_torch_device())
    log_free_ram()

    logger.debug("Cleaning up models")
    with torch.no_grad():
        try:
            for model in _comfy_current_loaded_models:
                model.model_unload()
            _comfy_soft_empty_cache(True)  # previously _comfy_soft_empty_cache(); True forces the cache to be emptied
            log_free_ram()
        except Exception as e:
            logger.error(f"Exception during comfy unload: {e}")
            _comfy_cleanup_models()
            _comfy_soft_empty_cache(True)  # previously _comfy_soft_empty_cache()

    logger.debug(f"{len(SharedModelManager.manager._models_in_ram)} models cached in shared model manager")
    logger.debug(f"{len(_comfy_current_loaded_models)} models loaded in comfy")


def unload_all_models_ram():
    log_free_ram()
    from hordelib.shared_model_manager import SharedModelManager

    logger.debug("In unload_all_models_ram")
    logger.debug(f"{len(SharedModelManager.manager._models_in_ram)} models cached in shared model manager")

    SharedModelManager.manager._models_in_ram = {}
    logger.debug(f"{len(_comfy_current_loaded_models)} models loaded in comfy")
    all_devices = set()
    for model in _comfy_current_loaded_models:
        all_devices.add(model.device)

    with torch.no_grad():
        # previously a single _comfy_free_memory(_comfy_get_total_memory(), _comfy_get_torch_device()) call
        for device in all_devices:
            logger.debug(f"Freeing memory on device {device}")
            _comfy_free_memory(1e30, device)
            log_free_ram()

        logger.debug("Cleaning up models")
        _comfy_cleanup_models()
        log_free_ram()

        logger.debug("Soft emptying cache")
        _comfy_soft_empty_cache(True)  # previously _comfy_soft_empty_cache(); True forces the cache to be emptied
        log_free_ram()

    logger.debug(f"{len(SharedModelManager.manager._models_in_ram)} models cached in shared model manager")
    logger.debug(f"{len(_comfy_current_loaded_models)} models loaded in comfy")


@@ -292,8 +318,10 @@ def get_torch_free_vram_mb():


def log_free_ram():
    # previously logged free VRAM and free RAM on two separate lines
    logger.debug(
        f"Free VRAM: {get_torch_free_vram_mb():0.0f} MB, "
        f"Free RAM: {psutil.virtual_memory().available / (1024 * 1024):0.0f} MB",
    )
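get_torch_free_vram_mb() itself sits in the collapsed part of the diff; a hypothetical equivalent for a CUDA device could look like:

# Hypothetical sketch of the free-VRAM helper referenced by log_free_ram();
# the real implementation is not shown in this diff.
import torch

def get_torch_free_vram_mb_sketch() -> float:
    free_bytes, _total_bytes = torch.cuda.mem_get_info()  # (free, total) in bytes for the current device
    return free_bytes / (1024 * 1024)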


class Comfy_Horde:
