Merge branch 'master' into node_expansion
guill committed Sep 21, 2023
2 parents 04ebc9f + 4d41bd5 commit a2a537c
Showing 43 changed files with 1,252 additions and 150 deletions.
31 changes: 31 additions & 0 deletions .github/workflows/test-build.yml
@@ -0,0 +1,31 @@
+name: Build package
+
+#
+# This workflow is a test of the python package build.
+# Install Python dependencies across different Python versions.
+#
+
+on:
+  push:
+    paths:
+      - "requirements.txt"
+      - ".github/workflows/test-build.yml"
+
+jobs:
+  build:
+    name: Build Test
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.8", "3.9", "3.10", "3.11"]
+    steps:
+      - uses: actions/checkout@v2
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
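Note that fail-fast: false lets all four Python versions report independently instead of cancelling the matrix on the first failure, and the workflow only fires on pushes that touch requirements.txt or the workflow file itself. One hedged way to smoke-test it before pushing is the third-party act runner (this assumes act and Docker are installed; neither is set up by this commit):

    act push -W .github/workflows/test-build.yml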
18 changes: 9 additions & 9 deletions .gitignore
@@ -1,16 +1,16 @@
 __pycache__/
 *.py[cod]
-output/
-input/
-!input/example.png
-models/
-temp/
-custom_nodes/
+/output/
+/input/
+!/input/example.png
+/models/
+/temp/
+/custom_nodes/
 !custom_nodes/example_node.py.example
 extra_model_paths.yaml
 /.vs
 .idea/
 venv/
-web/extensions/*
-!web/extensions/logging.js.example
-!web/extensions/core/
+/web/extensions/*
+!/web/extensions/logging.js.example
+!/web/extensions/core/
4 changes: 2 additions & 2 deletions README.md
@@ -77,9 +77,9 @@ Simply download, extract with [7-Zip](https://7-zip.org) and run. Make sure you
 
 See the [Config file](extra_model_paths.yaml.example) to set the search paths for models. In the standalone windows build you can find this file in the ComfyUI directory. Rename this file to extra_model_paths.yaml and edit it with your favorite text editor.
 
-## Colab Notebook
+## Jupyter Notebook
 
-To run it on colab or paperspace you can use my [Colab Notebook](notebooks/comfyui_colab.ipynb) here: [Link to open with google colab](https://colab.research.google.com/github/comfyanonymous/ComfyUI/blob/master/notebooks/comfyui_colab.ipynb)
+To run it on services like paperspace, kaggle or colab you can use my [Jupyter Notebook](notebooks/comfyui_colab.ipynb)
 
 ## Manual Install (Windows, Linux)
7 changes: 5 additions & 2 deletions comfy/cli_args.py
@@ -1,6 +1,6 @@
 import argparse
 import enum
-
+import comfy.options
 
 class EnumAction(argparse.Action):
     """
@@ -98,7 +98,10 @@ class LatentPreviewMethod(enum.Enum):
 
 parser.add_argument("--disable-metadata", action="store_true", help="Disable saving prompt metadata in files.")
 
-args = parser.parse_args()
+if comfy.options.args_parsing:
+    args = parser.parse_args()
+else:
+    args = parser.parse_args([])
 
 if args.windows_standalone_build:
     args.auto_launch = True
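The guarded parse_args assumes a small comfy.options module that this page does not show. A minimal sketch of what the import appears to rely on (the enable_args_parsing helper is an inference from the flag's name, not something visible in this diff):

    # comfy/options.py -- sketch inferred from its usage in cli_args.py.
    # Embedding applications leave args_parsing False so that importing
    # cli_args does not swallow their own command line.
    args_parsing = False

    def enable_args_parsing(enable=True):
        global args_parsing
        args_parsing = enable

With the flag off, parser.parse_args([]) still populates args with every default, so downstream checks like args.windows_standalone_build keep working.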
9 changes: 7 additions & 2 deletions comfy/clip_vision.py
@@ -49,12 +49,17 @@ def encode_image(self, image):
         precision_scope = lambda a, b: contextlib.nullcontext(a)
 
         with precision_scope(comfy.model_management.get_autocast_device(self.load_device), torch.float32):
-            outputs = self.model(pixel_values=pixel_values)
+            outputs = self.model(pixel_values=pixel_values, output_hidden_states=True)
 
         for k in outputs:
             t = outputs[k]
             if t is not None:
-                outputs[k] = t.cpu()
+                if k == 'hidden_states':
+                    outputs["penultimate_hidden_states"] = t[-2].cpu()
+                    outputs["hidden_states"] = None
+                else:
+                    outputs[k] = t.cpu()
 
         return outputs
 
 def convert_to_transformers(sd, prefix):
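Requesting output_hidden_states=True makes the transformers vision model return per-layer hidden states, and the loop keeps only the second-to-last layer under a new key while discarding the full stack. A hedged usage sketch (clip_vision and image are illustrative placeholders, not names from this commit):

    # Hypothetical caller; assumes a loaded clip vision model object.
    outputs = clip_vision.encode_image(image)
    penultimate = outputs["penultimate_hidden_states"]  # [batch, tokens, dim], moved to CPU
    # outputs["hidden_states"] is None: the full stack is dropped to save memory.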
14 changes: 11 additions & 3 deletions comfy/controlnet.py
@@ -449,10 +449,18 @@ def copy(self):
         return c
 
 def load_t2i_adapter(t2i_data):
-    keys = t2i_data.keys()
-    if 'adapter' in keys:
+    if 'adapter' in t2i_data:
         t2i_data = t2i_data['adapter']
-        keys = t2i_data.keys()
+    if 'adapter.body.0.resnets.0.block1.weight' in t2i_data: #diffusers format
+        prefix_replace = {}
+        for i in range(4):
+            for j in range(2):
+                prefix_replace["adapter.body.{}.resnets.{}.".format(i, j)] = "body.{}.".format(i * 2 + j)
+            prefix_replace["adapter.body.{}.".format(i, j)] = "body.{}.".format(i * 2)
+        prefix_replace["adapter."] = ""
+        t2i_data = comfy.utils.state_dict_prefix_replace(t2i_data, prefix_replace)
+    keys = t2i_data.keys()
 
     if "body.0.in_conv.weight" in keys:
         cin = t2i_data['body.0.in_conv.weight'].shape[1]
         model_ad = comfy.t2i_adapter.adapter.Adapter_light(cin=cin, channels=[320, 640, 1280, 1280], nums_rb=4)
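The new branch renames diffusers-style T2I-Adapter keys into the layout the rest of the loader expects. comfy.utils.state_dict_prefix_replace is not shown in this diff; a minimal sketch of the behavior the call site assumes:

    # Sketch of the assumed helper: rewrite every key that starts with a
    # given prefix, leaving all other keys untouched.
    def state_dict_prefix_replace(state_dict, replace_prefix):
        for old_prefix, new_prefix in replace_prefix.items():
            for k in [k for k in state_dict if k.startswith(old_prefix)]:
                state_dict[new_prefix + k[len(old_prefix):]] = state_dict.pop(k)
        return state_dict

Insertion order matters under this reading: the catch-all "adapter." entry is added to prefix_replace last, so the more specific "adapter.body.{i}.resnets.{j}." keys are already renamed (and no longer match) by the time it is applied.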
31 changes: 31 additions & 0 deletions comfy/k_diffusion/sampling.py
@@ -706,3 +706,34 @@ def sample_dpmpp_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disab
     noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None), cpu=False) if noise_sampler is None else noise_sampler
     return sample_dpmpp_sde(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler, r=r)
 
+
+def DDPMSampler_step(x, sigma, sigma_prev, noise, noise_sampler):
+    alpha_cumprod = 1 / ((sigma * sigma) + 1)
+    alpha_cumprod_prev = 1 / ((sigma_prev * sigma_prev) + 1)
+    alpha = (alpha_cumprod / alpha_cumprod_prev)
+
+    mu = (1.0 / alpha).sqrt() * (x - (1 - alpha) * noise / (1 - alpha_cumprod).sqrt())
+    if sigma_prev > 0:
+        mu += ((1 - alpha) * (1. - alpha_cumprod_prev) / (1. - alpha_cumprod)).sqrt() * noise_sampler(sigma, sigma_prev)
+    return mu
+
+
+def generic_step_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None, step_function=None):
+    extra_args = {} if extra_args is None else extra_args
+    noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
+    s_in = x.new_ones([x.shape[0]])
+
+    for i in trange(len(sigmas) - 1, disable=disable):
+        denoised = model(x, sigmas[i] * s_in, **extra_args)
+        if callback is not None:
+            callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+        x = step_function(x / torch.sqrt(1.0 + sigmas[i] ** 2.0), sigmas[i], sigmas[i + 1], (x - denoised) / sigmas[i], noise_sampler)
+        if sigmas[i + 1] != 0:
+            x *= torch.sqrt(1.0 + sigmas[i + 1] ** 2.0)
+    return x
+
+
+@torch.no_grad()
+def sample_ddpm(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None):
+    return generic_step_sampler(model, x, sigmas, extra_args, callback, disable, noise_sampler, DDPMSampler_step)

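DDPMSampler_step is the classic ancestral DDPM update rewritten in k-diffusion's sigma parameterization; the following is my restatement of the code above, matching the posterior mean of Ho et al. (2020), not text from the commit:

    \bar{\alpha}_t = \frac{1}{\sigma_t^{2} + 1}, \qquad
    \alpha_t = \frac{\bar{\alpha}_t}{\bar{\alpha}_{t-1}}, \qquad
    \epsilon_\theta = \frac{x_t - \text{denoised}}{\sigma_t}

    \mu_{t-1} = \frac{1}{\sqrt{\alpha_t}}
                \left( x_t - \frac{1 - \alpha_t}{\sqrt{1 - \bar{\alpha}_t}} \, \epsilon_\theta \right)
              + \sqrt{\frac{(1 - \alpha_t)\,(1 - \bar{\alpha}_{t-1})}{1 - \bar{\alpha}_t}} \; z,
    \qquad z \sim \mathcal{N}(0, I)

The division by \sqrt{1 + \sigma_t^2} in generic_step_sampler converts the variance-exploding latent into the variance-preserving one (x_{vp} = \sqrt{\bar{\alpha}_t}\, x_{ve}), and the multiplication by \sqrt{1 + \sigma_{t+1}^2} converts back after the step.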
4 changes: 4 additions & 0 deletions comfy/latent_formats.py
@@ -1,5 +1,9 @@
 
 class LatentFormat:
+    scale_factor = 1.0
+    latent_rgb_factors = None
+    taesd_decoder_name = None
+
     def process_in(self, latent):
         return latent * self.scale_factor
 
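Hoisting these defaults to class attributes gives every subclass a working scale_factor without an __init__. A hedged illustration (SD15 and the 0.18215 scale are the well-known SD1.x values; they are not part of this hunk):

    # Illustrative subclass: override only what differs from the defaults.
    class SD15(LatentFormat):
        scale_factor = 0.18215

    fmt = SD15()
    model_latent = fmt.process_in(latent)  # 'latent' is a placeholder tensor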
3 changes: 1 addition & 2 deletions comfy/ldm/models/diffusion/ddim.py
@@ -33,7 +33,6 @@ def make_schedule_timesteps(self, ddim_timesteps, ddim_eta=0., verbose=True):
         assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
         to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.device)
 
-        self.register_buffer('betas', to_torch(self.model.betas))
         self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
         self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
 
@@ -195,7 +194,7 @@ def ddim_sampling(self, cond, shape,
                       temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                       unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,
                       ucg_schedule=None, denoise_function=None, extra_args=None, to_zero=True, end_step=None, disable_pbar=False):
-        device = self.model.betas.device
+        device = self.model.alphas_cumprod.device
         b = shape[0]
         if x_T is None:
             img = torch.randn(shape, device=device)
3 changes: 1 addition & 2 deletions comfy/ldm/modules/attention.py
@@ -323,8 +323,7 @@ def forward(self, x, context=None, value=None, mask=None):
                 break
             except model_management.OOM_EXCEPTION as e:
                 if first_op_done == False:
-                    torch.cuda.empty_cache()
-                    torch.cuda.ipc_collect()
+                    model_management.soft_empty_cache(True)
                 if cleared_cache == False:
                     cleared_cache = True
                     print("out of memory error, emptying cache and trying again")
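Together with the identical change in comfy/ldm/modules/diffusionmodules/model.py below, OOM recovery now goes through a single model_management helper instead of raw CUDA calls. The helper itself is not in this diff; a sketch of what the replaced lines imply it does on CUDA (the real version presumably also covers non-CUDA backends, which is the point of centralizing it):

    import torch

    # Sketch of the assumed helper; 'force=True' is read here as
    # "flush even when backend heuristics would normally skip it".
    def soft_empty_cache(force=False):
        if torch.cuda.is_available():
            torch.cuda.empty_cache()   # release cached blocks back to the driver
            torch.cuda.ipc_collect()   # reclaim memory held by dead IPC handles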
1 change: 1 addition & 0 deletions comfy/ldm/modules/diffusionmodules/model.py
@@ -186,6 +186,7 @@ def slice_attention(q, k, v):
             del s2
             break
         except model_management.OOM_EXCEPTION as e:
+            model_management.soft_empty_cache(True)
             steps *= 2
             if steps > 128:
                 raise e
