feat(runner): add support for SDXL-Lightning in image-to-image pipelines #40

Merged (3 commits, Mar 15, 2024)
Changes from 1 commit
82 changes: 74 additions & 8 deletions runner/app/pipelines/image_to_image.py
@@ -1,8 +1,14 @@
 from app.pipelines.base import Pipeline
 from app.pipelines.util import get_torch_device, get_model_dir

-from diffusers import AutoPipelineForImage2Image
-from huggingface_hub import file_download
+from diffusers import (
+    AutoPipelineForImage2Image,
+    StableDiffusionXLPipeline,
+    UNet2DConditionModel,
+    EulerDiscreteScheduler,
+)
+from safetensors.torch import load_file
+from huggingface_hub import file_download, hf_hub_download
 import torch
 import PIL
 from typing import List
@@ -15,6 +21,8 @@

 logger = logging.getLogger(__name__)

+SDXL_LIGHTNING_MODEL_ID = "ByteDance/SDXL-Lightning"
+

 class ImageToImagePipeline(Pipeline):
     def __init__(self, model_id: str):
@@ -25,10 +33,13 @@ def __init__(self, model_id: str):
             repo_id=model_id, repo_type="model"
         )
         folder_path = os.path.join(get_model_dir(), folder_name)
-        has_fp16_variant = any(
-            ".fp16.safetensors" in fname
-            for _, _, files in os.walk(folder_path)
-            for fname in files
+        has_fp16_variant = (
+            any(
+                ".fp16.safetensors" in fname
+                for _, _, files in os.walk(folder_path)
+                for fname in files
+            )
+            or SDXL_LIGHTNING_MODEL_ID in model_id
         )
         if torch_device != "cpu" and has_fp16_variant:
             logger.info("ImageToImagePipeline loading fp16 variant for %s", model_id)
@@ -37,8 +48,49 @@ def __init__(self, model_id: str):
             kwargs["variant"] = "fp16"

         self.model_id = model_id
-        self.ldm = AutoPipelineForImage2Image.from_pretrained(model_id, **kwargs)
Collaborator (Author) commented:

I've made some minor changes here. First, I combined the pipeline initialization and GPU allocation steps into one line. Moreover, I've transitioned to using the torch_device variable.
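A rough before/after of the pattern that comment describes (a minimal sketch reusing names from this diff, not additional code from the PR):

    # Before: construct the pipeline, then move it to the device in a second step.
    self.ldm = AutoPipelineForImage2Image.from_pretrained(model_id, **kwargs)
    self.ldm.to(get_torch_device())

    # After: diffusers pipelines return self from .to(), so loading and device
    # placement can be chained, reusing the torch_device computed earlier in __init__.
    self.ldm = AutoPipelineForImage2Image.from_pretrained(model_id, **kwargs).to(torch_device)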

-        self.ldm.to(get_torch_device())

+        # Special case SDXL-Lightning because the unet for SDXL needs to be swapped
+        if SDXL_LIGHTNING_MODEL_ID in model_id:
+            base = "stabilityai/stable-diffusion-xl-base-1.0"
+
+            # ByteDance/SDXL-Lightning-2step
+            if "2step" in model_id:
+                unet_id = "sdxl_lightning_2step_unet"
+            # ByteDance/SDXL-Lightning-4step
+            elif "4step" in model_id:
+                unet_id = "sdxl_lightning_4step_unet"
+            # ByteDance/SDXL-Lightning-8step
+            elif "8step" in model_id:
+                unet_id = "sdxl_lightning_8step_unet"
+            else:
+                # Default to 2step
+                unet_id = "sdxl_lightning_2step_unet"
+
+            unet = UNet2DConditionModel.from_config(
+                base, subfolder="unet", cache_dir=kwargs["cache_dir"]
+            ).to(torch_device, kwargs["torch_dtype"])
+            unet.load_state_dict(
+                load_file(
+                    hf_hub_download(
+                        SDXL_LIGHTNING_MODEL_ID,
+                        f"{unet_id}.safetensors",
+                        cache_dir=kwargs["cache_dir"],
+                    ),
+                    device=str(torch_device),
+                )
+            )
+
+            self.ldm = StableDiffusionXLPipeline.from_pretrained(
+                base, unet=unet, **kwargs
+            ).to(torch_device)
+
+            self.ldm.scheduler = EulerDiscreteScheduler.from_config(
+                self.ldm.scheduler.config, timestep_spacing="trailing"
+            )
+        else:
+            self.ldm = AutoPipelineForImage2Image.from_pretrained(
+                model_id, **kwargs
+            ).to(torch_device)

         if os.environ.get("SFAST"):
             logger.info(
@@ -76,6 +128,20 @@ def __call__(self, prompt: str, image: PIL.Image, **kwargs) -> List[PIL.Image]:

if "num_inference_steps" not in kwargs:
kwargs["num_inference_steps"] = 2
elif SDXL_LIGHTNING_MODEL_ID in self.model_id:
# SDXL-Lightning models should have guidance_scale = 0 and use
# the correct number of inference steps for the unet checkpoint loaded
kwargs["guidance_scale"] = 0.0

if "2step" in self.model_id:
kwargs["num_inference_steps"] = 2
elif "4step" in self.model_id:
kwargs["num_inference_steps"] = 4
elif "8step" in self.model_id:
kwargs["num_inference_steps"] = 8
else:
# Default to 2step
kwargs["num_inference_steps"] = 2

return self.ldm(prompt, image=image, **kwargs).images
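
For reference, a hypothetical caller exercising the new branch might look like this (illustrative only: the model id follows the checkpoint-name convention in the comments above, and the prompt and image paths are placeholders):

    import PIL.Image

    from app.pipelines.image_to_image import ImageToImagePipeline

    # Any model id containing "ByteDance/SDXL-Lightning" takes the new code path;
    # "4step" selects the sdxl_lightning_4step_unet checkpoint in __init__.
    pipeline = ImageToImagePipeline("ByteDance/SDXL-Lightning-4step")

    source = PIL.Image.open("input.png").convert("RGB")

    # The SDXL-Lightning branch in __call__ sets guidance_scale=0.0 and
    # num_inference_steps=4 to match the 4-step unet.
    images = pipeline("a watercolor landscape, detailed", image=source)
    images[0].save("output.png")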
