format
echarlaix committed Oct 19, 2023
1 parent 057e576 commit 8af29e8
Showing 3 changed files with 17 additions and 16 deletions.
8 changes: 4 additions & 4 deletions optimum/onnxruntime/modeling_diffusion.py
@@ -47,7 +47,9 @@
from ..pipelines.diffusers.pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipelineMixin
from ..pipelines.diffusers.pipeline_stable_diffusion_xl import StableDiffusionXLPipelineMixin
from ..pipelines.diffusers.pipeline_stable_diffusion_xl_img2img import StableDiffusionXLImg2ImgPipelineMixin
- from ..pipelines.diffusers.pipeline_stable_diffusion_latent_consistency import LatentConsistencyModelPipelinePipelineMixin
+ from ..pipelines.diffusers.pipeline_stable_diffusion_latent_consistency import (
+     LatentConsistencyModelPipelinePipelineMixin,
+ )
from ..pipelines.diffusers.pipeline_utils import VaeImageProcessor
from ..utils import (
DIFFUSION_MODEL_TEXT_ENCODER_2_SUBFOLDER,
@@ -565,18 +567,16 @@ class ORTStableDiffusionInpaintPipeline(ORTStableDiffusionPipelineBase, StableDi
__call__ = StableDiffusionInpaintPipelineMixin.__call__



@add_end_docstrings(ONNX_MODEL_END_DOCSTRING)
class ORTLatentConsistencyModelPipeline(ORTStableDiffusionPipelineBase, LatentConsistencyModelPipelinePipelineMixin):
"""
ONNX Runtime-powered stable diffusion pipeline corresponding to [diffusers.LatentConsistencyModelPipeline](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/latent_consistency#diffusers.LatentConsistencyModelPipeline).
"""

auto_model_class = LatentConsistencyModelPipeline
__call__ = LatentConsistencyModelPipelinePipelineMixin.__call__




class ORTStableDiffusionXLPipelineBase(ORTStableDiffusionPipelineBase):
auto_model_class = StableDiffusionXLImg2ImgPipeline

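For context, here is a minimal usage sketch of the ORTLatentConsistencyModelPipeline class registered above. It is not part of this commit; the checkpoint name and the export=True conversion path are assumptions for illustration only.

# Hypothetical usage of the new ONNX Runtime latent-consistency pipeline (sketch).
from optimum.onnxruntime import ORTLatentConsistencyModelPipeline

pipe = ORTLatentConsistencyModelPipeline.from_pretrained(
    "SimianLuo/LCM_Dreamshaper_v7",  # assumed example of an LCM-distilled checkpoint
    export=True,                     # convert the PyTorch weights to ONNX on the fly
)
image = pipe(
    prompt="a photo of an astronaut riding a horse on mars",
    num_inference_steps=4,  # LCMs need only a handful of denoising steps
    guidance_scale=8.0,
).images[0]
image.save("lcm_onnx.png")
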
23 changes: 12 additions & 11 deletions optimum/pipelines/diffusers/pipeline_latent_consistency.py
@@ -28,12 +28,11 @@


class LatentConsistencyModelPipelinePipelineMixin(StableDiffusionPipelineMixin):

# Adapted from https://github.com/huggingface/diffusers/blob/v0.22.0/src/diffusers/pipelines/latent_consistency/pipeline_latent_consistency.py#L264
def __call__(
self,
prompt: Optional[Union[str, List[str]]] = None,
- height: Optional[int] = 768, # TODO : default to None
+ height: Optional[int] = 768,  # TODO : default to None
width: Optional[int] = 768,
num_inference_steps: int = 4,
guidance_scale: float = 7.5,
@@ -110,10 +109,9 @@ def __call__(
height = height or self.unet.config.get("sample_size", 64) * self.vae_scale_factor
width = width or self.unet.config.get("sample_size", 64) * self.vae_scale_factor


- # Don't need to get negative prompts due to LCM guided distillation
- negative_prompt=None
- negative_prompt_embeds=None
+ # Don't need to get negative prompts due to LCM guided distillation
+ negative_prompt = None
+ negative_prompt_embeds = None

# check inputs. Raise error if not correct
self.check_inputs(
@@ -164,10 +162,14 @@

num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
for i, t in enumerate(self.progress_bar(timesteps)):

# predict the noise residual
timestep = np.full(bs, t, dtype=timestep_dtype)
- noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, timestep_cond=w_embedding)
+ noise_pred = self.unet(
+     sample=latent_model_input,
+     timestep=timestep,
+     encoder_hidden_states=prompt_embeds,
+     timestep_cond=w_embedding,
+ )
noise_pred = noise_pred[0]

# compute the previous noisy sample x_t -> x_t-1
@@ -202,20 +204,19 @@ def __call__(

return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)


# Adapted from https://github.com/huggingface/diffusers/blob/v0.22.0/src/diffusers/pipelines/latent_consistency/pipeline_latent_consistency.py#L264
def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=None):
"""
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
Args:
timesteps (`torch.Tensor`):
generate embedding vectors at these timesteps
embedding_dim (`int`, *optional*, defaults to 512):
dimension of the embeddings to generate
dtype:
data type of the generated embeddings
Returns:
`torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
"""
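For reference, here is a NumPy sketch of the sinusoidal guidance-scale embedding that get_guidance_scale_embedding computes, modelled on the VDM reference linked in the docstring; the exact scaling used by the ONNX mixin may differ slightly.

import numpy as np

def guidance_scale_embedding_sketch(w, embedding_dim=512, dtype=np.float32):
    # w: guidance scale(s), shape (batch,); scaled by 1000 as in the VDM reference
    w = np.asarray(w, dtype=dtype) * 1000.0
    half_dim = embedding_dim // 2
    # log-spaced frequencies, as in standard sinusoidal timestep embeddings
    freqs = np.exp(-np.log(10000.0) * np.arange(half_dim, dtype=dtype) / (half_dim - 1))
    args = w.reshape(-1, 1) * freqs.reshape(1, -1)
    emb = np.concatenate([np.sin(args), np.cos(args)], axis=1)
    if embedding_dim % 2 == 1:  # pad an extra zero column for odd dimensions
        emb = np.pad(emb, [(0, 0), (0, 1)])
    return emb  # shape (len(w), embedding_dim)

The resulting array is what the __call__ above passes to the UNet as timestep_cond (the w_embedding in the denoising loop), which is how latent consistency models fold classifier-free guidance into a conditioning signal instead of running a negative-prompt branch.
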
2 changes: 1 addition & 1 deletion optimum/utils/input_generators.py
@@ -710,7 +710,7 @@ def generate(self, input_name: str, framework: str = "pt", int_dtype: str = "int
if input_name == "timestep":
shape = [self.batch_size]
return self.random_int_tensor(shape, max_value=self.vocab_size, framework=framework, dtype=int_dtype)

if input_name == "text_embeds":
dim = self.text_encoder_projection_dim
elif input_name == "timestep_cond":
