diff --git a/runner/app/pipelines/image_to_image.py b/runner/app/pipelines/image_to_image.py
index 9e20ff03..00f34f1d 100644
--- a/runner/app/pipelines/image_to_image.py
+++ b/runner/app/pipelines/image_to_image.py
@@ -176,7 +176,6 @@ def __call__(
         self, prompt: str, image: PIL.Image, **kwargs
     ) -> Tuple[List[PIL.Image], List[Optional[bool]]]:
         seed = kwargs.pop("seed", None)
-        num_inference_steps = kwargs.get("num_inference_steps", None)
         safety_check = kwargs.pop("safety_check", True)
 
         if seed is not None:
@@ -189,7 +188,9 @@ def __call__(
                 torch.Generator(get_torch_device()).manual_seed(s) for s in seed
             ]
 
-        if num_inference_steps is None or num_inference_steps < 1:
+        if "num_inference_steps" in kwargs and (
+            kwargs["num_inference_steps"] is None or kwargs["num_inference_steps"] < 1
+        ):
             del kwargs["num_inference_steps"]
 
         if (
diff --git a/runner/app/pipelines/image_to_video.py b/runner/app/pipelines/image_to_video.py
index f605cb2f..7b248dcc 100644
--- a/runner/app/pipelines/image_to_video.py
+++ b/runner/app/pipelines/image_to_video.py
@@ -109,7 +109,6 @@ def __call__(
         self, image: PIL.Image, **kwargs
     ) -> Tuple[List[PIL.Image], List[Optional[bool]]]:
         seed = kwargs.pop("seed", None)
-        num_inference_steps = kwargs.get("num_inference_steps", None)
         safety_check = kwargs.pop("safety_check", True)
 
         if "decode_chunk_size" not in kwargs:
@@ -126,7 +125,9 @@ def __call__(
                 torch.Generator(get_torch_device()).manual_seed(s) for s in seed
             ]
 
-        if num_inference_steps is None or num_inference_steps < 1:
+        if "num_inference_steps" in kwargs and (
+            kwargs["num_inference_steps"] is None or kwargs["num_inference_steps"] < 1
+        ):
             del kwargs["num_inference_steps"]
 
         if safety_check:
diff --git a/runner/app/pipelines/text_to_image.py b/runner/app/pipelines/text_to_image.py
index 10e4f485..b24a3a95 100644
--- a/runner/app/pipelines/text_to_image.py
+++ b/runner/app/pipelines/text_to_image.py
@@ -206,7 +206,6 @@ def __call__(
         self, prompt: str, **kwargs
     ) -> Tuple[List[PIL.Image], List[Optional[bool]]]:
         seed = kwargs.pop("seed", None)
-        num_inference_steps = kwargs.get("num_inference_steps", None)
         safety_check = kwargs.pop("safety_check", True)
 
         if seed is not None:
@@ -219,7 +218,9 @@ def __call__(
                 torch.Generator(get_torch_device()).manual_seed(s) for s in seed
             ]
 
-        if num_inference_steps is None or num_inference_steps < 1:
+        if "num_inference_steps" in kwargs and (
+            kwargs["num_inference_steps"] is None or kwargs["num_inference_steps"] < 1
+        ):
             del kwargs["num_inference_steps"]
 
         if (
diff --git a/runner/app/pipelines/upscale.py b/runner/app/pipelines/upscale.py
index e36e4606..5122a35d 100644
--- a/runner/app/pipelines/upscale.py
+++ b/runner/app/pipelines/upscale.py
@@ -97,7 +97,6 @@ def __call__(
         self, prompt: str, image: PIL.Image, **kwargs
     ) -> Tuple[List[PIL.Image], List[Optional[bool]]]:
         seed = kwargs.pop("seed", None)
-        num_inference_steps = kwargs.get("num_inference_steps", None)
         safety_check = kwargs.pop("safety_check", True)
 
         if seed is not None:
@@ -110,7 +109,9 @@ def __call__(
                 torch.Generator(get_torch_device()).manual_seed(s) for s in seed
             ]
 
-        if num_inference_steps is None or num_inference_steps < 1:
+        if "num_inference_steps" in kwargs and (
+            kwargs["num_inference_steps"] is None or kwargs["num_inference_steps"] < 1
+        ):
             del kwargs["num_inference_steps"]
 
         output = self.ldm(prompt, image=image, **kwargs)
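Note: the membership check introduced above avoids the KeyError the old pattern could raise when num_inference_steps was never supplied, since kwargs.get(...) returns None for a missing key and the subsequent del then targets a key that does not exist. A minimal standalone sketch of the two patterns (hypothetical old_filter/new_filter helpers, not part of the repo):

    # Sketch of the failure mode this change guards against.

    def old_filter(**kwargs):
        num_inference_steps = kwargs.get("num_inference_steps", None)
        if num_inference_steps is None or num_inference_steps < 1:
            del kwargs["num_inference_steps"]  # KeyError if the key was never passed
        return kwargs

    def new_filter(**kwargs):
        # Only delete when the key is actually present and its value is unusable.
        if "num_inference_steps" in kwargs and (
            kwargs["num_inference_steps"] is None or kwargs["num_inference_steps"] < 1
        ):
            del kwargs["num_inference_steps"]
        return kwargs

    print(new_filter(prompt="a cat"))                         # {'prompt': 'a cat'}
    print(new_filter(prompt="a cat", num_inference_steps=0))  # invalid value dropped
    try:
        old_filter(prompt="a cat")
    except KeyError as err:
        print("old pattern fails:", err)                      # KeyError: 'num_inference_steps'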