Commit

sdbds committed Dec 12, 2023
2 parents 39d7fad + 318a14b commit e83ea98
Showing 3 changed files with 49 additions and 23 deletions.
demo/animate.py (63 changes: 42 additions & 21 deletions)
@@ -43,24 +43,33 @@
 import math
 from pathlib import Path
 
+
 class MagicAnimate:
-    def __init__(self, config="configs/prompts/animation.yaml",controlnet_model="densepose") -> None:
+    def __init__(
+        self, config="configs/prompts/animation.yaml", controlnet_model="densepose"
+    ) -> None:
         print("Initializing MagicAnimate Pipeline...")
         *_, func_args = inspect.getargvalues(inspect.currentframe())
         func_args = dict(func_args)
 
         self.config = config
 
         config = OmegaConf.load(config)
 
         inference_config = OmegaConf.load(config.inference_config)
 
         motion_module = config.motion_module
 
         self.controlnet_model = controlnet_model
 
         ### >>> create animation pipeline >>> ###
-        self.tokenizer, self.text_encoder, self.unet, noise_scheduler, self.vae = load_models(
+        (
+            self.tokenizer,
+            self.text_encoder,
+            self.unet,
+            noise_scheduler,
+            self.vae,
+        ) = load_models(
             config.pretrained_model_path,
             scheduler_name="",
             v2=False,
@@ -115,9 +124,10 @@ def __init__(self, config="configs/prompts/animation.yaml",controlnet_model="densepose") -> None:
             self.controlnet = ControlNetModel.from_pretrained(config.openpose_path)
             print("Using OpenPose ControlNet")
         else:
-            self.controlnet = ControlNetModel.from_pretrained(config.pretrained_controlnet_path)
+            self.controlnet = ControlNetModel.from_pretrained(
+                config.pretrained_controlnet_path
+            )
             print("Using Densepose ControlNet")
 
-
         self.vae.to(torch.float16)
         self.unet.to(torch.float16)
@@ -187,9 +197,17 @@ def __init__(self, config="configs/prompts/animation.yaml",controlnet_model="densepose") -> None:
 
     def reset_init(instance, *args, **kwargs):
         instance.__init__(*args, **kwargs)
 
     def __call__(
-        self, source_image, motion_sequence, random_seed, step, guidance_scale, controlnet_model="densepose", size=512,
+        self,
+        source_image,
+        motion_sequence,
+        random_seed,
+        step,
+        guidance_scale,
+        debug,
+        controlnet_model="densepose",
+        size=512,
     ):
         if self.controlnet_model != controlnet_model:
             self.vae.to("cpu")
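The reworked __call__ signature above adds a debug parameter ahead of controlnet_model. A hedged usage sketch (not part of this commit; the file paths, argument values, and image loading are illustrative assumptions):

import numpy as np
from PIL import Image

animator = MagicAnimate(controlnet_model="densepose")
source_image = np.array(Image.open("inputs/person.png").convert("RGB"))  # hypothetical reference image
animation_path = animator(
    source_image,
    "inputs/dance.mp4",   # hypothetical motion sequence video
    random_seed=42,
    step=25,
    guidance_scale=7.5,
    debug=False,          # True also stacks the source and control frames into the saved grid
)
print(animation_path)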
@@ -198,7 +216,10 @@ def __call__(
             self.controlnet.to("cpu")
             self.appearance_encoder.to("cpu")
             torch_gc()
-            self.reset_init(config="configs/prompts/animation.yaml", controlnet_model=controlnet_model)
+            self.reset_init(
+                config="configs/prompts/animation.yaml",
+                controlnet_model=controlnet_model,
+            )
         prompt = n_prompt = ""
         random_seed = int(random_seed)
         step = int(step)
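When the requested ControlNet differs from the one currently loaded, the hunk above moves every module to the CPU, calls torch_gc(), and rebuilds the pipeline through reset_init. torch_gc itself is defined elsewhere in the repository; a minimal sketch of what such a helper typically does (an assumption, not the file's actual implementation):

import gc
import torch

def torch_gc():
    # Drop Python-level references first, then return cached CUDA blocks to the driver
    # so the re-initialized pipeline can allocate its weights.
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()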
@@ -250,16 +271,18 @@ def __call__(
             source_image=source_image,
         ).videos
 
-        source_images = np.array([source_image] * original_length)
-        source_images = (
-            rearrange(torch.from_numpy(source_images), "t h w c -> 1 c t h w") / 255.0
-        )
-        samples_per_video.append(source_images)
+        if debug:
+            source_images = np.array([source_image] * original_length)
+            source_images = (
+                rearrange(torch.from_numpy(source_images), "t h w c -> 1 c t h w")
+                / 255.0
+            )
+            samples_per_video.append(source_images)
 
-        control = control / 255.0
-        control = rearrange(control, "t h w c -> 1 c t h w")
-        control = torch.from_numpy(control)
-        samples_per_video.append(control[:, :, :original_length])
+            control = control / 255.0
+            control = rearrange(control, "t h w c -> 1 c t h w")
+            control = torch.from_numpy(control)
+            samples_per_video.append(control[:, :, :original_length])
 
         samples_per_video.append(sample[:, :, :original_length])
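With this change the source frames and the control video are only stacked into samples_per_video when debug is enabled, so the saved grid normally contains just the generated sample. A self-contained sketch of the tensor layout conversion used above (assumed imports; the zero-filled frames are placeholders):

import numpy as np
import torch
from einops import rearrange

frames = np.zeros((16, 512, 512, 3), dtype=np.uint8)  # T x H x W x C uint8 clip
clip = rearrange(torch.from_numpy(frames), "t h w c -> 1 c t h w") / 255.0
print(clip.shape)  # torch.Size([1, 3, 16, 512, 512]), floats in [0, 1]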

@@ -273,5 +296,3 @@
         save_videos_grid(samples_per_video, animation_path)
 
         return animation_path
-
-
demo/gradio_animate.py (4 changes: 4 additions & 0 deletions)
@@ -25,6 +25,7 @@ def animate(
     seed,
     steps,
     guidance_scale,
+    debug,
     controlnet_model,
 ):
     return animator(
@@ -33,6 +34,7 @@ def animate(
         seed,
         steps,
         guidance_scale,
+        debug,
         controlnet_model,
     )
 
@@ -69,6 +71,7 @@ def animate(
             guidance_scale = gr.Textbox(
                 label="Guidance scale", value=7.5, info="default: 7.5"
             )
+            debug = gr.Checkbox(label="Debug", value=True)
             submit = gr.Button("Animate")
 
     def read_video(video):
@@ -92,6 +95,7 @@ def read_image(image, size=512):
             random_seed,
             sampling_steps,
             guidance_scale,
+            debug,
             gr.Radio(
                 [
                     "densepose",
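On the Gradio side, the new Debug checkbox is simply appended to the inputs list of the submit handler, so its boolean value arrives as the debug argument of animate(). A stripped-down sketch of that wiring (widget labels match the diff; everything else is an assumption, not the repository file):

import gradio as gr

def animate(seed, steps, guidance_scale, debug):
    # The real callback forwards these values, plus the image and video inputs, to MagicAnimate;
    # here we only echo them to show how the checkbox arrives as a plain bool.
    return f"seed={seed}, steps={steps}, cfg={guidance_scale}, debug={debug}"

with gr.Blocks() as demo:
    random_seed = gr.Textbox(label="Random seed", value=1)
    sampling_steps = gr.Textbox(label="Sampling steps", value=25)
    guidance_scale = gr.Textbox(label="Guidance scale", value=7.5, info="default: 7.5")
    debug = gr.Checkbox(label="Debug", value=True)
    result = gr.Textbox(label="Result")
    submit = gr.Button("Animate")
    submit.click(animate, [random_seed, sampling_steps, guidance_scale, debug], result)

# demo.launch()  # uncomment to try the wiring locally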
demo/gradio_animate_dist.py (5 changes: 3 additions & 2 deletions)
@@ -114,5 +114,6 @@ def read_image(image, size=512):
         outputs=animation,
     )
 
-# demo.queue(max_size=10)
-demo.launch(share=True)
+demo.queue(max_size=10)
+demo.launch(share=True)
+
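The distributed demo now enables the request queue before launching, so Gradio caps how many generation requests it will hold at once. A minimal illustration (a placeholder Blocks app, not the distributed demo itself):

import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("MagicAnimate placeholder UI")

demo.queue(max_size=10)   # hold at most 10 pending requests
demo.launch(share=True)   # share=True exposes a temporary public URL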
