fix bugs
sdbds committed Dec 6, 2023
1 parent 3da23f9 commit 1d435fa
Showing 4 changed files with 41 additions and 8 deletions.
configs/prompts/animation.yaml (4 changes: 2 additions & 2 deletions)
@@ -1,5 +1,5 @@
-pretrained_model_path: "pretrained_models/stable-diffusion-v1-5"
-pretrained_vae_path: "pretrained_models/sd-vae-ft-mse"
+pretrained_model_path: "D:\\sd-webui-aki-v4.1\\models\\Stable-diffusion\\动漫\\cetusMix_v4.safetensors"
+pretrained_vae_path: ""
pretrained_controlnet_path: "pretrained_models/MagicAnimate/densepose_controlnet"
pretrained_appearance_encoder_path: "pretrained_models/MagicAnimate/appearance_encoder"
pretrained_unet_path: ""
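Note: the updated defaults above hard-code the author's local Windows checkpoint (a single .safetensors file rather than a diffusers model directory) and leave pretrained_vae_path empty so that no separate VAE is loaded; users would substitute their own paths. A minimal sketch of how these values surface at runtime, assuming only that the YAML is read with OmegaConf (which the pipeline already uses, see OmegaConf.to_container in the diffs below):

    from omegaconf import OmegaConf

    config = OmegaConf.load("configs/prompts/animation.yaml")
    print(config.pretrained_model_path)       # path to a single .safetensors checkpoint
    print(bool(config.pretrained_vae_path))   # False when left as "", so no external VAE is loaded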
demo/animate.py (4 changes: 2 additions & 2 deletions)
@@ -78,7 +78,7 @@ def __init__(self, config="configs/prompts/animation.yaml") -> None:
)
else:
unet = UNet3DConditionModel.from_pretrained_2d(
-unet,
+unet.config,
subfolder=None,
unet_additional_kwargs=OmegaConf.to_container(
inference_config.unet_additional_kwargs
@@ -99,7 +99,7 @@ def __init__(self, config="configs/prompts/animation.yaml") -> None:
mode="read",
fusion_blocks=config.fusion_blocks,
)
-if config.pretrained_vae_path is not None:
+if config.pretrained_vae_path:
vae = AutoencoderKL.from_pretrained(config.pretrained_vae_path)
# else:
# vae = AutoencoderKL.from_pretrained(
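Note on the pretrained_vae_path change just above: animation.yaml now ships pretrained_vae_path: "", and the old `is not None` test is still true for an empty string, so the code would attempt AutoencoderKL.from_pretrained("") and fail; the truthiness check skips the external VAE load instead. Illustrative snippet only, not part of the commit:

    path = ""                   # value now shipped in animation.yaml
    print(path is not None)     # True  -> old check would still try to load a VAE from ""
    print(bool(path))           # False -> new check skips the external VAE load entirely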
magicanimate/models/unet_controlnet.py (35 changes: 34 additions & 1 deletion)
@@ -52,7 +52,7 @@ class UNet3DConditionOutput(BaseOutput):
sample: torch.FloatTensor


-class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):
+class UNet3DConditionModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True

@register_to_config
@@ -571,6 +571,39 @@ def from_pretrained_2d(
]
print(f"### Temporal Module Parameters: {sum(params) / 1e6} M")
else:
+config = {
+    "_class_name": "UNet2DConditionModel",
+    "_diffusers_version": "0.6.0",
+    "act_fn": "silu",
+    "attention_head_dim": 8,
+    "block_out_channels": [320, 640, 1280, 1280],
+    "center_input_sample": False,
+    "cross_attention_dim": 768,
+    "down_block_types": [
+        "CrossAttnDownBlock3D",
+        "CrossAttnDownBlock3D",
+        "CrossAttnDownBlock3D",
+        "DownBlock3D",
+    ],
+    "downsample_padding": 1,
+    "flip_sin_to_cos": True,
+    "freq_shift": 0,
+    "in_channels": 4,
+    "layers_per_block": 2,
+    "mid_block_scale_factor": 1,
+    "norm_eps": 1e-05,
+    "norm_num_groups": 32,
+    "out_channels": 4,
+    "sample_size": 64,
+    "up_block_types": [
+        "UpBlock3D",
+        "CrossAttnUpBlock3D",
+        "CrossAttnUpBlock3D",
+        "CrossAttnUpBlock3D",
+    ],
+}
+config["_class_name"] = cls.__name__
+model = cls.from_config(config, **unet_additional_kwargs)
state_dict = pretrained_model_path
m, u = model.load_state_dict(state_dict, strict=False)
print(f"### missing keys: {len(m)}; \n### unexpected keys: {len(u)};")
magicanimate/pipelines/animation.py (6 changes: 3 additions & 3 deletions)
@@ -78,7 +78,7 @@ def main(args):
v2=False,
v_pred=False,
)
-unet.
+
# tokenizer = CLIPTokenizer.from_pretrained(
# config.pretrained_model_path, subfolder="tokenizer"
# )
@@ -94,7 +94,7 @@ def main(args):
)
else:
unet = UNet3DConditionModel.from_pretrained_2d(
-unet,
+unet.config,
subfolder=None,
unet_additional_kwargs=OmegaConf.to_container(
inference_config.unet_additional_kwargs
@@ -115,7 +115,7 @@ def main(args):
mode="read",
fusion_blocks=config.fusion_blocks,
)
-if config.pretrained_vae_path is not None:
+if config.pretrained_vae_path:
vae = AutoencoderKL.from_pretrained(config.pretrained_vae_path)
# else:
# vae = AutoencoderKL.from_pretrained(
