Skip to content

Commit

Permalink
Update MagicAnimate: configs, Gradio demo, and install scripts
Browse files Browse the repository at this point in the history
  • Loading branch information
sdbds committed Dec 9, 2023
1 parent ffef1e4 commit e1416b5
Show file tree
Hide file tree
Showing 8 changed files with 125 additions and 85 deletions.
4 changes: 2 additions & 2 deletions configs/prompts/animation.yaml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
pretrained_model_path: "D:\\sd-webui-aki-v4.1\\models\\Stable-diffusion\\动漫\\cetusMix_v4.safetensors"
pretrained_model_path: "pretrained_models/stable-diffusion-v1-5"
pretrained_vae_path: ""
pretrained_controlnet_path: "pretrained_models/MagicAnimate/densepose_controlnet"
openpose_path: "pretrained_models/openpose"
openpose_path: "pretrained_models/control_v11p_sd15_openpose"
pretrained_appearance_encoder_path: "pretrained_models/MagicAnimate/appearance_encoder"
pretrained_unet_path: ""

Expand Down
112 changes: 78 additions & 34 deletions demo/gradio_animate.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,11 +18,26 @@

animator = MagicAnimate()

def animate(reference_image, motion_sequence_state, seed, steps, guidance_scale,controlnet_model):
return animator(reference_image, motion_sequence_state, seed, steps, guidance_scale,controlnet_model)

with gr.Blocks() as demo:
def animate(
    reference_image,
    motion_sequence_state,
    seed,
    steps,
    guidance_scale,
    controlnet_model,
):
    """Run the MagicAnimate pipeline and return the generated animation.

    Thin Gradio callback: every argument is forwarded unchanged to the
    module-level ``animator`` instance, and its result (the path of the
    rendered video) is returned to the UI.
    """
    args = (
        reference_image,
        motion_sequence_state,
        seed,
        steps,
        guidance_scale,
        controlnet_model,
    )
    return animator(*args)


with gr.Blocks() as demo:
gr.HTML(
"""
<div style="display: flex; justify-content: center; align-items: center; text-align: center;">
Expand All @@ -38,60 +53,89 @@ def animate(reference_image, motion_sequence_state, seed, steps, guidance_scale,
</div>
</div>
</div>
""")
"""
)
animation = gr.Video(format="mp4", label="Animation Results", autoplay=True)

with gr.Row():
reference_image = gr.Image(label="Reference Image")
motion_sequence = gr.Video(format="mp4", label="Motion Sequence")
reference_image = gr.Image(label="Reference Image")
motion_sequence = gr.Video(format="mp4", label="Motion Sequence")

with gr.Column():
random_seed = gr.Textbox(label="Random seed", value=1, info="default: -1")
sampling_steps = gr.Textbox(label="Sampling steps", value=25, info="default: 25")
guidance_scale = gr.Textbox(label="Guidance scale", value=7.5, info="default: 7.5")
submit = gr.Button("Animate")
random_seed = gr.Textbox(label="Random seed", value=1, info="default: -1")
sampling_steps = gr.Textbox(
label="Sampling steps", value=25, info="default: 25"
)
guidance_scale = gr.Textbox(
label="Guidance scale", value=7.5, info="default: 7.5"
)
submit = gr.Button("Animate")

def read_video(video):
    """Validate an uploaded video and return its path unchanged.

    Opening the file with imageio and reading its metadata acts as a
    sanity check that the upload is a decodable video; the fps value
    itself is not used by the demo.
    """
    reader = imageio.get_reader(video)
    # Raises if the container has no readable metadata; value intentionally unused.
    fps = reader.get_meta_data()["fps"]
    return video

def read_image(image, size=512):
    """Resize an uploaded image to ``size`` x ``size`` and return it as a numpy array.

    The incoming ``image`` is a numpy array from the Gradio Image component;
    it is round-tripped through PIL for the resize.
    """
    resized = Image.fromarray(image).resize((size, size))
    return np.array(resized)

# when user uploads a new video
motion_sequence.upload(
read_video,
motion_sequence,
motion_sequence
)
motion_sequence.upload(read_video, motion_sequence, motion_sequence)
# when `first_frame` is updated
reference_image.upload(
read_image,
reference_image,
reference_image
)
reference_image.upload(read_image, reference_image, reference_image)
# when the `submit` button is clicked
submit.click(
animate,
[reference_image, motion_sequence, random_seed, sampling_steps, guidance_scale, gr.Radio(["densepose", "openpose"], label="Controlnet Model", value="densepose")],
animation
[
reference_image,
motion_sequence,
random_seed,
sampling_steps,
guidance_scale,
gr.Radio(
[
"densepose",
"openpose", # "animalpose"
],
label="Controlnet Model",
value="densepose",
),
],
animation,
)

# Examples
gr.Markdown("## Examples")
gr.Examples(
examples=[
["inputs/applications/source_image/monalisa.png", "inputs/applications/driving/densepose/running.mp4"],
["inputs/applications/source_image/demo4.png", "inputs/applications/driving/densepose/demo4.mp4"],
["inputs/applications/source_image/0002.png", "inputs/applications/driving/densepose/demo4.mp4"],
["inputs/applications/source_image/dalle2.jpeg", "inputs/applications/driving/densepose/running2.mp4"],
["inputs/applications/source_image/dalle8.jpeg", "inputs/applications/driving/densepose/dancing2.mp4"],
["inputs/applications/source_image/multi1_source.png", "inputs/applications/driving/densepose/multi_dancing.mp4"],
[
"inputs/applications/source_image/monalisa.png",
"inputs/applications/driving/densepose/running.mp4",
],
[
"inputs/applications/source_image/demo4.png",
"inputs/applications/driving/densepose/demo4.mp4",
],
[
"inputs/applications/source_image/0002.png",
"inputs/applications/driving/densepose/demo4.mp4",
],
[
"inputs/applications/source_image/dalle2.jpeg",
"inputs/applications/driving/densepose/running2.mp4",
],
[
"inputs/applications/source_image/dalle8.jpeg",
"inputs/applications/driving/densepose/dancing2.mp4",
],
[
"inputs/applications/source_image/multi1_source.png",
"inputs/applications/driving/densepose/multi_dancing.mp4",
],
],
inputs=[reference_image, motion_sequence],
outputs=animation,
)


demo.launch(share=True)
demo.launch(share=True)
Binary file added inputs/cai-xukun.mp4
Binary file not shown.
16 changes: 16 additions & 0 deletions install.ps1
Original file line number Diff line number Diff line change
Expand Up @@ -33,5 +33,21 @@ if ($install_SD15 -eq "y" -or $install_SD15 -eq "Y" -or $install_SD15 -eq ""){
}
}

$install_CNOP = Read-Host "Do you need to download control_v11p_sd15_openpose? If you want use it select y, if you dont want select n. [y/n] (Default is y)"
if ($install_CNOP -eq "y" -or $install_CNOP -eq "Y" -or $install_CNOP -eq ""){
if (!(Test-Path -Path "control_v11p_sd15_openpose")) {
Write-Output "Downloading control_v11p_sd15_openpose models..."
git clone https://huggingface.co/bdsqlsz/control_v11p_sd15_openpose
}
}

Write-Output "Installing Video_controlnet_aux..."

git submodule update --recursive --init

Set-Location $PSScriptRoot/video_controlnet_aux
pip install -r requirements.txt -i https://mirror.baidu.com/pypi/simple
pip install -r requirements-video.txt -i https://mirror.baidu.com/pypi/simple

Write-Output "Install completed"
Read-Host | Out-Null ;
44 changes: 25 additions & 19 deletions install_cn.ps1
Original file line number Diff line number Diff line change
Expand Up @@ -3,35 +3,35 @@ Set-Location $PSScriptRoot
$Env:PIP_DISABLE_PIP_VERSION_CHECK = 1

if (!(Test-Path -Path "venv")) {
Write-Output "创建python虚拟环境venv..."
Write-Output "创建python虚拟环境venv..."
python -m venv venv
}
.\venv\Scripts\activate

Write-Output "安装依赖..."
pip install -U -r requirements-windows.txt -i https://mirror.baidu.com/pypi/simple
Write-Output "安装依赖..."
#pip install -U -r requirements-windows.txt -i https://mirror.baidu.com/pypi/simple

Write-Output "检查模型..."
Write-Output "检查模型..."

if (!(Test-Path -Path "pretrained_models")) {
Write-Output "创建模型文件夹..."
Write-Output "创建模型文件夹..."
mkdir "pretrained_models"
}

Set-Location .\pretrained_models

if (!(Test-Path -Path "MagicAnimate")) {
Write-Output "下载MagicAnimate模型..."
Write-Output "下载MagicAnimate模型..."
git clone https://huggingface.co/zcxu-eric/MagicAnimate
}
if (Test-Path -Path "MagicAnimate/.git/lfs") {
Remove-Item -Path MagicAnimate/.git/lfs/* -Recurse -Force
}

$install_SD15 = Read-Host "是否需要下载huggingface的SD15模型? 若您本地没有任何SD15模型选择y,如果想要换其他SD1.5模型选择 n。[y/n] (默认为 y)"
$install_SD15 = Read-Host "是否需要下载huggingface的SD15模型? 若您本地没有任何SD15模型选择y,如果想要换其他SD1.5模型选择 n。[y/n] (默认为 y)"
if ($install_SD15 -eq "y" -or $install_SD15 -eq "Y" -or $install_SD15 -eq "") {
if (!(Test-Path -Path "stable-diffusion-v1-5")) {
Write-Output "下载 stable-diffusion-v1-5 模型..."
Write-Output "下载 stable-diffusion-v1-5 模型..."
git clone https://huggingface.co/bdsqlsz/stable-diffusion-v1-5

}
Expand All @@ -40,18 +40,24 @@ if ($install_SD15 -eq "y" -or $install_SD15 -eq "Y" -or $install_SD15 -eq "") {
}
}

Write-Output "安装Video2Pose..."
$install_CNOP = Read-Host "是否需要下载huggingface的control_v11p_sd15_openpose模型? 若您希望使用openpose选择y,如果不需要选择 n。[y/n] (默认为 y)"
if ($install_CNOP -eq "y" -or $install_CNOP -eq "Y" -or $install_CNOP -eq ""){
if (!(Test-Path -Path "control_v11p_sd15_openpose")) {
Write-Output "下载 control_v11p_sd15_openpose 模型..."
git clone https://huggingface.co/bdsqlsz/control_v11p_sd15_openpose
}
if (Test-Path -Path "control_v11p_sd15_openpose/.git/lfs") {
Remove-Item -Path control_v11p_sd15_openpose/.git/lfs/* -Recurse -Force
}
}

git submodule update --recursive --init
Write-Output "安装Video_controlnet_aux..."

Set-Location $PSScriptRoot/vid2pose
git submodule update --recursive --init
pip install ninja
pip install -U -r requirements.txt -i https://mirror.baidu.com/pypi/simple
mim install mmengine
mim install "mmcv>=2.0.1"
mim install "mmdet>=3.1.0"
mim install "mmpose>=1.1.0"

Write-Output "安装完毕"

Set-Location $PSScriptRoot/video_controlnet_aux
pip install -r requirements.txt -i https://mirror.baidu.com/pypi/simple
pip install -r requirements-video.txt -i https://mirror.baidu.com/pypi/simple

Write-Output "安装完毕"
Read-Host | Out-Null ;
4 changes: 2 additions & 2 deletions requirements-windows.txt
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,8 @@ frozenlist==1.4.0
fsspec==2023.6.0
google-auth==2.22.0
google-auth-oauthlib==1.0.0
gradio==3.41.2
gradio-client==0.5.0
gradio
gradio-client
grpcio==1.57.0
h11==0.14.0
httpcore==0.17.3
Expand Down
9 changes: 2 additions & 7 deletions run_vid2openpose_gui.ps1 → run_VidControlnetAux_gui.ps1
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
$input_path="./vid2pose/sample_videos/input_video.mp4"
$input_path="./inputs/cai-xukun.mp4"
$output_path="./outputs/"
$pose_model="dwpose"


Set-Location $PSScriptRoot
Expand All @@ -19,9 +18,5 @@ if ($output_path) {
[void]$ext_args.Add("-o=$output_path")
}

if ($pose_model) {
[void]$ext_args.Add("--pose_model=$pose_model")
}


python.exe "vid2pose/video2openpose2.py" $ext_args
python.exe "video_controlnet_aux/src/video_controlnet_aux.py" $ext_args
21 changes: 0 additions & 21 deletions run_vid2dense_gui.ps1

This file was deleted.

0 comments on commit e1416b5

Please sign in to comment.