diff --git a/configs/prompts/animation.yaml b/configs/prompts/animation.yaml
index 328bbda6..29d0a567 100644
--- a/configs/prompts/animation.yaml
+++ b/configs/prompts/animation.yaml
@@ -1,7 +1,7 @@
-pretrained_model_path: "D:\\sd-webui-aki-v4.1\\models\\Stable-diffusion\\动漫\\cetusMix_v4.safetensors"
+pretrained_model_path: "pretrained_models/stable-diffusion-v1-5"
pretrained_vae_path: ""
pretrained_controlnet_path: "pretrained_models/MagicAnimate/densepose_controlnet"
-openpose_path: "pretrained_models/openpose"
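+# ControlNet v1.1 OpenPose checkpoint; the install scripts clone it into pretrained_models/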
+openpose_path: "pretrained_models/control_v11p_sd15_openpose"
pretrained_appearance_encoder_path: "pretrained_models/MagicAnimate/appearance_encoder"
pretrained_unet_path: ""
diff --git a/demo/gradio_animate.py b/demo/gradio_animate.py
index 6fa2058a..70feab7f 100644
--- a/demo/gradio_animate.py
+++ b/demo/gradio_animate.py
@@ -18,11 +18,26 @@
animator = MagicAnimate()
-def animate(reference_image, motion_sequence_state, seed, steps, guidance_scale,controlnet_model):
- return animator(reference_image, motion_sequence_state, seed, steps, guidance_scale,controlnet_model)
-with gr.Blocks() as demo:
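+# Thin wrapper so the Gradio click handler can pass the UI values straight to the MagicAnimate pipeline.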
+def animate(
+ reference_image,
+ motion_sequence_state,
+ seed,
+ steps,
+ guidance_scale,
+ controlnet_model,
+):
+ return animator(
+ reference_image,
+ motion_sequence_state,
+ seed,
+ steps,
+ guidance_scale,
+ controlnet_model,
+ )
+
+with gr.Blocks() as demo:
gr.HTML(
"""
@@ -38,60 +53,89 @@ def animate(reference_image, motion_sequence_state, seed, steps, guidance_scale,
- """)
+ """
+ )
animation = gr.Video(format="mp4", label="Animation Results", autoplay=True)
-
+
with gr.Row():
- reference_image = gr.Image(label="Reference Image")
- motion_sequence = gr.Video(format="mp4", label="Motion Sequence")
-
+ reference_image = gr.Image(label="Reference Image")
+ motion_sequence = gr.Video(format="mp4", label="Motion Sequence")
+
with gr.Column():
- random_seed = gr.Textbox(label="Random seed", value=1, info="default: -1")
- sampling_steps = gr.Textbox(label="Sampling steps", value=25, info="default: 25")
- guidance_scale = gr.Textbox(label="Guidance scale", value=7.5, info="default: 7.5")
- submit = gr.Button("Animate")
+ random_seed = gr.Textbox(label="Random seed", value=1, info="default: -1")
+ sampling_steps = gr.Textbox(
+ label="Sampling steps", value=25, info="default: 25"
+ )
+ guidance_scale = gr.Textbox(
+ label="Guidance scale", value=7.5, info="default: 7.5"
+ )
+ submit = gr.Button("Animate")
def read_video(video):
reader = imageio.get_reader(video)
- fps = reader.get_meta_data()['fps']
+ fps = reader.get_meta_data()["fps"]
return video
-
+
def read_image(image, size=512):
return np.array(Image.fromarray(image).resize((size, size)))
-
+
# when user uploads a new video
- motion_sequence.upload(
- read_video,
- motion_sequence,
- motion_sequence
- )
+ motion_sequence.upload(read_video, motion_sequence, motion_sequence)
# when `first_frame` is updated
- reference_image.upload(
- read_image,
- reference_image,
- reference_image
- )
+ reference_image.upload(read_image, reference_image, reference_image)
# when the `submit` button is clicked
submit.click(
animate,
- [reference_image, motion_sequence, random_seed, sampling_steps, guidance_scale, gr.Radio(["densepose", "openpose"], label="Controlnet Model", value="densepose")],
- animation
+ [
+ reference_image,
+ motion_sequence,
+ random_seed,
+ sampling_steps,
+ guidance_scale,
+ gr.Radio(
+ [
+ "densepose",
+ "openpose", # "animalpose"
+ ],
+ label="Controlnet Model",
+ value="densepose",
+ ),
+ ],
+ animation,
)
# Examples
gr.Markdown("## Examples")
gr.Examples(
examples=[
- ["inputs/applications/source_image/monalisa.png", "inputs/applications/driving/densepose/running.mp4"],
- ["inputs/applications/source_image/demo4.png", "inputs/applications/driving/densepose/demo4.mp4"],
- ["inputs/applications/source_image/0002.png", "inputs/applications/driving/densepose/demo4.mp4"],
- ["inputs/applications/source_image/dalle2.jpeg", "inputs/applications/driving/densepose/running2.mp4"],
- ["inputs/applications/source_image/dalle8.jpeg", "inputs/applications/driving/densepose/dancing2.mp4"],
- ["inputs/applications/source_image/multi1_source.png", "inputs/applications/driving/densepose/multi_dancing.mp4"],
+ [
+ "inputs/applications/source_image/monalisa.png",
+ "inputs/applications/driving/densepose/running.mp4",
+ ],
+ [
+ "inputs/applications/source_image/demo4.png",
+ "inputs/applications/driving/densepose/demo4.mp4",
+ ],
+ [
+ "inputs/applications/source_image/0002.png",
+ "inputs/applications/driving/densepose/demo4.mp4",
+ ],
+ [
+ "inputs/applications/source_image/dalle2.jpeg",
+ "inputs/applications/driving/densepose/running2.mp4",
+ ],
+ [
+ "inputs/applications/source_image/dalle8.jpeg",
+ "inputs/applications/driving/densepose/dancing2.mp4",
+ ],
+ [
+ "inputs/applications/source_image/multi1_source.png",
+ "inputs/applications/driving/densepose/multi_dancing.mp4",
+ ],
],
inputs=[reference_image, motion_sequence],
outputs=animation,
)
-demo.launch(share=True)
\ No newline at end of file
+demo.launch(share=True)
diff --git a/inputs/cai-xukun.mp4 b/inputs/cai-xukun.mp4
new file mode 100644
index 00000000..88516758
Binary files /dev/null and b/inputs/cai-xukun.mp4 differ
diff --git a/install.ps1 b/install.ps1
index d180a6bb..44f576e7 100644
--- a/install.ps1
+++ b/install.ps1
@@ -33,5 +33,21 @@ if ($install_SD15 -eq "y" -or $install_SD15 -eq "Y" -or $install_SD15 -eq ""){
}
}
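+# Optionally fetch the ControlNet OpenPose weights used by the demo's "openpose" option.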
+$install_CNOP = Read-Host "Do you need to download control_v11p_sd15_openpose? If you want to use it, select y; if not, select n. [y/n] (Default is y)"
+if ($install_CNOP -eq "y" -or $install_CNOP -eq "Y" -or $install_CNOP -eq ""){
+ if (!(Test-Path -Path "control_v11p_sd15_openpose")) {
+ Write-Output "Downloading control_v11p_sd15_openpose models..."
+ git clone https://huggingface.co/bdsqlsz/control_v11p_sd15_openpose
+ }
+}
+
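+# video_controlnet_aux ships as a git submodule and replaces the old vid2pose tooling.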
+Write-Output "Installing Video_controlnet_aux..."
+
+git submodule update --recursive --init
+
+Set-Location $PSScriptRoot/video_controlnet_aux
+pip install -r requirements.txt -i https://mirror.baidu.com/pypi/simple
+pip install -r requirements-video.txt -i https://mirror.baidu.com/pypi/simple
+
Write-Output "Install completed"
Read-Host | Out-Null ;
diff --git a/install_cn.ps1 b/install_cn.ps1
index 7f1983df..cb7c3b0f 100644
--- a/install_cn.ps1
+++ b/install_cn.ps1
@@ -3,35 +3,35 @@ Set-Location $PSScriptRoot
$Env:PIP_DISABLE_PIP_VERSION_CHECK = 1
if (!(Test-Path -Path "venv")) {
- Write-Output "创建python虚拟环境venv..."
+ Write-Output "创建python虚拟环境venv..."
python -m venv venv
}
.\venv\Scripts\activate
-Write-Output "安装依赖..."
-pip install -U -r requirements-windows.txt -i https://mirror.baidu.com/pypi/simple
+Write-Output "安装依赖..."
+#pip install -U -r requirements-windows.txt -i https://mirror.baidu.com/pypi/simple
-Write-Output "检查模型..."
+Write-Output "检查模型..."
if (!(Test-Path -Path "pretrained_models")) {
- Write-Output "创建模型文件夹..."
+ Write-Output "创建模型文件夹..."
mkdir "pretrained_models"
}
Set-Location .\pretrained_models
if (!(Test-Path -Path "MagicAnimate")) {
- Write-Output "下载MagicAnimate模型..."
+ Write-Output "下载MagicAnimate模型..."
git clone https://huggingface.co/zcxu-eric/MagicAnimate
}
if (Test-Path -Path "MagicAnimate/.git/lfs") {
Remove-Item -Path MagicAnimate/.git/lfs/* -Recurse -Force
}
-$install_SD15 = Read-Host "是否需要下载huggingface的SD15模型? 若您本地没有任何SD15模型选择y,如果想要换其他SD1.5模型选择 n。[y/n] (默认为 y)"
+$install_SD15 = Read-Host "是否需要下载huggingface的SD15模型? 若您本地没有任何SD15模型选择y,如果想要换其他SD1.5模型选择 n。[y/n] (默认为 y)"
if ($install_SD15 -eq "y" -or $install_SD15 -eq "Y" -or $install_SD15 -eq "") {
if (!(Test-Path -Path "stable-diffusion-v1-5")) {
- Write-Output "下载 stable-diffusion-v1-5 模型..."
+ Write-Output "下载 stable-diffusion-v1-5 模型..."
git clone https://huggingface.co/bdsqlsz/stable-diffusion-v1-5
}
@@ -40,18 +40,24 @@ if ($install_SD15 -eq "y" -or $install_SD15 -eq "Y" -or $install_SD15 -eq "") {
}
}
-Write-Output "安装Video2Pose..."
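+# Same optional control_v11p_sd15_openpose download as in install.ps1.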
+$install_CNOP = Read-Host "是否需要下载huggingface的control_v11p_sd15_openpose模型? 希望使用openpose选择y,如果不需要选择 n。[y/n] (默认为 y)"
+if ($install_CNOP -eq "y" -or $install_CNOP -eq "Y" -or $install_CNOP -eq ""){
+ if (!(Test-Path -Path "control_v11p_sd15_openpose")) {
+ Write-Output "下载 control_v11p_sd15_openpose 模型..."
+ git clone https://huggingface.co/bdsqlsz/control_v11p_sd15_openpose
+ }
+ if (Test-Path -Path "control_v11p_sd15_openpose/.git/lfs") {
+ Remove-Item -Path control_v11p_sd15_openpose/.git/lfs/* -Recurse -Force
+ }
+}
-git submodule update --recursive --init
+Write-Output "安装Video_controlnet_aux..."
-Set-Location $PSScriptRoot/vid2pose
git submodule update --recursive --init
-pip install ninja
-pip install -U -r requirements.txt -i https://mirror.baidu.com/pypi/simple
-mim install mmengine
-mim install "mmcv>=2.0.1"
-mim install "mmdet>=3.1.0"
-mim install "mmpose>=1.1.0"
-
-Write-Output "安装完毕"
+
+Set-Location $PSScriptRoot/video_controlnet_aux
+pip install -r requirements.txt -i https://mirror.baidu.com/pypi/simple
+pip install -r requirements-video.txt -i https://mirror.baidu.com/pypi/simple
+
+Write-Output "安装完毕"
Read-Host | Out-Null ;
diff --git a/requirements-windows.txt b/requirements-windows.txt
index fb64b77e..e08aeb9d 100644
--- a/requirements-windows.txt
+++ b/requirements-windows.txt
@@ -29,8 +29,8 @@ frozenlist==1.4.0
fsspec==2023.6.0
google-auth==2.22.0
google-auth-oauthlib==1.0.0
-gradio==3.41.2
-gradio-client==0.5.0
+gradio
+gradio-client
grpcio==1.57.0
h11==0.14.0
httpcore==0.17.3
diff --git a/run_vid2openpose_gui.ps1 b/run_VidControlnetAux_gui.ps1
similarity index 64%
rename from run_vid2openpose_gui.ps1
rename to run_VidControlnetAux_gui.ps1
index 71ed67fe..6daaaa7d 100644
--- a/run_vid2openpose_gui.ps1
+++ b/run_VidControlnetAux_gui.ps1
@@ -1,6 +1,5 @@
-$input_path="./vid2pose/sample_videos/input_video.mp4"
+$input_path="./inputs/cai-xukun.mp4"
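+# Sample clip added in this PR; point $input_path at your own video.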
$output_path="./outputs/"
-$pose_model="dwpose"
Set-Location $PSScriptRoot
@@ -19,9 +18,5 @@ if ($output_path) {
[void]$ext_args.Add("-o=$output_path")
}
-if ($pose_model) {
- [void]$ext_args.Add("--pose_model=$pose_model")
-}
-
-python.exe "vid2pose/video2openpose2.py" $ext_args
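+# --pose_model flag removed; video_controlnet_aux is assumed to select its processor internally.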
+python.exe "video_controlnet_aux/src/video_controlnet_aux.py" $ext_args
diff --git a/run_vid2dense_gui.ps1 b/run_vid2dense_gui.ps1
deleted file mode 100644
index 64912302..00000000
--- a/run_vid2dense_gui.ps1
+++ /dev/null
@@ -1,21 +0,0 @@
-$input_video_path="./vid2pose/sample_videos/input_video.mp4"
-$output_video_path="./vid2pose/sample_videos/output_video1.mp4"
-
-Set-Location $PSScriptRoot
-.\venv\Scripts\activate
-
-$Env:HF_HOME = "./huggingface"
-$Env:XFORMERS_FORCE_DISABLE_TRITON = "1"
-#$Env:PYTHONPATH = $PSScriptRoot
-$ext_args = [System.Collections.ArrayList]::new()
-
-if ($input_video_path) {
- [void]$ext_args.Add("-i=$input_video_path")
-}
-
-if ($output_video_path) {
- [void]$ext_args.Add("-o=$output_video_path")
-}
-
-
-python.exe "vid2pose/main.py" $ext_args