# sample.py (forked from NUS-HPC-AI-Lab/VideoSys)
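# Usage (flags are defined in the __main__ block below):
#   python sample.py --base      # plain single-GPU sampling
#   python sample.py --pab       # sampling with PAB (Pyramid Attention Broadcast) enabled
#   python sample.py --low_mem   # sampling with CPU offload and VAE tiling (default when no flag is given)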
import argparse
import time

from videosys import CogVideoXConfig, VideoSysEngine

model_path = "/data2/lhj_data/ops/CogVideoX-2b"
# model_path = "/home/pod/shared-nvme/CogVideoX-2b"


def run_base():
    # models: "THUDM/CogVideoX-2b" or "THUDM/CogVideoX-5b"
    # change num_gpus for multi-gpu inference
    # config = CogVideoXConfig("THUDM/CogVideoX-2b", num_gpus=1)
    config = CogVideoXConfig(model_path=model_path, num_gpus=1)
    engine = VideoSysEngine(config)

    prompt = "Sunset over the sea."
    # num_frames should be <= 49; resolution is fixed to 720p.
    # seed=-1 means a random seed; >0 means a fixed seed.
    seed = 10  # the seed can affect the quality of the generated video

    start = time.time()
    # Optional: attach a debugger before generation.
    # import debugpy
    # debugpy.listen(("localhost", 9501))
    # print("Waiting for debugger attach")
    # debugpy.wait_for_client()
    video = engine.generate(
        prompt=prompt,
        guidance_scale=6,
        num_inference_steps=50,
        num_frames=49,
        seed=seed,
    ).video[0]
    end = time.time()
    print(f"CogVideoX-2b run_base generate video cost time: {end - start:.2f}s")
    engine.save_video(video, f"./outputs/{prompt}-{seed}-4.mp4")
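
# A minimal multi-GPU variant of run_base (a sketch, assuming two GPUs are
# available; per the "change num_gpus" note above, the engine manages the
# extra worker processes itself):
# config = CogVideoXConfig(model_path=model_path, num_gpus=2)
# engine = VideoSysEngine(config)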


def run_pab():
    config = CogVideoXConfig(model_path, enable_pab=True, num_gpus=1)
    engine = VideoSysEngine(config)

    prompt = "Sunset over the sea."
    start = time.time()
    video = engine.generate(prompt).video[0]
    end = time.time()
    print(f"CogVideoX-2b run_pab generate video cost time: {end - start:.2f}s")
    engine.save_video(video, f"./outputs/{prompt}-pab.mp4")
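
# Note: PAB (Pyramid Attention Broadcast) reuses attention outputs across
# neighboring diffusion steps, which speeds up sampling compared with
# run_base at little cost in quality.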


def run_low_mem():
    config = CogVideoXConfig(model_path, cpu_offload=True, vae_tiling=True)
    engine = VideoSysEngine(config)

    prompt = "Sunset over the sea."
    start = time.time()
    video = engine.generate(prompt).video[0]
    end = time.time()
    print(f"CogVideoX-2b run_low_mem generate video cost time: {end - start:.2f}s")
    engine.save_video(video, f"./outputs/{prompt}-low_mem.mp4")
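
# Note: cpu_offload keeps model weights on the CPU and moves submodules to the
# GPU only while they run, and vae_tiling decodes the latents in tiles; both
# reduce peak GPU memory at some cost in speed.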


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--base", action="store_true")
    parser.add_argument("--pab", action="store_true")
    parser.add_argument("--low_mem", action="store_true")
    args = parser.parse_args()

    if args.base:
        run_base()
    elif args.pab:
        run_pab()
    else:
        run_low_mem()