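"""DPVO demo script.

Runs DPVO visual odometry on a video file or a directory of images and
optionally saves the estimated trajectory, a PLY point cloud, COLMAP-style
output, and a trajectory plot.
"""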
import os
from multiprocessing import Process, Queue
from pathlib import Path

import cv2
import numpy as np
import torch
from evo.core.trajectory import PoseTrajectory3D
from evo.tools import file_interface

from dpvo.config import cfg
from dpvo.dpvo import DPVO
from dpvo.plot_utils import plot_trajectory, save_output_for_COLMAP, save_ply
from dpvo.stream import image_stream, video_stream
from dpvo.utils import Timer
SKIP = 0
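
# Display a CHW torch image tensor with OpenCV (handy for debugging).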
def show_image(image, t=0):
    image = image.permute(1, 2, 0).cpu().numpy()
    cv2.imshow('image', image / 255.0)
    cv2.waitKey(t)
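

# Run DPVO over a video file or an image directory. Frames are read in a
# separate process and streamed through a queue; returns the estimated
# trajectory together with the reconstructed points, their colors, and the
# camera calibration.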
@torch.no_grad()
def run(cfg, network, imagedir, calib, stride=1, skip=0, viz=False, timeit=False):
    slam = None
    queue = Queue(maxsize=8)

    # Choose the reader: a directory is streamed with image_stream,
    # anything else is treated as a video file and streamed with video_stream.
    if os.path.isdir(imagedir):
        reader = Process(target=image_stream, args=(queue, imagedir, calib, stride, skip))
    else:
        reader = Process(target=video_stream, args=(queue, imagedir, calib, stride, skip))

    reader.start()
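
    # Main tracking loop: consume frames until the reader signals completion
    # with a negative timestamp.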
    while 1:
        (t, image, intrinsics) = queue.get()
        if t < 0: break

        image = torch.from_numpy(image).permute(2, 0, 1).cuda()
        intrinsics = torch.from_numpy(intrinsics).cuda()

        # Lazily initialize DPVO once the frame size is known.
        if slam is None:
            _, H, W = image.shape
            slam = DPVO(cfg, network, ht=H, wd=W, viz=viz)

        with Timer("SLAM", enabled=timeit):
            slam(t, image, intrinsics)

    reader.join()
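
    # Gather the reconstructed point cloud (positions and per-point colors)
    # from the patch graph before shutting the system down.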
    points = slam.pg.points_.cpu().numpy()[:slam.m]
    colors = slam.pg.colors_.view(-1, 3).cpu().numpy()[:slam.m]

    return slam.terminate(), (points, colors, (*intrinsics, H, W))
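

# Command-line entry point: run the demo and write the requested outputs.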
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--network', type=str, default='dpvo.pth')
    parser.add_argument('--imagedir', type=str)
    parser.add_argument('--calib', type=str)
    parser.add_argument('--name', type=str, help='name your run', default='result')
    parser.add_argument('--stride', type=int, default=2)
    parser.add_argument('--skip', type=int, default=0)
    parser.add_argument('--config', default="config/default.yaml")
    parser.add_argument('--timeit', action='store_true')
    parser.add_argument('--viz', action="store_true")
    parser.add_argument('--plot', action="store_true")
    parser.add_argument('--opts', nargs='+', default=[])
    parser.add_argument('--save_ply', action="store_true")
    parser.add_argument('--save_colmap', action="store_true")
    parser.add_argument('--save_trajectory', action="store_true")
    args = parser.parse_args()
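
    # Load the base configuration and apply any command-line overrides.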
    cfg.merge_from_file(args.config)
    cfg.merge_from_list(args.opts)

    print("Running with config...")
    print(cfg)

    (poses, tstamps), (points, colors, calib) = run(cfg, args.network, args.imagedir, args.calib, args.stride, args.skip, args.viz, args.timeit)

    # Poses are (x, y, z, qx, qy, qz, qw); evo expects the quaternion in wxyz order.
    trajectory = PoseTrajectory3D(positions_xyz=poses[:, :3], orientations_quat_wxyz=poses[:, [6, 3, 4, 5]], timestamps=tstamps)
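
    # Optional outputs: PLY point cloud, COLMAP export, TUM trajectory file, and plot.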
    if args.save_ply:
        save_ply(args.name, points, colors)

    if args.save_colmap:
        save_output_for_COLMAP(args.name, trajectory, points, colors, *calib)

    if args.save_trajectory:
        Path("saved_trajectories").mkdir(exist_ok=True)
        file_interface.write_tum_trajectory_file(f"saved_trajectories/{args.name}.txt", trajectory)

    if args.plot:
        Path("trajectory_plots").mkdir(exist_ok=True)
        plot_trajectory(trajectory, title=f"DPVO Trajectory Prediction for {args.name}", filename=f"trajectory_plots/{args.name}.pdf")
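
# Example invocation (paths are illustrative; substitute your own video or
# image directory, calibration file, and pretrained weights):
#   python demo.py --imagedir path/to/video.mp4 --calib path/to/calib.txt \
#       --network dpvo.pth --stride 4 --plot --save_trajectory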