-
Notifications
You must be signed in to change notification settings - Fork 11
/
create_partial_vision_dataset.py
136 lines (108 loc) · 4.61 KB
/
create_partial_vision_dataset.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
import os
import numpy as np
import cv2
from PIL import Image
from ai2thor.controller import Controller
from ai2thor.platform import CloudRendering
from tqdm import tqdm
import json
from sklearn.cluster import KMeans
def _floor_plans(offset, start, stop):
    """Build AI2-THOR floor-plan names for one room type over an index range."""
    return [f"FloorPlan{offset + i}" for i in range(start, stop)]


# Room-type numbering in AI2-THOR: kitchens 1-30, living rooms 201-230,
# bedrooms 301-330, bathrooms 401-430. Split per room type:
# train = indices 1-20, val = 21-25, test = 26-30.
kitchens_train = _floor_plans(0, 1, 21)
living_rooms_train = _floor_plans(200, 1, 21)
bedrooms_train = _floor_plans(300, 1, 21)
bathrooms_train = _floor_plans(400, 1, 21)

kitchens_val = _floor_plans(0, 21, 26)
living_rooms_val = _floor_plans(200, 21, 26)
bedrooms_val = _floor_plans(300, 21, 26)
bathrooms_val = _floor_plans(400, 21, 26)

kitchens_test = _floor_plans(0, 26, 31)
living_rooms_test = _floor_plans(200, 26, 31)
bedrooms_test = _floor_plans(300, 26, 31)
bathrooms_test = _floor_plans(400, 26, 31)

scenes_train = kitchens_train + living_rooms_train + bedrooms_train + bathrooms_train
scenes_val = kitchens_val + living_rooms_val + bedrooms_val + bathrooms_val
scenes_test = kitchens_test + living_rooms_test + bedrooms_test + bathrooms_test

print("Number of training scenes: ", len(scenes_train))
print("Number of validation scenes: ", len(scenes_val))
# Module-level AI2-THOR controller shared by create_dataset(). The initial
# scene is a placeholder: create_dataset() resets to each target scene.
# CloudRendering runs the simulator headlessly (no attached display).
controller = Controller(
    agentMode="default",
    visibilityDistance=1.5,
    scene="FloorPlan319",
    platform=CloudRendering,
    makeAgentsVisible=False,
    # step sizes
    gridSize=0.75,  # teleport positions are precomputed on this grid
    snapToGrid=False,
    rotateStepDegrees=90,  # NOTE: create_dataset() overrides per-step degrees to 60
    # image modalities
    renderDepthImage=True,
    renderInstanceSegmentation=True,
    # camera properties
    width=1024,
    height=1024,
    fieldOfView=90
)
def save_rgb_frame(rgb_image, base_path, save_name):
    """Write an OpenCV (BGR) frame to <base_path>/<save_name>.png."""
    target = os.path.join(base_path, f"{save_name}.png")
    cv2.imwrite(target, rgb_image)
def save_depth_frame(depth_img, base_path, save_name):
    """Persist a depth map as an .npy array file at <base_path>/<save_name>.npy."""
    target = os.path.join(base_path, f"{save_name}.npy")
    np.save(target, np.asarray(depth_img))
def save_json_dict(json_file, base_path, save_name):
    """Write a JSON-serializable object to <base_path>/<save_name>.json.

    Output is pretty-printed (indent=2) with a trailing newline, matching
    the format create_dataset() reads back via json.load.

    Args:
        json_file: any json.dump-serializable object (typically a dict).
        base_path: existing directory to write into.
        save_name: filename stem; ".json" is appended.
    """
    save_path = os.path.join(base_path, save_name + ".json")
    # Explicit UTF-8 so output doesn't depend on the locale's default encoding;
    # json.dump streams directly instead of building an intermediate string.
    with open(save_path, 'w', encoding='utf-8') as f:
        json.dump(json_file, f, indent=2)
        f.write('\n')
def mkdir_dataset_file(scene_path):
    """Create directory scene_path (and parents) if it does not exist.

    Uses exist_ok=True instead of an exists() pre-check: the check-then-create
    pattern races if another process creates the directory in between.
    """
    os.makedirs(scene_path, exist_ok=True)
def create_dataset(scene_list, mode=None):
    """Capture a panorama of RGB frames at precomputed positions per scene.

    For each scene: resets the global `controller`, loads the scene's
    teleport positions from <cwd>/ai2thor/dataset/<mode>/position_dict/
    <scene>.json (a mapping of position index -> position), teleports the
    agent to each position, rotates through 360 degrees in 60-degree steps,
    and saves every view as <scene>_<pos>_<step>.png under
    <cwd>/ai2thor/dataset/<mode>/rgb_img/.

    Args:
        scene_list: iterable of AI2-THOR scene names, e.g. "FloorPlan1".
        mode: dataset variant used as a path component; must be a string in
            practice (os.path.join raises on None).
    """
    base_path = os.getcwd()
    bad_scene = []  # scenes the simulator failed to reset
    rgb_base_path = os.path.join(base_path, "ai2thor", "dataset", mode, "rgb_img")
    position_dict_base_path = os.path.join(base_path, "ai2thor", "dataset", mode, "position_dict")
    mkdir_dataset_file(rgb_base_path)
    mkdir_dataset_file(position_dict_base_path)
    degree_step = 60  # yaw increment per captured frame (6 frames per position)
    for scene_one in tqdm(scene_list):
        # The simulator resets the scene; record and skip scenes that fail.
        try:
            controller.reset(scene=scene_one)
        except Exception:  # narrowed from bare except: keep Ctrl-C working
            bad_scene.append(scene_one)
            continue
        # Read this scene's precomputed position dict.
        positions_list_path = os.path.join(position_dict_base_path, scene_one + ".json")
        with open(positions_list_path, 'r') as f:
            positions_list = json.load(f)
        for position_ind, position_one in tqdm(positions_list.items()):
            controller.step(action='Teleport', position=position_one)
            for i in tqdm(range(int(360 / degree_step))):
                # step() returns the resulting event, so no separate
                # controller.last_event lookup is needed.
                event = controller.step(action="RotateRight", degrees=degree_step)
                rgb_image = event.cv2img
                image_file_save_name = scene_one + "_" + str(position_ind) + "_" + str(i)
                save_rgb_frame(rgb_image, rgb_base_path, image_file_save_name)
    print(bad_scene)
    print(len(bad_scene))
def main():
    """Generate the partial-vision dataset for each configured mode."""
    mode_list = ["Priori_overall_60", "Priori_partial_60"]
    for mode_one in mode_list:
        # Only the literal "train" mode uses the training split; every other
        # mode name captures from the validation scenes.
        scenes = scenes_train if mode_one == "train" else scenes_val
        create_dataset(scenes, mode_one)


if __name__ == '__main__':
    main()