
Yexinyi #65 (Open)
wants to merge 8 commits into base: master
Empty file modified .gitignore
100644 → 100755
Empty file.
Empty file modified NUSCENES-GUIDE.md
100644 → 100755
Empty file.
Empty file modified README.md
100644 → 100755
Empty file.
Empty file modified builder/__init__.py
100644 → 100755
Empty file.
Binary file added builder/__pycache__/__init__.cpython-37.pyc
Binary file not shown.
Binary file added builder/__pycache__/data_builder.cpython-37.pyc
Binary file not shown.
Binary file added builder/__pycache__/loss_builder.cpython-37.pyc
Binary file not shown.
Binary file added builder/__pycache__/model_builder.cpython-37.pyc
Binary file not shown.
3 changes: 2 additions & 1 deletion builder/data_builder.py
100644 → 100755
@@ -30,7 +30,8 @@ def build(dataset_config,
return_ref=train_ref, label_mapping=label_mapping, nusc=nusc)
val_pt_dataset = SemKITTI(data_path, imageset=val_imageset,
return_ref=val_ref, label_mapping=label_mapping, nusc=nusc)

#import pdb
#pdb.set_trace()
train_dataset = get_model_class(dataset_config['dataset_type'])(
train_pt_dataset,
grid_size=grid_size,
Empty file modified builder/loss_builder.py
100644 → 100755
Empty file.
Empty file modified builder/model_builder.py
100644 → 100755
Empty file.
Empty file modified config/__init__.py
100644 → 100755
Empty file.
Binary file modified config/__pycache__/__init__.cpython-37.pyc
100644 → 100755
Binary file not shown.
Binary file modified config/__pycache__/config.cpython-37.pyc
100644 → 100755
Binary file not shown.
Empty file modified config/config.py
100644 → 100755
Empty file.
Binary file added config/label_mapping/.semantic-kitti.yaml.swp
Binary file not shown.
Empty file modified config/label_mapping/nuscenes.yaml
100644 → 100755
Empty file.
Empty file modified config/label_mapping/semantic-kitti-multiscan.yaml
100644 → 100755
Empty file.
15 changes: 8 additions & 7 deletions config/label_mapping/semantic-kitti.yaml
100644 → 100755
@@ -185,18 +185,19 @@ learning_ignore: # Ignore classes
19: False # "traffic-sign"
split: # sequence numbers
train:
- 0
- 1
- 2
# - 0
# - 1
# - 2
- 3
- 4
- 5
- 6
- 7
# - 5
# - 6
# - 7
- 9
- 10
valid:
- 8
# - 8
- 22
test:
- 11
- 12
Empty file modified config/nuScenes.yaml
100644 → 100755
Empty file.
8 changes: 4 additions & 4 deletions config/semantickitti.yaml
100644 → 100755
@@ -41,15 +41,15 @@ dataset_params:
###################
## Data_loader options
train_data_loader:
data_path: "/data/dataset/semantic_kitti/data_semkitti/dataset/sequences/"
data_path: "/lustre/home/acct-stu/stu010/sequences/"
imageset: "train"
return_ref: True
batch_size: 2
shuffle: True
num_workers: 4

val_data_loader:
data_path: "/data/dataset/semantic_kitti/data_semkitti/dataset/sequences/"
data_path: "/lustre/home/acct-stu/stu010/sequences/"
imageset: "val"
return_ref: True
batch_size: 1
@@ -62,7 +62,7 @@ val_data_loader:
train_params:
model_load_path: "./model_load_dir/model_load.pt"
model_save_path: "./model_save_dir/model_save.pt"
checkpoint_every_n_steps: 4599
checkpoint_every_n_steps: 1932
max_num_epochs: 40
eval_every_n_steps: 4599
eval_every_n_steps: 1932
learning_rate: 0.001
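A plausible reading of this change (not stated in the PR): the reduced train split keeps sequences 03, 04, 09 and 10, which together hold 801 + 271 + 1591 + 1201 = 3864 scans in SemanticKITTI, so at batch_size 2 one epoch is 3864 / 2 = 1932 steps; checkpointing and evaluation now fire once per epoch of the reduced split.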
Empty file modified dataloader/__init__.py
100644 → 100755
Empty file.
Binary file added dataloader/__pycache__/__init__.cpython-37.pyc
Binary file not shown.
Binary files added (additional dataloader/__pycache__ entries, not shown)
Empty file modified dataloader/dataset_nuscenes.py
100644 → 100755
Empty file.
28 changes: 27 additions & 1 deletion dataloader/dataset_semantickitti.py
100644 → 100755
@@ -16,6 +16,13 @@

REGISTERED_DATASET_CLASSES = {}

# cube root ("triple" in this PR's naming)
def triple_func(x):
return x ** (1/3)

# ln
def ln_func(x):
return np.log(x)

def register_dataset(cls, name=None):
global REGISTERED_DATASET_CLASSES
@@ -244,7 +251,26 @@ def __getitem__(self, index):
intervals = crop_range / (cur_grid_size - 1)

if (intervals == 0).any(): print("Zero interval!")
grid_ind = (np.floor((np.clip(xyz_pol, min_bound, max_bound) - min_bound) / intervals)).astype(np.int)
# original fixed-width binning, kept for reference:
# grid_ind = (np.floor((np.clip(xyz_pol, min_bound, max_bound) - min_bound) / intervals)).astype(np.int)

# cube-root mapping (radial bins widen with distance)
#x_clip = np.clip(xyz_pol, min_bound, max_bound)
#a = (cur_grid_size[0] - 1) / (triple_func(max_bound[0] - min_bound[0]))
#tmp_y = a * triple_func(x_clip[:, 0] - min_bound[0])
#grid_ind1 = (np.floor(tmp_y)).astype(np.int)
#grid_ind2 = (np.floor((x_clip[:, 1:] - min_bound[1:]) / ((max_bound[1:] - min_bound[1:]) / (cur_grid_size[1:] - 1)))).astype(np.int)
#grid_ind = np.concatenate((grid_ind1.reshape((grid_ind1.shape[0], 1)), grid_ind2), axis=1)
##################

# logarithmic mapping (exponentially widening radial intervals)
x_clip = np.clip(xyz_pol, min_bound, max_bound)
a = (cur_grid_size[0] - 1) / (ln_func(max_bound[0] + 1 - min_bound[0]))
tmp_y = a * ln_func(x_clip[:, 0] + 1 - min_bound[0])
grid_ind1 = (np.floor(tmp_y)).astype(np.int)
grid_ind2 = (np.floor((x_clip[:, 1:] - min_bound[1:]) / ((max_bound[1:] - min_bound[1:]) / (cur_grid_size[1:] - 1)))).astype(np.int)
grid_ind = np.concatenate((grid_ind1.reshape((grid_ind1.shape[0], 1)), grid_ind2), axis=1)
######################

voxel_position = np.zeros(self.grid_size, dtype=np.float32)
dim_array = np.ones(len(self.grid_size) + 1, int)
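For clarity, a minimal standalone sketch of the logarithmic radial binning enabled above (bounds and the 480-bin radial grid follow this repo's semantic-kitti defaults; the helper name is illustrative, not from the PR):

import numpy as np

# index = floor(a * ln(r + 1 - r_min)), with a chosen so r_max lands in the
# last bin; the +1 keeps the argument of ln positive at r = r_min.
r_min, r_max, n_bins = 0.0, 50.0, 480
a = (n_bins - 1) / np.log(r_max + 1 - r_min)

def radial_bin(r):
    r = np.clip(r, r_min, r_max)
    return np.floor(a * np.log(r + 1 - r_min)).astype(int)

# Bins are narrow near the sensor and widen with distance:
print(radial_bin(np.array([0.5, 5.0, 25.0, 49.9])))  # -> [ 49 218 396 478]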
Empty file modified dataloader/pc_dataset.py
100644 → 100755
Empty file.
Empty file modified demo_folder.py
100644 → 100755
Empty file.
Empty file modified img/leaderboard.png
100644 → 100755
Empty file modified img/leaderboard2.png
100644 → 100755
Empty file modified img/pipeline.png
100644 → 100755
6 changes: 6 additions & 0 deletions logs_dir/cylinder_asym_networks_logs_tee.txt

Large diffs are not rendered by default.

Empty file modified network/__init__.py
100644 → 100755
Empty file.
Binary file added network/__pycache__/__init__.cpython-37.pyc
Binary file not shown.
Binary files added (additional network/__pycache__ entries, not shown)
Empty file modified network/cylinder_fea_generator.py
100644 → 100755
Empty file.
Empty file modified network/cylinder_spconv_3d.py
100644 → 100755
Empty file.
14 changes: 14 additions & 0 deletions network/segmentator_3d_asymm_spconv.py
100644 → 100755
@@ -46,6 +46,13 @@ def conv1x1(in_planes, out_planes, stride=1, indice_key=None):
class ResContextBlock(nn.Module):
def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), stride=1, indice_key=None):
super(ResContextBlock, self).__init__()
#### no-asymconv ablation: single 3x3 conv in place of the 1x3 + 3x1 pair
# self.conv1 = conv3x3(in_filters, out_filters, indice_key=indice_key + "bef")
# self.bn0 = nn.BatchNorm1d(out_filters)
# self.act1 = nn.LeakyReLU()
# self.weight_initialization()
##################

self.conv1 = conv1x3(in_filters, out_filters, indice_key=indice_key + "bef")
self.bn0 = nn.BatchNorm1d(out_filters)
self.act1 = nn.LeakyReLU()
@@ -71,6 +78,13 @@ def weight_initialization(self):
nn.init.constant_(m.bias, 0)

def forward(self, x):
#### no-asymconv ablation (forward pass)
# shortcut = self.conv1(x)
# shortcut.features = self.act1(shortcut.features)
# shortcut.features = self.bn0(shortcut.features)
# return shortcut
############

shortcut = self.conv1(x)
shortcut.features = self.act1(shortcut.features)
shortcut.features = self.bn0(shortcut.features)
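Context for the ablation above (my gloss, not the author's): the block's default path decomposes the 3x3 spatial kernel into a 1x3 followed by a 3x1, and the commented-out variant swaps in a single 3x3. A rough weight count, treating the kernels as 2-D for simplicity (the repo's conv1x3/conv3x1 are 3-D sparse convolutions, so exact numbers differ):

# Weights per conv path (bias ignored), with C input and C output channels:
C = 64
asym = C * C * (1 * 3) + C * C * (3 * 1)  # 1x3 conv followed by 3x1 conv
full = C * C * (3 * 3)                    # single 3x3 conv
print(asym, full)  # 24576 36864 -> the asymmetric pair uses 2/3 of the weights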
82 changes: 82 additions & 0 deletions statistic_class_num.py
@@ -0,0 +1,82 @@
import numpy as np
import os
import tqdm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import random
import shutil

def plot():
class_map = {5: 'bus', 4: 'truck', 8: 'motorcyclist', 6: 'person', 7: 'bicyclist', 1: 'car', 0: 'unlabeled',
19: 'traffic-sign',18: 'pole', 17: 'terrain', 16: 'trunk', 15: 'vegetation', 9: 'road', 14: 'fence', 13: 'building',
12: 'other-ground', 11: 'sidewalk', 10: 'parking', 3: 'motorcycle', 2: 'bicycle'}
file_num = 11
for j in range(file_num):
X = [str(i) for i in range(20)]
Y = []
fig = plt.figure()
with open('out' + str(j).zfill(2) + '.txt', 'r') as f:
for line in f.readlines():
line = line.strip('\n')  # strip the trailing newline from each line
Y.append(int(line))
plt.bar(X, Y, 0.4, color="blue")
plt.xlabel("X-axis")
plt.ylabel("Y-axis")
plt.title('Sequence' + str(j).zfill(2))

# plt.show()
plt.savefig('barChart' + str(j).zfill(2) + '.jpg')


def statistc_class():
learning_map = {0: 0, 1: 0, 10: 1, 11: 2, 13: 5, 15: 3, 16: 5, 18: 4, 20: 5, 30: 6, 31: 7, 32: 8, 40: 9, 44: 10,
48: 11, 49: 12, 50: 13, 51: 14, 52: 0, 60: 9, 70: 15, 71: 16, 72: 17, 80: 18, 81: 19, 99: 0, 252: 1,
253: 7, 254: 6, 255: 8, 256: 5, 257: 5, 258: 4, 259: 5}

statistc_class_dic = {}
file_num = 11
root_path = '/Users/yexinyi/Desktop/VE450.nosync/data/merge/dataset/sequences'
for i in range(20):
statistc_class_dic[i] = 0

for j in range(file_num):
folder_path = os.path.join(root_path, str(j).zfill(2), 'labels')
# folder_path = '/Users/yexinyi/Desktop/VE450.nosync/data/val_sub/dataset/sequences/08/labels'
for root, dirs, files in os.walk(folder_path):
for f in tqdm.tqdm(files):
# filepath='/Users/yexinyi/Desktop/VE450.nosync/data/merge/dataset/sequences/00/labels/003907.label'
filepath = os.path.join(root, f)
annotated_data = np.fromfile(filepath, dtype=np.uint32).reshape((-1, 1))
annotated_data = annotated_data & 0xFFFF  # mask off the upper 16 bits (instance id); keep the semantic label
tmp = annotated_data.astype(np.uint8)
for i in range(len(tmp)):
if tmp[i][0] in learning_map.keys():
if learning_map[tmp[i][0]] in statistc_class_dic.keys():
statistc_class_dic[learning_map[tmp[i][0]]] += 1

with open('out_val' + str(j).zfill(2) + '.txt', 'w') as file:
for i in statistc_class_dic.values():
file.write(str(i) + '\n')

break

def split_dataset():
folder_path = '/Users/yexinyi/Desktop/VE450.nosync/data/merge/dataset/sequences/08/labels'
folder_path2 = '/Users/yexinyi/Desktop/VE450.nosync/data/merge/dataset/sequences/08/velodyne'

file_list = random.sample(os.listdir(folder_path), 814)  # 814 = 0.2 * 4070 scans in sequence 08
for i in tqdm.tqdm(file_list):
source = os.path.join(folder_path, i)
target = os.path.join('/Users/yexinyi/Desktop/VE450.nosync/data/val_sub/dataset/sequences/08/labels', i)
source2 = os.path.join(folder_path2, i[:-5]+'bin')
target2 = os.path.join('/Users/yexinyi/Desktop/VE450.nosync/data/val_sub/dataset/sequences/08/velodyne', i[:-5]+'bin')
shutil.copy(source, target)
shutil.copy(source2, target2)



if __name__ == '__main__':
statistc_class()
# plot()
# split_dataset()
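The per-point Python loop in statistc_class dominates its runtime. A vectorized sketch of the same counting (assuming, as the script does, that the lower 16 bits of each uint32 label hold the semantic class, and that every raw label occurring in the data is a key of learning_map; the function name is illustrative):

import numpy as np

def count_classes(label_path, learning_map, num_classes=20):
    raw = np.fromfile(label_path, dtype=np.uint32)
    sem = raw & 0xFFFF                     # lower 16 bits = semantic label
    lut = np.zeros(max(learning_map) + 1, dtype=np.int64)
    for key, val in learning_map.items():  # build a remapping lookup table
        lut[key] = val
    return np.bincount(lut[sem], minlength=num_classes)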
166 changes: 166 additions & 0 deletions statistic_new_data_coding.py
@@ -0,0 +1,166 @@
import numpy as np
import os
import tqdm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import random
import shutil

# convert Cartesian xyz to cylindrical coordinates (rho, phi, z)
def cart2polar(input_xyz):
rho = np.sqrt(input_xyz[:, 0] ** 2 + input_xyz[:, 1] ** 2)
phi = np.arctan2(input_xyz[:, 1], input_xyz[:, 0])
return np.stack((rho, phi, input_xyz[:, 2]), axis=1)

# square root ("quadratic" in this PR's naming)
def quadratic_func(x):
return x ** 0.5

# cube root ("triple" in this PR's naming)
def triple_func(x):
return x ** (1/3)

# ln
def ln_func(x):
return np.log(x)

def cal_cell_num(func_type):
file_num = 11
root_path = '/Users/yexinyi/Desktop/VE450.nosync/data/merge/dataset/sequences'

max_bound = np.asarray([50, 3.1415926, 2])
min_bound = np.asarray([0, -3.1415926, -4])
crop_range = max_bound - min_bound
cur_grid_size = np.asarray([480, 360, 32])

# for plot
dim1 = cur_grid_size[0]
dim2 = cur_grid_size[1]
dim3 = cur_grid_size[2]

total_cell_list = [0, 0, 0, 0, 0]
full_cell_list = [0, 0, 0, 0, 0]
res = []
total_file_num = 0

for j in range(file_num):
if j == 8:
continue
folder_path = os.path.join(root_path, str(j).zfill(2), 'velodyne')
# folder_path = '/Users/yexinyi/Desktop/VE450.nosync/data/merge/dataset/sequences/01/velodyne'
for root, dirs, files in os.walk(folder_path):
for f in tqdm.tqdm(files):
# filepath='/Users/yexinyi/Desktop/VE450.nosync/data/merge/dataset/sequences/00/velodyne/003907.bin'
filepath = os.path.join(root, f)
raw_data = np.fromfile(filepath, dtype=np.float32).reshape((-1, 4))
xyz_pol = cart2polar(raw_data[:, :3])

total_file_num += 1
if func_type == "quadratic":
x_clip = np.clip(xyz_pol, min_bound, max_bound)
a = (cur_grid_size[0] - 1) / (quadratic_func(max_bound[0] - min_bound[0]))
tmp_y = a * quadratic_func(x_clip[:, 0] - min_bound[0])
grid_ind1 = (np.floor(tmp_y)).astype(np.int)
grid_ind2 = (np.floor((x_clip[:, 1:] - min_bound[1:]) / ((max_bound[1:] - min_bound[1:]) / (
cur_grid_size[1:] - 1)))).astype(np.int)
grid_ind = np.concatenate((grid_ind1.reshape((grid_ind1.shape[0], 1)), grid_ind2), axis=1)

for i in range(5):
left_x = i * 10
right_x = (i + 1) * 10
left_y = np.floor(a * quadratic_func(left_x))
right_y = np.floor(a * quadratic_func(right_x))
unique_cell = np.unique(grid_ind, axis=0)
full_cell_num = np.sum(np.logical_and((unique_cell[:, 0] >= left_y), (unique_cell[:, 0] < right_y)))
full_cell_list[i] += full_cell_num

elif func_type == "triple":
x_clip = np.clip(xyz_pol, min_bound, max_bound)
a = (cur_grid_size[0] - 1) / (triple_func(max_bound[0] - min_bound[0]))
tmp_y = a * triple_func(x_clip[:, 0] - min_bound[0])
grid_ind1 = (np.floor(tmp_y)).astype(np.int)
grid_ind2 = (np.floor((x_clip[:, 1:] - min_bound[1:]) / ((max_bound[1:] - min_bound[1:]) / (
cur_grid_size[1:] - 1)))).astype(np.int)
grid_ind = np.concatenate((grid_ind1.reshape((grid_ind1.shape[0], 1)), grid_ind2), axis=1)

for i in range(5):
left_x = i * 10
right_x = (i + 1) * 10
left_y = np.floor(a * triple_func(left_x))
right_y = np.floor(a * triple_func(right_x))
unique_cell = np.unique(grid_ind, axis=0)
full_cell_num = np.sum(np.logical_and((unique_cell[:, 0] >= left_y), (unique_cell[:, 0] < right_y)))
full_cell_list[i] += full_cell_num

elif func_type == "ln":
x_clip = np.clip(xyz_pol, min_bound, max_bound)
a = (cur_grid_size[0] - 1) / (ln_func(max_bound[0] + 1 - min_bound[0]))
tmp_y = a * ln_func(x_clip[:, 0] + 1 - min_bound[0])
grid_ind1 = (np.floor(tmp_y)).astype(np.int)
grid_ind2 = (np.floor((x_clip[:, 1:] - min_bound[1:]) / ((max_bound[1:] - min_bound[1:]) / (
cur_grid_size[1:] - 1)))).astype(np.int)
grid_ind = np.concatenate((grid_ind1.reshape((grid_ind1.shape[0], 1)), grid_ind2), axis=1)
for i in range(5):
left_x = i * 10
right_x = (i + 1) * 10
left_y = np.floor(a * ln_func(left_x))
right_y = np.floor(a * ln_func(right_x))
unique_cell = np.unique(grid_ind, axis=0)
full_cell_num = np.sum(np.logical_and((unique_cell[:, 0] >= left_y), (unique_cell[:, 0] < right_y)))
full_cell_list[i] += full_cell_num

elif func_type == "original":
intervals = crop_range / (cur_grid_size - 1)
if (intervals == 0).any(): print("Zero interval!")
x_clip = np.clip(xyz_pol, min_bound, max_bound)
grid_ind = (np.floor((x_clip - min_bound) / intervals)).astype(np.int)

for i in range(5):
left_x = i * 10
right_x = (i + 1) * 10
left_y = np.floor(left_x / intervals[0])
right_y = np.floor(right_x / intervals[0])
unique_cell = np.unique(grid_ind, axis=0)
full_cell_num = np.sum(
np.logical_and((unique_cell[:, 0] >= left_y), (unique_cell[:, 0] < right_y)))
full_cell_list[i] += full_cell_num


for j in range(5):
left_x = j * 10
right_x = (j + 1) * 10
if func_type == "quadratic":
a = (cur_grid_size[0] - 1) / (quadratic_func(max_bound[0] - min_bound[0]))
left_y = np.floor(a * quadratic_func(left_x))
right_y = np.floor(a * quadratic_func(right_x))
total_cell_list[j] = (right_y - left_y) * dim2 * dim3 * total_file_num

elif func_type == "triple":
a = (cur_grid_size[0] - 1) / (triple_func(max_bound[0] - min_bound[0]))
left_y = np.floor(a * triple_func(left_x))
right_y = np.floor(a * triple_func(right_x))
total_cell_list[j] = (right_y - left_y) * dim2 * dim3 * total_file_num

elif func_type == "ln":
a = (cur_grid_size[0] - 1) / (ln_func(max_bound[0] + 1 - min_bound[0]))
left_y = np.floor(a * ln_func(left_x + 1))
right_y = np.floor(a * ln_func(right_x + 1))
total_cell_list[j] = (right_y - left_y) * dim2 * dim3 * total_file_num

elif func_type == "original":
intervals = crop_range / (cur_grid_size - 1)
left_y = np.floor((np.clip(left_x, min_bound[0], max_bound[0]) - min_bound[0]) / intervals[0])
right_y = np.floor((np.clip(right_x, min_bound[0], max_bound[0]) - min_bound[0]) / intervals[0])
total_cell_list[j] = (right_y - left_y) * dim2 * dim3 * total_file_num

res.append(full_cell_list[j] / total_cell_list[j])
print("res:", res)

with open(func_type + '.txt', 'w') as file:
for i in res:
file.write(str(i) + '\n')

if __name__ == '__main__':
func_type = ["quadratic", "triple", "ln", "original"]
cal_cell_num(func_type[2])
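To make the comparison concrete, a tiny self-contained check (same 0-50 m range and 480 radial bins as cal_cell_num; outputs are approximate) of how many radial bins each mapping assigns to each 10 m band. Uniform binning spends them evenly, while the log mapping concentrates resolution near the sensor, where LiDAR points are dense:

import numpy as np

n_bins, r_max = 480, 50.0
edges = np.array([0.0, 10.0, 20.0, 30.0, 40.0, 50.0])
mappings = {
    "original":  lambda x: x,                 # uniform
    "quadratic": np.sqrt,                     # square root, despite the name
    "triple":    lambda x: x ** (1.0 / 3.0),  # cube root, despite the name
    "ln":        lambda x: np.log(x + 1.0),
}
for name, f in mappings.items():
    a = (n_bins - 1) / f(r_max)
    print(name, np.diff(np.floor(a * f(edges))))  # bins per 10 m band
# "original" gives ~96 bins per band; "ln" gives roughly [292, 78, 48, 34, 27]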
4 changes: 2 additions & 2 deletions train.sh
100644 → 100755
@@ -1,5 +1,5 @@
name=cylinder_asym_networks
gpuid=0

CUDA_VISIBLE_DEVICES=${gpuid} python -u train_cylinder_asym.py \
2>&1 | tee logs_dir/${name}_logs_tee.txt
CUDA_VISIBLE_DEVICES=${gpuid} python3 -u train_cylinder_asym.py \
2>&1 | tee logs_dir/${name}_logs_tee.txt