view_steering_model.py (forked from commaai/research) · executable file · 127 lines (102 loc) · 4.43 KB
#!/usr/bin/env python
import argparse
import sys
import numpy as np
import h5py
import pygame
import json
from keras.models import model_from_json
pygame.init()
size = (320*2, 160*2)
pygame.display.set_caption("comma.ai data viewer")
screen = pygame.display.set_mode(size, pygame.DOUBLEBUF)
camera_surface = pygame.surface.Surface((320,160),0,24).convert()
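# frames are 320x160; each one is drawn into camera_surface, then scaled 2x
# into the window created above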
# ***** get perspective transform for images *****
from skimage import transform as tf
rsrc = \
 [[43.45456230828867, 118.00743250075844],
  [104.5055617352614, 69.46865203761757],
  [114.86050156739812, 60.83953551083698],
  [129.74572757609468, 50.48459567870026],
  [132.98164627363735, 46.38576532847949],
  [301.0336906326895, 98.16046448916306],
  [238.25686790036065, 62.56535881619311],
  [227.2547443287154, 56.30924933427718],
  [209.13359962247614, 46.817221154818526],
  [203.9561297064078, 43.5813024572758]]
rdst = \
 [[10.822125594094452, 1.42189132706374],
  [21.177065426231174, 1.5297552836484982],
  [25.275895776451954, 1.42189132706374],
  [36.062291434927694, 1.6376192402332563],
  [40.376849698318004, 1.42189132706374],
  [11.900765159942026, -2.1376192402332563],
  [22.25570499207874, -2.1376192402332563],
  [26.785991168638553, -2.029755283648498],
  [37.033067044190524, -2.029755283648498],
  [41.67121717733509, -2.029755283648498]]
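# rsrc are pixel coordinates picked in the 320x160 camera image; rdst are the
# matching points on the road plane (roughly meters ahead and meters of lateral
# offset, an assumption based on the value ranges). Estimating the transform
# from rdst to rsrc lets us project road-plane path points into image pixels.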
tform3_img = tf.ProjectiveTransform()
tform3_img.estimate(np.array(rdst), np.array(rsrc))
def perspective_tform(x, y):
  p1, p2 = tform3_img((x,y))[0]
  return p2, p1
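# note: the transform yields an (x, y) image point; swapping to (p2, p1)
# returns it as (row, col) for direct numpy indexing in draw_pt below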
# ***** functions to draw lines *****
def draw_pt(img, x, y, color, sz=1):
  row, col = perspective_tform(x, y)
  # cast to int: the transform returns floats, which newer numpy versions
  # reject as slice indices
  row, col = int(row), int(col)
  if row >= 0 and row < img.shape[0] and \
     col >= 0 and col < img.shape[1]:
    img[row-sz:row+sz, col-sz:col+sz] = color
def draw_path(img, path_x, path_y, color):
  for x, y in zip(path_x, path_y):
    draw_pt(img, x, y, color)
# ***** functions to draw predicted path *****
def calc_curvature(v_ego, angle_steers, angle_offset=0):
  deg_to_rad = np.pi/180.
  slip_factor = 0.0014  # slip factor obtained from real data
  steer_ratio = 15.3    # from http://www.edmunds.com/acura/ilx/2016/road-test-specs/
  wheel_base = 2.67     # from http://www.edmunds.com/acura/ilx/2016/sedan/features-specs/
  angle_steers_rad = (angle_steers - angle_offset) * deg_to_rad
  curvature = angle_steers_rad/(steer_ratio * wheel_base * (1. + slip_factor * v_ego**2))
  return curvature
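# This is the standard kinematic bicycle model: the road-wheel angle is the
# steering-wheel angle divided by steer_ratio, and path curvature is that angle
# (in radians, small-angle approximation) over the wheelbase, here with an
# empirical speed-dependent slip correction in the denominator.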
def calc_lookahead_offset(v_ego, angle_steers, d_lookahead, angle_offset=0):
  # returns the lateral path offset for a given steering angle, speed and lookahead distance
  curvature = calc_curvature(v_ego, angle_steers, angle_offset)
  # clip to avoid arcsin NaNs when the turn is too sharp
  y_actual = d_lookahead * np.tan(np.arcsin(np.clip(d_lookahead * curvature, -0.999, 0.999))/2.)
  return y_actual, curvature
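# Geometry: a point at forward distance d on a circle of curvature k sits at a
# central angle theta = arcsin(d*k), so its lateral offset is d*tan(theta/2);
# the clip keeps d*k inside arcsin's domain for turns sharper than the
# lookahead distance allows.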
def draw_path_on(img, speed_ms, angle_steers, color=(0,0,255)):
  path_x = np.arange(0., 50.1, 0.5)
  path_y, _ = calc_lookahead_offset(speed_ms, angle_steers, path_x)
  draw_path(img, path_x, path_y, color)
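# the path is evaluated from 0 to 50 m ahead in 0.5 m steps, then projected
# into the image through the perspective transform above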
# ***** main loop *****
if __name__ == "__main__":
  parser = argparse.ArgumentParser(description='Path viewer')
  parser.add_argument('model', type=str, help='Path to model definition json. Model weights should be on the same path.')
  parser.add_argument('--dataset', type=str, default="2016-06-08--11-46-01", help='Dataset/video clip name')
  args = parser.parse_args()

  with open(args.model, 'r') as jfile:
    model = model_from_json(json.load(jfile))

  model.compile("sgd", "mse")
  weights_file = args.model.replace('json', 'keras')
  model.load_weights(weights_file)

  # default dataset is the validation data on the highway
  dataset = args.dataset
  skip = 300
  log = h5py.File("dataset/log/"+dataset+".h5", "r")
  cam = h5py.File("dataset/camera/"+dataset+".h5", "r")
  print(log.keys())
  # skip to highway
  for i in range(skip*100, log['times'].shape[0]):
    if i % 100 == 0:
      print("%.2f seconds elapsed" % (i/100.0))
    img = cam['X'][log['cam1_ptr'][i]].swapaxes(0,2).swapaxes(0,1)
    predicted_steers = model.predict(img[None, :, :, :].transpose(0, 3, 1, 2))[0][0]
    angle_steers = log['steering_angle'][i]
    speed_ms = log['speed'][i]
    draw_path_on(img, speed_ms, -angle_steers/10.0)
    draw_path_on(img, speed_ms, -predicted_steers/10.0, (0, 255, 0))
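    # blue path (the default color): steering recorded in the log; green path:
    # the model's prediction. The sign flip and /10.0 scaling appear to be the
    # viewer's display convention for the steering-wheel angle.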
    # draw on
    pygame.surfarray.blit_array(camera_surface, img.swapaxes(0,1))
    camera_surface_2x = pygame.transform.scale2x(camera_surface)
    screen.blit(camera_surface_2x, (0,0))
    pygame.display.flip()
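
# Example invocation (the exact file layout is an assumption based on the paths
# hardcoded above):
#   ./view_steering_model.py outputs/steering_model/steering_angle.json
# with weights in outputs/steering_model/steering_angle.keras, and camera/log
# recordings under dataset/camera/<name>.h5 and dataset/log/<name>.h5.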