# occ_grid_map.py
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.animation as anim
from IPython.display import HTML
'''
In this notebook, you will generate an occupancy grid based on multiple simulated lidar scans.
The inverse scanner model is given to you in the inverse_scanner() function. It returns a matrix
of measured occupancy probability values based on the lidar scan model discussed in the video lectures.
The get_ranges() function returns the scanned range values for a given vehicle position and scanner bearing.
These two functions are given below. Make sure you understand what they are doing, as you will need to use them later in the notebook.
'''
# Calculates the inverse measurement model for a laser scanner.
# It identifies three regions. The first, where no information is available, lies
# outside of the scanning arc. The second, where objects are likely to exist, is at the
# end of the range measurement within the arc. The third, where objects are unlikely
# to exist, is within the arc but at less distance than the range measurement.
def inverse_scanner(num_rows, num_cols, x, y, theta, meas_phi, meas_r, rmax, alpha, beta):
    m = np.zeros((num_rows, num_cols))
    for i in range(num_rows):
        for j in range(num_cols):
            # Find range and bearing relative to the input state (x, y, theta).
            r = math.sqrt((i - x)**2 + (j - y)**2)
            phi = (math.atan2(j - y, i - x) - theta + math.pi) % (2 * math.pi) - math.pi

            # Find the range measurement associated with the relative bearing.
            k = np.argmin(np.abs(np.subtract(phi, meas_phi)))

            # If the cell is beyond the maximum sensor range, or behind our range
            # measurement, or outside the field of view of the sensor, then no
            # new information is available.
            if (r > min(rmax, meas_r[k] + alpha / 2.0)) or (abs(phi - meas_phi[k]) > beta / 2.0):
                m[i, j] = 0.5
            # If the range measurement lies within this cell, it is likely to be an object.
            elif (meas_r[k] < rmax) and (abs(r - meas_r[k]) < alpha / 2.0):
                m[i, j] = 0.7
            # If the cell is in front of the range measurement, it is likely to be empty.
            elif r < meas_r[k]:
                m[i, j] = 0.3
    return m
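# A worked example of the three regions above (illustrative numbers only): with
# alpha = 1 and a beam measurement meas_r[k] = 10, a cell along that beam at
# r = 10.2 lies within alpha/2 of the measurement and gets p = 0.7 (likely occupied),
# a cell at r = 5 lies in front of the measurement and gets p = 0.3 (likely free),
# and a cell at r = 12 lies behind the measurement and keeps p = 0.5 (no information).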
# Generates range measurements for a laser scanner based on a map, vehicle position,
# and sensor parameters.
# Uses the ray tracing algorithm.
def get_ranges(true_map, X, meas_phi, rmax):
    (M, N) = np.shape(true_map)
    x = X[0]
    y = X[1]
    theta = X[2]
    meas_r = rmax * np.ones(meas_phi.shape)

    # Iterate for each measurement bearing.
    for i in range(len(meas_phi)):
        # Iterate over each unit step up to and including rmax.
        for r in range(1, rmax + 1):
            # Determine the coordinates of the cell.
            xi = int(round(x + r * math.cos(theta + meas_phi[i])))
            yi = int(round(y + r * math.sin(theta + meas_phi[i])))

            # If not in the map, set the measurement there and stop ray tracing.
            if (xi <= 0 or xi >= M - 1 or yi <= 0 or yi >= N - 1):
                meas_r[i] = r
                break
            # If in the map but hitting an obstacle, set the measurement range
            # and stop ray tracing.
            elif true_map[xi, yi] == 1:
                meas_r[i] = r
                break

    return meas_r
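# A quick, optional sanity check of get_ranges() on a tiny map (the _demo_* names
# below are hypothetical and used only for this illustration; they are not part of
# the assignment). A wall occupies row 10, so a beam fired from row 2 along theta = 0
# should report a range of about 8 cells, while a beam fired the opposite way exits
# the map boundary almost immediately.
_demo_map = np.zeros((20, 20))
_demo_map[10, :] = 1
_demo_ranges = get_ranges(_demo_map, [2, 10, 0], np.array([0.0, math.pi]), 15)
# Expected: _demo_ranges[0] is approximately 8 (wall hit) and _demo_ranges[1] is
# approximately 2 (the beam leaves the map near row 0).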
# Simulation time initialization.
T_MAX = 150
time_steps = np.arange(T_MAX)
# Initializing the robot's location.
x_0 = [30, 30, 0]
# The sequence of robot motions.
u = np.array([[3, 0, -3, 0], [0, 3, 0, -3]])
u_i = 1
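# Note: the columns of u are the four candidate motions (+3 in x, +3 in y, -3 in x,
# -3 in y); u_i selects the active column (starting with the +y motion) and is advanced
# in the main loop whenever the commanded move would leave the map or hit an obstacle.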
# Robot sensor rotation command
w = np.multiply(0.3, np.ones(len(time_steps)))
# True map (note: the columns of the map correspond to the y axis and the rows to the
# x axis, so the robot position coordinates x = x(1) and y = x(2) are swapped when
# plotting so the trajectory lines up with the map).
M = 50
N = 60
true_map = np.zeros((M, N))
true_map[0:10, 0:10] = 1
true_map[30:35, 40:45] = 1
true_map[3:6,40:60] = 1
true_map[20:30,25:29] = 1
true_map[40:50,5:25] = 1
# Initialize the belief map.
# We are assuming a uniform prior.
m = np.multiply(0.5, np.ones((M, N)))
# Initialize the log odds ratio (zero everywhere for the uniform 0.5 prior).
L0 = np.log(np.divide(m, np.subtract(1, m)))  # Note: np.divide performs element-wise division.
L = L0
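# A single-cell sketch of the log-odds update performed in the main loop (hypothetical
# numbers, for intuition only):
#     l_t = l_{t-1} + log(p / (1 - p)) - l_0
# Starting from the uniform prior (p = 0.5, so l_0 = 0), two consecutive "occupied"
# readings of p = 0.7 give l = 2 * log(0.7 / 0.3) ~= 1.69, which maps back to a
# belief of about 0.84 through the inverse logit.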
# Parameters for the sensor model.
meas_phi = np.arange(-0.4, 0.4, 0.05)  # Measurement bearings, spaced 0.05 rad apart.
rmax = 30  # Max beam range.
alpha = 1  # Width of an obstacle (distance about the measurement to fill in).
beta = 0.05  # Angular width of a beam.
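# Note that beta equals the 0.05 rad spacing of meas_phi, so the |phi - meas_phi[k]|
# check in inverse_scanner() effectively restricts updates to the sensor's field of
# view: inside the arc every cell lies within half a beam spacing of its nearest beam,
# while cells outside the arc fail the check and keep the 0.5 "no information" value.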
# Initialize the vector of states for our simulation.
x = np.zeros((3, len(time_steps)))
x[:, 0] = x_0
'''
Here is where you will enter your code. Your task is to complete the main simulation loop.
After each step of robot motion, you are required to gather range data from your lidar scan,
and then apply the inverse scanner model to map these to a measured occupancy belief map.
From this, you will then perform a log-odds update on your log-odds occupancy grid,
and update your belief map accordingly. As the car traverses the environment,
the occupancy grid belief map should move closer and closer to the true map.
In the code block after the end of the loop, the code will output some values which will be
used for grading your assignment. Once your visualization looks correct, copy these values
and save them in a .txt file for submission. Good luck!
'''
'''
Note: the %%capture cell magic below suppresses the output of that cell (figure creation
and any warnings), so only the animations rendered at the end of the notebook are shown.
'''
%%capture
# Helper functions for converting between probabilities and log odds.
# logit function
def logit(p):
    return np.log(np.divide(p, np.subtract(1, p)))  # Note: np.divide is element-wise.

# inv_logit function
def inv_logit(l):
    exp_logit = np.exp(l)  # Note: np.exp is element-wise.
    return np.divide(exp_logit, 1 + exp_logit)
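# Aside (optional, not required by the assignment): for very large positive log odds,
# np.exp(l) can overflow; an equivalent and numerically safer form of inv_logit is
# 1.0 / (1.0 + np.exp(-l)).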
# Initialize figures.
map_fig = plt.figure()
map_ax = map_fig.add_subplot(111)
map_ax.set_xlim(0, N)
map_ax.set_ylim(0, M)
invmod_fig = plt.figure()
invmod_ax = invmod_fig.add_subplot(111)
invmod_ax.set_xlim(0, N)
invmod_ax.set_ylim(0, M)
belief_fig = plt.figure()
belief_ax = belief_fig.add_subplot(111)
belief_ax.set_xlim(0, N)
belief_ax.set_ylim(0, M)
meas_rs = []
meas_r = get_ranges(true_map, x[:, 0], meas_phi, rmax)
meas_rs.append(meas_r)
invmods = []
invmod = inverse_scanner(M, N, x[0, 0], x[1, 0], x[2, 0], meas_phi, meas_r,
                         rmax, alpha, beta)
invmods.append(invmod)
ms = []
ms.append(m)
# Main simulation loop.
for t in range(1, len(time_steps)):
    # Perform robot motion.
    move = np.add(x[0:2, t-1], u[:, u_i])
    # If we hit the map boundaries, or a collision would occur, remain still.
    if (move[0] >= M - 1) or (move[1] >= N - 1) or (move[0] <= 0) or (move[1] <= 0) \
            or true_map[int(round(move[0])), int(round(move[1]))] == 1:
        x[:, t] = x[:, t-1]
        u_i = (u_i + 1) % 4
    else:
        x[0:2, t] = move
    x[2, t] = (x[2, t-1] + w[t]) % (2 * math.pi)

    # TODO(done) Gather the measurement range data, which we will convert to occupancy
    # probabilities using our inverse measurement model.
    meas_r = get_ranges(true_map, x[:, t], meas_phi, rmax)
    meas_rs.append(meas_r)

    # TODO(done) Given our range measurements and our robot location, apply our inverse
    # scanner model to get our measured probabilities of occupancy.
    invmod = inverse_scanner(M, N, x[0, t], x[1, t], x[2, t], meas_phi, meas_r, rmax, alpha, beta)
    invmods.append(invmod)

    # TODO(done) Calculate and update the log odds of our occupancy grid, given our measured
    # occupancy probabilities from the inverse model.
    L = logit(invmod) + L - L0

    # TODO(done) Calculate a grid of probabilities from the log odds.
    m = inv_logit(L)
    ms.append(m)
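# Optional sanity check (illustrative only, not part of the graded output): you could
# compare the thresholded final belief against the true map, e.g.
#     np.mean((ms[-1] > 0.5) == (true_map == 1))
# Explored obstacle and free cells should agree, while unexplored cells remain near 0.5.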
'''
Now that you have written your main simulation loop, you can visualize your robot motion in the true map,
your measured belief map, and your occupancy grid belief map below. These are shown in the 1st, 2nd, and 3rd videos, respectively.
If your 3rd video converges towards the true map shown in the 1st video, congratulations! You have completed the assignment.
Please submit the output of the box above as a .txt file to the grader for this assignment.
'''
# Output for grading. Do not modify this code!
m_f = ms[-1]
print("{}".format(m_f[40, 10]))
print("{}".format(m_f[30, 40]))
print("{}".format(m_f[35, 40]))
print("{}".format(m_f[0, 50]))
print("{}".format(m_f[10, 5]))
print("{}".format(m_f[20, 15]))
print("{}".format(m_f[25, 50]))
def map_update(i):
    map_ax.clear()
    map_ax.set_xlim(0, N)
    map_ax.set_ylim(0, M)
    map_ax.imshow(np.subtract(1, true_map), cmap='gray', origin='lower', vmin=0.0, vmax=1.0)
    x_plot = x[1, :i+1]
    y_plot = x[0, :i+1]
    map_ax.plot(x_plot, y_plot, "bx-")
def invmod_update(i):
    invmod_ax.clear()
    invmod_ax.set_xlim(0, N)
    invmod_ax.set_ylim(0, M)
    invmod_ax.imshow(invmods[i], cmap='gray', origin='lower', vmin=0.0, vmax=1.0)
    for j in range(len(meas_rs[i])):
        invmod_ax.plot(x[1, i] + meas_rs[i][j] * math.sin(meas_phi[j] + x[2, i]),
                       x[0, i] + meas_rs[i][j] * math.cos(meas_phi[j] + x[2, i]), "ko")
    invmod_ax.plot(x[1, i], x[0, i], 'bx')
def belief_update(i):
    belief_ax.clear()
    belief_ax.set_xlim(0, N)
    belief_ax.set_ylim(0, M)
    belief_ax.imshow(ms[i], cmap='gray', origin='lower', vmin=0.0, vmax=1.0)
    belief_ax.plot(x[1, max(0, i-10):i], x[0, max(0, i-10):i], 'bx-')
map_anim = anim.FuncAnimation(map_fig, map_update, frames=len(x[0, :]), repeat=False)
invmod_anim = anim.FuncAnimation(invmod_fig, invmod_update, frames=len(x[0, :]), repeat=False)
belief_anim = anim.FuncAnimation(belief_fig, belief_update, frames=len(x[0, :]), repeat=False)
HTML(map_anim.to_html5_video())
HTML(invmod_anim.to_html5_video())
HTML(belief_anim.to_html5_video())
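# Note: if the three HTML(...) expressions above are placed in a single notebook cell,
# only the last one is rendered automatically. To view all three animations, you can
# put each HTML(...) call in its own cell, or wrap each one in IPython.display.display(...).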