Changes on local jetson nano
rombie18 committed May 9, 2023
1 parent f4455dc commit 0bc4264
Showing 8 changed files with 344 additions and 205 deletions.
94 changes: 43 additions & 51 deletions Software/dev/detect.py
@@ -8,71 +8,63 @@
import math
import numpy as np

cap = cv2.VideoCapture('../videos/gras.avi')
skip = 0
cap = cv2.VideoCapture('/mnt/c/Users/woutr/OneDrive - KU Leuven/Bachelorproef/Beelden_opendeur/buitengras.avi')

while cap.isOpened():

# Load a frame (or image)
_, original_frame = cap.read()

if skip > 0:
skip = skip - 1
continue

# Discard unwanted part of image
original_height, original_width, original_channels = original_frame.shape
cropped_frame = original_frame[280:original_height, 0:original_width]

try:
# Create a Lane object
lane_obj = Lane(orig_frame=cropped_frame)

# Create a Lane object
lane_obj = Lane(orig_frame=cropped_frame)

# Perform thresholding to isolate lane lines
lane_line_markings = lane_obj.get_line_markings()

# Plot the region of interest on the image
lane_obj.plot_roi(plot=False)

# Perform the perspective transform to generate a bird's eye view
# If Plot == True, show image with new region of interest
warped_frame = lane_obj.perspective_transform(plot=False)

# Skip frames where too few lane pixels survived the perspective warp
if np.sum(warped_frame == 255) < 3000:
print("Only {} valid pixels, skipping frame".format(np.sum(warped_frame == 255)))
continue

# Generate the image histogram to serve as a starting point
# for finding lane line pixels
histogram = lane_obj.calculate_histogram(plot=False)

# Find lane line pixels using the sliding window method
left_fit, right_fit = lane_obj.get_lane_line_indices_sliding_windows(
plot=False)

# Fill in the lane line
lane_obj.get_lane_line_previous_window(left_fit, right_fit, plot=False)

# Overlay lines on the original frame
frame_with_lane_lines = lane_obj.overlay_lane_lines(plot=False)
# Perform thresholding to isolate lane lines
lane_line_markings = lane_obj.get_line_markings()
# If fewer than 50 'lane' pixels are detected, skip calculations
#if np.sum(lane_line_markings == 255) < 50:
# continue

# Plot the region of interest on the image
lane_obj.plot_roi(plot=True)

# Perform the perspective transform to generate a bird's eye view
# If Plot == True, show image with new region of interest
warped_frame = lane_obj.perspective_transform(plot=True)

# Skip frames where too few lane pixels survived the perspective warp
if np.sum(warped_frame == 255) < 3000:
print("Only {} valid pixels, skipping frame".format(np.sum(warped_frame == 255)))
continue

# Generate the image histogram to serve as a starting point
# for finding lane line pixels
histogram = lane_obj.calculate_histogram(plot=False)

# Calculate lane line curvature (left and right lane lines)
lane_obj.calculate_curvature(print_to_terminal=False)
# Find lane line pixels using the sliding window method
left_fit, right_fit = lane_obj.get_lane_line_indices_sliding_windows(
plot=False)

# Fill in the lane line
lane_obj.get_lane_line_previous_window(left_fit, right_fit, plot=False)

# Calculate center offset
lane_obj.calculate_car_position(print_to_terminal=False)

# Calculate turning angle
wheel_base = 0.3
curvem = (lane_obj.left_curvem + lane_obj.right_curvem) / 2
angle = math.degrees(math.atan(wheel_base / curvem)) * 6 + 90
print(angle)
# Overlay lines on the original frame
frame_with_lane_lines = lane_obj.overlay_lane_lines(plot=False)
# Calculate lane line curvature (left and right lane lines)
lane_obj.calculate_curvature(print_to_terminal=True)

# Calculate center offset
lane_obj.calculate_car_position(print_to_terminal=True)

except:
print("Unable to process frame")
# Calculate turning angle
wheel_base = 0.3
curvem = (lane_obj.left_curvem + lane_obj.right_curvem) / 2
angle = math.degrees(math.atan(wheel_base / curvem)) * 6 + 90

print(angle)

cv2.imshow('frame', frame_with_lane_lines)
if cv2.waitKey(1) == ord('q'):
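The turning-angle lines above map the mean curvature radius of the two fitted lane lines to a steering command. A minimal sketch of that mapping, using the 0.3 m wheel base, gain of 6 and 90-degree centre from the diff (treating the gain and centre as servo tuning constants is an assumption on my part, not something derived from geometry):

import math

def steering_angle(left_curvem, right_curvem, wheel_base=0.3, gain=6.0, center=90.0):
    # Sketch only, not part of the commit: bicycle-model steering from curvature.
    # atan(wheel_base / radius) is the front-wheel angle needed to follow a circle
    # of that radius; gain and center rescale it to the servo range.
    radius = (left_curvem + right_curvem) / 2
    return math.degrees(math.atan(wheel_base / radius)) * gain + center

# Example: both lane lines report a 5 m curvature radius.
print(steering_angle(5.0, 5.0))  # ~110.6 -> steer away from the 90-degree centre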
Binary file modified Software/libs/lane_detection/__pycache__/lane.cpython-36.pyc
74 changes: 47 additions & 27 deletions Software/libs/lane_detection/lane.py
@@ -41,8 +41,8 @@ def __init__(self, orig_frame):
# You need to find these corners manually.
self.roi_points = np.float32([
(460,0), # Top-left corner
(0, 260), # Bottom-left corner
(1280,260), # Bottom-right corner
(0, 330), # Bottom-left corner
(1280,330), # Bottom-right corner
(820,0) # Top-right corner
])

@@ -62,8 +62,8 @@ def __init__(self, orig_frame):
self.histogram = None

# Sliding window parameters
self.no_of_windows = 20
self.margin = int((1/8) * width) # Window width is +/- margin
self.no_of_windows = 10
self.margin = int((1/12) * width) # Window width is +/- margin
self.minpix = int((1/24) * width) # Min no. of pixels to recenter window

# Best fit polynomial lines for left line and right line of the lane
@@ -160,8 +160,6 @@ def calculate_histogram(self,frame=None,plot=True):
if frame is None:
frame = self.warped_frame

frame = frame[self.height-200:self.height, 0:self.width]

# Generate the histogram
self.histogram = np.sum(frame[int(
frame.shape[0]/2):,:], axis=0)
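calculate_histogram sums the warped binary image column-wise over its bottom half; the peaks of that histogram seed the sliding-window search. A minimal, self-contained sketch of the idea, assuming a 0/255 binary input like the one produced by get_line_markings:

import numpy as np

def lane_base_histogram(warped_binary):
    # Sketch only, not part of the commit.
    # Column-wise sum of the bottom half of a 0/255 bird's-eye binary image.
    bottom_half = warped_binary[warped_binary.shape[0] // 2:, :]
    histogram = np.sum(bottom_half, axis=0)
    midpoint = histogram.shape[0] // 2
    leftx_base = int(np.argmax(histogram[:midpoint]))              # left lane seed
    rightx_base = int(np.argmax(histogram[midpoint:])) + midpoint  # right lane seed
    return histogram, leftx_base, rightx_base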
@@ -343,8 +341,6 @@ def get_lane_line_indices_sliding_windows(self, plot=False):

# Go through one window at a time
no_of_windows = self.no_of_windows
leftempty = 0
rightempty = 0

for window in range(no_of_windows):

@@ -376,16 +372,8 @@
minpix = self.minpix
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
else:
leftempty = leftempty + 1
if leftempty == 1:
leftx_current = 0
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
else:
rightempty = rightempty + 1
if rightempty == 1:
rightx_current = self.width

# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
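This hunk drops the leftempty/rightempty fallback, so each sliding window is now recentred only when more than minpix of its pixels are lit. A minimal sketch of that recentring step, assuming nonzerox holds the x coordinates of all nonzero pixels and good_left_inds/good_right_inds index the pixels inside the current windows (plain int is used in place of the deprecated np.int):

import numpy as np

def recenter_windows(nonzerox, good_left_inds, good_right_inds,
                     leftx_current, rightx_current, minpix):
    # Sketch only, not part of the commit: shift a window to the mean x of its
    # pixels when enough pixels were found, otherwise keep the previous centre.
    if len(good_left_inds) > minpix:
        leftx_current = int(np.mean(nonzerox[good_left_inds]))
    if len(good_right_inds) > minpix:
        rightx_current = int(np.mean(nonzerox[good_right_inds]))
    return leftx_current, rightx_current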
@@ -448,12 +436,24 @@ def get_line_markings(self, frame=None):
"""
if frame is None:
frame = self.orig_frame


lower = np.array([161, 114, 8])
upper = np.array([180, 255, 255])

# Convert the video frame from BGR (blue, green, red)
# color space to HLS (hue, saturation, lightness).
hls = cv2.cvtColor(frame, cv2.COLOR_BGR2HLS)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

mask_video = cv2.inRange(hsv, lower, upper) # Masking the image to find our color


kernel = np.ones((9,9),np.uint8)
closing = cv2.morphologyEx(mask_video, cv2.MORPH_CLOSE, kernel)

self.lane_line_markings = closing
return self.lane_line_markings

"""
################### Isolate possible lane line edges ######################
# Perform Sobel edge detection on the L (lightness) channel of
Expand All @@ -470,18 +470,42 @@ def get_line_markings(self, frame=None):
######################## Isolate possible lane lines ######################
lower = np.array([0, 100, 50])
upper = np.array([180, 250, 255])
rs_binary = cv2.inRange(hsv, lower, upper)
# Perform binary thresholding on the S (saturation) channel
# of the video frame. A high saturation value means the hue color is pure.
# We expect lane lines to be nice, pure colors (i.e. solid white, yellow)
# and have high saturation channel values.
# s_binary is matrix full of 0s (black) and 255 (white) intensity values
# White in the regions with the purest hue colors (e.g. >80...play with
# this value for best results).
s_channel = hls[:, :, 2] # use only the saturation channel data
_, s_binary = edge.threshold(s_channel, (60, 255))
# Perform binary thresholding on the R (red) channel of the
# original BGR video frame.
# r_thresh is a matrix full of 0s (black) and 255 (white) intensity values
# White in the regions with the richest red channel values (e.g. >120).
# Remember, pure white is bgr(255, 255, 255).
# Pure yellow is bgr(0, 255, 255). Both have high red channel values.
_, r_thresh = edge.threshold(frame[:, :, 2], thresh=(120, 255))
_, g_thresh = edge.threshold(frame[:, :, 1], thresh=(170, 255))
_, b_thresh = edge.threshold(frame[:, :, 0], thresh=(150, 255))
# Lane lines should be pure in color and have high red channel values
# Bitwise AND operation to reduce noise and black-out any pixels that
# don't appear to be nice, pure, solid colors (like white or yellow lane
# lines.)
# Keep parts of images that are in range r_thresh and out of range g_thresh and b_thresh
rs_binary = cv2.bitwise_and(s_binary, cv2.bitwise_and(r_thresh, cv2.bitwise_not(cv2.bitwise_and(g_thresh, b_thresh))))
### Combine the possible lane lines with the possible lane line edges #####
# If you show rs_binary visually, you'll see that it is not that different
# from this return value. The edges of lane lines are thin lines of pixels.
self.lane_line_markings = cv2.bitwise_or(rs_binary, sxbinary.astype(
np.uint8))
return self.lane_line_markings

"""

def histogram_peak(self):
"""
@@ -550,11 +574,7 @@ def perspective_transform(self, frame=None, plot=False):
:return: Bird's eye view of the current lane
"""
if frame is None:
frame = self.lane_line_markings

mask = np.zeros(frame.shape)
cv2.fillPoly(mask, pts=np.int32([self.roi_points]), color=(255,255,255))
frame = cv2.bitwise_and(frame, np.uint8(mask))
frame = self.lane_line_markings

# Calculate the transformation matrix
self.transformation_matrix = cv2.getPerspectiveTransform(
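perspective_transform warps the trapezoidal roi_points (edited above) onto a rectangle to produce the bird's-eye view that the histogram and sliding windows operate on. A minimal sketch of that warp, assuming src_points and dst_points are four-corner arrays like roi_points and the class's destination points:

import cv2
import numpy as np

def birds_eye_view(binary_frame, src_points, dst_points):
    # Sketch only, not part of the commit.
    height, width = binary_frame.shape[:2]
    matrix = cv2.getPerspectiveTransform(np.float32(src_points),
                                         np.float32(dst_points))
    warped = cv2.warpPerspective(binary_frame, matrix, (width, height),
                                 flags=cv2.INTER_LINEAR)
    # Re-binarise: interpolation during the warp introduces grey values.
    _, warped = cv2.threshold(warped, 127, 255, cv2.THRESH_BINARY)
    return warped, matrix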
7 changes: 2 additions & 5 deletions Software/main/camera_properties.json
@@ -1,10 +1,7 @@
{
"AutoFunctionsROIEnable": true,
"AutoFunctionsROIPreset": "Center 50%",
"BalanceWhiteAuto": "Off",
"BalanceWhiteBlue": 2.5625,
"BalanceWhiteGreen": 1.078125,
"BalanceWhiteRed": 1.0,
"BalanceWhiteAuto": "Continuous",
"BlackLevel": 0.0,
"Denoise": 0,
"ExposureAuto": "Continuous",
@@ -18,7 +15,7 @@
"Gamma": 1.0,
"Hue": 0.0,
"OffsetAutoCenter": "On",
"Saturation": 1.9999999999999982,
"Saturation": 3.0,
"Sharpness": 0,
"StrobeDelay": 0,
"StrobeDuration": 1000,
102 changes: 53 additions & 49 deletions Software/main/detect.py
@@ -26,57 +26,61 @@

while True:

if camera.snap_image(1):
# Load a frame (or image)
original_frame = camera.get_image()

# Discard unwanted part of image
original_height, original_width, original_channels = original_frame.shape
cropped_frame = original_frame[280:original_height, 0:original_width]

# Create a Lane object
lane_obj = Lane(orig_frame=cropped_frame)

# Perform thresholding to isolate lane lines
lane_line_markings = lane_obj.get_line_markings()
# If fewer than 50 'lane' pixels are detected, skip calculations
#if np.sum(lane_line_markings == 255) < 50:
# continue

# Plot the region of interest on the image
lane_obj.plot_roi(plot=False)

# Perform the perspective transform to generate a bird's eye view
# If Plot == True, show image with new region of interest
warped_frame = lane_obj.perspective_transform(plot=False)

# Generate the image histogram to serve as a starting point
# for finding lane line pixels
histogram = lane_obj.calculate_histogram(plot=False)
if camera.snap_image(1):
try:
# Load a frame (or image)
original_frame = camera.get_image()

# Find lane line pixels using the sliding window method
left_fit, right_fit = lane_obj.get_lane_line_indices_sliding_windows(
plot=False)

# Fill in the lane line
lane_obj.get_lane_line_previous_window(left_fit, right_fit, plot=False)
# Discard unwanted part of image
original_height, original_width, original_channels = original_frame.shape
cropped_frame = original_frame[280:original_height, 0:original_width]

# Overlay lines on the original frame
frame_with_lane_lines = lane_obj.overlay_lane_lines(plot=False)

# Calculate lane line curvature (left and right lane lines)
lane_obj.calculate_curvature(print_to_terminal=True)

# Calculate center offset
lane_obj.calculate_car_position(print_to_terminal=True)
# Create a Lane object
lane_obj = Lane(orig_frame=cropped_frame)

# Perform thresholding to isolate lane lines
lane_line_markings = lane_obj.get_line_markings()
# If fewer than 50 'lane' pixels are detected, skip calculations
#if np.sum(lane_line_markings == 255) < 50:
# continue

# Plot the region of interest on the image
lane_obj.plot_roi(plot=False)

# Perform the perspective transform to generate a bird's eye view
# If Plot == True, show image with new region of interest
warped_frame = lane_obj.perspective_transform(plot=False)

# Generate the image histogram to serve as a starting point
# for finding lane line pixels
histogram = lane_obj.calculate_histogram(plot=False)

# Find lane line pixels using the sliding window method
left_fit, right_fit = lane_obj.get_lane_line_indices_sliding_windows(
plot=False)

# Fill in the lane line
lane_obj.get_lane_line_previous_window(left_fit, right_fit, plot=False)

# Overlay lines on the original frame
frame_with_lane_lines = lane_obj.overlay_lane_lines(plot=False)

# Calculate lane line curvature (left and right lane lines)
lane_obj.calculate_curvature(print_to_terminal=True)

# Calculate center offset
lane_obj.calculate_car_position(print_to_terminal=True)

# Calculate turning angle
wheel_base = 0.3
curvem = (lane_obj.left_curvem + lane_obj.right_curvem) / 2
angle = math.degrees(math.atan(wheel_base / curvem))

# Calculate turning angle
wheel_base = 0.3
curvem = (lane_obj.left_curvem + lane_obj.right_curvem) / 2
angle = math.degrees(math.atan(wheel_base / curvem))
print(angle)

print(angle)
cv2.imshow('frame', frame_with_lane_lines)
if cv2.waitKey(1) == ord('q'):
break

cv2.imshow('frame', frame_with_lane_lines)
if cv2.waitKey(1) == ord('q'):
break
except:
print("Unable to process image")
Binary file modified Software/threads/__pycache__/customthreads.cpython-36.pyc