In contrast to the previous notebook, this example does not rely on machine learning, but rather uses classical computer vision to identify the lane markings in the video.
The following image is an indication of the output that you should expect to see if you execute this code.
We are providing this example as a demonstration only - the techniques involved are well beyond our curriculum, and we do not anticipate that you should understand how it works line-by-line.
!pip uninstall opencv-python opencv-contrib-python -y
!pip install matplotlib opencv-python-headless
%matplotlib auto
WARNING: Skipping opencv-python as it is not installed. WARNING: Skipping opencv-contrib-python as it is not installed. Requirement already satisfied: matplotlib in /opt/anaconda3/lib/python3.8/site-packages (3.2.2) Requirement already satisfied: opencv-python-headless in /opt/anaconda3/lib/python3.8/site-packages (4.4.0.46) Requirement already satisfied: cycler>=0.10 in /opt/anaconda3/lib/python3.8/site-packages (from matplotlib) (0.10.0) Requirement already satisfied: python-dateutil>=2.1 in /opt/anaconda3/lib/python3.8/site-packages (from matplotlib) (2.8.1) Requirement already satisfied: numpy>=1.11 in /opt/anaconda3/lib/python3.8/site-packages (from matplotlib) (1.19.2) Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /opt/anaconda3/lib/python3.8/site-packages (from matplotlib) (2.4.7) Requirement already satisfied: kiwisolver>=1.0.1 in /opt/anaconda3/lib/python3.8/site-packages (from matplotlib) (1.2.0) Requirement already satisfied: six in /opt/anaconda3/lib/python3.8/site-packages (from cycler>=0.10->matplotlib) (1.15.0) Using matplotlib backend: MacOSX
import cv2
import numpy as np
import matplotlib.pyplot as plt
Extract the region of interest
def roi_mask(image, vertices):
    """Keep only the pixels of `image` that lie inside the polygon(s) `vertices`.

    Everything outside the region of interest is zeroed out.
    """
    # Start from an all-black mask and paint the ROI polygon(s) white.
    region = np.zeros_like(image)
    cv2.fillPoly(region, vertices, 255)
    # AND-ing with the mask preserves original intensities inside the ROI only.
    return cv2.bitwise_and(image, region)
Hough transform for line detection
def hough_lines(roi_edges):
    """Detect line segments in an edge image via the probabilistic Hough transform.

    Returns whatever cv2.HoughLinesP returns: an array of segments, or
    None when no segment meets the thresholds.
    """
    # Accumulator resolution: 1 pixel in distance, 1 degree in angle.
    distance_resolution = 1
    angle_resolution = np.pi / 180
    # 15 votes minimum, segments of at least 40 px, gaps up to 20 px bridged.
    return cv2.HoughLinesP(
        roi_edges,
        distance_resolution,
        angle_resolution,
        15,
        minLineLength=40,
        maxLineGap=20,
    )
Use line fitting to find vertices of lanes
def calc_lane_vertices(point_list, ymin, ymax):
    """Fit a straight line through `point_list` and return its two endpoints.

    The fit is computed as x = f(y) (not y = f(x)), which stays stable for
    steep, near-vertical lane lines. Returns [xmin, ymin, xmax, ymax] with
    the x coordinates truncated to int.
    """
    xs = [px for px, _ in point_list]
    ys = [py for _, py in point_list]
    # Degree-1 least-squares fit: x as a linear function of y.
    line = np.poly1d(np.polyfit(ys, xs, 1))
    return [int(line(ymin)), ymin, int(line(ymax)), ymax]
Draw image of detected lanes
def draw_lanes(image, lines, roi_top=550):
    """Overlay one fitted left-lane and one fitted right-lane line on `image`.

    Parameters:
        image: the frame the segments were detected in (numpy array).
        lines: output of cv2.HoughLinesP — an array of segments, or None
            when nothing was detected.
        roi_top: y coordinate where the drawn lanes start; the bottom is
            image.shape[0]. Defaults to 550, the original hard-coded value.

    Returns the frame with the lanes blended in, or the unmodified frame
    when either side has no supporting segments.
    """
    color = [0, 255, 0]
    thickness = 8
    left_lines = []
    right_lines = []
    # HoughLinesP returns None (not an empty array) when no segment is
    # found; iterating None would raise TypeError.
    if lines is None:
        return image
    for line in lines:
        x1, y1, x2, y2 = line.reshape(4)
        # Skip perfectly vertical segments: the slope below would divide
        # by zero (nan/inf) and misclassify the segment.
        if x2 == x1:
            continue
        k = (y2 - y1) / (x2 - x1)
        # Image y grows downward, so the left lane line has negative slope.
        if k < 0:
            left_lines.append(line)
        else:
            right_lines.append(line)
    # Need at least one segment on each side to fit both lanes.
    if not left_lines or not right_lines:
        return image
    left_points = [(x1, y1) for line in left_lines for x1, y1, x2, y2 in line]
    left_points += [(x2, y2) for line in left_lines for x1, y1, x2, y2 in line]
    right_points = [(x1, y1) for line in right_lines for x1, y1, x2, y2 in line]
    right_points += [(x2, y2) for line in right_lines for x1, y1, x2, y2 in line]
    # Collapse each side's segments into one straight line across the ROI.
    left_vtx = calc_lane_vertices(left_points, roi_top, image.shape[0])
    right_vtx = calc_lane_vertices(right_points, roi_top, image.shape[0])
    lane_image = np.zeros_like(image)
    # Draw the two fitted lanes on a blank canvas, then blend: keep the
    # original frame at 80% weight and the lane overlay at full weight.
    cv2.line(lane_image, (left_vtx[0], left_vtx[1]), (left_vtx[2], left_vtx[3]), color, thickness)
    cv2.line(lane_image, (right_vtx[0], right_vtx[1]), (right_vtx[2], right_vtx[3]), color, thickness)
    return cv2.addWeighted(image, 0.8, lane_image, 1, 0)
Predict lane and merge with original image
def lane_prediction(image):
    """Run the full lane-detection pipeline on a single frame.

    Gray-scales, blurs, edge-detects, masks to the road region, finds
    segments with the Hough transform and blends the fitted lanes back
    onto the original frame.
    """
    # Trapezoidal region of interest covering the road ahead.
    # NOTE(review): these coordinates assume a frame of roughly 1500x800
    # pixels — confirm against the input video's resolution.
    roi_vtx = np.array([[(350, 770), (640, 590), (1000, 590), (1420, 770)]])
    grayscale = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # A 5x5 Gaussian kernel suppresses sensor noise before edge detection.
    smoothed = cv2.GaussianBlur(grayscale, (5, 5), 0, 0)
    # Canny hysteresis thresholds: 50 (weak edges) / 150 (strong edges).
    edges = cv2.Canny(smoothed, 50, 150)
    # Keep only edges inside the road region, then fit and draw the lanes.
    roi_edges = roi_mask(edges, roi_vtx)
    segments = hough_lines(roi_edges)
    return draw_lanes(image, segments)
Load video file
# Path to the input driving video, relative to the working directory.
video_file = 'data/example.mp4'
# Open the video for frame-by-frame reading. Note VideoCapture does not
# raise on a missing file; cap.isOpened() reports whether opening worked.
cap = cv2.VideoCapture(video_file)
Read the video frames in a loop and run lane detection
Press "Q" to stop the stream and exit.
# Stream frames through the pipeline until the video ends or "q" is pressed.
while cap.isOpened():
    success, frame = cap.read()
    # read() reports failure at end-of-file or on a broken stream.
    if not success:
        break
    annotated = lane_prediction(frame)
    cv2.imshow('frame', annotated)
    # Show each frame for ~10 ms; mask to the low byte so the key code
    # compares correctly across platforms.
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break
# Release the capture and tear down the preview window.
cap.release()
cv2.destroyAllWindows()
cv2.waitKey(1)
-1