In [1]:
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
from utils.utils import *
%matplotlib inline
In [2]:
coeff = pickle.load( open( "camera_cal/coefficients.p", "rb" ) )
mtx = coeff['mtx']
dist = coeff['dist']
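The calibration matrix mtx and distortion coefficients dist are loaded from a pickle produced during camera calibration. The undistort helper used later in this notebook is imported from utils.utils; presumably it is a thin wrapper around cv2.undistort, roughly like the sketch below (the actual implementation lives in the utils module).
In [ ]:
# Sketch of what the imported undistort() helper is assumed to do;
# the real implementation is in utils/utils.py.
def undistort(img, mtx, dist):
    '''Remove lens distortion using the precomputed camera matrix and distortion coefficients.'''
    return cv2.undistort(img, mtx, dist, None, mtx)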
In [3]:
def pipeline(img, l_thresh=(60, 255), v_thresh=(30, 100), y_thresh=(30, 255), kernel_size=15):
    '''
    Applies gradient thresholds on several color channels of the current frame in order to detect the road lanes.
    '''
    img = np.copy(img)
    # Convert to HLS color space and separate the L channel
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float64)
    l_channel = hls[:,:,1]
    channel = cv2.GaussianBlur(l_channel, (kernel_size, kernel_size), 0)
    sobelx = cv2.Sobel(channel, cv2.CV_64F, 1, 0)  # Take the derivative in x
    abs_sobelx = np.absolute(sobelx)  # Absolute x derivative to accentuate lines away from horizontal
    scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
    hls_l_binary = np.zeros_like(scaled_sobel)
    hls_l_binary[(scaled_sobel >= l_thresh[0]) & (scaled_sobel <= l_thresh[1])] = 1
    # Convert to YUV color space and separate the Y and V channels
    yuv = cv2.cvtColor(img, cv2.COLOR_RGB2YUV).astype(np.float64)
    y_channel = yuv[:,:,0]
    v_channel = yuv[:,:,2]
    channel = cv2.GaussianBlur(y_channel, (kernel_size, kernel_size), 0)
    sobelx = cv2.Sobel(channel, cv2.CV_64F, 1, 0)  # Take the derivative in x
    abs_sobelx = np.absolute(sobelx)
    scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
    yuv_y_binary = np.zeros_like(scaled_sobel)
    yuv_y_binary[(scaled_sobel >= y_thresh[0]) & (scaled_sobel <= y_thresh[1])] = 1
    # White lanes: union of the HLS L-channel and YUV Y-channel gradients
    white = np.zeros_like(scaled_sobel)
    white[(hls_l_binary == 1) | (yuv_y_binary == 1)] = 1
    # Yellow lanes: YUV V-channel gradient
    channel = cv2.GaussianBlur(v_channel, (kernel_size, kernel_size), 0)
    sobelx = cv2.Sobel(channel, cv2.CV_64F, 1, 0)  # Take the derivative in x
    abs_sobelx = np.absolute(sobelx)
    scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
    yellow = np.zeros_like(scaled_sobel)
    yellow[(scaled_sobel >= v_thresh[0]) & (scaled_sobel <= v_thresh[1])] = 1
    # Combine the white and yellow detections into a single binary image
    combined = np.zeros_like(scaled_sobel)
    combined[(yellow == 1) | (white == 1)] = 1
    return combined
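The same blur, x-gradient, rescale and threshold sequence is applied to three different channels inside pipeline(). As a sketch only (not part of the original code), that repeated pattern could be factored into a single helper; gradient_threshold is a hypothetical name.
In [ ]:
# Sketch: hypothetical helper factoring out the repeated gradient-threshold steps of pipeline().
def gradient_threshold(channel, thresh, kernel_size=15):
    '''Blur a single channel, take the x derivative, rescale to 0-255 and apply a binary threshold.'''
    blurred = cv2.GaussianBlur(channel, (kernel_size, kernel_size), 0)
    sobelx = cv2.Sobel(blurred, cv2.CV_64F, 1, 0)  # derivative in x accentuates near-vertical lane edges
    abs_sobelx = np.absolute(sobelx)
    scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
    binary = np.zeros_like(scaled_sobel)
    binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
    return binary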
In [4]:
from moviepy.editor import *
video_file = "test_videos/harder_challenge_video.mp4"
video = VideoFileClip(video_file)
a = video.iter_frames()
In [45]:
for i in range(1):
    frame = next(a)
#time = 0
#frame = video.get_frame(time)
undistorted = undistort(frame, mtx, dist)
result = pipeline(undistorted)
# Plot the result
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(undistorted)
ax1.set_title('Original Image', fontsize=40)
ax2.imshow(result, cmap='gray')
ax2.set_title('Binary Image', fontsize=40)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
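To run the same two steps over the whole clip rather than a single preview frame, MoviePy's fl_image can map a per-frame function over the video. A minimal sketch, assuming the MoviePy 1.x API (fl_image, write_videofile); the output path is illustrative only.
In [ ]:
# Sketch: apply undistortion and thresholding to every frame of the clip.
def process_frame(frame):
    binary = pipeline(undistort(frame, mtx, dist))
    return np.dstack((binary, binary, binary)) * 255  # stack the binary mask into a displayable 3-channel image

# processed = video.fl_image(process_frame)
# processed.write_videofile("output_videos/harder_challenge_binary.mp4", audio=False)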
In [54]:
frame = cv2.imread('./test_images/test3.jpg')
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
undistorted = undistort(frame,mtx,dist)
result = pipeline(undistorted)
# Plot the result
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(undistorted)
ax1.set_title('Original Image', fontsize=40)
ax2.imshow(result, cmap='gray')
ax2.set_title('Binary Image', fontsize=40)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
In [37]:
original = np.copy(undistorted)
# Convert to HLS color space and separate the H, L and S channels
hls = cv2.cvtColor(original, cv2.COLOR_RGB2HLS).astype(np.float64)
h_channel = hls[:,:,0]
l_channel = hls[:,:,1]
s_channel = hls[:,:,2]
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(24, 9))
f.tight_layout()
ax1.imshow(h_channel, cmap='gray')
ax1.set_title('Hue', fontsize=26)
ax2.imshow(l_channel, cmap='gray')
ax2.set_title('Luminance', fontsize=26)
ax3.imshow(s_channel, cmap='gray')
ax3.set_title('Saturation', fontsize=26)
Out[37]:
The Luminance channel in HLS space turned out to be quite useful for detecting the white lanes on the road under different illumination conditions. Below is an example of a filter that extracts the white lanes in the image.
In [38]:
thr = (60,255)
kernel_size = 15
channel = cv2.GaussianBlur(l_channel, (kernel_size, kernel_size), 0)
sobelx = cv2.Sobel(channel, cv2.CV_64F, 1, 0) # Take the derivative in x
abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
hls_l_binary = np.zeros_like(scaled_sobel)
hls_l_binary[(scaled_sobel >= thr[0]) & (scaled_sobel <= thr[1])] = 1
plt.imshow(hls_l_binary, cmap='gray')
Out[38]:
The Saturation channel can also be used for detecting both lanes:
In [39]:
thr = (25,100)
kernel_size = 15
channel = cv2.GaussianBlur(s_channel, (kernel_size, kernel_size), 0)
sobelx = cv2.Sobel(channel, cv2.CV_64F, 1, 0) # Take the derivative in x
abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
hls_s_binary = np.zeros_like(scaled_sobel)
hls_s_binary[(scaled_sobel >= thr[0]) & (scaled_sobel <= thr[1])] = 1
plt.imshow(hls_s_binary, cmap='gray')
Out[39]:
In [40]:
original = np.copy(undistorted)
# Convert to YUV color space and separate the Y, U and V channels
yuv = cv2.cvtColor(original, cv2.COLOR_RGB2YUV).astype(np.float64)
y_channel = yuv[:,:,0]
u_channel = yuv[:,:,1]
v_channel = yuv[:,:,2]
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(24, 9))
f.tight_layout()
ax1.imshow(y_channel, cmap='gray')
ax1.set_title('Y value (luminance)', fontsize=26)
ax2.imshow(u_channel, cmap='gray')
ax2.set_title('U value', fontsize=26)
ax3.imshow(v_channel, cmap='gray')
ax3.set_title('V value', fontsize=26)
Out[40]:
After trying several parameter combinations, the V channel in YUV space turned out to be the most robust for detecting the yellow lanes on the road.
In [41]:
thr = (30,100)
kernel_size = 15
channel = cv2.GaussianBlur(v_channel, (kernel_size, kernel_size), 0)
sobelx = cv2.Sobel(channel, cv2.CV_64F, 1, 0) # Take the derivative in x
abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
yuv_v_binary = np.zeros_like(scaled_sobel)
yuv_v_binary[(scaled_sobel >= thr[0]) & (scaled_sobel <= thr[1])] = 1
plt.imshow(yuv_v_binary, cmap='gray')
Out[41]:
The Y channel can also be used for detecting the lanes under different illumination conditions.
In [42]:
thr = (30,255)
kernel_size = 15
channel = cv2.GaussianBlur(y_channel, (kernel_size, kernel_size), 0)
sobelx = cv2.Sobel(channel, cv2.CV_64F, 1, 0) # Take the derivative in x
abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
yuv_y_binary = np.zeros_like(scaled_sobel)
yuv_y_binary[(scaled_sobel >= thr[0]) & (scaled_sobel <= thr[1])] = 1
plt.imshow(yuv_y_binary, cmap='gray')
Out[42]:
In [43]:
original = np.copy(undistorted)
# Convert to HSV color space and separate the H, S and V channels
hsv = cv2.cvtColor(original, cv2.COLOR_RGB2HSV).astype(np.float64)
h_channel = hsv[:,:,0]
s_channel = hsv[:,:,1]
v_channel = hsv[:,:,2]
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(24, 9))
f.tight_layout()
ax1.imshow(h_channel, cmap='gray')
ax1.set_title('Hue', fontsize=26)
ax2.imshow(s_channel, cmap='gray')
ax2.set_title('Saturation', fontsize=26)
ax3.imshow(v_channel, cmap='gray')
ax3.set_title('Value', fontsize=26)
Out[43]:
Only the Value channel showed some good results, but I didn't use this color space in the final pipeline.
In [44]:
thr = (40,150)
kernel_size = 15
channel = cv2.GaussianBlur(v_channel, (kernel_size, kernel_size), 0)
sobelx = cv2.Sobel(channel, cv2.CV_64F, 1, 0) # Take the derivative in x
abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
sxbinary2 = np.zeros_like(scaled_sobel)
sxbinary2[(scaled_sobel >= thr[0]) & (scaled_sobel <= thr[1])] = 1
plt.imshow(sxbinary2, cmap='gray')
Out[44]:
In [70]:
yellow = np.zeros_like(sxbinary2)
yellow[(yuv_v_binary == 1)] = 1
plt.imshow(yellow, cmap='gray')
Out[70]:
In [46]:
white = np.zeros_like(sxbinary2)
white[(hls_l_binary == 1) | (yuv_y_binary == 1)] = 1
plt.imshow(white, cmap='gray')
Out[46]:
In [47]:
combined = np.zeros_like(sxbinary2)
combined[(white == 1) | (yellow == 1)] = 1
plt.imshow(combined, cmap='gray')
Out[47]:
In [ ]: