In [60]:
#We run this first cell to obtain the necessary calibration matrices for our car's camera
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import cv2
import numpy as np
import glob
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ...., (8,5,0) for the 9x6 interior-corner board
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
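# (e.g. objp[0:3] is [[0,0,0],[1,0,0],[2,0,0]] and objp[9] is [0,1,0]; the units are chessboard squares,
#  which is fine here because the intrinsic calibration does not depend on the absolute scale)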
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob('./camera_cal/calibration*.jpg')
print(images[0])
example_img = cv2.imread(images[0])
image_shape = example_img.shape[1::-1] # (width, height), the order cv2.calibrateCamera expects
# Step through the list and search for chessboard corners
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (9,6), None)
    # If found, add object points, image points
    if ret == True:
        objpoints.append(objp)
        imgpoints.append(corners)
        # Draw and display the corners
        #img = cv2.drawChessboardCorners(img, (9,6), corners, ret)
        #cv2.imshow('img',img)
retval, cMat,dist,rvecs,tvecs = cv2.calibrateCamera(objpoints,imgpoints,image_shape,None,None)
pCounter = 1
#for fnames in images:
    #img = cv2.imread(fnames)
    #cv2.imshow('img',img)
    #cv2.waitKey(500)
    #undistorted = cv2.undistort(img,cMat,dist,None,cMat)
    #cv2.imwrite("./undistorted/undistorted" + str(pCounter) + ".jpg",undistorted)
    #pCounter = pCounter + 1
    #cv2.imshow('undistorted',undistorted)
    #cv2.waitKey(500)
print('done!')
#cv2.destroyAllWindows()
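# A minimal sanity check (not part of the original pipeline, mirroring the OpenCV calibration tutorial):
# project the object points back with the recovered parameters and measure the mean reprojection error;
# values well under one pixel per corner usually indicate a good calibration.
total_error = 0
for i in range(len(objpoints)):
    projected, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], cMat, dist)
    total_error += cv2.norm(imgpoints[i], projected, cv2.NORM_L2) / len(projected)
print("mean reprojection error:", total_error / len(objpoints))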
In [61]:
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import cv2
import numpy as np
import glob
%matplotlib inline
#this function converts the input picture into a binary picture of the lane edges
def refinePicture(pic):
    picHSV = cv2.cvtColor(pic, cv2.COLOR_BGR2HSV)
    # mask the yellow lane markings in HSV space
    lowYellow = np.array([15,50,50])
    highYellow = np.array([45,255,255])
    maskYellow = cv2.inRange(picHSV, lowYellow, highYellow)
    resYellow = cv2.bitwise_and(picHSV, picHSV, mask=maskYellow)
    # mask the white lane markings (high value, limited saturation)
    lowWhite = np.array([0,0,135])
    highWhite = np.array([255,140,255])
    maskWhite = cv2.inRange(picHSV, lowWhite, highWhite)
    resWhite = cv2.bitwise_and(picHSV, picHSV, mask=maskWhite)
    # combine the two masked images and take a higher-order Sobel derivative (dx=2, dy=4, 9x9 kernel) to pick out edges
    newpic_white = cv2.cvtColor(resWhite, cv2.COLOR_HSV2RGB)
    newpic_yellow = cv2.cvtColor(resYellow, cv2.COLOR_HSV2RGB)
    newpic = cv2.bitwise_or(newpic_white, newpic_yellow)
    examplePic_gray = cv2.cvtColor(newpic, cv2.COLOR_BGR2GRAY)
    absolute_Sobel = np.absolute(cv2.Sobel(examplePic_gray, cv2.CV_64F, 2, 4, ksize=9))
    examplePic_adjusted = np.uint8(255*absolute_Sobel/np.max(absolute_Sobel))
    # threshold the scaled gradient into a binary image of candidate lane-edge pixels
    examplePic_binary = np.zeros_like(examplePic_adjusted)
    examplePic_binary[(examplePic_adjusted > 30) & (examplePic_adjusted <= 255)] = 1
    #plt.imshow(examplePic_binary, cmap='gray')
    #cv2.waitKey(0)
    #cv2.imshow('firstPic',examplePic_binary)
    return examplePic_binary
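# A quick usage sketch (an optional check; the relative path below is assumed from the repo's
# test_images folder, matching the path used in the test cell that follows):
import os
if os.path.exists('./test_images/straight_lines1.jpg'):
    test_pic = cv2.imread('./test_images/straight_lines1.jpg')
    plt.imshow(refinePicture(test_pic), cmap='gray')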
In [ ]:
#this cell was only used for testing
'''
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import cv2
import numpy as np
import glob
%matplotlib inline
pic = cv2.imread('/home/gershom/Projects/udacity/carND/project4/CarND-Advanced-Lane-Lines/test_images/straight_lines1.jpg')
#pic = pic[:,:,::-1]
picHSV = cv2.cvtColor(pic, cv2.COLOR_BGR2HSV)
lowYellow = np.array([15,50,50])
highYellow = np.array([45,255,255])
mask = cv2.inRange(picHSV, lowYellow, highYellow)
res = cv2.bitwise_and(picHSV, picHSV, mask=mask)
#now, let's do it for white
#lowWhite = np.array([0,0,215])
#highWhite = np.array([255,60,255])
lowWhite = np.array([0,0,155])
highWhite = np.array([255,120,255])
maskWhite = cv2.inRange(picHSV, lowWhite, highWhite)
resWhite = cv2.bitwise_and(picHSV,picHSV, mask=maskWhite)
newpic_white = cv2.cvtColor(resWhite,cv2.COLOR_HSV2RGB)
newpic = cv2.cvtColor(res,cv2.COLOR_HSV2RGB)
yellow_and_white_pic = cv2.bitwise_or(newpic_white,newpic)
examplePic_gray = cv2.cvtColor(yellow_and_white_pic,cv2.COLOR_BGR2GRAY)
#examplePic_gray = cv2.cvtColor(pic, cv2.COLOR_BGR2GRAY)
absolute_Sobel = np.absolute(cv2.Sobel(examplePic_gray, cv2.CV_64F,2,4,ksize =9))
examplePic_adjusted = np.uint8(255*absolute_Sobel/np.max(absolute_Sobel))
examplePic_binary = np.zeros_like(examplePic_adjusted)
examplePic_binary[(examplePic_adjusted > 30) & (examplePic_adjusted<=255)] = 1
img_size = (examplePic_binary.shape[1],examplePic_binary.shape[0])
src = np.float32([[300,650],[1010,650],[590,450],[695,450]])
dst = np.float32([[200,650],[1100,650],[200,10],[1100,10]])
M = cv2.getPerspectiveTransform(src,dst)
warped = cv2.warpPerspective(examplePic_binary, M, img_size,flags=cv2.INTER_LINEAR)
#print(picHSV)
print(np.any(maskWhite))
#plt.imshow(yellow_and_white_pic)
#cv2.waitKey(0)
plt.imshow(warped,cmap='gray')
'''
In [62]:
import math
currentCycle = 0
left_fit = [0,0,0]
right_fit = [0,0,0]
curve_counter = 0
final_curvature = 0
current_displacement = 0
#this function is run on every frame to draw lane lines and compute curvature
def getLines(picture):
    global cMat
    global dist
    #if currentCycle == 0:
        #cv2.imwrite('./WriteUp_pics/distorted.jpg',picture[:,:,::-1])
    #undistorting the camera's pictures
    picture = cv2.undistort(picture, cMat, dist, None, cMat)
    #if currentCycle == 0:
        #cv2.imwrite('./WriteUp_pics/undistorted.jpg',picture[:,:,::-1])
    global currentCycle
    global left_fit
    global right_fit
    global left_fit_curv
    global right_fit_curv
    global ploty
    global curve_counter
    global final_curvature
    global current_displacement
    refined_picture = refinePicture(picture)
    #if currentCycle == 0:
        #cv2.imwrite('./WriteUp_pics/binary.jpg', refined_picture*255)
    examplePic_binary = refined_picture
    examplePic = picture
    img_size = (refined_picture.shape[1], refined_picture.shape[0])
    #hand-picked source points forming a trapezoid around the lane, and destination points forming a
    #rectangle, so the warp produces a top-down view in which the lane lines appear roughly parallel
    src = np.float32([[300,650],[1010,650],[590,450],[695,450]])
    dst = np.float32([[200,650],[1100,650],[200,10],[1100,10]])
    M = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(examplePic_binary, M, img_size, flags=cv2.INTER_LINEAR)
    #if currentCycle == 0:
        #print(np.any(warped))
        #cv2.imwrite('./WriteUp_pics/warped.jpg',warped*255)
    binary_warped = warped
    #this comparison is always true; it is only here because re-indenting the next ~50 lines after Jupyter's auto-indenting would be a pain
    if currentCycle < 2:
        histogram = np.sum(warped[warped.shape[0]//2:,:], axis=0)
        out_img = np.dstack((warped, warped, warped))*255
        #the two histogram peaks, left and right of the midpoint, give the starting x positions of the lane lines
        midpoint = np.int(histogram.shape[0]/2)
        leftx_base = np.argmax(histogram[:midpoint])
        rightx_base = np.argmax(histogram[midpoint:]) + midpoint
        nwindows = 9
        window_height = np.int(warped.shape[0]/nwindows)
        nonzero = warped.nonzero()
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        leftx_current = leftx_base
        rightx_current = rightx_base
        margin = 100
        minpix = 50
        left_lane_inds = []
        right_lane_inds = []
        # Step through the windows one by one
        for window in range(nwindows):
            # Identify window boundaries in x and y (and right and left)
            win_y_low = binary_warped.shape[0] - (window+1)*window_height
            win_y_high = binary_warped.shape[0] - window*window_height
            win_xleft_low = leftx_current - margin
            win_xleft_high = leftx_current + margin
            win_xright_low = rightx_current - margin
            win_xright_high = rightx_current + margin
            # Draw the windows on the visualization image
            cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2)
            cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)
            # Identify the nonzero pixels in x and y within the window
            good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
            good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
            # Append these indices to the lists
            left_lane_inds.append(good_left_inds)
            right_lane_inds.append(good_right_inds)
            # If you found > minpix pixels, recenter next window on the mean of the pixel x positions
            if len(good_left_inds) > minpix:
                leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
            if len(good_right_inds) > minpix:
                rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
        # Concatenate the arrays of indices
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)
        # Extract left and right line pixel positions
        leftx = nonzerox[left_lane_inds]
        lefty = nonzeroy[left_lane_inds]
        rightx = nonzerox[right_lane_inds]
        righty = nonzeroy[right_lane_inds]
        #'blargh' is printed when a frame does not have enough edge pixels to fit new lane lines;
        #in that case the fits from the previous frame are reused
        if (len(lefty) == 0) or (len(leftx) == 0) or (len(righty) == 0) or (len(rightx) == 0):
            print("blargh")
        else:
            current_left_fit = np.polyfit(lefty, leftx, 2)
            current_right_fit = np.polyfit(righty, rightx, 2)
            if currentCycle == 1:
                if np.linalg.norm(current_left_fit - left_fit) > 150:
                    #'jumpy left' indicates that the left lane line changed greatly from the previous frame's
                    print('jumpy left')
                else:
                    left_fit = current_left_fit
                if np.linalg.norm(current_right_fit - right_fit) > 150:
                    #'jumpy right' indicates that the right lane line changed greatly from the previous frame's
                    print('jumpy right')
                else:
                    right_fit = current_right_fit
            else:
                left_fit = current_left_fit
                right_fit = current_right_fit
        ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
        left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
        right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
        out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255,0,0]
        out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0,0,255]
        #this was used during an older version of the drawer and remains for testing
        #if currentCycle == 0:
            #print(len(ploty))
            #print(len(left_fitx))
            #for i in range(0,len(left_fitx)):
                #if (int(left_fitx[i]) < 1280) and (int(right_fitx[i]) < 1280):
                    #out_img[ploty[i],int(left_fitx[i])] = [255,255,0]
                    #out_img[ploty[i],int(right_fitx[i])] = [255,255,0]
            #cv2.imwrite('./WriteUp_pics/binary_lanes.jpg',out_img)
        #now, let's calculate curvature (in pixel units, evaluated at the bottom of the image)
        y_eval = np.max(ploty)
        left_deriv_1 = 2*left_fit[0]*y_eval + left_fit[1]
        left_deriv_2 = 2*left_fit[0]
        right_deriv_1 = 2*right_fit[0]*y_eval + right_fit[1]
        right_deriv_2 = 2*right_fit[0]
        left_curve_radius = ((1 + left_deriv_1**2)**1.5)/np.absolute(left_deriv_2)
        right_curve_radius = ((1 + right_deriv_1**2)**1.5)/np.absolute(right_deriv_2)
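        # For reference (the standard curvature formula, stated here for clarity): for a fit x = A*y^2 + B*y + C,
        # the radius of curvature at a given y is
        #     R = (1 + (2*A*y + B)**2)**1.5 / abs(2*A)
        # which is exactly what the two expressions above compute, with y = y_eval (the bottom of the image).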
        #the lane is assumed to be about 3.7 m wide and the warped section about 30 m long (car to horizon)
        #now, calculate the real-world radius of curvature by rescaling pixels to metres before refitting
        ym_per_pixel = 30/720
        xm_per_pixel = 3.7/900
        # Fit a second order polynomial to each line, this time in metres
        if (len(lefty) == 0) or (len(leftx) == 0) or (len(righty) == 0) or (len(rightx) == 0):
            print("blargh")
        else:
            left_fit_curv = np.polyfit(lefty*ym_per_pixel, leftx*xm_per_pixel, 2)
            right_fit_curv = np.polyfit(righty*ym_per_pixel, rightx*xm_per_pixel, 2)
        y_eval = np.max(ploty)
        #y_eval is converted from pixels to metres so it matches the metre-scaled fits
        left_deriv_1 = 2*left_fit_curv[0]*y_eval*ym_per_pixel + left_fit_curv[1]
        left_deriv_2 = 2*left_fit_curv[0]
        right_deriv_1 = 2*right_fit_curv[0]*y_eval*ym_per_pixel + right_fit_curv[1]
        right_deriv_2 = 2*right_fit_curv[0]
        left_curve_radius = ((1 + left_deriv_1**2)**1.5)/np.absolute(left_deriv_2)
        right_curve_radius = ((1 + right_deriv_1**2)**1.5)/np.absolute(right_deriv_2)
        #calculating the car's displacement from the lane centre; the camera is assumed to be centred on the car, so x = 640 marks the car's position
        real_center = 640
        right_bottom = right_fitx[-1]
        left_bottom = left_fitx[-1]
        #print("the left and right bottom are " + str(left_bottom) + " " + str(right_bottom))
        current_center = (right_bottom + left_bottom)/2
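        # Worked example (illustrative numbers, not from any specific frame): if left_bottom = 310 px and
        # right_bottom = 990 px, the lane centre is at 650 px, and the displacement is
        # (640 - 650) * 3.7/900 ≈ -0.04 m, reported below as ~0.04 m to the left of the centre of the lane.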
        #the next block only refreshes the displayed curvature/displacement once every ~10 frames,
        #which makes the on-screen numbers easier to read while the video plays
        if curve_counter == 0:
            final_curvature = np.mean([left_curve_radius, right_curve_radius])
            current_displacement = round((real_center - current_center)*xm_per_pixel, 3)
        if curve_counter > 10:
            curve_counter = 0
        else:
            curve_counter = curve_counter + 1
        warp_zero = np.zeros_like(warped).astype(np.uint8)
        color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
        Minv = cv2.getPerspectiveTransform(dst, src)
        # Recast the x and y points into usable format for cv2.fillPoly()
        pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
        pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
        pts = np.hstack((pts_left, pts_right))
        # Draw the lane onto the warped blank image
        cv2.fillPoly(color_warp, np.int_([pts]), (0,255,0))
        # Warp the lane polygon back to the original perspective and overlay it on the frame
        newwarp = cv2.warpPerspective(color_warp, Minv, (examplePic.shape[1], examplePic.shape[0]))
        result = cv2.addWeighted(examplePic, 1, newwarp, .5, 0)
        result_text = "The radius of curvature is " + str(round(final_curvature)) + "(m)"
        if current_displacement < 0:
            displacement_text = "The displacement is " + str(abs(current_displacement)) + "(m)" + " to the left of the center of the lane"
        else:
            displacement_text = "The displacement is " + str(abs(current_displacement)) + "(m)" + " to the right of the center of the lane"
        result_font = cv2.FONT_HERSHEY_COMPLEX
        cv2.putText(result, result_text, (50,150), result_font, .8, (0,0,0))
        cv2.putText(result, displacement_text, (50,100), result_font, .8, (0,0,0))
        if currentCycle == 0:
            cv2.imwrite('./WriteUp_pics/finished_frame.jpg', result[:,:,::-1])
        currentCycle = 1
    #print("checkpoint Delta")
    return result
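# A quick single-frame check (a sketch, not part of the original pipeline): run getLines on one test image
# before committing to the full video. It assumes the calibration cell above has been run and that a test
# image exists at the relative path below; the counters are reset afterwards so the video run starts fresh.
import os
if os.path.exists('./test_images/straight_lines1.jpg'):
    test_frame = mpimg.imread('./test_images/straight_lines1.jpg')  # RGB, like the frames moviepy passes in
    plt.imshow(getLines(test_frame))
    currentCycle = 0
    curve_counter = 0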
In [63]:
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import cv2
import numpy as np
import glob
from moviepy.editor import VideoFileClip
from IPython.display import HTML
%matplotlib inline
global currentCycle
#select the input video and run the getLines pipeline on every frame
outputVid = './project_video_output.mp4'
exampleVid = VideoFileClip('./project_video.mp4')
#exampleVid2 = exampleVid.subclip(35,36)
outputClip = exampleVid.fl_image(getLines)
%time outputClip.write_videofile(outputVid,audio=False)
print("done")
currentCycle = 0
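# The HTML import above is otherwise unused; a minimal sketch to preview the finished video inline
# (assumes the notebook front-end can play mp4):
HTML("""
<video width="960" height="540" controls>
  <source src="{0}">
</video>
""".format(outputVid))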