In [1]:
import cv2
from glob import glob
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pipeline as pl
from sklearn import datasets, linear_model
import copy
import sys
from tqdm import tqdm
from moviepy.editor import VideoFileClip
In [2]:
# Prepare main data containers
path = {'calibration': './camera_cal/',
        'example': './examples/',
        'test': './test_images/'}
files = {key: [] for key in path}
images = {key: [] for key in path}
cal_images = {key: [] for key in path}
# Get filenames of all images
files['calibration'] = glob(path['calibration'] + 'calibration*.jpg')
files['example'] = glob(path['example'] + '*.jpg')
files['test'] = glob(path['test'] + '*.jpg')
# Load all images
for key in files:
    print("Load {} images...".format(key))
    for file in files[key]:
        images[key].append(mpimg.imread(file))
# Absolute path to the input video (machine-specific; adjust to your environment)
video_filename = '/home/timo/Documents/udacity/carnd1/CarND-Advanced-Lane-Lines/project_video.mp4'
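A quick sanity check of the containers filled above; this is a minimal sketch that only relies on the `files` and `images` dicts from the previous cell.
In [ ]:
# Sanity check: every listed file should have produced a loaded image
for key in files:
    assert len(files[key]) == len(images[key]), "missing images for '{}'".format(key)
    if images[key]:
        print("{}: {} images of shape {}".format(key, len(images[key]), images[key][0].shape))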
In [3]:
def get_birdseye_view_parameters(verbose=0):
    """Return the perspective transform M (camera -> birdseye) and its inverse Minv."""
    if verbose == 1:
        print("Load birdseye view parameters...")
    # Source quadrilateral on the undistorted camera frame (trapezoid around the lane)
    src_rect = np.zeros((4, 2), dtype="float32")
    src_rect[0, :] = [225, 700]    # left bottom
    src_rect[1, :] = [595, 450]    # left top
    src_rect[2, :] = [690, 450]    # right top
    src_rect[3, :] = [1055, 700]   # right bottom
    # Destination rectangle; the top edge is mapped above the frame (y = -100)
    # so that the far end of the lane is stretched in the warped image
    dst_rect = np.zeros((4, 2), dtype="float32")
    dst_rect[0, :] = [325, 720]    # left bottom
    dst_rect[1, :] = [325, -100]   # left top
    dst_rect[2, :] = [955, -100]   # right top
    dst_rect[3, :] = [955, 720]    # right bottom
    M = cv2.getPerspectiveTransform(src_rect, dst_rect)
    # Compute the inverse perspective transform:
    Minv = cv2.getPerspectiveTransform(dst_rect, src_rect)
    if verbose == 1:
        print("Done.")
    return M, Minv
def get_final_calibration_parameters(calibration_images, verbose=0):
    """Return the final camera matrix and distortion coefficients."""
    if verbose == 1:
        print("Load calibration parameters...")
    camera_matrices, distortion_coefficients, singular_values = pl.get_calibration_parameters(calibration_images, verbose=0)
    # Keep only the parameters obtained after the last calibration image
    mtx, dist = camera_matrices[-1], distortion_coefficients[-1]
    if verbose == 1:
        print("Done.")
    return mtx, dist
def undistort_image(image):
    return cv2.undistort(image, mtx, dist, None, mtx)

def detect_edges(image):
    return pl.lane_detection(image, verbose=0)

def transform_to_birdseye_view(image, M, target_dim=(1280, 720)):
    return cv2.warpPerspective(image, M, target_dim, flags=cv2.INTER_LINEAR)

def transform_to_camera_view(image, Minv, target_dim=(1280, 720)):
    return cv2.warpPerspective(image, Minv, target_dim, flags=cv2.INTER_LINEAR)
def windowed_line_search(image, verbose=0):
    # find_lines is expected to be provided elsewhere; it is not defined in this notebook
    result, left_fit, right_fit, textfields, textcoords = find_lines(image, verbose=verbose)
    return result, left_fit, right_fit, textfields, textcoords

def overlay(frame, line_image):
    # Alpha-blend the detected lane area (30 % opacity) onto the original frame
    result = cv2.addWeighted(frame.astype(float), 1, line_image.astype(float), 0.3, 0)
    return result.astype(np.uint8)

def put_text_annotations(image, textfields, textcoords, font):
    color = (255, 255, 255)
    for textfield, coordinate in zip(textfields, textcoords):
        # cv2.putText draws in place and returns the annotated image
        image = cv2.putText(image, textfield, coordinate, font, 1, color, 2, cv2.LINE_AA)
    # Returning image (rather than a loop-local variable) also covers empty annotation lists
    return image
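As a visual check of the transform parameters, a test frame can be warped to the birdseye view and back; the sketch below assumes at least one 1280x720 test image was loaded above.
In [ ]:
# Round-trip check: warp a test frame to the birdseye view and back
M_chk, Minv_chk = get_birdseye_view_parameters()
test_frame = images['test'][0]
birdseye = transform_to_birdseye_view(test_frame, M_chk, (1280, 720))
restored = transform_to_camera_view(birdseye, Minv_chk, (1280, 720))
fig, axes = plt.subplots(1, 3, figsize=(15, 4))
for ax, img, title in zip(axes, [test_frame, birdseye, restored],
                          ['original', 'birdseye', 'restored']):
    ax.imshow(img)
    ax.set_title(title)
plt.show()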
In [4]:
# Get camera calibration parameters
mtx, dist = get_final_calibration_parameters(calibration_images=copy.deepcopy(images['calibration']), verbose=0)
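To verify the calibration, one of the chessboard images can be undistorted with the parameters just computed; a minimal sketch, assuming the calibration set is non-empty:
In [ ]:
# Compare a calibration image before and after undistortion
raw = images['calibration'][0]
undistorted = cv2.undistort(raw, mtx, dist, None, mtx)
fig, axes = plt.subplots(1, 2, figsize=(10, 4))
axes[0].imshow(raw)
axes[0].set_title('distorted')
axes[1].imshow(undistorted)
axes[1].set_title('undistorted')
plt.show()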
In [5]:
from Lane import Lane

lane = Lane(nb_memory_items=25)
M, Minv = get_birdseye_view_parameters(verbose=0)

def create_project_video():
    def draw_lines(image):
        # Full per-frame pipeline: undistort, detect edges, warp to birdseye
        # view, search for lane lines, warp back and annotate
        #xcenter = lane.get_centerx_pixel()
        result = undistort_image(image)
        result = detect_edges(result)
        result = transform_to_birdseye_view(result, M, (1280, 720))
        result = lane.line_search(result, verbose=0)
        textfields, textcoords = lane.get_radius_annotations()
        result = transform_to_camera_view(result, Minv, (1280, 720))
        result = overlay(image, result)
        result = put_text_annotations(result, textfields, textcoords, font=cv2.FONT_HERSHEY_SIMPLEX)
        return result

    video = VideoFileClip(video_filename)
    lanes_video = video.fl_image(draw_lines)
    %time lanes_video.write_videofile("lanes_video.mp4", audio=False)
    return lanes_video

lanes_video = create_project_video()
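For debugging, the same steps as `draw_lines` can be applied to a single frame instead of rendering the whole clip; a sketch (note that `lane` keeps state across calls, so an isolated frame may come out slightly differently than in the video run):
In [ ]:
# Single-frame smoke test: re-apply the draw_lines steps to the frame at t = 0 s
frame = VideoFileClip(video_filename).get_frame(0)
processed = undistort_image(frame)
processed = detect_edges(processed)
processed = transform_to_birdseye_view(processed, M, (1280, 720))
processed = lane.line_search(processed, verbose=0)
textfields, textcoords = lane.get_radius_annotations()
processed = transform_to_camera_view(processed, Minv, (1280, 720))
processed = overlay(frame, processed)
processed = put_text_annotations(processed, textfields, textcoords, font=cv2.FONT_HERSHEY_SIMPLEX)
plt.imshow(processed)
plt.show()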