In [1]:
%matplotlib inline
from matplotlib import pyplot as plt
from os import path
import numpy as np
import cv2
import pandas as pd
# display a list of images with titles
def show_images(images, titles=None, scale=1.3):
    """Display a list of images"""
    n_ims = len(images)
    if titles is None: titles = ['(%d)' % i for i in range(1, n_ims + 1)]
    fig = plt.figure()
    n = 1
    for image, title in zip(images, titles):
        a = fig.add_subplot(1, n_ims, n) # Make subplot
        if image.ndim == 2: # Is image grayscale?
            plt.imshow(image, cmap='gray')
        else:
            # OpenCV stores color images as BGR; convert for matplotlib
            plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        a.set_title(title)
        plt.axis("off")
        n += 1
    fig.set_size_inches(np.array(fig.get_size_inches(), dtype=np.float) * n_ims / scale)
    plt.show()
# finding the eye
def find_eye(image, thresh=4, size=256):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Canny edge finder
    edges = np.array([])
    edges = cv2.Canny(gray, thresh, thresh * 3, edges)
    # Find contours
    # second output is the hierarchy - we are not interested in it
    contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Now let's get only what we need out of it
    hull_contours = cv2.convexHull(np.vstack(np.array(contours)))
    hull = np.vstack(hull_contours)

    def createMask((rows, cols), hull):
        # black image
        mask = np.zeros((rows, cols), dtype=np.uint8)
        # blit our contours onto it in white color
        cv2.drawContours(mask, [hull], 0, 255, -1)
        return mask

    mask = createMask(image.shape[0:2], hull)
    # returning the hull to illustrate a few issues below
    return mask, hull
The images supplied vary drastically in their palettes. To enable feature extraction, we first need to normalize them to a common color scheme. We pick a reference image and apply color transfer to the rest:
In [2]:
img_path = "/kaggle/retina/train/sample"
# this is the image whose colors we want to map the others to
ref_image = cv2.imread(path.join(img_path, "6535_left.jpeg"))
# images picked to illustrate different problems arising during algorithm application
image_names = ["16_left.jpeg", "10130_right.jpeg", "21118_left.jpeg"]
image_paths = map(lambda t: path.join(img_path, t), image_names)
images = map(lambda p: cv2.imread(p), image_paths)
# let's see what we've got
image_titles = map(lambda i: path.splitext(i)[0], image_names)
show_images(images, image_titles, scale = 0.8)
show_images([ref_image], ["reference image"], scale = 0.8)
In [3]:
# Pyramid down & blur
# Easy-peasy
def pyr_blurr(image):
    return cv2.GaussianBlur(cv2.pyrDown(image), (7, 7), 30.)

ref_image = pyr_blurr(ref_image)
images = map(lambda i: pyr_blurr(i), images)
In [4]:
# here is the histogram of the reference image green channel:
hist_green = cv2.calcHist([ref_image], [1], None, [256], np.array([0, 255]))
fig = plt.figure()
plt.bar(np.arange(256), hist_green, width=2, color='g', edgecolor='none')
fig.set_size_inches(np.array(fig.get_size_inches(), dtype=np.float) * 2)
plt.show()
The histogram above has two peaks and a large number of 0 values - not surprising, since we are computing it over the entire image, background included. We need to mask off the background.
In [5]:
# we should really take it over the masked region, to remove the irrelevant background
mask, hull = find_eye(ref_image)
hist_green = cv2.calcHist([ref_image], [1], mask, [256], np.array([0, 255]))
fig = plt.figure()
plt.bar(np.arange(256), hist_green, width=2, color='g', edgecolor='none')
fig.set_size_inches(np.array(fig.get_size_inches(), dtype=np.float) * 2)
plt.show()
Let's find the CDF. This code is heavily influenced by the LINQ way of doing everything at once without writing loops. I am starting to do better in Python, though, I promise.
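To recap the CDF formula (the one referenced in the code comments below): for each channel c, cdf_c(v) = min(255, max(0, round(255 / |I| · Σ_{u ≤ v} h_c(u)))), where h_c is the masked histogram of channel c and |I| is the number of pixels inside the eye mask - hence the countNonZero call.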
In [6]:
old_images = images
images = [ref_image]

# always return colors between 0 and 255
def saturate(v):
    return map(lambda a: min(max(round(a), 0), 255), v)

# given a list of images, create masks for them
def get_masks(images):
    return map(lambda i: find_eye(i)[0], images)

# list of masks
maskHull = get_masks(images)
# list of lists of channels
channels = map(lambda i: cv2.split(i), images)
# match each mask to its channels
imMask = zip(channels, maskHull)
# area of the image (the |I| constant in the CDF formula above)
nonZeros = map(lambda m: cv2.countNonZero(m), maskHull)
# grab three histograms - one for each channel
histPerChannel = map(lambda (c, mask): \
    [cv2.calcHist([cimage], [0], mask, [256], np.array([0, 255])) for cimage in c], imMask)
# compute the cdf's.
# they are normalized & saturated: values over 255 are cut off.
cdfPerChannel = map(lambda (hChan, nz): \
    [saturate(np.cumsum(h) * 255.0 / nz) for h in hChan], zip(histPerChannel, nonZeros))

# let's see a cdf over the green channel (plot the first image CDF)
fig = plt.figure()
plt.bar(np.arange(256), cdfPerChannel[0][1], width=2, color='g', edgecolor='none')
fig.set_size_inches(np.array(fig.get_size_inches(), dtype=np.float) * 2)
plt.show()
In [7]:
# the code above, packaged neatly in one function
def calc_hist(images, masks):
    channels = map(lambda i: cv2.split(i), images)
    imMask = zip(channels, masks)
    nonZeros = map(lambda m: cv2.countNonZero(m), masks)
    # grab three histograms - one for each channel
    histPerChannel = map(lambda (c, mask): \
        [cv2.calcHist([cimage], [0], mask, [256], np.array([0, 255])) for cimage in c], imMask)
    # compute the cdf's.
    # they are normalized & saturated: values over 255 are cut off.
    cdfPerChannel = map(lambda (hChan, nz): \
        [saturate(np.cumsum(h) * 255.0 / nz) for h in hChan], \
        zip(histPerChannel, nonZeros))
    return np.array(cdfPerChannel)

# compute the color map based on minimal distances between cdf values of the reference and input images
def getMin(ref, img):
    l = [np.argmin(np.abs(ref - i)) for i in img]
    return np.array(l)

# compute and apply the color map on all channels of the image
def map_image(image, refHist, imageHist):
    # each of the arguments contains histograms over 3 channels
    mp = np.array([getMin(r, i) for (r, i) in zip(refHist, imageHist)])
    channels = np.array(cv2.split(image))
    mappedChannels = np.array([mp[i, channels[i]] for i in range(0, 3)])
    return cv2.merge(mappedChannels).astype(np.uint8)

# compute the histograms on all three channels for all images and map them to the reference
def histogram_specification(ref, images, masks):
    cdfs = calc_hist(images, masks)
    mapped = [map_image(images[i], ref[0], cdfs[i, :, :]) for i in range(len(images))]
    return mapped
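As an aside, the fancy-indexing step in map_image (mp[i, channels[i]]) could equally be written with OpenCV's cv2.LUT, which applies a 256-entry lookup table to an 8-bit image. A minimal sketch of that variant - map_image_lut is a hypothetical helper, not part of the pipeline above:

# Aside (not from the original pipeline): apply the per-channel color map with cv2.LUT.
# The lookup tables are exactly the `mp` rows computed by getMin.
def map_image_lut(image, refHist, imageHist):
    # 3 lookup tables of 256 entries, one per channel
    mp = np.array([getMin(r, i) for (r, i) in zip(refHist, imageHist)], dtype=np.uint8)
    channels = cv2.split(image)
    mapped = [cv2.LUT(c, mp[i]) for i, c in enumerate(channels)]
    return cv2.merge(mapped)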
In [8]:
# the fruit of our labor
images = old_images
refMask = get_masks([ref_image])
refHist = calc_hist([ref_image], refMask)
masks = get_masks(images)
histSpec = histogram_specification(refHist, images, masks)
print "Reference Image"
show_images([ref_image], ["Ref"], scale = 0.9)
print "Original Images"
show_images(images, titles = image_titles, scale = 0.9)
print "Mapped Images"
show_images(histSpec, titles = image_titles, scale = 0.9)
At this point it seems like a good idea to filter out the background noise that appears in image 21118_left - or at least to make sure the background is entirely black, so that future steps of the algorithm can mask it out easily.
Not a problem:
In [9]:
def mask_background(image, mask):
    # copy to preserve the original
    im = image.copy()
    im[mask == 0, :] = 0
    return im

maskedBg = [mask_background(h, m) for (h, m) in zip(histSpec, masks)]
print "Masked Background"
show_images(maskedBg, titles=image_titles, scale=0.9)
Now what?! This does not look desirable at all! Since we were histogramming with a mask, and the mask appears to have swallowed a good chunk of the actual image, we need to do something about it. Let's take a look at the mask. This is a problem we handled in the previous post.
In [10]:
mask, _ = find_eye(images[2], 1)
show_images([mask], scale = 0.9)
Let's apply both masks and see how they influence the resulting image:
In [11]:
mask, _ = find_eye(images[2], 4)
masks[2] = mask
histSpec = histogram_specification(refHist, [images[2]], [masks[2]])
print "Defective mask: We apply the mask we got by thresholding the Canny filter at 4 prior to color transfer"
show_images(histSpec, titles = image_titles[2:], scale = 0.9)
mask, _ = find_eye(images[2], 1)
masks[2] = mask
histSpec = histogram_specification(refHist, [images[2]], [masks[2]])
print "Corrected Mask: Applying the mask above (Canny threshold = 1) prior to color transfer "
show_images(histSpec, titles = image_titles[2:], scale = 0.9)
maskedBg = [mask_background(h, m) for (h, m) in zip(histSpec, [masks[2]])]
print "Masked Background: Actually mask the background by applying appropriate masks"
show_images(maskedBg, titles=image_titles[2:], scale=0.9)