In [1]:
%load_ext autoreload
%autoreload 2
In [2]:
import cv2
import numpy as np
# Later cells call cv_utils.<fn> directly; a star import does NOT bind the
# module name, so bind it explicitly to survive Restart & Run All.
import cv_utils
from cv_utils import *
from tests.test_simple import *
from matplotlib import pyplot as plt
%matplotlib inline
In [7]:
image_data = helper_getimage("cake.jpg")
In [11]:
helper_writetoTEST_DATA("resources/cake_rgb.jpg", convert_to_rgb(image_data))
In [5]:
convert_to_rgb??
In [12]:
expected = helper_getimage("cake_rgb.jpg")
plt.imshow(expected)
Out[12]:
In [13]:
image_data = helper_getimage("cake.jpg")
expected = helper_getimage("cake_rgb.jpg")
result = cv_utils.convert_to_rgb(image_data)
In [15]:
plt.imshow(image_data)
plt.show()
plt.imshow(expected)
plt.show()
plt.imshow(result)
plt.show()
In [21]:
image_data = helper_getimage("cake.jpg")
plt.imshow(image_data)
plt.show()
result = cv_utils.convert_to_hsv(image_data)
plt.imshow(result)
plt.show()
In [ ]:
In [28]:
image_data = helper_getimage("cake.jpg")
plt.imshow(image_data)
plt.show()
result = cv_utils.convert_to_gray(image_data)
plt.imshow(result, cmap="gray")
plt.show()
In [29]:
helper_writetoTEST_DATA("resources/cake_gray.jpg", result)
In [34]:
#image data is bgr, plt expects rgb, so plt draws a bluetone in red colors
image_data = helper_getimage("cake.jpg")
plt.imshow(image_data)
plt.show()
result = cv_utils.convert_bgr_to_bluetone(image_data)
plt.imshow(result)
plt.show()
In [35]:
#image data is bgr, plt expects rgb, so plt draws a bluetone in red colors
image_data = helper_getimage("cake.jpg")
plt.imshow(image_data)
plt.show()
result = cv_utils.convert_bgr_to_greentone(image_data)
plt.imshow(result)
plt.show()
In [37]:
#image data is bgr, plt expects rgb, so plt draws a redtone in blue colors
image_data = helper_getimage("cake.jpg")
plt.imshow(image_data)
plt.show()
result = cv_utils.convert_bgr_to_redtone(image_data)
plt.imshow(result)
plt.show()
In [96]:
data[:,:,2].shape
Out[96]:
In [102]:
data = np.array([np.repeat(np.expand_dims(np.arange(0, 255, 1), 1), 255, 1)
,np.repeat(np.expand_dims(np.arange(0, 255, 1), 1), 255, 1).T
,np.ones((255,255))])
data = np.swapaxes(data, 0, 1)
data = np.swapaxes(data, 1, 2)
data = data.astype(np.uint8)
plt.imshow(cv_utils.convert_bgr_to_bluetone(data))
plt.show()
plt.imshow(cv_utils.convert_bgr_to_greentone(data))
plt.show()
plt.imshow(cv_utils.convert_bgr_to_redtone(data))
plt.show()
In [132]:
data.shape
Out[132]:
In [3]:
data = helper_dummychannels()
plt.imshow(cv_utils.convert_bgr_to_bluetone(data))
plt.show()
plt.imshow(cv_utils.convert_bgr_to_greentone(data))
plt.show()
plt.imshow(cv_utils.convert_bgr_to_redtone(data))
plt.show()
In [6]:
np.repeat(255, 2,0)
Out[6]:
In [7]:
np.repeat(255, 256, 0)
Out[7]:
In [8]:
np.repeat(256, 256, 0)
Out[8]:
In [9]:
np.all(np.equal(np.repeat(256, 256, 0), compute_histogram_3channel(data)[0]))
Out[9]:
In [10]:
np.repeat(256, 256, 0)
Out[10]:
In [11]:
expected = np.zeros((256))
expected[-1] = 65536 # 256*256
expected
Out[11]:
In [12]:
compute_histogram_3channel(data)[2]
Out[12]:
In [13]:
%%timeit
np.random.rand(10000,10000, 20)
In [14]:
%%timeit
np.random.rand(10000,10000, 20).ravel()
In [15]:
%%timeit
np.random.rand(10000,10000, 20).flatten()
In [23]:
np.bincount(data.ravel())
Out[23]:
In [20]:
expected = np.repeat(512, 256, 0)
expected[-1] = 66048 #(256*256)+512
In [30]:
np.all(np.equal(compute_histogram_1channel(data), expected))
Out[30]:
In [36]:
n_steps = 1
[x for x in range(n_steps)]
Out[36]:
In [46]:
from functools import reduce  # reduce is not a builtin in Python 3

n_steps = 1
init_arr = [-1]
# Bug fix: list.append returns None, so the old lambda made reduce's
# accumulator become None after the first step (and crash on the next).
# Return a new list instead so the accumulator stays a list.
init_arr = reduce(lambda arr, v: arr + [v * v], range(n_steps), init_arr)
init_arr
Out[46]:
In [85]:
np.arange(3,0,-1)
Out[85]:
In [106]:
min((23,44))
Out[106]:
In [99]:
np.linspace(1,3,3)
Out[99]:
In [110]:
v = 256
s = 1 + 1
# map() is lazy in Python 3 and would display "<map object>"; build the
# list of successive halvings of v explicitly so the values are shown.
scales = [v * (0.5 ** (x - 1)) for x in np.linspace(1, s, s)]
scales
Out[110]:
In [112]:
result = cv_utils.compute_downsamples(data, 8)
# map() is lazy in Python 3 — a bare map object would be displayed instead
# of the shapes; materialize into a list.
shapes = [image.shape for image in result]
shapes
Out[112]:
In [111]:
expected = cv_utils.compute_downsamples(data,8)
print(len(expected))
for image in expected:
plt.imshow(image)
plt.show()
In [113]:
result = cv_utils.compute_upsamples(data, 2)
# materialize — a bare map object would be displayed instead of the shapes
shapes = [image.shape for image in result]
shapes
Out[113]:
In [118]:
expected = cv_utils.compute_upsamples(data,2)
print(len(expected))
for image in expected:
plt.imshow(image)
plt.show()
In [15]:
print(123)
In [51]:
cv_utils.coords_pixel_to_local((25.6,51.2), data)
Out[51]:
In [52]:
cv_utils.coords_pixel_to_local((-1,51.2), data)
Out[52]:
In [53]:
cv_utils.coords_pixel_to_local((128,512), data)
Out[53]:
In [70]:
cv_utils.coords_local_to_pixel((0.1, 0.2), data)
Out[70]:
In [71]:
cv_utils.coords_local_to_pixel((0.1, 1.0), data)
Out[71]:
In [72]:
cv_utils.coords_local_to_pixel((-1.0, 2.0), data)
Out[72]:
In [84]:
np.sqrt((2**2)+(2**2))
Out[84]:
In [93]:
np.subtract((1,2), (3,0))
Out[93]:
In [89]:
np.linalg.norm([(1, 0),(3, 2)])
Out[89]:
In [102]:
cv_utils.distance_betweencoords((1,1), (-1, -1))
Out[102]:
In [146]:
plt.imshow(data)
Out[146]:
In [156]:
helper_writetoTEST_DATA("resources/dummy_data_rect_fill.jpg",cv_utils.draw_rectangle(data, (10,100), (128, 200), (255,127,0), -1))
In [157]:
helper_writetoTEST_DATA("resources/dummy_data_rect_line.jpg",cv_utils.draw_rectangle(data, (10,100), (128, 200), (255,127,0), 10))
In [158]:
dummy_data = helper_dummychannels()
result = cv_utils.draw_rectangle(dummy_data, (10,100), (128, 200), (255,127,0), -1)
plt.imshow(result)
Out[158]:
In [216]:
res = cv_utils.draw_circle(cv_utils.draw_circle(data, (128,128), 64, (255,255,0), 10), (128,128), 50, (255,127,0), -1)
In [217]:
plt.imshow(res)
Out[217]:
In [218]:
helper_writetoTEST_DATA("resources/circle_and_arc.jpg", res)
In [220]:
plt.imshow(helper_getimage("circle_and_arc.jpg"))
Out[220]:
In [226]:
np.all(np.equal(result, helper_getimage("circle_and_arc.jpg")))
Out[226]:
In [224]:
dummy_data = helper_dummychannels()
#Lets draw a circle with no fill
result = cv_utils.draw_circle(dummy_data, (128,128), 64, (255,255,0), 10)
# Lets draw a circle inside of it
result = cv_utils.draw_circle(result, (128,128), 50, (255,127,0), -1)
plt.imshow(result)
Out[224]:
In [235]:
helper_writetoTEST_DATA("resources/line.jpg", res)
In [234]:
res = cv_utils.draw_line(data, (0,0), (128,128), (255,220,0), 10)
plt.imshow(res)
Out[234]:
In [67]:
helper_writetoTEST_DATA("resources/polygons_openclose.jpg", res)
In [80]:
np.all(np.equal(res,result))
Out[80]:
In [74]:
#these are same coordinates but with a different order
pts = np.array([
[50,50],
[100, 50],
[150, 150],
[50, 150],
], np.int32)
result = cv_utils.draw_polygon(data, pts.tolist(), (255, 255, 0),5,True)
pts = pts + 10
result = cv_utils.draw_polygon(result, pts.tolist(), (255, 127, 0),5,False)
plt.imshow(result)
Out[74]:
In [5]:
all_zeros_rgb = np.zeros((512,512,3), np.uint8)
#these are same coordinates but with a different order
pts = np.array([
[400,50],
[130,200],
[145,500],
[10,400]
], np.int32)
pts = pts.reshape((-1,1,2))
cv2.polylines(all_zeros_rgb, [pts], True, (255,127,0), 5)
plt.imshow(all_zeros_rgb)
Out[5]:
In [104]:
res = cv_utils.translate_inpixels(data, 20,20)
plt.imshow(res)
plt.show()
In [107]:
#helper_writetoTEST_DATA("resources/translated_1.jpg", res)
#helper_writetoTEST_DATA("resources/translated_2.jpg", res)
In [106]:
res = cv_utils.translate_inpixels(data, -20,-20)
plt.imshow(res)
plt.show()
In [111]:
res = cv_utils.translate_inlocal(data, 0.5, 0.5)
plt.imshow(res)
plt.show()
In [114]:
#helper_writetoTEST_DATA("resources/translated_3.jpg", res)
#helper_writetoTEST_DATA("resources/translated_4.jpg", res)
In [113]:
res = cv_utils.translate_inlocal(data, -0.5, 0.5)
plt.imshow(res)
plt.show()
In [122]:
data = helper_getimage("cake.jpg")
In [132]:
helper_writetoTEST_DATA("resources/cake_crop.jpg", res)
In [130]:
res = cv_utils.crop_inpixels(data, (400,80), (650,490))
plt.imshow(data)
plt.show()
plt.imshow(res)
plt.show()
In [136]:
map(int, (1.2, 1.0))
Out[136]:
In [147]:
res = cv_utils.crop_inlocal(data, (0.5,0.5), (1.0,1.0))
plt.imshow(data)
plt.show()
plt.imshow(res)
plt.show()
In [148]:
helper_writetoTEST_DATA("resources/cake_crop2.jpg", res)
In [139]:
height, width = data.shape[:2]
print(height, width)
topl_coord, bottomr_coord = (0.5,1.0), (0.5,1.0)
print([topl_coord[0] * width, topl_coord[1]*height])
print([bottomr_coord[0] * width, bottomr_coord[1]*height])
In [149]:
data = helper_dummychannels()
In [153]:
plt.imshow(data)
Out[153]:
In [161]:
res = cv_utils.rotate(data, 90, pivot_coords=None, scale=1.0)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/rotate_90.jpg", res)
res = cv_utils.rotate(data, -45, pivot_coords=None, scale=1.0)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/rotate_45.jpg", res)
res = cv_utils.rotate(data, 45, pivot_coords=(0,0), scale=1.0)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/rotate_45_corner.jpg", res)
res = cv_utils.rotate(data, 45, pivot_coords=None, scale=0.5)
plt.imshow(res)
plt.show()
helper_writetoTEST_DATA("resources/rotate_45_scale.jpg", res)
In [165]:
plt.imshow(data)
plt.show()
res = cv_utils.flip(data, 0)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/flip_horiz.jpg", res)
res = cv_utils.flip(data, 1)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/flip_vert.jpg", res)
In [3]:
data = helper_dummychannels()
plt.imshow(data)
plt.show()
In [16]:
In [13]:
res = cv_utils.scale_inlocal(data, 0.5, 0.75)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/scale1.jpg", res)
res = cv_utils.scale_inpixels(data, 300, 300)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/scale2.jpg", res)
In [639]:
mask1 = np.zeros((256,256), np.uint8)
mask2 = np.zeros((256,256), np.uint8)
cv2.rectangle(mask1, (0, 0), (168, 168), 255, -2)
cv2.rectangle(mask2, (98, 98), (256, 256), 255, -2)
plt.imshow(mask1)
plt.show()
plt.imshow(mask2)
plt.show()
In [640]:
And = cv2.bitwise_and(mask1, mask2)
plt.imshow(And)
Out[640]:
In [641]:
bitwiseOr = cv2.bitwise_or(mask1, mask2)
plt.imshow(bitwiseOr)
Out[641]:
In [642]:
bitwiseXor = cv2.bitwise_xor(mask1, mask2)
plt.imshow(bitwiseXor)
Out[642]:
In [14]:
bitwiseNot_sq = cv2.bitwise_not(mask1)
plt.imshow(bitwiseNot_sq)
Out[14]:
In [643]:
#helper_writetoTEST_DATA("resources/mask1.jpg", mask1)
#helper_writetoTEST_DATA("resources/mask2.jpg", mask2)
#helper_writetoTEST_DATA("resources/and.jpg", And)
#helper_writetoTEST_DATA("resources/or.jpg", bitwiseOr)
helper_writetoTEST_DATA("resources/xor.jpg", bitwiseXor)
#helper_writetoTEST_DATA("resources/not.jpg", bitwiseNot_sq)
In [17]:
mask1 = helper_getimage("mask1.jpg")
mask2 = helper_getimage("mask2.jpg")
expected = helper_getimage("and.jpg")
result = cv_utils.bit_and(mask1, mask2)
In [26]:
plt.imshow(data)
plt.show()
res = cv_utils.apply_mask(data, And)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/and_apply.jpg", res)
In [31]:
data = helper_getimage("cake.jpg")
plt.imshow(data)
plt.show()
In [74]:
res = cv_utils.blur_box(data, 100)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/blur_box_100.jpg", res)
res = cv_utils.blur_box(data, 7)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/blur_box_7.jpg", res)
In [81]:
res = cv_utils.blur_gausian(data, 101)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/blur_gau_101.jpg", res)
res = cv_utils.blur_gausian(data, 7)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/blur_gau_7.jpg", res)
In [86]:
res = cv_utils.blur_median(data, 101)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/blur_median_101.jpg", res)
res = cv_utils.blur_median(data, 7)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/blur_median_7.jpg", res)
In [101]:
res = cv_utils.blur_bilateral(data, 9, 75, 75)
plt.imshow(res)
plt.show()
helper_writetoTEST_DATA("resources/blur_bilat.jpg", res)
In [135]:
dst = cv2.fastNlMeansDenoisingColored(data, None, 6, 6, 7, 21)
plt.imshow(dst)
Out[135]:
In [143]:
res = cv_utils.denoise(data, 1)
plt.imshow(res)
#helper_writetoTEST_DATA("resources/denoise1.jpg", res)
Out[143]:
In [144]:
res = cv_utils.denoise(data, 33)
plt.imshow(res)
#helper_writetoTEST_DATA("resources/denoise2.jpg", res)
Out[144]:
In [146]:
res = cv_utils.sharpen(data)
plt.imshow(res)
#helper_writetoTEST_DATA("resources/sharpen.jpg", res)
In [302]:
data = helper_dummychannels()
plt.imshow(data)
Out[302]:
In [303]:
res = cv_utils.threshold_binary(data, 80, 255)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/tresh_binary255.jpg", res)
res = cv_utils.threshold_binary(data, 80, 100)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/tresh_binary100.jpg", res)
In [304]:
res = cv_utils.threshold_binary_inverse(data, 80, 255)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/tresh_binaryi255.jpg", res)
res = cv_utils.threshold_binary_inverse(data, 80, 100)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/tresh_binaryi100.jpg", res)
In [305]:
res = cv_utils.threshold_truncate(data, 200, 255)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/tresh_trunc200.jpg", res)
res = cv_utils.threshold_truncate(data, 140, 255)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/tresh_trunc140.jpg", res)
In [306]:
res = cv_utils.threshold_tozero(data, 127, 255)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/tresh_tozero.jpg", res)
In [307]:
res = cv_utils.threshold_tozero_inverse(data, 127, 255)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/tresh_tozeroi.jpg", res)
In [251]:
cv2.adaptiveThreshold
In [308]:
data = helper_getimage("cake.jpg")
data = cv_utils.convert_to_gray(data)
plt.imshow(data)
Out[308]:
In [309]:
res = cv_utils.threshold_adaptive(data)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/tresh_ada.jpg", res)
res = cv_utils.threshold_adaptive(data, blocksize=7)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/tresh_ada7.jpg", res)
res = cv_utils.threshold_adaptive(data, blocksize=15)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/tresh_ada15.jpg", res)
In [ ]:
cv2.threshold
In [311]:
res = cv_utils.threshold_otsu(data, 240)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/tresh_otsu.jpg", res)
res = cv_utils.threshold_gaussianotsu(data, 240)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/tresh_gotsu.jpg", res)
In [336]:
data = helper_getimage("hello.png")
_, data = cv2.threshold(data, 0, 255, cv2.THRESH_BINARY_INV)
plt.imshow(data)
Out[336]:
In [340]:
res = cv_utils.morph_dilate(data, 5)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/dilate_5.jpg", res)
res = cv_utils.morph_erode(data, 5)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/erode_5.jpg", res)
res = cv_utils.morph_open(data, 5)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/open_5.jpg", res)
res = cv_utils.morph_close(data, 5)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/close_5.jpg", res)
res = cv_utils.morph_dilate(data, (5,5))
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/dilate_5x5.jpg", res)
res = cv_utils.morph_erode(data, (5,5))
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/erode_5x5.jpg", res)
res = cv_utils.morph_open(data, (5,5))
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/open_5x5.jpg", res)
res = cv_utils.morph_close(data, (5,5))
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/close_5x5.jpg", res)
In [343]:
data = helper_getimage("car.jpg")
plt.imshow(data)
Out[343]:
In [346]:
res = cv_utils.edge_sobel_x(data, 5)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/dilate_5.jpg", res)
In [359]:
res = cv_utils.edge_sobel_x(data, 5)
plt.imshow(res, cmap="gray")
plt.show()
helper_writetoTEST_DATA("resources/edge_sobelx.jpg", res)
res = cv_utils.edge_sobel_y(data, 5)
plt.imshow(res, cmap="gray")
plt.show()
helper_writetoTEST_DATA("resources/edge_sobely.jpg", res)
res = cv_utils.edge_laplacian(data)
plt.imshow(res, cmap="gray")
plt.show()
helper_writetoTEST_DATA("resources/edge_laplacian.jpg", res)
res = cv_utils.edge_canny(data, 50, 120)
plt.imshow(res, cmap="gray")
plt.show()
helper_writetoTEST_DATA("resources/edge_canny.jpg", res)
res = cv_utils.edge_canny_blur(data, 50, 120)
plt.imshow(res, cmap="gray")
plt.show()
helper_writetoTEST_DATA("resources/edge_canny_blur.jpg", res)
In [397]:
data = helper_getimage("affine.jpg")
plt.imshow(data)
Out[397]:
In [398]:
points_A = np.float32([[320,15], [700,215], [85,610]])
points_B = np.float32([[0,0], [420,0], [0,594]])
res = cv_utils.perspective_affine(data, points_A, points_B)
plt.imshow(res)
plt.show()
helper_writetoTEST_DATA("resources/affine2.jpg", res)
In [400]:
data = helper_getimage("keyboard.jpg")
plt.imshow(data)
Out[400]:
In [402]:
points_A = np.float32([[255,65], [935,330], [50,220], [730,700]])
points_B = np.float32([[0,0], [700,0], [0,300], [700,300]])
newimage_image_size = (700, 300)
res = cv_utils.perspective_nonaffine(data, points_A, points_B, newimage_image_size)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/nonaffine.jpg", res)
res = cv_utils.perspective_nonaffine(data, points_A, points_B)
plt.imshow(res)
plt.show()
#helper_writetoTEST_DATA("resources/nonaffine2.jpg", res)
In [36]:
# Lets create our image
image_data = np.zeros((400,930,3), np.uint8) + 255
cv2.rectangle(image_data, (50,100), (250,300), (255,127,0), -1)
#cv2.rectangle(image_data, (260,100), (460,300), (0,127,255), -1)
cv2.circle(image_data, (360,200), 100, (0,127,255), -1)
cv2.rectangle(image_data, (500,130), (640,270), (100,255,0), -1)
cv2.rectangle(image_data, (680, 100), (880, 300), (100,255,255), -1)
plt.imshow(image_data)
#helper_writetoTEST_DATA("resources/color_shapes.jpg", image_data)
In [26]:
data = helper_getimage("color_shapes.jpg")
plt.imshow(data)
Out[26]:
In [22]:
c, h = cv_utils.compute_contours(data, 254, 255)
In [37]:
image_data = data.copy()
cv2.drawContours(data, c, -1, (0,0,255), 10)
plt.imshow(data)
Out[37]:
In [38]:
helper_writetoTEST_DATA("resources/color_shapes_2.jpg", data)
In [30]:
plt.imshow(data)
Out[30]:
In [14]:
map(cv_utils.contour_centroid, c)
Out[14]:
In [15]:
map(cv_utils.contour_areas, c)
Out[15]:
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [9]:
image_data = helper_getimage("color_shapes.jpg")
expected = [19878.0, 31460.0, 40398.0, 40398.0]
contours, hierarchy = cv_utils.compute_contours(image_data, 254, 255)
# Bug fix: map() is lazy in Python 3 — print(result) showed "<map object>"
# and np.equal compared the iterator object, not the areas, making the
# assert meaningless. Materialize into a list first.
result = [cv_utils.contour_areas(contour) for contour in contours]
print(expected)
print(result)
assert np.all(np.equal(result, expected))
#assert False
In [63]:
plt.imshow(image_data)
Out[63]:
In [64]:
# materialize — a bare map object would be displayed instead of the areas
result = [cv_utils.contour_areas(contour) for contour in contours]
result
Out[64]:
In [39]:
plt.imshow(helper_getimage("color_shapes.jpg"))
plt.show()
plt.imshow(helper_getimage("color_shapes_2.jpg"))
plt.show()
In [43]:
data = helper_getimage("house.png")
plt.imshow(data)
Out[43]:
In [71]:
contours, hier = cv_utils.compute_contours(data, 127,255)
plt.imshow(cv_utils.contours_highlight(data, contours))
Out[71]:
In [72]:
rect_coords = cv_utils.contour_boundingbox(contours[0])
plt.imshow(cv_utils.draw_rectangle(data, rect_coords[0], rect_coords[1], (0,255,0), 1))
rect_coords
Out[72]:
In [73]:
map(cv_utils.contour_boundingbox, contours)
Out[73]:
In [101]:
from functools import partial
approxs = [ cv_utils.contour_approximation(x,0.001) for x in contours ]
res = cv_utils.contours_highlight(data, approxs[:])
plt.imshow(res)
Out[101]:
In [107]:
#helper_writetoTEST_DATA("resources/house3.png", res)
In [138]:
data = helper_getimage("flower3.png")
plt.imshow(data)
Out[138]:
In [133]:
contours, hier = cv_utils.compute_contours(data, 176, 255)
plt.imshow(cv_utils.contours_highlight(data, contours[:4]))
Out[133]:
In [139]:
# materialize — map() is lazy and single-use in Python 3; a concrete list is
# safe to iterate (and re-display) downstream
hulls = [cv_utils.contour_hull_approximation(contour) for contour in contours]
res = cv_utils.contours_highlight(data, hulls)
plt.imshow(res)
helper_writetoTEST_DATA("resources/flower4.png", res)
In [155]:
data = helper_getimage("shapes.png")
plt.imshow(data)
Out[155]:
In [169]:
Out[169]:
In [215]:
data = helper_dummychannels()
data.shape
Out[215]:
In [221]:
cv_utils.draw_rectangle?
In [11]:
data = np.zeros((200,500,3), np.uint8)
#data = helper_dummychannels()
pts = np.array([
[100,50],
[50, 150],
[150, 150],
], np.int32)
data = cv_utils.draw_polygon(data, pts, (0,255,0), thickness=1, isClosed=True, filled=True)
data = cv_utils.draw_rectangle(data, (200, 50), (300, 150), (0,255,0), thickness=-1)
data = cv_utils.draw_circle(data, (400, 100), 50, (0,255,0), thickness=-1)
plt.imshow(data)
#helper_writetoTEST_DATA("resources/shapes3", data)
In [12]:
template_data = np.zeros((200,200,3), np.uint8)
pts = np.array([
[130,50],
[50, 150],
[140, 150],
], np.int32)
template_data = cv_utils.draw_polygon(template_data, pts, (0,255,0), thickness=1, isClosed=True, filled=True)
plt.imshow(template_data)
#helper_writetoTEST_DATA("resources/shapes4", template_data)
In [13]:
template_c, template_h = cv_utils.compute_contours(template_data, 0, 255)
plt.imshow(cv_utils.contours_highlight(template_data, template_c, (255,0,0)))
Out[13]:
In [14]:
search_c, search_h = cv_utils.compute_contours(data, 0, 255)
len(search_c)
plt.imshow(cv_utils.contours_highlight(data, search_c, (255,0,0)))
Out[14]:
In [320]:
[ cv_utils.match_shape(x, template_c[0], cv2.CONTOURS_MATCH_I3) for x in search_c]
Out[320]:
In [ ]:
I need some help to understand the output of OpenCV's cv2.matchShapes. I read its documentation but I don't really know what Hu invariants are and I think that might be part of my problem.
I am using matchShapes to compare a skewed triangle to a set of 3 shapes (a triangle, a circle and a square). My problem is that the most similar seems to be either the square, or the circle.
My scenario: I have an image1: [ TRIANGLE SQUARE CIRCLE ]
Another image2: [ SKEWED_TRIANGLE ]
In the end I am getting how similar the contour in image 2 is to each contour in image 1: [0.189, 6.194, 0.159]
Now, I have a bunch of questions:
a) How should I read the "match" metric? (I am assuming bigger is better)
b) should I just clip values to some range? Maybe only values in [0, 1] are relevant?
c) Am I doing something wrong in the code? (I tried the 3 different methods and different thresholds to canny edges)
Below is the code I am using.
In [1]:
import numpy as np
import cv2
import matplotlib.pyplot as plt
## CREATING IMAGE 1 - A triangle, a square and a circle, next to each other.
im_1 = np.zeros((200,500,3), np.uint8)
#data = helper_dummychannels()
pts = np.array([
[100,50],
[50, 150],
[150, 150],
], np.int32)
pts = pts.reshape((-1,1,2))
cv2.fillPoly(im_1, [pts], (0,255,0))
cv2.rectangle(im_1, (200,50), (300, 150), (0,255,0), -1)
cv2.circle(im_1, (400, 100), 50, (0,255,0), -1)
plt.imshow(im_1) ## data has a triangle, rectangle and circle next to one another
plt.show()
## CREATING IMAGE 2 - A slightly skewed triangle
im_2 = np.zeros((200,200,3), np.uint8)
pts = np.array([
[130,50],
[50, 150],
[140, 150],
], np.int32)
pts = pts.reshape((-1,1,2))
cv2.fillPoly(im_2, [pts], (0,255,0))
plt.imshow(im_2)
plt.show()
# GRAYSCALING TO USE IN CANNY FILTER
gray_1 = cv2.cvtColor(im_1, cv2.COLOR_BGR2GRAY)
gray_2 = cv2.cvtColor(im_2, cv2.COLOR_BGR2GRAY)
#plt.imshow(gray_1)
#plt.show()
#plt.imshow(gray_2)
#plt.show()
# CANNY EDGES AND CONTOURS
edged_1 = cv2.Canny(gray_1, 30, 200)
# NOTE(review): the 3-value unpack matches OpenCV 3.x; in OpenCV 4.x
# cv2.findContours returns only (contours, hierarchy) — confirm the
# installed version, otherwise this raises ValueError.
_, contours_1, hierarchy_1 = cv2.findContours(edged_1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
edged_2 = cv2.Canny(gray_2, 30, 200)
_, contours_2, hierarchy_2 = cv2.findContours(edged_2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
## Making sure the contours are ok
plt.imshow(cv2.drawContours(im_1.copy(), contours_1, -1, (255,0,0), 5))
plt.show()
plt.imshow(cv2.drawContours(im_2.copy(), contours_2, -1, (255,0,0), 5))
plt.show()
## COMPUTING cv2.matchShapes - Which of the 3 shapes is most similar to the skewed triangle?
# NOTE: cv2.matchShapes returns a distance between Hu-moment invariants —
# LOWER means more similar (0 for identical shapes), not "bigger is better".
#matches = [ cv2.matchShapes(contours_2[0], x, cv2.CONTOURS_MATCH_I1, 0.0) for x in contours_1 ]
#matches = [ cv2.matchShapes(contours_2[0], x, cv2.CONTOURS_MATCH_I2, 0.0) for x in contours_1 ]
matches = [ cv2.matchShapes(contours_2[0], x, cv2.CONTOURS_MATCH_I3, 0.0) for x in contours_1 ]
print(matches)
In [33]:
data = helper_getimage("skyscrapper.jpg")
data = cv_utils.convert_to_gray(data)
data = cv_utils.edge_canny(data, 50, 150)
plt.imshow(data)
Out[33]:
In [34]:
lines = cv_utils.match_hough_lines(data, 223)
res = helper_getimage("skyscrapper.jpg")
# Bug fix: a bare map() is never consumed in Python 3, so cv2.line was
# never called and no lines were drawn. Use an explicit loop for the side
# effect.
for line in lines:
    cv2.line(res, line[0], line[1], (0, 0, 255), 2)
plt.imshow(res)
#helper_writetoTEST_DATA("resources/skyscrapper3.jpg", res)
In [38]:
lines = cv_utils.match_hough_lines(data, 450)
res = helper_getimage("skyscrapper.jpg")
# Bug fix: a bare map() is never consumed in Python 3 — draw with a loop.
for line in lines:
    cv2.line(res, line[0], line[1], (0, 0, 255), 2)
plt.imshow(res)
#helper_writetoTEST_DATA("resources/skyscrapper3.jpg", res)
Out[38]:
In [40]:
In [46]:
data = helper_getimage("skyscrapper2.jpg")
data = cv_utils.convert_to_gray(data)
data = cv_utils.edge_canny(data, 100, 170)
plt.imshow(data)
Out[46]:
In [94]:
In [93]:
lines = cv_utils.match_prob_hough_lines(data, 700, 100, 5)
res = helper_getimage("skyscrapper2.jpg")
# Bug fix: a bare map() is never consumed in Python 3 — draw with a loop.
for line in lines:
    cv2.line(res, line[0], line[1], (0, 0, 255), 2)
plt.imshow(res)
#helper_writetoTEST_DATA("resources/skyscrapper4.jpg", res)
lines
Out[93]:
In [190]:
data = helper_getimage("circles.jpg")
data = cv_utils.crop_inlocal(data, [0,0], [0.37 ,0.37])
data = cv_utils.convert_to_gray(data)
data = cv_utils.threshold_binary(data, 10, 255)
plt.imshow(data)
Out[190]:
In [191]:
centers, radius = cv_utils.match_circle(data, 1)
res = helper_getimage("circles.jpg")
res = cv_utils.crop_inlocal(res, [0,0], [0.37 ,0.37])
# Bug fix: a bare map() is never consumed in Python 3, so no circles were
# drawn; iterate explicitly for the side effect.
for center, r in zip(centers, radius):
    cv2.circle(res, center, r, (255, 0, 255), 15)
plt.imshow(res)
helper_writetoTEST_DATA("resources/circles2.jpg", res)
In [194]:
[x for x in zip(centers, radius)]
Out[194]:
In [195]:
data = helper_getimage("cereals2.jpg")
plt.imshow(data)
Out[195]:
In [230]:
centers, radius = cv_utils.match_blobs(data)
In [233]:
# Bug fix: in Python 3, [map(int, x) for x in centers] builds a list of lazy,
# single-use map objects (not lists of ints), and the lazy radius map would
# be exhausted after one pass. Materialize both into concrete lists.
centers = [[int(v) for v in x] for x in centers]
radius = [int(r) for r in radius]
[x for x in zip(centers, radius)]
Out[233]:
In [229]:
res = helper_getimage("cereals2.jpg")
# Bug fix: a bare map() is never consumed in Python 3 — draw with a loop.
for center, r in zip(centers, radius):
    cv2.circle(res, center, r, (255, 0, 255), 15)
plt.imshow(res)
#helper_writetoTEST_DATA("resources/circles2.jpg", res)
In [287]:
# Load image then grayscale
image_data = helper_getimage('car.jpg')
image_data = cv_utils.crop_inlocal(image_data, (0.5, 0.1), (1.0,0.6))
plt.imshow(image_data)
image_data.shape
Out[287]:
In [284]:
harris_corners = cv_utils.describe_corners(image_data, 3 ,3 ,0.05)
harris_corners = cv2.dilate(harris_corners, None)
image_data[harris_corners>0.025*harris_corners.max()]=[255,127,0]
plt.imshow(image_data)
#helper_writetoTEST_DATA("resources/harris_corners.jpg", image_data)
In [312]:
# Load image then grayscale
image_data = helper_getimage('car.jpg')
#image_data = cv_utils.crop_inlocal(image_data, (0.5, 0.1), (1.0,0.6))
plt.imshow(image_data)
image_data.shape
Out[312]:
In [323]:
corners = cv_utils.describe_good2track(image_data, 300)
corners
Out[323]:
In [364]:
# Load image then grayscale
image_data = helper_getimage('car.jpg')
image_data = cv_utils.crop_inlocal(image_data, (0.65, 0.0), (1.0,0.15))
plt.imshow(image_data)
image_data.shape
Out[364]:
In [365]:
c, r, k = cv_utils.describe_FAST(image_data)
In [366]:
plt.imshow(cv2.drawKeypoints(image_data.copy(), k, image_data.copy(), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS))
Out[366]:
In [398]:
# Load image then grayscale
image_data = helper_getimage('car.jpg')
image_data = cv_utils.crop_inlocal(image_data, (0.7, 0.1), (1.0,0.6))
plt.imshow(image_data)
Out[398]:
In [401]:
c, r, k = cv_utils.describe_BRIEF(image_data)
In [402]:
plt.imshow(cv2.drawKeypoints(image_data.copy(), k, image_data.copy(), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS))
Out[402]:
In [440]:
# Load image then grayscale
image_data = helper_getimage('car.jpg')
image_data = cv_utils.crop_inlocal(image_data, (0.7, 0.24), (0.9,0.35))
plt.imshow(image_data)
Out[440]:
In [444]:
len(c)
Out[444]:
In [442]:
c, r, k = cv_utils.describe_ORB(image_data)
In [443]:
plt.imshow(cv2.drawKeypoints(image_data.copy(), k, image_data.copy(), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS))
Out[443]:
In [466]:
data = helper_getimage("car.jpg")
plt.imshow(data)
Out[466]:
In [467]:
gradients, hog_feats = cv_utils.describe_HOG(data)
In [468]:
for i in range(gradients.shape[-1]):
plt.imshow(gradients[:,:,i])
plt.show()
In [469]:
data.shape, (data.shape[0] // 9, data.shape[1] // 9)
Out[469]:
In [470]:
gradients.shape
Out[470]:
In [476]:
from cv_utils import download as cv_download
In [477]:
cv_download.haar_frontfacedefault("haarcascade_frontalface_default.xml")
cv_download.haar_eye("haarcascade_eye.xml")
Out[477]:
In [504]:
data = helper_getimage("Patrick_Stewart_and_Hugh_Jackman_Press_Conference_Logan_Berlinale_2017_01.jpg")
plt.imshow(data)
Out[504]:
In [516]:
faces
Out[516]:
In [515]:
faces = cv_utils.match_face(data)
In [514]:
# Bug fix: a map object is not subscriptable in Python 3 — rects[1] would
# raise TypeError. Build a real list.
rects = [cv_utils.draw_rectangle(data, x[0], x[1], (255, 0, 255), 2) for x in faces]
plt.imshow(rects[1])
Out[514]:
In [533]:
eyes = cv_utils.match_eyes(data)
In [542]:
# Bug fix: a map object is not subscriptable in Python 3 — rects[1] would
# raise TypeError. Build a real list.
rects = [cv_utils.draw_rectangle(data, x[0], x[1], (0, 255, 255), 2) for x in eyes]
plt.imshow(rects[1])
Out[542]:
In [540]:
eyes[0]
Out[540]:
In [550]:
cv_download.dlib_facelandmarks("shape_predictor_68_face_landmarks.dat")
Out[550]:
In [554]:
!pip install dlib
In [555]:
import dlib
In [556]:
data = helper_getimage("Patrick_Stewart_and_Hugh_Jackman_Press_Conference_Logan_Berlinale_2017_01.jpg")
plt.imshow(data)
Out[556]:
In [561]:
landmarks = cv_utils.match_facial_landmarks(data)
In [590]:
l = landmarks[0]
tuple(l.tolist()[0])
Out[590]:
In [593]:
[cv2.circle(data, tuple(x), 5, (0,255,0)) for x in landmarks[0].tolist()]
plt.imshow(data)
Out[593]:
In [594]:
[cv2.circle(data, tuple(x), 5, (0,255,0)) for x in landmarks[1].tolist()]
plt.imshow(data)
Out[594]:
In [75]:
data = helper_getimage("cake.jpg")
plt.imshow(data)
Out[75]:
In [76]:
res = cv_utils.compute_grabcut(data, (300,0,700,500), 3)
plt.imshow(res)
Out[76]:
In [77]:
helper_writetoTEST_DATA("resources/cake_mask2.jpg", res)
In [78]:
plt.imshow(helper_getimage("cake_mask2.jpg"))
Out[78]:
In [79]:
np.all(np.equal(res, helper_getimage("cake_mask2.jpg")))
Out[79]:
In [68]:
np.random.seed(7)
print(np.random.randint(1000000))
np.random.seed()
print(np.random.randint(1000000))