In [1]:
import cv2
import numpy as np

In [2]:
# Read the images to be aligned
im1 = cv2.imread(r"C:\Users\Jay\OneDrive\AllenInstitute\Stitching\left.tif", cv2.IMREAD_UNCHANGED)
im2 = cv2.imread(r"C:\Users\Jay\OneDrive\AllenInstitute\Stitching\right.tif", cv2.IMREAD_UNCHANGED)

print im2.shape

# The tiles are single-channel 16-bit TIFFs, so no BGR-to-gray conversion is
# needed; if they were color images, cvtColor would be used instead:
#im1_gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
#im2_gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)

# Scale 16-bit down to 8-bit, since findTransformECC expects single-channel
# 8-bit or 32-bit float input
im1_gray = np.uint8(im1/256)
im2_gray = np.uint8(im2/256)
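# Note (added): dividing by 256 assumes the tiles use the full 16-bit range;
# if they do not, a min-max rescale (e.g. with cv2.normalize) would preserve
# more contrast for ECC. A possible alternative, left commented out here:
#im1_gray = cv2.normalize(im1, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
#im2_gray = cv2.normalize(im2, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)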
    
    
# Find size of image 1 (the reference frame for the alignment)
sz = im1.shape
print sz
# Define the motion model
warp_mode = cv2.MOTION_TRANSLATION
 
# Define 2x3 or 3x3 matrices and initialize the matrix to identity
if warp_mode == cv2.MOTION_HOMOGRAPHY:
    warp_matrix = np.eye(3, 3, dtype=np.float32)
else:
    warp_matrix = np.eye(2, 3, dtype=np.float32)
 
# Specify the number of iterations
number_of_iterations = 1000

# Specify the threshold of the increment
# in the correlation coefficient between two iterations
termination_eps = 1e-10
 
# Define termination criteria
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations,  termination_eps)
 
# Run the ECC algorithm. The results are stored in warp_matrix.
# (A convergence-guarded version of this call is sketched after this cell's output.)
(cc, warp_matrix) = cv2.findTransformECC(im1_gray, im2_gray, warp_matrix, warp_mode, criteria)
 
print cc, warp_matrix

if warp_mode == cv2.MOTION_HOMOGRAPHY:
    # Use warpPerspective for Homography
    im2_aligned = cv2.warpPerspective(im2, warp_matrix, (sz[1], sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
else:
    # Use warpAffine for Translation, Euclidean and Affine
    im2_aligned = cv2.warpAffine(im2, warp_matrix, (sz[1], sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
 
# Show final results (a file-based alternative is sketched in the last cell of this notebook)
cv2.imshow("Image 1", im1)
cv2.imshow("Image 2", im2)
cv2.imshow("Aligned Image 2", im2_aligned)
cv2.waitKey(0)


(2043L, 706L)
(2043L, 700L)
0.871538141082 [[   1.            0.          381.72052002]
 [   0.            1.          -24.04422569]]
Out[2]:
-1
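
In [ ]:
# Sketch (not executed in this run): cv2.findTransformECC raises cv2.error when
# it fails to converge, e.g. if the tile overlap is too small or the initial
# warp_matrix is too far off. A guarded call could look like this, reusing
# im1_gray, im2_gray, warp_mode and criteria from the cell above.
try:
    warp_matrix = np.eye(2, 3, dtype=np.float32)
    (cc, warp_matrix) = cv2.findTransformECC(im1_gray, im2_gray, warp_matrix, warp_mode, criteria)
except cv2.error as e:
    # Fall back to the identity transform and report the failure
    print 'ECC did not converge:', e
    warp_matrix = np.eye(2, 3, dtype=np.float32)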


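In [ ]:
# Sketch (not executed in this run): instead of HighGUI windows, the aligned
# tile can be written to disk and the residual misalignment inspected with an
# absolute-difference image. The output file names below are placeholders.
cv2.imwrite('right_aligned.tif', im2_aligned)

# im2_aligned was warped into im1's frame, so the two arrays have the same
# shape and depth; bright structure in the overlap of the difference image
# indicates remaining misalignment.
diff = cv2.absdiff(im1, im2_aligned)
cv2.imwrite('alignment_diff.png', np.uint8(diff/256))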