In [1]:
#aruco convert

In [292]:
import logging
logging.basicConfig(level=logging.DEBUG)
import cv, cv2
import os, sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm

In [293]:
import yaml
def opencv_matrix(loader, node):
    mapping = loader.construct_mapping(node)
    # need to pull out the data seperately
    seq = loader.construct_sequence(node.value[-1][1])
    mat = np.array(seq,dtype=np.float64)
    mat.resize(mapping["rows"], mapping["cols"])
    return mat

yaml.add_constructor(u"tag:yaml.org,2002:opencv-matrix", opencv_matrix)

In [294]:
#input_test = 'bigtest.png'
input_test = 'simple_board_1.png'
output_test = 'test_out_new.png'

# in order for this to work, you need to strip the %YAML:1.0 from the file generated by opencv calibration
camera_configuration_name = 'camera.yml'
camera_configuration = []
with open(camera_configuration_name) as fin:
    camera_configuration = yaml.load(fin.read())

print camera_configuration.keys()


['square_size', 'per_view_reprojection_errors', 'extrinsic_parameters', 'avg_reprojection_error', 'image_points', 'calibration_time', 'image_width', 'camera_matrix', 'nframes', 'board_height', 'image_height', 'flags', 'distortion_coefficients', 'board_width']

In [295]:
input_image = cv2.imread(input_test)
plt.imshow(input_image);



In [296]:
# convert to gray scale
grey_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)
plt.imshow(grey_image, cmap=cm.gray);



In [297]:
# adaptive threshold

#cv::adaptiveThreshold(grey,out,255,ADAPTIVE_THRESH_GAUSSIAN_C,THRESH_BINARY_INV,_thresParam1,_thresParam2);

thresholdParameter1 = thresholdParameter2 = 7

thresholdedImage = cv2.adaptiveThreshold(grey_image ,255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,thresholdParameter1,thresholdParameter2)

plt.imshow(thresholdedImage, cmap=cm.gray);



In [298]:
#extract contours
contours, hierarchy = cv2.findContours(thresholdedImage, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

In [299]:
# identify potential markers

MarkerCandidates = []
detectedMarkers = []

for n,contour in enumerate(contours):
    
    cols = input_image.shape[1]
    
    if len(contour) > int(cols / 5.0):
        
        #approxPolyDP(  Mat  (contours2[i]),approxCurve , double(contours2[i].size())*0.05 , true);
#        approxCurve = cv2.approxPolyDP(contour, 0.1*cv2.arcLength(cnt,True),True)
        approxCurve = cv2.approxPolyDP(contour, len(contour) * 0.05, True)
    
        if len(approxCurve) == 4:
            
            if cv2.isContourConvex(approxCurve):
                
                minDist = 1e10
                for i in range(0,4):
                    
                    _x2 = math.pow(approxCurve[i][0][0] - approxCurve[(i+1)%4][0][0], 2)
                    _y2 = math.pow(approxCurve[1][0][0] - approxCurve[(i+1)%4][0][1], 2)
                    
                    d = math.sqrt(_x2 + _y2)
                    
                    if d < minDist:
                        minDist = d
                        
                if minDist > 10:
                    
                    marker = []
                    for i in range(0,4):
                        marker.append([approxCurve[i][0][0], approxCurve[i][0][1]])
                    MarkerCandidates.append(marker)
                    
print MarkerCandidates
                    
                                  
                                  #float d= sqrt( (float)(approxCurve[i].x-approxCurve[(i+1)%4].x)*(approxCurve[i].x-approxCurve[(i+1)%4].x) +
                                   #    (approxCurve[i].y-approxCurve[(i+1)%4].y)*(approxCurve[i].y-approxCurve[(i+1)%4].y));


[[[295, 208], [293, 238], [318, 238], [318, 210]], [[282, 41], [106, 62], [85, 173], [278, 209]], [[219, 149], [269, 154], [266, 204], [216, 195]], [[171, 144], [211, 148], [206, 194], [165, 185]], [[133, 140], [165, 144], [158, 184], [125, 177]], [[99, 137], [127, 141], [119, 177], [92, 172]], [[177, 99], [215, 100], [210, 140], [172, 136]], [[170, 100], [165, 136], [133, 133], [139, 100]], [[270, 98], [268, 144], [219, 140], [223, 99]], [[138, 63], [135, 93], [107, 95], [111, 66]], [[174, 59], [171, 92], [140, 94], [143, 63]], [[218, 55], [214, 92], [177, 92], [181, 58]], [[271, 49], [269, 90], [222, 91], [227, 52]]]

In [301]:
#sort them anticlockwise

for candidate in MarkerCandidates:
    
    dx1 = candidate[1][0] - candidate[0][0]
    dy1 = candidate[1][1] - candidate[0][1]
    dx2 = candidate[2][0] - candidate[0][0]
    dy2 = candidate[2][1] - candidate[0][1]
    o = (dx1*dy2) - (dy1*dx2)
    
    if o < 0.0:

        print 're-sorting this marker'
        # swap with temp (slow)
        #t = candidate[1]
        #candidate[1] = candidate[3]
        #candidate[3] = t
        
        # fast swap
        # L[a], L[b] = L[b], L[a]
        candidate[1], candidate[3] = candidate[3], candidate[1]


re-sorting this marker

In [302]:
# remove elements whose corners are too close to each other
TooNearCandidates = []

for i in range(0, len(MarkerCandidates)):
    for j in range(i+1, len(MarkerCandidates)):
        
        d = 0.0
        
        for c in range(0,4):
            d += math.sqrt( (MarkerCandidates[i][c][0]-MarkerCandidates[j][c][0])*(MarkerCandidates[i][c][0]-MarkerCandidates[j][c][0])+(MarkerCandidates[i][c][1]-MarkerCandidates[j][c][1])*(MarkerCandidates[i][c][1]-MarkerCandidates[j][c][1]))
        d /=4
        
        if d < 10:
            TooNearCandidates.append([i,j])

In [313]:
def hammDistMarker(bits):
    
    ids = np.array([[1,0, 0, 0, 0],
         [1, 0, 1, 1, 1],
         [0, 1, 0, 0, 1],
         [0, 1, 1, 1, 0]])

    d = 0
    
    for y in range(0,5):    
        minSum = 1e5
            
        for p in range(0,4):
            sum = 0
            
            for x in range(0,5):
                
                if bits[y][x] != ids[p][x]:
                    sum += 1
        
            if minSum > sum:
                minSum = sum
        
        d += minSum
            
    return d

def getMatToID(bits):
    
    v = 0
    for i in range(0,5):
        v = v<<1
        if bits[i][1] == 1:
            v = v | 1
            
        v = v<<1
        if bits[i][3] == 1:
            v = v | 1
            
    return v
    
    
    
def getMarkerID(input_marker):
    
    swidth = int(input_marker.shape[0]/7)
    ret2, img_bw = cv2.threshold(input_marker, 125, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)

    for y in range(0,7):
        inc = 6
        if ( y==0 ) or (y==6):
            inc = 1
            #for (int x=0;x<7;x+=inc)
            for x in range(0,7, inc):        
                Xstart = x*swidth
                Ystart = y*swidth
                square = img_bw[Xstart:Xstart+swidth, Ystart:Ystart+swidth]
                nZ = cv2.countNonZero(square)
                if nZ > ( swidth*swidth) / 2:
                    return -1
        
    # bitwise representation
    bits = np.zeros((5,5))
    offset = 0
    
    for i in range(0,5):
        for j in range(0, 5):
            Xstart = (i+1)*swidth
            Ystart = (j+1)*swidth
            
            square = img_bw[Xstart:Xstart+swidth, Ystart:Ystart+swidth]
            nZ = cv2.countNonZero(square)
            if nZ > ((swidth*swidth) / 2) - offset:
                #bits[i,j] = nZ
                bits[i,j] = 1
                
    index = 0
    bits_cache = bits.copy()
    minDist = hammDistMarker(bits)
    for i in range(1,4):
        bits = bits.T
        d = hammDistMarker(bits)
        if d < minDist:        
            minDist = d
            bits_cache = bits
            index = i
            
    return getMatToID(bits_cache)

In [315]:
# mark for removal the pair with the smaller perimeter

toRemove = np.zeros((len(MarkerCandidates),1))

def perimeter(point_list):

    sum = 0
    for a in range(0,len(point_list)):
        b = (a+1)%len(point_list)
        sum += math.sqrt( math.pow(point_list[a][0]-point_list[b][0], 2)  + math.pow(point_list[a][1] - point_list[b][1], 2))
    
    return sum

for i in range(0, len(TooNearCandidates)):
    
    p1 = perimeter(MarkerCandidates[TooNearCandidates[i][0]])
    p2 = perimeter(MarkerCandidates[TooNearCandidates[i][1]])
    
    if p1 > p2:
        toRemove[TooNearCandidates[i][1]] = 1
    else:
        toRemove[TooNearCandidates[i][0]] = 1
        

def warp(input, size, points):

    if len(points) != 4:
        print 'error!'
        return None

    src = np.array(points, dtype=np.float32)
    
    pointsRes = np.zeros((4,2))
    pointsRes[1] = [size[1]-1, 0]
    pointsRes[2] = [size[1]-1, size[0]-1]
    pointsRes[3] = [0, size[0]-1]

    dst = np.array(pointsRes, dtype=np.float32)
    M = cv2.getPerspectiveTransform( src, dst)
    output = cv2.warpPerspective(grey_image, M, (size[1], size[0])) 
    return output
    

    
labeled_image = input_image.copy()  

def drawRect(marker, color):
    cv2.rectangle(labeled_image, (marker[0][0], marker[0][1]), (marker[2][0], marker[2][1]), color, 1)

all_markers = []
for i, candidate in enumerate(MarkerCandidates):
    if toRemove[i] == 0:
        
        canonicalMarker = warp(input_image, (50,50), candidate)

        marker_id = getMarkerID(canonicalMarker)
        
        
        if marker_id > 0:        
            print marker_id
            all_markers.append(marker_id)
            drawRect(candidate, (marker_id % 255, 255, marker_id % 255)),

print len(unique(all_markers))
plt.imshow(labeled_image)


262
339
3
3
339
675
521
3
675
521
834
6
Out[315]:
<matplotlib.image.AxesImage at 0x10d413310>

In [260]:



Out[260]:
789