In [2]:
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
In [199]:
# Come back to opencv
# Stereo-calibration setup for an asymmetric circle grid (OpenCV 2.4 API).
dims = (4, 11)            # grid size: 4 columns x 11 rows of circle centers
im_size = (640, 480)      # calibration image resolution (width, height)
num_pts = dims[0] * dims[1]

# Initial guess for both cameras' intrinsics: focal length 583.3 px
# (= 0.35 cm), principal point at the image center.
initial_K = np.zeros((3, 3))
initial_K[0, 0] = 583.3
initial_K[1, 1] = 583.3
initial_K[0, 2] = 320
initial_K[1, 2] = 240
initial_K[2, 2] = 1.0
intrinsics1 = initial_K.copy()
intrinsics2 = initial_K.copy()

# 8-coefficient distortion vectors (rational model), zero-initialized.
distortion1 = np.zeros((8, 1))
distortion2 = np.zeros((8, 1))

# Load one matched pair of calibration frames as grayscale (imread flag 0).
img1 = cv2.imread("/Users/bwsprague/GradSchool/Research/BerkeleyVisionStats/data/raw/stereocalibration/kre/kre_cafe/calibration_frames_2012-08-01/cam1_frame_10.bmp", 0)
img2 = cv2.imread("/Users/bwsprague/GradSchool/Research/BerkeleyVisionStats/data/raw/stereocalibration/kre/kre_cafe/calibration_frames_2012-08-01/cam2_frame_10.bmp", 0)

# Detect the circle-grid centers in each image.
found1, points1 = cv2.findCirclesGridDefault(img1, dims, flags=cv2.CALIB_CB_ASYMMETRIC_GRID)
found2, points2 = cv2.findCirclesGridDefault(img2, dims, flags=cv2.CALIB_CB_ASYMMETRIC_GRID)
In [213]:
def object_points(dims, num_images, square_size):
    '''Build the 3-D object points for an asymmetric circle grid.

    All circle centers lie in the z = 0 plane.  The x coordinate advances
    half a spacing per row, and odd rows are shifted along y by half a
    spacing, matching OpenCV's CALIB_CB_ASYMMETRIC_GRID layout.

    Parameters:
        dims        -- (width, height): grid size in circle counts.
        num_images  -- number of calibration views; the same grid is
                       repeated once per view, as cv2.calibrateCamera expects.
        square_size -- center-to-center circle spacing (world units).

    Returns:
        np.ndarray of shape (num_images, width*height, 3), dtype float32.
    '''
    width, height = dims
    half = square_size / 2.0
    grid = np.zeros((width * height, 3))
    for i in range(height):
        for j in range(width):
            # x advances half a spacing per row; odd rows offset y by half.
            grid[i * width + j, 0] = i * half
            grid[i * width + j, 1] = j * square_size + (half if i % 2 else 0.0)
    # The grid is identical for every view, so compute it once and repeat.
    return np.array([grid] * num_images, dtype=np.float32)
In [214]:
# Overlay the detected circle-grid centers on copies of the calibration
# frames; the original images stay untouched for later processing.
drawn_boards_1 = img1.copy()
cv2.drawChessboardCorners(drawn_boards_1, dims, points1, found1)
drawn_boards_2 = img2.copy()
cv2.drawChessboardCorners(drawn_boards_2, dims, points2, found2)
In [215]:
# Visual sanity check: detected grid points drawn over the camera-1 frame.
plt.imshow(drawn_boards_1, cmap='gray')
Out[215]:
In [216]:
# Build the object/image point arrays expected by cv2.calibrateCamera,
# for the single view (circle spacing 4.35 world units).
opts = object_points(dims, 1, 4.35)
iptsF1 = [] #image point arrays to fill up
iptsF2 = []
temp1 = np.zeros( (num_pts,2) )
temp2 = np.zeros( (num_pts,2) )
if found1 and found2:
    # Flatten the detected centers to (num_pts, 2).
    # NOTE(review): the indexing implies points* is at least 3-D — presumably
    # OpenCV's (num_pts, 1, 2) layout; confirm.
    for i in range(num_pts):
        temp1[i,0]=points1[i,0,0]
        temp1[i,1]=points1[i,0,1]
        temp2[i,0]=points2[i,0,0]
        temp2[i,1]=points2[i,0,1]
    iptsF1.append(temp1)
    iptsF2.append(temp2)
# convert image points to numpy
iptsF1 = np.array(iptsF1, dtype = np.float32)
iptsF2 = np.array(iptsF2, dtype = np.float32)
In [217]:
# Inspect the generated 3-D object points.
opts
Out[217]:
In [218]:
# Inspect the flattened camera-1 image points.
iptsF1
Out[218]:
In [219]:
# Calibrate camera 1 from the single view, starting from the intrinsic guess
# and fitting the 8-coefficient rational distortion model.  cam1rms is the
# RMS reprojection error in pixels.
(cam1rms, intrinsics1, distortion1, rotv1, trav1) = cv2.calibrateCamera(opts, iptsF1, im_size, intrinsics1, distortion1,
                                                                        flags=int(cv2.CALIB_USE_INTRINSIC_GUESS | cv2.CALIB_RATIONAL_MODEL))
In [220]:
# Refined camera-1 intrinsic matrix after calibration.
intrinsics1
Out[220]:
In [221]:
# Recover the board's pose (rotation/translation vectors) in camera-1
# coordinates using the calibrated intrinsics and distortion.
retval, rvec, tvec = cv2.solvePnP(opts, iptsF1, intrinsics1, distortion1)
In [222]:
# Origin of the board frame plus a 5-unit tick along each axis, used to
# draw the board's coordinate frame on the image.
board_points = np.vstack((np.zeros((1, 3)), 5.0 * np.eye(3)))
In [223]:
# Project the board-frame axis points into camera-1 pixel coordinates.
outpts, jac = cv2.projectPoints(board_points, rvec, tvec, intrinsics1, distortion1)
In [224]:
# Projected pixel coordinates of the origin and the three axis tips.
outpts
Out[224]:
In [225]:
# Draw the projected axis points on an RGB copy of the frame: origin in
# white, then the x/y/z axis tips in red/green/blue.  (Loop replaces four
# copy-pasted cv2.circle calls.)
tmp_im = cv2.cvtColor(img1.copy(), cv2.COLOR_GRAY2RGB)
axis_colors = [(255, 255, 255), (255, 0, 0), (0, 255, 0), (0, 0, 255)]
for pt, color in zip(outpts, axis_colors):
    cv2.circle(tmp_im, tuple(pt.squeeze().astype('int')), 8, color, 3)
plt.imshow(tmp_im)
Out[225]:
In [61]:
# Joint stereo calibration: refines both cameras' intrinsics/distortion and
# recovers the rotation R and translation T between the cameras, plus the
# essential (E) and fundamental (F) matrices.  stereorms is the RMS
# reprojection error.
(stereorms, intrinsics1, distortion1, intrinsics2, distortion2, R, T, E, F) = cv2.stereoCalibrate(opts, iptsF1, iptsF2, im_size,
    intrinsics1, distortion1, intrinsics2, distortion2,
    criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 300, 1e-7),
    flags=(cv2.CALIB_USE_INTRINSIC_GUESS | cv2.CALIB_RATIONAL_MODEL))
In [55]:
# Camera-1 intrinsics after stereo refinement.
intrinsics1
Out[55]:
In [83]:
# NOTE(review): duplicate of the previous display cell (execution counts
# differ, so these may show state from different runs) — consider removing.
intrinsics1
Out[83]:
In [64]:
# Translation between the two cameras from stereoCalibrate.
T
Out[64]:
In [98]:
# Project the single point (0, 100, -100) through camera 1 with an identity
# rotation (Rodrigues of I gives a zero rotation vector), zero translation,
# and zero distortion.
# NOTE(review): negative z places the point behind the camera in OpenCV's
# convention — confirm the sign is intended.
cv2.projectPoints(np.array([[0,100,-100.0]]), cv2.Rodrigues(np.array([[1., 0,0],[0,1,0],[0,0,1]]))[0], np.zeros((3,1)), intrinsics1, np.zeros((1,4)))[0]
Out[98]:
In [99]:
# Re-inspect the object points.
opts
Out[99]:
In [7]:
# Load the parsed experiment outputs: captured frames, task events, and
# reaction-time data.
# NOTE(review): hard-coded absolute paths — a DATA_DIR constant would make
# this portable.
frames = pd.read_hdf("/Users/bwsprague/GradSchool/Research/BerkeleyVisionStats/testing/parser_output/frame_output.h5", 'frames')
task_data = pd.read_hdf("/Users/bwsprague/GradSchool/Research/BerkeleyVisionStats/testing/parser_output/task_output.h5", 'task')
rt_data = pd.read_hdf("/Users/bwsprague/GradSchool/Research/BerkeleyVisionStats/testing/parser_output/rt_output.h5", 'rt')
In [195]:
# Add placeholder ('both', *) columns that the frame-sync loop below fills in.
for col in ('frame_time', 'frame_count', 'frame_time_diff'):
    task_data['both', col] = np.nan
In [3]:
# Time span covered by the task data (level 1 of the MultiIndex holds time).
# NOTE(review): .levels[1] reflects the full stored level values, which can
# include entries not present after any prior filtering — confirm the index
# is unfiltered here.
earliest_time = task_data.index.levels[1].min()
latest_time = task_data.index.levels[1].max()
In [4]:
# Keep only the captured frames that fall strictly inside the task window.
task_frames = frames[(frames['time']>earliest_time) & (frames['time']<latest_time)]
In [198]:
# How many frames survived the time-window filter (rows, cols).
task_frames.shape
Out[198]:
In [5]:
# For each captured frame, record the signed time offset on every task row
# within 15 time units, and stamp the frame's time and count on the closest
# row whose quality is 'GOOD' for both eyes.  Capped at ~100 frames for this
# test run.
count = 0
for i, frame in task_frames.iterrows():
    # Signed offsets of every task timestamp from this frame's timestamp.
    time_diffs = task_data.index.levels[1] - frame['time']
    time_diffs = time_diffs.to_series().reset_index(drop=True)
    time_diffs.index = task_data.index
    # Only task rows within 15 time units of the frame are candidates.
    time_range_mask = np.abs(time_diffs) < 15
    data_of_interest = task_data.loc[time_range_mask]
    good_data = data_of_interest[data_of_interest['both', 'quality'] == 'GOOD']
    indices_of_interest = data_of_interest.index
    task_data.loc[indices_of_interest, ('both', 'frame_time_diff')] = time_diffs
    indices_of_interest = good_data.index
    if not time_diffs[indices_of_interest].empty:
        # Label of the GOOD row closest in time to this frame.
        # NOTE(review): relies on older pandas where Series.argmin returns a
        # label; newer pandas needs idxmin — confirm the pandas version.
        smallest_time_diff_loc = np.abs(time_diffs.loc[indices_of_interest]).argmin()
        task_data.loc[smallest_time_diff_loc, ('both', 'frame_time')] = frame['time']
        task_data.loc[smallest_time_diff_loc, ('both', 'frame_count')] = frame['press num']
    count += 1
    if count > 100:
        break
print frame  # Python 2 print statement: show the last processed frame
In [7]:
# Spot-check the frame-sync results over rows 130-200.
task_data.iloc[130:200]
Out[7]:
In [220]:
# How many frames were processed before the loop stopped.
count
Out[220]:
In [218]:
# Show up to 600 rows when displaying DataFrames in this notebook.
pd.options.display.max_rows = 600
In [9]:
# Target positions seen by each eye, as (3, 1) column vectors (x, y, z with
# z = 100 forward).
left_ndsref = np.array([[10.0], [10.0], [100.0]])
right_ndsref = np.array([[7.0], [12.0], [100.0]])
In [13]:
# Interpupillary distance (same units as the ndsref points, presumably cm —
# TODO confirm).
ipd=6.5
In [14]:
# Left eye sits at -ipd/2 on the x axis; p2 is the unit vector from the eye
# toward the left target, re-anchored at the eye position.
p1 = np.array([[-ipd / 2, 0, 0]]).T
gaze_l = left_ndsref - p1
p2 = p1 + gaze_l / np.linalg.norm(gaze_l)
In [21]:
# Right eye sits at +ipd/2 on the x axis; q2 is the unit vector from the eye
# toward the right target, re-anchored at the eye position.
q1 = np.array([[ipd / 2, 0, 0]]).T
gaze_r = right_ndsref - q1
q2 = q1 + gaze_r / np.linalg.norm(gaze_r)
In [52]:
# Gather the x, y and z coordinates of the four points (both eye positions
# and both unit-gaze points) as 1x4 row matrices.
# NOTE(review): np.matrix is discouraged in modern NumPy; plain ndarrays
# with the @ operator would be preferable.
X = np.matrix([[p1[0,0], p2[0,0], q1[0,0], q2[0,0]]])
Y = np.matrix([[p1[1,0], p2[1,0], q1[1,0], q2[1,0]]])
Z = np.matrix([[p1[2,0], p2[2,0], q1[2,0], q2[2,0]]])
A = Z.T  # design matrix: fit y as a linear function of z
In [56]:
# Least-squares slope of y against z over the four points, via the normal
# equations u = (A^T A)^-1 A^T y.
u = np.linalg.inv(A.T * A) * A.T * Y.T
# Embed the slope as the middle entry of a 1x3 row.
# NOTE(review): u is still a 1x1 np.matrix here, so this builds an
# object-dtype array and u[0,1] yields a matrix, not a scalar — confirm the
# downstream arithmetic tolerates that.
u = np.array([[0, u, 0]])
In [61]:
# Replace each gaze point's y with the fitted plane's prediction (slope * z),
# keeping x and z unchanged.
# NOTE(review): u[0,1] is a 1x1 np.matrix (from the previous cell), so these
# become object-dtype arrays — verify intended.
p2_new = np.array([[p2[0,0], u[0,1]*p2[2,0], p2[2,0]]]).T
q2_new = np.array([[q2[0,0], u[0,1]*q2[2,0], q2[2,0]]]).T
In [62]:
# Re-normalize the plane-adjusted right gaze point to unit length about the
# right eye position q1.
q2_new = q1 + (q2_new - q1) / np.linalg.norm(q2_new - q1)
In [64]:
# Re-normalize the plane-adjusted left gaze point to unit length about the
# left eye position p1.
p2_new = p1 + (p2_new - p1) / np.linalg.norm(p2_new - p1)
In [66]:
# Unit normal of the fitted plane u0*x - y + u1*z = 0.
# NOTE(review): u holds matrix entries (object dtype), so P is object-dtype
# too — confirm np.linalg.norm behaves as intended on it.
P = np.array([[u[0,0], -1, u[0,1]]])
P = P / np.linalg.norm(P)
In [70]:
# Out-of-plane angle (degrees) of each original gaze point: arcsin of its
# dot product with the unit plane normal.
thp = np.degrees(np.arcsin(np.dot(P, p2)))
thq = np.degrees(np.arcsin(np.dot(P, q2)))
In [71]:
# Same angles for the plane-adjusted gaze points (should be ~0 if the
# projection worked).
thp_new = np.degrees(np.arcsin(np.dot(P, p2_new)))
thq_new = np.degrees(np.arcsin(np.dot(P, q2_new)))
In [76]:
# Right-eye out-of-plane angle before adjustment (degrees).
thq
Out[76]:
In [8]:
# Inspect the raw frames table.
frames
Out[8]:
In [9]:
# Reaction-time records for index label 2.
rt_data.loc[2]
Out[9]:
In [6]:
import json
In [25]:
# Manual fix-up metadata for two radial-target capture runs: timing offsets
# needed to re-synchronize captured frames with the task log.
run_100 = {
    "title": "radial_target_100_4",
    "data": {
        "radial_target_starttime": 2278283,
        "capture_start_offset": 612,
        "init_starttime": 678397,
        "init_endtime": 732606,
        "num_additional_frames": 28,
    },
}
run_50 = {
    "title": "radial_target_50_4",
    "data": {
        "radial_target_starttime": 2391324,
        "capture_start_offset": 612,
        "init_starttime": 817661,
        "init_endtime": 834048,
        "num_additional_frames": 0,
        "last_possible_time": 2407214,
    },
}
d = [run_100, run_50]
In [26]:
# Write the fix-up metadata out as pretty-printed JSON.
with open("./jsontest.json", 'w') as f:
    f.write(json.dumps(d, indent=2))
In [27]:
# Open the freshly written JSON in Sublime Text for manual editing.
!subl ./jsontest.json
In [28]:
# Read the (possibly hand-edited) JSON back in to verify the round trip.
with open("./jsontest.json", 'r') as f:
    fixup_data = json.load(f)
In [30]:
# First entry as read back from disk.
fixup_data[0]
Out[30]:
In [38]:
# Load the real fix-up file for subject tki (inside condition).
# NOTE(review): hard-coded absolute path.
with open("/Users/bwsprague/GradSchool/Research/BerkeleyVisionStats/data/raw/gaze/tki/tki_inside_fixup.json", 'r') as f:
    fixup_data = json.load(f)
In [39]:
# Inspect the loaded fix-up metadata.
fixup_data
Out[39]:
In [ ]:
def fixup_frame_sync(frames, fixup_fpath):
import json
with open(fixup_fpath, 'r') as f:
fixup_info = json.load(f)
for info in fixup_info:
data = info["data"]
startframe = data["radial_target_starttime"]