In [39]:
import json
from math import sin, cos, tan, pi, radians, sqrt, degrees
import numpy as np
In [100]:
# Load the instrument support data (ISD) for this image from JSON.
# Everything below reads camera parameters out of this `isd` dict.
with open('isd.isd', 'r') as f:
    isd = json.load(f)
Ian's reference values:

o = 2.25613094079, p = 0.094332016311, k = -0.963037547862

XL = 1728357.70312, YL = -2088409.0061, ZL = 2082873.92806
In [41]:
# Pull the camera orientation angles (omega, phi, kappa, radians) from the ISD
# and set the body-fixed camera position used throughout the notebook.
print(isd['omega'])
print(isd['phi'])
print(isd['kappa'])
o = isd['omega']
p = isd['phi']
k = isd['kappa']
# Camera position (these match Ian's reference values above).
XL = 1728357.70312
YL = -2088409.0061
ZL = 2082873.92806
print(XL, YL, ZL)
In [42]:
opk_to_rotation(o, p, k)
Out[42]:
[[ 0.56849023 0.56123475 -0.60152673] [ 0.81728005 -0.30155688 0.49103641] [ 0.09419218 -0.7707652 -0.63011811]]
For a framing camera the interior orientation (intrinsic matrix) requires (at a minimum) the focal length and the principal point (optical center).
The example that we have been working on looks like a pinhole ground to image projection, defined as:
$$\begin{bmatrix} w \cdot u \\ w \cdot v \\ w \end{bmatrix} = \mathbf{K} \begin{bmatrix} \mathbf{Rt} \end{bmatrix} \begin{bmatrix} X\\ Y\\ Z\\ 1 \end{bmatrix} $$or
$$\begin{bmatrix} w \cdot u \\ w \cdot v \\ w \end{bmatrix} = \begin{bmatrix} f & s & u_{0} \\ 0 & \alpha f & v_{0} \\ 0 & 0 & 1 \end{bmatrix} \begin{bmatrix} r_{11} & r_{12} & r_{13} & t_{x} \\ r_{21} & r_{22} & r_{23} & t_{y} \\ r_{31} & r_{32} & r_{33} & t_{z} \\ \end{bmatrix} \begin{bmatrix} X\\ Y\\ Z\\ 1 \end{bmatrix} $$K is the intrinsic matrix (interior orientation), R is the extrinsic matrix (exterior orientation), and t is the translation. In the intrinsic matrix, $\alpha$ (pixel aspect ratio) and $s$ (skew) are often assumed to be one and zero, respectively. $f$ is the focal length (in pixels) and ($u_{0}, v_{0}$) are the optical center (principal point).
The second resource below suggests that t can be thought of as the world origin in camera coordinates.
Here we define:
$$L = \begin{bmatrix} X_{L}\\ Y_{L}\\ Z_{L} \end{bmatrix}, \qquad \begin{bmatrix} x\\ y\\ -f \end{bmatrix} = k\,\mathbf{M} \begin{bmatrix} X - X_{L}\\ Y - Y_{L}\\ Z - Z_{L} \end{bmatrix},$$ where $(x, y, -f)$ are in image coordinates, $k$ is a scale factor, $\mathbf{M}$ is a 3x3 rotation matrix, and $(X,Y,Z)$ represent the object point.
In [43]:
def opk_to_rotation(o, p, k):
    """
    Convert from Omega, Phi, Kappa to a 3x3 rotation matrix.

    Parameters
    ----------
    o : float
        Omega in radians
    p : float
        Phi in radians
    k : float
        Kappa in radians

    Returns
    -------
    : ndarray
        (3,3) rotation array, the product R_kappa @ R_phi @ R_omega
    """
    co, so = cos(o), sin(o)
    cp, sp = cos(p), sin(p)
    ck, sk = cos(k), sin(k)
    # Elementary rotations written out row-by-row (the original filled the
    # same matrices column-by-column; the values are identical).
    r_omega = np.array([[1.0, 0.0, 0.0],
                        [0.0,  co,  so],
                        [0.0, -so,  co]])
    r_phi = np.array([[ cp, 0.0, -sp],
                      [0.0, 1.0, 0.0],
                      [ sp, 0.0,  cp]])
    r_kappa = np.array([[ ck,  sk, 0.0],
                        [-sk,  ck, 0.0],
                        [0.0, 0.0, 1.0]])
    return r_kappa.dot(r_phi).dot(r_omega)
def collinearity(f, M, camera_position, ground_position, principal_point=(0, 0)):
    """
    Project a ground point into image coordinates via the collinearity equations.

    Parameters
    ----------
    f : float
        Focal length (same linear units as the returned x/y)
    M : ndarray
        (3,3) rotation matrix applied row-wise to the camera-to-ground vector
    camera_position : iterable
        (XL, YL, ZL) exposure station
    ground_position : iterable
        (X, Y, Z) object point
    principal_point : iterable, optional
        (x0, y0) principal point offset

    Returns
    -------
    tuple
        (x, y, -f) image coordinates
    """
    x0, y0 = principal_point
    # Camera-to-ground vector, rotated into image space.
    delta = np.asarray(ground_position, dtype=float) - np.asarray(camera_position, dtype=float)
    u, v, w = np.asarray(M).dot(delta)
    return -f * (u / w) + x0, -f * (v / w) + y0, -f
def collinearity_inv(f, M, camera_position, pixel_position, elevation, principal_point=(0, 0)):
    """
    Invert the collinearity equations: recover the ground (X, Y) that images
    at `pixel_position` for a known elevation Z.

    Parameters
    ----------
    f : float
        Focal length
    M : ndarray
        (3,3) rotation matrix; its transpose is applied here (image -> object)
    camera_position : iterable
        (XL, YL, ZL) exposure station
    pixel_position : iterable
        (x, y) image coordinates
    elevation : float
        Known ground elevation Z
    principal_point : iterable, optional
        (x0, y0) principal point offset

    Returns
    -------
    tuple
        (X, Y) ground coordinates at `elevation`
    """
    XL, YL, ZL = camera_position
    x0, y0 = principal_point
    # Image-space vector through the measured point.
    img = np.array([pixel_position[0] - x0, pixel_position[1] - y0, -f], dtype=float)
    gx, gy, gz = np.asarray(M).T.dot(img)
    return (elevation - ZL) * (gx / gz) + XL, (elevation - ZL) * (gy / gz) + YL
def pixel_to_focalplane(x, y, tx, ty):
    """
    Convert from camera pixel space to undistorted focal plane space.

    Each of `tx`/`ty` holds three affine coefficients ordered as
    (constant, x-coefficient, y-coefficient).
    """
    basis = (1, x, y)
    focal_x = sum(coeff * term for coeff, term in zip(tx, basis))
    focal_y = sum(coeff * term for coeff, term in zip(ty, basis))
    return focal_x, focal_y
def distorted_mdisnac_focal(x, y, tx, ty, odtx=None, odty=None):
    """
    Convert a pixel coordinate to a distorted focal-plane coordinate.

    Fixes to the original:
      * `xp`/`yp` were undefined (the call computing them was commented out) —
        the affine pixel-to-focal-plane transform is now applied inline.
      * `odtx`/`odty` were undefined free names — they are now optional
        parameters (the original signature still works; without them the
        undistorted coordinates are returned).
      * The basis vector was missing the x^2*y term; it now matches the
        10-term cubic model used by `distort` below.

    Parameters
    ----------
    x, y : float
        Pixel coordinates
    tx, ty : iterable
        3-term affine coefficients (constant, x, y) for each axis
    odtx, odty : iterable, optional
        10-term distortion coefficients for each axis

    Returns
    -------
    tuple
        (x, y) focal-plane coordinates, distorted when coefficients are given
    """
    # Affine pixel -> undistorted focal plane (same math as pixel_to_focalplane).
    xp = tx[0] + (tx[1] * x) + (tx[2] * y)
    yp = ty[0] + (ty[1] * x) + (ty[2] * y)
    if odtx is None or odty is None:
        # No distortion model supplied: return the undistorted coordinates.
        return xp, yp
    # Full 10-term cubic basis (matches `distort`).
    d = np.array([1, xp, yp, xp * xp, xp * yp, yp * yp,
                  xp ** 3, xp * xp * yp, xp * yp ** 2, yp ** 3])
    return np.asarray(odtx).dot(d), np.asarray(odty).dot(d)
$u = (f/s_{x}) \cdot (v_{1}/v_{3}) + pp_{x}$
$u = -0.007$
In [44]:
pixel_to_focalplane(0, 0, isd['transx'], isd['transy'])
Out[44]:
In [45]:
# Worked example from Mikhail: synthetic exterior/interior orientation with a
# known ground point, used to validate collinearity and its inverse.
o = radians(2)
p = radians(5)
k = radians(15)
XL = 5000
YL = 10000
ZL = 2000

# Interior Orientation
x0 = 0.015  # mm
y0 = -0.02  # mm
f = 152.4  # mm

# Ground Points
X = 5100
Y = 9800
Z = 100

M = opk_to_rotation(o,p,k)

# This is correct as per Mikhail
# NOTE(review): the first call's result (zero principal point) is immediately
# overwritten by the second call, which includes the principal-point offset.
x, y, _ = collinearity(f, M, [XL, YL, ZL], [X, Y, Z], [0,0])
x, y, _ = collinearity(f, M, [XL, YL, ZL], [X, Y, Z], [x0,y0])

# And now the inverse, find X, Y
Z = 100  # Provided by Mikhail - his random number
computedX, computedY = collinearity_inv(f, M, [XL, YL, ZL], [x, y], Z, (x0, y0))

# Round trip must recover the input ground point exactly.
assert(computedX == X)
assert(computedY == Y)
In [46]:
# Mikhail continued - this is the implementation used in the pinhole CSM (also working)
# Mikhail continued - this is the implementation used in the pinhole CSM (also working).
# Same projection as `collinearity`, written out as explicit u/v/w components.
xo = X - XL  # Ground point - Camera position in body fixed
yo = Y - YL
zo = Z - ZL
o = radians(2)
p = radians(5)
k = radians(15)
m = opk_to_rotation(o,p,k)
# Rotate the camera-to-ground vector into image space.
u = m[0][0] * xo + m[0][1] * yo + m[0][2] * zo
v = m[1][0] * xo + m[1][1] * yo + m[1][2] * zo
w = m[2][0] * xo + m[2][1] * yo + m[2][2] * zo
# Perspective divide.
u /= w
v /= w
x0 = 0.015  # mm
y0 = -0.02  # mm
f = 152.4  # mm
# Image coordinates without and with the principal-point offset.
print(-f * u, -f * v)
print(x0 - f * u, y0 - f * v)
In [47]:
# First from pixel to ground:
f = isd['focal_length']
# We know that the pixel size is 0.014^2 mm per pixel (14.4mm / 1024 pixels)
pixel_size = 0.014
x0 = 512 # Convert from pixel based principal point to metric principal point
y0 = 512
f = isd['focal_length']
o = isd['omega']
p = isd['phi']
k = isd['kappa']
M = opk_to_rotation(o,p,k)
camera_coords = [512, 512]
print(camera_coords)
# This is image to ground
X, Y = collinearity_inv(f, M, [XL, YL, ZL], camera_coords, 1455660, (x0, y0))
print('Ground: ', X, Y) # Arbitrary 1000m elevation - here is where iteration with intersection is needed.
# Now reverse! This is ground to image
# These are in mm and need to convert to pixels
x, y, f = collinearity(f, M, [XL, YL, ZL], [X, Y, 1455660], [x0,y0])
print(x,y)
print('Sensor Position (X,Y,Z): ', XL, YL, ZL)
print('')
In [48]:
print(XL, YL, ZL)
In [49]:
# Quick numpy check: .T is the transpose of the array (rebinds x to a demo array).
x = np.arange(9).reshape((3,3))
print(x)
print(x.T)
In [50]:
def groundToImage(f, camera_position, camera_orientation, ground_position, principal_point=(0, 0)):
    """
    Project a body-fixed ground point into focal-plane coordinates.

    Builds the rotation matrix from (omega, phi, kappa) and applies the
    collinearity equations with the transpose of M.

    NOTE(review): this applies M transposed relative to `collinearity` above —
    confirm which rotation convention is intended.
    """
    x0, y0 = principal_point
    M = opk_to_rotation(*camera_orientation)
    delta = np.asarray(ground_position, dtype=float) - np.asarray(camera_position, dtype=float)
    # M.T @ delta yields the two numerators (u, v) and the denominator (w).
    u, v, w = np.asarray(M).T.dot(delta)
    return -f * (u / w) + x0, -f * (v / w) + y0
# Body-fixed rectangular camera position in meters.
# NOTE(review): the original comment said "(lon, lat) in radians", but these
# are clearly rectangular body-fixed coordinates.
XL = 1728357.70312
YL = -2088409.0061
ZL = 2082873.92806
camera_position = [XL, YL, ZL]

# Camera rotation
o = isd['omega']
p = isd['phi']
k = isd['kappa']
camera_orientation = [o,p,k]

f = 549.1178195372703  # focal length, hard-coded here rather than read from isd
principal_point = [0,0]

X = 1129210.
Y = -1599310.
Z = 1455250.
ground_position = [X, Y, Z]

x, y = groundToImage(f, camera_position, camera_orientation, ground_position, principal_point)

# Focal plane -> line/sample using the detector center and per-axis scales.
po = isd['ccd_center'][0]
lo = isd['ccd_center'][1]
sy = isd['itrans_line'][2]
sx = isd['itrans_sample'][1]
print(x, y, po, lo, sy, sx)
l = y / sy + lo
# NOTE(review): `lo` here looks like a copy-paste slip — the sample
# coordinate should presumably use `po` (ccd_center[0]); verify.
s = x / sx + lo
print(l, s)
In [51]:
# From mm to pixels
x = 3.5
y = -3.5
transx = isd['transx'][1]
transy = isd['transy'][2]
s = x / transx + 512.5
l = y / transy + 512.5
l, s
Out[51]:
In [52]:
#From pixels to mm
l = 1024
s = 1024
itransl= isd['itrans_line'][2]
itranss = isd['itrans_sample'][1]
x = (l - 512.5) / itransl
y = (s - 512.5) / itranss
x, y
Out[52]:
In [53]:
isd['itrans_line']
Out[53]:
In [54]:
isd['transx'][0], isd['transy'][1]
Out[54]:
In [91]:
def intersect_ellipsoid(h, xc, yc, zc, xl, yl, zl, semi_major=None, semi_minor=None):
    """
    Intersect a look vector with a biaxial ellipsoid raised by `h` meters.

    Parameters
    ----------
    h : float
        Height (meters) added to both semi-axes before intersecting
    xc, yc, zc : float
        Camera position (the transform origin)
    xl, yl, zl : float
        Look-vector components from the collinearity equations
    semi_major, semi_minor : float, optional
        Ellipsoid semi-axes in meters.  When omitted they are taken from the
        module-level `isd` (which stores them in kilometers, hence * 1000).

    Returns
    -------
    tuple
        (x, y, z) intersection point (the near root of the quadratic)
    """
    if semi_major is None:
        semi_major = isd['semi_major_axis'] * 1000
    if semi_minor is None:
        semi_minor = isd['semi_minor_axis'] * 1000
    ap = semi_major + h
    bp = semi_minor + h
    k = ap ** 2 / bp ** 2
    print('A', ap, bp, k)
    # Quadratic in the scale s along the look vector: at*s^2 + bt*s + ct = 0.
    at = xl ** 2 + yl ** 2 + k * zl ** 2
    bt = 2.0 * (xl * xc + yl * yc + k * zl * zc)
    ct = xc * xc + yc * yc + k * zc * zc - ap * ap
    quadterm = bt * bt - 4.0 * at * ct
    print('B', at, bt, ct, quadterm)
    if 0.0 > quadterm:
        # No real intersection; clamp to the tangent solution.
        quadterm = 0
    # BUG FIX: the original called math.sqrt, but only `from math import sqrt`
    # is in scope in this notebook — `math` itself is never imported.
    scale = (-bt - sqrt(quadterm)) / (2.0 * at)
    print(scale)
    x = xc + scale * xl
    y = yc + scale * yl
    z = zc + scale * zl
    print(x, y, z)
    return x, y, z
In [96]:
def imageToGround(f, camera_position, camera_orientation, pixel_position, elevation, principal_point=(0,0)):
    """
    Map an image measurement to a body-fixed ground point.

    Computes a planar inverse-collinearity solution at `elevation` (X, Y
    below) and an ellipsoid-intersection solution; only the latter is
    returned — the planar X/Y values are discarded.
    """
    XL, YL, ZL = camera_position
    x, y = pixel_position
    Z = elevation
    x0, y0 = principal_point
    print(x-x0)
    M = opk_to_rotation(*camera_orientation)
    # Planar inverse collinearity at Z = elevation.
    # NOTE(review): this indexes M by rows (M[0,0], M[0,1], ...) while
    # `collinearity_inv` above uses the transpose (M[0,0], M[1,0], ...) —
    # confirm which rotation convention is intended.
    X = (Z-ZL) * ((M[0,0] * (x - x0) + M[0,1] * (y - y0) + M[0,2] * (-f))/
                  (M[2,0] * (x - x0) + M[2,1] * (y - y0) + M[2,2] * (-f))) + XL;
    Y = (Z-ZL) * ((M[1,0] * (x - x0) + M[1,1] * (y - y0) + M[1,2] * (-f))/
                  (M[2,0] * (x - x0) + M[2,1] * (y - y0) + M[2,2] * (-f))) + YL;
    # NOTE(review): X and Y above are never used past this point.
    # This does not try to solve for scale.
    # Look vector; note `- M[i,2] * (-f)` adds +f along the third column —
    # the opposite sign from the (-f) terms in the X/Y solution above. Verify.
    xl = M[0,0] * (x - x0) + M[0,1] * (y - y0) - M[0,2] * (-f)
    yl = M[1,0] * (x - x0) + M[1,1] * (y - y0) - M[1,2] * (-f)
    zl = M[2,0] * (x - x0) + M[2,1] * (y - y0) - M[2,2] * (-f)
    print(xl, yl, zl)
    h = 0  # intersect at the ellipsoid surface (no terrain height)
    print(h, XL, YL, ZL)
    x, y, z = intersect_ellipsoid(h, XL, YL, ZL, xl, yl, zl)
    return x, y, z
XL = 1728357.70312
YL = -2088409.0061
ZL = 2082873.92806
camera_position = [XL, YL, ZL]

# Camera rotation
o = isd['omega']
p = isd['phi']
k = isd['kappa']

X = 1129210.
Y = -1599310.
Z = elevation = 1455250.

camera_orientation = [o,p,k]
#f = isd['focal_length']
# NOTE(review): f is whatever the earlier cells left it as — the isd read
# above is commented out.
principal_point = [0,0]

#image_coordinates = [5.768,5.768]
image_coordinates = [0,0]
# NOTE(review): typo — `image_coordiantes` creates a new variable, so the
# [100, 100] value is never used; imageToGround below still receives [0, 0].
image_coordiantes = [100,100]

groundx, groundy, groundz = imageToGround(f, camera_position, camera_orientation, image_coordinates, elevation, principal_point)
print(groundx, groundy, groundz)
print(X, Y, Z)
# Difference between the expected and computed ground point.
print(groundx - X, groundy - Y, groundz - Z)

# Sanity Checker Here - Inverse.
ground_position = [groundx, groundy, Z]
print('GP: ', ground_position)
groundToImage(f, camera_position, camera_orientation, ground_position, principal_point=(0,0))
Out[96]:
In [59]:
# Not invert and check for the sensor coordinates using these ground coordinates
# Should be equal to pixel_position in the previous cell
groundToImage(f, camera_position, camera_orientation, [groundx, groundy, elevation],principal_point)
Out[59]:
In [114]:
def distort(x, y, odtx, odty):
    """
    Apply the 10-term (cubic) distortion polynomial to a focal-plane (x, y).

    `odtx` and `odty` are the 10 Taylor-series coefficients for each axis,
    ordered to match the basis below.
    """
    basis = np.array([1, x, y, x ** 2, x * y, y ** 2,
                      x ** 3, x * x * y, x * y * y, y ** 3])
    return np.asarray(odtx).dot(basis), np.asarray(odty).dot(basis)
def pixel_to_mm(l, s, isd):
    """
    Convert a (line, sample) pixel coordinate to focal-plane millimeters using
    the inverse-transform coefficients and the 512.5 detector center.
    """
    line_offset = l - 512.5
    sample_offset = s - 512.5
    x = line_offset / isd['itrans_line'][2]
    y = sample_offset / isd['itrans_sample'][1]
    return x, y
# Evaluate the distortion at the detector center (maps to x = y = 0 mm) and
# at pixel (100, 100).
x, y = pixel_to_mm(512.5, 512.5, isd)
nx, ny = distort(x,y,isd['odt_x'], isd['odt_y'])
print(nx, ny)

x, y = pixel_to_mm(100, 100, isd)
print(x, y)
nx, ny = distort(x, y, isd['odt_x'], isd['odt_y'])
print(nx, ny)
In [98]:
len(isd['odt_'])
Out[98]:
In [115]:
# Evaluate the distortion polynomial at (-5.77, -5.77) by accumulating each
# coefficient * basis-term product explicitly (cross-check against `distort`).
x = -5.77
y = -5.77
ts = np.array([1, x, y, x**2, x*y, y**2,
               x**3, x*x*y, x*y*y, y**3])
j = 0
k = 0
for term, cx, cy in zip(ts, isd['odt_x'], isd['odt_y']):
    j = j + term * cx
    k = k + term * cy
print(j, k)
In [116]:
!cat isd.isd
In [ ]: