In [24]:
from math import sin, cos
import numpy as np

In [25]:
# Adapted from the i2g_g2i collinearity notebook
def opk_to_rotation(o, p, k):
    """
    Convert from Omega, Phi, Kappa to a 3x3 rotation matrix
    
    Parameters
    ----------
    o : float
        Omega in radians
    p : float
        Phi in radians
    k : float
        Kappa in radians
        
    Returns
    -------
     : ndarray
       (3,3) rotation array
       
    """
    # Rotation about the X axis (omega)
    om = np.empty((3,3))
    om[:,0] = [1,0,0]
    om[:,1] = [0, cos(o), -sin(o)]
    om[:,2] = [0, sin(o), cos(o)]
    
    # Rotation about the Y axis (phi)
    pm = np.empty((3,3))
    pm[:,0] = [cos(p), 0, sin(p)]
    pm[:,1] = [0,1,0]
    pm[:,2] = [-sin(p), 0, cos(p)]
    
    # Rotation about the Z axis (kappa)
    km = np.empty((3,3))
    km[:,0] = [cos(k), -sin(k), 0]
    km[:,1] = [sin(k), cos(k), 0]
    km[:,2] = [0,0,1]
    
    # Compose: omega is applied first, then phi, then kappa
    return km.dot(pm).dot(om)
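
In [ ]:
# Sanity check (an added aside, not part of the original workflow): a proper
# rotation matrix is orthonormal with determinant 1, so M.dot(M.T) should be
# the identity. Handy for catching sign/transpose mistakes in opk_to_rotation.
R = opk_to_rotation(0.1, -0.2, 0.3)
assert np.allclose(R.dot(R.T), np.eye(3))
assert np.isclose(np.linalg.det(R), 1.0)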

In [26]:
# Ignoring distortion for now (do_distort = False)
def distort(xp, yp):
    # Distortion creates weird values for input 100,100? Look at IK again.
    do_distort = False
    if not do_distort:
        return xp, yp
    x_coefs = [
        0.0,
        1.0020558791381275,
        0.0,
        0.0,
        -5.448742222712919e-4,
        0.0,
        6.597498811862692e-6,
        0.0,
        6.683129056014682e-6,
        0.0
    ]
    y_coefs = [
        -1.3657975359540047e-5,
        0.0,
        1.0,
        8.85544334965699e-4,
        0.0,
        3.338939138331483e-4,
        0.0,
        7.74756721313425e-6,
        0.0,
        7.79484564042716e-6
    ]
    taylor = [1, xp, yp, xp*xp, xp*yp, yp*yp, xp*xp*xp, xp*xp*yp, xp*yp*yp, yp*yp*yp]
    distorted_x = np.dot(x_coefs, taylor)
    distorted_y = np.dot(y_coefs, taylor)
    return distorted_x, distorted_y
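
In [ ]:
# A minimal parameterized sketch of the same 3rd-order Taylor-series distortion
# model hard-coded in distort() above. The function name and the idea of
# passing the coefficient lists in are hypothetical additions; the basis
# ordering [1, x, y, x^2, xy, y^2, x^3, x^2*y, x*y^2, y^3] matches the original.
def apply_taylor_distortion(xp, yp, x_coefs, y_coefs):
    taylor = [1, xp, yp, xp*xp, xp*yp, yp*yp,
              xp*xp*xp, xp*xp*yp, xp*yp*yp, yp*yp*yp]
    return np.dot(x_coefs, taylor), np.dot(y_coefs, taylor)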

In [27]:
def rotate(v, m, inverse=False):
    """
    Rotate a given 3D vector by a given rotation matrix
    
    Parameters
    ----------
    v : list
        3 element list representing a 3D vector
    m : list
        3x3 (2D list) rotation matrix to use (see opk_to_rotation)
    inverse : bool
        If True, will perform an inverse rotation
        
    Returns
    -------
     : list
         Vector with rotation applied to it
    """
    if inverse:
        # For a rotation matrix, np.linalg.inv(m) equals m.T
        m = np.linalg.inv(m)
    return [
        m[0,0] * v[0] + m[0,1] * v[1] + m[0,2] * v[2],
        m[1,0] * v[0] + m[1,1] * v[1] + m[1,2] * v[2],
        m[2,0] * v[0] + m[2,1] * v[1] + m[2,2] * v[2]
    ]
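
In [ ]:
# Aside: because a rotation matrix is orthonormal, its inverse is simply its
# transpose, so the np.linalg.inv call in rotate() could be replaced by m.T.
# A quick (hypothetical) check:
R = opk_to_rotation(0.5, 0.25, -0.75)
v = [1.0, 2.0, 3.0]
assert np.allclose(rotate(v, R, inverse=True), R.T.dot(v))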

In [28]:
def groundToImage(ground_point, sensor_point_b, focal_length, rotation_matrix):
    """
    Given a body-fixed ground point, get the line,sample on the image.
    Note: No error checking or intersection checking is performed.
          TODO: intersection check: verify the returned line,sample falls within the image dimensions.
          
    Parameters
    ----------
    ground_point : list
        Body-fixed XYZ coordinate of ground point on target body.
    sensor_point_b : list
        Body-fixed XYZ coordinate of sensor.
    focal_length : float
        Sensor focal length.
    rotation_matrix : list
        3x3 matrix (2D array) to perform rotations between body-fixed and sensor frame, and vice versa.
        
    Returns
    -------
     : list
         Line, Sample value on the image.
    """
    
    # NOTES
    # _b suggests body-fixed coordinates
    # _c suggests sensor frame coordinates
    
    # Determine the center look direction (center_x, center_y, f) of the sensor in body-fixed
    # (i.e. the sensor's optical axis rotated into the body-fixed frame)
    sensor_optical_axis_b = rotate([0.,0.,focal_length], rotation_matrix)
    
    # Find the look vector from the sensor optical center to the ground point in body-fixed
    look_ground_b = np.subtract(ground_point, sensor_point_b)
    
    # Scale the sensor-to-ground look vector so its magnitude matches that of the
    # body-fixed sensor optical axis vector
    mag = np.sqrt(np.dot(sensor_optical_axis_b, sensor_optical_axis_b))
    mag_ground = np.sqrt(np.dot(look_ground_b, look_ground_b))
    
    look_ground_b = np.multiply((mag / mag_ground), look_ground_b)
    print("\tDEBUG Sensor-to-ground (body-fixed): ", look_ground_b)
    
    # Rotate the sensor-to-ground look vector from body-fixed to sensor frame (inverse rotation)
    look_ground_c = rotate(look_ground_b, rotation_matrix, True)
    print("\tDEBUG Sensor-to-ground (sensor frame): ", look_ground_c)
    
    # Scale the sensor-to-ground sensor frame vector so that it intersects the focal plane
    # (i.e. scale it so its Z-component equals the sensor's focal length)
    focal_plane_coord = np.multiply(look_ground_c, (focal_length / look_ground_c[2]))
    
    # Distortion (**this doesn't do anything right now**)
    distorted_x, distorted_y = distort(focal_plane_coord[0], focal_plane_coord[1])
        
    # Convert focal plane mm to pixels
    # (this would use itrans from the ISD, or the inverse of trans from the ISD)
    pixels = [
        distorted_x * (1. / 0.014),
        distorted_y * (1. / 0.014)
    ]
    print("\tDEBUG Pixels: ", pixels)
    
    # Convert pixels to line,sample
    sample = pixels[0] + 512.0
    line = pixels[1] + 512.0
    
    return line,sample
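
In [ ]:
# The heart of groundToImage is the collinearity condition, which can be
# written more compactly with numpy. A sketch under the same assumptions as
# above (0.014 mm pixel pitch, 512.0 half-size offsets, distortion ignored);
# the function name is hypothetical. The intermediate magnitude normalization
# in groundToImage cancels out when scaling to the focal plane, so it is
# omitted here.
def ground_to_image_vec(ground_point, sensor_point, focal_length, m):
    look_c = np.asarray(m).T.dot(np.subtract(ground_point, sensor_point))
    focal_plane = look_c * (focal_length / look_c[2])  # scale so z == f
    pixels = focal_plane[:2] / 0.014                   # focal plane mm -> pixels
    return pixels[1] + 512.0, pixels[0] + 512.0        # line, sample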

In [29]:
# tests/data/EN1007907102M.cub/json image ISD
XL = 1728357.70312
YL = -2088409.0061
ZL = 2082873.92806

o = 2.25613094079
p = 0.094332016311
k = -0.963037547862

f = 549.1178195372703
pixel_pitch = 0.014 # transX/transY

# Compute the rotation matrix to use from omega, phi, kappa
M = opk_to_rotation(o,p,k)

# 512, 512 (distortion irrelevant as this is the center of the image)
print("groundToImage: result should be around 512,512")
X =  1129210.
Y = -1599310.
Z =  1455250.
print()
print("\nRESULT: ", groundToImage([X,Y,Z], [XL, YL, ZL], f, M))
print()
print('*'*80)
print()

# Distorted coordinates do not work for 100,100?
# X = 1121630.
# Y = -1606590.
# Z = 1453100.

# Undistorted coordinates work for 100,100
# -- These are obtained by turning off distortion in MdisNacSensorModel::imageToGround,
#    running make and ./runTests, then looking at the non-truth output values for the imageToGround1 test.
print("groundToImage: result should be around 100,100")
uX = 1115920.
uY = -1603550.
uZ = 1460830.
print()
print("\nRESULT: ", groundToImage([uX,uY,uZ], [XL, YL, ZL], f, M))


groundToImage: result should be around 512,512

	DEBUG Sensor-to-ground (body-fixed):  [-330.30872618  269.63913713 -346.00760233]
	DEBUG Sensor-to-ground (sensor frame):  [0.0021944163992770882, -0.0016542996706334634, 549.1178195303936]
	DEBUG Pixels:  [0.15674402852175495, -0.11816426218958431]

RESULT:  (511.88183573781043, 512.15674402852176)

********************************************************************************

groundToImage: result should be around 100,100

	DEBUG Sensor-to-ground (body-fixed):  [-336.78799907  266.63070164 -342.07059558]
	DEBUG Sensor-to-ground (sensor frame):  [-5.7691080334312801, -5.7653407997259478, 549.05724471254155]
	DEBUG Pixels:  [-412.12460793998889, -411.8554901309912]

RESULT:  (100.1445098690088, 99.875392060011109)

In [30]:
# NOTES:
# * TODO: implement an intersection check in groundToImage (just check if the result is within the image dimensions; see the sketch in the next cell)
# * TODO: fix distortion
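
In [ ]:
# A minimal sketch of the intersection check from the first TODO above
# (assumes a 1024x1024 detector, consistent with the 512.0 half-size offsets
# used in groundToImage):
def in_image(line, sample, n_lines=1024, n_samples=1024):
    return 0.0 <= line < n_lines and 0.0 <= sample < n_samples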

In [ ]: