In [1]:
from operators import *
from two_stream import *
from collections import OrderedDict
In [30]:
obs = np.loadtxt("../notebooks/harvard_2003.txt")
In [31]:
passer = obs[:,1] == 1
plt.plot ( obs[passer,0], obs[passer,3], 'o')
plt.plot ( obs[passer,0], obs[passer,4], '+')
Out[31]: [plot: visible ('o') and near-infrared ('+') albedo observations against day of year]
In [4]:
state_config = OrderedDict ()
state_config['sigma_vis'] = CONSTANT
state_config['d_vis'] = CONSTANT
state_config['r_vis'] = CONSTANT
state_config['sigma_nir'] = CONSTANT
state_config['d_nir'] = CONSTANT
state_config['r_nir'] = CONSTANT
state_config['lai'] = VARIABLE
default_par = OrderedDict ()
default_par['sigma_vis'] = 0.17
default_par['d_vis'] = 1
default_par['r_vis'] = 0.1
default_par['sigma_nir'] = 0.7
default_par['d_nir'] = 2
default_par['r_nir'] = 0.18
default_par['lai'] = 1.5
# Define parameter boundaries. Indices follow the order of state_config:
# index 1 is d_vis, index 4 is d_nir and the last element is lai
parameter_min = OrderedDict()
parameter_max = OrderedDict()
min_vals = np.array ( 7*[ 0.001,] )
max_vals = np.array ( 7*[0.98, ] )
max_vals[-1] = 10.
min_vals[1] = 0.
max_vals[1] = 5.
min_vals[4] = 0.
max_vals[4] = 5.
for i, param in enumerate ( state_config.keys() ):
    parameter_min[param] = min_vals[i]
    parameter_max[param] = max_vals[i]
# Define the state grid: in this case, daily time steps over one year
state_grid = np.arange ( 1, 366 )
state = State( state_config, state_grid, default_par, parameter_min, parameter_max)
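As a quick sanity check, the packed state vector the optimiser works on should hold one element per CONSTANT parameter plus one element per time step of the single VARIABLE parameter, i.e. 6 + 365 = 371 elements (this matches the packing used in `der_cost` further down). A minimal sketch that only uses the flags defined above:
In [ ]:
# Expected length of the packed state vector: constants contribute one
# element each, variables one element per point of the state grid
n_state = sum ( 1 if typo == CONSTANT else len(state_grid)
                for typo in state_config.itervalues() )
print n_state    # 6 constants + 365 daily LAI values = 371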
In [91]:
sun_angles = obs[passer,2]
emulators = create_emulators ( sun_angles, min_vals, max_vals, n_train=250, n_validate=1000)
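The emulator selection code further down assumes that `create_emulators` returns a dictionary keyed by sun angle, each entry holding a (visible, near-infrared) pair of GP emulators whose `predict` method returns the modelled albedo, its variance and the gradient with respect to the inputs. A minimal inspection sketch under that assumption:
In [ ]:
# Run a single prediction at the default parameter values for the sun angle
# of the first observation (assumed interface: predict -> mean, var, grad)
vis_gp, nir_gp = emulators[ sun_angles[0] ]
x0 = np.atleast_2d ( [ default_par[k] for k in state_config.iterkeys() ] )
albedo_vis, var_vis, grad_vis = vis_gp.predict ( x0 )
print albedo_vis, var_vis, grad_vis.shape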
In [92]:
observations = np.zeros((365, 2))
mask = np.zeros((365, 2))
bu = np.zeros((365, 2))
doys = obs[:, 0]
for i in state_grid:
    if i in doys:
        iloc = obs[:, 0] == i
        if passer[iloc]:
            # Store the (vis, nir) albedo pair, flag the day as observed,
            # and keep the sun angle and the per-band uncertainties
            observations[i-1, :] = obs[iloc, 3:5]
            mask[i-1, :] = [ 1, obs[iloc, 2] ]
            bu[i-1, :] = [ obs[iloc, 5], obs[iloc, 6] ]
        else:
            mask[i-1, :] = [0, 0]
    else:
        mask[i-1, :] = [0, 0]
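A quick consistency check (sketch): assuming at most one observation per day, the number of days flagged in `mask` should equal the number of records that passed the quality filter, and every flagged day should carry positive uncertainties in both bands.
In [ ]:
# Compare the gridded observation count with the QA-passing record count,
# and check that flagged days have non-zero band uncertainties
print int ( mask[:, 0].sum() ), passer.sum()
print np.all ( bu[ mask[:, 0] == 1, : ] > 0 )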
In [126]:
def select_emulator ( itime, mask, emulators ):
    """Return the `predict` methods of the (vis, nir) emulator pair that
    corresponds to the sun angle of time step `itime`."""
    sun_angle = mask[itime, 1]
    vis = emulators[sun_angle][0].predict
    nir = emulators[sun_angle][1].predict
    return vis, nir
class ObservationOperatorTwoStream ( object ):
    """A GP-based observation operator"""
    def __init__ ( self, state_grid, state, observations, mask, emulators, bu ):
        """
        `observations` is an (nt, n_bands) array; nt has to be the same
        size as `state_grid` (it can contain dummy values where no
        observation is available). `mask` is an (nt, 2) array holding the
        observation flag and the sun angle, and `bu` holds the per-band
        observational uncertainties.
        """
        self.state = state
        self.nt, self.n_bands = observations.shape
        self.observations = observations
        self.mask = mask
        self.state_grid = state_grid
        self.emulators = emulators
        self.bu = bu
    def der_cost ( self, x_dict, state_config ):
        """The cost function and its partial derivatives. One important
        thing to note is that the GPs have been parameterised in
        transformed space, whereas `x_dict` is in "real" space, so when we
        go over the parameter dictionary we need to transform back to
        linear units. TODO: it might be better to have cost functions that
        report whether they expect a dictionary in true or transformed
        units!
        """
        cost = 0.
        # Size of the packed derivative vector: one element per CONSTANT
        # parameter, one per time step for each VARIABLE parameter
        n = 0
        for param, typo in state_config.iteritems():
            if typo == CONSTANT:
                n += 1
            elif typo == VARIABLE:
                n_elems = len ( x_dict[param] )
                n += n_elems
        der_cost = np.zeros ( n )
        # Arrange the current parameter values with one column per time step
        x_params = np.empty ( ( len( x_dict.keys() ), self.nt ) )
        j = 0
        the_derivatives = np.zeros ( ( len( x_dict.keys() ), self.nt ) )
        for param, typo in state_config.iteritems():
            if typo == FIXED or typo == CONSTANT:
                x_params[ j, : ] = x_dict[param]
            elif typo == VARIABLE:
                x_params[ j, : ] = x_dict[param]
            j += 1
        for itime, tstep in enumerate ( self.state_grid ):
            if self.mask[itime, 0] == 0:
                # No observation for this time step
                continue
            albedo_vis, albedo_nir = self.observations[ itime, :]
            # We use the `get_emulator` method to select the required
            # emulator for this geometry, spectral setting etc.
            obs_ops = self.get_emulator ( itime, self.mask, self.emulators )
            sigma_obs_vis, sigma_obs_nir = self.bu[ itime, : ]
            # Forward model the proposal; the emulators also return the
            # predictive variance and the gradient of the prediction with
            # respect to the inputs
            x = x_params[:, itime]
            model_albedo_vis, vis_var, vis_der = \
                obs_ops[0] ( np.atleast_2d(x) )
            model_albedo_nir, nir_var, nir_der = \
                obs_ops[1] ( np.atleast_2d(x) )
            # Calculate the actual cost
            this_cost = 0.5*( model_albedo_vis - albedo_vis )**2/sigma_obs_vis**2 + \
                0.5*( model_albedo_nir - albedo_nir )**2/sigma_obs_nir**2
            # The partial derivatives of the cost function are then
            this_der = (1./sigma_obs_vis**2)*( model_albedo_vis - albedo_vis )*vis_der + \
                (1./sigma_obs_nir**2)*( model_albedo_nir - albedo_nir )*nir_der
            cost += this_cost
            the_derivatives[ :, itime] = this_der
        j = 0
        for i, (param, typo) in enumerate ( state_config.iteritems() ):
            if typo == CONSTANT:
                # A constant parameter affects every time step, so its
                # gradient is the sum of the per-time-step contributions
                der_cost[j] = the_derivatives[i, :].sum()
                j += 1
            elif typo == VARIABLE:
                n_elems = len ( x_dict[param] )
                der_cost[j:(j+n_elems) ] = the_derivatives[i, :]
                j += n_elems
        return cost, der_cost
    def der_der_cost ( self, x, state_config, epsilon=1.0e-9 ):
        """Numerical (forward-difference) approximation to the Hessian at
        the packed state vector `x`, plus the posterior covariance (its
        inverse) and the marginal posterior standard deviations."""
        def unpack ( xv ):
            # Rebuild the parameter dictionary der_cost expects from the
            # packed vector: CONSTANTs first, then VARIABLE trajectories
            xd = OrderedDict()
            j = 0
            for param, typo in state_config.iteritems():
                if typo == CONSTANT:
                    xd[param] = xv[j]
                    j += 1
                elif typo == VARIABLE:
                    xd[param] = xv[j:(j + len(self.state_grid))]
                    j += len ( self.state_grid )
            return xd
        N = x.size
        h = np.zeros((N,N))
        df_0 = self.der_cost ( unpack(x), state_config )[1]
        for i in xrange(N):
            xx0 = 1.*x[i]
            x[i] = xx0 + epsilon
            df_1 = self.der_cost ( unpack(x), state_config )[1]
            h[i,:] = (df_1 - df_0)/epsilon
            x[i] = xx0
        post_cov = np.linalg.inv (h)
        post_sigma = np.sqrt ( post_cov.diagonal() )
        return h, post_cov, post_sigma
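`der_der_cost` builds the Hessian by forward-differencing the gradient column by column, H[i, :] ~ (dJ/dx(x + eps*e_i) - dJ/dx(x)) / eps, and then inverts it to obtain a Gaussian approximation to the posterior covariance. The self-contained sketch below demonstrates the same recipe on a known quadratic cost J(x) = 0.5 * x.T A x, whose exact Hessian is A (all names here are illustrative only):
In [ ]:
# Toy forward-difference Hessian, mirroring der_der_cost: for a quadratic
# cost 0.5 * x.T A x the gradient is A x, so the estimate should recover A
A = np.array ( [[4., 1.], [1., 3.]] )
def grad ( x ):
    return A.dot ( x )
x = np.array ( [1., 2.] )
eps = 1.0e-6
H = np.zeros ( (2, 2) )
g0 = grad ( x )
for i in xrange(2):
    xp = x.copy()
    xp[i] += eps
    H[i, :] = ( grad(xp) - g0 )/eps
print H                  # close to A
print np.linalg.inv (H)  # the "posterior covariance" of the Gaussian approximation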
In [127]:
obsop = ObservationOperatorTwoStream( state_grid, state, observations, mask, emulators, bu )
# Attach the emulator selector as an instance attribute; as a plain function
# (not a bound method) it is called with exactly the three arguments passed
# in der_cost
obsop.get_emulator = select_emulator
In [128]:
x_dict = {}
for k in default_par.iterkeys():
    if state_config[k] == VARIABLE:
        # Variable parameters get a full trajectory over the state grid
        x_dict[k] = np.ones_like ( state_grid ) * default_par[k]
    else:
        x_dict[k] = 1.0 * default_par[k]
In [129]:
obsop.der_cost(x_dict, state_config )
Out[129]:
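`der_cost` returns the scalar observation cost and its gradient with respect to the packed state vector; with six CONSTANT parameters and 365 daily LAI values, the gradient should have 371 elements. A small check (sketch):
In [ ]:
cost, dcost = obsop.der_cost ( x_dict, state_config )
print cost
print dcost.shape    # expect (371,): 6 constants followed by 365 LAI values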
In [130]:
mu_prior = OrderedDict()
inv_cov_prior = OrderedDict()
for k in state_config.iterkeys():
    mu_prior[k] = np.array([default_par[k]])
    print k
# Prior inverse covariances, specified as 1/sigma**2 for each parameter
inv_cov_prior['sigma_vis'] = np.array([1./(0.12**2)])
inv_cov_prior['d_vis'] = np.array([1./(0.7**2)])
inv_cov_prior['r_vis'] = np.array([1./(0.01**2)])
inv_cov_prior['sigma_nir'] = np.array([1./(0.15**2)])
inv_cov_prior['d_nir'] = np.array([1./(1.5**2)])
inv_cov_prior['r_nir'] = np.array([1./(.2**2)])
inv_cov_prior['lai'] = np.array([1./(5**2)])
the_prior = Prior( mu_prior, inv_cov_prior )
print mu_prior.keys()
print inv_cov_prior.keys()
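The prior is encoded through inverse variances, so the prior standard deviation implied for each parameter is one over the square root of the value stored in `inv_cov_prior`. A short sketch to print them back:
In [ ]:
# Recover the prior standard deviations implied by the inverse covariances
for k in inv_cov_prior.iterkeys():
    print k, 1./np.sqrt ( inv_cov_prior[k] )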
In [131]:
state.add_operator("Obs", obsop)
state.add_operator("Prior", the_prior )
In [135]:
retval = state.optimize(x_dict)
In [139]:
# The first six elements of the solution are the retrieved CONSTANT parameters
print retval[0][:6]
print default_par
In [134]:
plt.plot(retval[0][6:])
Out[134]: [plot: retrieved daily LAI trajectory]
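Since the first six elements of the solution vector are the CONSTANT parameters, the remainder is the retrieved daily LAI trajectory, which can be plotted against the state grid with labelled axes (a minimal sketch):
In [ ]:
plt.plot ( state_grid, retval[0][6:], '-' )
plt.xlabel ( "Day of year" )
plt.ylabel ( "Retrieved LAI" )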
In [ ]: