In [1]:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import galsim

In [28]:
# parameters -- the knobs a reader might tune; everything downstream uses these.
ntrain = 1000  # Number of training PSFs.  can generate ~100 per minute
naber = 8  # Number of Zernikes to include.  8 means through spherical aberration
           # (the first 4 Zernike slots are zeroed when the PSFs are built below)
nPCA = 50 # Number of PCA components to keep when compressing the PSF images.

In [6]:
# Draw random Zernike coefficients for the training set, uniform in
# +/- 0.1 waves per aberration.  Seed the generator so the training set
# (and hence every timing/accuracy result below) is reproducible under
# Restart & Run All.
np.random.seed(42)
aberrations = (np.random.uniform(size=(ntrain,naber)) - 0.5) * 0.2  # +/- 0.1 waves

In [7]:
import time

# Render one 32x32 optical-PSF postage stamp per aberration vector and
# time the whole batch (this is the expensive step we want to emulate).
t0 = time.time()
PSFims = []
for ab in aberrations:
    # Pad with four leading zeros: only the higher-order Zernike terms vary.
    full_aber = np.concatenate([[0]*4, ab])
    psf = galsim.OpticalPSF(lam=700, diam=8.4, obscuration=0.5,
                            aberrations=full_aber)
    PSFims.append(psf.drawImage(nx=32, ny=32, scale=0.005))
print("Generating {} PSFs took {} seconds.".format(len(aberrations), time.time()-t0))


Generating 1000 PSFs took 575.201368093 seconds.

In [8]:
# Quick-look gallery of the first 20 training PSFs.
fig = plt.figure(figsize=(10,8))
for i in range(20):
    ax = fig.add_subplot(4,5,i+1)
    ax.imshow(PSFims[i].array)
# Use plt.show() rather than fig.show(): Figure.show needs a GUI backend and
# only emits "matplotlib is currently using a non-GUI backend" under
# %matplotlib inline (see the warning captured below in the original run).
plt.show()


/Users/josh/src/lsstsw/miniconda/lib/python2.7/site-packages/matplotlib/figure.py:397: UserWarning: matplotlib is currently using a non-GUI backend, so cannot show the figure
  "matplotlib is currently using a non-GUI backend, "

In [9]:
# Assemble the training design matrices:
#   Y : (ntrain, 1024) flattened PSF images, each peak-normalized to 1
#   x : (ntrain, naber) the corresponding aberration coefficients
Y = np.empty((ntrain, 32*32), dtype=float)
x = np.array(aberrations, dtype=float)
for row, im in enumerate(PSFims):
    img = im.array
    Y[row] = img.ravel() / img.max()

In [23]:
from sklearn import decomposition

# Fit a full-rank PCA to inspect the explained-variance spectrum and decide
# how many components are actually needed.
pca = decomposition.PCA(n_components=1000, whiten=True)
pca.fit(Y)
evr_cum = np.cumsum(pca.explained_variance_ratio_)
plt.plot(evr_cum)
plt.show()
# Log of the *unexplained* variance makes the tail easier to read.
plt.plot(np.log10(1 - evr_cum))
plt.show()



In [30]:
# Compress to nPCA components and check how well the training PSFs are
# reconstructed by the truncated basis alone (no emulation yet).
pca = decomposition.PCA(n_components=nPCA, whiten=True)
pca.fit(Y)
Y_coeff = pca.transform(Y)
Y_proj = pca.inverse_transform(Y_coeff)

def _show_panel(fig, pos, title, img):
    # One image panel with its own colorbar.
    ax = fig.add_subplot(1, 3, pos)
    ax.set_title(title)
    fig.colorbar(ax.imshow(img))

for i in range(3):
    fig = plt.figure(figsize=(8, 2))
    truth = Y[i].reshape(32, 32)
    model = Y_proj[i].reshape(32, 32)
    _show_panel(fig, 1, "truth", truth)
    _show_panel(fig, 2, "model", model)
    _show_panel(fig, 3, "residual", model - truth)
    fig.tight_layout()
# Note, not a fair test since test points are in the training set.



In [48]:
# Now try leave-one-out training/testing: hold out sample i, train the
# PCA + Gaussian-process emulator on the rest, and compare the emulated
# held-out PSF against the truth and against its pure-PCA projection.
from sklearn import gaussian_process
for i in range(3):
    x1 = np.concatenate([x[:i], x[i+1:]])
    Y1 = np.concatenate([Y[:i], Y[i+1:]])
    pca = decomposition.PCA(n_components=nPCA, whiten=True)
    pca.fit(Y1)
    Y1_coeff = pca.transform(Y1)
    # NOTE(review): sklearn's GaussianProcess is the legacy API (superseded
    # by GaussianProcessRegressor in later sklearn); kept for this environment.
    gp = gaussian_process.GaussianProcess()
    gp.fit(x1, Y1_coeff)
    Yi_gp_coeff = gp.predict(x[i].reshape(1, -1))
    Yi_gp_PCA = pca.inverse_transform(Yi_gp_coeff)
    # PCA projection of the held-out PSF -- computed once here instead of
    # twice below (panels 2 and 4 both need it).
    Yi_PCA = pca.inverse_transform(pca.transform(Y[i].reshape(1, -1)))

    fig = plt.figure(figsize=(14,2))

    ax = fig.add_subplot(151)
    ax.set_title("True")
    imtrue = ax.imshow(Y[i].reshape(32, 32))
    fig.colorbar(imtrue)

    ax = fig.add_subplot(152)
    ax.set_title("PCA projection")
    imPCA = ax.imshow(Yi_PCA.reshape(32, 32))
    fig.colorbar(imPCA)

    ax = fig.add_subplot(153)
    ax.set_title("GP reconstruction")
    imGP = ax.imshow(Yi_gp_PCA.reshape(32, 32))
    fig.colorbar(imGP)

    # Residual of the GP against the best the PCA basis could do...
    ax = fig.add_subplot(154)
    ax.set_title("GP - PCA")
    imres1 = ax.imshow((Yi_gp_PCA - Yi_PCA).reshape(32, 32))
    fig.colorbar(imres1)

    # ...and against the actual PSF.
    ax = fig.add_subplot(155)
    ax.set_title("GP - True")
    imres2 = ax.imshow((Yi_gp_PCA - Y[i]).reshape(32, 32))
    fig.colorbar(imres2)


In [49]:
timeit pca.inverse_transform(gp.predict(x[0].reshape(1, -1)))


1000 loops, best of 3: 338 µs per loop

In [50]:
timeit pca.inverse_transform(gp.predict(x[0:10]))  # Scales better than linearly.


1000 loops, best of 3: 1.05 ms per loop

In [51]:
timeit pca.inverse_transform(gp.predict(x[0:100]))  # Scales better than linearly.


100 loops, best of 3: 7.4 ms per loop

In [52]:
timeit galsim.OpticalPSF(lam=700, diam=8.4, obscuration=0.5, aberrations=np.concatenate([[0]*4,aberrations[0]])).drawImage(nx=32, ny=32, scale=0.005)


1 loop, best of 3: 529 ms per loop

In [37]:
# Refit the PCA on the full training set and construct a fresh (unfitted) GP
# so its default hyperparameters can be inspected in the next cell.
# Use the nPCA constant from the parameters cell instead of a hard-coded 50
# so the component count stays consistent notebook-wide.
pca = decomposition.PCA(n_components=nPCA, whiten=True)
pca.fit(Y)
Y_coeff = pca.transform(Y)
Y_PCA = pca.inverse_transform(Y_coeff)
gp = gaussian_process.GaussianProcess()

In [38]:
# Inspect the GP's default hyperparameters before fitting
# (compare with the post-fit printout in the cell below).
for attr_name in ("beta0", "nugget", "theta0"):
    print(getattr(gp, attr_name))
print(gp.get_params())


None
2.22044604925e-15
0.1
{'normalize': True, 'theta0': 0.1, 'optimizer': 'fmin_cobyla', 'verbose': False, 'storage_mode': 'full', 'nugget': 2.2204460492503131e-15, 'thetaU': None, 'regr': 'constant', 'random_start': 1, 'random_state': None, 'corr': 'squared_exponential', 'beta0': None, 'thetaL': None}

In [39]:
# NOTE(review): x1 and Y1_coeff here are leftovers from the last iteration
# (i == 2) of the leave-one-out loop cell -- hidden state that breaks under
# Restart & Run All unless that cell runs first.  Fitting on the full
# (x, Y_coeff) from the previous cell would be self-contained; confirm intent.
gp.fit(x1, Y1_coeff)
# After fitting, theta0/nugget come back as arrays and get_params() reports
# resolved callables for regr/corr (cf. the pre-fit printout above).
print(gp.beta0)
print(gp.nugget)
print(gp.theta0)
print(gp.get_params())


None
2.22044604925e-15
[[ 0.1]]
{'normalize': True, 'theta0': array([[ 0.1]]), 'optimizer': 'fmin_cobyla', 'verbose': False, 'storage_mode': 'full', 'nugget': array(2.220446049250313e-15), 'thetaU': None, 'regr': <function constant at 0x1154c0848>, 'random_start': 1, 'random_state': <mtrand.RandomState object at 0x107a31290>, 'corr': <function squared_exponential at 0x1154c0c80>, 'beta0': None, 'thetaL': None}

In [ ]: