Vignetting

Create a toy model for looking into the vignetting issue. Review the vignetting paper to double-check how they did it.


In [2]:
import pyrtools as pt
%matplotlib inline
%load_ext autoreload
%autoreload 2
import sys
sys.path.append('..')
import sfp
import numpy as np
import torch
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from scipy import interpolate
import itertools

from scipy import stats
import time
from torch.utils import data as torchdata

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
df_path = '/mnt/winawerlab/Projects/spatial_frequency_preferences/BIDS/derivatives/first_level_analysis/stim_class/posterior/sub-wlsubj045/ses-04/sub-wlsubj045_ses-04_task-sfprescaled_v1_e1-12_summary.csv'



Example calculation of stimulus energy and filters


In [3]:
stim = np.load('/mnt/winawerlab/Projects/spatial_frequency_preferences/BIDS/stimuli/task-sfp_stimuli.npy')

In [4]:
fig = pt.imshow(stim[72:80], col_wrap=4, zoom=.25, title=None);
#fig.savefig('stim.svg')


We have a function that does this (for all stimuli) and saves the output, but the following shows how it's done.


In [5]:
img_size = stim.shape[1:]
# this computation comes from the SteerablePyramidFreq code
max_ht = int(np.floor(np.log2(min(img_size))) - 2)
n_orientations = 6
energy = np.zeros((8, max_ht, n_orientations, *img_size),
                  dtype=np.float32)
filters = np.zeros((8, max_ht, n_orientations, *img_size), dtype=np.float32)

filled_filters = False
# stim[32:40] are the 8 phases of one stimulus class; we accumulate their
# energy into energy[0] (the saved version described below averages across
# phases instead, which differs only by a constant factor)
for j in range(32, 40):
    pyr = pt.pyramids.SteerablePyramidFreq(stim[j], order=n_orientations-1, is_complex=True)
    for k, l in itertools.product(range(max_ht), range(n_orientations)):
        # energy is the squared magnitude of the complex pyramid coefficients,
        # upsampled back to the stimulus resolution
        energy[0, k, l, :, :] += sfp.image_computable.upsample(np.abs(pyr.pyr_coeffs[(k, l)])**2, img_size)
        # the filters don't depend on the stimulus, so we only grab them once
        if not filled_filters:
            if k > 0:
                lomask = pyr._lomasks[k-1]
            else:
                lomask = pyr._lo0mask
            filt = pyr._anglemasks[k][l] * pyr._himasks[k] * lomask
            # zero-pad each filter up to the full image size (the masks at
            # coarser scales are smaller), keeping it centered
            pad_num = []
            for m in range(2):
                pad_num.append([(img_size[m] - filt.shape[m])//2, (img_size[m] - filt.shape[m])//2])
                if filt.shape[m] + 2*pad_num[m][0] != img_size[m]:
                    pad_num[m][0] += img_size[m] - (filt.shape[m] + 2*pad_num[m][0])
            filters[0, k, l, :, :] = np.pad(filt, pad_num, 'constant', constant_values=0)
    filled_filters = True
# only filters[0] was filled, so take it directly rather than averaging over
# the mostly-zero first axis (which would scale the filters down by 8)
filters = filters[0]

And then we can examine some of the resulting energy maps.


In [6]:
fig = pt.imshow([energy[0,i,j,:,:] for i in range(5,8) for j in range(6)], col_wrap=6, zoom=.25, vrange='indep1');
#fig.savefig('energy.svg')


How does this work?

We've already computed the energy for each stimulus class (averaged across all phases) at each scale and orientation, as well as the Fourier transform of the filters used to do so.


In [195]:
energy = np.load('/mnt/winawerlab/Projects/spatial_frequency_preferences/BIDS/derivatives/stimuli_energy/task-sfprescaled_n6_energy.npy')
filters = np.load('/mnt/winawerlab/Projects/spatial_frequency_preferences/BIDS/derivatives/stimuli_energy/task-sfprescaled_n6_filters.npy')
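
As a quick sanity check on what was loaded, the arrays should follow the (class, scale, orientation, height, width) layout used in the demo above (a sketch; the exact class count depends on the experiment):

print(energy.shape, energy.dtype)    # expect (n_classes, max_ht, 6, H, W), float32
print(filters.shape, filters.dtype)  # expect (max_ht, 6, H, W) if averaged over classes
# per the model code's error message below, the images must be square
assert energy.shape[-1] == energy.shape[-2]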

Let's show that things work: take the data for one subject and show that the model can produce predictions for a given voxel in both image-computable and tuning-curve modes.


In [70]:
ds = sfp.model.FirstLevelDataset(df_path, device, model_mode='image-computable',)
dl = torchdata.DataLoader(ds, 3)

In order to run in image-computable mode, we need to pass the model the pre-computed energy and filters from above.


In [198]:
model = sfp.model.LogGaussianDonut(vary_amplitude=False, sigma=1, sf_ecc_intercept=.2, sf_ecc_slope=.3)
model.prepare_image_computable(energy, filters)


---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-198-37fdad891b9f> in <module>
      1 model = sfp.model.LogGaussianDonut(vary_amplitude=False, sigma=1, sf_ecc_intercept=.2, sf_ecc_slope=.3)
----> 2 model.prepare_image_computable(energy, filters)

~/Documents/spatial-frequency-preferences/sfp/model.py in prepare_image_computable(self, energy, filters, stim_radius_degree)
    351             raise Exception("For now, this only works on square input images!")
    352         self.image_size = energy.shape[-1]
--> 353         filters, energy = _cast_args_as_tensors([filters, energy], self.sigma.is_cuda)
    354         self.energy = energy.unsqueeze(0)
    355         # this is the l1 norm

~/Documents/spatial-frequency-preferences/sfp/model.py in _cast_args_as_tensors(args, on_cuda)
     72     for v in args:
     73         if not torch.is_tensor(v):
---> 74             v = _cast_as_tensor(v)
     75         if on_cuda:
     76             v = v.cuda()

~/Documents/spatial-frequency-preferences/sfp/model.py in _cast_as_tensor(x)
     61         x = x.values
     62     # needs to be float32 to work with the Hessian calculations
---> 63     return torch.tensor(x, dtype=torch.float32)
     64 
     65 

RuntimeError: $ Torch: not enough memory: you tried to allocate 10GB. Buy new RAM! at /opt/conda/conda-bld/pytorch-cpu_1549626403278/work/aten/src/TH/THGeneral.cpp:201

(Note the input cell numbers: the full-resolution energy was too large to load here, so the outputs below come from an earlier session where the model had already been prepared.) We then pass the model a tensor that contains, for each of 3 voxels, the stimulus class index, the voxel's pRF location (eccentricity and angle), and its sigma.


In [7]:
inputs, targets = next(iter(dl))

In [11]:
inputs[:,:5]


Out[11]:
tensor([[[0.0000, 3.3923, 1.5379, 0.5945],
         [1.0000, 3.3923, 1.5379, 0.5945],
         [2.0000, 3.3923, 1.5379, 0.5945],
         [3.0000, 3.3923, 1.5379, 0.5945],
         [4.0000, 3.3923, 1.5379, 0.5945]],

        [[0.0000, 2.7762, 1.5326, 0.4905],
         [1.0000, 2.7762, 1.5326, 0.4905],
         [2.0000, 2.7762, 1.5326, 0.4905],
         [3.0000, 2.7762, 1.5326, 0.4905],
         [4.0000, 2.7762, 1.5326, 0.4905]],

        [[0.0000, 3.1239, 1.5550, 0.5492],
         [1.0000, 3.1239, 1.5550, 0.5492],
         [2.0000, 3.1239, 1.5550, 0.5492],
         [3.0000, 3.1239, 1.5550, 0.5492],
         [4.0000, 3.1239, 1.5550, 0.5492]]])
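
To make that layout concrete, here's a sketch of building an equivalent input by hand for one hypothetical voxel (the pRF values here are made up):

n_classes = 48
vox = torch.tensor([3.39, 1.54, 0.59])  # hypothetical (eccentricity, angle, sigma)
class_idx = torch.arange(n_classes, dtype=torch.float32).unsqueeze(1)
# one row per stimulus class: (class_idx, ecc, angle, sigma)
by_hand = torch.cat([class_idx, vox.repeat(n_classes, 1)], 1).unsqueeze(0)
by_hand.shape  # (1 voxel, 48 classes, 4 columns)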

And we see the model produces a prediction for each of the 3 voxels for each of the 48 stimulus classes.


In [8]:
model.image_computable(inputs)


Out[8]:
tensor([[6.1728e+17, 3.8758e+17, 2.0870e+17, 7.9827e+16, 3.2646e+16, 9.0294e+15,
         3.1169e+15, 5.6713e+14, 1.2244e+14, 2.1880e+13, 7.1134e+17, 5.4744e+17,
         2.3957e+17, 1.0158e+17, 3.5182e+16, 1.0500e+16, 3.4506e+15, 6.5559e+14,
         1.3944e+14, 2.1608e+13, 7.1951e+17, 3.7637e+17, 1.9561e+17, 9.0586e+16,
         3.4354e+16, 8.8772e+15, 3.1077e+15, 5.9607e+14, 1.2714e+14, 2.1308e+13,
         8.1133e+17, 4.1678e+17, 2.1434e+17, 9.5383e+16, 3.5316e+16, 8.9741e+15,
         3.1075e+15, 5.9382e+14, 1.2598e+14, 2.1119e+13, 9.0469e+15, 8.9473e+15,
         9.4351e+15, 1.0016e+16, 1.0093e+16, 9.5270e+15, 9.0348e+15, 9.0979e+15],
        [3.1142e+17, 1.9307e+17, 9.7910e+16, 4.2768e+16, 1.4517e+16, 4.8640e+15,
         1.4974e+15, 2.6563e+14, 6.7331e+13, 9.3744e+12, 3.6496e+17, 2.8787e+17,
         1.1155e+17, 5.3053e+16, 1.6218e+16, 5.6065e+15, 1.6249e+15, 3.2712e+14,
         7.6775e+13, 9.7775e+12, 3.7868e+17, 1.8488e+17, 9.5330e+16, 4.7904e+16,
         1.5518e+16, 4.8654e+15, 1.4645e+15, 2.8219e+14, 6.9608e+13, 9.3103e+12,
         4.1617e+17, 2.0282e+17, 1.0223e+17, 4.9965e+16, 1.5905e+16, 4.8998e+15,
         1.4673e+15, 2.8086e+14, 6.9123e+13, 9.2315e+12, 4.8819e+15, 4.8677e+15,
         5.1221e+15, 5.4204e+15, 5.4497e+15, 5.1555e+15, 4.8954e+15, 4.8971e+15],
        [4.6902e+17, 2.9469e+17, 1.5308e+17, 6.2007e+16, 2.3638e+16, 6.8475e+15,
         2.3741e+15, 3.9701e+14, 9.9094e+13, 1.4926e+13, 5.4048e+17, 4.2410e+17,
         1.7225e+17, 7.9123e+16, 2.5544e+16, 8.0272e+15, 2.5850e+15, 4.7506e+14,
         1.1187e+14, 1.5022e+13, 5.5477e+17, 2.8384e+17, 1.4450e+17, 7.0273e+16,
         2.4878e+16, 6.8561e+15, 2.3394e+15, 4.2100e+14, 1.0249e+14, 1.4626e+13,
         6.1754e+17, 3.1283e+17, 1.5700e+17, 7.3678e+16, 2.5562e+16, 6.9165e+15,
         2.3413e+15, 4.1928e+14, 1.0164e+14, 1.4501e+13, 6.8785e+15, 6.8590e+15,
         7.2610e+15, 7.7009e+15, 7.7523e+15, 7.3200e+15, 6.9094e+15, 6.9064e+15]],
       grad_fn=<SumBackward2>)

We can also produce a prediction for a single voxel:


In [12]:
inputs, targets = ds[2]

In [13]:
model.image_computable(inputs)


Out[13]:
tensor([[4.6902e+17, 2.9469e+17, 1.5308e+17, 6.2007e+16, 2.3638e+16, 6.8475e+15,
         2.3741e+15, 3.9701e+14, 9.9094e+13, 1.4926e+13, 5.4048e+17, 4.2410e+17,
         1.7225e+17, 7.9123e+16, 2.5544e+16, 8.0272e+15, 2.5850e+15, 4.7506e+14,
         1.1187e+14, 1.5022e+13, 5.5477e+17, 2.8384e+17, 1.4450e+17, 7.0273e+16,
         2.4878e+16, 6.8561e+15, 2.3394e+15, 4.2100e+14, 1.0249e+14, 1.4626e+13,
         6.1754e+17, 3.1283e+17, 1.5700e+17, 7.3678e+16, 2.5562e+16, 6.9165e+15,
         2.3413e+15, 4.1928e+14, 1.0164e+14, 1.4501e+13, 6.8785e+15, 6.8590e+15,
         7.2610e+15, 7.7009e+15, 7.7523e+15, 7.3200e+15, 6.9094e+15, 6.9064e+15]],
       grad_fn=<SumBackward2>)

And just as a sanity check, the model still works in the tuning-curve mode. The scale of the output is completely different, but since we're normalizing the predictions when computing the loss, this shouldn't matter.
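
For reference, a minimal sketch of the kind of normalization being referred to (an assumption about the loss, not necessarily sfp's exact implementation): both predictions and targets are rescaled to unit norm before comparison, so only the relative pattern across stimulus classes matters.

def normed_mse(predictions, targets):
    # divide out overall scale, so predictions that differ from the targets
    # only by a multiplicative factor incur no loss
    predictions = predictions / predictions.norm(dim=-1, keepdim=True)
    targets = targets / targets.norm(dim=-1, keepdim=True)
    return torch.mean((predictions - targets) ** 2)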


In [14]:
ds = sfp.model.FirstLevelDataset(df_path, device, model_mode='tuning_curve',)
dl = torchdata.DataLoader(ds, 3)

In [15]:
inputs, targets = next(iter(dl))
model(inputs)


Out[15]:
tensor([[0.3033, 0.5283, 0.7989, 0.9916, 0.9253, 0.6847, 0.3954, 0.1739, 0.0591,
         0.0163, 0.3033, 0.5283, 0.7989, 0.9916, 0.9253, 0.6847, 0.3954, 0.1739,
         0.0591, 0.0163, 0.2651, 0.5795, 0.8202, 0.9856, 0.9337, 0.6706, 0.3910,
         0.1766, 0.0602, 0.0159, 0.2651, 0.5795, 0.8202, 0.9856, 0.9337, 0.6706,
         0.3910, 0.1766, 0.0602, 0.0159, 0.6843, 0.6780, 0.6780, 0.6843, 0.6843,
         0.6780, 0.6780, 0.6843],
        [0.3281, 0.5593, 0.8259, 0.9970, 0.9055, 0.6537, 0.3680, 0.1577, 0.0522,
         0.0140, 0.3281, 0.5593, 0.8259, 0.9970, 0.9055, 0.6537, 0.3680, 0.1577,
         0.0522, 0.0140, 0.2879, 0.6108, 0.8462, 0.9930, 0.9148, 0.6395, 0.3638,
         0.1602, 0.0532, 0.0137, 0.2879, 0.6108, 0.8462, 0.9930, 0.9148, 0.6395,
         0.3638, 0.1602, 0.0532, 0.0137, 0.6533, 0.6470, 0.6470, 0.6533, 0.6533,
         0.6470, 0.6470, 0.6533],
        [0.3129, 0.5404, 0.8096, 0.9940, 0.9178, 0.6726, 0.3846, 0.1674, 0.0564,
         0.0153, 0.3129, 0.5404, 0.8096, 0.9940, 0.9178, 0.6726, 0.3846, 0.1674,
         0.0564, 0.0153, 0.2739, 0.5917, 0.8305, 0.9888, 0.9266, 0.6585, 0.3803,
         0.1700, 0.0574, 0.0150, 0.2739, 0.5917, 0.8305, 0.9888, 0.9266, 0.6585,
         0.3803, 0.1700, 0.0574, 0.0150, 0.6722, 0.6659, 0.6659, 0.6722, 0.6722,
         0.6659, 0.6659, 0.6722]], grad_fn=<MulBackward0>)

To step through what's happening here, let's look at an imaginary voxel with the following properties:


In [192]:
vox_ecc = 4
vox_ang = 0
vox_sig = .5

First, based on its location in the visual field, we know the voxel's tuning, which we can show: this is its weight at each spatial frequency.


In [196]:
vox_tuning = model.create_image(vox_ecc, vox_ang)
fig = pt.imshow(vox_tuning.detach(), zoom=.25, title=None);
#fig.savefig('vox_tuning.svg')


Based on this tuning and the filters we already passed to the model, we can get the relative weight on each scale and orientation sub-band.


In [197]:
weights = model.image_computable_weights(vox_ecc, vox_ang)
plt.scatter(np.array(range(weights.nelement())), weights.detach())
#plt.savefig('weights.svg')


---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-197-391479e52a53> in <module>
----> 1 weights = model.image_computable_weights(vox_ecc, vox_ang)
      2 plt.scatter(np.array(range(weights.nelement())), weights.detach())
      3 #plt.savefig('weights.svg')

~/Documents/spatial-frequency-preferences/sfp/model.py in image_computable_weights(self, vox_ecc, vox_angle)
    496                                        self.image_size)
    497         vox_tuning = vox_tuning.unsqueeze(1).unsqueeze(1)
--> 498         return torch.sum(vox_tuning * self.filters, (-1, -2), keepdim=True).unsqueeze(1)
    499 
    500     def create_prfs(self, vox_ecc, vox_angle, vox_sigma):

RuntimeError: The size of tensor a (256) must match the size of tensor b (1080) at non-singleton dimension 4
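
The line in the traceback shows what the weights are: the dot product between the voxel's spatial frequency tuning image and each filter's frequency mask, giving one scalar per (scale, orientation) sub-band. A self-contained sketch with made-up shapes (the 256 vs. 1080 mismatch above presumably reflects the failed full-resolution prepare_image_computable call leaving the model in a mixed state):

n_vox, n_scales, n_orients, size = 1, 9, 6, 256
vox_tuning = torch.rand(n_vox, size, size).unsqueeze(1).unsqueeze(1)  # (n_vox, 1, 1, H, W)
filters_ = torch.rand(n_scales, n_orients, size, size)
weights = torch.sum(vox_tuning * filters_, (-1, -2), keepdim=True).unsqueeze(1)
weights.shape  # (n_vox, 1, n_scales, n_orients, 1, 1)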

With these weights, we take a weighted sum over the energy to get the feature map for this voxel.


In [138]:
reweighted_energy = (weights * model.energy).sum((2, 3))
fig = pt.imshow(reweighted_energy[0,0].detach(), zoom=.25);
#fig.savefig('reweighted_energy.svg')
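
Reading the shapes off the tracebacks, the bookkeeping for this weighted sum is (inferred, so treat as an assumption):

# weights:      (n_vox, 1,         n_scales, n_orients, 1, 1)
# model.energy: (1,     n_classes, n_scales, n_orients, H, W)
# product:      (n_vox, n_classes, n_scales, n_orients, H, W)
# .sum((2, 3)): (n_vox, n_classes, H, W), one feature map per voxel and class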


And now we just create the pRF and drop it on the feature map.


In [76]:
prf = model.create_prfs(vox_ecc, vox_ang, vox_sig)
fig = pt.imshow(prf[0,0], zoom=.25);
#fig.savefig('prf.svg')
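
create_prfs is the model's own method; for intuition, here's a sketch of a standard 2D isotropic Gaussian pRF (the parameterization and normalization are assumptions, not sfp's exact code, and max_ecc is a hypothetical stimulus radius):

def gaussian_prf(ecc, angle, sigma, size=256, max_ecc=12):
    # pixel grid in degrees of visual angle, assuming the image spans +/- max_ecc
    x = np.linspace(-max_ecc, max_ecc, size)
    xx, yy = np.meshgrid(x, x)
    # pRF center, converted from polar (ecc, angle) to Cartesian coordinates
    x0, y0 = ecc * np.cos(angle), ecc * np.sin(angle)
    prf = np.exp(-((xx - x0)**2 + (yy - y0)**2) / (2 * sigma**2))
    # normalize to unit sum so that pRF size doesn't change the overall gain
    return prf / prf.sum()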


And we have a prediction!


In [26]:
pt.imshow((prf * reweighted_energy).detach()[0,5], zoom=.25);


We actually sum across the entire pRF, as shown below.


In [27]:
(prf * reweighted_energy[0,5]).sum((-1,-2))


Out[27]:
tensor([[1.6282e+16]], grad_fn=<SumBackward2>)

Simple tests

A couple of simple tests to make sure we're handling normalization correctly (a sketch of the corresponding checks follows the list):

  1. Given a uniform spatial frequency preference, a bunch of uniform gratings should all have the same prediction.

  2. If we're normalizing the pRFs correctly, voxels that are identical except for their pRF size should have the same prediction.
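
Concretely, if the model is behaving, something like the following should hold (a sketch; predictions here is the (n_voxels, n_classes) array the model returns):

def check_flat_preference(predictions, rtol=1e-2):
    # test 1: with a flat SF preference, every grating class should receive
    # (approximately) the same prediction
    return np.allclose(predictions, predictions.mean(axis=-1, keepdims=True), rtol=rtol)

def check_prf_size_invariance(preds_small_sigma, preds_large_sigma, rtol=1e-2):
    # test 2: voxels identical except for pRF size should predict the same values
    return np.allclose(preds_small_sigma, preds_large_sigma, rtol=rtol)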


In [13]:
# we keep these small to make the computations faster
stim = []
stim_df = []
freqs = [.5, 1, 2, 4]
phases = np.linspace(0, 2*np.pi, 8, endpoint=False)
for i, f in enumerate(freqs):
    for p in phases:
        # horizontal, vertical, and oblique gratings at each frequency;
        # components of f/sqrt(2) along each axis give a 45-degree grating
        # whose total frequency is f, matching the other two orientations
        stim.append(sfp.utils.create_sin_cpd(256, 0, f, p, stim_rad_deg=3))
        stim_df.append(pd.DataFrame({'size_pix': 256, 'w_x': 0, 'w_y': f, 'phi': p, 'size_deg': 3, 'class_idx': 3*i}, index=[0]))
        stim.append(sfp.utils.create_sin_cpd(256, f, 0, p, stim_rad_deg=3))
        stim_df.append(pd.DataFrame({'size_pix': 256, 'w_x': f, 'w_y': 0, 'phi': p, 'size_deg': 3, 'class_idx': 3*i+1}, index=[0]))
        stim.append(sfp.utils.create_sin_cpd(256, f/np.sqrt(2), f/np.sqrt(2), p, stim_rad_deg=3))
        stim_df.append(pd.DataFrame({'size_pix': 256, 'w_x': f/np.sqrt(2), 'w_y': f/np.sqrt(2), 'phi': p, 'size_deg': 3, 'class_idx': 3*i+2}, index=[0]))

stim = np.array(stim)
stim_df = pd.concat(stim_df).reset_index(drop=True)
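
Since the oblique gratings are built from x and y components, it's worth confirming that each class's total spatial frequency (the vector magnitude of (w_x, w_y)) comes out as intended:

stim_df['total_cpd'] = np.sqrt(stim_df.w_x**2 + stim_df.w_y**2)
# each triplet of classes should share a frequency: .5, 1, 2, or 4 cpd
stim_df.groupby('class_idx').total_cpd.first()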

In [35]:
energy, filters = sfp.image_computable.calc_energy_and_filters(stim, stim_df)

In [107]:
# sigma=100 gives an essentially flat spatial frequency preference (as test 1
# requires), and sf_ecc_slope=0 makes the preference independent of eccentricity
model = sfp.model.LogGaussianDonut(vary_amplitude=False, sigma=100, sf_ecc_intercept=1, sf_ecc_slope=0)
model.prepare_image_computable(energy, filters, 3)

In [155]:
vox_ecc = [2, 1]
vox_ang = [0, 0]

As before, based on their locations in the visual field, we know each voxel's tuning: the weight at each spatial frequency.


In [156]:
vox_tuning = []
for e, a in zip(vox_ecc, vox_ang):
    vox_tuning.append(model.create_image(e, a).detach().numpy()[0])
pt.imshow(vox_tuning, zoom=2);



In [180]:
# build the model input by hand: for each voxel, one row per stimulus class,
# with columns (class_idx, ecc, angle, sigma); both voxels get sigma=1
voxels = np.array([[vox_ecc[0], vox_ang[0], 1], [vox_ecc[1], vox_ang[1], 1]])
voxels = np.repeat(np.expand_dims(voxels, 1), 12, 1)

stim_idx = stim_df.class_idx.unique()
stim_idx = np.repeat(np.reshape(stim_idx, (1, 12, 1)), 2, 0)

inputs = np.dstack([stim_idx, voxels])

In [181]:
predictions = model.image_computable(torch.tensor(inputs, dtype=torch.float32)).detach().numpy()

In [182]:
results_df = stim_df.drop_duplicates('class_idx')

tmp = pd.DataFrame(predictions.T)
tmp['class_idx'] = stim_df.class_idx.unique()

tmp = pd.melt(tmp, ['class_idx'], var_name='voxel', value_name='prediction')

# attach each voxel's pRF location to its predictions
tmp = tmp.set_index('voxel')
for i in range(len(vox_ecc)):
    tmp.loc[i, 'ecc'] = vox_ecc[i]
    tmp.loc[i, 'angle'] = vox_ang[i]
tmp = tmp.reset_index()

results_df = pd.merge(tmp, results_df)

In [183]:
weights = model.image_computable_weights(vox_ecc, vox_ang).squeeze()
for i in range(len(weights)):
    w = weights[i]
    plt.scatter(np.array(range(w.nelement())), w.detach())



In [188]:
## I think this comes from the energy not being correctly normalized
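
One hedged diagnostic for that hypothesis: the steerable pyramid's frequency masks are designed to tile, so summing the squared filters over scale and orientation should give a roughly flat map across the passband; large deviations would mean some frequencies contribute more energy than others.

# sketch, assuming the test filters have shape (scale, orientation, H, W)
tiling = (filters ** 2).sum((0, 1))
pt.imshow(tiling, zoom=2);  # should be roughly constant, except near DC and Nyquist
print(tiling.min(), tiling.max())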

In [184]:
plt.plot(predictions.T);



In [179]:
pt.imshow(stim[::8,:,:], col_wrap=3);