This notebook loads the data corresponding to Figure 8, both the low- and high-magnification data sets. For each one we compute a mean image, overlay the extracted sources, extract fluorescence traces, and compute correlations; the high-magnification volume is additionally registered plane-by-plane before extraction.
In [30]:
from showit import image
%matplotlib inline
In [31]:
import seaborn as sns
sns.set_context('paper', font_scale=2.0)
sns.set_style('ticks')
In [3]:
from thunder.images import frombinary
path = 's3://neuro-datasets/svoboda-lab/2pRAM-paper/00'
# load only the first 100 of the 2628 frames because the full data set is large
data = frombinary(path + '/images', stop=100)
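If a different stretch of the movie is wanted, a start offset can be passed alongside stop (a sketch assuming thunder's frombinary accepts the matching start keyword):

# hypothetical example: load frames 100-199 instead of the first 100
chunk = frombinary(path + '/images', start=100, stop=200)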
In [4]:
data
In [5]:
mean = data.mean().toarray()
In [6]:
image(mean, clim=(0, 2500), size=12);
In [7]:
from json import loads
from regional import many
from requests import get
path = 'https://s3.amazonaws.com/neuro-datasets/svoboda-lab/2pRAM-paper/00'
r = get(path + '/sources/sources.json').content
sources = loads(r)
regions = many([x['coordinates'] for x in sources])
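Each entry in sources.json is a dict whose 'coordinates' field lists the pixels belonging to one source, which is the form regional's many constructor consumes. A tiny self-contained sketch with made-up coordinates (demo_sources and demo are hypothetical, for illustration only):

from regional import many
# two hypothetical sources, each a list of [y, x] pixel coordinates
demo_sources = [
    {'coordinates': [[10, 10], [10, 11], [11, 10]]},
    {'coordinates': [[40, 42], [40, 43], [41, 42]]},
]
demo = many([s['coordinates'] for s in demo_sources])
demo.count  # 2 regions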
In [8]:
img = regions.mask(base=(mean / 2500).clip(0, 1), stroke='orange', fill=None)
image(img, size=12);
In [9]:
from extraction.model import ExtractionModel
em = ExtractionModel(regions)
traces = em.transform(data)
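With a fixed set of regions, the extraction step amounts to averaging the pixels inside each region on every frame. A minimal NumPy sketch of that presumed computation (mean_traces is an illustrative helper, not the extraction package's actual code):

from numpy import array

def mean_traces(frames, coordinate_lists):
    # frames: (t, y, x) array; coordinate_lists: one [y, x] list per region
    out = []
    for coords in coordinate_lists:
        ys = [c[0] for c in coords]
        xs = [c[1] for c in coords]
        out.append(frames[:, ys, xs].mean(axis=1))  # mean over the region's pixels
    return array(out)  # shape (regions, t)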
In [10]:
from numpy import array
# convert frame indices to seconds; frames were acquired at 4.38 Hz
time = array([float(x) / 4.38 for x in range(traces.shape[2])])
In [11]:
from matplotlib.pyplot import plot, xlim, xlabel, ylabel
plot(time, traces[0, 0].values.T.squeeze());
xlabel('time (s)', fontsize=14);
xlim([0, 25]);
ylabel('F (a.u.)', fontsize=14);
In [12]:
records = data.uniform_filter(5).toseries()
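uniform_filter(5) smooths each frame with a 5 x 5 box filter, and toseries flips the layout so that every pixel becomes one record holding its full time series. In plain NumPy/SciPy the equivalent is roughly (a sketch; images_to_series is illustrative):

import numpy as np
from scipy.ndimage import uniform_filter

def images_to_series(frames, size=5):
    # frames: (t, y, x); box-filter each frame spatially, then reshape so
    # every pixel is one record of shape (t,)
    smoothed = np.stack([uniform_filter(f, size=size) for f in frames])
    return smoothed.reshape(len(frames), -1).T  # shape (pixels, t)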
In [13]:
records
In [14]:
corr = records.correlate(traces[0, 0].values[0]).toarray()
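correlate scores every pixel's time series against the first neuron's trace, yielding a correlation image. The per-record operation is a Pearson correlation, roughly (correlate_records is an illustrative helper):

import numpy as np

def correlate_records(records, signal):
    # records: (n, t) array of time series; signal: (t,) reference trace
    z = records - records.mean(axis=1, keepdims=True)
    z /= z.std(axis=1, keepdims=True)
    s = (signal - signal.mean()) / signal.std()
    return (z * s).mean(axis=1)  # Pearson r for each record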
In [15]:
image(corr, clim=(0, 0.7), size=12);
In [16]:
from thunder.images import frombinary
path = 's3://neuro-datasets/svoboda-lab/2pRAM-paper/01'
# load only the first 100 of the 6000 frames because the full data set is large
data = frombinary(path + '/images', stop=100)
In [17]:
data
In [18]:
from registration import CrossCorr
# use first 60 frames as a reference
reference = data[:60].mean().toarray()
# perform registration on each plane independently
algorithm = CrossCorr(axis=0)
model = algorithm.fit(data, reference)
registered = model.transform(data)
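CrossCorr presumably estimates a rigid translation for each frame by cross-correlating it against the reference, independently per plane given axis=0. A minimal sketch of the same idea for a single plane, using scikit-image's phase correlation instead of the registration package (register_plane is illustrative):

import numpy as np
from scipy.ndimage import shift as apply_shift
from skimage.registration import phase_cross_correlation

def register_plane(frames, reference):
    # frames: (t, y, x) movie for one plane; reference: (y, x) image
    out = np.empty_like(frames, dtype=float)
    for i, frame in enumerate(frames):
        drift, _, _ = phase_cross_correlation(reference, frame)
        out[i] = apply_shift(frame, drift)  # undo the estimated drift
    return out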
In [19]:
mean = registered.mean().toarray()
In [20]:
image(mean[3], clim=(0, 2500), size=12);
In [21]:
from json import loads
from regional import many
from requests import get
path = 'https://s3.amazonaws.com/neuro-datasets/svoboda-lab/2pRAM-paper/01'
regions = []
for i in range(4):
    r = get(path + '/sources/sources-%d.json' % i).content
    sources = loads(r)
    regions.append(many([x['coordinates'] for x in sources]))
In [22]:
plane = 3
img = regions[plane].mask(base=(mean[plane] / 2500).clip(0, 1), stroke='orange', fill=None)
image(img, size=12);
In [23]:
from extraction.model import ExtractionModel
traces = []
for plane in range(4):
    em = ExtractionModel(regions[plane])
    trace = em.transform(registered[:, plane, :, :].squeeze()).normalize('window', window=500)
    traces.append(trace)
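normalize('window', window=500) converts raw fluorescence to DFF using a baseline estimated within a sliding window. Assuming thunder's defaults (a 20th-percentile baseline with a small offset), the per-trace computation is approximately (window_dff is an illustrative helper):

from numpy import float64
from scipy.ndimage import percentile_filter

def window_dff(trace, window=500, perc=20, offset=0.1):
    # sliding-window percentile baseline, then (F - F0) / (F0 + offset);
    # perc and offset are assumed thunder defaults, not values set above
    base = percentile_filter(trace.astype(float64), perc, size=window, mode='nearest')
    return (trace - base) / (base + offset)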
In [24]:
from numpy import array
# convert frame indices to seconds at the 9.66 Hz per-plane sampling rate
time = array([float(x) / 9.66 for x in range(traces[0].shape[2])])
In [25]:
from matplotlib.pyplot import plot, xlim, xlabel, ylabel
plane = 0
neuron = 20
plot(time, traces[plane][0, neuron].values.T.squeeze());
xlabel('time (s)', fontsize=14);
xlim([0, 10]);
ylabel('DFF', fontsize=14);
In [26]:
# compute maximum activity across all neurons in region 1 at each time point
peak = traces[0].max()
In [27]:
from matplotlib.pyplot import plot, xlim, xlabel, ylabel
plot(time, peak.values.T.squeeze());
xlabel('time (s)', fontsize=14);
xlim([0, 10]);
ylabel('DFF', fontsize=14);
In [28]:
# correlate each individual neuron's timeseries with the peak activity trace
corr = [t.correlate(peak).values for t in traces]
In [29]:
from numpy import linspace
from matplotlib.pyplot import hist, xlim, xlabel, ylabel
plane = 2
hist(corr[plane][0], bins=linspace(-.525, .525, 22), rwidth=0.9, edgecolor='none', color=[0.7, 0.7, 0.7]);
xlim([-.35, .35]);
xlabel('Correlation', fontsize=14);
ylabel('Neurons', fontsize=14);