The following notebook walks you through a basic SIMA workflow. It demonstrates how to create Sequence objects from raw TIFF files, correct motion artifacts, segment the imaged field of view into ROIs, extract fluorescence signals, and export the results.
In [1]:
# IPython magic function to set up inline plotting
%matplotlib inline
import matplotlib.pyplot as plt
In [2]:
# Download the example data for this workflow if it is not already present
from io import BytesIO
from zipfile import ZipFile
from os.path import isdir
try:
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib import urlopen  # Python 2
data_url = 'http://www.losonczylab.org/workflow_data.zip'
if not isdir('workflow_data'):
    response = urlopen(data_url)
    zipfile = ZipFile(BytesIO(response.read()))
    zipfile.extractall()
In [3]:
import sima
import sima.motion
import sima.segment
sima.__version__
Out[3]:
In [4]:
# Generate the filenames with Python list comprehensions.
tiff_filenames = [
    ['workflow_data/Cycle{n1:02d}_Ch{n2}.tif'.format(n1=cycle, n2=channel)
     for channel in range(1, 3)]
    for cycle in range(1, 16)
]
tiff_filenames
Out[4]:
In [5]:
# Finally, we construct a TIFF sequence using each of the TIFF stacks.
all_sequences = [
    [sima.Sequence.create('TIFF', chan) for chan in cycle]
    for cycle in tiff_filenames]
# Combine channels that were saved as separate TIFF stacks
sequences = [
    sima.Sequence.join(*channel_sequences)
    for channel_sequences in all_sequences]
sequences
Out[5]:
In [6]:
dataset_path = 'workflow_data/dataset.sima'
# Remove an old dataset if there is one
from os.path import isdir
if isdir(dataset_path):
    from shutil import rmtree
    rmtree(dataset_path)
correction_approach = sima.motion.HiddenMarkov2D(num_states_retained=30,
                                                 max_displacement=[20, 30],
                                                 verbose=True)
dataset = correction_approach.correct(
    sequences, dataset_path, channel_names=['tdTomato', 'GCaMP'],
    trim_criterion=0.95)
In [7]:
# Check the output
dataset
Out[7]:
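Beyond the summary printed above, a few attributes of the corrected dataset can be checked directly. This is a minimal sketch using standard ImagingDataset attributes (channel_names, frame_shape, sequences); names may vary slightly across SIMA versions.
# Hedged sanity checks on the corrected dataset (attribute names assumed
# from the ImagingDataset API; adjust for your SIMA version).
print(dataset.channel_names)   # expected: ['tdTomato', 'GCaMP']
print(dataset.frame_shape)     # (planes, rows, columns, channels)
print(len(dataset.sequences))  # one corrected sequence per imaging cycle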
In [8]:
# Export the time averages for a manuscript figure.
dataset.export_averages(['workflow_data/tdTomato.tif',
                         'workflow_data/GCaMP.tif'])
In [9]:
# Display the time averages of each channel
_, axs = plt.subplots(1, 2)
axs[0].imshow(dataset.time_averages[0, :, :, 0], cmap='gray', interpolation='none', aspect=2)
axs[1].imshow(dataset.time_averages[0, :, :, 1], cmap='gray', interpolation='none', aspect=2)
axs[0].set_title('tdTomato')
axs[0].tick_params(labelleft=False, labelbottom=False)
axs[1].set_title('GCaMP')
axs[1].tick_params(labelleft=False, labelbottom=False)
In [10]:
# Generate the output filenames with Python list comprehensions.
# NOTE: Exporting the motion corrected TIFF stacks is not necessary
# if you plan to extract signals with SIMA.
output_filenames = [
    [[channel.replace('.tif', '_corrected.tif') for channel in cycle]]
    for cycle in tiff_filenames
]
output_filenames
Out[10]:
In [11]:
# Export the corrected frames for a presentation.
dataset.export_frames(output_filenames, fill_gaps=True)
In [12]:
# Segment the field of view into ROIs using the method for CA1 pyramidal cells
# and parameters that were determined based on the imaging magnification.
segmentation_approach = sima.segment.PlaneCA1PC(
    channel='GCaMP',
    num_pcs=30,
    max_dist=(3, 6),
    spatial_decay=(3, 6),
    cut_max_pen=0.10,
    cut_min_size=50,
    cut_max_size=150,
    x_diameter=14,
    y_diameter=7,
    circularity_threhold=.5,
    min_roi_size=20,
    min_cut_size=40
)
In [13]:
# The ROIs are both returned and saved in the dataset under the given label (in this case 'auto_ROIs')
auto_rois = dataset.segment(segmentation_approach, label='auto_ROIs')
In [14]:
# Check the results
plt.imshow(dataset.time_averages[0, :, :, 1], cmap='gray', interpolation='none', aspect=2)
xlim, ylim = plt.xlim(), plt.ylim()
for roi in dataset.ROIs['auto_ROIs']:
    for poly in roi.coords:
        plt.plot(poly[:, 0], poly[:, 1])
plt.xlim(xlim)
plt.ylim(ylim)
plt.tick_params(labelleft=False, labelbottom=False)
At this point you can also check and edit the ROIs with the ROI Buddy GUI (https://github.com/losonczylab/roibuddy).
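If you do create or edit ROI sets in ROI Buddy, one quick way to confirm what is stored in the dataset is to list the saved ROI labels; a minimal sketch reusing the dataset.ROIs accessor from the cell above:
# List the ROI sets saved with this dataset; 'auto_ROIs' should appear,
# along with any sets added or renamed in ROI Buddy.
dataset.ROIs.keys()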
In [15]:
# Reload the dataset in case any changes have been made.
dataset = sima.ImagingDataset.load(dataset_path)
In [16]:
# Extract the signals. By default, the most recently created ROIs are used.
# Signals will also be saved under the given label ('GCaMP_signals' in this case)
extracted_signals = dataset.extract(signal_channel='GCaMP', label='GCaMP_signals')
In [17]:
# Check the output
"""Each extraction produces a dictionary with the following keys:
mean_frame -- the time averaged image of the extracted channel (zyx)
overlap -- coordinates (of flattened image) of pixels that are contained in multiple ROIs
raw -- raw extracted signal
rois -- a copy of the ROI objects used for the extraction
signal_channel -- index of channel that was extracted
timestamp -- date and time of the extraction
"""
signals = dataset.signals('GCaMP')['GCaMP_signals']
signals.keys()
Out[17]:
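Besides listing the keys, it can be useful to glance at the extraction metadata itself. A small sketch based on the keys documented in the docstring above; exact contents depend on your SIMA version.
# Inspect a few pieces of extraction metadata (keys as documented above).
print(signals['timestamp'])        # when the extraction was run
print(signals['signal_channel'])   # index of the extracted channel
print(len(signals['rois']))        # number of ROIs used for extraction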
In [18]:
# List of arrays, one array per cycle/trial...
len(signals['raw'])
Out[18]:
In [19]:
# Each array is number of ROIs by number of frames
signals['raw'][0].shape
Out[19]:
In [20]:
# Export the extracted signals to a CSV file.
# As with the corrected TIFF stacks, this step is not necessary if you are planning to
# continue the analysis in Python
dataset.export_signals('example_signals.csv', channel='GCaMP',
                       signals_label='GCaMP_signals')
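If you want a quick look at the exported file without committing to a particular parser, you can simply print its first few lines; the exact layout of the CSV depends on the SIMA version, so this sketch makes no assumptions about it.
# Peek at the beginning of the exported CSV; the header layout is
# version-dependent, so just print whatever was written.
with open('example_signals.csv') as f:
    for _ in range(5):
        print(f.readline().rstrip())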
In [21]:
# Perform a simple dF/F calculation and plot the signal from an ROI,
# with a different color for each cycle
from builtins import range
from numpy import nanmean
def dff(trace):
    return (trace - nanmean(trace)) / nanmean(trace)
raw_signals = dataset.signals('GCaMP')['GCaMP_signals']['raw']
for sequence in range(3):  # plot data from the first 3 cycles
    plt.plot(dff(raw_signals[sequence][34]))  # plot the data from the 35th ROI
plt.xlabel('Frame')
plt.ylabel(r'$\Delta$F/F')
plt.title('Signal from ROI #35 for first 3 trials');
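The same dff() helper can be applied to every ROI in a cycle at once to build a dF/F matrix; a minimal sketch using NumPy (the variable name dff_matrix is just for illustration):
import numpy as np
# Apply dff() to each ROI trace in the first cycle, giving an
# (n_ROIs, n_frames) array of dF/F values.
dff_matrix = np.array([dff(trace) for trace in raw_signals[0]])
dff_matrix.shape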