"The 64,000 um3 box imaged at high resolution had roughly 13.7 million cell profiles in its 1850 sections."
In [1]:
# Set up the connection to the NeuroData image service via ndio.
# The names defined here (nd, token, channel, startTime) are used by the
# cells below, so this cell must run first.
import numpy
import ndio.remote.neurodata as neurodata
from datetime import datetime
nd = neurodata()  # remote handle for all metadata/volume queries below
token = 'kat11segments'  # project token for the segment annotations ("kat11" -- presumably Kasthuri et al.; confirm)
channel = 'annotation'
startTime = datetime.now()  # wall-clock start, printed after the analysis
In [2]:
# Get image properties
res = 3
image_size = nd.get_metadata(token)['dataset']['imagesize'][str(res)]
print image_size
total_voxels = image_size[0]*image_size[1]*image_size[2]
pixel_dim = 0.024*0.024*0.30 #in microns TODO: get from LIMS
total_volume = total_voxels*pixel_dim
print 'total volume: ' + str(total_volume) + ' um^3'
In [3]:
# Loop through each layer, counting the number of unique values.
# Definitely faster ways to do this (retrieving data in blocks), but this is most explicit and clear
unique_count = 0  # running total of 2-D connected components over all slices
num_pixels = 0    # running total of annotated (non-zero) voxels
import skimage.measure
for i in range(1, image_size[2]+1): #Gets last slice
    # Fetch one z-slice at a time (z range [i, i+1) is a single section).
    im = nd.get_volume(token, channel, 0, image_size[0], 0, image_size[1], i, i+1, resolution=res)
    # Label 2-D connected components within this slice; n is the component
    # count.  Objects spanning multiple sections are counted once per
    # section they appear in, i.e. these are per-slice "profiles".
    label, n = skimage.measure.label(im.cutout, return_num=True)#
    unique_count += n
    num_pixels += sum(sum(sum(im.cutout>0)))
    # Progress indicator; trailing comma keeps output on one line (Python 2).
    print str(i).zfill(4),
In [4]:
print 'Number of contours: ' + str(unique_count)
print 'Number of labeled pixels: ' + str(num_pixels)
vol_labeled = num_pixels * pixel_dim
frac_labeled = float(num_pixels) / total_voxels
print 'Volume labeled: ' + str(vol_labeled) + ' um^3'
print 'The percentage of the volume that is labeled: ' + str(frac_labeled * 100)
print 'Number of unique contours: ' + str(unique_count)
print datetime.now() - startTime
In [5]:
# Count the total number of neuron fragments individually labeled in the volume
import numpy as np
from datetime import datetime
startTime = datetime.now()
token = 'kasthuri2015_ramon_v4'
channel = 'neurons'
res = 3
pixel_dim = 0.024*0.024*0.030 #can get from LIMS
import ndio.ramon as ramon
# Don't count all objects, because RAMONNeuron paint is already counted in RAMONSegments
# Segments in cylinder
segment_ids_cyl = nd.get_ramon_ids(token, channel, ramon_type=ramon.RAMONSegment)
# Segments in volume are not RAMONified, so doing the hard way
# TODO - RAMONIFY
# Switch to the whole-volume segment project and scan it in z-chunks.
token = 'kat11segments'
channel = 'annotation'
res = 3
image_size = nd.get_metadata(token)['dataset']['imagesize'][str(res)]
unique_count = []  # accumulates the unique label IDs seen in each chunk
for i in range(1, image_size[2]+1, 16): #TODO hardcoded z
    # Progress indicator (trailing comma keeps output on one line).
    print str(i).zfill(4),
    z_start = i
    # Clamp the chunk end so the last request does not run past the volume.
    z_stop = np.min([image_size[2]+1, i + 16])
    im = nd.get_volume(token, channel, 0, image_size[0], 0, image_size[1], z_start, z_stop, resolution=res)
    # Append this chunk's unique IDs; duplicates across chunks are removed
    # by the final np.unique below.
    unique_count = np.concatenate([np.ravel(unique_count),np.ravel(np.unique(im.cutout))])
segment_ids_all = np.shape(np.unique(unique_count))[0] - 1 #remove 0 label
print datetime.now() - startTime
print 'Segments in cylinder: ' + str(np.shape(segment_ids_cyl)[0]) + ' Total segments: ' + str(segment_ids_all)
In [6]:
# The non-cellular (extracellular) space accounts for 6% of the total volume, less than half the extracellular space estimates from living brains.
import numpy as np
from datetime import datetime
startTime = datetime.now()
token = 'kasthuri2015_ramon_v4'
channel = 'neurons'
res = 3
pixel_dim = 0.024*0.024*0.030 #can get from LIMS
import ndio.ramon as ramon
# A priori known bounds for cylinders. Alternatively we could sweep over entire volume - this is more efficient.
# TODO: assume that all synapses are inside cylinders, which we know to be true - should do with manual masking or a
# RAMONId predicate query
xbox = [694,1794];
ybox = [1750, 2460];
zbox = [1004, 1379];
# These calls take about 60 seconds to execute
gcyl = nd.get_volume('kat11greencylinder','annotation', xbox[0], xbox[1], ybox[0], ybox[1], zbox[0], zbox[1], resolution = res)
rcyl = nd.get_volume('kat11redcylinder','annotation', xbox[0], xbox[1], ybox[0], ybox[1], zbox[0], zbox[1], resolution = res)
bcyl = nd.get_volume('kat11mojocylinder','annotation', xbox[0], xbox[1], ybox[0], ybox[1], zbox[0], zbox[1], resolution = res)
seg_masked = nd.get_volume(token, channel, xbox[0], xbox[1], ybox[0], ybox[1], zbox[0], zbox[1], resolution = res)
mask = (gcyl.cutout + rcyl.cutout + bcyl.cutout) > 0
mask_pixels = sum(sum(sum(mask)))
mask_volume = mask_pixels * pixel_dim
print 'Mask Pixels: ' + str(mask_pixels)
print 'Mask Volume: ' + str(mask_volume) + ' um^3'
seg_pixels = sum(sum(sum(seg_masked.cutout > 0)))
seg_volume = seg_pixels * pixel_dim
print 'Mask Pixels: ' + str(seg_pixels)
print 'Mask Volume: ' + str(seg_volume) + ' um^3'
print datetime.now() - startTime
print 1 - float(seg_volume) / mask_volume
In [7]:
### Compute cylinder volumes and annotated data
# We compute these cylinder volumes at low resolution, so reconstructed volume sizes are approximate
startTime = datetime.now()
token_green = 'kat11greencylinder'
token_red = 'kat11redcylinder'
token_mojo = 'kat11mojocylinder'
channel = 'annotation'
res = 7
pixel_dim = 0.003*(2**res)*0.003*(2**res)*0.030 #can get from LIMS
imsize = nd.get_image_size(token_green, resolution=res)
offset = nd.get_image_offset(token_green, resolution=res)
green = nd.get_volume(token_green, channel, offset[0], imsize[0],
offset[1], imsize[1], offset[2], imsize[2], resolution=res)
red = nd.get_volume(token_red, channel, offset[0], imsize[0],
offset[1], imsize[1], offset[2], imsize[2], resolution=res)
mojo = nd.get_volume(token_mojo, channel, offset[0], imsize[0],
offset[1], imsize[1], offset[2], imsize[2], resolution=res)
g_size = np.sum(np.ravel(green.cutout>0))
r_size = np.sum(np.ravel(red.cutout>0))
m_size = np.sum(np.ravel(mojo.cutout>0))
mask = green.cutout+red.cutout+mojo.cutout > 0
total_size = np.sum(np.ravel(mask))
print 'Cylinder 1 (Red) is approximately: {} um^3'.format(r_size * pixel_dim)
print 'Cylinder 1 (Green) is approximately: {} um^3'.format(g_size * pixel_dim)
print 'Cylinder 3 (Mojo) is approximately: {} um^3'.format(m_size * pixel_dim)
print 'The total (overlapping volume) of the cylinders is approximately: {} um^3'.format(total_size * pixel_dim)