In [1]:
from path import path  # Used in examples
import runtrackpy
import runtrackpy.track  # used below for identify_frame() and track2disk()
In [2]:
movdir = path('bigtracks-demo-movie').abspath()
print movdir
movdir.makedirs_p()
frames_extension = 'PNG'
frames_pattern = '*.' + frames_extension
In [3]:
import runtrackpy.test
import scipy.misc
for framenumber in range(20):
    x, y, img = runtrackpy.test.fake_image(framenumber)
    scipy.misc.imsave(movdir / ('example_%.4i.%s' % (framenumber, frames_extension)), img)
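As an optional sanity check (not part of the original workflow), we can confirm that all 20 frames were written:
print len(movdir.glob(frames_pattern)) # expect 20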
In [4]:
print runtrackpy.track.__doc__
OK, let's load a sample frame, set some basic parameters, and try them out.
In [5]:
imgfiles = sorted(movdir.glob(frames_pattern)) # glob() does not guarantee order; sort so frames are sequential
im = imread(imgfiles[0])
def crop():
    # If you wish, set a restricted view here, so you can zoom in on details.
    #xlim(0, 240)
    #ylim(0, 240)
    pass
imshow(im)
gray()
colorbar()
crop()
In [6]:
gcf().set_size_inches(10, 10)
params = dict(bright=1, featsize=5, bphigh=2, threshold=0.5, maxdisp=3 * sqrt(8))
features = runtrackpy.track.identify_frame(im, params)
imshow(im)
plot(features.x, features.y, 'r+', markersize=10)
axis('image')
crop()
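As a quick check on the parameters (an optional addition; it assumes features is a pandas DataFrame, as the .x and .y columns above suggest):
print '%i features identified' % len(features)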
We can check the quality of subpixel resolution by histogramming the fractional parts of the coordinates: a flat distribution indicates unbiased positions, whereas peaks would signal "pixel locking." (There are insufficient statistics in the example dataset to judge.)
In [7]:
hist((features.x % 1, features.y % 1));
In [8]:
outputfile = movdir / 'bigtracks.h5'
if outputfile.exists():
    outputfile.remove()
In [9]:
runtrackpy.track.track2disk(imgfiles, outputfile, params,
                            #selectframes=range(1, 3), # To speed things up, we could do just 2 frames
                            progress=True)
In [10]:
import pantracks
bt = pantracks.BigTracks(movdir / 'bigtracks.h5')
In [11]:
bt.maxframe()
Out[11]:
Assuming the file is not too big, it may be easiest to just get the whole thing.
In [12]:
bt.get_all()
Out[12]:
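The returned object behaves like a pandas DataFrame (note the head() and column attributes used below), so ordinary pandas operations apply. For example (an optional sketch, assuming the frame and particle columns used elsewhere in this notebook):
tracks = bt.get_all()
print tracks.frame.max(), len(tracks.particle.unique())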
However, the database is indexed by frame number and particle ID, so selecting on one (or both) of these quantities is very efficient: the computer knows exactly where to look in the file, and does not have to read the whole thing. For larger datasets, then, it makes sense to load single frames as you need them:
In [13]:
ftr = bt.get_frame(1)
print ftr.frame.values[0] # Which frame was this, again?
ftr.head()
Out[13]:
Or, if you're in a hurry,
In [14]:
ftr = bt[1]
print ftr.frame.values[0]
You can iterate over all frames in the file like this:
In [15]:
for fnum in bt.framerange():
    ftr = bt.get_frame(fnum)
    print fnum, len(ftr)
In [16]:
ftr = bt.query('(x < xmax) & (y < ymax)', {'xmax': 100, 'ymax': 100})
ftr.x.max(), ftr.y.max(), ftr.frame.unique()
Out[16]:
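The same condition syntax works with the indexed frame and particle columns, too. For example (a sketch reusing the query() API demonstrated above):
ftr = bt.query('frame == fnum', {'fnum': 5})
print len(ftr), ftr.frame.unique()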
Finally, the pytables Table object is available if you are adventurous, enabling a ton of advanced capabilities. The following is equivalent to the query() call in the previous example, only now we're asking for certain particles.
In [17]:
import pandas
with bt:
    ftr = pandas.DataFrame(bt.table.readWhere('(particle >= 2) & (particle <= 5)'))
ftr.particle.min(), ftr.particle.max(), ftr.frame.unique()
Out[17]:
Using the BigTracks object as a context, as already seen above, means that the file is opened just once, and so it should speed up "batch" operations:
In [18]:
%%timeit
for i in range(10):
    ftr = bt.get_frame(1)
In [19]:
%%timeit
with bt:
    for i in range(10):
        ftr = bt.get_frame(1)
In all cases, the file is never left open afterward, which avoids Bad Things if its contents are subsequently changed.
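That open-on-demand behavior is easy to mimic in your own code. Here is a minimal sketch of the pattern (a hypothetical illustration, not pantracks's actual implementation), using the PyTables API of the same vintage as the readWhere() call above:
import tables

class OpenOnDemand(object):
    """Open an HDF5 file only while inside a 'with' block (sketch only)."""
    def __init__(self, filename):
        self.filename = filename
        self._h5 = None
    def __enter__(self):
        # Open the file on entry...
        self._h5 = tables.openFile(self.filename, mode='r')
        return self._h5
    def __exit__(self, exc_type, exc_value, traceback):
        # ...and close it again on exit, even if an exception occurred.
        self._h5.close()
        self._h5 = None
A real implementation would also count nested with blocks, so that entering the context a second time neither reopens the file nor closes it prematurely.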
In [20]:
btq = pantracks.bigtracks.compute_quality(bt, frame_interval=1)
btq.head()
Out[20]:
The plot shows fluctuations in the number of features identified in each frame, as well as the fraction of particles lost since the first frame.
In [21]:
pantracks.bigtracks.plot_quality(btq)