In [2]:
def poisson(n, scale=.01):
    """Spike times of a homogeneous Poisson process.

    Draws `n` exponential inter-spike intervals with mean `scale`
    (i.e. rate 1/scale) and returns their cumulative sum: a sorted,
    strictly increasing array of `n` spike times.

    `scale` defaults to the previously hard-coded .01, so existing
    calls `poisson(n)` behave exactly as before.
    """
    return cumsum(exponential(size=n, scale=scale))

In [3]:
def brian(T1, T2, width=.02, bin=.001, T=None):
    """Cross-correlogram of two sorted spike trains.

    For each spike t in T1, collects the delays of every T2 spike lying
    within +/- `width` of t, then histograms those delays into 2*n bins
    of size `bin` spanning [-width, width] (n = ceil(width / bin)).
    Returns the histogram counts, or None if either train is empty.
    `T` is accepted for interface compatibility and unused.
    """
    if len(T1) == 0 or len(T2) == 0:
        return None
    n = int(np.ceil(width / bin))
    delays = []
    lo = hi = 0
    for t in T1:
        # Both trains are sorted, so the window bounds only ever move
        # forward as t increases (searchsorted would also work here).
        while lo < len(T2) and T2[lo] < t - width:
            lo += 1
        while hi < len(T2) and T2[hi] < t + width:
            hi += 1
        delays.extend(T2[lo:hi] - t)
    edges = np.arange(2 * n + 1) * bin - n * bin
    counts, _ = np.histogram(delays, bins=edges)
    return counts

In [4]:
def compute1():
    """Reference implementation: one `brian` call per cluster pair.

    Only the upper triangle (b >= a) is computed; the lower half is
    symmetric and therefore skipped.  Relies on the module-level
    `nclusters` and `trains` globals.
    """
    return dict(((a, b), brian(trains[a], trains[b]))
                for a in xrange(nclusters)
                for b in xrange(a, nclusters))

In [28]:
def compute2():
    """Correlograms for all cluster pairs from the merged, sorted train.

    Single pass over the time-sorted merged spike train (`fulltrain`,
    labelled by `clusters`): for each spike, walk forward then backward
    collecting delays of neighbors within the half-width.  Delays are
    recorded only when cl1 >= cl0 so each unordered pair is tallied once,
    under the key (cl0, cl1).  Returns a dict mapping (i, j) to histogram
    counts over 2*n bins spanning [-width, width].

    Fix vs. the original: the `break` in both inner scans was gated on
    `cl1 >= cl0`, so whenever the neighbor's cluster was smaller the loop
    kept scanning arbitrarily far beyond the window (worst case O(n) per
    spike).  Since the train is sorted, once a neighbor falls outside the
    window every further one does too — so we test the window first and
    break unconditionally.  The set of recorded delays is unchanged.
    """
    bin = .001
    width = .02
    n = int(np.ceil(width / bin))
    # initialize an empty delay list for every ordered pair
    corr = {}
    for i in xrange(nclusters):
        for j in xrange(nclusters):
            corr[(i, j)] = []
    for i in xrange(nspikes):
        # current spike and its cluster
        t0 = fulltrain[i]
        cl0 = clusters[i]
        # forward in time up to the correlogram half-width
        # (starts at j = i, so the zero-lag self pair is included,
        # matching the original behavior)
        j = i
        while j < nspikes:
            t1 = fulltrain[j]
            if t1 > t0 + width:
                break  # sorted train: all later spikes are out of window
            if clusters[j] >= cl0:  # avoid computing symmetric pairs twice
                corr[(cl0, clusters[j])].append(t1 - t0)
            j += 1
        # backward in time up to the correlogram half-width
        j = i - 1
        while j >= 0:
            t1 = fulltrain[j]
            if t1 < t0 - width:
                break  # sorted train: all earlier spikes are out of window
            if clusters[j] >= cl0:  # avoid computing symmetric pairs twice
                corr[(cl0, clusters[j])].append(t1 - t0)
            j -= 1
    # bin each pair's delay list into the correlogram histogram
    edges = np.arange(2 * n + 1) * bin - n * bin
    for i in xrange(nclusters):
        for j in xrange(nclusters):
            corr[(i, j)], _ = histogram(corr[(i, j)], bins=edges)
    return corr

In [21]:
# Synthetic dataset: one long Poisson spike train whose spikes are
# randomly assigned to `nclusters` clusters, plus the per-cluster
# sub-trains used by the pairwise implementation.
nspikes = 50000
nclusters = 50
fulltrain = poisson(nspikes)  # sorted spike times of the merged train
clusters = randint(low=0, high=nclusters, size=nspikes)  # cluster label per spike
trains = [fulltrain[clusters==i] for i in xrange(nclusters)]  # spike times per cluster

In [22]:
# Cross-check the two implementations: every upper-triangle cluster
# pair must produce identical histograms.  No output means they agree.
corr1 = compute1()
corr2 = compute2()
for i in xrange(nclusters):
    for j in xrange(i, nclusters):
        if not array_equal(corr1[(i,j)], corr2[(i,j)]):
            print i, j  # report any mismatching pair

In [7]:


In [23]:
%timeit -r 1 -n 1 compute1()


1 loops, best of 1: 8.43 s per loop

In [31]:
%timeit -r 1 -n 1 compute2()


1 loops, best of 1: 791 ms per loop

Other tests: timing the two window-extraction strategies (vectorized mask vs. pure-Python scan)


In [10]:
# Check which is quicker for extracting the sub-train within +/- 20 ms
# of a reference spike: an unoptimized vectorized mask over the whole
# train (f1) vs. an optimized pure-Python scan that exploits the train
# being sorted (f2).
i = len(fulltrain)/2  # index of the reference spike (Python 2 integer division)
t = fulltrain[i]      # reference spike time

# first solution
def f1():
    """Vectorized extraction: boolean-mask the full train to the
    +/- 20 ms window around the global reference time `t`, then
    return the delays relative to t."""
    in_window = (fulltrain >= t - .02) & (fulltrain <= t + .02)
    return fulltrain[in_window] - t

# second solution
def f2():
    """Pure-Python extraction relying on `fulltrain` being sorted.

    Walks forward then backward from the global index `i`, collecting
    delays (t2 - t) while spikes remain within +/- 20 ms of the global
    reference time `t`; returns them as a sorted array.

    Fix vs. the original: both scans indexed `fulltrain[k]` without
    bounds checks.  Near the end of the train the forward scan raised
    IndexError; near the start the backward scan would reach k = -1 and
    silently wrap around to the end of the array (Python negative
    indexing), producing wrong results.  Behavior away from the train's
    edges is unchanged.
    """
    x2 = []
    k = i
    while k < len(fulltrain):  # bound check: stop at end of train
        t2 = fulltrain[k]
        if t2 <= t + .02:
            x2.append(t2 - t)
        else:
            break
        k += 1
    k = i - 1
    while k >= 0:  # bound check: prevent negative-index wraparound
        t2 = fulltrain[k]
        if t2 >= t - .02:
            x2.append(t2 - t)
        else:
            break
        k -= 1
    x2 = array(x2)
    x2.sort()
    return x2
    
print array_equal(f1(), f2())


True

In [11]:
timeit -r 1000 -n 3 f1()


3 loops, best of 1000: 55.1 us per loop

In [12]:
timeit -r 1000 -n 3 f2()


3 loops, best of 1000: 13.8 us per loop