In [1]:
%matplotlib inline
from matplotlib import pylab as pl
import cPickle as pickle
import pandas as pd
import numpy as np
import os
import random

In [53]:
import scipy.io
W=1600                   # number of samples kept at each segment edge
good_jump = []           # (end, start) edge pairs of segments whose sequence numbers are consecutive
bad_jump = []            # edge pairs where the sequence number jumps, i.e. the segments are not contiguous
for target in ['Dog_5']: #['Dog_1', 'Dog_2', 'Dog_3', 'Dog_4', 'Dog_5', 'Patient_1', 'Patient_2']:
    print target
    for data_type in ['preictal', 'interictal']:
        last_data = None
        for segment in range(1,1000000):
            fname = '../seizure-data/%s/%s_%s_segment_%04d.mat'%(target,target,data_type,segment)
            try:
                data = scipy.io.loadmat(fname)
            except IOError:
                break    # file does not exist: no more segments of this type
            k = '%s_segment_%d'%(data_type,segment)
            sequence = data[k]['sequence'][0,0][0,0]
            d = data[k]['data'][0,0]

            if last_data is not None:
                if last_sequence+1 == sequence:
                    good_jump.append((last_data,d[:,:W].astype(float))) # the astype copy is critical because d[:,:W] is just a view into the loaded array
                else:
                    bad_jump.append((last_data,d[:,:W].astype(float)))
            last_sequence = sequence
            last_data = d[:,-W:].astype(float)


Dog_5
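
The pairing above depends only on the sequence field stored in each .mat file. A minimal sketch with synthetic sequence numbers and random data (no files; the shapes are chosen only for illustration) shows how joins end up in good_jump versus bad_jump:

In [ ]:
# Sketch only: fake sequence numbers stand in for the real preictal/interictal segments.
seqs = [1, 2, 3, 1, 2]                     # the 3 -> 1 step is a sequence break
good, bad = [], []
last_data, last_sequence = None, None
for sequence in seqs:
    d = np.random.randn(15, 20)            # stands in for data[k]['data'][0,0]
    if last_data is not None:
        if last_sequence + 1 == sequence:
            good.append((last_data, d[:, :4].copy()))
        else:
            bad.append((last_data, d[:, :4].copy()))
    last_sequence = sequence
    last_data = d[:, -4:].copy()
print len(good), len(bad)                  # 3 joins between consecutive segments, 1 across the break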

In [54]:
len(good_jump),len(bad_jump),np.vstack((good_jump[0][0][:,-1],good_jump[0][1][:,0])).shape # consecutive joins, joins across breaks, and one (2 boundary samples x 15 channels) pair


Out[54]:
(400, 78, (2, 15))

In [61]:
d.shape,400*10*60 # sanity check: a segment holds roughly 10 minutes of 15-channel data at ~400 Hz


Out[61]:
((15, 239766), 240000)
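
The 400*10*60 figure assumes 10-minute clips sampled at a round 400 Hz; the clip is slightly shorter than that, so the effective sampling rate comes out just under 400 Hz:

In [ ]:
print 239766 / (10 * 60.)   # ~399.6 Hz, slightly short of the 240000 samples expected at exactly 400 Hz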

In [55]:
def dotscore(j1, j2):
    # Normalized negative dot product: close to -1 when j1 and j2 are nearly identical,
    # close to 0 when they are unrelated or of very different magnitude.
    return -2.*np.dot(j1,j2)/(np.dot(j1,j1) + np.dot(j2,j2))
def mydist(v1, v2):
    # Score how well the end of segment v1 lines up with the start of segment v2:
    # remove each channel's mean over the W-sample edge window, then compare the
    # last sample of v1 with the first sample of v2.
    m1 = v1[:,-W:].mean(axis=1)
    m2 = v2[:,:W].mean(axis=1)
    j1 = v1[:,-1] - m1
    j2 = v2[:,0] - m2
#     j1 = v1[:,-1]                # alternative: no mean removal
#     j2 = v2[:,0]
    score = dotscore(j1,j2)
#     q1 = v1[:,-1] - v1[:,-2]
#     q2 = v2[:,1] - v2[:,0]
#     score = -np.dot(j1,j2)/np.sqrt(np.dot(j1,j1)*np.dot(j2,j2))   # alternative: negative cosine similarity
    return score #np.sqrt(score)
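
A tiny synthetic check (random numbers, not data from the competition files) illustrates what the score measures: when the first sample after the boundary continues the last sample before it, mydist is close to -1; unrelated edges score near 0.

In [ ]:
# Illustrative only: a and c are unrelated random windows, b continues a across the boundary.
a = np.random.randn(15, W)                        # fake "end of segment" edge window
b = np.random.randn(15, W)                        # fake "start of the next segment"
b[:, 0] = a[:, -1] + 0.01*np.random.randn(15)     # ...whose first sample continues a's last sample
c = np.random.randn(15, W)                        # unrelated segment start
print mydist(a, b)                                # close to -1: continuous across the join
print mydist(a, c)                                # near 0: nothing in common across the join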

In [56]:
def proc(jumps, alpha=1):
    import scipy.spatial.distance      # only needed for the commented-out pdist variant below
    # Score every (end, start) edge pair, then summarise the distribution of scores.
    jp = np.array([])
    for jump in jumps:
        j = mydist(jump[0], jump[1])
#         j = scipy.spatial.distance.pdist(np.vstack((jump[0][:,-1],jump[1][:,0])),metric='cosine')
        jp = np.hstack((jp,j))
    print jp.shape
    print 'mean',jp.mean()
#     jp = np.abs(jp - jp.mean())
    lm = np.log(2.)/np.median(jp) # or 1/jp.mean(); rate of an exponential matched to the median
    print 'lambda',lm
    y,x,_=pl.hist(jp, bins=50, alpha=alpha,normed=True);
    print np.sum(jp <= -0.6),np.sum(jp > -0.6)   # how many scores fall on each side of a -0.6 threshold
    #pl.plot(x,(x[1]-x[0])*len(jp)*lm*np.exp(-lm*x))

In [57]:
proc(good_jump)
proc(bad_jump, alpha=0.5)


(400,)
mean -0.808175878687
lambda -0.768294859436
364 36
(78,)
mean 0.00279304713503
lambda -58.3155056291
0 78
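
For Dog_5 a cut at -0.6 separates the two cases almost perfectly: 364 of the 400 joins between consecutive segments score at or below -0.6, while none of the 78 joins across sequence breaks do. A hedged sketch of a contiguity test built on that threshold (is_contiguous is a made-up name, not defined elsewhere in this notebook):

In [ ]:
# Hypothetical helper: the -0.6 threshold is read off the counts printed above.
def is_contiguous(v1, v2, threshold=-0.6):
    # True if the end of v1 and the start of v2 look like one continuous recording.
    return mydist(v1, v2) <= threshold

print sum(is_contiguous(a, b) for a, b in good_jump)   # 364 of 400 for Dog_5
print sum(is_contiguous(a, b) for a, b in bad_jump)    # 0 of 78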
