In [1]:
%matplotlib inline
from matplotlib import pylab as pl
import cPickle as pickle
import pandas as pd
import numpy as np
import os
import random

In [2]:
import sys
sys.path.append('..')

Read precomputed features

Uncomment the relevant pipeline in ../seizure_detection.py and run

cd ..
./doall data

or

./doall td
./doall tt

In [3]:
FEATURES = 'gen-8_allbands2-usf-w60-b0.2-b4-b8-b12-b30-b70'

In [4]:
# Parse the FEATURES name: count the band-edge tokens (those starting
# with 'b') and read the window count from the 'w...' token.
nbands = 0
nwindows = 0
for token in FEATURES.split('-'):
    head, rest = token[0], token[1:]
    if head == 'b':
        nbands += 1
    elif head == 'w':
        nwindows = int(rest)

# N band edges delimit N-1 frequency bands.
nbands -= 1
nbands, nwindows


Out[4]:
(5, 60)

In [5]:
NUNITS = 1

In [6]:
# Project-local helper ('..' was added to sys.path above) that loads
# precomputed feature files from the ../data-cache directory.
from common.data import CachedDataLoader
cached_data_loader = CachedDataLoader('../data-cache')

In [7]:
def read_data(target, data_type):
    """Load the cached feature file for one subject and segment type.

    target    -- subject name, e.g. 'Dog_1' or 'Patient_2'
    data_type -- 'preictal', 'interictal' or 'test'

    Prints the cache file name and returns whatever
    cached_data_loader.load() yields for it (an object with an .X
    attribute, per the callers below).
    """
    fname = 'data_%s_%s_%s'%(data_type,target,FEATURES)
    print fname
    return cached_data_loader.load(fname,None)

Predict


In [8]:
def process(X, percentile=[0.1,0.5,0.9],nunits=NUNITS):
    N, Nf = X.shape
    print '# samples',N,'# power points', Nf
    print '# channels', Nf / (nbands*nwindows)
    
    newX = []
    for i in range(N):
        nw = nwindows//nunits
        windows = X[i,:].reshape((nunits,nw,-1))
        sorted_windows = np.sort(windows, axis=1)
        features = np.concatenate([sorted_windows[:,int(p*nw),:] for p in percentile], axis=-1)
        newX.append(features.ravel())
    newX = np.array(newX)

    return newX

In [10]:
from sklearn import preprocessing
from nolearn.dbn import DBN
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

# Both scalers are re-fitted per target inside the prediction loop below
# (fit_transform is called there), so constructing them once here is fine.
scale = StandardScaler()

min_max_scaler = preprocessing.MinMaxScaler() # scale features to be [0..1] which is DBN requirement

# Deep belief network classifier.  The -1 layer sizes are placeholders:
# the loop below calls dbn.set_params(layer_sizes=[NF, 300, 2]) per target
# because the feature count NF differs between subjects.
dbn = DBN(
    [-1, 300, -1], # first layer has size X.shape[1], hidden layer(s), last layer will have number of classes in y (2))
    learn_rates=0.3,
    learn_rate_decays=0.9,  # learning rate is multiplied by this each epoch
    epochs=100,
    dropouts=[0.1,0.5],     # dropout on input layer and hidden layer
    verbose=0,
    )


gnumpy: failed to import cudamat. Using npmat instead. No GPU will be used.

In [22]:
# Open the submission file and write its CSV header; one row per test
# segment is appended by the prediction loop below.
fpout = open('../submissions/141105-predict.4.csv','w')
print >>fpout,'clip,preictal'

In [23]:
# Train a fresh DBN per subject and write the predicted preictal
# probability of every test segment to the submission file.
for itarget, target in enumerate(['Dog_1', 'Dog_2', 'Dog_3', 'Dog_4', 'Dog_5', 'Patient_1', 'Patient_2']):
    pdata = read_data(target, 'preictal') # positive examples
    ndata = read_data(target, 'interictal') # negative examples
    X = np.concatenate((pdata.X, ndata.X))
    X = process(X)
    N, NF = X.shape
    tdata = read_data(target, 'test') # test examples
    Xt = process(tdata.X)

    # normalize train and test together (allowed)
    X = scale.fit_transform(np.concatenate((X, Xt)))
    X = np.clip(X,-3,3)  # clamp outliers to +/-3 standard deviations
    X = min_max_scaler.fit_transform(X)  # DBN requires inputs in [0, 1]
    Xt = X[N:,:]  # split back into train / test after joint scaling
    X = X[:N,:]
    
    dbn.set_params(layer_sizes=[NF,300,2]) # we need to reset each time because NF is different
    y = np.zeros(X.shape[0])
    y[:pdata.X.shape[0]] = 1  # preictal rows come first in X (see concatenate above)
    # shuffle
    # NOTE(review): no random seed is set, so the shuffle (and therefore
    # the trained model and submission) is not reproducible across runs.
    idxs=range(len(y))
    random.shuffle(idxs)
    X = X[idxs,:]
    y = y[idxs]
    # model
    dbn.fit(X,y)
    # predict
    
    y_proba = dbn.predict_proba(Xt)[:,1]  # probability of class 1 (preictal)
    # write results
    for i,p in enumerate(y_proba):
        print >>fpout,'%s_test_segment_%04d.mat,%.15f' % (target, i+1, p)
    # Overlay train/test feature histograms as a sanity check that the
    # joint scaling left the two distributions aligned.
    pl.subplot(4,2,itarget+1)
    pl.hist(X.ravel(),bins=50)
    pl.hist(Xt.ravel(),bins=50,alpha=0.5);


data_preictal_Dog_1_gen-8_allbands2-usf-w60-b0.2-b4-b8-b12-b30-b70
data_interictal_Dog_1_gen-8_allbands2-usf-w60-b0.2-b4-b8-b12-b30-b70
# samples 664 # power points 4800
# channels 16
data_test_Dog_1_gen-8_allbands2-usf-w60-b0.2-b4-b8-b12-b30-b70
# samples 502 # power points 4800
# channels 16
data_preictal_Dog_2_gen-8_allbands2-usf-w60-b0.2-b4-b8-b12-b30-b70
data_interictal_Dog_2_gen-8_allbands2-usf-w60-b0.2-b4-b8-b12-b30-b70
# samples 822 # power points 4800
# channels 16
data_test_Dog_2_gen-8_allbands2-usf-w60-b0.2-b4-b8-b12-b30-b70
# samples 1000 # power points 4800
# channels 16
data_preictal_Dog_3_gen-8_allbands2-usf-w60-b0.2-b4-b8-b12-b30-b70
data_interictal_Dog_3_gen-8_allbands2-usf-w60-b0.2-b4-b8-b12-b30-b70
# samples 1992 # power points 4800
# channels 16
data_test_Dog_3_gen-8_allbands2-usf-w60-b0.2-b4-b8-b12-b30-b70
# samples 907 # power points 4800
# channels 16
data_preictal_Dog_4_gen-8_allbands2-usf-w60-b0.2-b4-b8-b12-b30-b70
data_interictal_Dog_4_gen-8_allbands2-usf-w60-b0.2-b4-b8-b12-b30-b70
# samples 1541 # power points 4800
# channels 16
data_test_Dog_4_gen-8_allbands2-usf-w60-b0.2-b4-b8-b12-b30-b70
# samples 990 # power points 4800
# channels 16
data_preictal_Dog_5_gen-8_allbands2-usf-w60-b0.2-b4-b8-b12-b30-b70
data_interictal_Dog_5_gen-8_allbands2-usf-w60-b0.2-b4-b8-b12-b30-b70
# samples 680 # power points 4500
# channels 15
data_test_Dog_5_gen-8_allbands2-usf-w60-b0.2-b4-b8-b12-b30-b70
# samples 191 # power points 4500
# channels 15
data_preictal_Patient_1_gen-8_allbands2-usf-w60-b0.2-b4-b8-b12-b30-b70
data_interictal_Patient_1_gen-8_allbands2-usf-w60-b0.2-b4-b8-b12-b30-b70
# samples 188 # power points 4500
# channels 15
data_test_Patient_1_gen-8_allbands2-usf-w60-b0.2-b4-b8-b12-b30-b70
# samples 195 # power points 4500
# channels 15
data_preictal_Patient_2_gen-8_allbands2-usf-w60-b0.2-b4-b8-b12-b30-b70
data_interictal_Patient_2_gen-8_allbands2-usf-w60-b0.2-b4-b8-b12-b30-b70
# samples 180 # power points 7200
# channels 24
data_test_Patient_2_gen-8_allbands2-usf-w60-b0.2-b4-b8-b12-b30-b70
# samples 150 # power points 7200
# channels 24

In [24]:
fpout.close()

In [ ]: