In [1]:
%matplotlib inline
from matplotlib import pylab as pl
import cPickle as pickle
import pandas as pd
import numpy as np
import os
import random
In [2]:
import sys
sys.path.append('..')
Uncomment the relevant pipeline in ../seizure_detection.py and run:

    cd ..
    ./doall data

or

    ./doall td
    ./doall tt
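As a quick sanity check that the pipeline produced its cached features (an assumption here: doall writes pickled data_* files into ../data-cache, the directory the CachedDataLoader below points at), something like the following can be run:

    import glob
    print len(glob.glob('../data-cache/data_*')), 'cached feature files found'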
In [3]:
FEATURES = 'gen-8.5_medianwindow1-bands2-usf-w60-b0.2-b4-b8-b12-b30-b70-0.1-0.5-0.9'  # name of the cached feature set; used below to build the cached data file names
In [4]:
NUNITS = 1  # number of units each segment's feature vector is split into (1 = no splitting)
In [5]:
from common.data import CachedDataLoader
cached_data_loader = CachedDataLoader('../data-cache')
In [6]:
def read_data(target, data_type):
    fname = 'data_%s_%s_%s' % (data_type, target, FEATURES)
    print fname
    return cached_data_loader.load(fname, None)
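For example, a single cached set is loaded by target and segment type and exposes its feature matrix as .X (this call assumes the Dog_1 preictal features were generated by the pipeline above):

    pdata = read_data('Dog_1', 'preictal')
    print pdata.X.shape   # (n_segments, n_features)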
In [7]:
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import roc_auc_score
from sklearn.linear_model import LogisticRegression as LR
clf = RandomForestClassifier(n_estimators=3000, min_samples_split=1, bootstrap=False, max_depth=10,
                             # max_features='log2',
                             n_jobs=-1)
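StratifiedKFold, roc_auc_score and LR are imported but not used in this prediction run; a minimal sketch of how they could be used to cross-validate a single target, assuming X and y are built exactly as in the training loop below, is:

    # hedged sketch, not part of the submission run
    skf = StratifiedKFold(y, n_folds=3)
    aucs = []
    for itrain, itest in skf:
        clf.fit(X[itrain], y[itrain])
        p = clf.predict_proba(X[itest])[:, 1]
        aucs.append(roc_auc_score(y[itest], p))
    print 'mean AUC %.3f' % np.mean(aucs)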
In [8]:
fpout = open('../submissions/141101-predict.14.csv','w')
print >>fpout,'clip,preictal'
In [9]:
for target in ['Dog_1', 'Dog_2', 'Dog_3', 'Dog_4', 'Dog_5', 'Patient_1', 'Patient_2']:
    pdata = read_data(target, 'preictal') # positive examples
    ndata = read_data(target, 'interictal') # negative examples
    X = np.concatenate((pdata.X, ndata.X))
    print X.shape
    y = np.zeros(X.shape[0])
    y[:pdata.X.shape[0]] = 1
    # shuffle
    idxs = range(len(y))
    random.shuffle(idxs)
    X = X[idxs,:]
    y = y[idxs]
    if NUNITS > 1:
        NFu = X.shape[1]//NUNITS
        y = np.repeat(y, NUNITS)
        X = X.reshape(-1, NFu)
    clf.fit(X, y)

    # predict
    tdata = read_data(target, 'test') # test examples
    Xt = tdata.X
    if NUNITS > 1:
        Xt = Xt.reshape(-1, NFu)
    print Xt.shape
    y_proba = clf.predict_proba(Xt)[:,1]
    if NUNITS > 1:
        y_proba = y_proba.reshape(-1, NUNITS).max(axis=-1)

    # write results
    for i, p in enumerate(y_proba):
        print >>fpout, '%s_test_segment_%04d.mat,%.15f' % (target, i+1, p)
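The NUNITS branches above are inactive in this run (NUNITS is 1). When NUNITS > 1, each segment's feature vector is split into NUNITS equal units that are trained on as independent examples, and at test time the per-unit probabilities are collapsed back to one prediction per segment by taking their max. A small illustration of the reshape, purely for clarity:

    # with NUNITS = 2, a row of 4 features becomes two unit rows of 2 features each
    demo = np.arange(8).reshape(2, 4)   # 2 segments x 4 features
    print demo.reshape(-1, 4 // 2)      # 4 unit rows x 2 features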
In [10]:
fpout.close()