In [1]:
import numpy as np
import pandas
import scipy, scipy.spatial
import sklearn
import sys

from matplotlib import pyplot as plt

%matplotlib inline

In [2]:
y = pandas.read_table("~/Downloads/data/ml/label_train.txt", sep=" ", dtype='int', header=None)

ndim= 900
y.head()


Out[2]:
0
0 161
1 163
2 56
3 119
4 138

In [3]:
ymin = 1
ysplit = 131
ymax = 156

In [4]:
np.unique(y[0], return_counts=True)


Out[4]:
(array([  1,   2,   3,   4,   5,   6,   7,   8,   9,  10,  11,  12,  13,
         14,  15,  16,  17,  18,  19,  20,  21,  22,  23,  24,  25,  26,
         27,  28,  29,  30,  31,  32,  33,  34,  35,  36,  37,  38,  39,
         40,  41,  42,  43,  44,  45,  46,  47,  48,  49,  50,  51,  52,
         53,  54,  55,  56,  57,  58,  59,  60,  61,  62,  63,  64,  65,
         66,  67,  68,  69,  70,  71,  72,  73,  74,  75,  76,  77,  78,
         79,  80,  81,  82,  83,  84,  85,  86,  87,  88,  89,  90,  91,
         92,  93,  94,  95,  96,  97,  98,  99, 100, 101, 102, 103, 104,
        105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
        118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130,
        131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
        144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156,
        157, 158, 159, 160, 161, 162, 163, 164]),
 array([  1263,   1261,   1255,   1256,   1252,   1235,   1240,   1264,
          1256,   1281,   1245,   1278,   1278,   1253,   1255,   1255,
          1291,   1277,   1308,   1285,   1322,   1309,   1318,   1322,
          1327,   1339,   1361,   1361,   1335,   1396,   1359,   1393,
          1373,   1356,   1398,   1416,   1386,   1398,   1396,   1404,
          1430,   1398,   1416,   1406,   1420,   1445,   1433,   1445,
          1454,   1451,   1481,   1482,   1477,   1474,   1478,   1486,
          1512,   1492,   1557,   1557,   1548,   1530,   1574,   1582,
          1606,   1611,   1666,   1650,   1704,   1739,   1735,   1743,
          1728,   1796,   1737,   1810,   1822,   1864,   1847,   1838,
          1857,   1913,   1910,   1917,   2006,   1992,   2033,   2063,
          2072,   2063,   2096,   2128,   2134,   2206,   2215,   2212,
          2258,   2279,   2287,   2319,   2356,   2435,   2438,   2491,
          2486,   2485,   2502,   2555,   2594,   2629,   2575,   2587,
          2777,   2875,   2897,   2884,   2978,   3087,   3179,   3368,
          3388,   3421,   3409,   3453,   3536,   3586,   3615,   3696,
          3821,   3802,   3934,   4059,   4069,   4253,   4819,   4939,
          5038,   5259,   5310,   6080,   6487,   6623,   7256,   8279,
          9069,   9221,   9707,   9998,  10557,  10645,  11484,  12382,
         12858,  16548,  18562,  21943,  30679,  34092,  45439,  60513,
         64478,  65211,  92241, 130122]))

In [4]:
yuniq,ycount = np.unique(y[0], return_counts=True)

print(np.sum(ycount[np.where(np.in1d(yuniq, range(ymin, ysplit)))[0]]))
print(np.sum(ycount[np.where(np.in1d(yuniq, range(ysplit, ymax+1)))[0]]))


247846
229379

In [5]:
import pickle

cstat = pickle.load(open( "../data/sum_features.dat", "rb" ) )

In [6]:
### Calclulate Standardized Mean Difference Between Classes

def calStandMeanDiff(y, cstat, yneg, ypos):
    sx  = np.zeros(shape=ndim, dtype=float)
    ssx = np.zeros(shape=ndim, dtype=float)


    n1 = np.sum(np.in1d(y, yneg))
    n2 = np.sum(np.in1d(y, ypos))
    sys.stderr.write("Number of samples in NegClass: %d and PosClass: %d \n"%(n1, n2))

    for yi in yneg:
        sx += cstat[yi][0]
        ssx += cstat[yi][1]
    r1_mean = sx / float(n1)
    r1_var = (ssx - 2*sx*r1_mean + r1_mean**2) / float(n1)

    tot_mean = sx
    tot_var  = ssx
    
    sx  = np.zeros(shape=ndim, dtype=float)
    ssx = np.zeros(shape=ndim, dtype=float)
    for yi in ypos:
        sx += cstat[yi][0]
        ssx += cstat[yi][1]
    r2_mean = sx / float(n2)
    r2_var = (ssx - 2*sx*r2_mean + r2_mean**2) / float(n2)

    tot_mean += sx
    tot_var  += ssx
    tot_mean = tot_mean / float(n1 + n2)
    tot_var  = (tot_var - 2*tot_var*tot_mean + tot_mean**2) / float(n1 + n2)

    rdiff = (r1_mean - r2_mean) / np.sqrt(tot_var)

    return (rdiff)


## unit test:
mean_test = calStandMeanDiff(y, cstat, np.arange(ymin,ysplit), np.arange(ysplit, ymax+1)) 
print(np.sum(mean_test > 0.001))


64
Number of samples in NegClass: 247846 and PosClass: 229379 

Classify items belonging to first half (1) Second half (-1)

Finding Good Features


In [7]:
rdiff = calStandMeanDiff(y, cstat, np.arange(ymin,ysplit), np.arange(ysplit, ymax+1))


## Good Features:
goodfeatures = np.where(rdiff > 0.001)[0]

goodfeatures


Number of samples in NegClass: 247846 and PosClass: 229379 
Out[7]:
array([ 18,  29,  35,  54,  78,  81,  86, 106, 115, 117, 131, 134, 138,
       150, 157, 203, 239, 247, 248, 253, 265, 286, 298, 304, 337, 340,
       344, 347, 349, 375, 377, 385, 450, 494, 496, 513, 526, 534, 555,
       561, 566, 587, 592, 637, 677, 697, 710, 738, 741, 744, 760, 771,
       773, 805, 813, 815, 817, 819, 830, 860, 864, 892, 897, 898])

Read a Random Sample


In [8]:
def readRandomSample(data_fname, y, size, goodfeat=None, acc_miny=None, acc_maxy=None):
    """ Read a random sample
    """
    if goodfeat is None:
        goodfeat = np.arange(ndim)
    Xsub = np.empty(shape=(size,goodfeat.shape[0]), dtype=float)
    ysub = np.zeros(shape=size, dtype=int)

    if acc_miny is None:
        acc_miny = np.min(y)
    if acc_maxy is None:
        acc_maxy = np.max(y)
        
    #yuniq, ycount = np.unique(y, return_counts=True)
    #tot_acceptable = np.sum(ycount[np.where((yuniq >= acc_miny) & (yuniq <= acc_maxy))[0]])
    
    acceptable_indx = np.where((y>=acc_miny) & (y<=acc_maxy))[0]
    assert(acceptable_indx.shape[0] > size)
    choice_indx = np.sort(np.random.choice(acceptable_indx, size, replace=False))
    #print(choice_indx.shape)
    #sys.stderr.write("Total Accetables: --> %d"%(tot_acceptable))
    
    #proba = 1.0 - size/float(tot_acceptable)
    
        
    with open(data_fname, 'r') as fp:
        n = 0
        nf = 0
        for line in fp:
#            if (y[n] >= acc_miny and y[n]<=acc_maxy):
#                if np.random.uniform(low=0, high=1) > proba and nf < size:
            if nf < size:
                if n == choice_indx[nf]:
                    line = line.strip().split()
                    ix = -1
                    for i,v in enumerate(line):
                        if np.any(goodfeat == i):
                            ix += 1
                            Xsub[nf,ix] = int(v)
                    ysub[nf] = y[n]

                    nf += 1
            n += 1
    return(Xsub, ysub)

In [23]:
## unit testing readRandomSample()
gf_test = goodfeatures
Xsub, ysub = readRandomSample('/home/vahid/Downloads/data/ml/data_train.txt', y[0], \
                              size=2000, goodfeat=gf_test, acc_miny=ymin, acc_maxy=ymax)

print(Xsub.shape)
print(np.unique(ysub))


(2000, 64)
[  1   2   3   4   5   6   7   8   9  10  11  12  13  14  15  16  17  18
  19  20  21  22  23  24  25  26  27  28  29  30  31  32  33  34  35  36
  37  38  39  40  41  42  43  44  45  46  47  48  49  50  51  52  53  54
  55  56  57  58  59  60  61  62  63  64  65  66  67  68  69  70  71  72
  73  74  75  76  77  78  79  80  81  82  83  84  85  86  87  88  89  90
  91  92  93  94  95  96  97  98  99 100 101 102 103 104 105 106 107 108
 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126
 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144
 145 146 147 148 149 150 151 152 153 154 155 156]

In [9]:
### Performance Evaluation
def evalPerformance(ytrue, ypred):
    tp = np.sum(ypred[np.where(ytrue ==  1)[0]] == 1)
    fp = np.sum(ypred[np.where(ytrue == -1)[0]] == 1)
    tn = np.sum(ypred[np.where(ytrue == -1)[0]] == -1)
    fn = ytrue.shape[0]-(tp+fp+tn)
    #sys.stderr.write('%d %d %d %d\n'%(tp,fp,tn,fn))
    prec = tp / float(tp + fp)
    recall  = tp / float(tp + fn)
    f1score = 2*tp/float(2*tp + fp + fn)

    return (prec, recall, f1score)

In [10]:
y = pandas.read_table('../data/label_tr.lower.txt', sep=' ', header=None, dtype='int')

print(np.unique(y[1]))

Xsub, ysub = readRandomSample('../data/data_tr.lower.txt', y[1], size=20000, \
                              goodfeat=goodfeatures, acc_miny=ymin, acc_maxy=ymax)

print(np.unique(ysub))

assert(np.sum(ysub < ymin) == 0)
assert(np.sum(ysub > ymax) == 0)
ysub[np.where(ysub < ysplit)[0]] = -1
ysub[np.where(ysub >= ysplit)[0]] =  1

print(np.sum(ysub == -1), np.sum(ysub==1))

#Xsub = Xsub[:, goodfeatures]
Xsub = (Xsub - np.mean(Xsub, axis=0)) / np.std(Xsub, axis=0)

Xsub.shape


[  1   2   3   4   5   6   7   8   9  10  11  12  13  14  15  16  17  18
  19  20  21  22  23  24  25  26  27  28  29  30  31  32  33  34  35  36
  37  38  39  40  41  42  43  44  45  46  47  48  49  50  51  52  53  54
  55  56  57  58  59  60  61  62  63  64  65  66  67  68  69  70  71  72
  73  74  75  76  77  78  79  80  81  82  83  84  85  86  87  88  89  90
  91  92  93  94  95  96  97  98  99 100 101 102 103 104 105 106 107 108
 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126
 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144
 145 146 147 148 149 150 151 152 153 154 155 156]
[  1   2   3   4   5   6   7   8   9  10  11  12  13  14  15  16  17  18
  19  20  21  22  23  24  25  26  27  28  29  30  31  32  33  34  35  36
  37  38  39  40  41  42  43  44  45  46  47  48  49  50  51  52  53  54
  55  56  57  58  59  60  61  62  63  64  65  66  67  68  69  70  71  72
  73  74  75  76  77  78  79  80  81  82  83  84  85  86  87  88  89  90
  91  92  93  94  95  96  97  98  99 100 101 102 103 104 105 106 107 108
 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126
 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144
 145 146 147 148 149 150 151 152 153 154 155 156]
(10380, 9620)
Out[10]:
(20000, 64)

Grid-Search (coarse)


In [ ]:
import sklearn.svm

ntot = Xsub.shape[0]
tr_idx = np.random.choice(ntot, size=ntot/2, replace=False)
ts_idx = np.setdiff1d(np.arange(ntot), tr_idx, assume_unique=True)
yts = ysub[ts_idx]

for c in [0.001, 0.01, 0.1, 1.0, 5.0]:
    for gm in [0.001, 0.01, 0.1, 1.0, 5.0]:
        clf = sklearn.svm.SVC(C=c, kernel='rbf', gamma=gm)
        clf.fit(Xsub[tr_idx, :], ysub[tr_idx])
        ypred = clf.predict(Xsub[ts_idx, :])
        prec, recall, f1score = evalPerformance(yts, ypred)
        print ("C=%.4f Gamma=%.4f  ==> Prec:%.3f  Recall:%.3f  F1Score:%.3f"%(c, gm, prec, recall, f1score))


C=0.0010 Gamma=0.0010  ==> Prec:nan  Recall:0.000  F1Score:0.000
C=0.0010 Gamma=0.0100  ==> Prec:nan  Recall:0.000  F1Score:0.000

In [18]:
import sklearn.svm

ntot = Xsub.shape[0]
tr_idx = np.random.choice(ntot, size=ntot/2, replace=False)
ts_idx = np.setdiff1d(np.arange(ntot), tr_idx, assume_unique=True)
yts = ysub[ts_idx]

for c in [1, 2, 5, 8, 10]:
    for gm in [0.005, 0.008, 0.01, 0.015, 0.05, 0.08]:
        clf = sklearn.svm.SVC(C=c, kernel='rbf', gamma=gm)
        clf.fit(Xsub[tr_idx, :], ysub[tr_idx])
        ypred = clf.predict(Xsub[ts_idx, :])
        prec, recall, f1score = evalPerformance(yts, ypred)
        print ("C=%.4f Gamma=%.4f  ==> Prec:%.3f  Recall:%.3f  F1Score:%.3f"%(c, gm, prec, recall, f1score))


C=1.0000 Gamma=0.0050  ==> Prec:0.725  Recall:0.831  F1Score:0.775
C=1.0000 Gamma=0.0080  ==> Prec:0.730  Recall:0.798  F1Score:0.762
C=1.0000 Gamma=0.0100  ==> Prec:0.734  Recall:0.779  F1Score:0.756
C=1.0000 Gamma=0.0150  ==> Prec:0.741  Recall:0.731  F1Score:0.736
C=1.0000 Gamma=0.0500  ==> Prec:0.743  Recall:0.515  F1Score:0.608
C=1.0000 Gamma=0.0800  ==> Prec:0.738  Recall:0.402  F1Score:0.521
C=2.0000 Gamma=0.0050  ==> Prec:0.726  Recall:0.840  F1Score:0.779
C=2.0000 Gamma=0.0080  ==> Prec:0.734  Recall:0.800  F1Score:0.766
C=2.0000 Gamma=0.0100  ==> Prec:0.736  Recall:0.778  F1Score:0.756
C=2.0000 Gamma=0.0150  ==> Prec:0.749  Recall:0.729  F1Score:0.739
C=2.0000 Gamma=0.0500  ==> Prec:0.756  Recall:0.524  F1Score:0.619
C=2.0000 Gamma=0.0800  ==> Prec:0.751  Recall:0.416  F1Score:0.536
C=5.0000 Gamma=0.0050  ==> Prec:0.727  Recall:0.821  F1Score:0.771
C=5.0000 Gamma=0.0080  ==> Prec:0.738  Recall:0.773  F1Score:0.755
C=5.0000 Gamma=0.0100  ==> Prec:0.746  Recall:0.754  F1Score:0.750
C=5.0000 Gamma=0.0150  ==> Prec:0.755  Recall:0.702  F1Score:0.727
C=5.0000 Gamma=0.0500  ==> Prec:0.758  Recall:0.513  F1Score:0.612
C=5.0000 Gamma=0.0800  ==> Prec:0.755  Recall:0.410  F1Score:0.531
C=8.0000 Gamma=0.0050  ==> Prec:0.727  Recall:0.808  F1Score:0.765
C=8.0000 Gamma=0.0080  ==> Prec:0.742  Recall:0.763  F1Score:0.753
C=8.0000 Gamma=0.0100  ==> Prec:0.749  Recall:0.738  F1Score:0.744
C=8.0000 Gamma=0.0150  ==> Prec:0.755  Recall:0.697  F1Score:0.724
C=8.0000 Gamma=0.0500  ==> Prec:0.759  Recall:0.511  F1Score:0.611
C=8.0000 Gamma=0.0800  ==> Prec:0.753  Recall:0.407  F1Score:0.529
C=10.0000 Gamma=0.0050  ==> Prec:0.729  Recall:0.799  F1Score:0.762
C=10.0000 Gamma=0.0080  ==> Prec:0.744  Recall:0.756  F1Score:0.750
C=10.0000 Gamma=0.0100  ==> Prec:0.748  Recall:0.733  F1Score:0.741
C=10.0000 Gamma=0.0150  ==> Prec:0.754  Recall:0.689  F1Score:0.720
C=10.0000 Gamma=0.0500  ==> Prec:0.759  Recall:0.508  F1Score:0.609
C=10.0000 Gamma=0.0800  ==> Prec:0.752  Recall:0.407  F1Score:0.528

In [ ]: