Large Scale Visual Object Recognition

Vahid Mirjalili


In [1]:
import numpy as np
import pandas
import scipy, scipy.spatial
import sklearn
import sys

from matplotlib import pyplot as plt

%matplotlib inline

In [2]:
df = pandas.read_table("../data/data_dev.txt", sep=" ", dtype='int', header=None)

df.head()


Out[2]:
0 1 2 3 4 5 6 7 8 9 ... 890 891 892 893 894 895 896 897 898 899
0 3 0 0 0 2 0 0 2 0 1 ... 0 0 0 0 0 0 0 3 0 2
1 0 0 0 0 0 0 0 1 5 0 ... 0 0 0 0 1 3 2 1 0 2
2 0 0 0 0 0 2 0 0 1 0 ... 0 0 0 0 1 0 0 0 3 0
3 2 0 0 1 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0
4 1 0 0 2 27 0 0 1 0 4 ... 0 0 0 0 0 3 0 0 0 1

5 rows × 900 columns
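Each row is a 900-dimensional vector of small non-negative integer counts (most likely a bag-of-visual-words histogram per image); these count-valued features are what motivate the multinomial Naive Bayes baseline further below.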


In [3]:
y = pandas.read_table("../data/label_dev.txt", sep=" ", dtype='int', header=None)

y.head()


Out[3]:
0
0 117
1 73
2 161
3 162
4 160

Class Distribution


In [12]:
yl, yc = np.unique(y[0], return_counts=True)

print(yl.shape)
print(np.max(yc))

yc = yc / float(np.sum(yc))   # normalize counts to frequencies

fig1 = plt.figure(1, figsize=(12, 6))
ax = fig1.add_subplot(1, 1, 1)
bars = ax.bar(yl, yc, width=1, color='b', linewidth=0.1)

plt.setp(ax.get_xticklabels(), rotation='horizontal', fontsize=16)
plt.setp(ax.get_yticklabels(), rotation='vertical', fontsize=16)

plt.xlabel('Class Label', size=20)
plt.ylabel('Distribution', size=20)
plt.title('Class Frequency', size=20)

plt.axis([0, 166, 0, 0.14])

plt.show()


(149,)
16224

Splitting into Training and Test Sets


In [4]:
## Hold out 20% of the rows as a test set.
np.random.seed(seed=1234)
N, m = df.shape
train_idx = np.random.choice(N, size=int(0.8 * N), replace=False)
test_idx = np.setdiff1d(np.arange(N), train_idx, assume_unique=True)
print(train_idx.shape, test_idx.shape)


((100000,), (25000,))

In [10]:
## K-fold cross-validation: give each row a random fold id in [0, K).
## The folds are only approximately equal in size, since assignment is
## independent per row.

np.random.seed(seed=1234)

K = 10
ainx = np.random.randint(K, size=N)

for i in range(K):
    test_idx  = np.where(ainx == i)[0]
    train_idx = np.where(ainx != i)[0]
    print("%d %d %d" % (i, test_idx.shape[0], train_idx.shape[0]))


0 12531 112469
1 12500 112500
2 12421 112579
3 12471 112529
4 12525 112475
5 12653 112347
6 12547 112453
7 12467 112533
8 12469 112531
9 12416 112584
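Since np.random.randint assigns rows to folds independently, the fold sizes above vary by a few hundred rows. If exactly equal-sized folds are preferred, a minimal sketch using a shuffled permutation:

## Sketch: exact K-fold splits (sizes differ by at most 1) via a permutation.
perm = np.random.permutation(N)
folds = np.array_split(perm, K)

for i in range(K):
    test_idx  = folds[i]
    train_idx = np.hstack(folds[:i] + folds[i+1:])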

Naive Bayes Classification


In [7]:
from sklearn.naive_bayes import MultinomialNB

## Multinomial NB suits the non-negative count features. Sweep the
## smoothing parameter alpha; `res` is the number of correct
## predictions on the 25,000-row test set.
for alpha in [0.01, 0.1, 0.2, 0.5, 1.0, 2.0, 10.0, 100.0]:
    clf = MultinomialNB(alpha=alpha)
    clf.fit(df.iloc[train_idx, :], y.iloc[train_idx, 0])

    ypred = clf.predict(df.iloc[test_idx, :])
    res = np.sum(ypred == y.iloc[test_idx, 0])

    print("%f %f" % (alpha, res))


## Same sweep with uniform class priors (fit_prior=False).
for alpha in [0.01, 0.1, 0.2, 0.5, 1.0, 2.0, 10.0, 100.0]:
    clf = MultinomialNB(alpha=alpha, fit_prior=False)
    clf.fit(df.iloc[train_idx, :], y.iloc[train_idx, 0])

    ypred = clf.predict(df.iloc[test_idx, :])
    res = np.sum(ypred == y.iloc[test_idx, 0])

    print("%f %f" % (alpha, res))


0.010000 2627.000000
0.100000 2630.000000
0.200000 2630.000000
0.500000 2626.000000
1.000000 2629.000000
2.000000 2633.000000
10.000000 2722.000000
100.000000 3992.000000
0.010000 2450.000000
0.100000 2448.000000
0.200000 2446.000000
0.500000 2449.000000
1.000000 2456.000000
2.000000 2462.000000
10.000000 2559.000000
100.000000 3885.000000
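For scale: 2,630 correct out of 25,000 test points is about 10.5% accuracy, and the best run (alpha = 100 with fitted priors) reaches 3,992/25,000 ≈ 16%. Always predicting the most frequent class (16,224 of the 125,000 rows) would already score roughly 13%, so plain multinomial NB barely beats that baseline here.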

In [5]:
def make_SVMliteFormat(fname, X, y):
    """Write (X, y) in SVMlight sparse format: <label> <idx>:<val> ..."""
    assert X.shape[0] == y.shape[0]
    # labels must already be in {-1, 0, 1}
    assert np.sum(y == 1) + np.sum(y == -1) + np.sum(y == 0) == y.shape[0]

    N, m = X.shape
    with open(fname, 'w') as fp:
        for i in range(N):
            fp.write("%d " % y[i])
            for j in range(m):
                if X.iloc[i, j] != 0:
                    fp.write("%d:%d " % (j + 1, X.iloc[i, j]))
            fp.write("\n")


## One file per class: the class's own rows labeled +1 and the sampled
## rows from the other classes (cls_dict, built in the balancing step
## below) labeled -1. The positive mask is taken before relabeling so
## the +1 entries are not clobbered by the -1 assignment.
for ci in yuniq:
    comb_inx = cls_dict[ci]
    Xsub = df.iloc[comb_inx, :]
    ysub = y.iloc[comb_inx, 0].values.copy()
    pos = ysub == ci
    ysub[pos] = 1
    ysub[~pos] = -1
    make_SVMliteFormat('../data/class_splits/c_%d.txt' % ci, Xsub, ysub)
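As a quick sanity check, the generated splits can be read back with scikit-learn's SVMlight loader (a sketch; the c_1.txt path assumes the loop above has run):

from sklearn.datasets import load_svmlight_file

## n_features=900 pins the width even if trailing columns happen to be
## all zero in this particular file.
Xc, yb = load_svmlight_file('../data/class_splits/c_1.txt', n_features=900)
print(Xc.shape, np.unique(yb))   # expect (n_rows, 900) and [-1., 1.]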

Balancing Classes


In [6]:
## Precompute the row indices of every class once, for fast sampling below.
yuniq = np.unique(y[0])

yuniq_dict = {}

for ci in yuniq:
    yuniq_dict[ci] = np.where(y[0] == ci)[0]

In [41]:
np.random.seed(seed=1234)

## For each class ci, build a roughly balanced one-vs-all subset: all of
## ci's rows plus ~1-2% of |ci| sampled from every other class.
cls_dict = {}

for ci in yuniq:
    yinx = np.where(y[0] == ci)[0]
    nci = yinx.shape[0]
    comb_inx = yinx
    for cj in yuniq:
        if ci != cj:
            n_cj = int(np.random.uniform(low=0.01 * nci, high=0.02 * nci))
            cj_inx = np.unique(np.random.choice(yuniq_dict[cj], size=n_cj))
            comb_inx = np.hstack([comb_inx, cj_inx])

    sys.stderr.write("%d " % comb_inx.shape[0])

    np.random.shuffle(comb_inx)
    cls_dict[ci] = comb_inx


457 408 396 476 357 384 380 465 446 426 351 523 486 403 437 435 475 474 460 475 971 920 481 517 430 435 508 1008 470 443 489 441 475 504 498 470 492 548 976 480 1059 569 528 578 1112 1120 585 1171 1102 517 605 1198 645 577 584 1330 653 1163 664 739 647 711 674 656 1050 702 699 1276 717 731 696 1304 834 825 765 833 842 740 746 887 759 870 815 860 908 862 988 988 887 898 947 934 990 972 974 906 1014 1003 1168 1120 1185 1172 1259 1207 1112 1361 1295 1244 1164 1329 1380 1476 1399 1312 1441 1416 1533 1523 1646 1947 1815 1788 2032 2031 2253 2477 2600 2883 3138 3549 3501 3714 4187 4164 3871 4470 4597 5006 6251 7131 8545 11590 12535 16783 21375 22801 23151 30692 40733 
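Each subset pairs all of class ci's rows with an expected 148 × ~1.5% ≈ 2.2 × nci negatives, so the one-vs-all training sets are skewed roughly 2:1 toward the negative class instead of the raw ~148:1.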

SVM Classifier

Binary Classification: One-vs-All


In [58]:
import sklearn.svm

clf = sklearn.svm.SVC(C=1.0, kernel='rbf', gamma=1.0)

## One-vs-all: for each class, train on 80% of its balanced subset and
## report accuracy on the held-out 20%. Rows of other classes are
## relabeled -1; rows of class ci keep their original label.
for ci in yuniq:
    tr_idx = np.random.choice(cls_dict[ci], size=int(0.8 * cls_dict[ci].shape[0]), replace=False)
    ts_idx = np.setdiff1d(cls_dict[ci], tr_idx, assume_unique=True)

    Xtr = df.iloc[tr_idx, :]
    ytr = y.iloc[tr_idx, 0].values.copy()
    ytr[ytr != ci] = -1
    clf.fit(Xtr, ytr)

    Xts = df.iloc[ts_idx, :]
    yts = y.iloc[ts_idx, 0].values.copy()
    yts[yts != ci] = -1
    ypred = clf.predict(Xts)
    print("Class %d   ==>  %.4f" % (ci, np.sum(yts == ypred) / float(ypred.shape[0])))


Class 1   ==>  0.6413
Class 2   ==>  0.6585
Class 3   ==>  0.6125
Class 4   ==>  0.6354
Class 5   ==>  0.5833
Class 6   ==>  0.6883
Class 7   ==>  0.6711
Class 8   ==>  0.6022
Class 9   ==>  0.6889
Class 10   ==>  0.5581
Class 11   ==>  0.6479
Class 13   ==>  0.6667
Class 14   ==>  0.6327
Class 15   ==>  0.5926
Class 16   ==>  0.6250
Class 17   ==>  0.6782
Class 21   ==>  0.6526
Class 22   ==>  0.6211
Class 24   ==>  0.6304
Class 26   ==>  0.5789
Class 32   ==>  0.6769
Class 33   ==>  0.7283
Class 35   ==>  0.6495
Class 36   ==>  0.6346
Class 37   ==>  0.6047
Class 38   ==>  0.6552
Class 39   ==>  0.5784
Class 40   ==>  0.7178
Class 41   ==>  0.5319
Class 42   ==>  0.6180
Class 43   ==>  0.6939
Class 44   ==>  0.6629
Class 45   ==>  0.6842
Class 46   ==>  0.6535
Class 47   ==>  0.6300
Class 48   ==>  0.6170
Class 49   ==>  0.6768
Class 50   ==>  0.7273
Class 51   ==>  0.6480
Class 52   ==>  0.6042
Class 53   ==>  0.6604
Class 54   ==>  0.6140
Class 56   ==>  0.6792
Class 58   ==>  0.6552
Class 59   ==>  0.6637
Class 60   ==>  0.6964
Class 61   ==>  0.6667
Class 62   ==>  0.6553
Class 63   ==>  0.6787
Class 64   ==>  0.6346
Class 65   ==>  0.6446
Class 66   ==>  0.6542
Class 67   ==>  0.6589
Class 68   ==>  0.6207
Class 70   ==>  0.6752
Class 71   ==>  0.6767
Class 72   ==>  0.6412
Class 73   ==>  0.6867
Class 74   ==>  0.6842
Class 75   ==>  0.6014
Class 76   ==>  0.6462
Class 77   ==>  0.6573
Class 78   ==>  0.5926
Class 79   ==>  0.6742
Class 80   ==>  0.7095
Class 81   ==>  0.6454
Class 82   ==>  0.6714
Class 83   ==>  0.7109
Class 84   ==>  0.6597
Class 85   ==>  0.6463
Class 86   ==>  0.5714
Class 87   ==>  0.6705
Class 88   ==>  0.6527
Class 89   ==>  0.6788
Class 90   ==>  0.6863
Class 91   ==>  0.6287
Class 92   ==>  0.6509
Class 93   ==>  0.6486
Class 94   ==>  0.6533
Class 95   ==>  0.6742
Class 96   ==>  0.6118
Class 97   ==>  0.6494
Class 98   ==>  0.6810
Class 99   ==>  0.6919
Class 100   ==>  0.6538
Class 101   ==>  0.6763
Class 102   ==>  0.6768
Class 103   ==>  0.7222
Class 104   ==>  0.6573
Class 105   ==>  0.5778
Class 106   ==>  0.5789
Class 107   ==>  0.6417
Class 108   ==>  0.6667
Class 109   ==>  0.6769
Class 110   ==>  0.6718
Class 111   ==>  0.6703
Class 112   ==>  0.6552
Class 113   ==>  0.7015
Class 114   ==>  0.6111
Class 115   ==>  0.6339
Class 116   ==>  0.6498
Class 117   ==>  0.6553
Class 118   ==>  0.7262
Class 119   ==>  0.6322
Class 120   ==>  0.6143
Class 121   ==>  0.6777
Class 122   ==>  0.6486
Class 123   ==>  0.6345
Class 124   ==>  0.6910
Class 125   ==>  0.7030
Class 126   ==>  0.6703
Class 127   ==>  0.7162
Class 128   ==>  0.6643
Class 129   ==>  0.7148
Class 130   ==>  0.6678
Class 131   ==>  0.6655
Class 132   ==>  0.6808
Class 133   ==>  0.6426
Class 134   ==>  0.6818
Class 135   ==>  0.6692
Class 136   ==>  0.7163
Class 137   ==>  0.6592
Class 138   ==>  0.6560
Class 139   ==>  0.6830
Class 140   ==>  0.6608
Class 141   ==>  0.6532
Class 142   ==>  0.6750
Class 143   ==>  0.6412
Class 144   ==>  0.6911
Class 145   ==>  0.6887
Class 146   ==>  0.6690
Class 147   ==>  0.6743
Class 148   ==>  0.7029
Class 149   ==>  0.6855
Class 150   ==>  0.6619
Class 151   ==>  0.6969
Class 152   ==>  0.6913
Class 153   ==>  0.6766
Class 154   ==>  0.6978
Class 155   ==>  0.6882
Class 156   ==>  0.6870
Class 157   ==>  0.6782
Class 158   ==>  0.6749
Class 159   ==>  0.6524
Class 160   ==>  0.6386
Class 161   ==>  0.6558
Class 162   ==>  0.6420
Class 163   ==>  0.6255
Class 164   ==>  0.6006

In [50]:
## Spot-check: predictions on the test rows whose true label is 1
## (clf here is the class-1 one-vs-all model).
ypred = clf.predict(df.iloc[test_idx, :])

ri = np.where(y.iloc[test_idx, 0] == 1)

ypred[ri]


Out[50]:
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])

Dimensionality Reduction: Randomized PCA


In [ ]:
from sklearn.decomposition import RandomizedPCA

## Note: RandomizedPCA was later merged into PCA(svd_solver='randomized')
## in scikit-learn.
rpca = RandomizedPCA()
rpca.fit(df)
print(rpca.explained_variance_ratio_)

In [9]:
from sklearn.decomposition import PCA
pca = PCA(n_components = 100)
pca.fit(df)
print(pca.explained_variance_ratio_)


[ 0.1090537   0.09269665  0.03314127  0.02559689  0.02315678  0.02055335
  0.01874672  0.01811402  0.01633916  0.01467925  0.0134548   0.01288432
  0.01182419  0.01023215  0.00996661  0.00904175  0.00831502  0.00810684
  0.00793453  0.00783997  0.00767987  0.00720876  0.00698262  0.00680726
  0.0065862   0.00623376  0.00582674  0.00557831  0.00540425  0.00538957
  0.0052885   0.00507574  0.00500071  0.00492366  0.00482907  0.00456399
  0.0044275   0.00422572  0.00406956  0.00404317  0.00397967  0.00386999
  0.00380222  0.00366672  0.0036173   0.00355934  0.0035096   0.00337614
  0.00334732  0.0032302   0.0031734   0.00311737  0.0030717   0.00300711
  0.00294291  0.00287181  0.00281295  0.00279004  0.00274067  0.00265329
  0.00260306  0.00258026  0.00253844  0.00247556  0.00240785  0.00239004
  0.00236809  0.00227202  0.00225695  0.00223831  0.00220435  0.00216181
  0.00211199  0.00207557  0.00205412  0.00198485  0.00197085  0.00195242
  0.00193161  0.00188504  0.00184469  0.00183054  0.00182035  0.00179624
  0.0017916   0.00173094  0.00171015  0.0016956   0.00165406  0.00161785
  0.00157301  0.0015629   0.00154312  0.00153126  0.00151692  0.001508
  0.00149214  0.00147004  0.00146304  0.0014454 ]

In [10]:
np.sum(pca.explained_variance_ratio_)


Out[10]:
0.72402368503209713
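The first 100 components thus retain about 72% of the total variance. A short sketch to find how many components reach a given fraction, reusing the PCA fitted above (60% is attainable within its 100 components):

cumvar = np.cumsum(pca.explained_variance_ratio_)

## smallest number of components whose cumulative ratio reaches 0.60
k60 = np.searchsorted(cumvar, 0.60) + 1
print(k60, cumvar[k60 - 1])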

kNN (k-Nearest Neighbors)


In [13]:
import sys
sys.setrecursionlimit(10000)   # KDTree construction is deeply recursive

## Build a KD-tree on the first 300 features of fold 0.
for i in range(1):
    Xtrain = df.iloc[ainx == i, :300]
    print(i, Xtrain.shape)
    kdt1 = scipy.spatial.KDTree(Xtrain, leafsize=200)

    print(kdt1)


(0, (12531, 300))
<scipy.spatial.kdtree.KDTree object at 0x7ffb5a1dff50>

In [21]:
ytrain = y.iloc[ainx == i, 0]

## Query points come from fold 1.
Xtest = df.iloc[ainx == 1, :300]
ytest = y.iloc[ainx == 1, 0]

In [31]:
## For the first 10 test points: q[0] holds distances to the 4 nearest
## training points, q[1] their indices; compare the true label against
## the neighbors' labels.
for j in range(10):
    q = kdt1.query(Xtest.iloc[j, :], k=4)
    print(q[0])
    print(ytest.iloc[j], ytrain.iloc[q[1]].values)


[ 3.          5.83095189         nan         nan]
(115, array([ 58, 113, 152, 124]))
[ 14.86606875  16.61324773  16.76305461  17.20465053]
(164, array([157,  51, 160, 158]))
[ 2.23606798  7.28010989  7.41619849  9.2736185 ]
(62, array([159, 161, 161, 164]))
[ 4.79583152  6.8556546   7.07106781  7.61577311]
(110, array([ 90, 161, 144, 113]))
[ 18.62793601  18.81488772  18.94729532  19.4422221 ]
(162, array([164, 164, 148, 164]))
[ 13.3041347   14.59451952  14.93318452  15.58845727]
(158, array([164, 162, 155, 162]))
[  8.           9.21954446   9.48683298  10.04987562]
(145, array([162,  83, 164, 164]))
[  9.79795897  11.44552314  12.60952021          nan]
(163, array([113, 113, 159,  90]))
[  2.82842712  10.14889157  11.3137085   12.28820573]
(157, array([152, 150, 160, 164]))
[ 16.73320053  16.82260384  17.43559577          nan]
(162, array([161, 133, 114, 124]))
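The raw neighbor labels above rarely agree with the query's true label. To turn the KD-tree queries into an actual classifier, a minimal majority-vote sketch:

from collections import Counter

## Predict by majority vote among the labels of the k nearest
## training points.
def knn_predict(kdt, ytr, xq, k=4):
    dist, idx = kdt.query(xq, k=k)
    votes = Counter(ytr.iloc[idx].values)
    return votes.most_common(1)[0][0]

preds = [knn_predict(kdt1, ytrain, Xtest.iloc[j, :]) for j in range(10)]
print(preds)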

Space Transformation

Class Centers


In [8]:
labels = np.unique(y.iloc[train_idx,0], return_counts=False)
labels


Out[8]:
array([  1,   2,   3,   4,   5,   6,   7,   8,   9,  10,  11,  13,  14,
        15,  16,  17,  21,  22,  24,  26,  32,  33,  35,  36,  37,  38,
        39,  40,  41,  42,  43,  44,  45,  46,  47,  48,  49,  50,  51,
        52,  53,  54,  56,  58,  59,  60,  61,  62,  63,  64,  65,  66,
        67,  68,  70,  71,  72,  73,  74,  75,  76,  77,  78,  79,  80,
        81,  82,  83,  84,  85,  86,  87,  88,  89,  90,  91,  92,  93,
        94,  95,  96,  97,  98,  99, 100, 101, 102, 103, 104, 105, 106,
       107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
       120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132,
       133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145,
       146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158,
       159, 160, 161, 162, 163, 164])

In [18]:
## One "center" per class: the per-feature median of its training rows.
center = np.empty(shape=(labels.shape[0], m), dtype=float)

Xtrain = df.iloc[train_idx, :]
ytrain = y.iloc[train_idx, :]

for n, i in enumerate(labels):
    center[n, :] = Xtrain.iloc[np.where(ytrain[0] == i)[0], :].median().values

center.shape


Out[18]:
(149, 900)
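With one median center per class, a nearest-centroid classifier is a single distance matrix away (a sketch using the centers above on the held-out rows):

from scipy.spatial.distance import cdist

## Assign each test row to the class with the closest median center.
D = cdist(df.iloc[test_idx, :], center, metric='euclidean')
ypred_nc = labels[np.argmin(D, axis=1)]
print(np.mean(ypred_nc == y.iloc[test_idx, 0].values))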

In [23]:
import vecspace   # local module

## Re-embed each training row, presumably as its distances to 100 random
## reference points in the original 900-d space.
ref = np.random.uniform(low=0.0, high=10.0, size=100 * 900).reshape((100, 900))

vr = vecspace.Vectorify(ref, metric='euclidean')

XtrainVec = np.empty(shape=(Xtrain.shape[0], ref.shape[0]), dtype=float)

for i in np.arange(Xtrain.shape[0]):
    XtrainVec[i, :] = vr.vectorize(Xtrain.iloc[i, :])
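vecspace is a local module; assuming Vectorify(ref, metric='euclidean').vectorize(x) returns the distances from x to each row of ref, the loop above is equivalent to one vectorized SciPy call:

from scipy.spatial.distance import cdist

## Hypothetical equivalent of the vecspace loop, under the assumption
## stated above: one distance to each of the 100 reference points.
XtrainVec_alt = cdist(Xtrain.values, ref, metric='euclidean')   # (N_train, 100)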

In [20]:
np.savetxt('../data/trainvec.txt', XtrainVec, fmt='%.2f', delimiter=' ')
