In [96]:
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
import numpy as np
from skimage.io import imread, imshow, imsave
from skimage.feature import blob_doh
from skimage.color import rgb2gray
from skimage.transform import resize
from glob import glob
import os
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.cross_validation import train_test_split

In [2]:
%matplotlib inline

In [3]:
trainlabel = pd.read_csv('../Data/train.csv')

In [4]:
whaleIDset = set(trainlabel['whaleID'])

In [5]:
print(len(whaleIDset), len(trainlabel))


(447, 4544)

Make the training set


In [8]:
dloc = '../../BigData/kaggle-right-whale/imgs/'

In [82]:
features = []
foundlist = []
for i in range(0, 6000):
    si = str(i)
    # thumbnail produced by the resizing loop at the end of this notebook
    tailloc = 'w_' + si + '_small_verysmall.jpg'
    dfile = dloc + tailloc
    # only consider image numbers whose leading digit is below 6
    if si[0] < '6':
        # keep the image only if it has an entry in the training labels
        if 'w_' + si + '.jpg' in trainlabel['Image'].values:
            imdata = imread(dfile)
            features.append(imdata.flatten())
            # look up the whale ID that goes with this image
            trainlabelindex = trainlabel['Image'].values == 'w_' + si + '.jpg'
            trainlabelvalue = trainlabel['whaleID'].values[trainlabelindex][0]
            foundlist.append(trainlabelvalue)
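
Each flattened thumbnail contributes 20 x 30 x 3 = 1800 values, which is where the (2662, 1800) feature shape below comes from. A quick sanity check on a single thumbnail (the filename here is only an illustrative example and assumes that particular image exists):

# Illustrative check; 'w_0_small_verysmall.jpg' is a hypothetical example file.
sample = imread(dloc + 'w_0_small_verysmall.jpg')
print(sample.shape, sample.size)  # expect (20, 30, 3) and 1800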

In [83]:
print(len(foundlist), len(set(foundlist)))


(2662, 429)

In [84]:
features = np.array(features)

In [85]:
features.shape


Out[85]:
(2662, 1800)

In [86]:
labels = trainlabel['whaleID']

In [87]:
vectorizer = CountVectorizer(min_df=1)

In [88]:
X = features
y = vectorizer.fit_transform(foundlist).toarray()
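
CountVectorizer is acting purely as a label binarizer here: each distinct whaleID becomes its own column, so y comes out as a (2662, 429) indicator matrix, as the next cell shows. A minimal sketch of the same encoding with sklearn's dedicated binarizer (an alternative, not what was run above; the column ordering may differ):

from sklearn.preprocessing import LabelBinarizer

lb = LabelBinarizer()
y_alt = lb.fit_transform(foundlist)  # (2662, 429) indicator matrix, one column per whale ID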

In [89]:
y.shape


Out[89]:
(2662, 429)

In [90]:
clf = RandomForestClassifier(n_estimators=10)

In [101]:
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.3)

In [104]:
clf.fit(Xtrain, ytrain)


Out[104]:
RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
            max_depth=None, max_features='auto', max_leaf_nodes=None,
            min_samples_leaf=1, min_samples_split=2,
            min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,
            oob_score=False, random_state=None, verbose=0,
            warm_start=False)
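
Because y is the one-hot indicator matrix rather than a single label column, the forest treats this as 429 independent binary outputs instead of one 429-way classification; that distinction is what trips up the log-loss scorer further down. A quick, illustrative way to confirm:

print(clf.n_outputs_)  # 429: one binary output per whale ID column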

In [105]:
ypredict = clf.predict(Xtest)

In [112]:
def llfun(act, pred):
    # Kaggle-style binary log loss. The clipping keeps predictions away from
    # 0 and 1 so np.log never returns -inf; note that it modifies pred in place.
    epsilon = 1e-15
    toohigh = pred > 1 - epsilon
    pred[toohigh] = 1 - epsilon
    toolow = pred < epsilon
    pred[toolow] = epsilon
    ll = sum(act * np.log(pred) + (1 - act) * np.log(1 - pred))
    ll = ll * -1.0 / len(act)
    return ll
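
As a cross-check (a small synthetic example, not part of the original run), llfun should agree with sklearn's log_loss when given a single column of 0/1 truths and probabilities:

from sklearn.metrics import log_loss

# Tiny synthetic check: both implementations should return the same number.
act = np.array([1.0, 0.0, 1.0, 0.0])
pred = np.array([0.9, 0.2, 0.8, 0.1])
print(llfun(act, pred.copy()), log_loss(act, pred))  # .copy() because llfun clips pred in place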

In [113]:
classbyclass = []
nclasses = ypredict[0, :].size
# evaluate the binary log loss separately for each one-hot label column
for i in range(nclasses):
    result = llfun(ytest[:, i], ypredict[:, i])
    classbyclass.append(result)

In [116]:
sum(classbyclass)


Out[116]:
34.538776394910919

In [95]:
scores = cross_val_score(clf, X, y, cv=3, scoring='log_loss')
print(scores, scores.mean(), scores.std())


---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-95-c7df15bf5f59> in <module>()
----> 1 scores = cross_val_score(clf, X, y, cv=3, scoring='log_loss')
      2 print(scores, scores.mean(), scores.std())

/Users/rbussman/anaconda/lib/python2.7/site-packages/sklearn/cross_validation.pyc in cross_val_score(estimator, X, y, scoring, cv, n_jobs, verbose, fit_params, pre_dispatch)
   1359                                               train, test, verbose, None,
   1360                                               fit_params)
-> 1361                       for train, test in cv)
   1362     return np.array(scores)[:, 0]
   1363 

/Users/rbussman/anaconda/lib/python2.7/site-packages/sklearn/externals/joblib/parallel.pyc in __call__(self, iterable)
    657             self._iterating = True
    658             for function, args, kwargs in iterable:
--> 659                 self.dispatch(function, args, kwargs)
    660 
    661             if pre_dispatch == "all" or n_jobs == 1:

/Users/rbussman/anaconda/lib/python2.7/site-packages/sklearn/externals/joblib/parallel.pyc in dispatch(self, func, args, kwargs)
    404         """
    405         if self._pool is None:
--> 406             job = ImmediateApply(func, args, kwargs)
    407             index = len(self._jobs)
    408             if not _verbosity_filter(index, self.verbose):

/Users/rbussman/anaconda/lib/python2.7/site-packages/sklearn/externals/joblib/parallel.pyc in __init__(self, func, args, kwargs)
    138         # Don't delay the application, to avoid keeping the input
    139         # arguments in memory
--> 140         self.results = func(*args, **kwargs)
    141 
    142     def get(self):

/Users/rbussman/anaconda/lib/python2.7/site-packages/sklearn/cross_validation.pyc in _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score, return_parameters, error_score)
   1476 
   1477     else:
-> 1478         test_score = _score(estimator, X_test, y_test, scorer)
   1479         if return_train_score:
   1480             train_score = _score(estimator, X_train, y_train, scorer)

/Users/rbussman/anaconda/lib/python2.7/site-packages/sklearn/cross_validation.pyc in _score(estimator, X_test, y_test, scorer)
   1532         score = scorer(estimator, X_test)
   1533     else:
-> 1534         score = scorer(estimator, X_test, y_test)
   1535     if not isinstance(score, numbers.Number):
   1536         raise ValueError("scoring must return a number, got %s (%s) instead."

/Users/rbussman/anaconda/lib/python2.7/site-packages/sklearn/metrics/scorer.pyc in __call__(self, clf, X, y, sample_weight)
    121                                                  **self._kwargs)
    122         else:
--> 123             return self._sign * self._score_func(y, y_pred, **self._kwargs)
    124 
    125     def _factory_args(self):

/Users/rbussman/anaconda/lib/python2.7/site-packages/sklearn/metrics/classification.pyc in log_loss(y_true, y_pred, eps, normalize, sample_weight)
   1406 
   1407     # Clipping
-> 1408     Y = np.clip(y_pred, eps, 1 - eps)
   1409 
   1410     # This happens in cases when elements in y_pred have type "str".

/Users/rbussman/anaconda/lib/python2.7/site-packages/numpy/core/fromnumeric.pyc in clip(a, a_min, a_max, out)
   1625         clip = a.clip
   1626     except AttributeError:
-> 1627         return _wrapit(a, 'clip', a_min, a_max, out)
   1628     return clip(a_min, a_max, out)
   1629 

/Users/rbussman/anaconda/lib/python2.7/site-packages/numpy/core/fromnumeric.pyc in _wrapit(obj, method, *args, **kwds)
     43     except AttributeError:
     44         wrap = None
---> 45     result = getattr(asarray(obj), method)(*args, **kwds)
     46     if wrap:
     47         if not isinstance(result, mu.ndarray):

/Users/rbussman/anaconda/lib/python2.7/site-packages/numpy/core/numeric.pyc in asarray(a, dtype, order)
    460 
    461     """
--> 462     return array(a, dtype, copy=False, order=order)
    463 
    464 def asanyarray(a, dtype=None, order=None):

ValueError: could not broadcast input array from shape (888,2) into shape (888)
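
The failure appears to come from the label encoding: with the one-hot y the forest is a multi-output classifier, so the 'log_loss' scorer receives per-column probability arrays that do not stack into the single array np.clip expects. One way around it (a sketch that was not run here, reusing X, foundlist, clf and train_test_split from above and assuming integer class labels plus predict_proba) is to score a held-out split by hand:

from sklearn.preprocessing import LabelEncoder

# Encode whale IDs as integer labels so this is one multi-class problem,
# not 429 independent binary outputs.
le = LabelEncoder()
y_int = le.fit_transform(foundlist)

Xtr, Xte, ytr, yte = train_test_split(X, y_int, test_size=0.3)
clf.fit(Xtr, ytr)
proba = clf.predict_proba(Xte)  # (n_test, n_classes_seen_in_training)

# Probability assigned to the true class of each test image; classes never
# seen during training fall back to the clipping floor.
eps = 1e-15
col_of = {c: j for j, c in enumerate(clf.classes_)}
p_true = np.array([proba[i, col_of[c]] if c in col_of else 0.0
                   for i, c in enumerate(yte)])
p_true = np.clip(p_true, eps, 1 - eps)
print(-np.mean(np.log(p_true)))  # multi-class log loss on the held-out split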

In [11]:
clf.fit(X, y)


Out[11]:
RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
            max_depth=None, max_features='auto', max_leaf_nodes=None,
            min_samples_leaf=1, min_samples_split=2,
            min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,
            oob_score=False, random_state=None, verbose=0,
            warm_start=False)

In [12]:
test = pd.read_csv(dloc + 'test.csv')

In [14]:
Xtest = test

In [15]:
ypredict = clf.predict(Xtest)

In [16]:
ypredict


Out[16]:
array([2, 0, 9, ..., 3, 9, 2])

In [17]:
dfpredict = pd.DataFrame(ypredict)

In [22]:
dfpredict.columns = ['Label']

In [26]:
dfpredict['ImageId'] = np.arange(28000) + 1

In [27]:
dfpredict.to_csv(dloc + 'predict_RFbenchmark.csv', index=False)

In [80]:
# Shrink every *_small.jpg image to a 20x30 RGB thumbnail and save it next to
# the original with a '_verysmall' suffix (these are the files read in above).
files = glob('../../BigData/kaggle-right-whale/imgs/*_small.jpg')
for file in files:
    im1 = imread(file)
    imsmall = resize(im1, (20, 30, 3))
    splitfile = os.path.splitext(file)
    fsmall = splitfile[0] + '_verysmall' + splitfile[1]
    imsave(fsmall, imsmall)
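
Note that resize returns a float image scaled to [0, 1], so imsave converts it back to 8-bit when writing the JPEG. If the float-to-uint8 precision-loss warning is a nuisance, the save line inside the loop could make the conversion explicit (a sketch, not what was run above):

from skimage import img_as_ubyte

imsave(fsmall, img_as_ubyte(imsmall))  # explicit float -> uint8 conversion before writing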

In [ ]: