In [1]:
import sys
sys.path.append('/usr/lib/python2.7/dist-packages')
import numpy as np
import cv2
import urllib
from sklearn import cross_validation
import pandas as pd
from matplotlib import pyplot as plt

In [2]:
# Data is the Crowdflower.com Dress Patterns data set.
# https://www.crowdflower.com/wp-content/uploads/2016/07/dress_patterns.csv
# load dataset and one-hot encode output classes.
imgdf = pd.read_csv('dress_patterns.csv', header=0)
categories = pd.get_dummies(imgdf[['category']]).as_matrix()
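
As a quick aside (a toy illustration with made-up labels), pd.get_dummies turns each distinct category string into its own 0/1 indicator column, which is the one-hot format the models below expect:

demo = pd.DataFrame({'category': ['plain', 'floral', 'plain']})
print pd.get_dummies(demo[['category']])
# one indicator column per class: category_floral and category_plain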

In [48]:
#import glob
#filenames = glob.glob('/home/nick/Documents/LewisUniversity/MachineLearning/Project/images/*.png')

In [49]:
#fn2=[]
#for g in filenames:
#    fn2.append(g[-40:])

In [50]:
#imgdf.image_url[3][-40:] in fn2


Out[50]:
True

In [52]:
#for u in imgdf.image_url:
#    if u[-40:] in fn2:
#        continue
#    else:
#        #url = urllib.urlopen(u)
#        #resource = url.read()
#        #outfile = open("/home/nick/Documents/LewisUniversity/MachineLearning/Project/" + u[-40:],"wb")
#        #outfile.write(resource)
#        #outfile.close()
#        f = open('/home/nick/Documents/LewisUniversity/MachineLearning/Project/' + u[-40:] + '.txt', 'w')
#        f.write('file Not found')
#        f.close()

In [60]:
##imgdf.image_url[15701]
#filelist = pd.DataFrame(filenames)

In [5]:
#print X.shape
#print categories.shape
#print A.shape

#f = open('/home/nick/Documents/LewisUniversity/MachineLearning/Project/status/statuses.txt', 'a')
#f.write('hello'+'world'+'\n')
#f.close()

In [70]:
#dl = imgdf.image_url
#for u in fn2:
#    if 
#dl[1][-40:]


Out[70]:
'ca5ca27caca94f9fb0617c226477ae35.jpg.png'

In [75]:
#for locs in imgdf.image_url[0:2]:
#            fn = "/home/nick/Documents/LewisUniversity/MachineLearning/Project/images/" + locs[-40:]
#            url = urllib.urlopen(fn)
#            resource = url.read()
#            imgarr = np.asarray(bytearray(resource), dtype=np.uint8)
#            img = cv2.imdecode(imgarr,-1)
#            cv2.imshow("images", img)
#            cv2.waitKey(0)
#            cv2.destroyAllWindows()

In [3]:
# OpenCV stores images in BGR order, so [0, 0, 255] is pure red; this single
# value serves as both the lower and upper bound for cv2.inRange below, so the
# mask keeps only the exactly-red pixels of the annotation rectangles.
redness = [0, 0, 255]
bound = np.array(redness, dtype="uint8")
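
To make the masking step in the next cell concrete, here is a minimal sketch (on a synthetic 2x2 image, not from the dataset) of how cv2.inRange with the same array as both lower and upper bound keeps only exactly-red pixels:

demo_img = np.array([[[0, 0, 255], [255, 0, 0]],
                     [[0, 0, 254], [0, 0, 255]]], dtype=np.uint8)
print cv2.inRange(demo_img, bound, bound)
# 255 where the pixel equals (0, 0, 255) exactly, 0 elsewhere:
# [[255   0]
#  [  0 255]]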

In [183]:
# X holds a single re-ordered (3, 50, 50) image; A accumulates all of them.
X = np.empty(shape=(1, 3, 50, 50), dtype='float32')
A = np.empty(shape=(0, 3, 50, 50), dtype='float32')


for locs in imgdf.image_url:
    
       try:
            fn = "/home/nick/Documents/LewisUniversity/MachineLearning/Project/images/" + locs[-40:]
            url = urllib.urlopen(fn)
            resource = url.read()
            imgarr = np.asarray(bytearray(resource), dtype=np.uint8)
            img = cv2.imdecode(imgarr,-1)
            
            #save the file locally too
            #outfile = open(locs[-40:],"wb")
            #outfile.write(resource)
            #outfile.close()

            # find the colors within the specified boundaries and apply
            mask = cv2.inRange(img, bound, bound)
            output = cv2.bitwise_and(img, img, mask = mask)

            # find the corners using goodFeaturesToTrack
            gray = cv2.cvtColor(output,cv2.COLOR_BGR2GRAY)
            corners = cv2.goodFeaturesToTrack(gray,8,0.01,10)
            corners = np.int0(corners)

            # Get the min and max corners for our rectangle definition.
            x1 = x2 = y1 = y2 = None
            for i in corners:
                a, b = i[0][0], i[0][1]
                if x1 is None or a < x1:
                    x1 = a
                if x2 is None or a > x2:
                    x2 = a
                if y1 is None or b < y1:
                    y1 = b
                if y2 is None or b > y2:
                    y2 = b

            # Crop the image to a square centered on the middle of the rectangle,
            # so we have consistently shaped data across all observations.
            if (y2-y1) < (x2-x1):
                ymin = y1
                ymax = y2
                xmin = x1+(((x2-x1)-(y2-y1))/2)
                xmax = x2-(((x2-x1)-(y2-y1))/2)
            else:
                ymin = y1+(((y2-y1)-(x2-x1))/2)
                ymax = y2-(((y2-y1)-(x2-x1))/2)
                xmin = x1
                xmax = x2

            # crop the image to the computed range
            #print ymin, ymax, xmin, xmax, ymax-ymin, xmax-xmin
            crop_img = img[ymin:ymax, xmin:xmax]

            # resize image to scaled 50 by 50
            resized_img = cv2.resize(crop_img, (50, 50)) 
            #hsv_img = cv2.cvtColor(resized_img, cv2.COLOR_RGB2HSV)

            # We experimented with the HSV color format, but it didn't match the
            # Keras examples, so we went back to RGB.
            X_temp = resized_img/255.
            
            # re-arrange the array from (50, 50, 3) to (3, 50, 50), i.e.
            # channels first, to match the model's input shape.
            X[0] = X_temp.transpose(2, 0, 1)
            
            
            #X = X_temp[:,:,:].flatten()
            #Append the record to the array.
            
            A = np.vstack([A, X])
            
            #Save a status so we can see how we are doing
            f = open('/home/nick/Documents/LewisUniversity/MachineLearning/Project/status/statuses.txt', 'a')
            f.write(locs[-40:]+'\n')
            f.close()
            

       except Exception:
            # A handful of files fail (grayscale images, red rectangles on
            # only two sides, etc.); log them so the labels can be cleaned up later.
            #print "Bad file. ", fn
            f = open('/home/nick/Documents/LewisUniversity/MachineLearning/Project/status/badimages.txt', 'a')
            f.write(locs+'\n')
            f.close()

 

            #cv2.imshow("images", img)
            #cv2.waitKey(0)
            #cv2.destroyAllWindows()
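
One note on the accumulation step above: np.vstack copies the whole accumulated array on every iteration, which is O(n^2) overall. A minimal sketch (with stand-in arrays) of the usual alternative, collecting the per-image arrays in a list and stacking once at the end:

chunks = []
for _ in range(4):                                 # stand-in for the image loop
    fake = np.zeros((50, 50, 3), dtype='float32')  # stand-in for X_temp
    chunks.append(fake.transpose(2, 0, 1))         # re-order to (3, 50, 50)
A_demo = np.asarray(chunks)                        # one allocation at the end
print A_demo.shape                                 # (4, 3, 50, 50)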

In [184]:
A.shape


Out[184]:
(15615, 3, 50, 50)

In [185]:
# Because of a few bad images (grayscale, red rectangle on only 2 sides, etc.),
# we need to remove these from the label set y.

badimages = pd.read_csv('/home/nick/Documents/LewisUniversity/MachineLearning/Project/status/badimages.txt', header=None)

#categories = pd.get_dummies(imgdf[['category']]).as_matrix()
#pd.get_dummies(imgdf[['category']])

cleanup = imgdf['image_url'].isin( badimages[0])
categories = pd.get_dummies(imgdf.loc[~cleanup].category).as_matrix()
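
As an added sanity check (assuming the cells above ran in order), the cleaned label matrix should now line up row-for-row with the image array built earlier:

print categories.shape[0] == A.shape[0]   # True: one label row per image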

In [8]:
#imgdf.loc[~cleanup]
#imgdf.category [~badimages]
#badimages

In [48]:
#imgdf.loc[~cleanup].category.shape
#categories[5]
#predicted[5]
#imgdf[['category']]
#categories
#pd.get_dummies(imgdf.loc[~cleanup].category)
#categories.idxmax(1)
#pd.DataFrame(categories).idxmax(1)
#categories.shape
#y_test.shape
#pd.DataFrame(y_test).idxmax(1)

In [187]:
#save the data set
#print locs[-40:]
np.save('/home/nick/Documents/LewisUniversity/MachineLearning/Project/visionmatrix',A)

In [188]:
#print X.shape
print categories.shape
print A.shape


(15615, 17)
(15615, 3, 50, 50)

In [ ]:


In [190]:
# split up data into train and test sets.
X_train, X_test, y_train, y_test = cross_validation.train_test_split(
    A, categories, test_size=0.3, random_state=20)
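
Given the class imbalance we discover later, a stratified split may be worth considering. A sketch, assuming a scikit-learn version (0.17+) whose train_test_split supports the stratify argument; labels is a hypothetical name for the class indices:

labels = categories.argmax(1)
X_train_s, X_test_s, y_train_s, y_test_s = cross_validation.train_test_split(
    A, categories, test_size=0.3, random_state=20, stratify=labels)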

In [ ]:


In [191]:
print X_train.shape
print y_train.shape
print X_test.shape
print y_test.shape


(10930, 3, 50, 50)
(10930, 17)
(4685, 3, 50, 50)
(4685, 17)

Week 7: Implement Model


In [6]:
# re-load the saved data if needed
A = np.load('/home/nick/Documents/LewisUniversity/MachineLearning/Project/visionmatrix.npy')

In [192]:
#Let's start with the model parameters defined in the Week6 notebook for this data, changing the input shape as appropriate.
from keras.models import Sequential
from keras.layers import Dense, Dropout, Convolution2D, MaxPooling2D, Flatten
from keras.regularizers import l2, l1
from keras.optimizers import SGD

from keras import backend as K
K.set_image_dim_ordering('th')

model = Sequential()
model.add(Convolution2D(32, 5, 5, border_mode='valid', input_shape=(3, 50, 50), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(17, activation='softmax'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

In [193]:
model.summary()


____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
convolution2d_16 (Convolution2D) (None, 32, 46, 46)    2432        convolution2d_input_11[0][0]     
____________________________________________________________________________________________________
maxpooling2d_3 (MaxPooling2D)    (None, 32, 23, 23)    0           convolution2d_16[0][0]           
____________________________________________________________________________________________________
dropout_3 (Dropout)              (None, 32, 23, 23)    0           maxpooling2d_3[0][0]             
____________________________________________________________________________________________________
flatten_12 (Flatten)             (None, 16928)         0           dropout_3[0][0]                  
____________________________________________________________________________________________________
dense_20 (Dense)                 (None, 128)           2166912     flatten_12[0][0]                 
____________________________________________________________________________________________________
dense_21 (Dense)                 (None, 17)            2193        dense_20[0][0]                   
====================================================================================================
Total params: 2171537
____________________________________________________________________________________________________

In [194]:
# Because of time constraints, we'll just run 10 epochs instead of, say, 20.
# Note that fit() returns a History object, so the trained model is reached
# below via model.model.
model = model.fit(X_train, y_train, batch_size=256,
          nb_epoch=10, verbose=2, validation_data=(X_test, y_test))


Train on 10930 samples, validate on 4685 samples
Epoch 1/10
49s - loss: 1.8180 - acc: 0.5196 - val_loss: 1.5847 - val_acc: 0.5895
Epoch 2/10
48s - loss: 1.4970 - acc: 0.6005 - val_loss: 1.4963 - val_acc: 0.6239
Epoch 3/10
51s - loss: 1.3796 - acc: 0.6298 - val_loss: 1.4129 - val_acc: 0.6356
Epoch 4/10
52s - loss: 1.3313 - acc: 0.6319 - val_loss: 1.4162 - val_acc: 0.6211
Epoch 5/10
54s - loss: 1.2337 - acc: 0.6573 - val_loss: 1.4069 - val_acc: 0.6442
Epoch 6/10
51s - loss: 1.1610 - acc: 0.6687 - val_loss: 1.5140 - val_acc: 0.6090
Epoch 7/10
52s - loss: 1.1224 - acc: 0.6770 - val_loss: 1.4472 - val_acc: 0.6333
Epoch 8/10
51s - loss: 1.0273 - acc: 0.7057 - val_loss: 1.4351 - val_acc: 0.6427
Epoch 9/10
49s - loss: 0.9726 - acc: 0.7134 - val_loss: 1.4945 - val_acc: 0.6301
Epoch 10/10
51s - loss: 0.8751 - acc: 0.7409 - val_loss: 1.4887 - val_acc: 0.6312

In [195]:
predicted = model.model.predict_classes(X_test)


4685/4685 [==============================] - 7s     

In [196]:
y_train.shape


Out[196]:
(10930, 17)

In [197]:
predicted


Out[197]:
array([9, 4, 9, ..., 9, 9, 9])

In [198]:
from sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix

# put the y_test back into a format of non-one-hot encoded for comparison
y_test_orig = pd.DataFrame(y_test).idxmax(1)
print "Convolution Network Results\nConfusion Matrix: "
print confusion_matrix(y_test_orig,predicted)
print "\n Classifcation Report"
print classification_report(y_test_orig,predicted)


Convolutional Network Results
Confusion Matrix: 
[[   5    6    2    0   44    0    0    0    0   64    3    0    0    0
     0    4    1]
 [   5   13    3    0   41    0    0    0    0   20    5    0    0    0
     0    7    2]
 [   0    1    2    0   38    0    0    0    0   26    3    0    0    0
     0    3    0]
 [   1    1    0    0   14    0    0    0    0   11    4    0    0    0
     0    4    0]
 [  18   30   13    0  443    1    0    0    0  228   14    0    0    4
     0   52   13]
 [   1    9    0    0   27    0    0    0    0   35    4    0    0    0
     0    8    2]
 [   0    0    0    0    4    0    0    0    0    6    0    0    0    0
     0    5    1]
 [   1    1    1    0   47    1    0    0    0   58    1    0    0    0
     0    3    2]
 [   1    0    0    0    3    0    0    0    2   15    0    0    0    0
     0    1    2]
 [  12    6    4    0   88    0    0    0    0 2385    5    0    0    1
     0   13    9]
 [   4    7    2    0   70    1    0    0    0   72   21    0    0    1
     0   16    1]
 [   0    1    0    0   14    0    0    0    0   18    0    0    0    0
     0    0    0]
 [   0    0    0    0    1    0    0    0    0    1    0    0    0    0
     0    0    0]
 [   2    7    4    0   47    0    0    0    0   58    4    0    0    2
     0   16    3]
 [   1    1    2    0    3    0    0    0    0    8    0    0    0    0
     1    3    0]
 [   2    7    1    0   43    0    0    0    0   67    4    0    0    1
     0   79    2]
 [   5    8    3    0   92    1    0    0    0   47    2    0    0    1
     0   11    4]]

 Classification Report
             precision    recall  f1-score   support

          0       0.09      0.04      0.05       129
          1       0.13      0.14      0.13        96
          2       0.05      0.03      0.04        73
          3       0.00      0.00      0.00        35
          4       0.43      0.54      0.48       816
          5       0.00      0.00      0.00        86
          6       0.00      0.00      0.00        16
          7       0.00      0.00      0.00       115
          8       1.00      0.08      0.15        24
          9       0.76      0.95      0.85      2523
         10       0.30      0.11      0.16       195
         11       0.00      0.00      0.00        33
         12       0.00      0.00      0.00         2
         13       0.20      0.01      0.03       143
         14       1.00      0.05      0.10        19
         15       0.35      0.38      0.37       206
         16       0.10      0.02      0.04       174

avg / total       0.54      0.63      0.57      4685

This model is by no means great, but it does reach about 0.63 average recall and 0.54 average precision.
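
For reference, the overall accuracy can be read off directly with accuracy_score, reusing the imports and variables above:

print accuracy_score(y_test_orig, predicted)   # ~0.63, matching val_acc above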


In [199]:
# Let's try a variant of the Keras documentation example:
# apply a 3x3 convolution with 16 output filters on a 50x50 image.
model = Sequential()
model.add(Convolution2D(16, 3, 3, border_mode='valid', input_shape=(3, 50, 50), activation='relu'))

# add a 3x3 convolution on top, with 16 output filters:
#model.add(Convolution2D(16, 3, 3, border_mode='same', activation='relu'))

# Flatten, and add a dense layer to match the 17 categories.
# (Note the relu activation on the output layer; softmax is the usual choice
# for multi-class output, which may explain the collapse seen below.)
model.add(Flatten())
model.add(Dense(17, activation='relu'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

In [200]:
model.summary()


____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
convolution2d_17 (Convolution2D) (None, 16, 48, 48)    448         convolution2d_input_12[0][0]     
____________________________________________________________________________________________________
flatten_13 (Flatten)             (None, 36864)         0           convolution2d_17[0][0]           
____________________________________________________________________________________________________
dense_22 (Dense)                 (None, 17)            626705      flatten_13[0][0]                 
====================================================================================================
Total params: 627153
____________________________________________________________________________________________________

In [201]:
model = model.fit(X_train, y_train,  batch_size = 256,
          nb_epoch = 5, verbose=2, validation_data=(X_test,y_test))


Train on 10930 samples, validate on 4685 samples
Epoch 1/5
15s - loss: 4.6151 - acc: 0.5170 - val_loss: 4.3438 - val_acc: 0.5385
Epoch 2/5
15s - loss: 4.3493 - acc: 0.5313 - val_loss: 4.2742 - val_acc: 0.5385
Epoch 3/5
16s - loss: 4.3029 - acc: 0.5313 - val_loss: 4.2773 - val_acc: 0.5385
Epoch 4/5
16s - loss: 4.2947 - acc: 0.5313 - val_loss: 4.2739 - val_acc: 0.5385
Epoch 5/5
17s - loss: 4.2916 - acc: 0.5313 - val_loss: 4.2709 - val_acc: 0.5385

In [202]:
predicted = model.model.predict_classes(X_test)


4672/4685 [============================>.] - ETA: 0s

In [203]:
# put the y_test back into a format of non-one-hot encoded for comparison
y_test_orig = pd.DataFrame(y_test).idxmax(1)
print "Convolution Network Results\nConfusion Matrix: "
print confusion_matrix(y_test_orig,predicted)
print "\n Classifcation Report"
print classification_report(y_test_orig,predicted)


Convolutional Network Results
Confusion Matrix: 
[[   0    0    0    0    0    0    0    0    0  129    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   96    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   73    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   35    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  816    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   86    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   16    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  115    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   24    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0 2523    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  195    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   33    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0    2    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  143    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   19    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  206    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  174    0    0    0    0
     0    0    0]]

 Classification Report
             precision    recall  f1-score   support

          0       0.00      0.00      0.00       129
          1       0.00      0.00      0.00        96
          2       0.00      0.00      0.00        73
          3       0.00      0.00      0.00        35
          4       0.00      0.00      0.00       816
          5       0.00      0.00      0.00        86
          6       0.00      0.00      0.00        16
          7       0.00      0.00      0.00       115
          8       0.00      0.00      0.00        24
          9       0.54      1.00      0.70      2523
         10       0.00      0.00      0.00       195
         11       0.00      0.00      0.00        33
         12       0.00      0.00      0.00         2
         13       0.00      0.00      0.00       143
         14       0.00      0.00      0.00        19
         15       0.00      0.00      0.00       206
         16       0.00      0.00      0.00       174

avg / total       0.29      0.54      0.38      4685

This is a much worse model: it always predicts class 9, the majority class.
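
A quick way to confirm the collapse (assuming numpy 1.9+ for return_counts):

print np.unique(predicted, return_counts=True)
# expected: (array([9]), array([4685])) - every test record gets class 9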


In [205]:
# Let's try something different: a plain fully connected network, based on the homework from week 5.
model = Sequential()
model.add(Flatten(input_shape=(3, 50, 50)))
model.add(Dense(output_dim=100,
                activation='sigmoid', W_regularizer=l2(0.01)))
model.add(Dense(output_dim=500, activation='sigmoid', W_regularizer=l2(0.01)))
model.add(Dense(output_dim=17, activation='sigmoid', W_regularizer=l2(0.01)))


# Compile model
sgd = SGD(lr=0.1)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

In [206]:
model.summary()


____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
flatten_15 (Flatten)             (None, 7500)          0           flatten_input_4[0][0]            
____________________________________________________________________________________________________
dense_26 (Dense)                 (None, 100)           750100      flatten_15[0][0]                 
____________________________________________________________________________________________________
dense_27 (Dense)                 (None, 500)           50500       dense_26[0][0]                   
____________________________________________________________________________________________________
dense_28 (Dense)                 (None, 17)            8517        dense_27[0][0]                   
====================================================================================================
Total params: 809117
____________________________________________________________________________________________________

In [207]:
# Fit the model
model = model.fit(X_train, y_train, batch_size = 256,
          nb_epoch = 20, verbose=2, validation_data=(X_test,y_test))


Train on 10930 samples, validate on 4685 samples
Epoch 1/20
1s - loss: 5.5208 - acc: 0.5178 - val_loss: 1.7018 - val_acc: 0.5385
Epoch 2/20
1s - loss: 4.8105 - acc: 0.5313 - val_loss: 1.6959 - val_acc: 0.5385
Epoch 3/20
1s - loss: 4.3234 - acc: 0.5313 - val_loss: 1.6966 - val_acc: 0.5385
Epoch 4/20
1s - loss: 3.9117 - acc: 0.5313 - val_loss: 1.6977 - val_acc: 0.5385
Epoch 5/20
1s - loss: 3.5659 - acc: 0.5313 - val_loss: 1.6968 - val_acc: 0.5385
Epoch 6/20
1s - loss: 3.2751 - acc: 0.5313 - val_loss: 1.6926 - val_acc: 0.5385
Epoch 7/20
1s - loss: 3.0324 - acc: 0.5313 - val_loss: 1.6932 - val_acc: 0.5385
Epoch 8/20
1s - loss: 2.8245 - acc: 0.5313 - val_loss: 1.6977 - val_acc: 0.5385
Epoch 9/20
1s - loss: 2.6517 - acc: 0.5313 - val_loss: 1.6941 - val_acc: 0.5385
Epoch 10/20
1s - loss: 2.5054 - acc: 0.5313 - val_loss: 1.6951 - val_acc: 0.5385
Epoch 11/20
1s - loss: 2.3836 - acc: 0.5313 - val_loss: 1.6920 - val_acc: 0.5385
Epoch 12/20
1s - loss: 2.2798 - acc: 0.5313 - val_loss: 1.6921 - val_acc: 0.5385
Epoch 13/20
1s - loss: 2.1941 - acc: 0.5313 - val_loss: 1.7108 - val_acc: 0.5385
Epoch 14/20
1s - loss: 2.1216 - acc: 0.5313 - val_loss: 1.6953 - val_acc: 0.5385
Epoch 15/20
1s - loss: 2.0573 - acc: 0.5313 - val_loss: 1.6970 - val_acc: 0.5385
Epoch 16/20
1s - loss: 2.0051 - acc: 0.5313 - val_loss: 1.7081 - val_acc: 0.5385
Epoch 17/20
1s - loss: 1.9616 - acc: 0.5313 - val_loss: 1.6993 - val_acc: 0.5385
Epoch 18/20
1s - loss: 1.9246 - acc: 0.5313 - val_loss: 1.6971 - val_acc: 0.5385
Epoch 19/20
1s - loss: 1.8942 - acc: 0.5313 - val_loss: 1.7015 - val_acc: 0.5385
Epoch 20/20
1s - loss: 1.8683 - acc: 0.5313 - val_loss: 1.6971 - val_acc: 0.5385

In [208]:
predicted = model.model.predict_classes(X_test)


4685/4685 [==============================] - 0s     

In [209]:
# put the y_test back into a format of non-one-hot encoded for comparison
y_test_orig = pd.DataFrame(y_test).idxmax(1)
print "Neural Network Results\nConfusion Matrix: "
print confusion_matrix(y_test_orig,predicted)
print "\n Classifcation Report"
print classification_report(y_test_orig,predicted)


Neural Network Results
Confusion Matrix: 
[[   0    0    0    0    0    0    0    0    0  129    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   96    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   73    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   35    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  816    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   86    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   16    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  115    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   24    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0 2523    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  195    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   33    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0    2    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  143    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   19    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  206    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  174    0    0    0    0
     0    0    0]]

 Classification Report
             precision    recall  f1-score   support

          0       0.00      0.00      0.00       129
          1       0.00      0.00      0.00        96
          2       0.00      0.00      0.00        73
          3       0.00      0.00      0.00        35
          4       0.00      0.00      0.00       816
          5       0.00      0.00      0.00        86
          6       0.00      0.00      0.00        16
          7       0.00      0.00      0.00       115
          8       0.00      0.00      0.00        24
          9       0.54      1.00      0.70      2523
         10       0.00      0.00      0.00       195
         11       0.00      0.00      0.00        33
         12       0.00      0.00      0.00         2
         13       0.00      0.00      0.00       143
         14       0.00      0.00      0.00        19
         15       0.00      0.00      0.00       206
         16       0.00      0.00      0.00       174

avg / total       0.29      0.54      0.38      4685

Again, this is a lousy model: like the previous one, it only ever predicts class 9.
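
The flat 0.5385 validation accuracy is no coincidence: it is exactly the majority-class baseline, i.e. the share of class 9 in the test split:

print 2523.0 / 4685   # ~0.5385, the val_acc these collapsed models are stuck at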


In [210]:
# Let's try logistic regression, trained with SGD (stochastic logistic regression).
model = Sequential()

model.add(Flatten(input_shape=(3, 50, 50)))
model.add(Dense(output_dim=17, activation='sigmoid', W_regularizer=l1(0.01)))

# Compile model (note: mean squared error here, rather than the cross-entropy
# loss usually paired with logistic regression)
sgd = SGD(lr=0.1)
model.compile(loss='mean_squared_error', optimizer=sgd, metrics=['accuracy'])

In [211]:
model.summary()


____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
flatten_16 (Flatten)             (None, 7500)          0           flatten_input_5[0][0]            
____________________________________________________________________________________________________
dense_29 (Dense)                 (None, 17)            127517      flatten_16[0][0]                 
====================================================================================================
Total params: 127517
____________________________________________________________________________________________________

In [214]:
# Fit the model
model = model.fit(X_train, y_train, batch_size = 256,
          nb_epoch = 100, verbose=0, validation_data=(X_test,y_test))

In [215]:
predicted = model.model.predict_classes(X_test)


4576/4685 [============================>.] - ETA: 0s

In [216]:
# put the y_test back into a format of non-one-hot encoded for comparison
y_test_orig = pd.DataFrame(y_test).idxmax(1)
print "Logistic Regression Results\nConfusion Matrix: "
print confusion_matrix(y_test_orig,predicted)
print "\n Classifcation Report"
print classification_report(y_test_orig,predicted)


Logistic Regression Results
Confusion Matrix: 
[[   0    0    0    0    0    0    0    0    0  129    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   96    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   73    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   35    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  816    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   86    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   16    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  115    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   24    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0 2523    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  195    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   33    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0    2    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  143    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   19    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  206    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  174    0    0    0    0
     0    0    0]]

 Classification Report
             precision    recall  f1-score   support

          0       0.00      0.00      0.00       129
          1       0.00      0.00      0.00        96
          2       0.00      0.00      0.00        73
          3       0.00      0.00      0.00        35
          4       0.00      0.00      0.00       816
          5       0.00      0.00      0.00        86
          6       0.00      0.00      0.00        16
          7       0.00      0.00      0.00       115
          8       0.00      0.00      0.00        24
          9       0.54      1.00      0.70      2523
         10       0.00      0.00      0.00       195
         11       0.00      0.00      0.00        33
         12       0.00      0.00      0.00         2
         13       0.00      0.00      0.00       143
         14       0.00      0.00      0.00        19
         15       0.00      0.00      0.00       206
         16       0.00      0.00      0.00       174

avg / total       0.29      0.54      0.38      4685

These neural-network-based models all give the same results; only the first convolutional network behaved differently. We'll try logistic regression once more, but with the data flattened ahead of time.


In [217]:
# Reshape the training and test data to 2-D: one row of 7500 flattened pixel
# values per observation (equivalent to X.reshape(len(X), -1)).
X_train_new = np.empty(shape=[X_train.shape[0]] + [7500], dtype='float32')
for i in range(X_train.shape[0]):
    X_train_new[i,:] = X_train[i,:,:,:].flatten()

X_test_new = np.empty(shape=[X_test.shape[0]] + [7500], dtype='float32')
for i in range(X_test.shape[0]):
    X_test_new[i,:] = X_test[i,:,:,:].flatten()

In [218]:
print X_test_new.shape
print X_train_new.shape


(4685, 7500)
(10930, 7500)

In [219]:
# Stochastic logistic regression, now on the pre-flattened data
model = Sequential()

model.add(Dense(output_dim=17, input_shape=[7500],
                activation='sigmoid', W_regularizer=l2(0)))

# Compile model
sgd = SGD(lr=0.1)
model.compile(loss='categorical_crossentropy', optimizer=sgd)

In [220]:
model.summary()


____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
dense_30 (Dense)                 (None, 17)            127517      dense_input_5[0][0]              
====================================================================================================
Total params: 127517
____________________________________________________________________________________________________

In [223]:
# Fit the model
model = model.fit(X_train_new, y_train, batch_size = 256,
          nb_epoch = 100, verbose=0, validation_data=(X_test_new,y_test))

In [225]:
predicted = model.model.predict_classes(X_test_new)


4512/4685 [===========================>..] - ETA: 0s

In [226]:
# put the y_test back into a format of non-one-hot encoded for comparison
y_test_orig = pd.DataFrame(y_test).idxmax(1)
print "Logistic Regression Results\nConfusion Matrix: "
print confusion_matrix(y_test_orig,predicted)
print "\n Classifcation Report"
print classification_report(y_test_orig,predicted)


Logistic Regression Results
Confusion Matrix: 
[[   0    0    0    0    0    0    0    0    0  129    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   96    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   73    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   35    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  816    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   86    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   16    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  115    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   24    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0 2523    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  195    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   33    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0    2    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  143    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0   19    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  206    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0  174    0    0    0    0
     0    0    0]]

 Classification Report
             precision    recall  f1-score   support

          0       0.00      0.00      0.00       129
          1       0.00      0.00      0.00        96
          2       0.00      0.00      0.00        73
          3       0.00      0.00      0.00        35
          4       0.00      0.00      0.00       816
          5       0.00      0.00      0.00        86
          6       0.00      0.00      0.00        16
          7       0.00      0.00      0.00       115
          8       0.00      0.00      0.00        24
          9       0.54      1.00      0.70      2523
         10       0.00      0.00      0.00       195
         11       0.00      0.00      0.00        33
         12       0.00      0.00      0.00         2
         13       0.00      0.00      0.00       143
         14       0.00      0.00      0.00        19
         15       0.00      0.00      0.00       206
         16       0.00      0.00      0.00       174

avg / total       0.29      0.54      0.38      4685

And we get the same results as with the previous logistic regression.


In [227]:
# We will try sklearn's logistic regression.
from sklearn.linear_model import LogisticRegression

# convert the one-hot labels back to class indices, as sklearn expects
y_train_orig = pd.DataFrame(y_train).idxmax(1)
y_test_orig = pd.DataFrame(y_test).idxmax(1)

lr = LogisticRegression()
lr.fit(X_train_new, y_train_orig)
predicted = lr.predict(X_test_new)

In [228]:
print "Logistic Regression Results\nConfusion Matrix: "
print confusion_matrix(y_test_orig,predicted)
print "\n Classifcation Report"
print classification_report(y_test_orig,predicted)


Logistic Regression Results
Confusion Matrix: 
[[   3    1    1    0   20    0    0    4    0   92    3    0    0    0
     0    3    2]
 [   5    2    2    0   15    2    0    2    0   58    4    0    0    1
     0    4    1]
 [   3    2    3    0   14    0    0    0    0   48    2    0    0    0
     0    0    1]
 [   0    0    0    0   11    0    0    1    0   19    1    0    0    2
     0    1    0]
 [  26    4    2    1  172    6    0    9    0  506   30    1    0   11
     1   29   18]
 [   7    2    0    0   10    3    0    1    0   56    1    1    0    2
     0    2    1]
 [   1    0    0    0    3    1    0    0    0    9    0    0    0    1
     0    1    0]
 [   3    1    1    0   24    1    0    2    0   70    5    1    0    0
     0    5    2]
 [   0    0    0    0    5    0    0    1    1   17    0    0    0    0
     0    0    0]
 [  26    4    3    0  250    7    0    6    1 2117   43    0    0   17
     0   35   14]
 [   2    0    0    0   28    1    0    2    0  128   21    0    0    4
     0    7    2]
 [   1    1    0    0    5    0    0    0    0   25    0    0    0    0
     0    1    0]
 [   0    0    0    0    0    0    0    0    0    1    0    0    0    0
     0    1    0]
 [   2    2    1    0   18    2    0    1    0   99    3    0    0    8
     0    6    1]
 [   0    0    0    0    1    0    0    2    0    9    5    0    0    0
     1    1    0]
 [   6    0    2    0   28    6    0    2    0  122   21    0    0    8
     0    9    2]
 [   5    2    0    0   32    6    0    1    0  112    7    0    0    1
     0    1    7]]

 Classification Report
             precision    recall  f1-score   support

          0       0.03      0.02      0.03       129
          1       0.10      0.02      0.03        96
          2       0.20      0.04      0.07        73
          3       0.00      0.00      0.00        35
          4       0.27      0.21      0.24       816
          5       0.09      0.03      0.05        86
          6       0.00      0.00      0.00        16
          7       0.06      0.02      0.03       115
          8       0.50      0.04      0.08        24
          9       0.61      0.84      0.70      2523
         10       0.14      0.11      0.12       195
         11       0.00      0.00      0.00        33
         12       0.00      0.00      0.00         2
         13       0.15      0.06      0.08       143
         14       0.50      0.05      0.10        19
         15       0.08      0.04      0.06       206
         16       0.14      0.04      0.06       174

avg / total       0.41      0.50      0.44      4685

With scikit-learn's logistic regression, we get much better (though still not good) precision than with the previous methods (excluding the convolutional network), but worse recall.


In [229]:
# Finally, we'll try the random forest, which is not known for being great with image data.
from sklearn.ensemble import RandomForestClassifier
from sklearn.grid_search import GridSearchCV  # imported for possible tuning, unused below

rfc = RandomForestClassifier(random_state=47, n_estimators=100)
rfc.fit(X_train_new, y_train_orig)


Out[229]:
RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
            max_depth=None, max_features='auto', max_leaf_nodes=None,
            min_samples_leaf=1, min_samples_split=2,
            min_weight_fraction_leaf=0.0, n_estimators=100, n_jobs=1,
            oob_score=False, random_state=47, verbose=0, warm_start=False)
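
As an exploratory aside (not part of the original analysis), the fitted forest exposes feature_importances_, which shows which of the 7500 flattened pixel positions drive its splits:

top = np.argsort(rfc.feature_importances_)[::-1][:10]
print top   # indices into the flattened (channel, row, col) pixel vector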

In [230]:
predicted = rfc.predict(X_test_new)

print "Decision Tree Results\nConfusion Matrix: "
print confusion_matrix(y_test_orig,predicted)
print "\n Classifcation Report"
print classification_report(y_test_orig,predicted)


Random Forest Results
Confusion Matrix: 
[[   2    0    0    0   17    0    0    1    0  109    0    0    0    0
     0    0    0]
 [   0    2    1    0   25    0    0    0    0   67    0    0    0    0
     0    0    1]
 [   0    0    2    0   14    0    0    0    0   57    0    0    0    0
     0    0    0]
 [   0    0    0    0    9    0    0    0    0   26    0    0    0    0
     0    0    0]
 [   1    0    0    1  218    0    0    1    0  595    0    0    0    0
     0    0    0]
 [   0    0    0    0   14    2    0    0    0   70    0    0    0    0
     0    0    0]
 [   0    0    0    0    4    0    0    0    0   12    0    0    0    0
     0    0    0]
 [   0    0    0    0   24    0    0    0    0   90    0    1    0    0
     0    0    0]
 [   0    0    0    0    4    0    0    0    1   19    0    0    0    0
     0    0    0]
 [   1    0    0    0   51    0    0    0    0 2471    0    0    0    0
     0    0    0]
 [   0    0    0    0   23    0    0    0    0  163    9    0    0    0
     0    0    0]
 [   0    1    0    0    5    0    0    0    0   27    0    0    0    0
     0    0    0]
 [   0    0    0    0    0    0    0    0    0    2    0    0    0    0
     0    0    0]
 [   0    0    0    0   28    0    0    0    0  113    0    0    0    2
     0    0    0]
 [   0    0    0    0    2    0    0    0    0   16    0    0    0    0
     1    0    0]
 [   0    0    0    0   63    0    0    0    0  141    0    0    0    0
     0    2    0]
 [   0    0    0    0   53    1    0    0    0  118    0    0    0    0
     0    0    2]]

 Classification Report
             precision    recall  f1-score   support

          0       0.50      0.02      0.03       129
          1       0.67      0.02      0.04        96
          2       0.67      0.03      0.05        73
          3       0.00      0.00      0.00        35
          4       0.39      0.27      0.32       816
          5       0.67      0.02      0.04        86
          6       0.00      0.00      0.00        16
          7       0.00      0.00      0.00       115
          8       1.00      0.04      0.08        24
          9       0.60      0.98      0.75      2523
         10       1.00      0.05      0.09       195
         11       0.00      0.00      0.00        33
         12       0.00      0.00      0.00         2
         13       1.00      0.01      0.03       143
         14       1.00      0.05      0.10        19
         15       1.00      0.01      0.02       206
         16       0.67      0.01      0.02       174

avg / total       0.59      0.58      0.47      4685

The random forest actually works much better than the plain neural networks and the logistic regression models, and it achieves better precision than even the convolutional network; the convolutional network still has the best recall, however. It's still not a great model, but precision and recall are both above 50%. Even so, most test records are predicted as class 9, as in the other models. Apparently our data is unbalanced among the classes: there are far more 'plain' dresses than anything else.


In [267]:
# bar chart of the number of observations per dress category
g = imgdf.groupby(['category']).count()['_unit_id']

objects = g.index
y_pos = np.arange(len(objects))
performance = g

plt.bar(y_pos, performance, align='center', alpha=0.5)
plt.xticks(y_pos, objects, rotation='vertical')
plt.ylabel('Class Counts')
plt.title('Dress Classes in Data')

plt.show()
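
Given that imbalance, a natural next step (a sketch only, not run here) would be to weight the classes during training: scikit-learn estimators accept class_weight='balanced' (0.17+), and Keras's fit() accepts a per-class weight dict.

# hypothetical re-run of the sklearn model with balanced class weights
lr_balanced = LogisticRegression(class_weight='balanced')
lr_balanced.fit(X_train_new, y_train_orig)

# for the Keras models: weight each class inversely to its frequency
counts = categories.sum(axis=0).astype('float')
class_weight = dict((i, categories.shape[0] / (len(counts) * c))
                    for i, c in enumerate(counts))
#model.fit(X_train, y_train, class_weight=class_weight, ...)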