In [4]:
from keras.models import Sequential
from keras.layers import Dense,Conv1D, Conv2D, MaxPooling1D, MaxPooling2D, Flatten, Activation, Dropout, Reshape
from sklearn.preprocessing import MultiLabelBinarizer
from PIL import Image
from skimage import io
import pandas as pd
import numpy as np
import sklearn

csv = pd.read_csv('train_v2.csv')
dataSet = np.zeros(shape=(100,256,256,4))
dataSet2 = np.zeros(shape=(100,256,256,4))
y_train = csv.loc[:99,'tags']
y_test = csv.loc[100:199,'tags']
categories = ('agriculture', 'artisinal_mine', 'bare_ground', 'blooming', 'blow_down', 'clear', 'cloudy', 'conventional_mine', 'cultivation', 'habitation', 'haze', 'partly_cloudy', 'primary', 'road', 'selective_logging', 'slash_burn', 'water')

for item in np.arange(100):  
    y_train[item] = y_train[item].split()
    y_test[item+100] = y_test[item+100].split()

y_binary = MultiLabelBinarizer()
y_binary = y_binary.fit_transform(y_train)
y_binary2 = MultiLabelBinarizer()
y_binary2 = y_binary2.fit_transform(y_test)



for item in np.arange(100):
    pic = csv.loc[item, 'image_name']
    imArr = np.asarray(io.imread('train-tif-v2/' + pic + '.tif'),dtype="uint8" )
    dataSet[item] = imArr

for item in np.arange(101,200):
    pic2 = csv.loc[item, 'image_name']
    imArr2 = np.asarray(io.imread('train-tif-v2/' + pic + '.tif'),dtype="uint8" )
    dataSet2[item-101] = imArr2


print(y_binary.shape)
print(dataSet.shape)


(100, 15)
(100, 256, 256, 4)

In [ ]:
model = Sequential()
model.add(Conv2D(512, kernel_size=(2, 2), padding='same', input_shape=(256, 256, 4)))
model.add(MaxPooling2D(pool_size=(4, 4)))
model.add(Conv2D(1024, kernel_size=(2, 2), padding='same', input_shape=(256, 256, 4)))
model.add(MaxPooling2D(pool_size=(4, 4)))
model.add(Conv2D(1024, kernel_size=(2, 2), padding='same', input_shape=(256, 256, 4)))
model.add(MaxPooling2D(pool_size=(4, 4)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Dense(15))
model.add(Activation('sigmoid'))
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(dataSet, y_binary, epochs=20)


Epoch 1/20
100/100 [==============================] - 109s - loss: 15.4038 - acc: 0.2300    
Epoch 2/20
100/100 [==============================] - 103s - loss: 13.9778 - acc: 0.2800    
Epoch 3/20
100/100 [==============================] - 103s - loss: 14.7044 - acc: 0.2800    
Epoch 4/20
100/100 [==============================] - 103s - loss: 14.2329 - acc: 0.2800    
Epoch 5/20
100/100 [==============================] - 110s - loss: 13.9381 - acc: 0.2800    
Epoch 6/20
100/100 [==============================] - 103s - loss: 14.5440 - acc: 0.2800    
Epoch 7/20
100/100 [==============================] - 103s - loss: 14.4856 - acc: 0.2800    
Epoch 8/20
100/100 [==============================] - 105s - loss: 14.4717 - acc: 0.2800    
Epoch 9/20
100/100 [==============================] - 122s - loss: 13.9942 - acc: 0.2800    
Epoch 10/20
100/100 [==============================] - 116s - loss: 15.3341 - acc: 0.2800    
Epoch 11/20
100/100 [==============================] - 110s - loss: 15.6158 - acc: 0.2800    
Epoch 12/20
 32/100 [========>.....................] - ETA: 78s - loss: 14.8782 - acc: 0.3438

In [ ]: