In [1]:
%matplotlib inline

path = "data/galaxy/sample/"
#path = "data/galaxy/"

train_path = path + 'train/'
valid_path = path + 'valid/'
test_path = path + 'test/'
results_path = path + 'results/'
model_path = path + 'model/'

In [2]:
from utils import *


Using gpu device 0: Tesla K80 (CNMeM is disabled, cuDNN 5103)
/home/ubuntu/anaconda2/lib/python2.7/site-packages/theano/sandbox/cuda/__init__.py:600: UserWarning: Your cuDNN version is more recent than the one Theano officially supports. If you see any problems, try updating Theano or downgrading cuDNN to version 5.
  warnings.warn(warn)
Using Theano backend.

In [3]:
batch_size = 32
num_epoch = 1

In [4]:
import pandas as pd
df = pd.read_csv(path+ "train.csv")
df_val = pd.read_csv(path+ "valid.csv")

In [69]:
# custom iterator for regression
import Iterator; reload(Iterator)
from Iterator import DirectoryIterator

imgen = image.ImageDataGenerator()
batches = DirectoryIterator(train_path, imgen, 
                            class_mode=None, 
                            dataframe=df,
                            batch_size=4,
                            target_size=(128,128))

val_imgen = image.ImageDataGenerator()
val_batches = DirectoryIterator(valid_path, val_imgen, 
                                class_mode=None, 
                                dataframe=df_val,
                                batch_size=4,
                                target_size=(128,128))


Found 1500 images belonging to 1 classes.
Found 1000 images belonging to 1 classes.

In [75]:
model = Sequential([
        BatchNormalization(axis=1, input_shape=(3,128,128)),
        Flatten(),
        Dense(37, activation='softmax')
    ])

In [76]:
model.compile(Adam(), loss='mean_squared_error')
model.fit_generator(batches, batches.nb_sample, nb_epoch=2, validation_data=val_batches, 
                 nb_val_samples=val_batches.nb_sample)


Epoch 1/2
1500/1500 [==============================] - 35s - loss: 0.0714 - val_loss: 0.0718
Epoch 2/2
1500/1500 [==============================] - 35s - loss: 0.0714 - val_loss: 0.0718
Out[76]:
<keras.callbacks.History at 0x7f309ca25110>

In [77]:
model.summary()


____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
batchnormalization_8 (BatchNormal(None, 3, 128, 128)   6           batchnormalization_input_7[0][0] 
____________________________________________________________________________________________________
flatten_9 (Flatten)              (None, 49152)         0           batchnormalization_8[0][0]       
____________________________________________________________________________________________________
dense_10 (Dense)                 (None, 37)            1818661     flatten_9[0][0]                  
====================================================================================================
Total params: 1818667
____________________________________________________________________________________________________

In [78]:
np.round(model.predict_generator(batches, batches.N)[:5],2)


Out[78]:
array([[ 0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
         0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
         0.],
       [ 0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
         0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
         0.],
       [ 0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
         0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
         0.],
       [ 0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
         0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
         0.],
       [ 0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
         0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
         0.]], dtype=float32)

In [85]:
model = Sequential([
        BatchNormalization(axis=1, input_shape=(3,128,128)),
        Flatten(),
        Dense(37, activation='softmax')
    ])
model.compile(Adam(lr=1e-30), loss='mean_squared_error')
model.fit_generator(batches, batches.nb_sample, nb_epoch=2, validation_data=val_batches, 
                 nb_val_samples=val_batches.nb_sample)


Epoch 1/2
1500/1500 [==============================] - 31s - loss: 0.0675 - val_loss: 0.0677
Epoch 2/2
1500/1500 [==============================] - 30s - loss: 0.0675 - val_loss: 0.0678
Out[85]:
<keras.callbacks.History at 0x7f309cc35b10>

In [86]:
np.round(model.predict_generator(batches, batches.N)[:5],2)


Out[86]:
array([[ 0.01,  0.  ,  0.01,  0.01,  0.05,  0.01,  0.  ,  0.01,  0.02,  0.02,  0.03,  0.04,  0.1 ,
         0.01,  0.01,  0.  ,  0.01,  0.06,  0.  ,  0.01,  0.03,  0.01,  0.28,  0.  ,  0.01,  0.  ,
         0.02,  0.01,  0.01,  0.02,  0.02,  0.01,  0.02,  0.01,  0.03,  0.08,  0.01],
       [ 0.  ,  0.01,  0.01,  0.07,  0.22,  0.01,  0.  ,  0.  ,  0.01,  0.02,  0.02,  0.03,  0.02,
         0.03,  0.05,  0.01,  0.  ,  0.02,  0.  ,  0.02,  0.11,  0.01,  0.19,  0.  ,  0.01,  0.01,
         0.02,  0.03,  0.01,  0.  ,  0.  ,  0.  ,  0.01,  0.  ,  0.02,  0.01,  0.01],
       [ 0.01,  0.  ,  0.  ,  0.03,  0.12,  0.01,  0.  ,  0.01,  0.03,  0.09,  0.01,  0.03,  0.01,
         0.02,  0.06,  0.  ,  0.  ,  0.04,  0.01,  0.15,  0.05,  0.01,  0.11,  0.  ,  0.02,  0.  ,
         0.02,  0.01,  0.01,  0.  ,  0.01,  0.  ,  0.03,  0.  ,  0.01,  0.03,  0.04],
       [ 0.01,  0.01,  0.  ,  0.04,  0.13,  0.01,  0.  ,  0.01,  0.01,  0.05,  0.01,  0.02,  0.02,
         0.08,  0.07,  0.01,  0.  ,  0.03,  0.01,  0.07,  0.1 ,  0.01,  0.06,  0.  ,  0.05,  0.01,
         0.02,  0.05,  0.01,  0.01,  0.  ,  0.  ,  0.04,  0.01,  0.01,  0.01,  0.02],
       [ 0.01,  0.  ,  0.01,  0.02,  0.07,  0.01,  0.  ,  0.01,  0.02,  0.04,  0.01,  0.05,  0.11,
         0.06,  0.02,  0.01,  0.  ,  0.05,  0.01,  0.02,  0.05,  0.02,  0.09,  0.  ,  0.03,  0.04,
         0.01,  0.05,  0.01,  0.  ,  0.01,  0.  ,  0.04,  0.01,  0.03,  0.02,  0.02]], dtype=float32)

In [ ]: