In [1]:
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam
from keras.layers.normalization import BatchNormalization

from read_dataset import read_ceps_with_train_test
import numpy as np
from create_data_ceps import createCepsSS
from read_saved_models import loadCepsStanderdScaler

from matplotlib import pyplot as plt
%matplotlib inline


Using TensorFlow backend.

In [2]:
# Load the MFCC ("ceps") features with a pre-made train/test split.
# NOTE(review): shapes/dtypes are defined by read_ceps_with_train_test in
# read_dataset.py — not visible here; downstream code assumes 20 features.
X_train, X_test, y_train, y_test = read_ceps_with_train_test()

In [3]:
def getStanderizedData(data):
    """Standardize `data` with the previously saved StandardScaler.

    The scaler was fit on 2-D (n_samples, n_features) data, so the input
    is flattened to 2-D, transformed, then restored to its original shape.

    Parameters
    ----------
    data : ndarray of shape (n, ...)
        Raw feature array; first axis is the sample axis.

    Returns
    -------
    ndarray with the same shape as `data`, standardized feature-wise.
    """
    original_shape = data.shape
    flattened = data.reshape(original_shape[0], -1)
    scaler = loadCepsStanderdScaler()
    standardized = scaler.transform(flattened)
    return standardized.reshape(original_shape)

In [4]:
# Standardize both splits with the scaler that was fit during preprocessing
# (the same saved scaler is reused, so train and test share one transform).
X_ss_train = getStanderizedData(X_train)
X_ss_test = getStanderizedData(X_test)

In [5]:
# Network hyperparameters: hidden-layer width and dropout rate per layer.
layer_1_outdim = 500
layer_1_dropout = 0.5
layer_2_outdim = 300
layer_2_dropout = 0.4
layer_3_outdim = 100
layer_3_dropout = 0.5
layer_4_outdim = 50
layer_4_dropout = 0.5

In [6]:
# Fully-connected classifier: four Dense->Dropout->BatchNorm blocks followed
# by a 10-way softmax output. Architecture adapted from the Keras guide:
# https://keras.io/getting-started/sequential-model-guide/
layer_stack = [
    Dense(layer_1_outdim, activation='relu', input_dim=20),
    Dropout(layer_1_dropout),
    BatchNormalization(),

    Dense(layer_2_outdim, activation='relu'),
    Dropout(layer_2_dropout),
    BatchNormalization(),

    Dense(layer_3_outdim, activation='relu'),
    Dropout(layer_3_dropout),
    BatchNormalization(),

    Dense(layer_4_outdim, activation='relu'),
    Dropout(layer_4_dropout),
    BatchNormalization(),

    Dense(10, activation='softmax'),  # one unit per genre class
]
model = Sequential(layer_stack)

# Alternative optimizer kept for reference:
# sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
adam = Adam(lr=0.01, decay=1e-7)

model.compile(loss='categorical_crossentropy',
             optimizer=adam,
             metrics=['accuracy'])
epoch = 20  # number of training epochs used by fit() below

In [7]:
# Train on the standardized features; 20% of the training data is held out
# as a validation split (this is NOT the separate test set evaluated later).
result = model.fit(X_ss_train, y_train, epochs=epoch, batch_size=100,  validation_split=0.2)


Train on 480 samples, validate on 120 samples
Epoch 1/20
480/480 [==============================] - 3s - loss: 2.4908 - acc: 0.1896 - val_loss: 2.1425 - val_acc: 0.2833
Epoch 2/20
480/480 [==============================] - 0s - loss: 2.0572 - acc: 0.2854 - val_loss: 2.1994 - val_acc: 0.3000
Epoch 3/20
480/480 [==============================] - 0s - loss: 1.7899 - acc: 0.3750 - val_loss: 2.2253 - val_acc: 0.3083
Epoch 4/20
480/480 [==============================] - 0s - loss: 1.8074 - acc: 0.3521 - val_loss: 2.2050 - val_acc: 0.3000
Epoch 5/20
480/480 [==============================] - 0s - loss: 1.6936 - acc: 0.3896 - val_loss: 2.1091 - val_acc: 0.3417
Epoch 6/20
480/480 [==============================] - 0s - loss: 1.6352 - acc: 0.3917 - val_loss: 2.0129 - val_acc: 0.3167
Epoch 7/20
480/480 [==============================] - 0s - loss: 1.5602 - acc: 0.4417 - val_loss: 1.9092 - val_acc: 0.3333
Epoch 8/20
480/480 [==============================] - 0s - loss: 1.5179 - acc: 0.4417 - val_loss: 1.8259 - val_acc: 0.3500
Epoch 9/20
480/480 [==============================] - 0s - loss: 1.4420 - acc: 0.4687 - val_loss: 1.7830 - val_acc: 0.3583
Epoch 10/20
480/480 [==============================] - 0s - loss: 1.3779 - acc: 0.5396 - val_loss: 1.7057 - val_acc: 0.3917
Epoch 11/20
480/480 [==============================] - 0s - loss: 1.3938 - acc: 0.5125 - val_loss: 1.6361 - val_acc: 0.4417
Epoch 12/20
480/480 [==============================] - 0s - loss: 1.3094 - acc: 0.5312 - val_loss: 1.6450 - val_acc: 0.4333
Epoch 13/20
480/480 [==============================] - 0s - loss: 1.2581 - acc: 0.5208 - val_loss: 1.6300 - val_acc: 0.4417
Epoch 14/20
480/480 [==============================] - 0s - loss: 1.2919 - acc: 0.5167 - val_loss: 1.4900 - val_acc: 0.4667
Epoch 15/20
480/480 [==============================] - 0s - loss: 1.2652 - acc: 0.5188 - val_loss: 1.4385 - val_acc: 0.5167
Epoch 16/20
480/480 [==============================] - 0s - loss: 1.2389 - acc: 0.5583 - val_loss: 1.4162 - val_acc: 0.4833
Epoch 17/20
480/480 [==============================] - 0s - loss: 1.1311 - acc: 0.5938 - val_loss: 1.4558 - val_acc: 0.4667
Epoch 18/20
480/480 [==============================] - 0s - loss: 1.1937 - acc: 0.5646 - val_loss: 1.4262 - val_acc: 0.4750
Epoch 19/20
480/480 [==============================] - 0s - loss: 1.1405 - acc: 0.6000 - val_loss: 1.4258 - val_acc: 0.5083
Epoch 20/20
480/480 [==============================] - 0s - loss: 1.1441 - acc: 0.5958 - val_loss: 1.4280 - val_acc: 0.5000

In [8]:
x = range(epoch)
# `val_acc` comes from the 20% validation_split of the TRAINING data, not the
# held-out test set (which is evaluated separately below) — so label it as
# validation accuracy, not test accuracy.
plt.plot(x, result.history['acc'], label='train accuracy')
plt.plot(x, result.history['val_acc'], label='validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))

# Final evaluation on the held-out test set.
score = model.evaluate(X_ss_test, y_test, batch_size=100)
print("")
print(model.metrics_names)
print(score)


100/400 [======>.......................] - ETA: 0s
['loss', 'acc']
[1.3157533407211304, 0.54249998927116394]

In [9]:
# Persist the trained model (architecture + weights + optimizer state) to HDF5.
model_filepath = "./savedModels/ceps_standerized_dense_model.h5"
model.save(model_filepath)

In [10]:
# Experiment log: results from an earlier, smaller architecture, kept here
# for comparison with the current run. (Bare string literal; its value is
# echoed as the cell output.)
"""
note

---
layer_1_outdim = 100
layer_1_dropout = 0.5
layer_2_outdim = 100
layer_2_dropout = 0.4
layer_3_outdim = 50
layer_3_dropout = 0.5
loss: 1.3307 - acc: 0.5479 - val_loss: 1.4328 - val_acc: 0.4250
test['loss', 'acc'][1.3474427759647369, 0.51999998837709427]
---

"""


Out[10]:
"\nnote\n\n---\nlayer_1_outdim = 100\nlayer_1_dropout = 0.5\nlayer_2_outdim = 100\nlayer_2_dropout = 0.4\nlayer_3_outdim = 50\nlayer_3_dropout = 0.5\nloss: 1.3307 - acc: 0.5479 - val_loss: 1.4328 - val_acc: 0.4250\ntest['loss', 'acc'][1.3474427759647369, 0.51999998837709427]\n---\n\n"

In [11]:
# Build one prediction example from a single track's saved MFCC file.
file_path = "../data/songData/genres/disco/disco.00006.ceps.npy"
ceps = np.load(file_path)

# Rows become frames after transposing; average the coefficients over the
# middle 80% of frames (the first and last 10% are discarded — presumably
# to skip intro/outro artifacts; TODO confirm against create_data_ceps).
frames = ceps.transpose()
num_frames = len(frames)
lower = int(num_frames / 10)
upper = int(num_frames * 9 / 10)
frame_mean = np.mean(frames[lower:upper], axis=0)

# Wrap as a batch of one and apply the same standardization as training data.
predict_data = np.array([frame_mean])
predict_data = getStanderizedData(predict_data)

In [12]:
# Run the trained model on the single standardized example and report both
# the full softmax distribution and the most probable class index.
predict_result = model.predict(predict_data)
print(predict_result)
best_class_index = np.argmax(predict_result)
print(best_class_index)


[[ 0.0527634   0.01676753  0.22356561  0.16158544  0.00983731  0.0146053
   0.08344398  0.01231532  0.02595722  0.39915895]]
9