In [1]:
import pandas as pd
import numpy as np
import pickle
import os

from scipy.stats import kurtosis, skew
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout, Activation, Flatten
from keras.layers import Conv1D, MaxPooling1D, GlobalAveragePooling1D
from keras.layers.normalization import BatchNormalization
from keras.utils import to_categorical
from keras.optimizers import Adam
from keras.models import load_model
from sklearn import metrics
import matplotlib.pyplot as plt
from scipy.signal import butter, lfilter, freqz


Using TensorFlow backend.

In [2]:
LABELS = [
    "HAZMEI'S SEXY POSE (NEUTRAL)", 
    "HAZMEI'S FLYING KISS (WAVE HANDS)",
    "HYUN'S MAD DRIVING SKILLS (BUS DRIVING)",
    "ENCIK'S IPPT 2.4KM (FRONT BACK)",
    "HYUN'S BALLET DANCE (SIDE STEP)",
    "HAZMEI'S BELLY BOUNCE (JUMPING)",
    "JUMPING JACK DR. WANG YE VERSION",
    "TURN CLAP",
    "SQUAT TURN CLAP",
    "WINDOW",
    "WINDOW 360",
    "MONEY (FINAL MOVE)"
] 

SAMPLING_RATE = 50
WINDOW_SIZE = 2.5
WINDOW_READINGS = int(WINDOW_SIZE * SAMPLING_RATE)
DATAPATH = 'data_all/'

ORDER = 3       # Butterworth filter order
CUTOFF = 7    # desired cutoff frequency of the filter in Hz (take max/60)
FILTER_SAMPLING_RATE = 20
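
# Derived-value sketch: 2.5 s of readings at 50 Hz gives 125 samples per
# window, so each example below is a (125, 9) array
# (9 channels = 2 accelerometers + 1 gyroscope, 3 axes each).
print('readings per window:', WINDOW_READINGS)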

In [3]:
def normalize_data(data):
    data_norm = (data - data.mean()) / (data.max() - data.min())
    return np.array(data_norm)

def frequency(data):
    # Return the FFT of a signal together with the corresponding frequency bins
    fourier = np.fft.fft(data)
    freqs = np.fft.fftfreq(len(data), d=1/SAMPLING_RATE)
    return fourier, freqs
    
def magnitude(x, y, z):
    x_sq = np.power(x, 2)
    y_sq = np.power(y, 2)
    z_sq = np.power(z, 2)

    xyz_sq = x_sq + y_sq + z_sq

    xyz_mag = np.sqrt(xyz_sq)
    return xyz_mag

def rms(x, axis=None):
    return np.sqrt(np.mean(np.power(x, 2), axis=axis))

def feature_extraction(x, y, z):
    #'''
    #mean, std
    features = [np.mean(x), np.mean(y), np.mean(z), np.std(x), np.std(y), np.std(z)]
    #Mean Absolute Deviation
    features.extend((np.mean(abs(x - features[0])), np.mean(abs(y - features[1])), np.mean(abs(z - features[2]))))
    #Jerk Signals mean, std, mad
    features.extend((np.mean(np.diff(x)), np.mean(np.diff(y)), np.mean(np.diff(z)), np.std(np.diff(x)), np.std(np.diff(y)), np.std(np.diff(z))))
    features.extend((np.mean(abs(np.diff(x) - features[9])), np.mean(abs(np.diff(y) - features[10])), np.mean(abs(np.diff(z) - features[11]))))
    #max, min
    features.extend((max(x), max(y), max(z), min(x), min(y), min(z)))
    #correlation
    features.extend((np.correlate(x, y)[0], np.correlate(x, z)[0], np.correlate(y, z)[0]))
    #energy
    features.extend((np.dot(x,x)/len(x), np.dot(y,y)/len(y), np.dot(z,z)/len(z)))
    #iqr
    #features.extend((np.subtract(*np.percentile(x, [75, 25])), np.subtract(*np.percentile(y, [75, 25])), np.subtract(*np.percentile(z, [75, 25]))))
    #Root Mean Square
    features.extend((rms(x), rms(y), rms(z)))
    #Skew, Kurtosis
    features.extend((skew(x), skew(y), skew(z), kurtosis(x), kurtosis(y), kurtosis(z)))
    #'''
    
    '''
    #Frequency Domain Features
    fourier_x = np.fft.fft(x)
    fourier_y = np.fft.fft(y)
    fourier_z = np.fft.fft(z)
    freqs = np.fft.fftfreq(WINDOW_READINGS)
    fourier_x = np.abs(fourier_x)
    fourier_y = np.abs(fourier_y)
    fourier_z = np.abs(fourier_z)
    #Mean Frequency, Skew, Kurtosis
    features.extend((np.mean(fourier_x), np.mean(fourier_y), np.mean(fourier_z)))
    features.extend((skew(fourier_x), skew(fourier_y), skew(fourier_z), kurtosis(fourier_x), kurtosis(fourier_y), kurtosis(fourier_z)))
    '''

    '''
    #Old Feature Extraction
    features = [np.mean(x), np.mean(y), np.mean(z), np.std(x), np.std(y), np.std(z)]
    #Median Absolute Deviation
    features.extend((np.mean(abs(x - features[0])), np.mean(abs(y - features[1])), np.mean(abs(z - features[2]))))
    #Jerk Signals
    features.extend((np.mean(np.diff(x)), np.mean(np.diff(y)), np.mean(np.diff(z)), np.std(np.diff(x)), np.std(np.diff(y)), np.std(np.diff(z))))
    features.extend((np.mean(abs(np.diff(x) - features[9])), np.mean(abs(np.diff(y) - features[10])), np.mean(abs(np.diff(z) - features[11]))))
    features.extend((skew(x), skew(y), skew(z), kurtosis(x), kurtosis(y), kurtosis(z)))
    features.extend((max(x), max(y), max(z), min(x), min(y), min(z)))
    '''
    
    return features
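
# Quick sanity-check sketch (the _x/_y/_z arrays here are made-up random
# readings, not real sensor data): with the blocks enabled above, one call to
# feature_extraction should return 39 statistics per x/y/z triple.
_x = np.random.randn(WINDOW_READINGS)
_y = np.random.randn(WINDOW_READINGS)
_z = np.random.randn(WINDOW_READINGS)
print('features per sensor triple:', len(feature_extraction(_x, _y, _z)))   # expected: 39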

def add_noise(data):
    data_noise = data + np.random.uniform(size=len(data))
    data_noise = data_noise + np.random.laplace(loc=0.0, scale=1.0, size=len(data))
    return data_noise

def data_augmentation(X):
    # Add noise to a copy of X so the original windows are left untouched,
    # then stack the noisy copies after the originals
    X_noise = X.copy()
    for i in range(X_noise.shape[0]):
        for j in range(X_noise.shape[2]):
            X_noise[i, :, j] = add_noise(X_noise[i, :, j])
    return np.concatenate((X, X_noise), axis=0)

def feature_selection(X, augmentData=False):
    data = []
    for i in range(X.shape[0]):
        features = []
        # Process the sensor channels three at a time (x, y, z of one sensor)
        for j in range(0, X.shape[2], 3):
            x = X[i, :, j]
            y = X[i, :, j + 1]
            z = X[i, :, j + 2]
            
            if augmentData:
                x_noise = add_noise(x)
                y_noise = add_noise(y)
                z_noise = add_noise(z)
                features.append(feature_extraction(x_noise, y_noise, z_noise))
            else:
                features.append(feature_extraction(x, y, z))

        data.append(features)
    return np.array(data)

def feature_engineering(X, augmentData=False):
    if augmentData:
        return np.concatenate((feature_selection(X, False), feature_selection(X, True)), axis=0)
    else:
        return feature_selection(X, False)
    
def shitHotLP(data, cutoff, fs, order):
    # Apply a Butterworth low-pass filter to the raw sensor data
    return butter_lowpass_filter(data, cutoff, fs, order)

def butter_lowpass(cutoff, fs, order):
    nyq = 0.5 * fs
    normal_cutoff = cutoff / nyq
    b, a = butter(order, normal_cutoff, btype='low', analog=False)
    return b, a

def butter_lowpass_filter(data, cutoff, fs, order):
    b, a = butter_lowpass(cutoff, fs, order=order)
    y = lfilter(b, a, data)
    return y
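
# Optional sketch (not part of the training pipeline): visualise the frequency
# response of the Butterworth low-pass filter defined above, using the freqz
# import at the top of the notebook, to sanity-check the CUTOFF / ORDER choice.
def plot_lowpass_response(cutoff=CUTOFF, fs=FILTER_SAMPLING_RATE, order=ORDER):
    b, a = butter_lowpass(cutoff, fs, order)
    w, h = freqz(b, a, worN=512)
    plt.plot(0.5 * fs * w / np.pi, np.abs(h))       # frequency axis in Hz
    plt.axvline(cutoff, color='k', linestyle='--')  # mark the cutoff frequency
    plt.xlabel('Frequency (Hz)')
    plt.ylabel('Gain')
    plt.title('Butterworth low-pass frequency response')
    plt.show()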


def read_data(filename, label):
    raw_data = pd.read_csv(filename)
    # Drop the first 100 and last 50 readings, keep the 9 sensor columns
    raw_data = raw_data.iloc[100:-50, 0:9]
    # Trim so the number of readings is an exact multiple of the window length
    raw_data = raw_data[:raw_data.shape[0]-(raw_data.shape[0]%WINDOW_READINGS)]
    #print(raw_data.shape)
    #print(filename, ': Minimum: ', np.min(raw_data))
    #print(filename, ': Maximum: ', np.max(raw_data))

    filtered_data = shitHotLP(raw_data, CUTOFF, FILTER_SAMPLING_RATE, ORDER)
    sampled_data = filtered_data.reshape(-1, WINDOW_READINGS, 9)
    #print(sampled_data.shape)
    return sampled_data, [label]*sampled_data.shape[0]



def import_data(root_dirpath, test_data_size):
    # The first all-zero window is a placeholder and is stripped off before returning
    X = np.zeros([1, WINDOW_READINGS, 9])
    Xt = np.zeros([1, WINDOW_READINGS, 9])
    y = []
    yt = []

    # Each sub-directory is named after its integer class label
    sub_directories = next(os.walk(root_dirpath))[1]
    for sub_dir in sub_directories:
        files = next(os.walk(root_dirpath + sub_dir))[2]
        count = 0
        samples = 0
        for file in files:
            if not file or file == '.DS_Store':
                continue
            #print(file)
            temp_x, temp_y = read_data(root_dirpath + sub_dir + '/' + file, int(sub_dir))
            # The first test_data_size files of each class are held out as test data
            if count < test_data_size:
                print(file, ' size: ', len(temp_y))
                Xt = np.concatenate((Xt, temp_x), axis=0)
                yt = yt + temp_y
            else:
                X = np.concatenate((X, temp_x), axis=0)
                y = y + temp_y
            # Short recordings only count as half a file towards the hold-out quota
            count = count + 1 if len(temp_y) > 40 else count + 0.5
            samples += len(temp_y)

        print(LABELS[int(sub_dir)], ': ', samples)
    y = np.array(y)
    yt = np.array(yt)
    return X[1:], y, Xt[1:], yt

In [4]:
#Prediction

def predict(model):
    # Evaluate the trained model on the held-out test set (globals Xt, Yt),
    # plot the confusion matrix, and save the results and model to disk
    Y_output = model.predict(Xt)
    Y_pred = np.argmax(Y_output, axis=1)
    print(np.array(Y_pred))
    print("")
    print("Accuracy Rate:")
    print(metrics.accuracy_score(Yt, Y_pred))
    print("")
    print("Confusion Matrix:")
    confusion_matrix = metrics.confusion_matrix(Yt, Y_pred)
    print(confusion_matrix)
    normalised_confusion_matrix = np.array(confusion_matrix, dtype=np.float32)/np.sum(confusion_matrix)*100
    
    '''
    print("Precision: {}%".format(100*metrics.precision_score(Yt, Y_pred, average="weighted")))
    print("Recall: {}%".format(100*metrics.recall_score(Yt, Y_pred, average="weighted")))
    print("f1_score: {}%".format(100*metrics.f1_score(Yt, Y_pred, average="weighted")))
    print("")
    print("Confusion matrix (normalised to % of total test data):")
    print(normalised_confusion_matrix)
    '''
    
    # Plot Results: 
    plt.figure(figsize=(12, 12))
    plt.imshow(
        normalised_confusion_matrix, 
        interpolation='nearest', 
        cmap=plt.cm.rainbow
    )
    plt.title("Confusion matrix \n(normalised to % of total test data)")
    plt.colorbar()
    tick_marks = np.arange(len(LABELS))
    plt.xticks(tick_marks, LABELS, rotation=90)
    plt.yticks(tick_marks, LABELS)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()

    np.savetxt(DATAPATH+"accuracy.csv", [metrics.accuracy_score(Yt, Y_pred)], delimiter=",")
    np.savetxt(DATAPATH+"confusion_matrix.csv", metrics.confusion_matrix(Yt, Y_pred), delimiter=",")
    model.save(DATAPATH+'trained_nn_model.h5')
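
# Reload sketch (assumes predict() has already been run further down, so that
# DATAPATH + 'trained_nn_model.h5' exists and Xt / LABELS are available):
#reloaded_model = load_model(DATAPATH + 'trained_nn_model.h5')
#print(LABELS[np.argmax(reloaded_model.predict(Xt[0:1]))])   # classify one test window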

In [5]:
X, Y, Xt, Yt = import_data(DATAPATH, 1)
print(X.shape)
print(Y.shape)
print(Xt.shape)
print(Yt.shape)

#print(X)
#print(Y)
print(Yt)


hazmei_window_3.csv  size:  24
hazmei_window_1.csv  size:  49
WINDOW :  488
santos_natural.csv  size:  50
HAZMEI'S SEXY POSE (NEUTRAL) :  410
yz_final.csv  size:  75
MONEY (FINAL MOVE) :  172
hazmei_turnclap2.csv  size:  24
hazmei_turnclap.csv  size:  24
TURN CLAP :  192
hyun_jumpingjack.csv  size:  49
JUMPING JACK DR. WANG YE VERSION :  218
Rahman_waving.csv  size:  50
HAZMEI'S FLYING KISS (WAVE HANDS) :  399
santos_window360_2.csv  size:  24
hazmei_window360_1.csv  size:  24
WINDOW 360 :  408
hyun_squatturnclap_1.csv  size:  24
jerich_squatturnclap.csv  size:  24
SQUAT TURN CLAP :  112
hyun_sidestep.csv  size:  50
HYUN'S BALLET DANCE (SIDE STEP) :  396
Rahman_frontback.csv  size:  50
ENCIK'S IPPT 2.4KM (FRONT BACK) :  396
121017_hazmei_busdriver.csv  size:  49
HYUN'S MAD DRIVING SKILLS (BUS DRIVING) :  403
yz_jumping.csv  size:  50
HAZMEI'S BELLY BOUNCE (JUMPING) :  397
(3351, 125, 9)
(3351,)
(640, 125, 9)
(640,)
[ 9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9
  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9
  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0 11 11
 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11
 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11
 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11  7  7
  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7
  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  6  6  6  6
  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6
  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  1  1  1  1  1
  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1
  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1 10 10 10 10 10
 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10
 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10  8  8  8  8  8  8  8
  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8
  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  4  4  4  4  4  4  4  4  4
  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4
  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  3  3  3  3  3  3  3  3  3
  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3
  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  2  2  2  2  2  2  2  2  2
  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2
  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  5  5  5  5  5  5  5  5  5  5
  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5
  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5]

In [6]:
#Plot Data
Size = Xt.shape[1]
Sample = [230, 180]
legends = ['acc1x', 'acc1y', 'acc1z', 'acc2x', 'acc2y', 'acc2z', 'gyrox', 'gyroy', 'gyroz']

for i in Sample:
    print(Yt[i])
    plt.figure(figsize=(9,9))
    plt.plot(list(range(Size)), Xt[i], label=legends)
    plt.show()

    temp = Xt[i].T
    print(temp.shape)
    #print(temp[0])
    print(np.correlate(temp[0], temp[1])[0])

    fourier = np.fft.fft(temp[8])
    freqs = np.fft.fftfreq(temp.shape[1])
    plt.plot(freqs, fourier.real, freqs, fourier.imag)
    plt.show()

    plt.plot(freqs, np.abs(fourier)**2)
    plt.show()

    idx = np.argsort(freqs)
    plt.plot(freqs[idx], fourier[idx])
    plt.show()
    print(max(freqs))
    print(np.mean(np.abs(fourier)**2))



#print(fourier)
#print(freqs)
#print(np.abs(fourier))
#print(np.mean(fourier))
#print(freqs)
#print(np.abs(freqs[0:51]))


7
(9, 125)
2017.09565403
/Users/arshanrahman/anaconda/lib/python3.6/site-packages/numpy/core/numeric.py:531: ComplexWarning: Casting complex values to real discards the imaginary part
  return array(a, dtype, copy=False, order=order)
0.496
243210.435379
11
(9, 125)
3804.09287999
0.496
18987.7153056

In [7]:
#Raw Data with Data Augmentation
'''
X = data_augmentation(X)
Xt = data_augmentation(Xt)
Y = np.concatenate((Y, Y), axis=0)
Yt = np.concatenate((Yt, Yt), axis=0)
'''

#Feature Selection & Data Augmentation

#X = feature_engineering(X, False)
#Xt = feature_engineering(Xt, False)
#Y = np.concatenate((Y, Y), axis=0)
#Yt = np.concatenate((Yt, Yt), axis=0)





Yhot = to_categorical(Y)

print(X.shape)
print(Xt.shape)
print(Yhot.shape)
#print([Xt[i] for i in Sample])

np.savetxt("data/test_data_format.csv", Xt[0], delimiter=",")

#X = X.reshape(X.shape[0], X.shape[1]*X.shape[2])
#Xt = Xt.reshape(Xt.shape[0], Xt.shape[1]*Xt.shape[2])


(3351, 125, 9)
(640, 125, 9)
(3351, 12)

In [8]:
'''#Neural Network

data_dim = X.shape[2]
timesteps = X.shape[1]
num_classes = Yhot.shape[1]
b_size = 32

model = Sequential()
model.add(Dense(70, input_shape=(timesteps, data_dim)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.2))

model.add(Dense(70))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.2))

model.add(Dense(70))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.2))

model.add(Flatten())
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(X, Yhot, epochs=50, batch_size=b_size, validation_split=0.1, shuffle=True)
result = model.evaluate(X, Yhot)
print("\n%s: %.2f%%" % (model.metrics_names[0], result[0]))
print("\n%s: %.2f%%" % (model.metrics_names[1], result[1]*100))

predict(model)'''


Out[8]:
'#Neural Network\n\ndata_dim = X.shape[2]\ntimesteps = X.shape[1]\nnum_classes = Yhot.shape[1]\nb_size = 32\n\nmodel = Sequential()\nmodel.add(Dense(70, input_shape=(timesteps, data_dim)))\nmodel.add(BatchNormalization())\nmodel.add(Activation(\'relu\'))\nmodel.add(Dropout(0.2))\n\nmodel.add(Dense(70))\nmodel.add(BatchNormalization())\nmodel.add(Activation(\'relu\'))\nmodel.add(Dropout(0.2))\n\nmodel.add(Dense(70))\nmodel.add(BatchNormalization())\nmodel.add(Activation(\'relu\'))\nmodel.add(Dropout(0.2))\n\nmodel.add(Flatten())\nmodel.add(Dense(num_classes, activation=\'softmax\'))\n\nmodel.compile(loss=\'categorical_crossentropy\',\n              optimizer=\'adam\',\n              metrics=[\'accuracy\'])\n\nmodel.fit(X, Yhot, epochs=50, batch_size=b_size, validation_split=0.1, shuffle=True)\nresult = model.evaluate(X, Yhot)\nprint("\n%s: %.2f%%" % (model.metrics_names[0], result[0]))\nprint("\n%s: %.2f%%" % (model.metrics_names[1], result[1]*100))\n\npredict(model)'

In [9]:
#CNN Neural Network

data_dim = X.shape[2]
timesteps = X.shape[1]
num_classes = Yhot.shape[1]
b_size = 32

model = Sequential()
model.add(Conv1D(128, 4, padding='valid', strides=1, input_shape=(timesteps, data_dim)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling1D())
model.add(Dropout(0.2))

model.add(Conv1D(128, 2, padding='valid', strides=1) )
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.2))

model.add(Conv1D(64, 2, padding='valid', strides=1) )
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling1D())
model.add(Dropout(0.2))

model.add(Conv1D(32, 1, padding='valid', strides=1) )
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(GlobalAveragePooling1D())
model.add(Dense(num_classes, activation='softmax'))
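
# Optional sanity check: print the layer output shapes and parameter counts
# of the CNN defined above.
model.summary()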


'''model = Sequential()
model.add(Conv1D(64, 4, padding='valid', activation='relu', strides=1, input_shape=(timesteps, data_dim)))
model.add(MaxPooling1D())
model.add(Dropout(0.2))
model.add(Conv1D(64, 2, padding='valid', activation='relu', strides=1) )
model.add(Conv1D(32, 2, padding='valid', activation='relu', strides=1) )
model.add(MaxPooling1D())
model.add(Dropout(0.2))
model.add(Conv1D(32, 1, padding='valid', activation='relu', strides=1) )
model.add(GlobalAveragePooling1D())
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))'''




model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(X, Yhot, epochs=50, batch_size=b_size, validation_split=0.1, shuffle=True)
result = model.evaluate(X, Yhot)
print("\n%s: %.2f%%" % (model.metrics_names[0], result[0]))
print("\n%s: %.2f%%" % (model.metrics_names[1], result[1]*100))

predict(model)


Train on 3015 samples, validate on 336 samples
Epoch 1/50
3015/3015 [==============================] - 8s - loss: 1.4170 - acc: 0.6637 - val_loss: 2.7295 - val_acc: 0.0000e+00
Epoch 2/50
3015/3015 [==============================] - 8s - loss: 0.7172 - acc: 0.8196 - val_loss: 3.0610 - val_acc: 0.0000e+00
Epoch 3/50
3015/3015 [==============================] - 8s - loss: 0.4961 - acc: 0.8859 - val_loss: 3.1228 - val_acc: 0.0000e+00
Epoch 4/50
3015/3015 [==============================] - 7s - loss: 0.3938 - acc: 0.9108 - val_loss: 3.2751 - val_acc: 0.0000e+00
Epoch 5/50
3015/3015 [==============================] - 7s - loss: 0.2994 - acc: 0.9347 - val_loss: 3.4753 - val_acc: 0.0000e+00
Epoch 6/50
3015/3015 [==============================] - 7s - loss: 0.2555 - acc: 0.9390 - val_loss: 2.9588 - val_acc: 0.0417
Epoch 7/50
3015/3015 [==============================] - 8s - loss: 0.2124 - acc: 0.9519 - val_loss: 2.7221 - val_acc: 0.1339
Epoch 8/50
3015/3015 [==============================] - 7s - loss: 0.1862 - acc: 0.9569 - val_loss: 2.4333 - val_acc: 0.3363
Epoch 9/50
3015/3015 [==============================] - 7s - loss: 0.1619 - acc: 0.9632 - val_loss: 1.9229 - val_acc: 0.4643
Epoch 10/50
3015/3015 [==============================] - 7s - loss: 0.1595 - acc: 0.9589 - val_loss: 1.8387 - val_acc: 0.4286
Epoch 11/50
3015/3015 [==============================] - 7s - loss: 0.1344 - acc: 0.9648 - val_loss: 1.9041 - val_acc: 0.4732
Epoch 12/50
3015/3015 [==============================] - 7s - loss: 0.1289 - acc: 0.9675 - val_loss: 1.6018 - val_acc: 0.5536
Epoch 13/50
3015/3015 [==============================] - 7s - loss: 0.1152 - acc: 0.9718 - val_loss: 1.5632 - val_acc: 0.5417
Epoch 14/50
3015/3015 [==============================] - 8s - loss: 0.1257 - acc: 0.9692 - val_loss: 1.3861 - val_acc: 0.5744
Epoch 15/50
3015/3015 [==============================] - 7s - loss: 0.1112 - acc: 0.9768 - val_loss: 1.7726 - val_acc: 0.5208
Epoch 16/50
3015/3015 [==============================] - 9s - loss: 0.1047 - acc: 0.9715 - val_loss: 1.1165 - val_acc: 0.6548
Epoch 17/50
3015/3015 [==============================] - 9s - loss: 0.1217 - acc: 0.9672 - val_loss: 1.0947 - val_acc: 0.6756
Epoch 18/50
3015/3015 [==============================] - 8s - loss: 0.1030 - acc: 0.9711 - val_loss: 1.1066 - val_acc: 0.6756
Epoch 19/50
3015/3015 [==============================] - 8s - loss: 0.0976 - acc: 0.9741 - val_loss: 1.5738 - val_acc: 0.5536
Epoch 20/50
3015/3015 [==============================] - 8s - loss: 0.0977 - acc: 0.9761 - val_loss: 1.0634 - val_acc: 0.6905
Epoch 21/50
3015/3015 [==============================] - 8s - loss: 0.0911 - acc: 0.9774 - val_loss: 1.1871 - val_acc: 0.6429
Epoch 22/50
3015/3015 [==============================] - 8s - loss: 0.1093 - acc: 0.9682 - val_loss: 1.5184 - val_acc: 0.5923
Epoch 23/50
3015/3015 [==============================] - 8s - loss: 0.0895 - acc: 0.9761 - val_loss: 1.1917 - val_acc: 0.6458
Epoch 24/50
3015/3015 [==============================] - 8s - loss: 0.0742 - acc: 0.9804 - val_loss: 1.4567 - val_acc: 0.5774
Epoch 25/50
3015/3015 [==============================] - 8s - loss: 0.0750 - acc: 0.9804 - val_loss: 1.3372 - val_acc: 0.6429
Epoch 26/50
3015/3015 [==============================] - 8s - loss: 0.0805 - acc: 0.9771 - val_loss: 1.6395 - val_acc: 0.5476
Epoch 27/50
3015/3015 [==============================] - 7s - loss: 0.0773 - acc: 0.9801 - val_loss: 1.2781 - val_acc: 0.6577
Epoch 28/50
3015/3015 [==============================] - 7s - loss: 0.0719 - acc: 0.9788 - val_loss: 1.3564 - val_acc: 0.6161
Epoch 29/50
3015/3015 [==============================] - 8s - loss: 0.0662 - acc: 0.9818 - val_loss: 1.5069 - val_acc: 0.5387
Epoch 30/50
3015/3015 [==============================] - 8s - loss: 0.0761 - acc: 0.9761 - val_loss: 1.1506 - val_acc: 0.6815
Epoch 31/50
3015/3015 [==============================] - 7s - loss: 0.0698 - acc: 0.9808 - val_loss: 1.0735 - val_acc: 0.6875
Epoch 32/50
3015/3015 [==============================] - 8s - loss: 0.0750 - acc: 0.9784 - val_loss: 1.6146 - val_acc: 0.5000
Epoch 33/50
3015/3015 [==============================] - 8s - loss: 0.0720 - acc: 0.9804 - val_loss: 1.9456 - val_acc: 0.4643
Epoch 34/50
3015/3015 [==============================] - 8s - loss: 0.0919 - acc: 0.9735 - val_loss: 1.6179 - val_acc: 0.5595
Epoch 35/50
3015/3015 [==============================] - 8s - loss: 0.0630 - acc: 0.9837 - val_loss: 1.7136 - val_acc: 0.5268
Epoch 36/50
3015/3015 [==============================] - 9s - loss: 0.0677 - acc: 0.9804 - val_loss: 1.3167 - val_acc: 0.6280
Epoch 37/50
3015/3015 [==============================] - 9s - loss: 0.0684 - acc: 0.9791 - val_loss: 1.4058 - val_acc: 0.5774
Epoch 38/50
3015/3015 [==============================] - 8s - loss: 0.0587 - acc: 0.9851 - val_loss: 1.7006 - val_acc: 0.5298
Epoch 39/50
3015/3015 [==============================] - 8s - loss: 0.0544 - acc: 0.9877 - val_loss: 1.3455 - val_acc: 0.6280
Epoch 40/50
3015/3015 [==============================] - 8s - loss: 0.0495 - acc: 0.9884 - val_loss: 1.5796 - val_acc: 0.5268
Epoch 41/50
3015/3015 [==============================] - 7s - loss: 0.0582 - acc: 0.9828 - val_loss: 1.0704 - val_acc: 0.6994
Epoch 42/50
3015/3015 [==============================] - 8s - loss: 0.0503 - acc: 0.9841 - val_loss: 1.5202 - val_acc: 0.5744
Epoch 43/50
3015/3015 [==============================] - 8s - loss: 0.0469 - acc: 0.9867 - val_loss: 1.3705 - val_acc: 0.5833
Epoch 44/50
3015/3015 [==============================] - 8s - loss: 0.0627 - acc: 0.9821 - val_loss: 1.3256 - val_acc: 0.6399
Epoch 45/50
3015/3015 [==============================] - 8s - loss: 0.0549 - acc: 0.9854 - val_loss: 1.1325 - val_acc: 0.6935
Epoch 46/50
3015/3015 [==============================] - 8s - loss: 0.0502 - acc: 0.9861 - val_loss: 1.2855 - val_acc: 0.6458
Epoch 47/50
3015/3015 [==============================] - 8s - loss: 0.0449 - acc: 0.9897 - val_loss: 1.6859 - val_acc: 0.4970
Epoch 48/50
3015/3015 [==============================] - 8s - loss: 0.0545 - acc: 0.9834 - val_loss: 1.2387 - val_acc: 0.6518
Epoch 49/50
3015/3015 [==============================] - 8s - loss: 0.0439 - acc: 0.9864 - val_loss: 0.8730 - val_acc: 0.7619
Epoch 50/50
3015/3015 [==============================] - 8s - loss: 0.0611 - acc: 0.9834 - val_loss: 1.3075 - val_acc: 0.6310
3328/3351 [============================>.] - ETA: 0s
loss: 0.17%

acc: 94.99%
[ 9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9
  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9
  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0 11 11
 11 11 11 11 11 11 11 11 11 11 11  2 11 11 11 11 11 11 11 11 11 11 11 11 11
 11 11 11 11 11 11 11 11 11 11 11 11 11 11  4 11 11 11 11 11 11 11 11 11 11
 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11  7  7
  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7
  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  6  6  6  6
  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6
  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  1  1  1  1  1
  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1
  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1 10 10 10 10 10
 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10
 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10  8  8  8  8  7  8  8
  8  8  7  8  8  7  8  7  8  8  7  7  8  8  8  7  8  7  8  8  8  8  8  8  8
  8  7  8  8  8  8  8  8  8  8  8  8  8  8  8  8  4  4  4  4  4  4  4  4  4
  4  4  4  4  4  4  2  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4
  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  3  3  3  3  3  3  3  4  3
  4  3  3  3  3  0  3  3  0  0  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3
  3  3  3  3  3  3  3  3  3  4  3  3  3  3  3  3  2  2  2  2  0  2  2  2  2
  2  2  2  2  2  2  2  2  2  2  2  2  2  0  2  2  2  2  2  2  2  2  2  2  2
  2  2  2  0  2  2  2  2  2  2  2  2  2  2  2  5  5  5  5  5  5  5  5  5  5
  5  5  5  5  5  0  5  3  5  5  5  5  5  4  5  5  5  5  5  5  5  5  5  5  5
  5  5  5  5  5  5  5  5  5  5  5  5  5  5  4]

Accuracy Rate:
0.9609375

Confusion Matrix:
[[50  0  0  0  0  0  0  0  0  0  0  0]
 [ 0 50  0  0  0  0  0  0  0  0  0  0]
 [ 3  0 46  0  0  0  0  0  0  0  0  0]
 [ 3  0  0 44  3  0  0  0  0  0  0  0]
 [ 0  0  1  0 49  0  0  0  0  0  0  0]
 [ 1  0  0  1  2 46  0  0  0  0  0  0]
 [ 0  0  0  0  0  0 49  0  0  0  0  0]
 [ 0  0  0  0  0  0  0 48  0  0  0  0]
 [ 0  0  0  0  0  0  0  9 39  0  0  0]
 [ 0  0  0  0  0  0  0  0  0 73  0  0]
 [ 0  0  0  0  0  0  0  0  0  0 48  0]
 [ 0  0  1  0  1  0  0  0  0  0  0 73]]

In [10]:
#LSTM Neural Network

'''data_dim = X.shape[2]
timesteps = X.shape[1]
num_classes = Yhot.shape[1]
b_size = 32

model = Sequential()
model.add(LSTM(64, return_sequences=True, input_shape=(timesteps, data_dim)))
model.add(Dropout(0.7))
model.add(LSTM(64, return_sequences=True))
model.add(Dropout(0.7))
model.add(LSTM(64, return_sequences=True))
model.add(Flatten())
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(X, Yhot, epochs=5, batch_size=b_size, validation_split=0.1, shuffle=True)
result = model.evaluate(X, Yhot)
print("\n%s: %.2f%%" % (model.metrics_names[0], result[0]))
print("\n%s: %.2f%%" % (model.metrics_names[1], result[1]*100))

predict(model)'''


Out[10]:
'data_dim = X.shape[2]\ntimesteps = X.shape[1]\nnum_classes = Yhot.shape[1]\nb_size = 32\n\nmodel = Sequential()\nmodel.add(LSTM(64, return_sequences=True, input_shape=(timesteps, data_dim)))\nmodel.add(Dropout(0.7))\nmodel.add(LSTM(64, return_sequences=True))\nmodel.add(Dropout(0.7))\nmodel.add(LSTM(64, return_sequences=True))\nmodel.add(Flatten())\nmodel.add(Dense(num_classes, activation=\'softmax\'))\n\nmodel.compile(loss=\'categorical_crossentropy\',\n              optimizer=\'adam\',\n              metrics=[\'accuracy\'])\n\nmodel.fit(X, Yhot, epochs=5, batch_size=b_size, validation_split=0.1, shuffle=True)\nresult = model.evaluate(X, Yhot)\nprint("\n%s: %.2f%%" % (model.metrics_names[0], result[0]))\nprint("\n%s: %.2f%%" % (model.metrics_names[1], result[1]*100))\n\npredict(model)'

In [11]:
'''
raw_data = pd.read_csv('data/sample_data_format.csv', skiprows=range(0, 7))
print(raw_data.shape)
cropped_data = raw_data.values.reshape(-1, 40, 9)
print(cropped_data.shape)
print(cropped_data)
pickle.dump(cropped_data, open('data/cropped_data_format.pkl', 'wb'))
np.savetxt("data/cropped_data_format_2.csv", cropped_data[2], delimiter=",")
'''


Out[11]:
'\nraw_data = pd.read_csv(\'data/sample_data_format.csv\', skiprows=range(0, 7))\nprint(raw_data.shape)\ncropped_data = raw_data.values.reshape(-1, 40, 9)\nprint(cropped_data.shape)\nprint(cropped_data)\npickle.dump(cropped_data, open(\'data/cropped_data_format.pkl\', \'wb\'))\nnp.savetxt("data/cropped_data_format_2.csv", cropped_data[2], delimiter=",")\n'

In [ ]: