In [1]:
import pandas as pd
import numpy as np
import pickle
import os

from scipy.stats import kurtosis, skew
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout, Activation, Flatten
from keras.layers import Conv1D, MaxPooling1D, GlobalAveragePooling1D
from keras.utils import to_categorical
from keras.optimizers import Adam
from keras.models import load_model
from sklearn import metrics
import matplotlib.pyplot as plt
from scipy.signal import butter, lfilter, freqz


Using TensorFlow backend.

In [2]:
LABELS = [
    "HAZMEI'S SEXY POSE (NEUTRAL)", 
    "HAZMEI'S FLYING KISS (WAVE HANDS)",
    "HYUN'S MAD DRIVING SKILLS (BUS DRIVING)",
    "ENCIK'S IPPT 2.4KM (FRONT BACK)",
    "HYUN'S BALLET DANCE (SIDE STEP)",
    "HAZMEI'S BELLY BOUNCE (JUMPING)",
    "JUMPING JACK",
    "TURN CLAP",
    "SQUAT TURN CLAP",
    "WINDOW",
    "WINDOW 360",
    "MONEY (FINAL MOVE)"
] 

SAMPLING_RATE = 50
WINDOW_SIZE = 2.5
WINDOW_READINGS = int(WINDOW_SIZE * SAMPLING_RATE)
DATAPATH = 'data1/'

ORDER = 3       # Butterworth filter order
CUTOFF = 7      # desired cutoff frequency of the filter in Hz (take max/60)
FILTER_SAMPLING_RATE = 20

In [3]:
def normalize_data(data):
    data_norm = (data - data.mean()) / (data.max() - data.min())
    return np.array(data_norm)

def frequency(data):
    fourier = np.fft.fft(data)
    freqs = np.fft.fftfreq(len(data), d=1/SAMPLING_RATE)
    return fourier, freqs
    
def magnitude(x, y, z):
    x_sq = np.power(x, 2)
    y_sq = np.power(y, 2)
    z_sq = np.power(z, 2)

    xyz_sq = x_sq + y_sq + z_sq

    xyz_mag = np.sqrt(xyz_sq)
    return xyz_mag

def rms(x, axis=None):
    return np.sqrt(np.mean(np.power(x, 2), axis=axis))

def feature_extraction(x, y, z):
    #'''
    #mean, std
    features = [np.mean(x), np.mean(y), np.mean(z), np.std(x), np.std(y), np.std(z)]
    #Mean Absolute Deviation (of each axis from its mean)
    features.extend((np.mean(abs(x - features[0])), np.mean(abs(y - features[1])), np.mean(abs(z - features[2]))))
    #Jerk Signals mean, std, mad
    features.extend((np.mean(np.diff(x)), np.mean(np.diff(y)), np.mean(np.diff(z)), np.std(np.diff(x)), np.std(np.diff(y)), np.std(np.diff(z))))
    features.extend((np.mean(abs(np.diff(x) - features[9])), np.mean(abs(np.diff(y) - features[10])), np.mean(abs(np.diff(z) - features[11]))))
    #max, min
    features.extend((max(x), max(y), max(z), min(x), min(y), min(z)))
    #correlation
    features.extend((np.correlate(x, y)[0], np.correlate(x, z)[0], np.correlate(y, z)[0]))
    #energy
    features.extend((np.dot(x,x)/len(x), np.dot(y,y)/len(y), np.dot(z,z)/len(z)))
    #iqr
    #features.extend((np.subtract(*np.percentile(x, [75, 25])), np.subtract(*np.percentile(y, [75, 25])), np.subtract(*np.percentile(z, [75, 25]))))
    #Root Mean Square
    features.extend((rms(x), rms(y), rms(z)))
    #Skew, Kurtosis
    features.extend((skew(x), skew(y), skew(z), kurtosis(x), kurtosis(y), kurtosis(z)))
    #'''
    
    '''
    #Frequency Domain Features
    fourier_x = np.fft.fft(x)
    fourier_y = np.fft.fft(y)
    fourier_z = np.fft.fft(z)
    freqs = np.fft.fftfreq(WINDOW_READINGS)
    fourier_x = np.abs(fourier_x)
    fourier_y = np.abs(fourier_y)
    fourier_z = np.abs(fourier_z)
    #Mean Frequency, Skew, Kurtosis
    features.extend((np.mean(fourier_x), np.mean(fourier_y), np.mean(fourier_z)))
    features.extend((skew(fourier_x), skew(fourier_y), skew(fourier_z), kurtosis(fourier_x), kurtosis(fourier_y), kurtosis(fourier_z)))
    '''

    '''
    #Old Feature Extraction
    features = [np.mean(x), np.mean(y), np.mean(z), np.std(x), np.std(y), np.std(z)]
    #Mean Absolute Deviation
    features.extend((np.mean(abs(x - features[0])), np.mean(abs(y - features[1])), np.mean(abs(z - features[2]))))
    #Jerk Signals
    features.extend((np.mean(np.diff(x)), np.mean(np.diff(y)), np.mean(np.diff(z)), np.std(np.diff(x)), np.std(np.diff(y)), np.std(np.diff(z))))
    features.extend((np.mean(abs(np.diff(x) - features[9])), np.mean(abs(np.diff(y) - features[10])), np.mean(abs(np.diff(z) - features[11]))))
    features.extend((skew(x), skew(y), skew(z), kurtosis(x), kurtosis(y), kurtosis(z)))
    features.extend((max(x), max(y), max(z), min(x), min(y), min(z)))
    '''
    
    return features
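
# Quick sanity check (illustrative only, assumes a window array shaped
# (n_windows, WINDOW_READINGS, 9)): feature_extraction() returns 39 values per
# x/y/z triplet (6 mean/std + 3 MAD + 9 jerk + 6 max/min + 3 correlation +
# 3 energy + 3 RMS + 6 skew/kurtosis), so feature_selection() yields an array
# of shape (n_windows, 3, 39).
# _demo = np.random.randn(WINDOW_READINGS)
# assert len(feature_extraction(_demo, _demo, _demo)) == 39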

def add_noise(data):
    # Perturb each reading with uniform [0, 1) plus zero-mean Laplace noise
    data_noise = data + np.random.uniform(size=len(data))
    data_noise = data_noise + np.random.laplace(loc=0.0, scale=1.0, size=len(data))
    return data_noise

def data_augmentation(X):
    X_noise = X.copy()    # copy so the original windows are not modified in place
    for i in range(X_noise.shape[0]):
        for j in range(X_noise.shape[2]):
            X_noise[i, :, j] = add_noise(X_noise[i, :, j])
    return np.concatenate((X, X_noise), axis=0)

def feature_selection(X, augmentData=False):
    data = []
    for i in range(X.shape[0]):
        features = []
        for j in range(0, X.shape[2], 3):
            x = [X[i][u][j] for u in range(X.shape[1])]
            y = [X[i][u][j+1] for u in range(X.shape[1])]
            z = [X[i][u][j+2] for u in range(X.shape[1])]
            
            if augmentData:
                x_noise = add_noise(x)
                y_noise = add_noise(y)
                z_noise = add_noise(z)
                features.append(feature_extraction(x_noise, y_noise, z_noise))
            else:
                features.append(feature_extraction(x, y, z))

        data.append(features)
    return np.array(data)

def feature_engineering(X, augmentData=False):
    if augmentData:
        return np.concatenate((feature_selection(X, False), feature_selection(X, True)), axis=0)
    else:
        return feature_selection(X, False)
    
def shitHotLP(data, cutoff, fs, order):
    # Convenience wrapper: apply the Butterworth low-pass filter to the raw readings
    return butter_lowpass_filter(data, cutoff, fs, order)

def butter_lowpass(cutoff, fs, order):
    nyq = 0.5 * fs
    normal_cutoff = cutoff / nyq
    b, a = butter(order, normal_cutoff, btype='low', analog=False)
    return b, a

def butter_lowpass_filter(data, cutoff, fs, order):
    b, a = butter_lowpass(cutoff, fs, order=order)
    y = lfilter(b, a, data)
    return y
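
# Optional check of the low-pass response (a sketch, not part of the pipeline):
# freqz is already imported above; uncomment to plot the filter's gain curve for
# the CUTOFF / FILTER_SAMPLING_RATE / ORDER values defined earlier.
# b, a = butter_lowpass(CUTOFF, FILTER_SAMPLING_RATE, ORDER)
# w, h = freqz(b, a, worN=512)
# plt.plot(0.5 * FILTER_SAMPLING_RATE * w / np.pi, np.abs(h))
# plt.axvline(CUTOFF, color='k', linestyle='--')
# plt.xlabel('Frequency (Hz)'); plt.ylabel('Gain')
# plt.show()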


def read_data(filename, label):
    raw_data = pd.read_csv(filename)
    raw_data = raw_data.iloc[100:-50, 0:9]
    raw_data = raw_data[:raw_data.shape[0]-(raw_data.shape[0]%WINDOW_READINGS)]
    #print(raw_data.shape)
    
    filtered_data = shitHotLP(raw_data, CUTOFF, FILTER_SAMPLING_RATE, ORDER)
    sampled_data = filtered_data.reshape(-1, WINDOW_READINGS, 9)
    #print(sampled_data.shape)
    return sampled_data, [label]*sampled_data.shape[0] 



def import_data(root_dirpath, test_data_size):
    X = np.zeros([1,WINDOW_READINGS,9])
    Xt = np.zeros([1,WINDOW_READINGS,9])
    y = []
    yt = []
    
    sub_directories = next(os.walk(root_dirpath))[1]    
    for sub_dir in sub_directories:
        files = next(os.walk(root_dirpath + sub_dir))[2]
        #print(files)
        count = 0
        for file in files:
            if not file:
                continue
            temp_x, temp_y = read_data(root_dirpath + sub_dir + '/' + file, int(sub_dir))
            if count < test_data_size:
                print(file)
                Xt = np.concatenate((Xt, temp_x), axis=0)
                yt = yt + temp_y
            else:
                X = np.concatenate((X, temp_x), axis=0)
                y = y + temp_y
            count = count + 1
            
    y = np.array(y)
    yt = np.array(yt)
    return X[1:], y, Xt[1:], yt
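
# Expected layout under DATAPATH (inferred from how import_data() walks the tree
# and from the test-file printout in the next cell): one sub-directory per numeric
# label, each holding the CSV recordings for that move, e.g.
#   data1/0/santos_natural.csv
#   data1/9/hazmei_window_3.csv
# The first `test_data_size` files of each label folder become the test set (Xt, yt);
# the remaining files become the training set (X, y).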

In [4]:
#Prediction

def predict(model):
    # Evaluates the trained model on the global test set (Xt, Yt) from import_data()
    Y_output = model.predict(Xt)
    Y_pred = np.argmax(Y_output, axis=1)
    print(np.array(Y_pred))
    print("")
    print("Accuracy Rate:")
    print(metrics.accuracy_score(Yt, Y_pred))
    print("")
    print("Confusion Matrix:")
    confusion_matrix = metrics.confusion_matrix(Yt, Y_pred)
    print(confusion_matrix)
    normalised_confusion_matrix = np.array(confusion_matrix, dtype=np.float32)/np.sum(confusion_matrix)*100
    
    '''
    print("Precision: {}%".format(100*metrics.precision_score(Yt, Y_pred, average="weighted")))
    print("Recall: {}%".format(100*metrics.recall_score(Yt, Y_pred, average="weighted")))
    print("f1_score: {}%".format(100*metrics.f1_score(Yt, Y_pred, average="weighted")))
    print("")
    print("Confusion matrix (normalised to % of total test data):")
    print(normalised_confusion_matrix)
    '''
    
    # Plot Results: 
    plt.figure(figsize=(12, 12))
    plt.imshow(
        normalised_confusion_matrix, 
        interpolation='nearest', 
        cmap=plt.cm.rainbow
    )
    plt.title("Confusion matrix \n(normalised to % of total test data)")
    plt.colorbar()
    tick_marks = np.arange(len(LABELS))
    plt.xticks(tick_marks, LABELS, rotation=90)
    plt.yticks(tick_marks, LABELS)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()

    np.savetxt("data/accuracy.csv", [metrics.accuracy_score(Yt, Y_pred)], delimiter=",")
    np.savetxt("data/confusion_matrix.csv", metrics.confusion_matrix(Yt, Y_pred), delimiter=",")
    model.save('data/trained_nn_model.h5')

In [5]:
X, Y, Xt, Yt = import_data(DATAPATH, 1)
print(X.shape)
print(Y.shape)
print(Xt.shape)
print(Yt.shape)

#print(X)
#print(Y)
print(Yt)


hazmei_window_3.csv
santos_natural.csv
hazmei_logout.csv
hazmei_turnclap2.csv
hyun_jumpingjack.csv
Rahman_waving.csv
santos_window360_2.csv
hyun_squatturnclap_1.csv
hyun_sidestep.csv
Rahman_frontback.csv
121017_hazmei_busdriver.csv
yz_jumping.csv
(4680, 100, 9)
(4680,)
(586, 100, 9)
(586,)
[ 9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9
  9  9  9  9  9  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0 11 11 11 11 11 11 11
 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11  7  7
  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7
  7  7  7  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6
  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6
  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  1  1  1  1  1  1  1  1  1  1
  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1
  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1
  1  1 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10
 10 10 10 10 10 10 10  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8
  8  8  8  8  8  8  8  8  8  8  8  8  4  4  4  4  4  4  4  4  4  4  4  4  4
  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4
  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  3
  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3
  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3
  3  3  3  3  3  3  3  3  3  3  3  2  2  2  2  2  2  2  2  2  2  2  2  2  2
  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2
  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  5  5
  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5
  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5
  5  5  5  5  5  5  5  5  5  5  5]

In [6]:
#Plot Data
Size = Xt.shape[1]
Sample = [230, 180]
legends = ['acc1x', 'acc1y', 'acc1z', 'acc2x', 'acc2y', 'acc2z', 'gyrox', 'gyroy', 'gyroz']

for i in Sample:
    print(Yt[i])
    plt.figure(figsize=(9,9))
    plt.plot(range(Size), Xt[i], label=legends)
    plt.show()

    temp = Xt[i].T
    print(temp.shape)
    #print(temp[0])
    print(np.correlate(temp[0], temp[1])[0])

    fourier = np.fft.fft(temp[8])
    freqs = np.fft.fftfreq(temp.shape[1])
    plt.plot(freqs, fourier.real, freqs, fourier.imag)
    plt.show()

    plt.plot(freqs, np.abs(fourier)**2)
    plt.show()

    idx = np.argsort(freqs)
    plt.plot(freqs[idx], fourier[idx])
    plt.show()
    print(max(freqs))
    print(np.mean(np.abs(fourier)**2))



#print(fourier)
#print(freqs)
#print(np.abs(fourier))
#print(np.mean(fourier))
#print(freqs)
#print(np.abs(freqs[0:51]))


1
(9, 100)
7124.85004708
/Users/arshanrahman/anaconda/lib/python3.6/site-packages/numpy/core/numeric.py:531: ComplexWarning: Casting complex values to real discards the imaginary part
  return array(a, dtype, copy=False, order=order)
0.49
224.917667986
6
(9, 100)
22302.7698074
0.49
29974.6271676

In [7]:
#Raw Data with Data Augmentation
'''
X = data_augmentation(X)
Xt = data_augmentation(Xt)
Y = np.concatenate((Y, Y), axis=0)
Yt = np.concatenate((Yt, Yt), axis=0)
'''

#Feature Selection & Data Augmentation

#X = feature_engineering(X, False)
#Xt = feature_engineering(Xt, False)
#Y = np.concatenate((Y, Y), axis=0)
#Yt = np.concatenate((Yt, Yt), axis=0)





Yhot = to_categorical(Y)

print(X.shape)
print(Xt.shape)
print(Yhot.shape)
#print([Xt[i] for i in Sample])

np.savetxt("data/test_data_format.csv", Xt[0], delimiter=",")

#X = X.reshape(X.shape[0], X.shape[1]*X.shape[2])
#Xt = Xt.reshape(Xt.shape[0], Xt.shape[1]*Xt.shape[2])


(4680, 100, 9)
(586, 100, 9)
(4680, 12)

In [8]:
#Neural Network

data_dim = X.shape[2]
timesteps = X.shape[1]
num_classes = Yhot.shape[1]
b_size = 32

model = Sequential()
model.add(Dense(50, input_shape=(timesteps, data_dim), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(num_classes, activation='softmax'))
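
# Optional: inspect layer output shapes and parameter counts before training.
# model.summary()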

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(X, Yhot, epochs=50, batch_size=b_size, validation_split=0.1, shuffle=True)
result = model.evaluate(X, Yhot)
print("\n%s: %.2f%%" % (model.metrics_names[0], result[0]))
print("\n%s: %.2f%%" % (model.metrics_names[1], result[1]*100))

predict(model)


Train on 4212 samples, validate on 468 samples
Epoch 1/50
4212/4212 [==============================] - 1s - loss: 4.8915 - acc: 0.4335 - val_loss: 14.9316 - val_acc: 0.0726
Epoch 2/50
4212/4212 [==============================] - 1s - loss: 3.2356 - acc: 0.6455 - val_loss: 14.9329 - val_acc: 0.0705
Epoch 3/50
4212/4212 [==============================] - 1s - loss: 2.6314 - acc: 0.7049 - val_loss: 14.8939 - val_acc: 0.0726
Epoch 4/50
4212/4212 [==============================] - 1s - loss: 2.2211 - acc: 0.7519 - val_loss: 14.8119 - val_acc: 0.0726
Epoch 5/50
4212/4212 [==============================] - 1s - loss: 2.1167 - acc: 0.7695 - val_loss: 14.7107 - val_acc: 0.0705
Epoch 6/50
4212/4212 [==============================] - 1s - loss: 2.0276 - acc: 0.8008 - val_loss: 14.5725 - val_acc: 0.0705
Epoch 7/50
4212/4212 [==============================] - 1s - loss: 1.9976 - acc: 0.8113 - val_loss: 14.4215 - val_acc: 0.0705
Epoch 8/50
4212/4212 [==============================] - 1s - loss: 1.9511 - acc: 0.8143 - val_loss: 14.3417 - val_acc: 0.0726
Epoch 9/50
4212/4212 [==============================] - 1s - loss: 1.9296 - acc: 0.8257 - val_loss: 14.1608 - val_acc: 0.0726
Epoch 10/50
4212/4212 [==============================] - 1s - loss: 1.8875 - acc: 0.8397 - val_loss: 14.2806 - val_acc: 0.0705
Epoch 11/50
4212/4212 [==============================] - 1s - loss: 1.8738 - acc: 0.8438 - val_loss: 14.2384 - val_acc: 0.0726
Epoch 12/50
4212/4212 [==============================] - 1s - loss: 1.7255 - acc: 0.8466 - val_loss: 14.2618 - val_acc: 0.0726
Epoch 13/50
4212/4212 [==============================] - 1s - loss: 1.4189 - acc: 0.8711 - val_loss: 14.5688 - val_acc: 0.0705
Epoch 14/50
4212/4212 [==============================] - 1s - loss: 1.3997 - acc: 0.8777 - val_loss: 14.6413 - val_acc: 0.0705
Epoch 15/50
4212/4212 [==============================] - 1s - loss: 1.3685 - acc: 0.8811 - val_loss: 14.5016 - val_acc: 0.0726
Epoch 16/50
4212/4212 [==============================] - 1s - loss: 1.3714 - acc: 0.8884 - val_loss: 14.5272 - val_acc: 0.0726
Epoch 17/50
4212/4212 [==============================] - 1s - loss: 1.3506 - acc: 0.8906 - val_loss: 14.7256 - val_acc: 0.0726
Epoch 18/50
4212/4212 [==============================] - 1s - loss: 1.3481 - acc: 0.8910 - val_loss: 14.7548 - val_acc: 0.0726
Epoch 19/50
4212/4212 [==============================] - 1s - loss: 1.3284 - acc: 0.8953 - val_loss: 14.8026 - val_acc: 0.0726
Epoch 20/50
4212/4212 [==============================] - 1s - loss: 1.3220 - acc: 0.8991 - val_loss: 14.7815 - val_acc: 0.0726
Epoch 21/50
4212/4212 [==============================] - 1s - loss: 1.3035 - acc: 0.9019 - val_loss: 14.8345 - val_acc: 0.0726
Epoch 22/50
4212/4212 [==============================] - 1s - loss: 1.3045 - acc: 0.9027 - val_loss: 14.8225 - val_acc: 0.0726
Epoch 23/50
4212/4212 [==============================] - 1s - loss: 1.3011 - acc: 0.8998 - val_loss: 14.8078 - val_acc: 0.0726
Epoch 24/50
4212/4212 [==============================] - 1s - loss: 1.3257 - acc: 0.8958 - val_loss: 14.8600 - val_acc: 0.0726
Epoch 25/50
4212/4212 [==============================] - 1s - loss: 1.3035 - acc: 0.9022 - val_loss: 14.8595 - val_acc: 0.0726
Epoch 26/50
4212/4212 [==============================] - 1s - loss: 1.2954 - acc: 0.9029 - val_loss: 14.8337 - val_acc: 0.0705
Epoch 27/50
4212/4212 [==============================] - 1s - loss: 1.2976 - acc: 0.9057 - val_loss: 14.8414 - val_acc: 0.0726
Epoch 28/50
4212/4212 [==============================] - 1s - loss: 1.2937 - acc: 0.9034 - val_loss: 14.8400 - val_acc: 0.0726
Epoch 29/50
4212/4212 [==============================] - 1s - loss: 1.2990 - acc: 0.9024 - val_loss: 14.8385 - val_acc: 0.0705
Epoch 30/50
4212/4212 [==============================] - 1s - loss: 1.2940 - acc: 0.9062 - val_loss: 14.7770 - val_acc: 0.0726
Epoch 31/50
4212/4212 [==============================] - 1s - loss: 1.2852 - acc: 0.9057 - val_loss: 14.8021 - val_acc: 0.0726
Epoch 32/50
4212/4212 [==============================] - 1s - loss: 1.2735 - acc: 0.9100 - val_loss: 14.8758 - val_acc: 0.0726
Epoch 33/50
4212/4212 [==============================] - 1s - loss: 1.2810 - acc: 0.9086 - val_loss: 14.8448 - val_acc: 0.0726
Epoch 34/50
4212/4212 [==============================] - 1s - loss: 1.2844 - acc: 0.9086 - val_loss: 14.8723 - val_acc: 0.0726
Epoch 35/50
4212/4212 [==============================] - 1s - loss: 1.2900 - acc: 0.9041 - val_loss: 14.8944 - val_acc: 0.0726
Epoch 36/50
4212/4212 [==============================] - 1s - loss: 1.2977 - acc: 0.9041 - val_loss: 14.8129 - val_acc: 0.0726
Epoch 37/50
4212/4212 [==============================] - 1s - loss: 1.2878 - acc: 0.9076 - val_loss: 14.7860 - val_acc: 0.0726
Epoch 38/50
4212/4212 [==============================] - 1s - loss: 1.2731 - acc: 0.9103 - val_loss: 14.8621 - val_acc: 0.0726
Epoch 39/50
4212/4212 [==============================] - 1s - loss: 1.2713 - acc: 0.9112 - val_loss: 14.8680 - val_acc: 0.0726
Epoch 40/50
4212/4212 [==============================] - 1s - loss: 1.2798 - acc: 0.9107 - val_loss: 14.8701 - val_acc: 0.0726
Epoch 41/50
4212/4212 [==============================] - 1s - loss: 1.2678 - acc: 0.9129 - val_loss: 14.8893 - val_acc: 0.0684
Epoch 42/50
4212/4212 [==============================] - 1s - loss: 1.2879 - acc: 0.9091 - val_loss: 14.8757 - val_acc: 0.0726
Epoch 43/50
4212/4212 [==============================] - 1s - loss: 1.2787 - acc: 0.9098 - val_loss: 14.8992 - val_acc: 0.0726
Epoch 44/50
4212/4212 [==============================] - 1s - loss: 1.2845 - acc: 0.9081 - val_loss: 14.8654 - val_acc: 0.0726
Epoch 45/50
4212/4212 [==============================] - 1s - loss: 1.2797 - acc: 0.9086 - val_loss: 14.8858 - val_acc: 0.0726
Epoch 46/50
4212/4212 [==============================] - 1s - loss: 1.2758 - acc: 0.9088 - val_loss: 14.8528 - val_acc: 0.0726
Epoch 47/50
4212/4212 [==============================] - 1s - loss: 1.2810 - acc: 0.9100 - val_loss: 14.8593 - val_acc: 0.0726
Epoch 48/50
4212/4212 [==============================] - 1s - loss: 1.2823 - acc: 0.9084 - val_loss: 14.9025 - val_acc: 0.0726
Epoch 49/50
4212/4212 [==============================] - 1s - loss: 1.2737 - acc: 0.9138 - val_loss: 14.8914 - val_acc: 0.0726
Epoch 50/50
4212/4212 [==============================] - 1s - loss: 1.2669 - acc: 0.9157 - val_loss: 14.8983 - val_acc: 0.0726
4576/4680 [============================>.] - ETA: 0s
loss: 2.61

acc: 83.70%
[ 9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9
  9  9  9  9  9  0  0  0  0  0  0  0  0  0  0  0  0  0  3  3  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  2  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  3  3  3 11 11 11 11 11 11 11
 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11  7  7
  7  7  7 11  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7
  7  7  7  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  4  6
  6  4  6  6  4  6  6  6  6  4  6  6  4  4  4  6  6  6  6  4  6  6  6  6  6
  6 11  4  4  6  6  4  4  6  4  6  6  6  6 11  1  1  1  1  1  1  7  1  7  1
  7  1  1  1  1  1  7  1  1  1  1  1  1  1  1  1  1  7  1  1  1  1  1  1  1
  1  1  1  1  1  1  1  1  1  1  1  2  1  1  1  1  1  1  1  1  1  1  1  1  1
  1  1 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10
 10 10 10 10 10 10 10  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7
  7  3  7  7  7  7  7  7  7  7  7  7  4  3  3  4  4  4  4  4  4  3  4  4  4
  4  4  4  4  4  4  2  4  4  4  3  4  4  4  4  4  4  4  4  4  4  4  4  4  4
  4  4  4  3  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  3
  3  3  3  3  3  3  3  3  3  3  4  3  3  3  3  3  3  3  3  3  3  0  3  3  3
  3  4  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3
  3  3  3  3  3  3  3  3  3  3  3  2  2  2  2  2  2  2  2  2  2  2  2  2  2
  2  2  2  2  2  2  2  2  2  2  2  2  2  2  0  2  2  2  2  2  2  2  2  2  2
  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  6  6
  3  3  4  3  3  3  3  3  3  4  3  3  3  3  4  3  3  0  3  3  3  3  3  3  3
  3  3  3  4  6  3  3  3  3  6  6  3  3  3  4  4  3  3  3  3  4  3  3  3  3
  3  3  3  3  3  3  3  3  3  3  3]

Accuracy Rate:
0.776450511945

Confusion Matrix:
[[57  0  1  5  0  0  0  0  0  0  0  0]
 [ 0 56  1  0  0  0  0  5  0  0  0  0]
 [ 1  0 61  0  0  0  0  0  0  0  0  0]
 [ 1  0  0 59  2  0  0  0  0  0  0  0]
 [ 0  0  1  5 56  0  0  0  0  0  0  0]
 [ 1  0  0 50  7  0  5  0  0  0  0  0]
 [ 0  0  0  0 13  0 47  0  0  0  0  2]
 [ 0  0  0  0  0  0  0 29  0  0  0  1]
 [ 0  0  0  1  0  0  0 29  0  0  0  0]
 [ 0  0  0  0  0  0  0  0  0 30  0  0]
 [ 0  0  0  0  0  0  0  0  0  0 30  0]
 [ 0  0  0  0  0  0  0  0  0  0  0 30]]

In [9]:
#CNN Neural Network

data_dim = X.shape[2]
timesteps = X.shape[1]
num_classes = Yhot.shape[1]
b_size = 32

model = Sequential()
model.add(Conv1D(64, 4, padding='valid', activation='relu', strides=1, input_shape=(timesteps, data_dim)))
model.add(MaxPooling1D())
model.add(Dropout(0.2))
model.add(Conv1D(64, 2, padding='valid', activation='relu', strides=1) )
model.add(Conv1D(32, 2, padding='valid', activation='relu', strides=1) )
model.add(MaxPooling1D())
model.add(Dropout(0.2))
model.add(Conv1D(16, 1, padding='valid', activation='relu', strides=1) )
model.add(GlobalAveragePooling1D())
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))
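
# Rough shape walk-through for a 100-sample window (an aside, assuming the
# default MaxPooling1D pool size of 2; valid convolutions shrink the sequence
# by kernel_size - 1):
# 100 -> Conv1D(k=4) -> 97 -> MaxPool -> 48 -> Conv1D(k=2) -> 47 -> Conv1D(k=2) -> 46
#     -> MaxPool -> 23 -> Conv1D(k=1) -> 23 -> GlobalAveragePooling1D -> 16 features.
# model.summary() prints the exact shapes if in doubt.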


model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(X, Yhot, epochs=60, batch_size=b_size, validation_split=0.1, shuffle=True)
result = model.evaluate(X, Yhot)
print("\n%s: %.2f%%" % (model.metrics_names[0], result[0]))
print("\n%s: %.2f%%" % (model.metrics_names[1], result[1]*100))

predict(model)


Train on 4212 samples, validate on 468 samples
Epoch 1/60
4212/4212 [==============================] - 2s - loss: 1.7483 - acc: 0.4186 - val_loss: 3.5632 - val_acc: 0.0705
Epoch 2/60
4212/4212 [==============================] - 2s - loss: 1.0381 - acc: 0.6686 - val_loss: 8.1548 - val_acc: 0.0726
Epoch 3/60
4212/4212 [==============================] - 2s - loss: 0.6916 - acc: 0.7562 - val_loss: 11.0164 - val_acc: 0.0705
Epoch 4/60
4212/4212 [==============================] - 2s - loss: 0.5172 - acc: 0.8132 - val_loss: 13.1918 - val_acc: 0.0726
Epoch 5/60
4212/4212 [==============================] - 2s - loss: 0.4455 - acc: 0.8466 - val_loss: 12.6315 - val_acc: 0.0705
Epoch 6/60
4212/4212 [==============================] - 2s - loss: 0.3859 - acc: 0.8635 - val_loss: 14.2266 - val_acc: 0.0726
Epoch 7/60
4212/4212 [==============================] - 2s - loss: 0.3545 - acc: 0.8775 - val_loss: 13.7950 - val_acc: 0.0726
Epoch 8/60
4212/4212 [==============================] - 2s - loss: 0.3051 - acc: 0.8965 - val_loss: 11.6992 - val_acc: 0.0726
Epoch 9/60
4212/4212 [==============================] - 2s - loss: 0.3027 - acc: 0.9029 - val_loss: 13.7240 - val_acc: 0.0726
Epoch 10/60
4212/4212 [==============================] - 2s - loss: 0.2834 - acc: 0.9046 - val_loss: 13.5908 - val_acc: 0.0726
Epoch 11/60
4212/4212 [==============================] - 2s - loss: 0.2665 - acc: 0.9131 - val_loss: 13.5560 - val_acc: 0.0705
Epoch 12/60
4212/4212 [==============================] - 2s - loss: 0.2537 - acc: 0.9190 - val_loss: 12.5764 - val_acc: 0.0726
Epoch 13/60
4212/4212 [==============================] - 2s - loss: 0.2322 - acc: 0.9269 - val_loss: 14.3244 - val_acc: 0.0726
Epoch 14/60
4212/4212 [==============================] - 2s - loss: 0.2307 - acc: 0.9252 - val_loss: 13.8742 - val_acc: 0.0726
Epoch 15/60
4212/4212 [==============================] - 2s - loss: 0.2187 - acc: 0.9347 - val_loss: 14.4479 - val_acc: 0.0726
Epoch 16/60
4212/4212 [==============================] - 2s - loss: 0.2121 - acc: 0.9349 - val_loss: 14.5856 - val_acc: 0.0726
Epoch 17/60
4212/4212 [==============================] - 2s - loss: 0.1955 - acc: 0.9378 - val_loss: 14.6223 - val_acc: 0.0705
Epoch 18/60
4212/4212 [==============================] - 2s - loss: 0.2000 - acc: 0.9359 - val_loss: 14.3483 - val_acc: 0.0726
Epoch 19/60
4212/4212 [==============================] - 2s - loss: 0.1899 - acc: 0.9392 - val_loss: 14.7428 - val_acc: 0.0726
Epoch 20/60
4212/4212 [==============================] - 2s - loss: 0.1977 - acc: 0.9376 - val_loss: 14.6147 - val_acc: 0.0726
Epoch 21/60
4212/4212 [==============================] - 2s - loss: 0.1631 - acc: 0.9471 - val_loss: 14.8217 - val_acc: 0.0726
Epoch 22/60
4212/4212 [==============================] - 2s - loss: 0.1698 - acc: 0.9471 - val_loss: 14.7910 - val_acc: 0.0726
Epoch 23/60
4212/4212 [==============================] - 2s - loss: 0.1710 - acc: 0.9466 - val_loss: 14.6967 - val_acc: 0.0726
Epoch 24/60
4212/4212 [==============================] - 2s - loss: 0.1654 - acc: 0.9456 - val_loss: 14.8134 - val_acc: 0.0726
Epoch 25/60
4212/4212 [==============================] - 2s - loss: 0.1595 - acc: 0.9478 - val_loss: 14.8008 - val_acc: 0.0726
Epoch 26/60
4212/4212 [==============================] - 2s - loss: 0.1528 - acc: 0.9504 - val_loss: 14.7537 - val_acc: 0.0726
Epoch 27/60
4212/4212 [==============================] - 1s - loss: 0.1540 - acc: 0.9544 - val_loss: 14.6282 - val_acc: 0.0726
Epoch 28/60
4212/4212 [==============================] - 2s - loss: 0.1524 - acc: 0.9518 - val_loss: 14.6351 - val_acc: 0.0726
Epoch 29/60
4212/4212 [==============================] - 1s - loss: 0.1481 - acc: 0.9506 - val_loss: 14.5798 - val_acc: 0.0726
Epoch 30/60
4212/4212 [==============================] - 1s - loss: 0.1324 - acc: 0.9575 - val_loss: 14.8397 - val_acc: 0.0726
Epoch 31/60
4212/4212 [==============================] - 2s - loss: 0.1555 - acc: 0.9506 - val_loss: 14.7442 - val_acc: 0.0726
Epoch 32/60
4212/4212 [==============================] - 1s - loss: 0.1372 - acc: 0.9577 - val_loss: 14.7496 - val_acc: 0.0726
Epoch 33/60
4212/4212 [==============================] - 1s - loss: 0.1339 - acc: 0.9594 - val_loss: 14.7512 - val_acc: 0.0726
Epoch 34/60
4212/4212 [==============================] - 1s - loss: 0.1322 - acc: 0.9634 - val_loss: 14.7643 - val_acc: 0.0726
Epoch 35/60
4212/4212 [==============================] - 1s - loss: 0.1306 - acc: 0.9620 - val_loss: 14.8643 - val_acc: 0.0726
Epoch 36/60
4212/4212 [==============================] - 1s - loss: 0.1247 - acc: 0.9604 - val_loss: 14.8371 - val_acc: 0.0726
Epoch 37/60
4212/4212 [==============================] - 1s - loss: 0.1241 - acc: 0.9606 - val_loss: 14.7599 - val_acc: 0.0726
Epoch 38/60
4212/4212 [==============================] - 1s - loss: 0.1319 - acc: 0.9587 - val_loss: 14.7693 - val_acc: 0.0726
Epoch 39/60
4212/4212 [==============================] - 2s - loss: 0.1238 - acc: 0.9620 - val_loss: 14.8960 - val_acc: 0.0726
Epoch 40/60
4212/4212 [==============================] - 1s - loss: 0.1205 - acc: 0.9620 - val_loss: 14.8650 - val_acc: 0.0726
Epoch 41/60
4212/4212 [==============================] - 1s - loss: 0.1073 - acc: 0.9670 - val_loss: 14.8266 - val_acc: 0.0726
Epoch 42/60
4212/4212 [==============================] - 1s - loss: 0.1136 - acc: 0.9637 - val_loss: 14.9171 - val_acc: 0.0726
Epoch 43/60
4212/4212 [==============================] - 2s - loss: 0.1262 - acc: 0.9618 - val_loss: 14.8673 - val_acc: 0.0662
Epoch 44/60
4212/4212 [==============================] - 2s - loss: 0.1127 - acc: 0.9677 - val_loss: 14.8590 - val_acc: 0.0726
Epoch 45/60
4212/4212 [==============================] - 2s - loss: 0.1050 - acc: 0.9694 - val_loss: 14.7969 - val_acc: 0.0726
Epoch 46/60
4212/4212 [==============================] - 1s - loss: 0.1140 - acc: 0.9634 - val_loss: 14.8990 - val_acc: 0.0726
Epoch 47/60
4212/4212 [==============================] - 1s - loss: 0.0983 - acc: 0.9715 - val_loss: 14.8270 - val_acc: 0.0726
Epoch 48/60
4212/4212 [==============================] - 1s - loss: 0.0976 - acc: 0.9698 - val_loss: 14.9391 - val_acc: 0.0726
Epoch 49/60
4212/4212 [==============================] - 1s - loss: 0.1004 - acc: 0.9689 - val_loss: 14.9295 - val_acc: 0.0726
Epoch 50/60
4212/4212 [==============================] - 1s - loss: 0.1058 - acc: 0.9675 - val_loss: 14.8244 - val_acc: 0.0726
Epoch 51/60
4212/4212 [==============================] - 1s - loss: 0.0942 - acc: 0.9706 - val_loss: 14.8455 - val_acc: 0.0726
Epoch 52/60
4212/4212 [==============================] - 1s - loss: 0.0995 - acc: 0.9710 - val_loss: 14.8883 - val_acc: 0.0726
Epoch 53/60
4212/4212 [==============================] - 1s - loss: 0.1014 - acc: 0.9665 - val_loss: 14.9078 - val_acc: 0.0726
Epoch 54/60
4212/4212 [==============================] - 1s - loss: 0.1019 - acc: 0.9679 - val_loss: 14.8301 - val_acc: 0.0726
Epoch 55/60
4212/4212 [==============================] - 1s - loss: 0.0842 - acc: 0.9744 - val_loss: 14.9049 - val_acc: 0.0726
Epoch 56/60
4212/4212 [==============================] - 1s - loss: 0.0905 - acc: 0.9729 - val_loss: 14.8834 - val_acc: 0.0726
Epoch 57/60
4212/4212 [==============================] - 1s - loss: 0.0844 - acc: 0.9727 - val_loss: 14.8953 - val_acc: 0.0726
Epoch 58/60
4212/4212 [==============================] - 2s - loss: 0.0957 - acc: 0.9729 - val_loss: 14.8890 - val_acc: 0.0705
Epoch 59/60
4212/4212 [==============================] - 1s - loss: 0.0956 - acc: 0.9717 - val_loss: 14.8673 - val_acc: 0.0726
Epoch 60/60
4212/4212 [==============================] - 1s - loss: 0.0788 - acc: 0.9774 - val_loss: 14.8863 - val_acc: 0.0726
4608/4680 [============================>.] - ETA: 0s
loss: 1.51

acc: 90.15%
[ 9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9
  9  9  9  9  9  0  0  0  0  0  0  0  0  0  0  0  0  2  0  0  0  0  0  0  0
  0  0  0  0  0  0  2  0  0  0  0  4  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  3  0  0 11 11 11 11 11 11 11
 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11  7  7
  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7
  7  7  7  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6
  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6
  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  1  1  1  1  1  1  1  1  1  1
  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1
  1  1  1  1  2  1  1  1  1  1  1  2  1  1  1  1  1  1  1  1  1  1  1  1  1
  1  1 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10
 10 10 10 10 10 10 10  8  8  7  8  8  7  8  8  8  8  8  8  8  8  8  7  8  8
  7  8  8  7  8  8  8  8  8  8  8  8  4  4  4  4  4  4  4  4  4  4  4  4  4
  4  4  4  4  4  1  2  4  4  4  3  4  4  4  4  4  4  4  4  4  4  4  4  4  4
  4  4  4  3  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4
  3  3  3  3  3  3  3  3  4  3  4  3  3  3  3  3  3  3  3  3  3  3  3  3  3
  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3
  3  3  3  3  3  3  3  3  3  3  3  2  2  0  2  2  2  2  2  2  2  2  2  2  2
  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2
  2  2  2  2  2  2  2  2  2  2  2  2  2  7  2  2  2  2  2  2  2  2  2  4  4
  4  6  6  4  3  4  3  6  3  3  4  4  4  3  4  4  4  0  4  3  3  4  4  6  3
  3  4  4  4  4  4  4  4  4  4  4  6  4  6  4  4  4  4  4  4  4  4  4  4  4
  4  4  6  4  4  4  4  4  4  4  4]

Accuracy Rate:
0.858361774744

Confusion Matrix:
[[59  0  2  1  1  0  0  0  0  0  0  0]
 [ 0 60  2  0  0  0  0  0  0  0  0  0]
 [ 1  0 60  0  0  0  0  1  0  0  0  0]
 [ 0  0  0 59  3  0  0  0  0  0  0  0]
 [ 0  1  1  2 58  0  0  0  0  0  0  0]
 [ 1  0  0  9 46  0  7  0  0  0  0  0]
 [ 0  0  0  0  0  0 62  0  0  0  0  0]
 [ 0  0  0  0  0  0  0 30  0  0  0  0]
 [ 0  0  0  0  0  0  0  5 25  0  0  0]
 [ 0  0  0  0  0  0  0  0  0 30  0  0]
 [ 0  0  0  0  0  0  0  0  0  0 30  0]
 [ 0  0  0  0  0  0  0  0  0  0  0 30]]

In [10]:
#LSTM Neural Network

'''data_dim = X.shape[2]
timesteps = X.shape[1]
num_classes = Yhot.shape[1]
b_size = 32

model = Sequential()
model.add(LSTM(32, return_sequences=True, input_shape=(timesteps, data_dim)))
model.add(LSTM(32, return_sequences=True))
#model.add(LSTM(32, return_sequences=True))
model.add(Flatten())
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(X, Yhot, epochs=10, batch_size=b_size, validation_split=0.1, shuffle=True)
result = model.evaluate(X, Yhot)
print("\n%s: %.2f%%" % (model.metrics_names[0], result[0]))
print("\n%s: %.2f%%" % (model.metrics_names[1], result[1]*100))

predict(model)'''


Out[10]:
'data_dim = X.shape[2]\ntimesteps = X.shape[1]\nnum_classes = Yhot.shape[1]\nb_size = 32\n\nmodel = Sequential()\nmodel.add(LSTM(32, return_sequences=True, input_shape=(timesteps, data_dim)))\nmodel.add(LSTM(32, return_sequences=True))\n#model.add(LSTM(32, return_sequences=True))\nmodel.add(Flatten())\nmodel.add(Dense(num_classes, activation=\'softmax\'))\n\nmodel.compile(loss=\'categorical_crossentropy\',\n              optimizer=\'adam\',\n              metrics=[\'accuracy\'])\n\nmodel.fit(X, Yhot, epochs=10, batch_size=b_size, validation_split=0.1, shuffle=True)\nresult = model.evaluate(X, Yhot)\nprint("\n%s: %.2f%%" % (model.metrics_names[0], result[0]))\nprint("\n%s: %.2f%%" % (model.metrics_names[1], result[1]*100))\n\npredict(model)'

In [11]:
'''
raw_data = pd.read_csv('data/sample_data_format.csv', skiprows=range(0, 7))
print(raw_data.shape)
cropped_data = raw_data.values.reshape(-1, 40, 9)
print(cropped_data.shape)
print(cropped_data)
pickle.dump(cropped_data, open('data/cropped_data_format.pkl', 'wb'))
np.savetxt("data/cropped_data_format_2.csv", cropped_data[2], delimiter=",")
'''


Out[11]:
'\nraw_data = pd.read_csv(\'data/sample_data_format.csv\', skiprows=range(0, 7))\nprint(raw_data.shape)\ncropped_data = raw_data.values.reshape(-1, 40, 9)\nprint(cropped_data.shape)\nprint(cropped_data)\npickle.dump(cropped_data, open(\'data/cropped_data_format.pkl\', \'wb\'))\nnp.savetxt("data/cropped_data_format_2.csv", cropped_data[2], delimiter=",")\n'

In [ ]: