In [1]:
import pandas as pd
import numpy as np
import pickle
import os

from scipy.stats import kurtosis, skew
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout, Activation, Flatten
from keras.layers import Conv1D, MaxPooling1D, GlobalAveragePooling1D
from keras.layers.normalization import BatchNormalization
from keras.utils import to_categorical
from keras.optimizers import Adam
from keras.models import load_model
from sklearn import metrics
import matplotlib.pyplot as plt
from scipy.signal import butter, lfilter, freqz


Using TensorFlow backend.

In [2]:
LABELS = [
    "HAZMEI'S SEXY POSE (NEUTRAL)", 
    "HAZMEI'S FLYING KISS (WAVE HANDS)",
    "HYUN'S MAD DRIVING SKILLS (BUS DRIVING)",
    "ENCIK'S IPPT 2.4KM (FRONT BACK)",
    "HYUN'S BALLET DANCE (SIDE STEP)",
    "HAZMEI'S BELLY BOUNCE (JUMPING)",
    "JUMPING JACK",
    "TURN CLAP",
    "SQUAT TURN CLAP",
    "WINDOW",
    "WINDOW 360",
    "MONEY (FINAL MOVE)"
]

'''LABELS = [
    "HAZMEI'S SEXY POSE (NEUTRAL)", 
    "HAZMEI'S FLYING KISS (WAVE HANDS)",
    "HYUN'S MAD DRIVING SKILLS (BUS DRIVING)",
    "ENCIK'S IPPT 2.4KM (FRONT BACK)",
    "HYUN'S BALLET DANCE (SIDE STEP)",
    "HAZMEI'S BELLY BOUNCE (JUMPING)"
] '''

SAMPLING_RATE = 50            # sensor readings per second (Hz)
WINDOW_SIZE = 2.4             # window length in seconds
WINDOW_READINGS = int(WINDOW_SIZE * SAMPLING_RATE)
DATAPATH = 'data_all/'

ORDER = 3                     # Butterworth filter order
CUTOFF = 7                    # desired cutoff frequency of the filter in Hz (take max/60)
FILTER_SAMPLING_RATE = 20     # sampling rate (Hz) used for the low-pass filter design

In [18]:
def normalize_data(data):
    data_norm = (data - data.mean()) / (data.max() - data.min())
    return np.array(data_norm)

def frequency(data):
    fourier = np.fft.fft(data)
    freqs = np.fft.fftfreq(len(data), d=1/SAMPLING_RATE)
    return fourier, freqs
    
def magnitude(x, y, z):
    x_sq = np.power(x, 2)
    y_sq = np.power(y, 2)
    z_sq = np.power(z, 2)

    xyz_sq = x_sq + y_sq + z_sq

    xyz_mag = np.sqrt(xyz_sq)
    return xyz_mag

def rms(x, axis=None):
    return np.sqrt(np.mean(np.power(x, 2), axis=axis))

def feature_extraction(x, y, z):
    #'''
    #mean, std
    features = [np.mean(x), np.mean(y), np.mean(z), np.std(x), np.std(y), np.std(z)]
    #Mean Absolute Deviation
    features.extend((np.mean(abs(x - features[0])), np.mean(abs(y - features[1])), np.mean(abs(z - features[2]))))
    #Jerk signals: mean, std, mean absolute deviation
    features.extend((np.mean(np.diff(x)), np.mean(np.diff(y)), np.mean(np.diff(z)), np.std(np.diff(x)), np.std(np.diff(y)), np.std(np.diff(z))))
    features.extend((np.mean(abs(np.diff(x) - features[9])), np.mean(abs(np.diff(y) - features[10])), np.mean(abs(np.diff(z) - features[11]))))
    #max, min
    features.extend((max(x), max(y), max(z), min(x), min(y), min(z)))
    #correlation
    features.extend((np.correlate(x, y)[0], np.correlate(x, z)[0], np.correlate(y, z)[0]))
    #energy
    features.extend((np.dot(x,x)/len(x), np.dot(y,y)/len(y), np.dot(z,z)/len(z)))
    #iqr
    #features.extend((np.subtract(*np.percentile(x, [75, 25])), np.subtract(*np.percentile(y, [75, 25])), np.subtract(*np.percentile(z, [75, 25]))))
    #Root Mean Square
    features.extend((rms(x), rms(y), rms(z)))
    #Skew, Kurtosis
    features.extend((skew(x), skew(y), skew(z), kurtosis(x), kurtosis(y), kurtosis(z)))
    #'''
    
    '''
    #Frequency Domain Features
    fourier_x = np.fft.fft(x)
    fourier_y = np.fft.fft(y)
    fourier_z = np.fft.fft(z)
    freqs = np.fft.fftfreq(WINDOW_READINGS)
    fourier_x = np.abs(fourier_x)
    fourier_y = np.abs(fourier_y)
    fourier_z = np.abs(fourier_z)
    #Mean Frequency, Skew, Kurtosis
    features.extend((np.mean(fourier_x), np.mean(fourier_y), np.mean(fourier_z)))
    features.extend((skew(fourier_x), skew(fourier_y), skew(fourier_z), kurtosis(fourier_x), kurtosis(fourier_y), kurtosis(fourier_z)))
    '''

    '''
    #Old Feature Extraction
    features = [np.mean(x), np.mean(y), np.mean(z), np.std(x), np.std(y), np.std(z)]
    #Median Absolute Deviation
    features.extend((np.mean(abs(x - features[0])), np.mean(abs(y - features[1])), np.mean(abs(z - features[2]))))
    #Jerk Signals
    features.extend((np.mean(np.diff(x)), np.mean(np.diff(y)), np.mean(np.diff(z)), np.std(np.diff(x)), np.std(np.diff(y)), np.std(np.diff(z))))
    features.extend((np.mean(abs(np.diff(x) - features[9])), np.mean(abs(np.diff(y) - features[10])), np.mean(abs(np.diff(z) - features[11]))))
    features.extend((skew(x), skew(y), skew(z), kurtosis(x), kurtosis(y), kurtosis(z)))
    features.extend((max(x), max(y), max(z), min(x), min(y), min(z)))
    '''
    
    return features

def add_noise(data):
    data_noise = data + np.random.uniform(size=len(data))
    data_noise = data_noise + np.random.laplace(loc=0.0, scale=1.0, size=len(data))
    return data_noise

def data_augmentation(X):
    X_noise = X.copy()    # copy so the original windows are not mutated
    for i in range(X_noise.shape[0]):
        for j in range(X_noise.shape[2]):
            X_noise[i, :, j] = add_noise(X_noise[i, :, j])
    return np.concatenate((X, X_noise), axis=0)

def feature_selection(X, augmentData=False):
    data = []
    for i in range(X.shape[0]):
        features = []
        for j in range(0, X.shape[2], 3):
            x = [X[i][u][j] for u in range(X.shape[1])]
            y = [X[i][u][j+1] for u in range(X.shape[1])]
            z = [X[i][u][j+2] for u in range(X.shape[1])]
            
            if augmentData:
                x_noise = add_noise(x)
                y_noise = add_noise(y)
                z_noise = add_noise(z)
                features.append(feature_extraction(x_noise, y_noise, z_noise))
            else:
                features.append(feature_extraction(x, y, z))

        data.append(features)
    return np.array(data)

def feature_engineering(X, augmentData=False):
    if augmentData:
        return np.concatenate((feature_selection(X, False), feature_selection(X, True)), axis=0)
    else:
        return feature_selection(X, False)
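
# Hedged sketch (illustration only, never called by the pipeline): with the
# statistics computed in feature_extraction(), each x/y/z triple yields 39
# values, so a windowed array of shape (n_windows, WINDOW_READINGS, 9) maps to
# (n_windows, 3, 39). The synthetic random input below is an assumption.
def _demo_feature_engineering_shape():
    X_demo = np.random.randn(4, WINDOW_READINGS, 9)           # 4 fake windows
    F_demo = feature_engineering(X_demo, augmentData=False)
    print(F_demo.shape)                                        # expected: (4, 3, 39)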
    
def shitHotLP(data, cutoff, fs, order):
    # Thin wrapper around butter_lowpass_filter; the filter computes its own
    # coefficients, so no separate butter_lowpass call is needed here.
    return butter_lowpass_filter(data, cutoff, fs, order)

def butter_lowpass(cutoff, fs, order):
    nyq = 0.5 * fs
    normal_cutoff = cutoff / nyq
    b, a = butter(order, normal_cutoff, btype='low', analog=False)
    return b, a

def butter_lowpass_filter(data, cutoff, fs, order):
    b, a = butter_lowpass(cutoff, fs, order=order)
    y = lfilter(b, a, data)
    return y
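
# Hedged sketch (illustration only, never called by the pipeline): applies the
# Butterworth low-pass defined above to one synthetic accelerometer axis using
# the module-level CUTOFF / FILTER_SAMPLING_RATE / ORDER constants. The
# sine-plus-noise signal is an assumption made purely for demonstration.
def _demo_lowpass_filter():
    t = np.arange(0, WINDOW_SIZE, 1 / SAMPLING_RATE)                  # 2.4 s at 50 Hz
    raw = np.sin(2 * np.pi * 2 * t) + 0.3 * np.random.randn(len(t))   # 2 Hz motion + noise
    smooth = butter_lowpass_filter(raw, CUTOFF, FILTER_SAMPLING_RATE, ORDER)
    plt.plot(t, raw, label='raw')
    plt.plot(t, smooth, label='filtered')
    plt.legend()
    plt.show()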


def read_data(filename, label):
    raw_data = pd.read_csv(filename)
    raw_data = raw_data.iloc[100:-50, 0:9]
    raw_data = raw_data[:raw_data.shape[0]-(raw_data.shape[0]%WINDOW_READINGS)]
    #print(raw_data.shape)
    #print(filename, ': Minimum: ', np.min(raw_data))
    #print(filename, ': Maximum: ', np.max(raw_data))

    
    filtered_data = shitHotLP(raw_data, CUTOFF, FILTER_SAMPLING_RATE, ORDER)
    sampled_data = filtered_data.reshape(-1, WINDOW_READINGS, 9)
    #print(sampled_data.shape)
    return sampled_data, [label]*sampled_data.shape[0] 



def import_data(root_dirpath, test_data_size, test_data_filename):
    X = np.zeros([1,WINDOW_READINGS,9])
    Xt = np.zeros([1,WINDOW_READINGS,9])
    y = []
    yt = []
    
    sub_directories = next(os.walk(root_dirpath))[1] 
    sub_directories = list(map(int, sub_directories))
    sub_directories.sort()
    #print(sub_directories)
    for sub_dir in sub_directories:
        files = next(os.walk(root_dirpath + str(sub_dir)))[2]
        #print(sub_dir, " ", files)
        count = 0
        samples = 0
        for file in files:
            if not file or file == '.DS_Store':
                continue
            #print(file)
            temp_x, temp_y = read_data(root_dirpath + str(sub_dir) + '/' + file, sub_dir)
            if count < test_data_size and test_data_filename in file:
            #if test_data_filename in file:
                print(file, ' size: ', len(temp_y))
                Xt = np.concatenate((Xt, temp_x), axis=0)
                yt = yt + temp_y
            else:
                X = np.concatenate((X, temp_x), axis=0)
                y = y + temp_y
            count = count + 1 if len(temp_y) > 40 else count + 0.5
            samples += len(temp_y)
            
        print(LABELS[y[-1]], ': ', samples)
    y = np.array(y)
    yt = np.array(yt)
    return X[1:], y, Xt[1:], yt

In [19]:
X, Y, Xt, Yt = import_data(DATAPATH, 1, "")
print(X.shape)
print(Y.shape)
print(Xt.shape)
print(Yt.shape)

#print(X)
#print(Y)
print(Yt)


(52, 120, 9)
santos_natural.csv  size:  52
(52, 120, 9)
(52, 120, 9)
(51, 120, 9)
(54, 120, 9)
(52, 120, 9)
(62, 120, 9)
(51, 120, 9)
HAZMEI'S SEXY POSE (NEUTRAL) :  426
(52, 120, 9)
rahman_waving.csv  size:  52
(52, 120, 9)
(55, 120, 9)
(51, 120, 9)
(52, 120, 9)
(51, 120, 9)
(52, 120, 9)
(51, 120, 9)
HAZMEI'S FLYING KISS (WAVE HANDS) :  416
(51, 120, 9)
121017_hazmei_busdriver.csv  size:  51
(52, 120, 9)
(52, 120, 9)
(51, 120, 9)
(58, 120, 9)
(51, 120, 9)
(52, 120, 9)
(52, 120, 9)
HYUN'S MAD DRIVING SKILLS (BUS DRIVING) :  419
(52, 120, 9)
rahman_frontback.csv  size:  52
(51, 120, 9)
(52, 120, 9)
(52, 120, 9)
(51, 120, 9)
(52, 120, 9)
(51, 120, 9)
(52, 120, 9)
ENCIK'S IPPT 2.4KM (FRONT BACK) :  413
(52, 120, 9)
hyun_sidestep.csv  size:  52
(51, 120, 9)
(52, 120, 9)
(52, 120, 9)
(51, 120, 9)
(52, 120, 9)
(52, 120, 9)
(51, 120, 9)
HYUN'S BALLET DANCE (SIDE STEP) :  413
(52, 120, 9)
yz_jumping.csv  size:  52
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
HAZMEI'S BELLY BOUNCE (JUMPING) :  277
(51, 120, 9)
hyun_jumpingjack.csv  size:  51
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(3, 120, 9)
(25, 120, 9)
(25, 120, 9)
(22, 120, 9)
(25, 120, 9)
JUMPING JACK :  226
(25, 120, 9)
hazmei_turnclap2.csv  size:  25
(25, 120, 9)
hazmei_turnclap1.csv  size:  25
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
TURN CLAP :  225
(25, 120, 9)
hyun_squatturnclap_1.csv  size:  25
(25, 120, 9)
hazmei_squatturnclap.csv  size:  25
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(17, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
SQUAT TURN CLAP :  292
(25, 120, 9)
hazmei_window_3.csv  size:  25
(25, 120, 9)
hazmei_window_2.csv  size:  25
(51, 120, 9)
(78, 120, 9)
(25, 120, 9)
(25, 120, 9)
(78, 120, 9)
(25, 120, 9)
(25, 120, 9)
(51, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
WINDOW :  508
(25, 120, 9)
santos_window360_2.csv  size:  25
(25, 120, 9)
hazmei_window360_1.csv  size:  25
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(24, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
(25, 120, 9)
WINDOW 360 :  424
(25, 120, 9)
hazmei_logout.csv  size:  25
(25, 120, 9)
santos_final_2.csv  size:  25
(25, 120, 9)
(78, 120, 9)
(51, 120, 9)
MONEY (FINAL MOVE) :  204
(3631, 120, 9)
(3631,)
(612, 120, 9)
(612,)
[ 0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1
  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1
  1  1  1  1  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2
  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2
  2  2  2  2  2  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3
  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3
  3  3  3  3  3  3  3  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4
  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4
  4  4  4  4  4  4  4  4  4  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5
  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5
  5  5  5  5  5  5  5  5  5  5  5  6  6  6  6  6  6  6  6  6  6  6  6  6  6
  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6
  6  6  6  6  6  6  6  6  6  6  6  6  7  7  7  7  7  7  7  7  7  7  7  7  7
  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7
  7  7  7  7  7  7  7  7  7  7  7  7  8  8  8  8  8  8  8  8  8  8  8  8  8
  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8
  8  8  8  8  8  8  8  8  8  8  8  8  9  9  9  9  9  9  9  9  9  9  9  9  9
  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9
  9  9  9  9  9  9  9  9  9  9  9  9 10 10 10 10 10 10 10 10 10 10 10 10 10
 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10
 10 10 10 10 10 10 10 10 10 10 10 10 11 11 11 11 11 11 11 11 11 11 11 11 11
 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11
 11 11 11 11 11 11 11 11 11 11 11 11]

In [21]:
#Raw Data with data augmentation
'''
X = data_augmentation(X)
Xt = data_augmentation(Xt)
Y = np.concatenate((Y, Y), axis=0)
Yt = np.concatenate((Yt, Yt), axis=0)
'''

#Feature Selection with no data augmentation

#X = feature_engineering(X, False)
#Xt = feature_engineering(Xt, False)
#Y = np.concatenate((Y, Y), axis=0)
#Yt = np.concatenate((Yt, Yt), axis=0)




Yhot = to_categorical(Y)

print(X.shape)
print(Xt.shape)
print(Yhot.shape)

np.savetxt(DATAPATH+"yhot.csv", Yhot, delimiter=",")

#X = X.reshape(X.shape[0], X.shape[1]*X.shape[2])
#Xt = Xt.reshape(Xt.shape[0], Xt.shape[1]*Xt.shape[2])


[ 0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1
  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1
  1  1  1  1  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2
  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2
  2  2  2  2  2  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3
  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3
  3  3  3  3  3  3  3  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4
  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4
  4  4  4  4  4  4  4  4  4  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5
  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5
  5  5  5  5  5  5  5  5  5  5  5  6  6  6  6  6  6  6  6  6  6  6  6  6  6
  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6
  6  6  6  6  6  6  6  6  6  6  6  6  7  7  7  7  7  7  7  7  7  7  7  7  7
  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7
  7  7  7  7  7  7  7  7  7  7  7  7  8  8  8  8  8  8  8  8  8  8  8  8  8
  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8
  8  8  8  8  8  8  8  8  8  8  8  8  9  9  9  9  9  9  9  9  9  9  9  9  9
  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9
  9  9  9  9  9  9  9  9  9  9  9  9 10 10 10 10 10 10 10 10 10 10 10 10 10
 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10
 10 10 10 10 10 10 10 10 10 10 10 10 11 11 11 11 11 11 11 11 11 11 11 11 11
 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11
 11 11 11 11 11 11 11 11 11 11 11 11]
(3631, 120, 9)
(612, 120, 9)
(3631, 12)
[ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  1.]

In [6]:
#Plot Data
Size = Xt.shape[1]
Sample = [35, 40]
legends = ['acc1x', 'acc1y', 'acc1z', 'acc2x', 'acc2y', 'acc2z', 'gyrox', 'gyroy', 'gyroz']

for i in Sample:
    print(Yt[i])
    plt.figure(figsize=(9,9))
    plt.plot(list(range(Size)), Xt[i][:][:], label=legends )
    plt.show()

    temp = Xt[i][:][:].T
    print(temp.shape)
    #print(temp[0])
    print(np.correlate(temp[0], temp[1])[0])

    fourier = np.fft.fft(temp[8])
    freqs = np.fft.fftfreq(temp.shape[1])
    plt.plot(freqs, fourier.real, freqs, fourier.imag)
    plt.show()

    plt.plot(freqs, np.abs(fourier)**2)
    plt.show()

    idx = np.argsort(freqs)
    plt.plot(freqs[idx], fourier[idx])
    plt.show()
    print(max(freqs))
    print(np.mean(np.abs(fourier)**2))



#print(fourier)
#print(freqs)
#print(np.abs(fourier))
#print(np.mean(fourier))
#print(freqs)
#print(np.abs(freqs[0:51]))


0
(9, 120)
1.24322557523
/Users/arshanrahman/anaconda/lib/python3.6/site-packages/numpy/core/numeric.py:531: ComplexWarning: Casting complex values to real discards the imaginary part
  return array(a, dtype, copy=False, order=order)
0.491666666667
341.489858596
0
(9, 120)
0.0
0.491666666667
1090.4608953

In [7]:
#Prediction

def predict(model):
    Y_output = model.predict(Xt)
    Y_pred = np.argmax(Y_output, axis=1)
    print(np.array(Y_pred))
    print("")
    print("Accuracy Rate:")
    print(metrics.accuracy_score(Yt, Y_pred))
    print("")
    print("Confusion Matrix:")
    confusion_matrix = metrics.confusion_matrix(Yt, Y_pred)
    print(confusion_matrix)
    normalised_confusion_matrix = np.array(confusion_matrix, dtype=np.float32)/np.sum(confusion_matrix)*100
    
    '''
    print("Precision: {}%".format(100*metrics.precision_score(Yt, Y_pred, average="weighted")))
    print("Recall: {}%".format(100*metrics.recall_score(Yt, Y_pred, average="weighted")))
    print("f1_score: {}%".format(100*metrics.f1_score(Yt, Y_pred, average="weighted")))
    print("")
    print("Confusion matrix (normalised to % of total test data):")
    print(normalised_confusion_matrix)
    '''
    
    # Plot Results: 
    plt.figure(figsize=(12, 12))
    plt.imshow(
        normalised_confusion_matrix, 
        interpolation='nearest', 
        cmap=plt.cm.rainbow
    )
    plt.title("Confusion matrix \n(normalised to % of total test data)")
    plt.colorbar()
    tick_marks = np.arange(len(LABELS))
    plt.xticks(tick_marks, LABELS, rotation=90)
    plt.yticks(tick_marks, LABELS)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()

    np.savetxt(DATAPATH+"accuracy.csv", [metrics.accuracy_score(Yt, Y_pred)], delimiter=",")
    np.savetxt(DATAPATH+"confusion_matrix.csv", metrics.confusion_matrix(Yt, Y_pred), delimiter=",")
    model.save(DATAPATH+'trained_cnn_model.h5')
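
In [ ]:
# Hedged sketch (not part of the original run): per-class recall computed from
# the confusion_matrix.csv that predict() writes above. This assumes predict()
# has already been called at least once so the CSV exists in DATAPATH.
cm = np.loadtxt(DATAPATH + "confusion_matrix.csv", delimiter=",")
per_class_recall = np.diag(cm) / cm.sum(axis=1)   # correct predictions / actual windows per class
for label, recall in zip(LABELS, per_class_recall):
    print("%-45s %.3f" % (label, recall))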

In [ ]:
#Fully-connected (Dense) Neural Network

data_dim = X.shape[2]
timesteps = X.shape[1]
num_classes = Yhot.shape[1]
b_size = 32

model = Sequential()
model.add(Dense(70, input_shape=(timesteps, data_dim)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))

model.add(Dense(70))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))

model.add(Flatten())
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(X, Yhot, epochs=50, batch_size=b_size, validation_split=0.0, validation_data=(Xt, to_categorical(Yt)), shuffle=True)
result = model.evaluate(X, Yhot)
print("\n%s: %.2f%%" % (model.metrics_names[0], result[0]))
print("\n%s: %.2f%%" % (model.metrics_names[1], result[1]*100))

predict(model)


Train on 3631 samples, validate on 612 samples
Epoch 1/50
3631/3631 [==============================] - 4s - loss: 1.2834 - acc: 0.5844 - val_loss: 0.7454 - val_acc: 0.7190
Epoch 2/50
3631/3631 [==============================] - 4s - loss: 0.6343 - acc: 0.7816 - val_loss: 0.3721 - val_acc: 0.8415
Epoch 3/50
3631/3631 [==============================] - 4s - loss: 0.4449 - acc: 0.8405 - val_loss: 0.3547 - val_acc: 0.8644
Epoch 4/50
3631/3631 [==============================] - 5s - loss: 0.3336 - acc: 0.8819 - val_loss: 0.2264 - val_acc: 0.9248
Epoch 5/50
3631/3631 [==============================] - 4s - loss: 0.2830 - acc: 0.9055 - val_loss: 0.2517 - val_acc: 0.9069
Epoch 6/50
3631/3631 [==============================] - 4s - loss: 0.2688 - acc: 0.9072 - val_loss: 0.2176 - val_acc: 0.9330
Epoch 7/50
3631/3631 [==============================] - 4s - loss: 0.2238 - acc: 0.9251 - val_loss: 0.2083 - val_acc: 0.9297
Epoch 8/50
3631/3631 [==============================] - 4s - loss: 0.2000 - acc: 0.9306 - val_loss: 0.1919 - val_acc: 0.9379
Epoch 9/50
3631/3631 [==============================] - 5s - loss: 0.1720 - acc: 0.9408 - val_loss: 0.2380 - val_acc: 0.9199
Epoch 10/50
3631/3631 [==============================] - 4s - loss: 0.1714 - acc: 0.9441 - val_loss: 0.2484 - val_acc: 0.9216
Epoch 11/50
3631/3631 [==============================] - 5s - loss: 0.1665 - acc: 0.9446 - val_loss: 0.2218 - val_acc: 0.9314
Epoch 12/50
3631/3631 [==============================] - 4s - loss: 0.1566 - acc: 0.9490 - val_loss: 0.1920 - val_acc: 0.9461
Epoch 13/50
3631/3631 [==============================] - 4s - loss: 0.1643 - acc: 0.9438 - val_loss: 0.1751 - val_acc: 0.9542
Epoch 14/50
3631/3631 [==============================] - 4s - loss: 0.1672 - acc: 0.9482 - val_loss: 0.1630 - val_acc: 0.9542
Epoch 15/50
3631/3631 [==============================] - 4s - loss: 0.1200 - acc: 0.9609 - val_loss: 0.1531 - val_acc: 0.9444
Epoch 16/50
3631/3631 [==============================] - 4s - loss: 0.1173 - acc: 0.9617 - val_loss: 0.1661 - val_acc: 0.9477
Epoch 17/50
3631/3631 [==============================] - 4s - loss: 0.1174 - acc: 0.9579 - val_loss: 0.1320 - val_acc: 0.9624
Epoch 18/50
3631/3631 [==============================] - 4s - loss: 0.1123 - acc: 0.9623 - val_loss: 0.2317 - val_acc: 0.9346
Epoch 19/50
3631/3631 [==============================] - 4s - loss: 0.1269 - acc: 0.9625 - val_loss: 0.2753 - val_acc: 0.9199
Epoch 20/50
3631/3631 [==============================] - 4s - loss: 0.1011 - acc: 0.9672 - val_loss: 0.1550 - val_acc: 0.9461
Epoch 21/50
3631/3631 [==============================] - 4s - loss: 0.1110 - acc: 0.9625 - val_loss: 0.1793 - val_acc: 0.9461
Epoch 22/50
3631/3631 [==============================] - 4s - loss: 0.1092 - acc: 0.9683 - val_loss: 0.1587 - val_acc: 0.9526
Epoch 23/50
3631/3631 [==============================] - 4s - loss: 0.0979 - acc: 0.9653 - val_loss: 0.1371 - val_acc: 0.9673
Epoch 24/50
3631/3631 [==============================] - 4s - loss: 0.0911 - acc: 0.9703 - val_loss: 0.2051 - val_acc: 0.9493
Epoch 25/50
3631/3631 [==============================] - 4s - loss: 0.1153 - acc: 0.9647 - val_loss: 0.1515 - val_acc: 0.9526
Epoch 26/50
3631/3631 [==============================] - 4s - loss: 0.0817 - acc: 0.9769 - val_loss: 0.2029 - val_acc: 0.9346
Epoch 27/50
3631/3631 [==============================] - 4s - loss: 0.0965 - acc: 0.9708 - val_loss: 0.0886 - val_acc: 0.9722
Epoch 28/50
3631/3631 [==============================] - 4s - loss: 0.0845 - acc: 0.9738 - val_loss: 0.1830 - val_acc: 0.9592
Epoch 29/50
3631/3631 [==============================] - 4s - loss: 0.0913 - acc: 0.9758 - val_loss: 0.1338 - val_acc: 0.9641
Epoch 30/50
3631/3631 [==============================] - 4s - loss: 0.0904 - acc: 0.9719 - val_loss: 0.1481 - val_acc: 0.9608
Epoch 31/50
3631/3631 [==============================] - 4s - loss: 0.0876 - acc: 0.9714 - val_loss: 0.0838 - val_acc: 0.9755
Epoch 32/50
3631/3631 [==============================] - 4s - loss: 0.0880 - acc: 0.9738 - val_loss: 0.0862 - val_acc: 0.9722
Epoch 33/50
3631/3631 [==============================] - 4s - loss: 0.0743 - acc: 0.9755 - val_loss: 0.1115 - val_acc: 0.9739
Epoch 34/50
3631/3631 [==============================] - 4s - loss: 0.0832 - acc: 0.9771 - val_loss: 0.1307 - val_acc: 0.9755
Epoch 35/50
3631/3631 [==============================] - 4s - loss: 0.0951 - acc: 0.9705 - val_loss: 0.1329 - val_acc: 0.9706
Epoch 36/50
3631/3631 [==============================] - 4s - loss: 0.0808 - acc: 0.9777 - val_loss: 0.1602 - val_acc: 0.9641
Epoch 37/50
3631/3631 [==============================] - 4s - loss: 0.0776 - acc: 0.9752 - val_loss: 0.2388 - val_acc: 0.9395
Epoch 38/50
3631/3631 [==============================] - 4s - loss: 0.0753 - acc: 0.9766 - val_loss: 0.2505 - val_acc: 0.9477
Epoch 39/50
 544/3631 [===>..........................] - ETA: 3s - loss: 0.0638 - acc: 0.9724

In [25]:
#Convolutional Neural Network (CNN)

data_dim = X.shape[2]
timesteps = X.shape[1]
num_classes = Yhot.shape[1]
b_size = 32

model = Sequential()
model.add(Conv1D(64, 2, padding='valid', strides=2, input_shape=(timesteps, data_dim)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling1D())
model.add(Dropout(0.4))

model.add(Conv1D(64, 2, padding='valid', strides=2) )
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling1D())
model.add(Dropout(0.4))

model.add(Conv1D(32, 1, padding='valid', strides=2) )
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Activation('relu'))
model.add(GlobalAveragePooling1D())
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(X, Yhot, epochs=80, batch_size=b_size, validation_split=0.0, validation_data=(Xt, to_categorical(Yt)), shuffle=True)
result = model.evaluate(X, Yhot)
print("\n%s: %.2f%%" % (model.metrics_names[0], result[0]))
print("\n%s: %.2f%%" % (model.metrics_names[1], result[1]*100))

predict(model)


Train on 3631 samples, validate on 612 samples
Epoch 1/80
3631/3631 [==============================] - 2s - loss: 1.9959 - acc: 0.3600 - val_loss: 1.4549 - val_acc: 0.5801
Epoch 2/80
3631/3631 [==============================] - 1s - loss: 1.4625 - acc: 0.5244 - val_loss: 1.0737 - val_acc: 0.6977
Epoch 3/80
3631/3631 [==============================] - 1s - loss: 1.2007 - acc: 0.6238 - val_loss: 0.8943 - val_acc: 0.7484
Epoch 4/80
3631/3631 [==============================] - 1s - loss: 1.0083 - acc: 0.6747 - val_loss: 0.7551 - val_acc: 0.7663
Epoch 5/80
3631/3631 [==============================] - 1s - loss: 0.8950 - acc: 0.6913 - val_loss: 0.6835 - val_acc: 0.7712
Epoch 6/80
3631/3631 [==============================] - 1s - loss: 0.8103 - acc: 0.7207 - val_loss: 0.5870 - val_acc: 0.8301
Epoch 7/80
3631/3631 [==============================] - 1s - loss: 0.7426 - acc: 0.7395 - val_loss: 0.5428 - val_acc: 0.8366
Epoch 8/80
3631/3631 [==============================] - 1s - loss: 0.6711 - acc: 0.7722 - val_loss: 0.4781 - val_acc: 0.8497
Epoch 9/80
3631/3631 [==============================] - 1s - loss: 0.6323 - acc: 0.7777 - val_loss: 0.4168 - val_acc: 0.8856
Epoch 10/80
3631/3631 [==============================] - 1s - loss: 0.5956 - acc: 0.7838 - val_loss: 0.3841 - val_acc: 0.8987
Epoch 11/80
3631/3631 [==============================] - 1s - loss: 0.5558 - acc: 0.7998 - val_loss: 0.3500 - val_acc: 0.8824
Epoch 12/80
3631/3631 [==============================] - 1s - loss: 0.5443 - acc: 0.7959 - val_loss: 0.3454 - val_acc: 0.8987
Epoch 13/80
3631/3631 [==============================] - 1s - loss: 0.5191 - acc: 0.8147 - val_loss: 0.3092 - val_acc: 0.9036
Epoch 14/80
3631/3631 [==============================] - 1s - loss: 0.4774 - acc: 0.8290 - val_loss: 0.2993 - val_acc: 0.9150
Epoch 15/80
3631/3631 [==============================] - 1s - loss: 0.4718 - acc: 0.8303 - val_loss: 0.2712 - val_acc: 0.9346
Epoch 16/80
3631/3631 [==============================] - 1s - loss: 0.4480 - acc: 0.8334 - val_loss: 0.2959 - val_acc: 0.9150
Epoch 17/80
3631/3631 [==============================] - 1s - loss: 0.4400 - acc: 0.8356 - val_loss: 0.2907 - val_acc: 0.9134
Epoch 18/80
3631/3631 [==============================] - 1s - loss: 0.4309 - acc: 0.8403 - val_loss: 0.2746 - val_acc: 0.9150
Epoch 19/80
3631/3631 [==============================] - 2s - loss: 0.4152 - acc: 0.8529 - val_loss: 0.2541 - val_acc: 0.9379
Epoch 20/80
3631/3631 [==============================] - 1s - loss: 0.4039 - acc: 0.8474 - val_loss: 0.2744 - val_acc: 0.9134
Epoch 21/80
3631/3631 [==============================] - 1s - loss: 0.3892 - acc: 0.8565 - val_loss: 0.2261 - val_acc: 0.9461
Epoch 22/80
3631/3631 [==============================] - 1s - loss: 0.4052 - acc: 0.8518 - val_loss: 0.2530 - val_acc: 0.9265
Epoch 23/80
3631/3631 [==============================] - 1s - loss: 0.3842 - acc: 0.8573 - val_loss: 0.2551 - val_acc: 0.9183
Epoch 24/80
3631/3631 [==============================] - 1s - loss: 0.3618 - acc: 0.8744 - val_loss: 0.2461 - val_acc: 0.9281
Epoch 25/80
3631/3631 [==============================] - 1s - loss: 0.3769 - acc: 0.8606 - val_loss: 0.2301 - val_acc: 0.9363
Epoch 26/80
3631/3631 [==============================] - 1s - loss: 0.3576 - acc: 0.8686 - val_loss: 0.2122 - val_acc: 0.9412
Epoch 27/80
3631/3631 [==============================] - 1s - loss: 0.3615 - acc: 0.8653 - val_loss: 0.2030 - val_acc: 0.9526
Epoch 28/80
3631/3631 [==============================] - 1s - loss: 0.3333 - acc: 0.8805 - val_loss: 0.2580 - val_acc: 0.9183
Epoch 29/80
3631/3631 [==============================] - 1s - loss: 0.3453 - acc: 0.8766 - val_loss: 0.2570 - val_acc: 0.9118
Epoch 30/80
3631/3631 [==============================] - 1s - loss: 0.3491 - acc: 0.8752 - val_loss: 0.2156 - val_acc: 0.9461
Epoch 31/80
3631/3631 [==============================] - 1s - loss: 0.3277 - acc: 0.8802 - val_loss: 0.2248 - val_acc: 0.9477
Epoch 32/80
3631/3631 [==============================] - 1s - loss: 0.3312 - acc: 0.8802 - val_loss: 0.2094 - val_acc: 0.9395
Epoch 33/80
3631/3631 [==============================] - 1s - loss: 0.3222 - acc: 0.8785 - val_loss: 0.2211 - val_acc: 0.9412
Epoch 34/80
3631/3631 [==============================] - 1s - loss: 0.3153 - acc: 0.8860 - val_loss: 0.1859 - val_acc: 0.9526
Epoch 35/80
3631/3631 [==============================] - 2s - loss: 0.3167 - acc: 0.8854 - val_loss: 0.2062 - val_acc: 0.9412
Epoch 36/80
3631/3631 [==============================] - 2s - loss: 0.3138 - acc: 0.8863 - val_loss: 0.1927 - val_acc: 0.9477
Epoch 37/80
3631/3631 [==============================] - 2s - loss: 0.2982 - acc: 0.8973 - val_loss: 0.2271 - val_acc: 0.9232
Epoch 38/80
3631/3631 [==============================] - 2s - loss: 0.3202 - acc: 0.8810 - val_loss: 0.1760 - val_acc: 0.9641
Epoch 39/80
3631/3631 [==============================] - 2s - loss: 0.3031 - acc: 0.8964 - val_loss: 0.1639 - val_acc: 0.9559
Epoch 40/80
3631/3631 [==============================] - 2s - loss: 0.3063 - acc: 0.8942 - val_loss: 0.1740 - val_acc: 0.9592
Epoch 41/80
3631/3631 [==============================] - 2s - loss: 0.2891 - acc: 0.8970 - val_loss: 0.1561 - val_acc: 0.9592
Epoch 42/80
3631/3631 [==============================] - 1s - loss: 0.2982 - acc: 0.8915 - val_loss: 0.1416 - val_acc: 0.9624
Epoch 43/80
3631/3631 [==============================] - 1s - loss: 0.3008 - acc: 0.8951 - val_loss: 0.2068 - val_acc: 0.9395
Epoch 44/80
3631/3631 [==============================] - 1s - loss: 0.2939 - acc: 0.8879 - val_loss: 0.1898 - val_acc: 0.9461
Epoch 45/80
3631/3631 [==============================] - 1s - loss: 0.2870 - acc: 0.8975 - val_loss: 0.1394 - val_acc: 0.9706
Epoch 46/80
3631/3631 [==============================] - 1s - loss: 0.2882 - acc: 0.8995 - val_loss: 0.1445 - val_acc: 0.9624
Epoch 47/80
3631/3631 [==============================] - 1s - loss: 0.2973 - acc: 0.8973 - val_loss: 0.1862 - val_acc: 0.9346
Epoch 48/80
3631/3631 [==============================] - 1s - loss: 0.2850 - acc: 0.8998 - val_loss: 0.1478 - val_acc: 0.9641
Epoch 49/80
3631/3631 [==============================] - 2s - loss: 0.2690 - acc: 0.9014 - val_loss: 0.1324 - val_acc: 0.9788
Epoch 50/80
3631/3631 [==============================] - 2s - loss: 0.2737 - acc: 0.9044 - val_loss: 0.1768 - val_acc: 0.9461
Epoch 51/80
3631/3631 [==============================] - 2s - loss: 0.2690 - acc: 0.9028 - val_loss: 0.1352 - val_acc: 0.9739
Epoch 52/80
3631/3631 [==============================] - 3s - loss: 0.2701 - acc: 0.9036 - val_loss: 0.1396 - val_acc: 0.9739
Epoch 53/80
3631/3631 [==============================] - 2s - loss: 0.2925 - acc: 0.8973 - val_loss: 0.1594 - val_acc: 0.9608
Epoch 54/80
3631/3631 [==============================] - 1s - loss: 0.2595 - acc: 0.9020 - val_loss: 0.1445 - val_acc: 0.9706
Epoch 55/80
3631/3631 [==============================] - 2s - loss: 0.2562 - acc: 0.9061 - val_loss: 0.1405 - val_acc: 0.9673
Epoch 56/80
3631/3631 [==============================] - 2s - loss: 0.2781 - acc: 0.9009 - val_loss: 0.1367 - val_acc: 0.9624
Epoch 57/80
3631/3631 [==============================] - 2s - loss: 0.2714 - acc: 0.8962 - val_loss: 0.1534 - val_acc: 0.9624
Epoch 58/80
3631/3631 [==============================] - 2s - loss: 0.2691 - acc: 0.9075 - val_loss: 0.1792 - val_acc: 0.9493
Epoch 59/80
3631/3631 [==============================] - 2s - loss: 0.2578 - acc: 0.9119 - val_loss: 0.1542 - val_acc: 0.9657
Epoch 60/80
3631/3631 [==============================] - 2s - loss: 0.2780 - acc: 0.9064 - val_loss: 0.1381 - val_acc: 0.9690
Epoch 61/80
3631/3631 [==============================] - 2s - loss: 0.2653 - acc: 0.8981 - val_loss: 0.1631 - val_acc: 0.9559
Epoch 62/80
3631/3631 [==============================] - 2s - loss: 0.2696 - acc: 0.9017 - val_loss: 0.1465 - val_acc: 0.9592
Epoch 63/80
3631/3631 [==============================] - 1s - loss: 0.2581 - acc: 0.9113 - val_loss: 0.1262 - val_acc: 0.9755
Epoch 64/80
3631/3631 [==============================] - 2s - loss: 0.2494 - acc: 0.9124 - val_loss: 0.1384 - val_acc: 0.9673
Epoch 65/80
3631/3631 [==============================] - 1s - loss: 0.2578 - acc: 0.9116 - val_loss: 0.1936 - val_acc: 0.9592
Epoch 66/80
3631/3631 [==============================] - 1s - loss: 0.2533 - acc: 0.9152 - val_loss: 0.1832 - val_acc: 0.9608
Epoch 67/80
3631/3631 [==============================] - 1s - loss: 0.2420 - acc: 0.9157 - val_loss: 0.1420 - val_acc: 0.9608
Epoch 68/80
3631/3631 [==============================] - 1s - loss: 0.2577 - acc: 0.9091 - val_loss: 0.1389 - val_acc: 0.9657
Epoch 69/80
3631/3631 [==============================] - 1s - loss: 0.2476 - acc: 0.9121 - val_loss: 0.1520 - val_acc: 0.9559
Epoch 70/80
3631/3631 [==============================] - 1s - loss: 0.2352 - acc: 0.9240 - val_loss: 0.1562 - val_acc: 0.9657
Epoch 71/80
3631/3631 [==============================] - 1s - loss: 0.2324 - acc: 0.9240 - val_loss: 0.1432 - val_acc: 0.9624
Epoch 72/80
3631/3631 [==============================] - 1s - loss: 0.2490 - acc: 0.9141 - val_loss: 0.1330 - val_acc: 0.9706
Epoch 73/80
3631/3631 [==============================] - 1s - loss: 0.2373 - acc: 0.9177 - val_loss: 0.1481 - val_acc: 0.9575
Epoch 74/80
3631/3631 [==============================] - 1s - loss: 0.2481 - acc: 0.9188 - val_loss: 0.1544 - val_acc: 0.9526
Epoch 75/80
3631/3631 [==============================] - 1s - loss: 0.2473 - acc: 0.9138 - val_loss: 0.1205 - val_acc: 0.9706
Epoch 76/80
3631/3631 [==============================] - 2s - loss: 0.2379 - acc: 0.9179 - val_loss: 0.1383 - val_acc: 0.9608
Epoch 77/80
3631/3631 [==============================] - 1s - loss: 0.2270 - acc: 0.9179 - val_loss: 0.1268 - val_acc: 0.9755
Epoch 78/80
3631/3631 [==============================] - 1s - loss: 0.2393 - acc: 0.9193 - val_loss: 0.1557 - val_acc: 0.9575
Epoch 79/80
3631/3631 [==============================] - 1s - loss: 0.2395 - acc: 0.9240 - val_loss: 0.1318 - val_acc: 0.9641
Epoch 80/80
3631/3631 [==============================] - 1s - loss: 0.2420 - acc: 0.9160 - val_loss: 0.1138 - val_acc: 0.9755
3456/3631 [===========================>..] - ETA: 0s
loss: 0.09%

acc: 98.07%
[ 0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1
  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1
  1  1  1  1  2  2  2  2  0  2  2  2  2  2  2  2  2  2  2  2  2  3  2  2  2
  2  2  0  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  2  0  2  2  2  2
  2  2  2  2  2  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  0  3
  3  3  3  3  3  4  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3
  3  3  3  3  3  3  3  4  3  4  3  4  4  4  4  4  4  4  4  4  4  4  4  4  4
  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  4  3  4  4  4  4  4  4  4  4
  4  4  4  4  4  4  4  4  4  5  5  5  5  5  5  5  5  5  5  5  5  5  5  3  5
  3  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5  5
  5  5  5  5  5  5  5  5  5  5  3  6  6  6  6  6  6  6  6  6  6  6  6  6  6
  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6  6
  6  6  6  6  6  6  6  6  6  6  6  6  7  7  7  7  7  7  7  7  7  7  7  7  7
  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7  7
  7  7  7  7  7  7  7  7  7  7  7  7  8  8  8  8  8  8  8  8  8  8  8  8  8
  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  8  7  8  7  8  8  8  8  8  8
  8  8  8  8  8  8  8  8  8  8  8  7  9  9  9  9  9  9  9  9  9  9  9  9  9
  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9
  9  9  9  9  9  9  9  9  9  9  9  9 10 10 10 10 10 10 10 10 10 10 10 10 10
 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10
 10 10 10 10 10 10 10 10 10 10 10 10 11 11 11 11 11 11 11 11 11 11 11 11 11
 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11
 11 11 11 11 11 11 11 11 11 11 11 11]

Accuracy Rate:
0.975490196078

Confusion Matrix:
[[52  0  0  0  0  0  0  0  0  0  0  0]
 [ 0 52  0  0  0  0  0  0  0  0  0  0]
 [ 3  0 47  1  0  0  0  0  0  0  0  0]
 [ 1  0  0 50  1  0  0  0  0  0  0  0]
 [ 0  0  0  3 49  0  0  0  0  0  0  0]
 [ 0  0  0  3  0 49  0  0  0  0  0  0]
 [ 0  0  0  0  0  0 51  0  0  0  0  0]
 [ 0  0  0  0  0  0  0 50  0  0  0  0]
 [ 0  0  0  0  0  0  0  3 47  0  0  0]
 [ 0  0  0  0  0  0  0  0  0 50  0  0]
 [ 0  0  0  0  0  0  0  0  0  0 50  0]
 [ 0  0  0  0  0  0  0  0  0  0  0 50]]
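
In [ ]:
# Hedged sketch (not part of the original run): reload the CNN that predict()
# saved above using load_model (imported at the top) and classify one test
# window. Assumes the CNN cell and predict(model) have been executed so that
# trained_cnn_model.h5 exists in DATAPATH.
reloaded = load_model(DATAPATH + 'trained_cnn_model.h5')
window = Xt[0:1]                                    # a single (1, 120, 9) window
probs = reloaded.predict(window)
print("Predicted:", LABELS[int(np.argmax(probs))], "| Actual:", LABELS[int(Yt[0])])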

In [10]:
#LSTM Neural Network

'''data_dim = X.shape[2]
timesteps = X.shape[1]
num_classes = Yhot.shape[1]
b_size = 32

model = Sequential()
model.add(LSTM(64, return_sequences=True, input_shape=(timesteps, data_dim)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.2))

model.add(LSTM(64, return_sequences=True))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.2))

model.add(LSTM(64, return_sequences=True))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(X, Yhot, epochs=5, batch_size=b_size, validation_split=0.1, shuffle=True)
result = model.evaluate(X, Yhot)
print("\n%s: %.2f%%" % (model.metrics_names[0], result[0]))
print("\n%s: %.2f%%" % (model.metrics_names[1], result[1]*100))

predict(model)'''


Out[10]:
'data_dim = X.shape[2]\ntimesteps = X.shape[1]\nnum_classes = Yhot.shape[1]\nb_size = 32\n\nmodel = Sequential()\nmodel.add(LSTM(64, return_sequences=True, input_shape=(timesteps, data_dim)))\nmodel.add(BatchNormalization())\nmodel.add(Activation(\'relu\'))\nmodel.add(Dropout(0.2))\n\nmodel.add(LSTM(64, return_sequences=True))\nmodel.add(BatchNormalization())\nmodel.add(Activation(\'relu\'))\nmodel.add(Dropout(0.2))\n\nmodel.add(LSTM(64, return_sequences=True))\nmodel.add(BatchNormalization())\nmodel.add(Activation(\'relu\'))\nmodel.add(Flatten())\nmodel.add(Dense(num_classes, activation=\'softmax\'))\n\nmodel.compile(loss=\'categorical_crossentropy\',\n              optimizer=\'adam\',\n              metrics=[\'accuracy\'])\n\nmodel.fit(X, Yhot, epochs=5, batch_size=b_size, validation_split=0.1, shuffle=True)\nresult = model.evaluate(X, Yhot)\nprint("\n%s: %.2f%%" % (model.metrics_names[0], result[0]))\nprint("\n%s: %.2f%%" % (model.metrics_names[1], result[1]*100))\n\npredict(model)'

In [ ]: