In [1]:
# GPU or CPU
import tensorflow as tf

# Check TensorFlow Version
print('TensorFlow Version: {}'.format(tf.__version__))

# Check for a GPU
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))


TensorFlow Version: 1.4.1
Default GPU Device: /device:GPU:0
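
If no GPU is visible, tf.test.gpu_device_name() returns an empty string and the graph simply runs on CPU. A one-line fallback check (the device variable is illustrative, not part of the original notebook):

device = tf.test.gpu_device_name() or '/cpu:0'  # '' means no GPU; fall back to CPU
print('Running on: {}'.format(device))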

In [11]:
# Input data
import numpy as np
from utilities import *

# Read the train and test splits
X_train_valid, Y_train_valid, list_ch_train_valid = read_data(data_path="../../datasets/har-data/", 
                                                              split="train")
X_test, Y_test, list_ch_test = read_data(data_path="../../datasets/har-data/", split="test")

assert list_ch_train_valid == list_ch_test, "Mismatch in channels!"
assert Y_train_valid.max(axis=0) == Y_test.max(axis=0)

print(*(np.mean(Y_train_valid == i) for i in range(8)))


0.0 0.16675734494015235 0.1459466811751904 0.13411316648531013 0.1749183895538629 0.1868879216539717 0.1913764961915125 0.0
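
read_data comes from the companion utilities.py, which is not shown here. A minimal sketch of what it plausibly does, assuming the standard UCI HAR layout (nine fixed-width signal files under Inertial Signals/ plus a y_<split>.txt label file); the function name, file discovery, and return order below are assumptions, not the actual implementation:

import os

def read_data_sketch(data_path, split="train"):
    """Hypothetical loader for one UCI HAR split: (n, 128, 9) signals + labels."""
    signals_dir = os.path.join(data_path, split, "Inertial Signals")
    channel_files = sorted(os.listdir(signals_dir))  # the nine channel files
    # Each file holds (n_trials, 128) samples; stacking gives (n_trials, 128, 9)
    X = np.stack([np.loadtxt(os.path.join(signals_dir, f)) for f in channel_files], axis=2)
    Y = np.loadtxt(os.path.join(data_path, split, "y_{}.txt".format(split)))
    return X, Y, channel_files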

In [12]:
# Preparing input and output data
# from utilities import *

# Normalizing/standardizing the input data features
X_train_valid_norm, X_test_norm = standardize(test=X_test, train=X_train_valid)

# One-hot encode the output labels (sanity-check the class fractions first)
print(*(np.mean(Y_train_valid.reshape(-1) == i) for i in range(8)))

Y_train_valid_onehot = one_hot(labels=Y_train_valid.reshape(-1), n_class=6) 
Y_test_onehot = one_hot(labels=Y_test.reshape(-1), n_class=6) 

print(Y_train_valid_onehot.shape, Y_train_valid_onehot.dtype, 
      Y_test_onehot.shape, Y_test_onehot.dtype)


0.0 0.16675734494015235 0.1459466811751904 0.13411316648531013 0.1749183895538629 0.1868879216539717 0.1913764961915125 0.0
(7352, 6) float64 (2947, 6) float64
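
standardize and one_hot also live in utilities.py. Plausible sketches, assuming channel-wise z-scoring with training-set statistics and 1-based labels (consistent with the fractions printed above, where classes 0 and 7 are empty); the _sketch names mark these as assumptions:

def standardize_sketch(train, test):
    """Z-score every channel using mean/std computed on the training set."""
    mean = train.mean(axis=(0, 1), keepdims=True)
    std = train.std(axis=(0, 1), keepdims=True)
    return (train - mean) / std, (test - mean) / std

def one_hot_sketch(labels, n_class):
    """Map labels 1..n_class to one-hot rows; np.eye yields float64, matching the shapes/dtypes printed above."""
    return np.eye(n_class)[labels.astype(int) - 1]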

In [13]:
# Train and valid split
from sklearn.model_selection import train_test_split

X_train_norm, X_valid_norm, Y_train_onehot, Y_valid_onehot = train_test_split(X_train_valid_norm, 
                                                                              Y_train_valid_onehot,
                                                                              test_size=0.30)

print(X_train_norm.shape, X_valid_norm.shape, Y_train_onehot.shape, Y_valid_onehot.shape)


(5146, 128, 9) (2206, 128, 9) (5146, 6) (2206, 6)
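
Note that the split above is unstratified, so class proportions can drift between train and valid. If that matters, sklearn supports stratifying on the integer labels; a variant (random_state added only for reproducibility):

X_train_norm, X_valid_norm, Y_train_onehot, Y_valid_onehot = train_test_split(
    X_train_valid_norm, Y_train_valid_onehot,
    test_size=0.30, stratify=Y_train_valid.reshape(-1), random_state=42)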

In [14]:
## Hyperparameters
# Input data
batch_size = X_train_norm.shape[0] // 100 # minibatch size; ~100 minibatches per epoch (5146 // 100 = 51)
seq_len = X_train_norm.shape[1] # Number of steps: each trial length
n_channels = X_train_norm.shape[2] # number of channels in each trial
print('batch_size, seq_len, n_channels', batch_size, seq_len, n_channels)

# Output labels
n_classes = Y_train_valid.max(axis=0)
assert Y_train_valid.max(axis=0) == Y_test.max(axis=0)
print('n_classes', n_classes)

# learning parameters
learning_rate = 0.0001 #1e-4
epochs = 100 # number of full passes over the training data
keep_prob = 0.50 # 50% of activations are kept and 50% are dropped out


batch_size, seq_len, n_channels 51 128 9
n_classes 6

In [15]:
# Placeholders: feed data from Python/NumPy into the TensorFlow graph
inputs_ = tf.placeholder(tf.float32, [None, seq_len, n_channels], name = 'inputs_')
labels_ = tf.placeholder(tf.float32, [None, n_classes], name = 'labels_')
keep_prob_ = tf.placeholder(tf.float32, name = 'keep_prob_')
learning_rate_ = tf.placeholder(tf.float32, name = 'learning_rate_')

In [16]:
# batch_size, seq_len, n_channels: 51 128 9; n_classes: 6
# (batch, 128, 9) --> (batch, 64, 18)
# conv 'same': kernel 2, stride 1, length stays 128
# pool 'same', stride 2: ceil(128/2) = 64
conv1 = tf.layers.conv1d(inputs=inputs_, filters=18, kernel_size=2, strides=1, padding='same', 
                         activation = tf.nn.relu)
max_pool_1 = tf.layers.max_pooling1d(inputs=conv1, pool_size=2, strides=2, padding='same')
# max_pool_1 = tf.nn.dropout(max_pool_1, keep_prob=keep_prob_)
print('inputs_.shape, conv1.shape, max_pool_1.shape', inputs_.shape, conv1.shape, max_pool_1.shape)

# (batch, 64, 18) --> (batch, 32, 36)
# conv 'same': length stays 64
# pool 'same', stride 2: ceil(64/2) = 32
conv2 = tf.layers.conv1d(inputs=max_pool_1, filters=36, kernel_size=2, strides=1, padding='same', 
                         activation = tf.nn.relu)
max_pool_2 = tf.layers.max_pooling1d(inputs=conv2, pool_size=2, strides=2, padding='same')
# max_pool_2 = tf.nn.dropout(max_pool_2, keep_prob=keep_prob_)
print('max_pool_1.shape, conv2.shape, max_pool_2.shape', max_pool_1.shape, conv2.shape, max_pool_2.shape)

# (batch, 32, 36) --> (batch, 16, 72)
# conv 'same': length stays 32
# pool 'same', stride 2: ceil(32/2) = 16
conv3 = tf.layers.conv1d(inputs=max_pool_2, filters=72, kernel_size=2, strides=1, padding='same', 
                         activation = tf.nn.relu)
max_pool_3 = tf.layers.max_pooling1d(inputs=conv3, pool_size=2, strides=2, padding='same')
# max_pool_3 = tf.nn.dropout(max_pool_3, keep_prob=keep_prob_)
print('max_pool_2.shape, conv3.shape, max_pool_3.shape', max_pool_2.shape, conv3.shape, max_pool_3.shape)

# (batch, 16, 72) --> (batch, 8, 144)
# conv 'same': length stays 16
# pool 'same', stride 2: ceil(16/2) = 8
conv4 = tf.layers.conv1d(inputs=max_pool_3, filters=144, kernel_size=2, strides=1, padding='same', 
                         activation = tf.nn.relu)
max_pool_4 = tf.layers.max_pooling1d(inputs=conv4, pool_size=2, strides=2, padding='same')
# max_pool_4 = tf.nn.dropout(max_pool_4, keep_prob=keep_prob_)
print('max_pool_3.shape, conv4.shape, max_pool_4.shape', max_pool_3.shape, conv4.shape, max_pool_4.shape)

# Flatten, apply dropout, and project to the class logits
flat = tf.reshape(max_pool_4, (-1, 8*144))
flat = tf.nn.dropout(flat, keep_prob=keep_prob_)
logits = tf.layers.dense(flat, n_classes)
print('max_pool_4.shape, flat.shape, logits.shape', max_pool_4.shape, flat.shape, logits.shape)


inputs_.shape, conv1.shape, max_pool_1.shape (?, 128, 9) (?, 128, 18) (?, 64, 18)
max_pool_1.shape, conv2.shape, max_pool_2.shape (?, 64, 18) (?, 64, 36) (?, 32, 36)
max_pool_2.shape, conv3.shape, max_pool_3.shape (?, 32, 36) (?, 32, 72) (?, 16, 72)
max_pool_3.shape, conv4.shape, max_pool_4.shape (?, 16, 72) (?, 16, 144) (?, 8, 144)
max_pool_4.shape, flat.shape, logits.shape (?, 8, 144) (?, 1152) (?, 6)
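
The shape comments above all follow one rule: with 'same' padding, a strided layer emits ceil(length / stride) steps, so each pool halves the sequence (128 -> 64 -> 32 -> 16 -> 8) while each conv doubles the channel count. A quick sanity check (helper name is illustrative):

import math

def same_len(length, stride):
    # Output length of a conv/pool layer with 'same' padding
    return math.ceil(length / stride)

assert [same_len(n, 2) for n in (128, 64, 32, 16)] == [64, 32, 16, 8]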

In [17]:
# Loss, optimizer (backpropagation), and evaluation ops
# Cost function
cost_tensor = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels_)
cost = tf.reduce_mean(input_tensor=cost_tensor)
print('cost_tensor, cost', cost_tensor, cost)

# Optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate_).minimize(cost)
print('optimizer', optimizer)

# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(labels_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
print('correct_pred, accuracy', correct_pred, accuracy)

# Confusion matrix
confusion_matrix = tf.confusion_matrix(predictions=tf.argmax(logits, 1),
                                       labels=tf.argmax(labels_, 1))
print('confusion_matrix', confusion_matrix)


cost_tensor, cost Tensor("Reshape_3:0", shape=(?,), dtype=float32) Tensor("Mean:0", shape=(), dtype=float32)
optimizer name: "Adam"
op: "NoOp"
input: "^Adam/update_conv1d/kernel/ApplyAdam"
input: "^Adam/update_conv1d/bias/ApplyAdam"
input: "^Adam/update_conv1d_1/kernel/ApplyAdam"
input: "^Adam/update_conv1d_1/bias/ApplyAdam"
input: "^Adam/update_conv1d_2/kernel/ApplyAdam"
input: "^Adam/update_conv1d_2/bias/ApplyAdam"
input: "^Adam/update_conv1d_3/kernel/ApplyAdam"
input: "^Adam/update_conv1d_3/bias/ApplyAdam"
input: "^Adam/update_dense/kernel/ApplyAdam"
input: "^Adam/update_dense/bias/ApplyAdam"
input: "^Adam/Assign"
input: "^Adam/Assign_1"

correct_pred, accuracy Tensor("Equal:0", shape=(?,), dtype=bool) Tensor("accuracy:0", shape=(), dtype=float32)
confusion_matrix Tensor("confusion_matrix/SparseTensorDenseAdd:0", shape=(?, ?), dtype=int32)
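
The training loop below relies on get_batches from utilities.py. A minimal sketch, assuming it yields consecutive (x, y) minibatches and drops the trailing remainder (5146 = 100 * 51 + 46):

def get_batches_sketch(X, Y, batch_size):
    """Yield successive (x, y) minibatches; the trailing partial batch is dropped."""
    n_batches = len(X) // batch_size
    for b in range(0, n_batches * batch_size, batch_size):
        yield X[b:b + batch_size], Y[b:b + batch_size]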

In [ ]:
train_acc, train_loss = [], []
valid_acc, valid_loss = [], []

# Saver for checkpointing the trained model parameters
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
   
    # Loop over epochs
    for e in range(epochs):
        
        # Loop over batches
        for x, y in get_batches(X_train_norm, Y_train_onehot, batch_size):
            
            ######################## Training
            # Feed dictionary
            feed = {inputs_ : x, labels_ : y, keep_prob_ : keep_prob, learning_rate_ : learning_rate}
            
            # One training step; fetch loss and accuracy for this minibatch
            loss, _ , acc = sess.run([cost, optimizer, accuracy], feed_dict = feed)
            train_acc.append(acc)
            train_loss.append(loss)
            
            ################## Validation (note: runs after every training minibatch)
            acc_batch = []
            loss_batch = []    
            # Loop over batches
            for x, y in get_batches(X_valid_norm, Y_valid_onehot, batch_size):

                # Feed dictionary
                feed = {inputs_ : x, labels_ : y, keep_prob_ : 1.0}

                # Loss
                loss, acc = sess.run([cost, accuracy], feed_dict = feed)
                acc_batch.append(acc)
                loss_batch.append(loss)

            # Store
            valid_acc.append(np.mean(acc_batch))
            valid_loss.append(np.mean(loss_batch))
            
        # Print per-epoch info (running means over all batches seen so far)
        print("Epoch: {}/{}".format(e+1, epochs),
              "Train loss: {:.6f}".format(np.mean(train_loss)),
              "Valid loss: {:.6f}".format(np.mean(valid_loss)),
              "Train acc: {:.6f}".format(np.mean(train_acc)),
              "Valid acc: {:.6f}".format(np.mean(valid_acc)))
                
    saver.save(sess,".checkpoints-dcnn/har.ckpt")


Epoch: 1/100 Train loss: 1.573449 Valid loss: 1.556015 Train acc: 0.312157 Valid acc: 0.349818
Epoch: 2/100 Train loss: 1.390195 Valid loss: 1.376072 Train acc: 0.411471 Valid acc: 0.459989
Epoch: 3/100 Train loss: 1.258292 Valid loss: 1.239838 Train acc: 0.483203 Valid acc: 0.539840
Epoch: 4/100 Train loss: 1.139723 Valid loss: 1.115950 Train acc: 0.538333 Valid acc: 0.600848
Epoch: 5/100 Train loss: 1.027838 Valid loss: 1.001262 Train acc: 0.590980 Valid acc: 0.652461
Epoch: 6/100 Train loss: 0.925564 Valid loss: 0.898638 Train acc: 0.636144 Valid acc: 0.693331
Epoch: 7/100 Train loss: 0.838400 Valid loss: 0.812195 Train acc: 0.673277 Valid acc: 0.724680
Epoch: 8/100 Train loss: 0.764863 Valid loss: 0.740766 Train acc: 0.703946 Valid acc: 0.749407
Epoch: 9/100 Train loss: 0.703454 Valid loss: 0.681704 Train acc: 0.728976 Valid acc: 0.769374
Epoch: 10/100 Train loss: 0.652153 Valid loss: 0.632562 Train acc: 0.749431 Valid acc: 0.785643
Epoch: 11/100 Train loss: 0.608382 Valid loss: 0.591097 Train acc: 0.766934 Valid acc: 0.799416
Epoch: 12/100 Train loss: 0.570972 Valid loss: 0.555764 Train acc: 0.781683 Valid acc: 0.810966
Epoch: 13/100 Train loss: 0.538703 Valid loss: 0.525315 Train acc: 0.794148 Valid acc: 0.820858
Epoch: 14/100 Train loss: 0.510389 Valid loss: 0.498781 Train acc: 0.805028 Valid acc: 0.829556
Epoch: 15/100 Train loss: 0.485333 Valid loss: 0.475503 Train acc: 0.814523 Valid acc: 0.837113
Epoch: 16/100 Train loss: 0.463300 Valid loss: 0.454934 Train acc: 0.822831 Valid acc: 0.843753
Epoch: 17/100 Train loss: 0.443654 Valid loss: 0.436576 Train acc: 0.830334 Valid acc: 0.849682
Epoch: 18/100 Train loss: 0.425878 Valid loss: 0.420090 Train acc: 0.837157 Valid acc: 0.855008
Epoch: 19/100 Train loss: 0.410010 Valid loss: 0.405238 Train acc: 0.843158 Valid acc: 0.859765
Epoch: 20/100 Train loss: 0.395410 Valid loss: 0.391754 Train acc: 0.848814 Valid acc: 0.864138
Epoch: 21/100 Train loss: 0.382071 Valid loss: 0.379474 Train acc: 0.853782 Valid acc: 0.868106
Epoch: 22/100 Train loss: 0.369816 Valid loss: 0.368267 Train acc: 0.858342 Valid acc: 0.871712
Epoch: 23/100 Train loss: 0.358684 Valid loss: 0.357963 Train acc: 0.862447 Valid acc: 0.875041
Epoch: 24/100 Train loss: 0.348386 Valid loss: 0.348484 Train acc: 0.866266 Valid acc: 0.878080
Epoch: 25/100 Train loss: 0.338791 Valid loss: 0.339713 Train acc: 0.869820 Valid acc: 0.880893
Epoch: 26/100 Train loss: 0.329945 Valid loss: 0.331581 Train acc: 0.873107 Valid acc: 0.883505
Epoch: 27/100 Train loss: 0.321676 Valid loss: 0.324009 Train acc: 0.876158 Valid acc: 0.885947
Epoch: 28/100 Train loss: 0.313866 Valid loss: 0.316955 Train acc: 0.878992 Valid acc: 0.888217
Epoch: 29/100 Train loss: 0.306606 Valid loss: 0.310366 Train acc: 0.881758 Valid acc: 0.890326
Epoch: 30/100 Train loss: 0.299887 Valid loss: 0.304197 Train acc: 0.884281 Valid acc: 0.892313
Epoch: 31/100 Train loss: 0.293501 Valid loss: 0.298395 Train acc: 0.886610 Valid acc: 0.894197
Epoch: 32/100 Train loss: 0.287459 Valid loss: 0.292935 Train acc: 0.888879 Valid acc: 0.895945
Epoch: 33/100 Train loss: 0.281796 Valid loss: 0.287798 Train acc: 0.890951 Valid acc: 0.897584
Epoch: 34/100 Train loss: 0.276357 Valid loss: 0.282930 Train acc: 0.892976 Valid acc: 0.899140
Epoch: 35/100 Train loss: 0.271279 Valid loss: 0.278334 Train acc: 0.894852 Valid acc: 0.900610
Epoch: 36/100 Train loss: 0.266497 Valid loss: 0.273979 Train acc: 0.896585 Valid acc: 0.902009
Epoch: 37/100 Train loss: 0.261942 Valid loss: 0.269837 Train acc: 0.898251 Valid acc: 0.903343
Epoch: 38/100 Train loss: 0.257584 Valid loss: 0.265901 Train acc: 0.899871 Valid acc: 0.904604
Epoch: 39/100 Train loss: 0.253404 Valid loss: 0.262164 Train acc: 0.901413 Valid acc: 0.905802
Epoch: 40/100 Train loss: 0.249405 Valid loss: 0.258596 Train acc: 0.902877 Valid acc: 0.906944
Epoch: 41/100 Train loss: 0.245627 Valid loss: 0.255195 Train acc: 0.904247 Valid acc: 0.908026
Epoch: 42/100 Train loss: 0.242006 Valid loss: 0.251946 Train acc: 0.905542 Valid acc: 0.909072
Epoch: 43/100 Train loss: 0.238528 Valid loss: 0.248832 Train acc: 0.906872 Valid acc: 0.910079
Epoch: 44/100 Train loss: 0.235164 Valid loss: 0.245847 Train acc: 0.908133 Valid acc: 0.911038
Epoch: 45/100 Train loss: 0.232009 Valid loss: 0.242986 Train acc: 0.909320 Valid acc: 0.911958
Epoch: 46/100 Train loss: 0.228921 Valid loss: 0.240240 Train acc: 0.910460 Valid acc: 0.912839
Epoch: 47/100 Train loss: 0.225983 Valid loss: 0.237591 Train acc: 0.911556 Valid acc: 0.913683
Epoch: 48/100 Train loss: 0.223111 Valid loss: 0.235038 Train acc: 0.912622 Valid acc: 0.914495
Epoch: 49/100 Train loss: 0.220368 Valid loss: 0.232620 Train acc: 0.913621 Valid acc: 0.915262
Epoch: 50/100 Train loss: 0.217700 Valid loss: 0.230254 Train acc: 0.914635 Valid acc: 0.916014
Epoch: 51/100 Train loss: 0.215097 Valid loss: 0.227983 Train acc: 0.915586 Valid acc: 0.916731
Epoch: 52/100 Train loss: 0.212611 Valid loss: 0.225786 Train acc: 0.916489 Valid acc: 0.917416
Epoch: 53/100 Train loss: 0.210215 Valid loss: 0.223669 Train acc: 0.917355 Valid acc: 0.918086
Epoch: 54/100 Train loss: 0.207890 Valid loss: 0.221618 Train acc: 0.918235 Valid acc: 0.918730
Epoch: 55/100 Train loss: 0.205630 Valid loss: 0.219631 Train acc: 0.919062 Valid acc: 0.919346
Epoch: 56/100 Train loss: 0.203446 Valid loss: 0.217720 Train acc: 0.919867 Valid acc: 0.919943
Epoch: 57/100 Train loss: 0.201320 Valid loss: 0.215863 Train acc: 0.920636 Valid acc: 0.920518
Epoch: 58/100 Train loss: 0.199237 Valid loss: 0.214053 Train acc: 0.921400 Valid acc: 0.921083
Epoch: 59/100 Train loss: 0.197239 Valid loss: 0.212302 Train acc: 0.922110 Valid acc: 0.921619
Epoch: 60/100 Train loss: 0.195302 Valid loss: 0.210604 Train acc: 0.922840 Valid acc: 0.922141
Epoch: 61/100 Train loss: 0.193431 Valid loss: 0.208951 Train acc: 0.923533 Valid acc: 0.922648
Epoch: 62/100 Train loss: 0.191586 Valid loss: 0.207348 Train acc: 0.924175 Valid acc: 0.923133

In [15]:
import matplotlib.pyplot as mplot

mplot.plot(train_loss, label='har train_loss')
mplot.plot(valid_loss, label='har valid_loss')
mplot.legend()
mplot.show()

[Plot: har train_loss and har valid_loss over training minibatches]

In [16]:
# import matplotlib.pyplot as mplot
mplot.plot(train_acc, label='har train_acc')
mplot.plot(valid_acc, label='har valid_acc')
mplot.legend()
mplot.show()

[Plot: har train_acc and har valid_acc over training minibatches]

In [17]:
test_acc, test_loss = [], []

with tf.Session() as sess:
    # Restore the validated model
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints-dcnn'))
    
    ################## Test
    acc_batch = []
    loss_batch = []    
    # Loop over batches
    for x, y in get_batches(X_test_norm, Y_test_onehot, batch_size):

        # Feed dictionary
        feed = {inputs_ : x, labels_ : y, keep_prob_ : 1.0}

        # Loss
        loss, acc = sess.run([cost, accuracy], feed_dict = feed)
        acc_batch.append(acc)
        loss_batch.append(loss)

    # Store
    test_acc.append(np.mean(acc_batch))
    test_loss.append(np.mean(loss_batch))

    # Print test metrics
    print("Test loss: {:.6f}".format(np.mean(test_loss)),
          "Test acc: {:.6f}".format(np.mean(test_acc)))


INFO:tensorflow:Restoring parameters from checkpoints-dcnn/har.ckpt
Test loss: 0.297591 Test acc: 0.925697
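
The confusion_matrix op defined earlier is built but never evaluated. One could fetch it on the full test set inside the with-block above, after the accuracy loop; a hedged example:

    cm = sess.run(confusion_matrix,
                  feed_dict={inputs_: X_test_norm, labels_: Y_test_onehot, keep_prob_: 1.0})
    print(cm)  # rows: true classes, columns: predicted classes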

In [ ]: