In [104]:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline

from get_pong_data import get_objects_locations


# TODO: compute the ball's current velocity so we can predict where it is heading

In [105]:
#get_objects_locations(train_data[1][0])

In [106]:
#480 #270 #1

In [107]:
# Load the recorded gameplay dataset: per-frame feature vectors and one-hot labels.
# NOTE(review): newer numpy defaults to allow_pickle=False; if these .npy files
# hold object arrays, loading may need allow_pickle=True — confirm on upgrade.
train_data_inputs = np.load("train_data_inputs.npy", encoding='bytes')
train_data_labels = np.load("train_data_labels.npy", encoding='bytes')

In [108]:
# Oversample frames whose label has index 1 or 2 set (presumably the two
# paddle-movement classes — confirm against the label encoding) by adding
# five extra copies of each, to counter class imbalance.
extra_data_inputs = []
extra_data_labels = []

for d_input, d_label in zip(train_data_inputs, train_data_labels):
    if d_label[1] == 1 or d_label[2] == 1:
        extra_data_inputs.extend(d_input.copy() for _ in range(5))
        extra_data_labels.extend(d_label.copy() for _ in range(5))

extra_data_inputs = np.array(extra_data_inputs)
extra_data_labels = np.array(extra_data_labels)

print(extra_data_inputs.shape)
print(extra_data_labels.shape)

# NOTE(review): duplicating samples *before* the train/test split means copies
# of the same frame can land in both splits, making test accuracy optimistic.
# TODO: consider oversampling only the training split.
enhanced_train_data_inputs = np.concatenate((train_data_inputs, extra_data_inputs), axis=0)
enhanced_train_data_labels = np.concatenate((train_data_labels, extra_data_labels), axis=0)

print(enhanced_train_data_inputs.shape)
print(enhanced_train_data_labels.shape)


(3510, 8)
(3510, 3)
(7506, 8)
(7506, 3)

Normalize data


In [111]:
# Standardize every input feature to zero mean / unit variance.
data_mean = enhanced_train_data_inputs.mean(axis=0)
data_std = enhanced_train_data_inputs.std(axis=0)

# NOTE(review): a constant feature column (std == 0) would yield NaNs here;
# none appears to occur in this dataset, but worth an assertion.
norm_enhanced_train_data_inputs = (enhanced_train_data_inputs - data_mean) / data_std

In [112]:
#data_std

Split the dataset into training, validation, and test sets


In [121]:
from sklearn.model_selection import train_test_split

# 60% train; the remaining 40% is halved into 20% validation / 20% test.
X_train, X_test, y_train, y_test = train_test_split(
    norm_enhanced_train_data_inputs,
    enhanced_train_data_labels,
    test_size=0.4,
    random_state=42,
)
X_valid, X_test, y_valid, y_test = train_test_split(
    X_test, y_test, test_size=0.5, random_state=42
)

In [122]:
# Sanity check: train_test_split applies the same permutation to both
# sequences, so paired inputs and labels stay aligned.
list1 = list(range(10))
list2 = list(range(10))
train_test_split(list1, list2, test_size=0.2, random_state=5)


Out[122]:
[[2, 4, 7, 1, 0, 8, 6, 3], [9, 5], [2, 4, 7, 1, 0, 8, 6, 3], [9, 5]]

In [123]:
# Training split shape — should be 60% of the enhanced dataset rows, 8 features.
X_train.shape


Out[123]:
(4503, 8)

Create model


In [128]:
def pong_dense_model(n_features=8, n_classes=3, hidden_units=(20, 20, 10)):
    """Build a small fully connected classifier graph (TF1 style).

    Args:
        n_features: number of input features per sample (default 8,
            matching the recorded Pong feature vectors).
        n_classes: number of output classes for the one-hot labels.
        hidden_units: sizes of the ReLU hidden layers, in order.

    Returns:
        Tuple (inputs, y, logits, cost, accuracy) of graph tensors:
        `inputs` and `y` are feed placeholders, `cost` is the mean
        softmax cross-entropy, `accuracy` the arg-max match rate.
    """
    inputs = tf.placeholder(dtype=tf.float32, shape=[None, n_features])
    y = tf.placeholder(dtype=tf.float32, shape=[None, n_classes])

    # Stack of ReLU hidden layers, sizes taken from `hidden_units`.
    h = inputs
    for units in hidden_units:
        h = tf.layers.dense(h, units, activation=tf.nn.relu)

    logits = tf.layers.dense(h, n_classes, activation=None)

    # NOTE: softmax_cross_entropy_with_logits is deprecated in TF1 in favor
    # of the _v2 variant; kept for parity with the trained checkpoint. The
    # difference (gradient through labels) is irrelevant here since labels
    # come from a placeholder.
    sc = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)
    cost = tf.reduce_mean(sc)

    # Accuracy: fraction of rows where the predicted class matches the label.
    correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')

    return inputs, y, logits, cost, accuracy

In [129]:
# Start from a clean graph so re-running this cell does not duplicate ops
# or leak variables from a previous run.
tf.reset_default_graph()

inputs, y, logits, cost, accuracy = pong_dense_model()

# Adam with a fixed learning rate; `optimizer` is the training op.
optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(cost)

In [131]:
# Train with full-batch gradient steps (the dataset is small enough to fit
# in one batch), tracking training cost and validation accuracy per epoch.
epochs = 10000

cost_list = []
acc_list = []

with tf.Session() as sess:
    # Initializing the variables
    sess.run(tf.global_variables_initializer())

    for e in range(epochs):
        # One full-batch optimization step on the training split.
        cost_value, _ = sess.run([cost, optimizer], feed_dict={
            inputs: X_train,
            y: y_train,
        })
        cost_list.append(cost_value)

        # Validation accuracy, recorded every epoch for the learning curve.
        acc_value = sess.run(accuracy, feed_dict={
            inputs: X_valid,
            y: y_valid,
        })
        acc_list.append(acc_value)

        if e % 500 == 0:
            print("Epoch: {} Cost: {}".format(e, cost_value))
            print("Accuracy: {}".format(acc_value))
            print("")

    # Final accuracy on the held-out test split, computed once after training.
    final_acc = sess.run(accuracy, feed_dict={
        inputs: X_test,
        y: y_test,
    })

    plt.plot(cost_list)
    plt.show()
    plt.plot(acc_list)
    plt.show()
    print("Final accuracy: {}".format(final_acc))

    # Persist the trained weights for the Pong agent to load later.
    saver = tf.train.Saver()
    save_path = saver.save(sess, "./model/model2.ckpt")


Epoch: 0 Cost: 1.1327524185180664
Accuracy: 0.3624250590801239

Epoch: 500 Cost: 0.3396291732788086
Accuracy: 0.8134576678276062

Epoch: 1000 Cost: 0.2848663032054901
Accuracy: 0.8094603419303894

Epoch: 1500 Cost: 0.26006993651390076
Accuracy: 0.8341106176376343

Epoch: 2000 Cost: 0.24427221715450287
Accuracy: 0.8341106176376343

Epoch: 2500 Cost: 0.24237866699695587
Accuracy: 0.8500999212265015

Epoch: 3000 Cost: 0.2026253044605255
Accuracy: 0.8634243607521057

Epoch: 3500 Cost: 0.19453008472919464
Accuracy: 0.8647568225860596

Epoch: 4000 Cost: 0.22615908086299896
Accuracy: 0.8500999212265015

Epoch: 4500 Cost: 0.1779916137456894
Accuracy: 0.8760825991630554

Epoch: 5000 Cost: 0.18019352853298187
Accuracy: 0.8734177350997925

Epoch: 5500 Cost: 0.16696207225322723
Accuracy: 0.8827448487281799

Epoch: 6000 Cost: 0.16450820863246918
Accuracy: 0.8840773105621338

Epoch: 6500 Cost: 0.16639916598796844
Accuracy: 0.8787475228309631

Epoch: 7000 Cost: 0.15615050494670868
Accuracy: 0.8920719623565674

Epoch: 7500 Cost: 0.15388043224811554
Accuracy: 0.8887408375740051

Epoch: 8000 Cost: 0.1507451981306076
Accuracy: 0.8894070386886597

Epoch: 8500 Cost: 0.1499628871679306
Accuracy: 0.8887408375740051

Epoch: 9000 Cost: 0.160520538687706
Accuracy: 0.8847435116767883

Epoch: 9500 Cost: 0.14401133358478546
Accuracy: 0.8967354893684387

Final accuracy: 0.9134487509727478

In [ ]: