In [ ]:
import tensorflow as tf
from collections import deque
import numpy as np

In [ ]:
# Network hyperparameters: a 1-input, 25-hidden-unit, 2-output classifier.
INPUT_DIM = 1             # one scalar feature per example
HIDDEN_LAYER_UNITS = 25   # width of the single hidden layer
OUTPUT_DIM = 2            # two classes, one-hot encoded
LEARNING_RATE = 0.01      # step size for the RMSProp optimizer

Create the TensorFlow interactive session


In [ ]:
# TF1-style interactive session: installs itself as the default session so ops
# can be evaluated with .run()/.eval() without an explicit `with` block.
# NOTE(review): tf.InteractiveSession is a TensorFlow 1.x API, removed in TF 2.x.
sess = tf.InteractiveSession()

Graph building

Input & label placeholders


In [ ]:
# Graph inputs: `input_number` is a batch of scalar features (shape [batch, 1]);
# `label` is the matching one-hot target vector (shape [batch, 2]).
input_number = tf.placeholder(tf.float32, shape=[None, INPUT_DIM], name="input_number")
label = tf.placeholder(tf.float32, shape=[None, OUTPUT_DIM], name="label")

Weights and biases


In [ ]:
# Trainable parameters of the 1-hidden-layer MLP.
# Weight matrices use Xavier/Glorot initialization (uniform=False selects the
# normal-distribution variant).  Biases are zero-initialized, the conventional
# choice: Xavier scaling is derived from a weight matrix's fan-in/fan-out and
# is not meaningful for 1-D bias vectors (the original applied it anyway).
W1 = tf.get_variable("W1", shape=(INPUT_DIM, HIDDEN_LAYER_UNITS), initializer=tf.contrib.layers.xavier_initializer(False))
W2 = tf.get_variable("W2", shape=(HIDDEN_LAYER_UNITS, OUTPUT_DIM), initializer=tf.contrib.layers.xavier_initializer(False))
B1 = tf.get_variable("B1", shape=(HIDDEN_LAYER_UNITS,), initializer=tf.zeros_initializer())
B2 = tf.get_variable("B2", shape=(OUTPUT_DIM,), initializer=tf.zeros_initializer())

Neural network operations


In [ ]:
# Forward pass: affine -> ReLU -> affine.  `output` holds unnormalized logits.
h1 = tf.matmul(input_number, W1) + B1             # hidden-layer pre-activation
activation_hidden = tf.nn.relu(h1)                # hidden-layer activations
output = tf.matmul(activation_hidden, W2) + B2    # class logits

In [ ]:
# Softmax over the logits -> class probabilities.  Used only for inference;
# training uses the fused softmax-cross-entropy op below for numerical stability.
output_softmax = tf.nn.softmax(output)

Loss calculation (Cross-entropy)


In [ ]:
# Mean cross-entropy loss over the batch, computed directly from the logits.
# Fix: softmax_cross_entropy_with_logits must be called with keyword arguments.
# Since TF 1.0 its positional slots are blocked by a `_sentinel` parameter, so
# the original positional call `(output, label)` raises a ValueError (and on
# very old versions that accepted positionals, argument order was easy to swap).
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=label))
loss_function = tf.summary.scalar("loss_func", loss)  # scalar summary for TensorBoard

RMSPropOptimizer


In [ ]:
# Training op: minimize the cross-entropy loss with RMSProp at the configured rate.
train_opt = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(loss)

Training data generation (this synthetic dataset is consumed by the training loop below)


In [ ]:
# Synthesize a labelled dataset: 100k uniform samples in [0, 1).
# `step1` is the class indicator obtained by rounding each sample to 0.0 or
# 1.0; `step2` is its complement, so (step2, step1) forms a one-hot pair.
# NOTE(review): no random seed is set, so the data differs on every run.
inp_prev = np.random.rand(100000)
step1 = np.round(inp_prev)   # 0.0 or 1.0 per sample
step2 = 1.0 - step1          # complement indicator (broadcast subtraction)

In [ ]:
# Shape the features as a [N, 1] column and pair the complementary indicator
# vectors into one-hot labels of shape [N, 2]:
# column 0 = probability mass for class 0, column 1 = for class 1.
inp = inp_prev.reshape(-1, 1)
final_label = np.column_stack((step2, step1))

Training

Initialization


In [ ]:
# Initialize all graph variables; .run() uses the implicit default session
# provided by the InteractiveSession created above.
tf.global_variables_initializer().run()

FileWriter for TensorBoard


In [ ]:
# TensorBoard event writer, also dumping the graph definition.
# NOTE(review): the log directory is a hardcoded absolute Windows path -- this
# breaks on any other machine; consider a configurable LOG_DIR constant.
summary = tf.summary.FileWriter("c:\\Work\\Coding\\Tensorflow_log", sess.graph)
merged = tf.summary.merge_all() # Merge all summary operations (In this case we only have loss_func)

Training steps


In [ ]:
# Train on consecutive minibatches of 100 samples (999 steps over the dataset).
# Fixes vs. original: the unused optimizer result is discarded into `_`, and
# the summary step index uses floor division -- in Python 3, `i/100` yields a
# float, which is not a valid integer `global_step` for add_summary.
for i in range(0, 99900, 100):
    _, summary_str = sess.run([train_opt, merged],
                              feed_dict={input_number: inp[i:i+100], label: final_label[i:i+100]})
    summary.add_summary(summary_str, i // 100)  # record merged summaries for this step

Flush out data to disk


In [ ]:
# Force buffered summary events out to disk so TensorBoard can read them.
summary.flush()

Test the neural network


In [ ]:
# Sanity check: an input of 0.0 should place most probability on class 0
# (first column).  The bare sess.run result is displayed as the cell output.
sess.run(output_softmax, feed_dict={input_number: np.array([0.0]).reshape(1, 1)})