Here is the modified code; you can copy and paste it into the previous notebook.
In [ ]:
# Number of units in the second hidden layer.
HIDDEN2_UNITS = 64
# --- Flat (un-scoped) graph construction for a 2-hidden-layer MLP. ---
# NOTE(review): this appears to be the pre-name_scope version of the same
# network built below; presumably NUM_PIXELS, HIDDEN1_UNITS, NUM_CLASSES, x,
# weight_variable and bias_variable are defined in an earlier notebook cell.
# First hidden layer: affine transform of the input x, then ReLU.
weights1 = weight_variable(NUM_PIXELS, HIDDEN1_UNITS, "weights1")
biases1 = bias_variable(HIDDEN1_UNITS, "biases1")
hidden1 = tf.nn.relu(tf.matmul(x, weights1) + biases1, name="hidden1")
# Second hidden layer: affine transform of hidden1, then ReLU.
weights2 = weight_variable(HIDDEN1_UNITS, HIDDEN2_UNITS, "weights2")
biases2 = bias_variable(HIDDEN2_UNITS, "biases2")
hidden2 = tf.nn.relu(tf.matmul(hidden1, weights2) + biases2, name="hidden2")
# Output layer: produces one unnormalized score (logit) per class.
weights3 = weight_variable(HIDDEN2_UNITS, NUM_CLASSES, "weights3")
biases3 = bias_variable(NUM_CLASSES, "biases3")
# y holds the logits; no softmax here — normalization is presumably applied
# later (e.g. by a softmax-cross-entropy loss).
y = tf.matmul(hidden2, weights3) + biases3
# Same 2-hidden-layer MLP as above, but grouped under tf.name_scope so the
# ops appear as collapsible layer nodes in TensorBoard's graph view.
# FIX: as pasted, the statements under each `with` block were not indented,
# which is a Python IndentationError; they are re-indented here so each
# layer's ops actually live inside its name scope.
with tf.name_scope("hidden_layer_1"):
    # weights and biases for the first layer
    weights1 = weight_variable(NUM_PIXELS, HIDDEN1_UNITS, "weights1")
    biases1 = bias_variable(HIDDEN1_UNITS, "biases1")
    # activations for the first hidden layer
    hidden1 = tf.nn.relu(tf.matmul(x, weights1) + biases1, name="hidden1")
with tf.name_scope("hidden_layer_2"):
    # weights and biases for the second layer
    weights2 = weight_variable(HIDDEN1_UNITS, HIDDEN2_UNITS, "weights2")
    biases2 = bias_variable(HIDDEN2_UNITS, "biases2")
    # activations for the second hidden layer
    hidden2 = tf.nn.relu(tf.matmul(hidden1, weights2) + biases2, name="hidden2")
with tf.name_scope("output_layer"):
    # weights and biases for the output layer
    weights3 = weight_variable(HIDDEN2_UNITS, NUM_CLASSES, "weights3")
    biases3 = bias_variable(NUM_CLASSES, "biases3")
    # logits - you can think of these (roughly)
    # as unnormalized probabilities, or the amount of
    # evidence we have that the input image corresponds to
    # each digit
    y = tf.matmul(hidden2, weights3) + biases3