In [1]:
import sys
import time
import numpy as np
np.random.seed(1)
In [2]:
# http://stackoverflow.com/questions/3160699/python-progress-bar
def progressbar(it, size=30):
    count = len(it)
    def _show(_i):
        x = int(size*_i/count)
        sys.stdout.write("{}/{} [{}{}] \r".format(_i, count, "="*x, "."*(size-x)))
        sys.stdout.flush()
    _show(0)
    for i, item in enumerate(it):
        yield item
        _show(i+1)
    sys.stdout.write("\n")
    sys.stdout.flush()
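A minimal usage sketch (added for illustration; a later cell uses the same pattern inside a training-style loop):
# wrap any sized iterable; the bar redraws in place on one line
for _ in progressbar(range(10)):
    time.sleep(0.05)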
In [3]:
import keras
import tensorflow as tf
In [4]:
# data
x = np.random.randn(100)
y = 0.5 * x + 1
In [5]:
# Keras
from keras.models import Sequential
from keras.layers.core import Dense, Activation
model = Sequential()
model.add(Dense(units=1, activation="linear", input_dim=1))
model.compile(loss="mean_squared_error", optimizer="sgd")
model.summary()
model.fit(x, y, batch_size=5, validation_split=0.2, verbose=1)
model.get_weights()
Out[5]:
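A quick sanity check (an added sketch, not part of the original run): the data were generated as y = 0.5 * x + 1, so with enough training the learned kernel and bias should approach 0.5 and 1.0.
# compare the learned parameters with the true slope/intercept used to generate y
w_learned, b_learned = [v.flatten()[0] for v in model.get_weights()]
print("W (target 0.5):", w_learned, "- b (target 1.0):", b_learned)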
In [6]:
# one-liner: pass the layer list directly to Sequential
# Keras
from keras.models import Sequential
from keras.layers.core import Dense, Activation
model = Sequential([Dense(units=1, activation="linear", input_dim=1)])
model.compile(loss="mean_squared_error", optimizer="sgd")
model.summary()
model.fit(x, y, batch_size=5, validation_split=0.2, verbose=1)
model.get_weights()
Out[6]:
In [7]:
"""
full_size = 80
batch_size = 5
-> full gradient pass = 16 (80/5)
"""
for i in progressbar(range(80)):
if i > 16:
# just run the progessbar without any changes
pass
else:
# do something
time.sleep(0.1)
In [8]:
# TensorFlow
import tensorflow as tf
from sklearn.model_selection import train_test_split
# Parameters
learning_rate = 0.01
training_epochs = 10
split = 0.2
batch_size = 10
graph = tf.Graph()
with graph.as_default():
    # Placeholder & Variables
    X = tf.placeholder(tf.float32, name="X")
    Y = tf.placeholder(tf.float32, name="Y")
    # tf.random_normal(shape=[]) == np.random.randn()
    W = tf.Variable(tf.random_normal(shape=[]), name="weight")
    b = tf.Variable(tf.random_normal(shape=[]), name="bias")
    # Construct a linear model
    Y_predicted = tf.add(tf.multiply(X, W), b)
    # Mean squared error
    #cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
    cost = tf.losses.mean_squared_error(labels=Y, predictions=Y_predicted)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
    # Initializing the variables
    init = tf.global_variables_initializer()
# Launch the graph
with tf.Session(graph=graph) as sess:
    writer = tf.summary.FileWriter("./graphs", sess.graph)  # tensorboard --logdir=./graphs
    sess.run(init)
    # validation_split
    x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=split)
    for epoch in range(training_epochs):
        print("Epoch: {}/{}".format(epoch+1, training_epochs))
        train_range = int(x_train.shape[0] / batch_size)
        for index in progressbar(range(x_train.shape[0])):
            if index >= train_range:
                # keep the progressbar running without further updates
                pass
            else:
                mini_batch = np.random.choice(x_train.shape[0], batch_size, replace=False)
                _, loss = sess.run([optimizer, cost], feed_dict={X: x_train[mini_batch], Y: y_train[mini_batch]})
        val_range = int(x_val.shape[0] / batch_size)
        for _ in range(val_range):
            mini_batch = np.random.choice(x_val.shape[0], batch_size, replace=False)
            # evaluate only - do not run the optimizer on the validation data
            val_loss = sess.run(cost, feed_dict={X: x_val[mini_batch], Y: y_val[mini_batch]})
        print("loss: {} - val_loss: {}".format(loss, val_loss))
    print("Optimization Finished!")
    training_cost = sess.run(cost, feed_dict={X: x, Y: y})
    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')
    writer.close()
In [9]:
# Keras
from keras.models import load_model
# save model -> another way is to save the weights and the architecture separately (see the sketch after this cell)
model.save("linear_model.h5")
# load model
model_loaded = load_model("linear_model.h5")
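The comment above mentions saving weights and architecture separately; a hedged sketch of that alternative (standard Keras API, the file name is chosen here for illustration):
# architecture as JSON, weights as HDF5
json_config = model.to_json()
model.save_weights("linear_model_weights.h5")
# rebuild the model from the JSON config and reload the weights
from keras.models import model_from_json
model_rebuilt = model_from_json(json_config)
model_rebuilt.load_weights("linear_model_weights.h5")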
In [10]:
model.summary(), model_loaded.get_config(), model_loaded.get_weights()
Out[10]:
In [11]:
# TensorFlow - save model without graph
import tensorflow as tf
# Parameters
learning_rate = 0.01
training_epochs = 10
split = 0.2
graph = tf.Graph()
with graph.as_default():
    # Placeholder & Variables
    X = tf.placeholder(tf.float32, name="X")
    Y = tf.placeholder(tf.float32, name="Y")
    # tf.random_normal(shape=[]) == np.random.randn()
    W = tf.Variable(tf.random_normal(shape=[]), name="weight")
    b = tf.Variable(tf.random_normal(shape=[]), name="bias")
    # Construct a linear model
    Y_predicted = tf.add(tf.multiply(X, W), b)
    # Mean squared error
    cost = tf.losses.mean_squared_error(labels=Y, predictions=Y_predicted)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
    # Initializing the variables
    init = tf.global_variables_initializer()
    # Add ops to save and restore all the variables.
    saver = tf.train.Saver()
# Launch the graph
with tf.Session(graph=graph) as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        print("Epoch: {}/{}".format(epoch+1, training_epochs))
        # loop through the entire dataset, one sample at a time
        for i, j in zip(x, y):
            _, loss = sess.run([optimizer, cost], feed_dict={X: i, Y: j})
        print("loss: {}".format(loss))
    print("Optimization Finished!")
    training_cost = sess.run(cost, feed_dict={X: x, Y: y})
    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')
    save_path = saver.save(sess, "linear_model.ckpt")
    print("Model saved in file: {}".format(save_path))
In [12]:
# load model without loading the graph
graph = tf.Graph()  # the graph has to be rebuilt before the variables can be restored
with graph.as_default():
    # Placeholder & Variables
    X = tf.placeholder(tf.float32, name="X")
    Y = tf.placeholder(tf.float32, name="Y")
    # tf.random_normal(shape=[]) == np.random.randn()
    W = tf.Variable(tf.random_normal(shape=[]), name="weight")
    b = tf.Variable(tf.random_normal(shape=[]), name="bias")
    # Construct a linear model
    Y_predicted = tf.add(tf.multiply(X, W), b)
    # Mean squared error
    cost = tf.losses.mean_squared_error(labels=Y, predictions=Y_predicted)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
    # Initializing the variables
    init = tf.global_variables_initializer()
    # Add ops to save and restore all the variables.
    saver = tf.train.Saver()
with tf.Session(graph=graph) as sess:
    saver.restore(sess, "linear_model.ckpt")
    # model weights
    print(sess.run([W, b]))
In [13]:
# TensorFlow - save model with graph
import tensorflow as tf
# Parameters
learning_rate = 0.01
training_epochs = 10
split = 0.2
graph = tf.Graph()
with graph.as_default():
    # Placeholder & Variables
    X = tf.placeholder(tf.float32, name="X")
    Y = tf.placeholder(tf.float32, name="Y")
    # tf.random_normal(shape=[]) == np.random.randn()
    W = tf.Variable(tf.random_normal(shape=[]), name="weight")
    b = tf.Variable(tf.random_normal(shape=[]), name="bias")
    # Construct a linear model
    Y_predicted = tf.add(tf.multiply(X, W), b)
    # Mean squared error
    cost = tf.losses.mean_squared_error(labels=Y, predictions=Y_predicted)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
    # Initializing the variables
    init = tf.global_variables_initializer()
    # Add ops to save and restore all the variables.
    saver = tf.train.Saver(tf.trainable_variables())
    tf.add_to_collection("vars", W)
    tf.add_to_collection("vars", b)
# Launch the graph
with tf.Session(graph=graph) as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        print("Epoch: {}/{}".format(epoch+1, training_epochs))
        # loop through the entire dataset, one sample at a time
        for i, j in zip(x, y):
            _, loss = sess.run([optimizer, cost], feed_dict={X: i, Y: j})
        print("loss: {}".format(loss))
    print("Optimization Finished!")
    training_cost = sess.run(cost, feed_dict={X: x, Y: y})
    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')
    save_path = saver.save(sess, "linear_model.ckpt")
    print("Model saved in file: {}".format(save_path))
In [14]:
# load model with graph
with tf.Session() as sess:
    saver = tf.train.import_meta_graph("linear_model.ckpt.meta")
    saver.restore(sess, tf.train.latest_checkpoint("./"))
    all_vars = tf.get_collection("vars")
    for v in all_vars:
        print(sess.run(v))
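An alternative to the "vars" collection (an added sketch, assuming the checkpoint written above and the variable names "weight"/"bias" given when the graph was built): after import_meta_graph, tensors can also be fetched by name from the imported graph.
restore_graph = tf.Graph()
with restore_graph.as_default(), tf.Session(graph=restore_graph) as sess:
    saver = tf.train.import_meta_graph("linear_model.ckpt.meta")
    saver.restore(sess, tf.train.latest_checkpoint("./"))
    # variable output tensors are named "<name>:0"
    print(sess.run([restore_graph.get_tensor_by_name("weight:0"),
                    restore_graph.get_tensor_by_name("bias:0")]))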
In [15]:
# Keras
model.predict(x[:5])
Out[15]:
In [16]:
# TensorFlow - make predictions with the trained model
import tensorflow as tf
# Parameters
learning_rate = 0.01
training_epochs = 10
split = 0.2
graph = tf.Graph()
with graph.as_default():
    # Placeholder & Variables
    X = tf.placeholder(tf.float32, name="X")
    Y = tf.placeholder(tf.float32, name="Y")
    # tf.random_normal(shape=[]) == np.random.randn()
    W = tf.Variable(tf.random_normal(shape=[]), name="weight")
    b = tf.Variable(tf.random_normal(shape=[]), name="bias")
    # Construct a linear model
    Y_predicted = tf.add(tf.multiply(X, W), b)
    # Mean squared error
    cost = tf.losses.mean_squared_error(labels=Y, predictions=Y_predicted)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
    # Initializing the variables
    init = tf.global_variables_initializer()
# Launch the graph
with tf.Session(graph=graph) as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        print("Epoch: {}/{}".format(epoch+1, training_epochs))
        # loop through the entire dataset, one sample at a time
        for i, j in zip(x, y):
            _, loss = sess.run([optimizer, cost], feed_dict={X: i, Y: j})
        print("loss: {}".format(loss))
    print("Optimization Finished!")
    training_cost = sess.run(cost, feed_dict={X: x, Y: y})
    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')
    # Make prediction
    prediction = sess.run(Y_predicted, feed_dict={X: x[:5]})
    print(prediction)
In [17]:
# data
from keras.datasets import mnist
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train /= 255
x_test /= 255
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
num_classes = 10
# convert class vectors to binary class matrices
y_train = keras.utils.np_utils.to_categorical(y_train, num_classes)
y_test = keras.utils.np_utils.to_categorical(y_test, num_classes)
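To make the "binary class matrices" comment concrete, a tiny added illustration: to_categorical turns an integer label into a one-hot row.
print(keras.utils.np_utils.to_categorical([3], num_classes))
# -> [[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]]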
In [18]:
# Keras
batch_size = 128
epochs = 10
model = Sequential()
model.add(Dense(10, activation="softmax", input_shape=(784,)))
model.summary()
model.compile(loss="categorical_crossentropy",
optimizer="sgd",
metrics=["accuracy"])
In [19]:
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.2, verbose=1)
Out[19]:
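As an added check (hedged, not part of the original output), the fitted model can also be scored on the held-out test set; evaluate returns the loss plus the accuracy requested in compile.
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print("test loss: {} - test acc: {}".format(test_loss, test_acc))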
In [20]:
x_train_full, y_train_full = x_train[:1000], y_train[:1000]
In [21]:
# Parameters
learning_rate = 0.01
training_epochs = 10
batch_size = 128
split = 0.2
import tensorflow as tf
graph = tf.Graph()
with graph.as_default():
    X = tf.placeholder(tf.float32, shape=[None, 784])
    Y = tf.placeholder(tf.float32, shape=[None, 10])
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    init = tf.global_variables_initializer()
    Y_predicted = tf.add(tf.matmul(X, W), b)
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=Y_predicted))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
    # evaluation
    correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_predicted, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # class probabilities (used below for a sample classification)
    y = tf.nn.softmax(tf.add(tf.matmul(X, W), b))
# Launch the graph
with tf.Session(graph=graph) as sess:
    writer = tf.summary.FileWriter("./graphs", sess.graph)  # tensorboard --logdir=./graphs
    sess.run(init)
    # validation_split
    x_train, x_val, y_train, y_val = train_test_split(x_train_full, y_train_full, test_size=split)
    for epoch in range(training_epochs):
        print("Epoch: {}/{}".format(epoch+1, training_epochs))
        train_range = int(x_train.shape[0] / batch_size)
        for index in progressbar(range(x_train.shape[0])):
            if index >= train_range:
                pass
            else:
                mini_batch = np.random.choice(x_train.shape[0], batch_size, replace=False)
                _, loss, acc = sess.run([optimizer, cross_entropy, accuracy], feed_dict={X: x_train[mini_batch], Y: y_train[mini_batch]})
        val_range = int(x_val.shape[0] / batch_size)
        for _ in range(val_range):
            mini_batch = np.random.choice(x_val.shape[0], batch_size, replace=False)
            # evaluate only - do not run the optimizer on the validation data
            val_loss, val_acc = sess.run([cross_entropy, accuracy], feed_dict={X: x_val[mini_batch], Y: y_val[mini_batch]})
        print("loss: {} - acc: {} - val_loss: {} - val_acc: {}".format(loss, acc, val_loss, val_acc))
    classification = sess.run(y, feed_dict={X: x_train[:5]})
    print(classification)
In [22]:
# Keras
model.predict(x_test[:5]), model.predict_classes(x_test[:5]), y_test[:5]
Out[22]:
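An added equivalence note: predict_classes is simply the argmax over the predicted class probabilities, so the same labels can be recovered from predict directly.
np.argmax(model.predict(x_test[:5]), axis=1)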
In [23]:
# TensorFlow
# Parameters
learning_rate = 0.01
training_epochs = 10
batch_size = 128
split = 0.2
import tensorflow as tf
graph = tf.Graph()
with graph.as_default():
    X = tf.placeholder(tf.float32, shape=[None, 784])
    Y = tf.placeholder(tf.float32, shape=[None, 10])
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    init = tf.global_variables_initializer()
    Y_predicted = tf.add(tf.matmul(X, W), b)
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=Y_predicted))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
    # evaluation
    correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_predicted, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # prediction
    pred = tf.nn.softmax(tf.add(tf.matmul(X, W), b))
# Launch the graph
with tf.Session(graph=graph) as sess:
    writer = tf.summary.FileWriter("./graphs", sess.graph)  # tensorboard --logdir=./graphs
    sess.run(init)
    # validation_split
    x_train, x_val, y_train, y_val = train_test_split(x_train_full, y_train_full, test_size=split)
    for epoch in range(training_epochs):
        print("Epoch: {}/{}".format(epoch+1, training_epochs))
        train_range = int(x_train.shape[0] / batch_size)
        for index in progressbar(range(x_train.shape[0])):
            if index >= train_range:
                pass
            else:
                mini_batch = np.random.choice(x_train.shape[0], batch_size, replace=False)
                _, loss, acc = sess.run([optimizer, cross_entropy, accuracy], feed_dict={X: x_train[mini_batch], Y: y_train[mini_batch]})
        val_range = int(x_val.shape[0] / batch_size)
        for _ in range(val_range):
            mini_batch = np.random.choice(x_val.shape[0], batch_size, replace=False)
            # evaluate only - do not run the optimizer on the validation data
            val_loss, val_acc = sess.run([cross_entropy, accuracy], feed_dict={X: x_val[mini_batch], Y: y_val[mini_batch]})
        print("loss: {} - acc: {} - val_loss: {} - val_acc: {}".format(loss, acc, val_loss, val_acc))
    # Making predictions
    pred_prob, pred_classes = sess.run([pred, tf.argmax(pred, 1)], feed_dict={X: x_train[:5]})
    print(pred_prob, pred_classes)
In [24]:
# data
from keras.datasets import mnist
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train /= 255
x_test /= 255
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# convert class vectors to binary class matrices
y_train = keras.utils.np_utils.to_categorical(y_train, num_classes)
y_test = keras.utils.np_utils.to_categorical(y_test, num_classes)
In [25]:
# Keras
batch_size = 128
num_classes = 10
epochs = 10
model = Sequential()
model.add(Dense(32, activation="relu", input_shape=(784,)))
model.add(Dense(10, activation="softmax"))
model.summary()
model.compile(loss="categorical_crossentropy",
optimizer="sgd",
metrics=["accuracy"])
In [26]:
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.2, verbose=1)
Out[26]:
In [27]:
x_train_full, y_train_full = x_train[:1000], y_train[:1000]
In [28]:
# Parameters
learning_rate = 0.01
training_epochs = 10
batch_size = 128
split = 0.2
import tensorflow as tf
graph = tf.Graph()
with graph.as_default():
    def init_weights(shape):
        weights = tf.random_normal(shape, stddev=0.1)
        return tf.Variable(weights)
    def deep_neural_network(X, w_1, w_2):
        h = tf.nn.sigmoid(tf.matmul(X, w_1))
        yhat = tf.matmul(h, w_2)
        return yhat
    X = tf.placeholder(tf.float32, shape=[None, 784])
    Y = tf.placeholder(tf.float32, shape=[None, 10])
    w_1 = init_weights((784, 32))
    w_2 = init_weights((32, 10))
    init = tf.global_variables_initializer()
    Y_predicted = deep_neural_network(X, w_1, w_2)
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=Y_predicted))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
    # evaluation
    correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_predicted, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Launch the graph
with tf.Session(graph=graph) as sess:
    writer = tf.summary.FileWriter("./graphs", sess.graph)  # tensorboard --logdir=./graphs
    sess.run(init)
    # validation_split
    x_train, x_val, y_train, y_val = train_test_split(x_train_full, y_train_full, test_size=split)
    for epoch in range(training_epochs):
        print("Epoch: {}/{}".format(epoch+1, training_epochs))
        train_range = int(x_train.shape[0] / batch_size)
        for index in progressbar(range(x_train.shape[0])):
            if index >= train_range:
                pass
            else:
                mini_batch = np.random.choice(x_train.shape[0], batch_size, replace=False)
                _, loss, acc = sess.run([optimizer, cross_entropy, accuracy], feed_dict={X: x_train[mini_batch], Y: y_train[mini_batch]})
        val_range = int(x_val.shape[0] / batch_size)
        for _ in range(val_range):
            mini_batch = np.random.choice(x_val.shape[0], batch_size, replace=False)
            # evaluate only - do not run the optimizer on the validation data
            val_loss, val_acc = sess.run([cross_entropy, accuracy], feed_dict={X: x_val[mini_batch], Y: y_val[mini_batch]})
        print("loss: {} - acc: {} - val_loss: {} - val_acc: {}".format(loss, acc, val_loss, val_acc))
In [29]:
# data
from keras.datasets import mnist
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape((-1, 28, 28, 1))
x_train = x_train.astype("float32")
x_train /= 255
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# convert class vectors to binary class matrices
y_train = keras.utils.np_utils.to_categorical(y_train, num_classes)
y_test = keras.utils.np_utils.to_categorical(y_test, num_classes)
In [30]:
# Keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import SGD
model = Sequential()
model.add(Conv2D(32, (5, 5), strides=(1, 1), activation="relu", input_shape=(28, 28, 1), padding="same"))  # "same" padding keeps the spatial size (output = input)
model.add(MaxPooling2D(pool_size=(2, 2), padding="same"))
model.add(Conv2D(64, (5, 5), strides=(1, 1), activation="relu", padding="same"))
model.add(MaxPooling2D(pool_size=(2, 2), padding="same"))
model.add(Flatten())
model.add(Dense(1024, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="sgd")
model.summary()
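A short added note on the shapes: with padding="same" each convolution keeps the 28x28 size and each 2x2 max-pooling halves it, so the feature maps go 28 -> 14 -> 7 and Flatten sees 7*7*64 = 3136 values. The summary above lists these shapes; they can also be read off the layers directly:
for layer in model.layers:
    print(layer.name, layer.output_shape)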
In [31]:
model.fit(x_train[:1000], y_train[:1000], batch_size=32, epochs=10, validation_split=0.2, verbose=1)
Out[31]:
In [32]:
x_train_full = x_train[:1000]
y_train_full = y_train[:1000]
In [33]:
# TensorFlow
import tensorflow as tf
learning_rate = 0.01
training_epochs = 10
batch_size = 32
graph = tf.Graph()
with graph.as_default():
    X = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
    Y = tf.placeholder(tf.float32, shape=[None, 10])
    def weight_variable(shape):
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)
    def bias_variable(shape):
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)
    def conv2d(x, W):
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")
    def max_pool_2x2(x):
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    h_conv1 = tf.nn.relu(conv2d(X, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)
    # "SAME" padding keeps 28x28 after each conv; two 2x2 poolings give 28 -> 14 -> 7, so the flattened size is 7*7*64
    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    Y_predicted = tf.nn.softmax(tf.matmul(h_fc1, W_fc2) + b_fc2)
    init = tf.global_variables_initializer()
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(Y_predicted), reduction_indices=[1]))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(Y_predicted, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Launch the graph
with tf.Session(graph=graph) as sess:
    writer = tf.summary.FileWriter("./graphs", sess.graph)  # tensorboard --logdir=./graphs
    sess.run(init)
    # validation_split
    x_train, x_val, y_train, y_val = train_test_split(x_train_full, y_train_full, test_size=split)
    for epoch in range(training_epochs):
        print("Epoch: {}/{}".format(epoch+1, training_epochs))
        train_range = int(x_train.shape[0] / batch_size)
        for index in progressbar(range(x_train.shape[0])):
            if index >= train_range:
                pass
            else:
                mini_batch = np.random.choice(x_train.shape[0], batch_size, replace=False)
                _, loss, acc = sess.run([optimizer, cross_entropy, accuracy], feed_dict={X: x_train[mini_batch], Y: y_train[mini_batch]})
        val_range = int(x_val.shape[0] / batch_size)
        for _ in range(val_range):
            mini_batch = np.random.choice(x_val.shape[0], batch_size, replace=False)
            # evaluate only - do not run the optimizer on the validation data
            val_loss, val_acc = sess.run([cross_entropy, accuracy], feed_dict={X: x_val[mini_batch], Y: y_val[mini_batch]})
        print("loss: {} - acc: {} - val_loss: {} - val_acc: {}".format(loss, acc, val_loss, val_acc))