In [ ]:
import tensorflow as tf

# Earlier experiments with placeholders, constants and matmul, kept for reference:
# a = tf.placeholder(dtype=tf.int32, name="a")
# a = tf.constant([[1, 2], [3, 4]], name="a")
# b = tf.constant([[1], [2]], name="b")
# c = tf.matmul(a, b)
# print(a.shape, b.shape, c.shape)
# a = tf.placeholder(shape=(None, 2), dtype=tf.int32, name="a")
# b = a + a

# A variable, an op that increments it, and a Saver to checkpoint it.
c = tf.Variable(1, name="c")
d = tf.assign(c, c + 1)
saver = tf.train.Saver()

with tf.Session() as sess:
    # print("a + b", sess.run(c, feed_dict={a: 1}))
    # print("a * b", sess.run(c))
    # print(sess.run(a, feed_dict={a: [[1, 2]]}))
    sess.run(tf.global_variables_initializer())
    print(sess.run(d))                    # c: 1 -> 2
    saver.save(sess, "model/model.ckpt")  # checkpoint c = 2

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # restore overwrites the freshly initialized value with the checkpointed one
    saver.restore(sess, save_path="model/model.ckpt")
    print(sess.run(d))                    # c: 2 -> 3
In [ ]:
import tensorflow as tf
LOG_DIR = "./logs"
a = tf.constant(1, name="a")
b = tf.constant(1, name="b")
c = a + b
# Write the default graph so it can be inspected in TensorBoard.
graph = tf.get_default_graph()
with tf.summary.FileWriter(LOG_DIR) as writer:
    writer.add_graph(graph)
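The written graph can then be viewed by launching TensorBoard against the same directory, e.g. running tensorboard --logdir ./logs from a shell and opening the address it prints.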
In [ ]:
import tensorflow as tf
# Minimize f(x) = (x - 1)^2 with gradient descent; the minimum is at x = 1.
x = tf.Variable(100.0, name="x")
func = (x - 1) ** 2

optimizer = tf.train.GradientDescentOptimizer(
    learning_rate=0.1
)
train_step = optimizer.minimize(func)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(40):
        sess.run(train_step)
        print("x = ", sess.run(x))
In [ ]:
# Boston housing data: 13 numeric features per sample, the target is the house price.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.boston_housing.load_data()
In [21]:
import matplotlib.pyplot as plt

# Standardize features and targets with the *training* statistics;
# the test set is transformed with the same mean/std to avoid leakage.
x_train_mean = x_train.mean(axis=0)
x_train_std = x_train.std(axis=0)
y_train_mean = y_train.mean()
y_train_std = y_train.std()

x_train = (x_train - x_train_mean) / x_train_std
y_train = (y_train - y_train_mean) / y_train_std
x_test = (x_test - x_train_mean) / x_train_std
y_test = (y_test - y_train_mean) / y_train_std

plt.plot(x_train[:, 5], y_train, "o")
Out[21]:
In [ ]:
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams["font.size"] = 10*3
plt.rcParams["figure.figsize"] = [18, 12]
# plt.rcParams["font.family"] = ["IPAexGothic"]
plt.hist(y_train, bins=20)
plt.xlabel("")
plt.ylabel("")
plt.show()
plt.plot(x_train[:, 5], y_train, "o")
plt.xlabel("")
plt.ylabel("")
In [25]:
# Linear regression: pred = x @ w with a mean-squared-error loss.
x = tf.placeholder(tf.float32, (None, 13), name="x")
y = tf.placeholder(tf.float32, (None, 1), name="y")
w = tf.Variable(tf.random_normal((13, 1)))
pred = tf.matmul(x, w)
loss = tf.reduce_mean((y - pred) ** 2)

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train_step = optimizer.minimize(loss)

# Full-batch training, kept for reference; the mini-batch version below is used instead.
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     for step in range(100):
#         train_loss, _ = sess.run(
#             [loss, train_step],
#             feed_dict={
#                 x: x_train,
#                 y: y_train.reshape((-1, 1))
#             }
#         )
#         print("step: {}, train_loss: {}".format(
#             step, train_loss
#         ))
#     pred_ = sess.run(pred, feed_dict={x: x_test})
In [26]:
# Predicted (standardized) target against the RM feature on the test set.
plt.plot(x_test[:, 5], pred_, "o")
Out[26]:
In [24]:
import numpy as np

def get_batches(x, y, batch_size):
    """Shuffle the data and yield mini-batches of (x, y)."""
    n_data = len(x)
    indices = np.arange(n_data)
    np.random.shuffle(indices)
    x_shuffled = x[indices]
    y_shuffled = y[indices]
    for i in range(0, n_data, batch_size):
        x_batch = x_shuffled[i: i + batch_size]
        y_batch = y_shuffled[i: i + batch_size]
        yield x_batch, y_batch

BATCH_SIZE = 32
step = 0
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(100):
        for x_batch, y_batch in get_batches(x_train, y_train, BATCH_SIZE):
            train_loss, _ = sess.run(
                [loss, train_step],
                feed_dict={
                    x: x_batch,
                    y: y_batch.reshape((-1, 1))
                }
            )
            print(step, train_loss)
            step += 1
    pred_ = sess.run(pred, feed_dict={x: x_test})
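Since the targets were standardized, the test-set predictions can be mapped back to the original price scale by inverting the transform; a minimal sketch using the training statistics computed earlier (pred_price is just an illustrative name):

pred_price = pred_ * y_train_std + y_train_mean  # undo the standardization of y
print(pred_price[:5].ravel())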