In [1]:
import tensorflow as tf
import pandas as pd
import numpy as np
import time
In [2]:
dataset = pd.read_csv("dataset.txt")
In [3]:
dataset.head()
Out[3]:
In [4]:
def minmax(data):
    # min-max scaling: map values into the [0, 1] range
    max_value = np.max(data)
    min_value = np.min(data)
    norm_value = (data - min_value) / (max_value - min_value)
    return norm_value
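minmax implements min-max scaling, mapping every value into [0, 1] via (x - min) / (max - min). A quick sanity check of the helper (the sample values are made up for illustration):

sample = np.array([[2.0], [4.0], [6.0]])
print(minmax(sample))   # expected: [[0.], [0.5], [1.]]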
In [5]:
x_data = dataset.Brain_Weight
y_data = dataset.Body_Weight
x_data = np.expand_dims(x_data, -1)
y_data = np.expand_dims(y_data, -1)
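np.expand_dims(a, -1) appends a trailing axis, turning each length-N column into an (N, 1) array so it matches the [None, 1] placeholders defined below. A small shape check:

a = np.array([1.0, 2.0, 3.0])
print(a.shape, np.expand_dims(a, -1).shape)   # (3,) (3, 1)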
In [6]:
# data normalization
x_data = minmax(x_data)
y_data = minmax(y_data)
In [7]:
batch_size = 20
print_every = 10
epochs = 100   # full passes over the training data
x = tf.placeholder(tf.float32, shape=[None,1])
y = tf.placeholder(tf.float32, shape=[None,1])
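A placeholder carries no value of its own; data is supplied at sess.run time through feed_dict, and the None in the shape lets the same graph accept any batch size. A standalone toy sketch of the mechanism (not part of the model above):

p = tf.placeholder(tf.float32, shape=[None, 1])
doubled = p * 2.0
with tf.Session() as s:
    print(s.run(doubled, feed_dict={p: np.array([[1.0], [2.0]])}))   # [[2.] [4.]]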
In [8]:
w = tf.get_variable("weight", shape=[1,1], initializer=tf.random_normal_initializer(stddev=0.01))
b = tf.get_variable("bias", shape=[1,], initializer=tf.zeros_initializer())
hypothesis = tf.nn.bias_add(tf.matmul(x,w), b)
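hypothesis is the linear model xW + b; tf.nn.bias_add is just an addition with an explicit shape check on the bias. The same arithmetic in plain NumPy, with toy numbers for illustration:

xs = np.array([[1.0], [2.0]])   # two inputs, shape (2, 1)
ws = np.array([[3.0]])          # weight, shape (1, 1)
bs = np.array([0.5])            # bias, shape (1,)
print(xs @ ws + bs)             # [[3.5], [6.5]]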
In [9]:
cost = tf.reduce_mean(tf.square(hypothesis-y))
optimizer = tf.train.AdamOptimizer(0.01).minimize(cost)
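The cost is mean squared error, MSE = (1/N) * sum((hypothesis_i - y_i)^2), which Adam minimizes with learning rate 0.01. The same quantity computed by hand in NumPy (toy numbers):

pred = np.array([[0.2], [0.4]])
target = np.array([[0.0], [1.0]])
print(np.mean(np.square(pred - target)))   # 0.2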
In [10]:
saver = tf.train.Saver()
start_time = time.time()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    n_batch = int(len(x_data) / batch_size)   # mini-batches per epoch
    for epoch in range(epochs):
        avg_loss = 0
        for step in range(n_batch):
            # slice out one mini-batch (the stride is batch_size, not n_batch)
            batch_x = x_data[step * batch_size:(step + 1) * batch_size]
            batch_y = y_data[step * batch_size:(step + 1) * batch_size]
            # run the update step and fetch the cost in a single call
            _, loss = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
            avg_loss += loss / n_batch
        if epoch % print_every == 0:
            print("cost at %d: %f" % (epoch + 1, avg_loss))
    finish_time = time.time() - start_time
    save_path = saver.save(sess, "./model/model")
    print("Label : ", y_data[:1])
    print("predict : ", sess.run(hypothesis, feed_dict={x: x_data[:1]}))
    print("Training time : ", finish_time)
In [11]:
# re-open a session and restore the trained weights from the checkpoint
sess = tf.Session()
saver = tf.train.Saver()
saver.restore(sess, "./model/model")
sess.run(hypothesis, feed_dict={x: x_data[:1]})
Out[11]:
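The restore above works because the graph (x, w, b, hypothesis) still exists in this process. In a fresh script the graph would have to be rebuilt first, either by re-running the definition cells or by importing it from the saved .meta file. A minimal sketch of the latter, which restores the variables without naming individual tensors:

tf.reset_default_graph()
meta_saver = tf.train.import_meta_graph("model/model.meta")
with tf.Session() as s:
    meta_saver.restore(s, "model/model")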