In [1]:
import os
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import xlrd
Step 1: read in data from the .xls file
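There is no code cell for this step in the notebook, so here is a minimal sketch using xlrd; the path 'data/fire_theft.xls' is an assumed location for the fire/theft spreadsheet.
DATA_FILE = 'data/fire_theft.xls'  # assumed path to the dataset

# read the sheet into a numpy array, skipping the header row
book = xlrd.open_workbook(DATA_FILE, encoding_override='utf-8')
sheet = book.sheet_by_index(0)
data = np.asarray([sheet.row_values(i) for i in range(1, sheet.nrows)])
n_samples = sheet.nrows - 1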
Step 2: create placeholders for input X (number of fires) and label Y (number of thefts)
In [5]:
X = tf.placeholder(tf.float32, name="X")
Y = tf.placeholder(tf.float32, name="Y")
Step 3: create weight and bias, initialized to 0
In [6]:
w = tf.Variable(0.0, name='w')
b = tf.Variable(0.0, name='b')
Step 4: build model to predict Y
In [7]:
Y_predicted = w * X + b
Step 5: use the square error as the loss function
In [8]:
loss = tf.square(Y - Y_predicted)
Step 5a: implement Huber loss function from lecture and try it out
In [7]:
def huber_loss(labels, predictions, delta=1.0):
    pass
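One way to fill in the stub above, a minimal sketch assuming the standard piecewise Huber definition (quadratic for small residuals, linear for large ones) and the TF 1.x tf.where API:
def huber_loss(labels, predictions, delta=1.0):
    residual = tf.abs(labels - predictions)
    condition = tf.less(residual, delta)
    # quadratic branch for |residual| < delta
    small_res = 0.5 * tf.square(residual)
    # linear branch for |residual| >= delta
    large_res = delta * residual - 0.5 * tf.square(delta)
    return tf.where(condition, small_res, large_res)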
In [8]:
# loss = huber_loss(Y, Y_predicted)  # uncomment to use the Huber loss instead of square error
Step 6: use gradient descent with a learning rate of 0.001 to minimize the loss
In [9]:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)
In [10]:
sess = tf.Session() # prefer with tf.Session() as sess: in your code
Step 7: initialize the necessary variables, in this case, w and b
In [11]:
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter('./graphs/linear_reg', sess.graph)
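With the FileWriter in place, you can inspect the computation graph by running tensorboard --logdir=./graphs/linear_reg from a terminal and opening the URL it reports (typically http://localhost:6006).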
Step 8: train the model
In [12]:
for i in range(50):  # train the model for 50 epochs
    total_loss = 0
    for x, y in data:
        # Session runs the optimizer op and fetches the value of loss
        _, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})
        total_loss += l
    print('Epoch {0}: {1}'.format(i, total_loss / float(n_samples)))

# close the writer when you're done using it
writer.close()
Step 9: output the values of w and b
In [13]:
w, b = sess.run([w, b])  # fetch the trained values; w and b are now plain Python floats
Step 10: plot the results
In [17]:
X, Y = data[:, 0], data[:, 1]  # rebind X, Y to the numpy columns for plotting
plt.scatter(X, Y, label="Real data")
plt.plot(X, w * X + b, label="Predicted data", color='r')
plt.legend()
plt.show()
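Since the session was opened without a with block, close it explicitly once you are done:
sess.close()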