In [1]:
import numpy as np
import tensorflow as tf
In [2]:
import csv

def load_data(filename):
    # Read the CSV into a list of rows, then split into features and target.
    a = []
    with open(filename) as csv_file:
        data_file = csv.reader(csv_file)
        for row in data_file:
            a.append(row)
    print a[0]  # header row
    np_a = np.array(a)
    x = np_a[1:, :-1].astype(np.float32)      # rows after the header, all but the last column
    target = np_a[1:, -1].astype(np.float32)  # last column is the label
    return x, target
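The slicing above assumes one header row, feature columns first, and the label in the last column. A minimal sanity check of that assumed layout (the 5-feature count and the 0/1 label are assumptions implied by the [5, 1] weight shape and the sigmoid loss used later, not confirmed by the file itself):
In [ ]:
# Sanity-check the assumed CSV layout (a sketch, not part of the original pipeline).
X_check, y_check = load_data('room/train.csv')
assert X_check.shape[1] == 5                    # 5 feature columns, matching the [5, 1] weights below
assert set(np.unique(y_check)) <= {0.0, 1.0}    # binary occupancy label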
In [4]:
X, y=load_data('room/train.csv')
In [5]:
print len(X)
print len(y)
In [6]:
print X[0]
In [7]:
# Inspect the data: number of samples and features
n_samples = len(X)
n_features = len(X[0])
print n_samples
print n_features
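The training cell below fits a logistic regression: the logits are yhat = Xw, and the loss is the mean sigmoid cross-entropy between those logits and the 0/1 targets. A NumPy sketch of that loss (a hypothetical helper, added only to make the formula explicit):
In [ ]:
# Mean sigmoid cross-entropy between logits z and targets t in {0, 1}:
# loss = mean(-t*log(sigmoid(z)) - (1-t)*log(1-sigmoid(z)))
def sigmoid_xent(z, t):
    p = 1.0 / (1.0 + np.exp(-z))  # sigmoid(z)
    return np.mean(-t * np.log(p) - (1 - t) * np.log(1 - p))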
In [12]:
losses = []
training_steps = 50
learning_rate = 0.0001

with tf.Session() as sess:
    # Note: this uses the TF 0.x API (tf.sub and tf.initialize_all_variables
    # were later renamed to tf.subtract and tf.global_variables_initializer).
    input = tf.constant(X.astype(np.float32))
    target = tf.constant(np.transpose([y]).astype(np.float32))
    weights = tf.Variable(tf.random_normal([5, 1], 0, 0.1))  # 5 features -> 1 logit
    tf.initialize_all_variables().run()

    # 1. Linear model with L2 loss
    yhat = tf.matmul(input, weights)
    yerror = tf.sub(yhat, target)
    loss = tf.nn.l2_loss(yerror)

    # 2. Sigmoid output with L2 loss
    # yhat2 = tf.sigmoid(yhat)
    # yerror = tf.sub(yhat2, target)
    # loss = tf.nn.l2_loss(yerror)

    # 3. Sigmoid cross-entropy on the logits; this is the loss actually
    # minimized, since it overwrites the L2 loss defined above.
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(yhat, target))
    update_weights = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

    count = 1
    for _ in range(training_steps):
        update_weights.run()
        losses.append(loss.eval())
        # if count < 10:
        #     print yhat.eval()
        #     print yerror.eval()
        count += 1

    print losses

    # Training is done; get the final values for the graphs
    betas = weights.eval()
    yhat = yhat.eval()
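At this point betas and yhat are plain NumPy arrays. A follow-up sketch, assuming the labels really are in {0, 1}: pass the final logits through a sigmoid to get probabilities, threshold at 0.5, and report training accuracy.
In [ ]:
# Training accuracy of the fitted model (a sketch under the 0/1 label assumption).
probs = 1.0 / (1.0 + np.exp(-yhat))            # sigmoid of the final logits
preds = (probs > 0.5).astype(np.float32).ravel()
print np.mean(preds == y)                      # fraction of training rows classified correctly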