In [1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf  # uses the TensorFlow 1.x API (tf.placeholder, tf.Session)
In [15]:
# step 1 - load the data and keep only what we need
dataframe = pd.read_csv('data.csv')
# drop the columns this example doesn't use
dataframe = dataframe.drop(['index', 'price', 'sq_price'], axis=1)
# keep only the first 10 rows
dataframe = dataframe[0:10]
print(dataframe)
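Later cells index the frame by 'area' and 'bathrooms'; a quick sanity check (a sketch, assuming those are the surviving columns) catches a mismatched CSV early:

# sanity check: the feature columns used below must exist
assert {'area', 'bathrooms'}.issubset(dataframe.columns)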
In [21]:
# step 2 - add labels
dataframe.loc[:, 'y1'] = [1, 1, 1, 0, 0, 1, 1, 1, 0, 0]
# y2 is the negation of y1
dataframe.loc[:, 'y2'] = dataframe['y1'] == 0
# turn True/False into 1's and 0's
dataframe.loc[:, 'y2'] = dataframe['y2'].astype(int)
print(dataframe)
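Since y2 is just the complement of y1, the two steps above can also be collapsed into one arithmetic line (a minimal sketch). Together, y1 and y2 form a one-hot encoding of the two classes, which is what the softmax output below expects:

dataframe.loc[:, 'y2'] = 1 - dataframe['y1']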
In [27]:
# step 3 - prepare the data
# a tensor is a generalization of vectors and matrices to higher dimensions
# convert the features and labels into input tensors
inputX = dataframe.loc[:, ['area', 'bathrooms']].values
inputY = dataframe.loc[:, ['y1', 'y2']].values
print(inputX)
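A quick shape check (a sketch) confirms what the graph below expects: one row per example, with 2 feature columns and 2 one-hot label columns:

print(inputX.shape)  # (10, 2) - 10 examples, 2 features
print(inputY.shape)  # (10, 2) - 10 examples, one-hot labels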
In [28]:
inputY
In [30]:
# step 4 - write out the hyperparameters
learning_rate = 0.000001
training_epochs = 2000
display_step = 50
n_samples = inputY.shape[0]  # number of training examples (rows), not total entries
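The learning rate is tiny because the raw 'area' values are presumably in the thousands of square feet, so gradients on the unnormalized features are large. Standardizing the features first (a sketch, not part of the original flow) would let a larger rate converge faster:

# hypothetical preprocessing: zero-mean, unit-variance features
inputX_scaled = (inputX - inputX.mean(axis=0)) / inputX.std(axis=0)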
In [41]:
# step 5 - create the computation graph
# placeholder for the input features: any number of examples, 2 features each
x = tf.placeholder(tf.float32, [None, 2])
# create weights: a 2x2 float matrix
W = tf.Variable(tf.zeros([2, 2]))
# add biases
b = tf.Variable(tf.zeros([2]))
# multiply our inputs by our weights, the first calculation;
# the weights govern how data flows through the computation graph
y_values = tf.add(tf.matmul(x, W), b)
# softmax - the activation function, turning raw scores into class probabilities
y = tf.nn.softmax(y_values)
# placeholder to feed in the matrix of true labels
y_ = tf.placeholder(tf.float32, [None, 2])
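Softmax exponentiates each raw score and normalizes so every row sums to 1, which is what makes the outputs usable as class probabilities. A NumPy sketch of the same transformation, for intuition only (it is not part of the graph):

def softmax_np(z):
    # subtract the row max for numerical stability, then normalize
    e = np.exp(z - z.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)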
In [43]:
# squared-error cost, averaged over the training examples
cost = tf.reduce_sum(tf.pow(y_ - y, 2)) / (2 * n_samples)
# gradient descent minimizes the cost by adjusting W and b
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
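This is the squared-error cost scaled by 1/(2*n_samples); the same quantity computed outside the graph (a sketch, handy for checking a session run against):

def mse_cost_np(preds, labels, n):
    # squared error summed over all entries, scaled by 1 / (2n)
    return np.sum(np.square(labels - preds)) / (2 * n)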
In [45]:
# initialize the variables and run the graph
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(training_epochs):
    sess.run(optimizer, feed_dict={x: inputX, y_: inputY})
    # log the cost every display_step epochs
    if i % display_step == 0:
        cc = sess.run(cost, feed_dict={x: inputX, y_: inputY})
        print("Training step:", '%04d' % i, "cost=", "{:.9f}".format(cc))
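After training, the learned class probabilities can be inspected by running the softmax output on the training features (a sketch):

print(sess.run(y, feed_dict={x: inputX}))  # per-example [P(y1), P(y2)]
sess.close()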