In [32]:
import pandas as pd              # work with data as tables
import numpy as np               # work with numerical matrices
import matplotlib.pyplot as plt  # plot results
import tensorflow as tf          # build and train the model
In [33]:
# step 1 - load the data from the CSV file
dataframe = pd.read_csv('data.csv')
In [34]:
dataframe.head()
dataframe = dataframe[0:10]   # use only the first 10 rows for this example
dataframe
Out[34]:
In [35]:
dataframe = dataframe.drop(['index', 'price', 'sq_price'], axis=1)   # remove columns we won't use as features
In [36]:
dataframe.head()
Out[36]:
In [41]:
# step 2 - add labels
# 1 is a good buy and 0 is a bad buy
dataframe.loc[:, 'y1'] = [1, 1, 1, 0, 0, 1, 0, 1, 1, 1]
dataframe.loc[:, 'y2'] = dataframe['y1'] == 0            # y2 is the opposite of y1 (1 = bad buy)
dataframe.loc[:, 'y2'] = dataframe['y2'].astype(int)     # turn True/False into 1/0
dataframe
Out[41]:
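In [ ]:
# Sanity check (a small sketch, not part of the original notebook):
# y1 and y2 should be complementary one-hot columns, so each row sums to 1
assert (dataframe['y1'] + dataframe['y2'] == 1).all()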
In [43]:
# step 3 - prepare data for tensorflow (tensors)
# tensors are a generic version of vectors and matrices
#vector is a list of numbers (1D tensor)
#matrix is a list of lists of numbers (2D tensor)
#a list of lists of lists of numbers is a 3D tensor
#......
#convert features to input tensor
inputX = dataframe.loc[:, ['area', 'bathrooms']].values
#convert labels to a label tensor
inputY = dataframe.loc[:, ['y1', 'y2']].values
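In [ ]:
# Quick look at the resulting arrays (a sketch, not in the original notebook):
# inputX holds the [area, bathrooms] features and inputY the one-hot [y1, y2] labels,
# one row per house
print(inputX.shape, inputY.shape)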
In [57]:
#step 4 - write out our hyperparameters
learning_rate = 0.000001
training_epochs = 2000
display_step = 50
n_samples = inputY.shape[0]   # number of training examples
In [55]:
#step 5 - Create our computation graph/neural network
x = tf.placeholder(tf.float32, [None, 2])    # input features: [area, bathrooms]
W = tf.Variable(tf.zeros([2, 2]))            # weights: 2 features -> 2 output classes
b = tf.Variable(tf.zeros([2]))               # biases, one per output class
y_values = tf.add(tf.matmul(x, W), b)        # raw scores: xW + b
y = tf.nn.softmax(y_values)                  # softmax turns scores into probabilities
y_ = tf.placeholder(tf.float32, [None, 2])   # placeholder for the true labels [y1, y2]
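In [ ]:
# Illustration only (a NumPy sketch, not part of the TensorFlow graph):
# softmax turns the two raw scores in y_values into probabilities that sum to 1
z = np.array([[2.0, 0.5]])                               # example raw scores for one house
probs = np.exp(z) / np.exp(z).sum(axis=1, keepdims=True)
print(probs)                                             # roughly [[0.82, 0.18]]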
In [56]:
#step 6 - define the cost function and the optimizer
# Cost function: Mean squared error
cost = tf.reduce_sum(tf.pow(y_ - y, 2))/(2*n_samples)
# Gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
In [ ]:
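#step 7 - run the training loop (a minimal sketch of the TF1-style session code
# that would typically follow; it reuses the graph and hyperparameters defined above)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

for i in range(training_epochs):
    sess.run(optimizer, feed_dict={x: inputX, y_: inputY})
    if i % display_step == 0:
        cc = sess.run(cost, feed_dict={x: inputX, y_: inputY})
        print("Training step:", '%04d' % i, "cost=", "{:.9f}".format(cc))

print("Optimization Finished!")
print("W=", sess.run(W), "b=", sess.run(b))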