In [1]:
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import scipy
import math
%matplotlib inline
matplotlib.rcParams['figure.figsize'] = (10.0, 8.0)


---------------------------------------------------------------------------
ImportError                               Traceback (most recent call last)
<ipython-input-1-caaaa9765f89> in <module>()
----> 1 import tensorflow as tf
      2 import pandas as pd
      3 import numpy as np
      4 import matplotlib
      5 import matplotlib.pyplot as plt

/usr/local/lib/python3.5/site-packages/tensorflow/__init__.py in <module>()
     21 from __future__ import print_function
     22 
---> 23 from tensorflow.python import *
     24 
     25 

/usr/local/lib/python3.5/site-packages/tensorflow/python/__init__.py in <module>()
     47 _default_dlopen_flags = sys.getdlopenflags()
     48 sys.setdlopenflags(_default_dlopen_flags | ctypes.RTLD_GLOBAL)
---> 49 from tensorflow.python import pywrap_tensorflow
     50 sys.setdlopenflags(_default_dlopen_flags)
     51 

/usr/local/lib/python3.5/site-packages/tensorflow/python/pywrap_tensorflow.py in <module>()
     26                 fp.close()
     27             return _mod
---> 28     _pywrap_tensorflow = swig_import_helper()
     29     del swig_import_helper
     30 else:

/usr/local/lib/python3.5/site-packages/tensorflow/python/pywrap_tensorflow.py in swig_import_helper()
     22         if fp is not None:
     23             try:
---> 24                 _mod = imp.load_module('_pywrap_tensorflow', fp, pathname, description)
     25             finally:
     26                 fp.close()

/usr/local/Cellar/python3/3.5.0/Frameworks/Python.framework/Versions/3.5/lib/python3.5/imp.py in load_module(name, file, filename, details)
    240                 return load_dynamic(name, filename, opened_file)
    241         else:
--> 242             return load_dynamic(name, filename, file)
    243     elif type_ == PKG_DIRECTORY:
    244         return load_package(name, filename)

/usr/local/Cellar/python3/3.5.0/Frameworks/Python.framework/Versions/3.5/lib/python3.5/imp.py in load_dynamic(name, path, file)
    340         spec = importlib.machinery.ModuleSpec(
    341             name=name, loader=loader, origin=path)
--> 342         return _load(spec)
    343 
    344 else:

ImportError: dlopen(/usr/local/lib/python3.5/site-packages/tensorflow/python/_pywrap_tensorflow.so, 10): Library not loaded: @rpath/libcudart.7.5.dylib
  Referenced from: /usr/local/lib/python3.5/site-packages/tensorflow/python/_pywrap_tensorflow.so
  Reason: image not found

In [ ]:
## READ AND DEFINE DATA SET
# Load the toy 2-feature binary-classification set: columns 0-1 are the
# input coordinates, column 2 is the 0/1 class target.
train = pd.read_csv("./DL/notebooks/intro_to_ann.csv")
print(train.head())
# .ix is deprecated (and removed in pandas >= 1.0); the indexing here is
# purely positional, so .iloc is the correct replacement.
features = train.iloc[:, 0:2]
targets = train.iloc[:, 2]

Xs, ys = np.array(features), np.array(targets)
# Reshape the targets into a column vector so they match the network's
# (n_samples, 1) output placeholder.
ys = np.reshape(ys, (Xs.shape[0], 1))
print(Xs.shape, ys.shape)
#plt.scatter(Xs[:,0], Xs[:,1], s=40, c=ys, cmap=plt.cm.BuGn)

# NOTE(review): dead alternative input pipeline (TF queue-based CSV reader,
# pre-1.0 API: tf.pack / tf.decode_csv) kept for reference; the data is
# loaded with pandas above instead.
#filename_queue = tf.train.string_input_producer(["./DL/notebooks/intro_to_ann.csv"])
#reader = tf.TextLineReader()
#key, value = reader.read(filename_queue)

#record_defaults = [[1], [1], [1]]
#col1, col2, col3 = tf.decode_csv(value, record_defaults=record_defaults)
#features = tf.pack([col1, col2])
#target = col3

## IMPLEMENT REGRESSION

# define placeholders
# x: (batch, 2) float input features; y_: (batch, 1) float 0/1 targets.
# The None batch dimension allows feeding any number of rows at once.
x = tf.placeholder(tf.float32, [None, 2])
y_ = tf.placeholder(tf.float32, [None, 1])

# define variables
def init_weights(shape):
    """Create a trainable weight Variable of `shape`, drawn from N(0, 0.01**2)."""
    return tf.Variable(tf.random_normal(shape, stddev=0.01))
#W = tf.Variable(tf.zeros([2, 1]))
n_samples = Xs.shape[0]
n_i = 2 # number of input nodes
n_h = 5 # number of hidden nodes
n_o = 1 # number of output nodes
n_h_layers = 1 # number of hidden layers (unused below; the model is hard-wired to one)
W1 = init_weights([n_i, n_h])  # input -> hidden weights, shape (2, 5)
W2 = init_weights([n_h, n_o])  # hidden -> output weights, shape (5, 1)
#b = init_weights([n_o])
# Hidden-layer bias, one entry per hidden unit. NOTE(review): later prints
# label this "output bias", but it is applied at the hidden layer in model().
b = init_weights([n_h])
# define model with sigmoid activation function
def model(X, w_h, w_o, bias):
    """Single-hidden-layer network: sigmoid hidden layer, linear output.

    X:    (batch, n_i) input tensor/placeholder
    w_h:  (n_i, n_h) input->hidden weight matrix
    w_o:  (n_h, n_o) hidden->output weight matrix
    bias: (n_h,) hidden-layer bias vector

    Returns the raw (pre-sigmoid) output logits, shape (batch, n_o).
    """
    # Broadcast-add the full per-unit bias vector. The original code added
    # only bias[0] — a single scalar shared by all hidden units — which left
    # bias[1:] as dead variables that never received a gradient.
    h = tf.nn.sigmoid(tf.add(tf.matmul(X, w_h), bias))
    return tf.matmul(h, w_o)

## TRAINING

# Forward pass: y holds the network's raw (pre-sigmoid) outputs.
y = model(x, W1, W2, b)
#cost = -tf.reduce_sum(y_ * tf.log(tf.nn.sigmoid(y)))
#cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(y, y_))

# Active loss: half the summed squared error between sigmoid(y) and the
# targets (NOT cross-entropy, despite the alternatives tried above).
cost = tf.reduce_sum(tf.pow(tf.nn.sigmoid(y) - y_, 2) / 2)

#cost = tf.nn.l2_loss(y - y_)
#cost = tf.reduce_mean(tf.reduce_sum(0.5*(y_ - y)**2, reduction_indices=[0]))
#cost = tf.nn.l2_loss(y - y_)

# gradient descent (momentum variant; commented lines record tuning results)
#train_op = tf.train.GradientDescentOptimizer(0.01).minimize(cost)
train_op = tf.train.MomentumOptimizer(0.01, 0.95).minimize(cost) # CURRENT OPTIMUM FOR 1000 ITERATIONS
#train_op = tf.train.MomentumOptimizer(0.01, 0.97).minimize(cost) # CURRENT OPTIMUM FOR 10000 ITERATIONS
#train_op = tf.train.MomentumOptimizer(0.0001, 0.999).minimize(cost) # CURRENT OPTIMUM FOR 100000 ITERATIONS
#train_op = tf.train.MomentumOptimizer(0.000001, 0.9999).minimize(cost)
# Inference op: class-1 probability for each input row.
predict_op = tf.nn.sigmoid(y)


# initialize session
# NOTE(review): tf.initialize_all_variables() is the pre-TF-1.0 spelling;
# newer TF uses tf.global_variables_initializer() — fine for this old install.
init = tf.initialize_all_variables()

sess = tf.Session()
sess.run(init)

# print initial weights and biases
# (b is the hidden-layer bias vector, despite the "output bias" label below)
print("initial input weights:", W1.eval(session=sess))
print("initial hidden:", W2.eval(session=sess))
print("initial output bias:", b.eval(session=sess))
#plot_decision_boundary()

# Train for 1000 full-batch steps, logging the loss every 100 iterations.
for i in range(1000):
    _, loss_value = sess.run([train_op, cost], feed_dict={x: Xs, y_: ys})
    if i%100 == 0:
        print("\nmse:", loss_value)  # label says "mse" but the value is sum-of-squares / 2
    
# compute accuracy of model (left disabled; argmax over a 1-wide output
# would always return 0, so this accuracy formulation is not meaningful here)
#correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
#accuracy = tf.reduce_mean({tf.cast(correct_prediction, tf.float32)})
#print(correct_prediction)
# print results
#print(sess.run(accuracy, feed_dict={x: Xs, y_: ys}))

# Helper function to plot a decision boundary.
# This generates the contour plot to show the decision boundary visually
def plot_decision_boundary():
    """Plot the trained network's decision boundary over the training data.

    Evaluates `predict_op` on a dense grid covering the data range, thresholds
    the sigmoid output at 0.5, and draws a filled contour with the training
    points on top. Uses the module-level `sess`, `x`, `predict_op`, `Xs`, `ys`.
    """
    # Set min and max values and give it some padding
    x_min, x_max = Xs[:, 0].min() - .5, Xs[:, 0].max() + .5
    y_min, y_max = Xs[:, 1].min() - .5, Xs[:, 1].max() + .5
    h = 0.01  # grid step

    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Column-stack the grid coordinates into (n_points, 2) float32 rows.
    # This replaces the original hard-coded reshape(176358, 2) plus an
    # element-by-element Python copy loop, which broke as soon as the data
    # range or the step size changed.
    grid = np.c_[xx.ravel(), yy.ravel()].astype(np.float32)

    # Predict the class probability for every grid point. predict_op does not
    # depend on y_, so no target feed is required.
    Z = sess.run(predict_op, feed_dict={x: grid})
    # Threshold the sigmoid probabilities into hard 0/1 class labels.
    # (Debug prints of the full Z array were removed — they dumped ~176k rows.)
    Z = (Z >= 0.5).astype(Z.dtype)
    Z = Z.reshape(xx.shape)

    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(Xs[:, 0], Xs[:, 1], s=40, c=ys, cmap=plt.cm.BuGn)

plot_decision_boundary()
# print final weights and output bias
# NOTE(review): these lines rebind the tf.Variable names to plain numpy
# arrays, so the graph variables are no longer reachable via W1/W2/b after
# this point.
W1 = W1.eval(session=sess)
W2 = W2.eval(session=sess)
b = b.eval(session=sess)
print("input weights:", W1)
print("hidden weights:", W2)
# NOTE(review): label says "output bias" but b is the hidden-layer bias
# (shape [n_h]); the output layer in model() has no bias term at all.
print("output bias:", b)
print("\n\n")