In [81]:
import numpy as np
from data_prep import features, targets, features_test, targets_test
In [82]:
# print(features)
In [83]:
# print(targets)
In [84]:
# print(features_test)
In [85]:
# print(targets_test)
In [86]:
# Defining the sigmoid function for activations
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
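In [ ]:
# Optional sanity check (an addition, not from the original notebook):
# sigmoid(0) is 0.5, and the derivative is sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)),
# which is the identity the error-term calculation below relies on.
s = sigmoid(0.0)
print(s)            # 0.5
print(s * (1 - s))  # 0.25, the sigmoid derivative evaluated at 0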
In [87]:
# Use the same seed to make debugging easier
np.random.seed(42)
In [88]:
# print(features.shape)
n_records, n_features = features.shape
# print(n_records, n_features)
last_loss = None
In [89]:
# Initialize weights
weights = np.random.normal(scale=1 / n_features**.5, size=n_features)
# print(weights)
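In [ ]:
# A quick illustration (assumed, not part of the original notebook) of the
# 1/sqrt(n_features) scale used above: it keeps the magnitude of
# h = x . w roughly independent of the number of inputs, so the sigmoid
# does not start out saturated in its flat tails.
x_demo = np.random.normal(size=(100000, n_features))  # hypothetical standardized inputs
h_demo = np.dot(x_demo, weights)
print(h_demo.std())  # on the order of 1, not growing with n_features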
In [90]:
# Neural Network hyperparameters
epochs = 5000
learnrate = 0.05
In [91]:
for e in range(epochs):
    del_w = np.zeros(weights.shape)
    for x, y in zip(features.values, targets):
        # Loop through all records; x is the input, y is the target
        # Calculate the output of the network
        h = np.dot(x, weights)
        output = sigmoid(h)
        # Calculate the error
        error = y - output
        # Calculate the error term: the error scaled by the sigmoid
        # derivative, sigmoid'(h) = output * (1 - output)
        error_term = error * output * (1 - output)
        # Accumulate the change in weights for this record
        del_w += error_term * x
    # Update weights using the learning rate and the average change in weights
    weights += learnrate * del_w / n_records
    # Print out the mean squared error on the training set ten times per run
    if e % (epochs // 10) == 0:
        out = sigmoid(np.dot(features, weights))
        loss = np.mean((out - targets) ** 2)
        if last_loss and last_loss < loss:
            print("Train loss: ", loss, " WARNING - Loss Increasing")
        else:
            print("Train loss: ", loss)
        last_loss = loss
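In [ ]:
# A vectorized sketch of the same batch gradient-descent step (an assumed
# alternative, not the notebook's method): one matrix pass over all records
# replaces the inner Python loop and computes the same average update.
w_vec = np.random.normal(scale=1 / n_features**.5, size=n_features)
for e in range(epochs):
    out = sigmoid(np.dot(features, w_vec))          # outputs for every record at once
    error_term = (targets - out) * out * (1 - out)  # per-record error terms
    w_vec += learnrate * np.dot(error_term, features) / n_records
print("Vectorized train loss:", np.mean((sigmoid(np.dot(features, w_vec)) - targets) ** 2))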
In [92]:
# Calculate accuracy on the test data
test_out = sigmoid(np.dot(features_test, weights))
predictions = test_out > 0.5
accuracy = np.mean(predictions == targets_test)
print("Prediction accuracy: {:.3f}".format(accuracy))
In [ ]: