In [1]:
import numpy as np
def sigmoid(x):
    """
    Calculate sigmoid
    """
    return 1 / (1 + np.exp(-x))
x = np.array([0.5, 0.1, -0.2])
target = 0.6
learnrate = 0.5
weights_input_hidden = np.array([[0.5, -0.6],
                                 [0.1, -0.2],
                                 [0.1, 0.7]])
weights_hidden_output = np.array([0.1, -0.3])
In [9]:
## Forward pass
hidden_layer_input = np.dot(x, weights_input_hidden)
hidden_layer_input
Out[9]:
array([ 0.24, -0.46])
In [7]:
hidden_layer_output = sigmoid(hidden_layer_input)
hidden_layer_output
Out[7]:
array([ 0.55971365,  0.38698582])
In [10]:
output_layer_in = np.dot(hidden_layer_output, weights_hidden_output)
output_layer_in
Out[10]:
In [11]:
output = sigmoid(output_layer_in)
output
Out[11]:
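The first cell defines target and learnrate, but the cells above only run the forward pass. A minimal sketch of the matching backward pass (one gradient-descent step, assuming a squared-error loss and reusing the variable names defined above):
In [ ]:
## Backward pass (a sketch; reuses the forward-pass variables above)
error = target - output
# Error term at the output: error times the sigmoid derivative
output_error_term = error * output * (1 - output)
# Propagate the error term back to the hidden layer
hidden_error = np.dot(output_error_term, weights_hidden_output)
hidden_error_term = hidden_error * hidden_layer_output * (1 - hidden_layer_output)
# Gradient-descent weight updates
weights_hidden_output += learnrate * output_error_term * hidden_layer_output
weights_input_hidden += learnrate * hidden_error_term * x[:, None]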
In [13]:
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(x): return 1./(1.+np.exp(-x))
x = np.linspace(-5., 5., 200)
plt.plot(x, sigmoid(x)*(1.-sigmoid(x)))
plt.show()
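The quantity plotted above, sigmoid(x) * (1 - sigmoid(x)), is the derivative of the sigmoid. A quick numerical sanity check against a central finite difference (the step size h and test point x0 are arbitrary choices):
In [ ]:
# Compare the analytic sigmoid derivative with a finite-difference estimate
h = 1e-6
x0 = 0.7
analytic = sigmoid(x0) * (1. - sigmoid(x0))
numeric = (sigmoid(x0 + h) - sigmoid(x0 - h)) / (2. * h)
print(analytic, numeric)  # the two values should agree to several decimal places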
In [15]:
sig = lambda x: 1 / (1 + np.exp(-x))
In [16]:
sig(-1)
Out[16]:
0.2689414213699951
In [17]:
sigmoid(-1)
Out[17]:
0.2689414213699951
In [18]:
import numpy as np
def sigmoid(x):
    """
    Calculate sigmoid
    """
    return 1 / (1 + np.exp(-x))
# Network size
N_input = 4
N_hidden = 3
N_output = 2
np.random.seed(42)
# Make some fake data
X = np.random.randn(N_input)
weights_in_hidden = np.random.normal(0, scale=0.1, size=(N_input, N_hidden))
weights_hidden_out = np.random.normal(0, scale=0.1, size=(N_hidden, N_output))
print('Input-to-hidden weights:', weights_in_hidden)
print('Hidden-to-output weights:', weights_hidden_out)
# Make a forward pass through the network
hidden_layer_in = np.dot(X, weights_in_hidden)
print(hidden_layer_in)
hidden_layer_out = sigmoid(hidden_layer_in)
print('Hidden-layer Output:')
print(hidden_layer_out)
output_layer_in = np.dot(hidden_layer_out, weights_hidden_out)
output_layer_out = sigmoid(output_layer_in)
print('Output-layer Output:')
print(output_layer_out)
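The forward pass above handles a single example; because each step is just a matrix product, the same two lines work for a whole batch when the inputs are stacked into a matrix of shape (n_samples, N_input). A sketch:
In [ ]:
# Batched forward pass: each row of X_batch is one input example
X_batch = np.random.randn(5, N_input)
hidden_out = sigmoid(np.dot(X_batch, weights_in_hidden))      # shape (5, N_hidden)
output_out = sigmoid(np.dot(hidden_out, weights_hidden_out))  # shape (5, N_output)
print(output_out.shape)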
Testing whether TensorFlow works, using the example below.
In [5]:
# testing if tensorflow works
import tensorflow as tf
x = tf.constant(8)
y = tf.constant(9)
z = tf.multiply(x,y)
sess = tf.Session()
sess.run(z)
Out[5]:
72
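Note that tf.Session is TensorFlow 1.x style; in TensorFlow 2.x sessions were removed and ops run eagerly, so (assuming TF 2.x is installed) the same check reduces to:
In [ ]:
# TensorFlow 2.x equivalent: eager execution, no Session needed
import tensorflow as tf
z = tf.multiply(tf.constant(8), tf.constant(9))
print(z.numpy())  # 72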
In [7]:
# Import libraries (NumPy, TensorFlow, matplotlib)
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline
# Create 100 points following the function y = 0.1 * x + 0.3, with some Gaussian noise
num_points = 100
vectors_set = []
for i in range(num_points):
    x1 = np.random.normal(0.0, 0.55)
    y1 = x1 * 0.1 + 0.3 + np.random.normal(0.0, 0.03)
    vectors_set.append([x1, y1])
x_data = [v[0] for v in vectors_set]
y_data = [v[1] for v in vectors_set]
# Plot data
plt.plot(x_data, y_data, 'r*', label='Original data')
plt.legend()
plt.show()
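Since the points are drawn from y = 0.1 * x + 0.3 plus noise, an ordinary least-squares fit in plain numpy gives a reference answer to compare the TensorFlow result against. A sketch:
In [ ]:
# Closed-form least-squares fit as a sanity check; expect W near 0.1 and b near 0.3
W_ref, b_ref = np.polyfit(x_data, y_data, 1)
print('numpy fit: W=%f b=%f' % (W_ref, b_ref))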
In [8]:
# Create our linear regression model
# Variables live inside the graph's memory
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
y = W * x_data + b
# Define a loss function that takes into account the distance between
# the predictions and our dataset
loss = tf.reduce_mean(tf.square(y-y_data))
# Create an optimizer for our loss function (With gradient descent)
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)
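Under the hood, optimizer.minimize(loss) adds ops that compute the gradients of the loss and apply the update W <- W - learning_rate * dLoss/dW. For this squared-error loss the gradients have a simple closed form; a sketch of a single step in plain numpy (hypothetical names, not part of the graph):
In [ ]:
# One hand-rolled gradient-descent step (a sketch of what `train` does)
x_arr, y_arr = np.array(x_data), np.array(y_data)
W_np, b_np, lr = 0.0, 0.0, 0.5
pred = W_np * x_arr + b_np
grad_W = 2 * np.mean((pred - y_arr) * x_arr)  # d mean((pred - y)^2) / dW
grad_b = 2 * np.mean(pred - y_arr)            # d mean((pred - y)^2) / db
W_np -= lr * grad_W
b_np -= lr * grad_b
print(W_np, b_np)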
In [11]:
# Run session
# Initialize all graph variables
init = tf.global_variables_initializer()
# Create a session and initialize the graph variables (will actually run now...)
session = tf.Session()
session.run(init)
# Train for 8 steps
for step in range(8):
    # Optimize one step
    session.run(train)
    # Read graph variables (read-only) with session.run(var_name)
    # print("Step=%d, loss=%f, [W=%f b=%f]" % (step, session.run(loss), session.run(W), session.run(b)))
# Plot the fit given by the final weights and bias (lowest loss)
plt.plot(x_data, y_data, 'ro', label='Original data')
plt.plot(x_data, session.run(W) * x_data + session.run(b), label='Fitted line')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()
# Close the Session when we're done.
session.close()
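Manually closing the session works, but the idiomatic TF 1.x pattern is a with block, which closes the session automatically even if an error occurs. A minimal sketch of the same training loop:
In [ ]:
# Same training loop with a context-managed session (closed automatically)
with tf.Session() as sess:
    sess.run(init)
    for step in range(8):
        sess.run(train)
    print('W=%f b=%f' % (sess.run(W)[0], sess.run(b)[0]))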
In [5]:
from sklearn import preprocessing
lb = preprocessing.LabelBinarizer()
lb.fit([1, 2, 6, 4, 2, 6, 6, 6, 6])
Out[5]:
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
In [6]:
lb.classes_
Out[6]:
array([1, 2, 4, 6])
In [8]:
lb.fit_transform([1, 2, 4, 6, 6, 6, 6, 6])
Out[8]:
array([[1, 0, 0, 0],
       [0, 1, 0, 0],
       [0, 0, 1, 0],
       [0, 0, 0, 1],
       [0, 0, 0, 1],
       [0, 0, 0, 1],
       [0, 0, 0, 1],
       [0, 0, 0, 1]])
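LabelBinarizer is invertible: inverse_transform maps one-hot rows back to the original labels, which is handy for decoding network outputs. A sketch:
In [ ]:
# Map one-hot rows back to the original labels
one_hot = lb.fit_transform([1, 2, 4, 6, 6, 6, 6, 6])
print(lb.inverse_transform(one_hot))  # [1 2 4 6 6 6 6 6]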