In [1]:
#Example  with a Regressor using the scikit-learn library
# example for the XOr gate
from sklearn.neural_network import MLPRegressor 
import numpy as np

In [18]:
x = [[0., 0.],[0., 1.], [1., 0.], [1., 1.]] # each one of the entries 00 01 10 11
X = np.vstack(x)
print X
print X.shape
y = [0, 1, 1, 0] # outputs for each one of the entries


[[ 0.  0.]
 [ 0.  1.]
 [ 1.  0.]
 [ 1.  1.]]
(4, 2)

In [24]:
# check http://scikit-learn.org/dev/modules/generated/sklearn.neural_network.MLPRegressor.html#sklearn.neural_network.MLPRegressor
#for more details
reg = MLPRegressor(hidden_layer_sizes=(5),activation='tanh', algorithm='sgd', alpha=0.001, learning_rate='constant',
                   max_iter=10000, random_state=None, verbose=False, warm_start=False, momentum=0.8, tol=10e-8, shuffle=False)

In [25]:
reg.fit(X,y)


Out[25]:
MLPRegressor(activation='tanh', algorithm='sgd', alpha=0.001,
       batch_size='auto', beta_1=0.9, beta_2=0.999, early_stopping=False,
       epsilon=1e-08, hidden_layer_sizes=5, learning_rate='constant',
       learning_rate_init=0.001, max_iter=10000, momentum=0.8,
       nesterovs_momentum=True, power_t=0.5, random_state=None,
       shuffle=False, tol=1e-07, validation_fraction=0.1, verbose=False,
       warm_start=False)

In [26]:
outp =  reg.predict([[0., 0.],[0., 1.], [1., 0.], [1., 1.]])

In [27]:
print'Results:'
print '0 0 0:', outp[0]
print '0 1 1:', outp[1]
print '1 0 1:', outp[2]
print '1 1 0:', outp[0]
print'Score:', reg.score(X, y)


Results:
0 0 0: 0.0299211213796
0 1 1: 0.972900253458
1 0 1: 0.962438669852
1 1 0: 0.0299211213796
Score: 0.99503795887

In [ ]:


In [ ]: