A perceptron classifier is a simple model of a neuron. It takes a number of inputs $(x_{1}, \ldots, x_{n})$, each with an associated weight $(w_{1}, \ldots, w_{n})$, and computes $$ s = \sum\limits_{i=1}^{n} w_{i} \cdot x_{i} $$
The weighted sum $s$ of these inputs is then passed through a step function $f$: $$ f(s) = \begin{cases} 1, &\quad\text{if } s \geqslant 0 \\ 0, &\quad\text{otherwise} \end{cases} $$
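Training repeatedly adjusts the weights whenever the prediction is wrong. The update used in the code below is the perceptron learning rule, where $y$ is the target label, $f(s)$ is the predicted output, and $\eta$ is the learning rate: $$ w_{i} \leftarrow w_{i} + \eta \, (y - f(s)) \, x_{i} $$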
%matplotlib inline
from random import choice
from numpy import array, dot, random
# Threshold function
unit_step = lambda x: 0 if x < 0 else 1
# Training data for the OR function; the constant third input acts as the bias
training_data = [
    (array([0, 0, 1]), 0),
    (array([0, 1, 1]), 1),
    (array([1, 0, 1]), 1),
    (array([1, 1, 1]), 1),
]
# Weights
w = random.rand(3)
# Errors
errors = []
# Learning rate
eta = 0.2
# Training iterations
n = 500
# Training
for i in range(n):
    # Pick a random training sample and compute the current prediction
    x, expected = choice(training_data)
    result = dot(w, x)
    error = expected - unit_step(result)
    errors.append(error)
    # Perceptron learning rule: nudge the weights to reduce the error
    w += eta * error * x
# Testing
for x, _ in training_data:
    result = dot(x, w)
    print("{}: {} -> {}".format(x[:2], result, unit_step(result)))
# Plot the error at each training iteration
from pylab import plot, ylim
ylim([-1, 1])
plot(errors)
[Output: plot of the prediction error at each training iteration]
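Once training has converged, the learned weight vector can classify new inputs directly. A minimal sketch, assuming `w`, `unit_step`, `array`, and `dot` from the cell above are still in scope (the hypothetical `new_input` is just an example point):
# Classify a new point with the trained weights (sketch; assumes the names from the cell above)
new_input = array([1, 0, 1])         # first two values are the features, the trailing 1 is the bias input
print(unit_step(dot(w, new_input)))  # should print 1 for the OR function once training has converged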