LaTeX macro definitions...
$$ \newcommand{\activthres}{\theta} \newcommand{\activfunc}{f} \newcommand{\pot}{p} \newcommand{\learnrate}{\eta} \newcommand{\itr}{t} \newcommand{\sigin}{s_i} \newcommand{\sigout}{s_j} \newcommand{\sigoutdes}{d_j} \newcommand{\wij}{w_{ij}} $$

Author: F. Rosenblatt
Reference: F. Rosenblatt (1958). "The Perceptron: A Probabilistic Model for Information Storage and Organization in the Brain". Psychological Review, 65(6), 386-408.
The model consists of the following elements:

- sensory units (S-units), whose output is 0 or 1: 1 if the input signal exceeds a threshold $\activthres$, 0 otherwise;
- association units (A-units), whose output is 0 or 1: 1 if the sum of the input signals exceeds a threshold $\activthres$, 0 otherwise;
- response units (R-units), whose output is 1, -1, or an undetermined value: 1 if the sum of the input signals is positive, -1 if it is negative.

Evaluation of the function:

$$ \pot = \sum \sigin \wij $$

$$ \sigout = \activfunc(\pot - \activthres) $$
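Before plotting the transfer functions, here is a minimal numeric evaluation of these two formulas; the inputs, weights, and threshold below are made-up values chosen only for illustration:

In [ ]:
import numpy as np

s = np.array([1., 0., 1.])       # input signals s_i (hypothetical values)
w = np.array([0.5, -0.2, 0.8])   # synaptic weights w_ij (hypothetical values)
theta = 1.                       # activation threshold theta (hypothetical value)

p = np.sum(s * w)                # potential: p = sum of s_i * w_ij = 1.3
s_j = 1. if p - theta >= 0. else 0.   # Heaviside transfer function
print(p, s_j)                    # -> 1.3 1.0 (the unit fires since 1.3 >= 1.0)

Transfer function: sign and Heaviside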
In [ ]:
%matplotlib inline
import matplotlib.pyplot as plt

# Plot the sign function as two horizontal segments
# (sampling the function with np.linspace would draw a spurious vertical segment at 0).
plt.hlines(y=-1, xmin=-5, xmax=0, color='red')
plt.hlines(y=1, xmin=0, xmax=5, color='red')
plt.hlines(y=0, xmin=-5, xmax=5, color='gray', linestyles='dotted')
plt.vlines(x=0, ymin=-2, ymax=2, color='gray', linestyles='dotted')
plt.title("Sign function")
plt.axis([-5, 5, -2, 2])
In [ ]:
# Plot the Heaviside function, again as two horizontal segments.
plt.hlines(y=0, xmin=-5, xmax=0, color='red')
plt.hlines(y=1, xmin=0, xmax=5, color='red')
plt.hlines(y=0, xmin=-5, xmax=5, color='gray', linestyles='dotted')
plt.vlines(x=0, ymin=-2, ymax=2, color='gray', linestyles='dotted')
plt.title("Heaviside function")
plt.axis([-5, 5, -2, 2])
Perceptron rule (update of the weights $\wij$):
$$ \wij(\itr + 1) = \wij(\itr) + \learnrate (\sigoutdes - \sigout) \sigin $$

The experiment below is parameterized by:

- the initial weights of the network's synapses,
- the number of associative neurons (A-units),
- the number of sensory units,
- the pattern to learn.
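Before setting up the experiment, here is a single update step of the rule above on one weight; every numeric value in this cell is hypothetical:

In [ ]:
# One step of the Perceptron rule on a single weight (all values hypothetical):
# current weight w_ij(t) = 0.5, learning rate eta = 1,
# desired output d_j = 1, actual output s_j = 0, input s_i = 1.
w_t, eta = 0.5, 1.
d_j, s_j, s_i = 1., 0., 1.
w_next = w_t + eta * (d_j - s_j) * s_i
print(w_next)   # -> 1.5: the weight grows because the unit should have fired but did not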
In [ ]:
%matplotlib inline

import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import itertools
In [ ]:
# https://github.com/jeremiedecock/neural-network-figures.git
import nnfigs.core as fig
fig.draw_neural_network();
In [ ]:
# Initial weights of the network's synapses:
# four input weights plus the threshold, stored as a fifth weight (see learn_examples below).
initial_weights = np.array([0., 0., 0., 0., 2.])

# Learning rate eta = 1
learning_rate = 1.
In [ ]:
class Log:
    """Record the state of every update step for later inspection."""

    def __init__(self):
        self.input_signal = []
        self.output_signal = []
        self.desired_output_signal = []
        self.error = []
        self.weights = []
        self.iteration = []
        self.current_iteration = 0

    def log(self, input_signal, output_signal, desired_output_signal, error, weights):
        self.input_signal.append(input_signal)
        self.output_signal.append(output_signal)
        self.desired_output_signal.append(desired_output_signal)
        self.error.append(error)
        self.weights.append(weights)
        self.iteration.append(self.current_iteration)

log = Log()
In [ ]:
def sign_function(x):
y = 1. if x >= 0. else -1.
return y
def heaviside_function(x):
y = 1. if x >= 0. else 0.
return y
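A quick check of the two transfer functions at a few sample points (this cell is an addition for illustration):

In [ ]:
# Evaluate both transfer functions at a negative point, zero, and a positive point.
for x in (-2., 0., 3.):
    print(x, sign_function(x), heaviside_function(x))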
In [ ]:
def activation_function(p):
    return heaviside_function(p)

def evaluate_network(weights, input_signal):  # TODO: find a better name
    p = np.sum(input_signal * weights)        # potential p = sum of s_i * w_ij
    output_signal = activation_function(p)
    return output_signal

def update_weights(weights, input_signal, desired_output_signal):
    output_signal = evaluate_network(weights, input_signal)
    error = desired_output_signal - output_signal
    weights = weights + learning_rate * error * input_signal   # Perceptron rule
    log.log(input_signal, output_signal, desired_output_signal, error, weights)
    return weights

def learn_examples(example_list, label_list, weights, num_iterations):
    for it in range(num_iterations):
        log.current_iteration = it
        for input_signal, desired_output_signal in zip(example_list, label_list):
            # Append a constant -1 input so that the threshold is learned as the
            # last weight: f(p - theta) becomes f(sum(s_i * w_i) + (-1) * theta).
            weights = update_weights(weights, np.array(input_signal + (-1,)), desired_output_signal)
    return weights
Reminder: $\sigin \in \{0, 1\}$
In [ ]:
# All 16 binary input patterns of length 4
example_list = tuple(reversed(tuple(itertools.product((0., 1.), repeat=4))))

# Pattern to learn: (1 0 0 1)
label_list = [1. if x == (1., 0., 0., 1.) else 0. for x in example_list]

print(example_list)
print(label_list)
In [ ]:
# Train for 5 iterations over the full example list
weights = learn_examples(example_list, label_list, initial_weights, 5)
weights
In [ ]:
# The loop variable is named `w` (not `weights`) so it does not overwrite the trained weights.
for it, inp, out, des, err, w in zip(log.iteration, log.input_signal, log.output_signal, log.desired_output_signal, log.error, log.weights):
    print(it, inp, out, des, err, w)
In [ ]:
# Error (d_j - s_j) of each individual update step
plt.plot(log.error)
In [ ]:
import pandas as pd

# Sum of |error| per iteration: it reaches 0 once every pattern is classified correctly.
df = pd.DataFrame(np.array([log.iteration, log.error]).T, columns=["Iteration", "Error"])
abs_err_per_it = abs(df).groupby(["Iteration"]).sum()
abs_err_per_it.plot(title="Sum of absolute errors per iteration")
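As a final check (an addition, not part of the original notebook), we can verify that the trained network fires on the target pattern and stays silent on another one; the counter-example (1, 1, 0, 1) is an arbitrary choice:

In [ ]:
# The trained network should output 1 only for the target pattern (1, 0, 0, 1),
# assuming training has converged after the 5 iterations above.
for pattern in [(1., 0., 0., 1.), (1., 1., 0., 1.)]:
    output = evaluate_network(weights, np.array(pattern + (-1,)))
    print(pattern, "->", output)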