In [105]:
import csv
import math


def get_samples(path='./data/examples.txt'):
    # read attribute pairs, one `<int>\t<int>` pair per line
    with open(path) as f:
        examples = [(int(a[0]), int(a[1]))
                    for a in csv.reader(f, delimiter='\t')]
    return examples


def get_results(path='./data/d.txt'):
    # read one expected output (integer) per line
    with open(path) as f:
        results = [int(a[0])
                   for a in csv.reader(f, delimiter='\t')]
    return results
class Perceptron():
    """
    n      – number of neurons
    m      – number of weights per neuron (= number of inputs)
    x(m)   – one training sample, a sequence of numbers
    W(n,m) – weight matrix
    b(n)   – bias vector (additional weights)
    g      – slope coefficient of the activation function
    y(n)   – produced output, a sequence of numbers
    e(n)   – sample error
    nn     – learning rate
    """

    def __init__(self, n, m, W, b, g, nn):
        self.n = n
        self.m = m
        self.w = W
        self.b = b
        self.g = g
        self.nn = nn
        self.grad = [0] * n
        self.y = []

    def run_slp_single(self, x):
        y = []
        for i in range(self.n):
            net = self.b[i]
            for k in range(self.m):
                net += x[k] * self.w[i][k]
            # sigmoid activation function with slope coefficient g
            y.append(1 / (1 + math.exp(-net / self.g)))
            # y.append(1 if net > 0.5 else 0)  # hard-threshold alternative
        return y

    def run_mlp_single(self, x):
        self.y = []  # reset so repeated calls do not accumulate outputs
        for i in range(self.n):
            net = self.b[i]
            for k in range(self.m):
                net += x[k] * self.w[i][k]
            # sigmoid activation function with slope coefficient g
            self.y.append(1 / (1 + math.exp(-net / self.g)))
        return self.y

    def train_slp_single(self, x, d):
        e = []
        y = self.run_slp_single(x)
        for i in range(self.n):
            e.append(d - y[i])
            # delta rule: the sigmoid's derivative is y * (1 - y) / g
            self.grad[i] = e[i] / self.g * y[i] * (1 - y[i])
            for k in range(self.m):
                self.w[i][k] += self.nn * x[k] * self.grad[i]
            self.b[i] += self.nn * self.grad[i]
        return e

    def train_slp(self, x, d):
        p = len(x)
        ee = 0.01        # target mean squared error
        err = 100000
        epoch = 0
        maxepoch = 500
        while epoch < maxepoch and err > ee:
            epoch += 1
            err = 0
            for k in range(p):
                e = self.train_slp_single(x[k], d[k])
                for j in range(self.n):
                    err += math.pow(e[j], 2)
            err /= p * self.n
        return self.w, self.b, epoch
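To make the forward pass concrete, here is a quick hand-checkable example; the weights are made up for illustration, not taken from the lab data. With W = [[1, 1]], b = [-1.5] and x = (1, 1), the net input is 0.5, so the sigmoid with g = 0.2 gives 1 / (1 + exp(-2.5)) ≈ 0.924:
check = Perceptron(n=1, m=2, W=[[1, 1]], b=[-1.5], g=0.2, nn=0.1)
print(check.run_slp_single((1, 1)))  # [0.924...]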
In [106]:
samples = get_samples('./ld2/data/examples.txt')
results = get_results('./ld2/data/d.txt')
perceptron = Perceptron(
    n=1,
    m=2,
    W=[[-0.3, -0.3]],   # one neuron, two input weights
    b=[1],
    g=0.2,
    nn=0.1
)
weights, b, epoch = perceptron.train_slp(samples, results)
print('W = {}, {}; b = {}; epoch = {}'.format(weights[0][0], weights[0][1], b, epoch))
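A quick way to inspect what the trained neuron does is to run it back over the training samples; a minimal sketch, assuming `samples` and `results` are still in scope:
for x, d in zip(samples, results):
    y = perceptron.run_slp_single(x)[0]
    print('x = {}, d = {}, y = {:.3f}'.format(x, d, y))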
In [107]:
samples = get_samples('./ld3/data/examples_boolean.txt')
results = get_results('./ld3/data/d_notand.txt')


class MLPerceptron():
    def __init__(self, HIDDEN_COUNT, INPUT_SIZE, W, b, g, nn):
        # build the two layers
        self.HIDDEN = Perceptron(
            n=HIDDEN_COUNT,
            m=INPUT_SIZE,
            W=W,
            b=b,
            g=g,
            nn=nn)
        self.LAST = Perceptron(
            n=1,
            m=HIDDEN_COUNT,
            W=[[i[0] for i in W]],  # output weights seeded from the first column of W
            b=b[:1],  # copy, so the output bias is not aliased with the hidden biases
            g=g,
            nn=nn
        )
        self.grad = [0] * 1

    def run_mlp_single(self, x):
        return self.LAST.run_slp_single(self.HIDDEN.run_slp_single(x))

    def train_last_single(self, x, d):
        return self.LAST.train_slp_single(x, d)

    def train_hidden_single(self, x, NEXT):
        y = self.HIDDEN.run_slp_single(x)
        for j in range(self.HIDDEN.n):
            # propagate the error back from the next layer
            error = 0
            for k in range(NEXT.n):
                error += NEXT.grad[k] * NEXT.w[k][j]
            self.HIDDEN.grad[j] = error / self.HIDDEN.g * y[j] * (1 - y[j])
            for i in range(self.HIDDEN.m):
                self.HIDDEN.w[j][i] += self.HIDDEN.nn * x[i] * self.HIDDEN.grad[j]
            self.HIDDEN.b[j] += self.HIDDEN.nn * self.HIDDEN.grad[j]

    def train_mlp(self, x, d):
        err = 10000
        epoch = 0
        maxepoch = 500
        ee = 0.01
        while epoch < maxepoch and err > ee:
            epoch += 1
            err = 0
            for k in range(len(x)):
                # train the output layer first, then reuse its gradient
                # to update the hidden layer
                e = self.train_last_single(
                    self.HIDDEN.run_slp_single(x[k]), d[k])
                self.train_hidden_single(x[k], self.LAST)
                for j in range(len(e)):
                    err += math.pow(e[j], 2)
            err /= len(x) * self.LAST.n
        return (self.HIDDEN.w, self.HIDDEN.b), (self.LAST.w, self.LAST.b), epoch


ml_perceptron = MLPerceptron(
    HIDDEN_COUNT=4,
    INPUT_SIZE=2,
    W=[[-0.3, 0.3], [-0.3, 0.3], [-0.3, 0.3], [-0.3, 0.3]],
    b=[0, 0, 0, 0],
    g=0.2,
    nn=0.1
)
hidden, last, epochs = ml_perceptron.train_mlp(samples, results)
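For reference, `train_hidden_single` above is the textbook backpropagation chain rule written out by hand: each hidden neuron's gradient is the output-layer gradient propagated back through the output weights, scaled by the derivative of the sigmoid,

    grad_hidden[j] = (sum over k of grad_last[k] * w_last[k][j]) * y[j] * (1 - y[j]) / g

where the division by g comes from the slope coefficient in 1 / (1 + exp(-net / g)).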
In [108]:
print("HIDDEN -> \nW = {}, \nb = {}\n".format(
hidden[0], hidden[1]
))
print("LAST -> \nW = {}, \nb = {}\n".format(
last[0], last[1]
))
print("EPOCH = {}".format(epochs))
In [109]:
samples = get_samples('./ld3/data/examples_boolean.txt')
results = get_results('./ld3/data/d_xor.txt')
ml_perceptron = MLPerceptron(
    HIDDEN_COUNT=4,
    INPUT_SIZE=2,
    W=[[-0.3, 0.3], [-0.3, 0.3], [-0.3, 0.3], [-0.3, 0.3]],
    b=[0, 0, 0, 0],
    g=0.2,
    nn=0.1
)
hidden, last, epochs = ml_perceptron.train_mlp(samples, results)
In [110]:
print("HIDDEN -> \nW = {}, \nb = {}\n".format(
hidden[0], hidden[1]
))
print("LAST -> \nW = {}, \nb = {}\n".format(
last[0], last[1]
))
print("EPOCH = {}".format(epochs))
In [111]:
def normalize(n):
    if n == 1:
        return 0
    elif n == 2:
        return 0.5
    else:
        return 1


def get_example_data():
    # read samples line by line, `<int>\t<int>\t...\t<int>` format
    with open('./ld3/data/examples.txt') as f:
        data = [tuple(int(x) for x in d)
                for d in csv.reader(f, delimiter='\t')]
    data = [tuple(normalize(i) for i in row) for row in data]
    return data


samples = get_example_data()


def get_results(path='./data/d.txt'):
    with open(path) as f:
        results = [int(a[0])
                   for a in csv.reader(f, delimiter='\t')]
    # encode the expected outputs with the same normalization
    return [normalize(i) for i in results]


results = get_results('./ld3/data/d.txt')
samples
Out[111]:
In [112]:
results
Out[112]:
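As a quick check of the encoding, `normalize` maps the three attribute values onto the unit interval; a throwaway sketch:
for v in (1, 2, 3):
    print('{} -> {}'.format(v, normalize(v)))  # 1 -> 0, 2 -> 0.5, 3 -> 1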
In [113]:
ml_perceptron = MLPerceptron(
    HIDDEN_COUNT=4,
    INPUT_SIZE=4,
    W=[[-0.3, 0.3, -0.3, 0.3], [-0.3, 0.3, -0.3, 0.3],
       [-0.3, 0.3, -0.3, 0.3], [-0.3, 0.3, -0.3, 0.3]],
    b=[0, 0, 0, 0],
    g=0.2,
    nn=0.1
)
hidden, last, epochs = ml_perceptron.train_mlp(samples, results)
In [114]:
print("HIDDEN -> \nW = {}, \nb = {}\n".format(
hidden[0], hidden[1]
))
print("LAST -> \nW = {}, \nb = {}\n".format(
last[0], last[1]
))
print("EPOCH = {}".format(epochs))
2. The same experiment with targets encoded as 0.1 / 0.5 / 0.9
In [115]:
def normalize(n):
    # targets 0.1 / 0.5 / 0.9 stay inside the sigmoid's reachable range
    if n == 1:
        return 0.1
    elif n == 2:
        return 0.5
    else:
        return 0.9


def get_example_data():
    # read samples line by line, `<int>\t<int>\t...\t<int>` format
    with open('./ld3/data/examples.txt') as f:
        data = [tuple(int(x) for x in d)
                for d in csv.reader(f, delimiter='\t')]
    data = [tuple(normalize(i) for i in row) for row in data]
    return data


samples = get_example_data()


def get_results(path='./data/d.txt'):
    with open(path) as f:
        results = [int(a[0])
                   for a in csv.reader(f, delimiter='\t')]
    # encode the expected outputs with the same normalization
    return [normalize(i) for i in results]


results = get_results('./ld3/data/d.txt')
samples
samples
Out[115]:
In [116]:
results
Out[116]:
In [117]:
ml_perceptron = MLPerceptron(
    HIDDEN_COUNT=4,
    INPUT_SIZE=4,
    W=[[-0.3, 0.3, -0.3, 0.3], [-0.3, 0.3, -0.3, 0.3],
       [-0.3, 0.3, -0.3, 0.3], [-0.3, 0.3, -0.3, 0.3]],
    b=[0, 0, 0, 0],
    g=0.2,
    nn=0.1
)
hidden, last, epochs = ml_perceptron.train_mlp(samples, results)
In [118]:
print("HIDDEN -> \nW = {}, \nb = {}\n".format(
hidden[0], hidden[1]
))
print("LAST -> \nW = {}, \nb = {}\n".format(
last[0], last[1]
))
print("EPOCH = {}".format(epochs))