In [1]:
import csv
def normalize(n):
    """Map a raw label to a normalized value: 1 -> 0, 2 -> 0.5, anything else -> 1."""
    return {1: 0, 2: 0.5}.get(n, 1)
def get_example_data(path='./ld7/examples.txt'):
    """Read tab-separated integer samples from `path` and normalize every field.

    Each line has the form `\\d\\t\\d\\t...\\d\\n`.  Returns a list of tuples,
    one per line, with each value mapped through `normalize`.

    The path is now a parameter (default preserves the original behavior);
    the redundant `map(lambda x: int(x), ...)` was replaced with a direct
    `int(x)` comprehension, and the row variable is no longer named `col`.
    """
    # read the examples line by line
    with open(path) as f:
        rows = [tuple(int(x) for x in row)
                for row in csv.reader(f, delimiter='\t')]
    return [tuple(normalize(v) for v in row) for row in rows]
# Load and normalize the training samples (tuples of values in {0, 0.5, 1})
samples = get_example_data()
def get_results(path='./data/d.txt'):
    """Read the first tab-separated field of each line in `path`, as a
    normalized value (see `normalize`)."""
    with open(path) as f:
        reader = csv.reader(f, delimiter='\t')
        return [normalize(int(row[0])) for row in reader]
# Expected outputs for each sample, normalized with the same scheme as the inputs
results = get_results('./ld7/d.txt')
samples  # display the normalized training samples
Out[1]:
In [2]:
from ld2 import Perceptron
from ld4 import kohonen
import numpy as np
import math
In [3]:
# define the RBF functions
def RBFunction(NET, sig):
    """Gaussian radial basis function: exp(-NET^2 / (2 * sig^2)).

    Bug fix: the original expression `-NET**2 / 2 * sig**2` evaluates as
    `(-NET**2 / 2) * sig**2` due to operator precedence, i.e. it MULTIPLIED
    by sig^2 instead of dividing by (2 * sig^2).  Parentheses added so the
    standard Gaussian RBF is computed.
    """
    return math.exp(-math.pow(NET, 2) / (2 * math.pow(sig, 2)))
def RBFneuron(x, w, sig):
    """Activation of one RBF neuron: Gaussian of the Euclidean distance ||w - x||.

    Bug fix: the original computed `np.sum(np.sqrt(np.power(w - x, 2)))`,
    which is the sum of absolute differences (an L1 norm), while the
    `||w-x||` comment indicates the Euclidean (L2) norm was intended:
    sqrt(sum((w - x)^2)).  The sqrt and sum are swapped accordingly.
    """
    # ||w - x||  (Euclidean norm)
    NET = np.sqrt(np.sum(np.power(w - x, 2)))
    return RBFunction(NET, sig)
In [4]:
# constants
E = np.array(samples)  # training inputs (normalized samples) as a numpy array
D = results            # expected output for each sample
maxepochs=1000  # epoch cap passed to kohonen() and perceptron training
sig1=1.0        # kohonen() parameter -- presumably initial neighbourhood width; confirm in ld4
sig2=0.2        # kohonen() parameter -- presumably final neighbourhood width; confirm in ld4
sig=0.5         # RBF width used by RBFneuron
eta1=0.1        # kohonen() parameter -- presumably initial learning rate; confirm in ld4
eta2=0.01       # kohonen() parameter -- presumably final learning rate; confirm in ld4
eta=0.1         # perceptron learning rate (passed as nn=)
delta=0.01      # kohonen() parameter -- presumably convergence threshold; confirm in ld4
ccount=4        # number of cluster centers / RBF neurons
g=0.2           # perceptron parameter (passed as g=) -- presumably gain/threshold; confirm in ld2
In [5]:
# Train the RBF layer (Kohonen clustering) to obtain the center weights W
W = kohonen(E, ccount, sig1, sig2, eta1, eta2, delta, maxepochs=maxepochs)
W  # display the learned centers
Out[5]:
In [7]:
# Build the transformed training set E2: for each sample, its activation
# on every RBF neuron (one value per center in W)
E2 = [[RBFneuron(sample, center, sig) for center in W] for sample in E]
E2  # display the transformed set
Out[7]:
In [10]:
Y = []
# For every sample, compute its RBF-transformed representation...
for e in E:
    x = []
    for w in W:
        x.append(RBFneuron(e, w, sig))
    # ...then run the transformed sample through the output (perceptron) layer.
    # NOTE(review): W2 is NOT defined by any earlier cell in this notebook --
    # this cell only runs via hidden kernel state (W2 is a local inside
    # RBFnet below) and will fail under Restart & Run All.
    # NOTE(review): the Perceptron is re-created with identical arguments on
    # every iteration, and n=1 here vs n=4 in RBFnet's training perceptron --
    # confirm intended semantics against ld2.Perceptron.
    perceptron = Perceptron(
        n=1,
        m=4,
        W=W2,
        b=[0]*4,
        g=g,
        nn=eta
    )
    Y.append(perceptron.run_slp_single(x))
Y  # display the network outputs
Out[10]:
In [13]:
D  # display the expected outputs, for comparison with Y
Out[13]:
In [14]:
# RBFnet implementation
def RBFnet(E, D, maxepochs=1000, sig1=1.0, sig2=0.2, sig=0.5, eta1=0.1,
           eta2=0.01, eta=0.1, delta=0.01, ccount=4, g=0.2):
    """Train a radial-basis-function network on samples E with targets D.

    Pipeline:
      1. cluster E with kohonen() to obtain the RBF center weights W;
      2. map every sample through the RBF layer to build the transformed set E2;
      3. train a single-layer perceptron on (E2, D), yielding output weights W2;
      4. run each transformed sample through a perceptron using W2.

    Returns the list Y of network outputs, one per sample in E.
    The semantics of sig1/sig2/eta1/eta2/delta/g live in ld4.kohonen and
    ld2.Perceptron -- they are passed through unchanged; confirm there.
    """
    # Train the RBF layer, obtaining the center weights
    W = kohonen(E, ccount, sig1, sig2, eta1, eta2, delta, maxepochs=maxepochs)
    print(W)  # NOTE(review): debugging leftover -- consider removing or returning W
    # Build the transformed training set E2 (RBF activations per sample)
    E2 = []
    for e in E:
        tmp = []
        for w in W:
            tmp.append(RBFneuron(e, w, sig))
        E2.append(tmp)
    # Train the output layer on E2 and D
    perceptron = Perceptron(
        n=4,
        m=4,
        W=[[-0.3, -0.3, -0.3, -0.3] for i in range(4)],
        b=[0]*4,
        g=g,
        nn=eta
    )
    W2, _, _ = perceptron.train_slp(E2, D, maxepoch=maxepochs)
    Y = []
    # For every sample, compute its RBF-transformed representation...
    for e in E:
        x = []
        for w in W:
            x.append(RBFneuron(e, w, sig))
        # ...and run it through the trained output layer.
        # NOTE(review): a new Perceptron is constructed with identical
        # arguments on every loop iteration (loop-invariant), and n=1 here
        # vs n=4 in the training perceptron above -- confirm against
        # ld2.Perceptron whether both are intended.
        perceptron = Perceptron(
            n=1,
            m=4,
            W=W2,
            b=[0]*4,
            g=g,
            nn=eta
        )
        Y.append(perceptron.run_slp_single(x))
    return Y
In [ ]: