Kristaps Taube, kt11023

ld7


In [1]:
import csv

def normalize(n):
    if n == 1:
        return 0
    elif n == 2:
        return 0.5
    else:
        return 1

def get_example_data():
    # read example line by line
    with open('./ld7/examples.txt') as f:
        # `\d\\t\d\\t ... \d\\n` format
        data = [tuple(i for i in map(lambda x: int(x), d))
                for d in csv.reader(f, delimiter='\t')]
    

    data = [tuple(normalize(i) for i in col) for col in data]
    return data

samples = get_example_data()

def get_results(path='./data/d.txt'):
    with open(path) as f:
        results = [int(a[0])
                   for a in csv.reader(f, delimiter='\t')]
    data = [normalize(i) for i in results]
    return data

results = get_results('./ld7/d.txt')
samples


Out[1]:
[(0, 0.5, 0, 0),
 (0.5, 0.5, 0, 0.5),
 (0.5, 0, 0, 0.5),
 (0.5, 0, 0, 0),
 (0.5, 0, 0, 1),
 (0.5, 0.5, 0.5, 1),
 (0, 0, 0, 0),
 (0, 0, 0.5, 1),
 (1, 0, 0, 1),
 (1, 0.5, 0.5, 1),
 (1, 0.5, 0, 0),
 (1, 0.5, 0, 0.5),
 (1, 0.5, 0, 1),
 (0, 0.5, 0, 0.5)]

In [2]:
from ld2 import Perceptron
from ld4 import kohonen

import numpy as np
import math

In [3]:
# definējam RBF funkcijas
def RBFunction(NET, sig):
    return math.exp(-math.pow(NET, 2) / 2 * math.pow(sig, 2))

def RBFneuron(x, w, sig):
    # ||w-x||
    NET = np.sum(np.sqrt(np.power(w - x, 2)))
    return RBFunction(NET, sig)

In [4]:
# konstantes
E = np.array(samples)
D = results
maxepochs=1000
sig1=1.0
sig2=0.2
sig=0.5
eta1=0.1
eta2=0.01
eta=0.1
delta=0.01
ccount=4
g=0.2

In [5]:
# RBF slāņa apmācība, iegūstot svarus
W = kohonen(E, ccount, sig1, sig2, eta1, eta2, delta, maxepochs=maxepochs)
W


Out[5]:
[array([ 1. ,  0.5,  0. ,  0. ]),
 array([ 0.        ,  0.49998038,  0.        ,  0.49998038]),
 array([  1.96232943e-05,   1.96225241e-05,   4.99999999e-01,
          1.00000000e+00]),
 array([ 0.99998038,  0.        ,  0.        ,  0.99996075])]

In [7]:
# transformētā apmācību kopuma E2 izveide
E2 = []
for e in E:
    tmp = []
    for w in W:
        tmp.append(RBFneuron(e, w, sig))
    E2.append(tmp)
E2


Out[7]:
[[0.8824969025845955,
  0.9692332345696469,
  0.6065306597126334,
  0.4578502070355083],
 [0.7548396022070005, 1.0, 0.7548396017710142, 0.6065485124399369],
 [0.7548396019890073,
  0.8824969024146889,
  0.7548396024250106,
  0.8825098902502566],
 [0.8824969025845955,
  0.7548507110371456,
  0.6065306601797513,
  0.7548562656225203],
 [0.6065306597126334,
  0.7548396017710142,
  0.8824969029244217,
  0.9692308570822938],
 [0.6065306597126334,
  0.7548284928137063,
  0.8825055608655519,
  0.7548340475262487],
 [0.7548396019890073,
  0.8825055610354533,
  0.754828493031702,
  0.6065485126734868],
 [0.32465246735834974,
  0.7548396017710142,
  0.9999999998074631,
  0.7548340475262487],
 [0.7548396019890073,
  0.6065306594790836,
  0.7548396024250106,
  0.9999999995668034],
 [0.3246620235628411, 0.7548396017710142, 1.0, 0.7548340479622545],
 [1.0, 0.7548396022070005, 0.3246620235628411, 0.7548451564199765],
 [1.0, 0.7548396022070005, 0.3246620235628411, 0.7548451564199765],
 [0.7548451564199765, 0.6065485124399369, 0.7548340479622545, 1.0],
 [0.7548396019890073,
  0.9999999998074707,
  0.7548396019890073,
  0.6065485126734868]]

izejas slāņa apmācība uz E2 un D

perceptron = Perceptron( n=4, m=4, W=[[-0.3, -0.3, -0.3, -0.3] for i in range(4)], b=[0]*4, g=g, nn=eta )

W2, , = perceptron.train_slp(E2, D, maxepoch=maxepochs) W2


In [10]:
Y = []
# pa visiem paraugiem iegūst transformēto paraugu
for e in E:
    x = []
    for w in W:
        x.append(RBFneuron(e, w, sig))
    # transformēto paraugu darbina izejas slānī
    perceptron = Perceptron(
        n=1,
        m=4,
        W=W2,
        b=[0]*4,
        g=g,
        nn=eta
    )
    Y.append(perceptron.run_slp_single(x))
Y


Out[10]:
[[0.9985083622301664],
 [0.9561879671192411],
 [0.25462561368768044],
 [0.7547613577211222],
 [0.004271957496089647],
 [0.04051434189896449],
 [0.8664131470149564],
 [0.0020588733979178377],
 [0.005610005958107373],
 [0.002059015874170425],
 [0.9867703784706617],
 [0.9867703784706617],
 [0.005611516327680499],
 [0.9561879667896502]]

In [13]:
D


Out[13]:
[1, 1, 0.5, 1, 0, 0, 1, 0.5, 0, 0, 1, 0.5, 0, 1]

Secinājumi

Diemžēl D !== Y. Skaitļos gan ir redzama korelācija ar D. Varbūt nepieciešama normalizācija?


In [14]:
# RBFnet implementācija
def RBFnet(E, D, maxepochs=1000, sig1=1.0, sig2=0.2, sig=0.5, eta1=0.1,
           eta2=0.01, eta=0.1, delta=0.01, ccount=4, g=0.2):
    # RBF slāņa apmācība, iegūstot svarus
    W = kohonen(E, ccount, sig1, sig2, eta1, eta2, delta, maxepochs=maxepochs)
    print(W)
    E2 = []
    for e in E:
        tmp = []
        for w in W:
             tmp.append(RBFneuron(e, w, sig))
        E2.append(tmp)

    # izejas slāņa apmācība uz E2 un D
    perceptron = Perceptron(
        n=4,
        m=4,
        W=[[-0.3, -0.3, -0.3, -0.3] for i in range(4)],
        b=[0]*4,
        g=g,
        nn=eta
    )

    W2, _, _ = perceptron.train_slp(E2, D, maxepoch=maxepochs)

    Y = []
    # pa visiem paraugiem iegūst transformēto paraugu
    for e in E:
        x = []
        for w in W:
            x.append(RBFneuron(e, w, sig))
        # transformēto paraugu darbina izejas slānī
        perceptron = Perceptron(
            n=1,
            m=4,
            W=W2,
            b=[0]*4,
            g=g,
            nn=eta
        )
        Y.append(perceptron.run_slp_single(x))
        return Y

In [ ]: