Kristaps Taube, kt11023

LD2 & LD3


In [105]:
import csv
import math


def get_samples(path='./data/examples.txt'):
    # read attributes
    with open(path) as f:
        # `\d\\t\d\\n` format
        example = [(int(a[0]), int(a[1]))
                   for a in csv.reader(f, delimiter='\t')]
    return example


def get_results(path='./data/d.txt'):
    with open(path) as f:
        results = [int(a[0])
                   for a in csv.reader(f, delimiter='\t')]
    return results


class Perceptron():
    """
    n – neironu skaits
    m – svaru skaits neironos, arī ieeju skaits
    x(m) – viens apmācības paraugs – skaitļu virkne
    W(n,m) – svaru matrica
    b(n) – papildus svaru matrica
    g – slīpuma koeficients
    y(n) – iegūtais rezultāts – skaitļu virkne
    e(n) - paraugu kļūda
    nn - apmācības koef
    """

    def __init__(self, n, m, W, b, g, nn):
        self.n = n
        self.m = m
        self.w = W
        self.b = b
        self.g = g
        self.nn = nn

        self.grad = [0] * n
        self.y = []

    def run_slp_single(self, x):
        y = []
        for i in range(self.n):
            net = self.b[i]
            for k in range(self.m):
                net += x[k] * self.w[i][k]
            # lineārās aktivitātes funkcija
            y.append(1 / (1 + math.exp(-net / self.g)))
            # y.append(1 if net > 0.5 else 0)
        return y

    def run_mlp_single(self, x):
        for i in range(self.n):
            net = self.b[i]
            for k in range(self.m):
                net += x[k] * self.w[i][k]
            # lineārās aktivitātes funkcija
            self.y.append(1 / (1 + math.exp(-net / self.g)))
            # y.append(1 if net > 0.5 else 0)
        return self.y

    def train_slp_single(self, x, d):
        e = []
        y = self.run_slp_single(x)
        for i in range(self.n):
            e.append(d - y[i])
            self.grad[i] = e[i] / self.g * y[i] * (1 - y[i])
            for k in range(self.m):
                self.w[i][k] += self.nn * x[k] * self.grad[i]
            self.b[i] += self.nn * self.grad[i]
        return e

    def train_slp(self, x, d):
        p = len(x)
        ee = 0.01
        err = 100000
        epoch = 0
        maxepoch = 500
        while epoch < maxepoch and err > ee:
            epoch += 1
            err = 0
            for k in range(p):
                e = self.train_slp_single(x[k], d[k])
                for j in range(self.n):
                    err += math.pow(e[j], 2)
            err /= p * self.n
        return self.w, self.b, epoch

In [106]:
samples = get_samples('./ld2/data/examples.txt')
results = get_results('./ld2/data/d.txt')

preceptron = Perceptron(
        n=1,
        m=2,
        W=[[-0.3, -0.3], [-0.3, -0.3]],
        b=[1],
        g=0.2,
        nn=0.1
    )

weights, b, epoch = preceptron.train_slp(samples, results)
print('W = {}, {}; b = {}; epoch = {}'.format(weights[0][0], weights[0][1], b, epoch))


W = -0.82775694571, -0.821318879473; b = [1.2453628917329158]; epoch = 86

LD3 MLP realizācija palīgā ņemot funkcionalitāti no LD2

Izveidojam MLP ar diviem slāņiem. Izmantojam un papildinām Perceptron implementāciju no LD2.


In [107]:
samples = get_samples('./ld3/data/examples_boolean.txt')
results = get_results('./ld3/data/d_notand.txt')


class MLPerceptron():
    def __init__(self, HIDDEN_COUNT, INPUT_SIZE, W, b, g, nn):
        # izveidojam divs slāņus
        self.HIDDEN = Perceptron(
            n=HIDDEN_COUNT,
            m=INPUT_SIZE,
            W=W,
            b=b,
            g=g,
            nn=nn)
        self.LAST = Perceptron(
            n=1,
            m=HIDDEN_COUNT,
            W=[[i[0] for i in W]],
            b=b,
            g=g,
            nn=nn
        )

        self.grad = [0] * 1

    def run_mlp_single(self, x):
        return self.LAST.run_slp_single(self.HIDDEN.run_slp_single(x))

    def train_last_single(self, x, d):
        return self.LAST.train_slp_single(x, d)

    def train_hidden_single(self, x, NEXT):
        y = self.HIDDEN.run_slp_single(x)
        for j in range(self.HIDDEN.n):
            error = 0
            for k in range(NEXT.n):
                error += NEXT.grad[k] * NEXT.w[k][j]
            self.HIDDEN.grad[j] = error / self.HIDDEN.g * y[j] * (1 - y[j])
            for i in range(self.HIDDEN.m):
                self.HIDDEN.w[j][i] += self.HIDDEN.nn * x[i] * self.HIDDEN.grad[j]
            self.HIDDEN.b[j] += self.HIDDEN.nn + self.HIDDEN.grad[j]

    def train_mlp(self, x, d):
        err = 10000
        epoch = 0

        maxepoch = 500
        ee = 0.01

        while epoch < maxepoch and err > ee:
            epoch += 1
            err = 0

            for k in range(len(x)):
                self.run_mlp_single(x[k])
                e = self.train_last_single(
                    self.HIDDEN.run_slp_single(x[k]), d[k])
                self.train_hidden_single(x[k], self.LAST)
                for j in range(len(e)):
                    err += math.pow(e[j], 2)
            err /= len(x) * len(e)
        return (self.HIDDEN.w, self.HIDDEN.b), (self.LAST.w, self.LAST.b), epoch

ml_preceptron = MLPerceptron(
    HIDDEN_COUNT=4,
    INPUT_SIZE=2,
    W=[[-0.3, 0.3], [-0.3, 0.3], [-0.3, 0.3], [-0.3, 0.3]],
    b=[0, 0, 0, 0],
    g=0.2,
    nn=0.1
)
hidden, last, epochs = ml_preceptron.train_mlp(samples, results)

In [108]:
print("HIDDEN -> \nW = {}, \nb = {}\n".format(
    hidden[0], hidden[1]
))
print("LAST -> \nW = {}, \nb = {}\n".format(
    last[0], last[1]
))
print("EPOCH = {}".format(epochs))


HIDDEN -> 
W = [[-0.33167752246650334, 0.30386434958837544], [-0.3262267180310171, 0.3061548099987219], [-0.3262267180310171, 0.3061548099987219], [-0.3262267180310171, 0.3061548099987219]], 
b = [199.76642549170077, 199.46748238233556, 199.46748238233556, 199.46748238233556]

LAST -> 
W = [[-0.12520089080600166, -0.14049374521892644, -0.14049374521892644, -0.14049374521892644]], 
b = [199.76642549170077, 199.46748238233556, 199.46748238233556, 199.46748238233556]

EPOCH = 500

Augstāk rezultāti examples_boolean.txt, d_notand.txt

Darbināt neironu tīklu ar nelineāru problēmu XOR (examples_boolean.txt, d_xor.txt).


In [109]:
samples = get_samples('./ld3/data/examples_boolean.txt')
results = get_results('./ld3/data/d_xor.txt')

ml_preceptron = MLPerceptron(
    HIDDEN_COUNT=4,
    INPUT_SIZE=2,
    W=[[-0.3, 0.3], [-0.3, 0.3], [-0.3, 0.3], [-0.3, 0.3]],
    b=[0, 0, 0, 0],
    g=0.2,
    nn=0.1
)

hidden, last, epochs = ml_preceptron.train_mlp(samples, results)

In [110]:
print("HIDDEN -> \nW = {}, \nb = {}\n".format(
    hidden[0], hidden[1]
))
print("LAST -> \nW = {}, \nb = {}\n".format(
    last[0], last[1]
))
print("EPOCH = {}".format(epochs))


HIDDEN -> 
W = [[-0.3350587845993947, 0.30287167889278327], [-0.3369862267724041, 0.30556717411521006], [-0.3369862267724041, 0.30556717411521006], [-0.3369862267724041, 0.30556717411521006]], 
b = [199.6767361930687, 199.65084675389429, 199.65084675389429, 199.65084675389429]

LAST -> 
W = [[-0.35120875115707406, -0.35486131467749377, -0.35486131467749377, -0.35486131467749377]], 
b = [199.6767361930687, 199.65084675389429, 199.65084675389429, 199.65084675389429]

EPOCH = 500

Darbināt neironu tīklu ar riska uzdevumu (examples.txt, d.txt), veicot normalizāciju divos veidos

1.


In [111]:
def normalize(n):
    if n == 1:
        return 0
    elif n == 2:
        return 0.5
    else:
        return 1

def get_example_data():
    # read example line by line
    with open('./ld3/data/examples.txt') as f:
        # `\d\\t\d\\t ... \d\\n` format
        data = [tuple(i for i in map(lambda x: int(x), d))
                for d in csv.reader(f, delimiter='\t')]
    

    data = [tuple(normalize(i) for i in col) for col in data]
    return data

samples = get_example_data()

def get_results(path='./data/d.txt'):
    with open(path) as f:
        results = [int(a[0])
                   for a in csv.reader(f, delimiter='\t')]
    data = [normalize(i) for i in results]
    return data

results = get_results('./ld3/data/d.txt')
samples


Out[111]:
[(0, 0.5, 0, 0),
 (0.5, 0.5, 0, 0.5),
 (0.5, 0, 0, 0.5),
 (0.5, 0, 0, 0),
 (0.5, 0, 0, 1),
 (0.5, 0.5, 0.5, 1),
 (0, 0, 0, 0),
 (0, 0, 0.5, 1),
 (1, 0, 0, 1),
 (1, 0.5, 0.5, 1),
 (1, 0.5, 0, 0),
 (1, 0.5, 0, 0.5),
 (1, 0.5, 0, 1),
 (0, 0.5, 0, 0.5)]

In [112]:
results


Out[112]:
[1, 1, 0.5, 1, 0, 0, 1, 0.5, 0, 0, 1, 0.5, 0, 1]

In [113]:
ml_preceptron = MLPerceptron(
    HIDDEN_COUNT=4,
    INPUT_SIZE=4,
    W=[[-0.3, 0.3, -0.3, 0.3], [-0.3, 0.3, -0.3, 0.3], [-0.3, 0.3, -0.3, 0.3], [-0.3, 0.3, -0.3, 0.3]],
    b=[0, 0, 0, 0],
    g=0.2,
    nn=0.1
)

hidden, last, epochs = ml_preceptron.train_mlp(samples, results)

In [114]:
print("HIDDEN -> \nW = {}, \nb = {}\n".format(
    hidden[0], hidden[1]
))
print("LAST -> \nW = {}, \nb = {}\n".format(
    last[0], last[1]
))
print("EPOCH = {}".format(epochs))


HIDDEN -> 
W = [[-0.30589747421095165, 0.2976379380462667, -0.2997563012181855, 0.3011470254480841], [-0.30557744508256024, 0.29718754740609876, -0.299864845292814, 0.3028288624070304], [-0.30557744508256024, 0.29718754740609876, -0.299864845292814, 0.3028288624070304], [-0.30557744508256024, 0.29718754740609876, -0.299864845292814, 0.3028288624070304]], 
b = [699.8732435536779, 699.7669138046491, 699.7669138046491, 699.7669138046491]

LAST -> 
W = [[-0.2508908207593402, -0.25268139702133263, -0.25268139702133263, -0.25268139702133263]], 
b = [699.8732435536779, 699.7669138046491, 699.7669138046491, 699.7669138046491]

EPOCH = 500

2.


In [115]:
def normalize(n):
    if n == 1:
        return 0.1
    elif n == 2:
        return 0.5
    else:
        return 0.9

def get_example_data():
    # read example line by line
    with open('./ld3/data/examples.txt') as f:
        # `\d\\t\d\\t ... \d\\n` format
        data = [tuple(i for i in map(lambda x: int(x), d))
                for d in csv.reader(f, delimiter='\t')]
    

    data = [tuple(normalize(i) for i in col) for col in data]
    return data

samples = get_example_data()

def get_results(path='./data/d.txt'):
    with open(path) as f:
        results = [int(a[0])
                   for a in csv.reader(f, delimiter='\t')]
    data = [normalize(i) for i in results]
    return data

results = get_results('./ld3/data/d.txt')
samples


Out[115]:
[(0.1, 0.5, 0.1, 0.1),
 (0.5, 0.5, 0.1, 0.5),
 (0.5, 0.1, 0.1, 0.5),
 (0.5, 0.1, 0.1, 0.1),
 (0.5, 0.1, 0.1, 0.9),
 (0.5, 0.5, 0.5, 0.9),
 (0.1, 0.1, 0.1, 0.1),
 (0.1, 0.1, 0.5, 0.9),
 (0.9, 0.1, 0.1, 0.9),
 (0.9, 0.5, 0.5, 0.9),
 (0.9, 0.5, 0.1, 0.1),
 (0.9, 0.5, 0.1, 0.5),
 (0.9, 0.5, 0.1, 0.9),
 (0.1, 0.5, 0.1, 0.5)]

In [116]:
results


Out[116]:
[0.9, 0.9, 0.5, 0.9, 0.1, 0.1, 0.9, 0.5, 0.1, 0.1, 0.9, 0.5, 0.1, 0.9]

In [117]:
ml_preceptron = MLPerceptron(
    HIDDEN_COUNT=4,
    INPUT_SIZE=4,
    W=[[-0.3, 0.3, -0.3, 0.3], [-0.3, 0.3, -0.3, 0.3], [-0.3, 0.3, -0.3, 0.3], [-0.3, 0.3, -0.3, 0.3]],
    b=[0, 0, 0, 0],
    g=0.2,
    nn=0.1
)

hidden, last, epochs = ml_preceptron.train_mlp(samples, results)

In [118]:
print("HIDDEN -> \nW = {}, \nb = {}\n".format(
    hidden[0], hidden[1]
))
print("LAST -> \nW = {}, \nb = {}\n".format(
    last[0], last[1]
))
print("EPOCH = {}".format(epochs))


HIDDEN -> 
W = [[-0.3065367740618869, 0.29587515530252806, -0.30193401273678977, 0.29746144331671065], [-0.30672262537424844, 0.29531768383036083, -0.3023544019218028, 0.2979194982822622], [-0.30672262537424844, 0.29531768383036083, -0.3023544019218028, 0.2979194982822622], [-0.30672262537424844, 0.29531768383036083, -0.3023544019218028, 0.2979194982822622]], 
b = [699.8484256183164, 699.7765776530761, 699.7765776530761, 699.7765776530761]

LAST -> 
W = [[-0.28100049022506196, -0.2831026940858736, -0.2831026940858736, -0.2831026940858736]], 
b = [699.8484256183164, 699.7765776530761, 699.7765776530761, 699.7765776530761]

EPOCH = 500

In [118]: