In [2]:
# Perceptron to implement an OR gate
# organize imports
import numpy as np
import matplotlib.pyplot as plt

# function to plot graph for binary inputs
def plot_graph(data, idx_subplot, title):
    xs = np.repeat(range(len(data)), 2)
    ys = np.repeat(data, 2)
    xs = xs[1:]
    ys = ys[:-1]
    plt.subplot(5,1,idx_subplot)
    plt.title(title)
    plt.plot(xs, ys)
    plt.ylim(-0.5, 1.5)


if __name__ == "__main__":
    ###########################################
    # INPUT DATA
    ###########################################
    # input data
    in_a = np.array([0,0,1,1])
    in_b = np.array([0,1,0,1])

    # output ground truth (OR gate)
    o_gt = np.array([0,1,1,1])

    # weights
    w = np.random.randn(1,2)
    bias = np.random.randn(1,1)

    print ("Weights before training - {}".format(w))
    print ("Bias before training - {}".format(bias))

    ###########################################
    # HYPER-PARAMETERS
    ###########################################
    learning_rate = 1
    epochs = 10

    ###########################################
    # TRAINING
    ###########################################
    x_axis = []
    y_axis = []
    for e in range(epochs):
        error = 0
        for a, b, o in zip(in_a, in_b, o_gt):
            # weighted sum
            ws = w[0][0]*a + w[0][1]*b
            # add the bias
            summed = ws + bias
            # step activation
            if summed >= 0:
                y = 1
            else:
                y = 0
           
            # calculate the error
            error = o - y
           
            # update rule
            dw0 = learning_rate * (error) * a
            dw1 = learning_rate * (error) * b

            # update weights
            w[0][0] = w[0][0] + dw0
            w[0][1] = w[0][1] + dw1

            # update bias too!
            bias = bias + (learning_rate * error)
           
            # for visualization purpose
            x_axis.append(e)
            y_axis.append(error)

    print ("Weights after training - {}".format(w))
    print ("Bias after training - {}".format(bias))

    ###########################################
    # TESTING
    ###########################################
    perceptron_output = []
    for a, b in zip(in_a, in_b):
        # weighted sum
        ws = w[0][0]*a + w[0][1]*b
        # add the bias
        summed = ws + bias
        # step activation
        if summed >= 0:
            perceptron_output.append(1)
        else:
            perceptron_output.append(0)

    # summary
    print ("\nInput A - {}".format(in_a))
    print("Input B - {}".format(in_b))
    print ("Ground truth - {}".format(o_gt))
    print ("Output - {}".format(perceptron_output))

    # display the plot
    plot_graph(in_a, 1, "Input A")
    plot_graph(in_b, 2, "Input B")
    plot_graph(o_gt, 3, "Ground Truth")
    plot_graph(perceptron_output, 4, "Actual Output")

    plt.subplot(5,1,5)
    plt.title("Epochs vs Error")
    plt.plot(x_axis, y_axis)
    plt.show()


Weights before training - [[ 0.86567137  1.03280243]]
Bias before training - [[ 0.93375096]]
Weights after training - [[ 0.86567137  1.03280243]]
Bias after training - [[-0.06624904]]

Input A - [0 0 1 1]
Input B - [0 1 0 1]
Ground truth - [0 1 1 1]
Output - [0, 1, 1, 1]
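
The truth table above matches. As a quick cross-check, the whole test loop collapses to one vectorized forward pass; this is only a sketch, using the example weights and bias printed for this particular run (a different random initialization gives different but equally valid values):

import numpy as np

# re-evaluate the trained OR perceptron on all four input pairs at once
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])   # rows are (A, B) pairs
w = np.array([0.86567137, 1.03280243])           # example weights from the run above
b = -0.06624904                                  # example bias from the run above

y = (X @ w + b >= 0).astype(int)                 # weighted sum + step activation
print(y)                                         # [0 1 1 1] -- the OR truth table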

In [3]:
# Perceptron to implement an AND gate
# organize imports
import numpy as np
import matplotlib.pyplot as plt

# function to plot graph for binary inputs
def plot_graph(data, idx_subplot, title):
    xs = np.repeat(range(len(data)), 2)
    ys = np.repeat(data, 2)
    xs = xs[1:]
    ys = ys[:-1]
    plt.subplot(5,1,idx_subplot)
    plt.title(title)
    plt.plot(xs, ys)
    plt.ylim(-0.5, 1.5)


if __name__ == "__main__":
    ###########################################
    # INPUT DATA
    ###########################################
    # input data
    in_a = np.array([0,0,1,1])
    in_b = np.array([0,1,0,1])

    # output ground truth (AND gate)
    o_gt = np.array([0,0,0,1])

    # weights
    w = np.random.randn(1,2)
    bias = np.random.randn(1,1)

    print ("Weights before training - {}".format(w))
    print ("Bias before training - {}".format(bias))

    ###########################################
    # HYPER-PARAMETERS
    ###########################################
    learning_rate = 1
    epochs = 10

    ###########################################
    # TRAINING
    ###########################################
    x_axis = []
    y_axis = []
    for e in range(epochs):
        error = 0
        for a, b, o in zip(in_a, in_b, o_gt):
            # weighted sum
            ws = w[0][0]*a + w[0][1]*b
            # add the bias
            summed = ws + bias
            # step activation
            if summed >= 0:
                y = 1
            else:
                y = 0
           
            # calculate the error
            error = o - y
           
            # update rule
            dw0 = learning_rate * (error) * a
            dw1 = learning_rate * (error) * b

            # update weights
            w[0][0] = w[0][0] + dw0
            w[0][1] = w[0][1] + dw1

            # update bias too!
            bias = bias + (learning_rate * error)
           
            # for visualization purpose
            x_axis.append(e)
            y_axis.append(error)

    print ("Weights after training - {}".format(w))
    print ("Bias after training - {}".format(bias))

    ###########################################
    # TESTING
    ###########################################
    perceptron_output = []
    for a, b in zip(in_a, in_b):
        # weighted sum
        ws = w[0][0]*a + w[0][1]*b
        # add the bias
        summed = ws + bias
        # step activation
        if summed >= 0:
            perceptron_output.append(1)
        else:
            perceptron_output.append(0)

    # summary
    print ("\nInput A - {}".format(in_a))
    print("Input B - {}".format(in_b))
    print ("Ground truth - {}".format(o_gt))
    print ("Output - {}".format(perceptron_output))

    # display the plot
    plot_graph(in_a, 1, "Input A")
    plot_graph(in_b, 2, "Input B")
    plot_graph(o_gt, 3, "Ground Truth")
    plot_graph(perceptron_output, 4, "Actual Output")

    plt.subplot(5,1,5)
    plt.title("Epochs vs Error")
    plt.plot(x_axis, y_axis)
    plt.show()


Weights before training - [[ 0.83651758  1.18597161]]
Bias before training - [[ 0.41821391]]
Weights after training - [[ 1.83651758  1.18597161]]
Bias after training - [[-2.58178609]]

Input A - [0 0 1 1]
Input B - [0 1 0 1]
Ground truth - [0 0 0 1]
Output - [0, 0, 0, 1]
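
A single perceptron succeeds here because AND is linearly separable: the learned parameters define a line w0*A + w1*B + bias = 0 in the input plane, with only (1, 1) on the positive side. A minimal sketch that plots this boundary against the four input points, assuming the example weights from the run above:

import numpy as np
import matplotlib.pyplot as plt

# example weights/bias from the AND run above; any successful run works
w0, w1, b = 1.83651758, 1.18597161, -2.58178609

# decision boundary: w0*A + w1*B + b = 0  ->  B = -(w0*A + b) / w1
a_vals = np.linspace(-0.5, 1.5, 50)
boundary = -(w0 * a_vals + b) / w1

plt.plot(a_vals, boundary, label="decision boundary")
plt.scatter([0, 0, 1], [0, 1, 0], marker="o", label="output 0")
plt.scatter([1], [1], marker="x", label="output 1")
plt.xlabel("Input A")
plt.ylabel("Input B")
plt.legend()
plt.show()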

In [10]:
# Perceptron to implement a NOR gate
# organize imports
import numpy as np
import matplotlib.pyplot as plt

# function to plot graph for binary inputs
def plot_graph(data, idx_subplot, title):
    xs = np.repeat(range(len(data)), 2)
    ys = np.repeat(data, 2)
    xs = xs[1:]
    ys = ys[:-1]
    plt.subplot(5,1,idx_subplot)
    plt.title(title)
    plt.plot(xs, ys)
    plt.ylim(-0.5, 1.5)


if __name__ == "__main__":
    ###########################################
    # INPUT DATA
    ###########################################
    # input data
    in_a = np.array([0,0,1,1])
    in_b = np.array([0,1,0,1])

    # output ground truth (NOR gate)
    o_gt = np.array([1,0,0,0])

    # weights
    w = np.random.randn(1,2)
    bias = np.random.randn(1,1)

    print ("Weights before training - {}".format(w))
    print ("Bias before training - {}".format(bias))

    ###########################################
    # HYPER-PARAMETERS
    ###########################################
    learning_rate = 1
    epochs = 10

    ###########################################
    # TRAINING
    ###########################################
    x_axis = []
    y_axis = []
    for e in range(epochs):
        error = 0
        for a, b, o in zip(in_a, in_b, o_gt):
            # weighted sum
            ws = w[0][0]*a + w[0][1]*b
            # add the bias
            summed = ws + bias
            # step activation
            if summed >= 0:
                y = 1
            else:
                y = 0
           
            # calculate the error
            error = o - y
           
            # update rule
            dw0 = learning_rate * (error) * a
            dw1 = learning_rate * (error) * b

            # update weights
            w[0][0] = w[0][0] + dw0
            w[0][1] = w[0][1] + dw1

            # update bias too!
            bias = bias + (learning_rate * error)
           
            # for visualization purpose
            x_axis.append(e)
            y_axis.append(error)

    print ("Weights after training - {}".format(w))
    print ("Bias after training - {}".format(bias))

    ###########################################
    # TESTING
    ###########################################
    perceptron_output = []
    for a, b in zip(in_a, in_b):
        # weighted sum
        ws = w[0][0]*a + w[0][1]*b
        # add the bias
        summed = ws + bias
        # step activation
        if summed >= 0:
            perceptron_output.append(1)
        else:
            perceptron_output.append(0)

    # summary
    print ("\nInput A - {}".format(in_a))
    print("Input B - {}".format(in_b))
    print ("Ground truth - {}".format(o_gt))
    print ("Output - {}".format(perceptron_output))

    # display the plot
    plot_graph(in_a, 1, "Input A")
    plot_graph(in_b, 2, "Input B")
    plot_graph(o_gt, 3, "Ground Truth")
    plot_graph(perceptron_output, 4, "Actual Output")

    plt.subplot(5,1,5)
    plt.title("Epochs vs Error")
    plt.plot(x_axis, y_axis)
    plt.show()


Weights before training - [[ 0.48271681 -0.53280765]]
Bias before training - [[-0.92262831]]
Weights after training - [[-0.51728319 -0.53280765]]
Bias after training - [[ 0.07737169]]

Input A - [0 0 1 1]
Input B - [0 1 0 1]
Ground truth - [1 0 0 0]
Output - [1, 0, 0, 0]
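
Note how the learned NOR parameters are roughly the negation of an OR solution: NOR is the complement of OR, so flipping the sign of a working OR perceptron's weights and bias flips which side of the boundary fires. A small check, assuming the example OR weights printed earlier (valid as long as no input lands exactly on the boundary):

import numpy as np

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
w_or, b_or = np.array([0.86567137, 1.03280243]), -0.06624904   # OR run above

w_nor, b_nor = -w_or, -b_or                      # negate weights and bias
y_nor = (X @ w_nor + b_nor >= 0).astype(int)
print(y_nor)                                     # [1 0 0 0] -- the NOR truth table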

In [9]:
# Perceptron to implement a NAND gate
# organize imports
import numpy as np
import matplotlib.pyplot as plt

# function to plot graph for binary inputs
def plot_graph(data, idx_subplot, title):
    xs = np.repeat(range(len(data)), 2)
    ys = np.repeat(data, 2)
    xs = xs[1:]
    ys = ys[:-1]
    plt.subplot(5,1,idx_subplot)
    plt.title(title)
    plt.plot(xs, ys)
    plt.ylim(-0.5, 1.5)


if __name__ == "__main__":
    ###########################################
    # INPUT DATA
    ###########################################
    # input data
    in_a = np.array([0,0,1,1])
    in_b = np.array([0,1,0,1])

    # output ground truth (NAND gate)
    o_gt = np.array([1,1,1,0])

    # weights
    w = np.random.randn(1,2)
    bias = np.random.randn(1,1)

    print ("Weights before training - {}".format(w))
    print ("Bias before training - {}".format(bias))

    ###########################################
    # HYPER-PARAMETERS
    ###########################################
    learning_rate = 1
    epochs = 10

    ###########################################
    # TRAINING
    ###########################################
    x_axis = []
    y_axis = []
    for e in range(epochs):
        error = 0
        for a, b, o in zip(in_a, in_b, o_gt):
            # weighted sum
            ws = w[0][0]*a + w[0][1]*b
            # add the bias
            summed = ws + bias
            # step activation
            if summed >= 0:
                y = 1
            else:
                y = 0
           
            # calculate the error
            error = o - y
           
            # update rule
            dw0 = learning_rate * (error) * a
            dw1 = learning_rate * (error) * b

            # update weights
            w[0][0] = w[0][0] + dw0
            w[0][1] = w[0][1] + dw1

            # update bias too!
            bias = bias + (learning_rate * error)
           
            # for visualization purpose
            x_axis.append(e)
            y_axis.append(error)

    print ("Weights after training - {}".format(w))
    print ("Bias after training - {}".format(bias))

    ###########################################
    # TESTING
    ###########################################
    perceptron_output = []
    for a, b in zip(in_a, in_b):
        # weighted sum
        ws = w[0][0]*a + w[0][1]*b
        # add the bias
        summed = ws + bias
        # step activation
        if summed >= 0:
            perceptron_output.append(1)
        else:
            perceptron_output.append(0)

    # summary
    print ("\nInput A - {}".format(in_a))
    print("Input B - {}".format(in_b))
    print ("Ground truth - {}".format(o_gt))
    print ("Output - {}".format(perceptron_output))

    # display the plot
    plot_graph(in_a, 1, "Input A")
    plot_graph(in_b, 2, "Input B")
    plot_graph(o_gt, 3, "Ground Truth")
    plot_graph(perceptron_output, 4, "Actual Output")

    plt.subplot(5,1,5)
    plt.title("Epochs vs Error")
    plt.plot(x_axis, y_axis)
    plt.show()


Weights before training - [[ 0.14214807 -1.50080055]]
Bias before training - [[ 0.10910146]]
Weights after training - [[-0.85785193 -0.50080055]]
Bias after training - [[ 1.10910146]]

Input A - [0 0 1 1]
Input B - [0 1 0 1]
Ground truth - [1 1 1 0]
Output - [1, 1, 1, 0]
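
Because NAND is functionally complete, the trained unit can be reused to build other gates. A brief illustration, assuming the example NAND weights from the run above and the identities NOT a = NAND(a, a) and AND = NOT(NAND):

import numpy as np

w, b = np.array([-0.85785193, -0.50080055]), 1.10910146   # NAND run above

def nand(a, x):
    # trained NAND perceptron: step(weights . input + bias)
    return int(w[0]*a + w[1]*x + b >= 0)

def not_gate(a):
    return nand(a, a)                 # NOT a = NAND(a, a)

def and_gate(a, x):
    return not_gate(nand(a, x))       # AND = NOT(NAND)

print([not_gate(a) for a in (0, 1)])                               # [1, 0]
print([and_gate(a, x) for a, x in ((0,0), (0,1), (1,0), (1,1))])   # [0, 0, 0, 1]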

In [11]:
# Perceptron to implement an XNOR gate
# organize imports
import numpy as np
import matplotlib.pyplot as plt

# function to plot graph for binary inputs
def plot_graph(data, idx_subplot, title):
    xs = np.repeat(range(len(data)), 2)
    ys = np.repeat(data, 2)
    xs = xs[1:]
    ys = ys[:-1]
    plt.subplot(5,1,idx_subplot)
    plt.title(title)
    plt.plot(xs, ys)
    plt.ylim(-0.5, 1.5)


if __name__ == "__main__":
    ###########################################
    # INPUT DATA
    ###########################################
    # input data
    in_a = np.array([0,0,1,1])
    in_b = np.array([0,1,0,1])

    # output ground truth (XNOR gate)
    o_gt = np.array([1,0,0,1])

    # weights
    w = np.random.randn(1,2)
    bias = np.random.randn(1,1)

    print ("Weights before training - {}".format(w))
    print ("Bias before training - {}".format(bias))

    ###########################################
    # HYPER-PARAMETERS
    ###########################################
    learning_rate = 1
    epochs = 10

    ###########################################
    # TRAINING
    ###########################################
    x_axis = []
    y_axis = []
    for e in range(epochs):
        error = 0
        for a, b, o in zip(in_a, in_b, o_gt):
            # weighted sum
            ws = w[0][0]*a + w[0][1]*b
            # add the bias
            summed = ws + bias
            # step activation
            if summed >= 0:
                y = 1
            else:
                y = 0
           
            # calculate the error
            error = o - y
           
            # update rule
            dw0 = learning_rate * (error) * a
            dw1 = learning_rate * (error) * b

            # update weights
            w[0][0] = w[0][0] + dw0
            w[0][1] = w[0][1] + dw1

            # update bias too!
            bias = bias + (learning_rate * error)
           
            # for visualization purpose
            x_axis.append(e)
            y_axis.append(error)

    print ("Weights after training - {}".format(w))
    print ("Bias after training - {}".format(bias))

    ###########################################
    # TESTING
    ###########################################
    perceptron_output = []
    for a, b in zip(in_a, in_b):
        # weighted sum
        ws = w[0][0]*a + w[0][1]*b
        # add the bias
        summed = ws + bias
        # step activation
        if summed >= 0:
            perceptron_output.append(1)
        else:
            perceptron_output.append(0)

    # summary
    print ("\nInput A - {}".format(in_a))
    print("Input B - {}".format(in_b))
    print ("Ground truth - {}".format(o_gt))
    print ("Output - {}".format(perceptron_output))

    # display the plot
    plot_graph(in_a, 1, "Input A")
    plot_graph(in_b, 2, "Input B")
    plot_graph(o_gt, 3, "Ground Truth")
    plot_graph(perceptron_output, 4, "Actual Output")

    plt.subplot(5,1,5)
    plt.title("Epochs vs Error")
    plt.plot(x_axis, y_axis)
    plt.show()


Weights before training - [[ 0.58914063  0.10055661]]
Bias before training - [[ 0.36613621]]
Weights after training - [[ 1.58914063  0.10055661]]
Bias after training - [[-0.63386379]]

Input A - [0 0 1 1]
Input B - [0 1 0 1]
Ground truth - [1 0 0 1]
Output - [0, 0, 1, 1]
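
Unlike the previous gates, the output here does not match the ground truth, and more epochs will not fix it: XNOR is not linearly separable, so no single line in the (A, B) plane can put (0,0) and (1,1) on one side and (0,1) and (1,0) on the other. A single perceptron cannot represent it; it needs at least two layers. A minimal sketch of one possible composition, XNOR(a, b) = OR(AND(a, b), NOR(a, b)), using hand-picked rather than trained weights:

def step(s):
    return 1 if s >= 0 else 0

# hand-picked weights/biases; any values that solve each sub-gate would do
def and_gate(a, b):  return step(a + b - 1.5)
def nor_gate(a, b):  return step(-a - b + 0.5)
def or_gate(a, b):   return step(a + b - 0.5)

def xnor(a, b):
    # the second layer combines the two first-layer perceptrons
    return or_gate(and_gate(a, b), nor_gate(a, b))

print([xnor(a, b) for a, b in ((0,0), (0,1), (1,0), (1,1))])   # [1, 0, 0, 1]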
