In [1]:
import numpy as np

In [12]:
# Activations of the current layer's three neurons.
a = np.array([2, 3, 2])

# Weight vectors of the two next-layer neurons, one weight per
# current-layer neuron.
nrn_a = np.array([1, 2, 3])
nrn_b = np.array([3, 4, 5])

# Error (delta) of each next-layer neuron.
la_error_vector = np.array([6, 7])

# Stack the weight vectors as columns: row i holds the weights from
# current-layer neuron i to every next-layer neuron.
la_nrns = np.column_stack((nrn_a, nrn_b))
la_nrns


Out[12]:
array([[1, 3],
       [2, 4],
       [3, 5]])

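As a quick orientation check (not part of the original run), column 0 of la_nrns should recover nrn_a:

In [ ]:
# column_stack placed nrn_a in the first column.
la_nrns[:, 0]  # expected: array([1, 2, 3])
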
In [7]:
# Weight matrix of the current layer: each column is one neuron's
# vector of four incoming weights.
cnrn_a = np.array([6, 7, 8, 9])
cnrn_b = np.array([5, 4, 3, 2])
cnrn_c = np.array([2, 9, 8, 3])

cur_nrns = np.column_stack((cnrn_a, cnrn_b, cnrn_c))
cur_nrns


Out[7]:
array([[6, 5, 2],
       [7, 4, 9],
       [8, 3, 8],
       [9, 2, 3]])

In [24]:
xa = cur_nrns.T   # rows of xa: the current layer's three neurons
xb = la_nrns.T    # rows of xb: the two next-layer neurons
def bptest():
    error_vector = []
    for i, neuron in enumerate(xa):
        # Weighted sum of the next-layer errors flowing back to neuron i.
        temp_err = 0
        for j, la_neuron in enumerate(xb):
            temp_err += la_nrns[i][j] * la_error_vector[j]
        # Scale by a * (1 - a), the derivative term for this neuron.
        error_vector.append(a[i] * (1 - a[i]) * temp_err)
    return error_vector
%timeit bptest()
em = bptest()
print(em)


100000 loops, best of 3: 10.3 µs per loop
[-54, -240, -106]
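
For each current-layer neuron i, the loop computes the usual backpropagated delta. Assuming a holds sigmoid activations (so that a(1 - a) stands in for the activation derivative):

$\delta_i = a_i (1 - a_i) \sum_j w_{ij}\, \delta^{\text{next}}_j$

For neuron 0 this gives 2 · (1 − 2) · (1·6 + 3·7) = −2 · 27 = −54, matching the first entry of the output above.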

Represent the layer as a matrix of weights, with each column holding one neuron's weight vector. The inner loop's weighted error sum then collapses into a single matrix-vector product, and the derivative scaling becomes an elementwise multiply.


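To see the matrix-vector step on its own (a quick check, not part of the original run), the product below gives each neuron's weighted error sum:

In [ ]:
# Entry i is the sum over j of la_nrns[i][j] * la_error_vector[j].
np.dot(la_nrns, la_error_vector)  # expected: array([27, 40, 53])
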
In [21]:
def bptest2():
    # Weighted error sums for all neurons in one matrix-vector product.
    temp = np.dot(la_nrns, la_error_vector)
    # Elementwise derivative scaling, equivalent to temp * a * (1 - a).
    error_vector2 = np.multiply(temp, np.multiply(a, 1 - a))
    return error_vector2
%timeit bptest2()
print(bptest2())


The slowest run took 15.96 times longer than the fastest. This could mean that an intermediate result is being cached 
100000 loops, best of 3: 2.65 µs per loop
[ -54 -240 -106]
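
Both versions should agree; a minimal sanity check (this cell was not part of the original run):

In [ ]:
# bptest() returns a Python list, bptest2() an ndarray; np.array_equal
# compares them element by element.
np.array_equal(bptest(), bptest2())  # expected: True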