In [1]:
import numpy as np
from scipy.special import expit
from operator import add

In [10]:
x = np.random.random((8,3))

In [ ]:
sigmoid = lambda x: 1.0/(1.0+np.exp(-x))

In [ ]:
%timeit y = sigmoid(x)

In [ ]:
%timeit y = expit(x)

Expit it is! Now to determine the fastest gradient-descent weight-update function.
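
As a quick sanity check (a minimal sketch, not part of the original run), expit and the hand-rolled sigmoid should agree to floating-point precision:

In [ ]:
# expit is SciPy's compiled logistic function; it should match the lambda above
# to within floating-point rounding.
np.allclose(expit(x), sigmoid(x))   # expected: True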


In [ ]:
x

In [11]:
%timeit x.flatten()


The slowest run took 39.26 times longer than the fastest. This could mean that an intermediate result is being cached 
1000000 loops, best of 3: 617 ns per loop
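
For comparison (a sketch, not timed in the original notebook): flatten() always returns a copy, while ravel() returns a view when the array is already contiguous, so it is usually at least as fast.

In [ ]:
# ravel() avoids the copy for contiguous arrays, unlike flatten().
%timeit x.ravel()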

In [5]:
error = .235
vector = np.asarray([1, 2, 3, 4, 5, 6, 7], dtype=float)
weights = np.asarray([1, 2, 3, 4, 5, 6, 7], dtype=float)


def loop(error, weights, vector):
    # Element-wise, Python-level update: weights[i] += l_rate * error * vector[i].
    # weights needs a float dtype here, otherwise the in-place update truncates to int.
    l_rate = .1
    correction = l_rate * error
    for idx, item in enumerate(vector):
        weights[idx] += item * correction
%timeit loop(error, weights, vector)


10000 loops, best of 3: 20.4 µs per loop

In [6]:
error = .235
vector = np.asarray([1, 2, 3, 4, 5, 6, 7], dtype=float)
weights = np.asarray([1, 2, 3, 4, 5, 6, 7], dtype=float)


def loop(error, weights, vector):
    l_rate = .1
    correction = l_rate * error
    # Scale the vector in a list comprehension, then pair-wise add it to the weights.
    y = [x * correction for x in vector]
    # NOTE: under Python 3, map() returns a lazy iterator, so the additions are
    # deferred until z is consumed -- they are not included in the timing.
    z = map(add, weights, y)

%timeit loop(error, weights, vector)


100000 loops, best of 3: 11.2 µs per loop
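
Judging from the 0.0 result in the final cell, this notebook runs under Python 3, where map() is lazy, so the timing above never actually performs the additions. A sketch of what timing the full update might look like (loop_materialized is a hypothetical name, not from the original run):

In [ ]:
# Hypothetical variant: materialize the map so the additions are actually
# executed inside the timed call.
def loop_materialized(error, weights, vector):
    l_rate = .1
    correction = l_rate * error
    y = [x * correction for x in vector]
    return list(map(add, weights, y))

%timeit loop_materialized(error, weights, vector)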

In [13]:
error = .235
vector = np.asarray([1, 2, 3, 4, 5, 6, 7], dtype=float)
weights = np.asarray([1, 2, 3, 4, 5, 6, 7], dtype=float)


def map_loop(error, weights, vector):
    l_rate = .1
    correction = l_rate * error
    # Let NumPy do the scaling in a single vectorized call...
    corr_matrix = np.multiply(vector, correction)
    # ...but the pair-wise addition is still a lazy map object, so it is not
    # actually evaluated during the timing.
    weights = map(add, weights, corr_matrix)

%timeit map_loop(error, weights, vector)


The slowest run took 16.37 times longer than the fastest. This could mean that an intermediate result is being cached 
1000000 loops, best of 3: 1.46 µs per loop
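
Since both map-based versions defer the addition, the 1.46 µs figure mostly measures np.multiply plus building an iterator. For reference, a fully vectorized update that stays entirely in NumPy (a sketch, not benchmarked in the original; vector_loop is a hypothetical name):

In [ ]:
# Hypothetical fully vectorized update: no Python-level loop or map object.
def vector_loop(error, weights, vector):
    l_rate = .1
    return weights + l_rate * error * vector

%timeit vector_loop(error, weights, vector)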

Maybe there is a way to leave weights as a lazy map object instead of materializing it.

Test this by keeping it as a map object and mapping over it again.


In [8]:
def square(x):
    return x*x

a = [1,2,3,4,5]
b = map(square, a)        # lazy iterator over the squares of a
c = map(square, b)        # lazy iterator over the squares of b
d = list(c)               # consuming c also exhausts b
d
# Both iterators are now empty, so fromiter() builds two empty arrays.
f = np.dot(np.fromiter(b, float), np.fromiter(c, float))
a
f


Out[8]:
0.0
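
The chained map itself works (d would be [1, 16, 81, 256, 625]), but b and c are single-use iterators: building d drains both of them, so the later fromiter calls see empty iterators and the dot product comes out as 0.0. To reuse the mapped values they have to be materialized, or duplicated with itertools.tee; a minimal sketch (not from the original run):

In [ ]:
# Duplicating the iterator with tee() lets the mapped values be consumed twice.
import itertools

a = [1, 2, 3, 4, 5]
b1, b2 = itertools.tee(map(square, a))   # two independent copies of the iterator
f = np.dot(np.fromiter(b1, float), np.fromiter(b2, float))
f                                        # 1 + 16 + 81 + 256 + 625 = 979.0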