In [1]:
import numpy as np
from scipy.special import expit
from operator import add

In [10]:
x = np.random.random((8,3))

In [ ]:
sigmoid = lambda x: 1.0/(1.0+np.exp(-x))

In [ ]:
%timeit y = sigmoid(x)

In [ ]:
%timeit y = expit(x)

Expit it is! Now to determine the fastest way to apply the gradient-descent weight update.
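As a quick sanity check (this cell is an addition, not part of the original run), the hand-rolled sigmoid and expit should agree to floating-point precision:

In [ ]:
# Added check: both implementations should produce the same values on x.
assert np.allclose(sigmoid(x), expit(x))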


In [ ]:
x

In [11]:
%timeit x.flatten()


The slowest run took 39.26 times longer than the fastest. This could mean that an intermediate result is being cached 
1000000 loops, best of 3: 617 ns per loop
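For comparison (another added cell, not timed in the original), np.ravel returns a view when the memory layout allows, whereas flatten always copies, so it is usually the cheaper call:

In [ ]:
# ravel() avoids the copy that flatten() always makes, when the layout allows.
%timeit x.ravel()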

In [5]:
error = .235
vector = np.asarray([1,2,3,4,5,6,7])
weights = np.asarray([1,2,3,4,5,6,7])


def loop(error, weights, vector):
    # Plain Python loop: scale each component and add it to weights in place.
    # Note: weights is an integer array here, so the fractional correction is
    # truncated when it is stored back.
    l_rate = .1
    correction = l_rate * error
    for idx, item in enumerate(vector):
        weights[idx] += (item * correction)
%timeit loop(error, weights, vector)


10000 loops, best of 3: 20.4 µs per loop

In [6]:
error = .235
vector = np.asarray([1,2,3,4,5,6,7])
weights = np.asarray([1,2,3,4,5,6,7])


def loop(error, weights, vector):
    l_rate = .1
    correction = l_rate * error
    y = [x * correction for x in vector]
    # The result of map() is never assigned (and under Python 3 it is a lazy
    # iterator that is never consumed), so weights is not actually updated;
    # the timing below mostly measures the list comprehension.
    map(add, weights, y)

%timeit loop(error, weights, vector)


100000 loops, best of 3: 11.2 µs per loop
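If this runs under Python 3, the map above is never consumed, so the 11.2 µs figure does not include the additions at all. A variant that forces evaluation (a hypothetical loop_forced, added here and not part of the original run) would look like this and should time slower:

In [ ]:
def loop_forced(error, weights, vector):
    # Same approach, but list() forces the lazy map so the additions
    # actually execute.
    l_rate = .1
    correction = l_rate * error
    y = [x * correction for x in vector]
    weights = list(map(add, weights, y))

%timeit loop_forced(error, weights, vector)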

In [8]:
error = .235
vector = np.asarray([1,2,3,4,5,6,7])
weights = np.asarray([1,2,3,4,5,6,7])


def map_loop(error, weights, vector):
    l_rate = .1
    error = .235  # redundant: error is already passed in as an argument
    correction = l_rate * error
    corr_matrix = np.multiply(vector, correction)
    # Rebinds the local name only, so the caller's weights array is untouched.
    # Under Python 3, map() is lazy and np.asarray() wraps the map object in a
    # 0-d object array rather than evaluating it; list(map(...)) would be needed.
    weights = np.asarray(map(add, weights, corr_matrix))

%timeit map_loop(error, weights, vector)


The slowest run took 10.40 times longer than the fastest. This could mean that an intermediate result is being cached 
100000 loops, best of 3: 4.97 µs per loop

Maybe there is a way to leave weights as a generator object.
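Rather than a generator, the idiomatic NumPy route is to let broadcasting do the whole update in one in-place operation. This sketch (added here, not timed in the original) uses float arrays so the correction is not truncated:

In [ ]:
error = .235
vector = np.asarray([1., 2., 3., 4., 5., 6., 7.])
weights = np.asarray([1., 2., 3., 4., 5., 6., 7.])


def vectorized(error, weights, vector):
    # Hypothetical fully vectorized update: one broadcasted in-place add,
    # no Python-level loop or map.
    l_rate = .1
    correction = l_rate * error
    weights += vector * correction

%timeit vectorized(error, weights, vector)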