• loop over positions from memoryview
    • STEP 1: calculate activation $r_{in}$ at new position for each input layer neuron $j$
    • STEP 2: calculate input $h$ for each output neuron $i$
      • for each output neuron $i$ sum over all input neuron activations $r_j$ * weight$_{ij}$
    • STEP 3: calculate output $r_i$ for each output neuron $i$
      • check if mean and sparseness of activity for all output neurons together are within 10% error bounds
      • if not, adjust $g$ and $\mu$
      • only do this for a maximum of 100 iterations at each time step
    • STEP 4: adjust the weights using the running average of input and output firing rates
    • STEP 5: re-normalize the weights to unit norm
      • this can be done using a memoryview to change current weights in place

In [4]:
%load_ext Cython

In [246]:
%%cython
from __future__ import print_function
import numpy as np
cimport numpy as np
cimport cython
from libc cimport math
from math import pi


def simulate(
    path, # the pre-calculated path with shape(t, 2)
    pref_loc, # preferred firing locations of the input neurons with shape(N_in, 2)
    dt = 0.01,
    N_in = 400, # number of input neurons
    N_out = 100, # number of output neurons
    n_snapshots = 20, # number of saved states of weights etc.
    tau_plus = 0.1,
    tau_minus = 0.3,
    a_0 = 0.1,
    s_0 = 0.3,
    errorbound = 0.1,
    sigma = 0.05,
    ):
    
    weights = c_simulate(
        path,
        pref_loc,
        dt,
        N_in,
        N_out,
        n_snapshots,
        tau_plus,
        tau_minus,
        a_0,
        s_0,
        errorbound,
        sigma,
        )
    
    return weights

@cython.boundscheck(False) # turn off bounds-checking for entire function
@cython.wraparound(False)  # turn off negative index wrapping for entire function
@cython.cdivision(True) # turn off zero-division checking
cdef double heaviside(double x) nogil:
    if x >= 0.0:        
        return 1.0
    else:
        return 0.0

@cython.boundscheck(False) # turn off bounds-checking for entire function
@cython.wraparound(False)  # turn off negative index wrapping for entire function
# @cython.cdivision(True) # turn off zero-division checking
cdef c_simulate(
    double[:, :] path, # a memoryview into the path ndarray
    double[:, :] pref_loc, # a memoryview into the preferred locations of the input neurons
    double dt,
    int N_in,
    int N_out,
    int n_snapshots,
    double tau_plus,
    double tau_minus,
    double a_0,
    double s_0,
    double errorbound,
    double sigma,
    ):
    
    
    cdef int steps = path.shape[0]
    cdef Py_ssize_t t
    cdef Py_ssize_t i
    cdef Py_ssize_t j
    cdef int maxiterations = 100
    cdef int it
    cdef double mu
    cdef double g
    cdef double PI = pi
    cdef double b3 = 0.01
    cdef double b4 = 0.1
    cdef double eta = 0.05
    cdef double epsilon = 0.005
    cdef double a = 0
    cdef double s = 0
    cdef double r_out_accumulator
    cdef double r_out_squared_accum
    
    assert(pref_loc.shape[0] == N_in)
    
    # initialize all needed arrays and assign memoryviews:
    # input neuron rates
    r_in_arr = np.zeros(N_in, dtype=np.float64)
    cdef double[:] r_in = r_in_arr
    
    # output neuron rates
    r_out_arr = np.zeros(N_out, dtype=np.float64)
    cdef double[:] r_out = r_out_arr
    
    # input for output neurons
    h_arr = np.zeros(N_out, dtype=np.float64)
    cdef double[:] h = h_arr
    
    # weights between input and output neurons
    w_arr = np.random.uniform(size=(N_out, N_in))
    normalization = 1 / np.sqrt(np.sum(w_arr ** 2.0, axis=1)) 
    w_arr *= normalization[:, np.newaxis]
    cdef double[:, :] w = w_arr
    
    # dynamical variables
    r_plus_arr = np.zeros(N_out, dtype=np.float64)
    cdef double[:] r_plus = r_plus_arr
    r_minus_arr = np.zeros(N_out, dtype=np.float64)
    cdef double[:] r_minus = r_minus_arr
    
    # running averages
    r_out_run_arr = np.zeros(N_out, dtype=np.float64)
    cdef double[:] r_out_run = r_out_run_arr
    r_in_run_arr = np.zeros(N_in, dtype=np.float64)
    cdef double[:] r_in_run = r_in_run_arr
    
    # weight accumulator
    weight_accumulator_arr = np.zeros(N_out, dtype=np.float64)
    cdef double[:] weight_accumulator = weight_accumulator_arr
    
    
    for t in range(steps):
        
        # STEP 1
        for j in range(N_in):
            # calculate activation of each input neuron
            r_in[j] = math.exp(
                - (
                    (path[t, 0] - pref_loc[j, 0]) ** 2.0 +
                    (path[t, 1] - pref_loc[j, 1]) ** 2.0
                ) / (2.0 * sigma ** 2.0)
            )
            
            # calculate running average of input neuron activation
            r_in_run[j] += eta * (r_in[j] - r_in_run[j]) # no t-1 because we replace the value instead of progressing to the next. so i on the right is at t-1 if i on the left is at t
        
        
        
        
        # STEP 2
        for i in range(N_out):
            # calculate activation of each output neuron
            for j in range(N_in):
                # accumulate activity fed into output neuron j
                h[i] += w[i, j] * r_in[j]
                
            # integrate activation variables and replace directly (no t and t-1)
            r_plus[i] += dt * ((h[i] - r_plus[i] - r_minus[i]) / tau_plus)
            r_minus[i] += dt * ((h[i] - r_minus[i]) / tau_minus)

#         print("step %i:" % t)
#         print("r+: ", end="")
#         print(r_plus_arr)
#         print("r-: ", end="")
#         print(r_minus_arr)        

        
        

        # TODO find out real starting values for these
        mu = 0
        g = 1
        
        # STEP 3
        for it in range(maxiterations):
#             print("iteration %i: " % it)
#             print("mu: %.3f, g: %.3f" % (mu, g))
            
            # reset activity accumulators
            r_out_accumulator = 0
            r_out_squared_accum = 0
            
            for i in range(N_out):
                # calculate output rate of output neurons
                r_out[i] = 2 / PI * (
                    math.atan(g * (r_plus[i] - mu)) * heaviside(r_plus[i] - mu)
                )
                
                # accumulate activity for calculation of activity and sparseness measures
                r_out_accumulator += r_out[i]
                r_out_squared_accum += r_out[i] ** 2
                
            # DEBUG:    
            if r_out_squared_accum == 0:
                print("Zero Division error at step: %i" % t)
                print("iteration: %i" % it)
                print("r_in:")
                print(r_in_arr)
                print("\nr_out:")
                print(r_out_arr)
                print("\nh")
                print(h_arr)
                print("\nr_plus:")
                print(r_plus_arr)
                print("\nr_minus:")
                print(r_minus_arr)
                
                print("\nmu: %.12f, g: %.12f" % (mu, g))
                
            a = r_out_accumulator / N_out
            s = (r_out_accumulator ** 2) / (N_out * r_out_squared_accum)
#             print("accumulated activity: %.12f, a: %.12f, s: %.12f\n" % (r_out_accumulator, a, s))
            
            
            # if bounds are met finish the iterative for loop
            if (((a_0 * (1 - errorbound)) <= a <= (a_0 * (1 + errorbound)))
                and ((s_0 * (1 - errorbound)) <= s <= (s_0 * (1 + errorbound)))):
#                 print(it)
                break                
            
            # else change mu and g
            mu += b3 * (a - a_0)
            g += b4 * g  * (s - s_0) 
        
        
        
        # STEP 4

        for i in range(N_out):
            # calculate output firing rate running average
            r_out_run[i] += eta * (r_out[i] - r_out_run[i]) # no t-1 because we replace the value instead of progressing to the next. so i on the right is at t-1 if i on the left is at t
            
            # reset weight_accumulator
            weight_accumulator[i] = 0.0
            
            for j in range(N_in):
                # loop over all weights and update them in-place depending
                # on the learning dynamics
                w[i, j] += epsilon * (r_out[i] * r_in[j] - r_out_run[i] * r_in_run[j])                
                # accumulate the sum of squares of the weights
                # for normalization of each output neuron at the same time
                weight_accumulator[i] += w[i, j] ** 2         
        
        
        
        # STEP 5
        
        for i in range(N_out):
            for j in range(N_in):               
                # loop over the weights again and multiply
                # with normalization factor in-place
                w[i, j] /= weight_accumulator[i]
            
        # save weight and firing rate states into snapshot arrays
        
    # TODO: return various calculated snapshot arrays
    return w_arr

In [256]:
path = np.load("rat_path_1h.npy")
# path = np.array([[0.6, 0.6], [0.61, 0.61]])


x_axis = np.linspace(0, 0.95, 20) #grid point spaced 5cm apart in meters
x, y = np.meshgrid(x_axis, x_axis)
pref_loc = np.vstack((x.flatten(),y.flatten())).T

limit_steps = 10000
weights = simulate(path[0:limit_steps, :], pref_loc, N_in=400, N_out=100, dt=0.01)


Zero Division error at step: 9530
iteration: 99
r_in:
[  7.97210778e-162   3.14170601e-157   4.55473844e-153   2.42922007e-149
   4.76623878e-146   3.44025195e-143   9.13503513e-141   8.92350994e-139
   3.20676194e-137   4.23938878e-136   2.06179409e-135   3.68886577e-135
   2.42798463e-135   5.87901451e-136   5.23683209e-137   1.71608298e-138
   2.06877584e-140   9.17474722e-143   1.49685695e-145   8.98404677e-149
   3.09208110e-151   1.21854973e-146   1.76661192e-142   9.42203199e-139
   1.84864496e-135   1.33434448e-132   3.54313693e-130   3.46109425e-128
   1.24378248e-126   1.64429964e-125   7.99692467e-125   1.43077244e-124
   9.41724020e-125   2.28024886e-125   2.03117042e-126   6.65604115e-128
   8.02400427e-130   3.55853976e-132   5.80574573e-135   3.48457421e-138
   4.41198582e-141   1.73870735e-136   2.52071874e-132   1.34439784e-128
   2.63776890e-125   1.90393096e-122   5.05558211e-120   4.93851820e-118
   1.77471110e-116   2.34619548e-115   1.14105410e-114   2.04152076e-114
   1.34371412e-114   3.25360989e-115   2.89820830e-116   9.49727975e-118
   1.14491800e-119   5.07755989e-122   8.28402200e-125   4.97202096e-128
   2.31591630e-131   9.12673079e-127   1.32316237e-122   7.05694215e-119
   1.38460372e-115   9.99401386e-113   2.65374946e-110   2.59230089e-108
   9.31571980e-107   1.23155254e-105   5.98956094e-105   1.07162429e-104
   7.05335319e-105   1.70786772e-105   1.52131219e-106   4.98526193e-108
   6.00984310e-110   2.66528592e-112   4.34840508e-115   2.60988698e-118
   4.47215789e-122   1.76242039e-117   2.55509710e-113   1.36273317e-109
   2.67374362e-106   1.92989738e-103   5.12453175e-101   5.00587128e-099
   1.79891518e-097   2.37819363e-096   1.15661616e-095   2.06936366e-095
   1.36204012e-095   3.29798365e-096   2.93773498e-097   9.62680666e-099
   1.16053276e-100   5.14680926e-103   8.39700212e-106   5.03983096e-109
   3.17699780e-113   1.25201432e-108   1.81512775e-104   9.68078585e-101
   1.89941362e-097   1.37098910e-094   3.64044081e-092   3.55614503e-090
   1.27794003e-088   1.68945644e-087   8.21654132e-087   1.47006523e-086
   9.67586246e-087   2.34287051e-087   2.08695172e-088   6.83883360e-090
   8.24436459e-092   3.65626665e-094   5.96518682e-097   3.58026981e-100
   8.30275493e-105   3.27200984e-100   4.74364849e-096   2.52997318e-092
   4.96392091e-089   3.58293812e-086   9.51391527e-084   9.29361697e-082
   3.33976399e-080   4.41521955e-079   2.14730803e-078   3.84186333e-078
   2.52868651e-078   6.12284958e-079   5.45403231e-080   1.78725838e-081
   2.15457936e-083   9.55527445e-086   1.55893984e-088   9.35666460e-092
   7.98239183e-097   3.14575882e-092   4.56061407e-088   2.43235377e-084
   4.77238725e-081   3.44468989e-078   9.14681936e-076   8.93502131e-074
   3.21089867e-072   4.24485761e-071   2.06445381e-070   3.69362443e-070
   2.43111674e-070   5.88659847e-071   5.24358762e-072   1.71829673e-073
   2.07144457e-075   9.18658269e-078   1.49878790e-080   8.99563623e-084
   2.82325030e-089   1.11260694e-084   1.61301967e-080   8.60286449e-077
   1.68792062e-073   1.21833430e-070   3.23509057e-068   3.16018082e-066
   1.13564591e-064   1.50134143e-063   7.30165842e-063   1.30637865e-062
   8.59848930e-063   2.08200014e-063   1.85457701e-064   6.07735360e-066
   7.32638367e-068   3.24915425e-070   5.30098431e-073   3.18161940e-076
   3.67342555e-082   1.44765016e-077   2.09875393e-073   1.11934752e-069
   2.19621006e-066   1.58521557e-063   4.20928472e-061   4.11181714e-059
   1.47762694e-057   1.95344563e-056   9.50043239e-056   1.69977303e-055
   1.11877825e-055   2.70896013e-056   2.41305227e-057   7.90744836e-059
   9.53260324e-061   4.22758345e-063   6.89728827e-066   4.13971159e-069
   1.75832285e-075   6.92932612e-071   1.00459012e-066   5.35787181e-063
   1.05123849e-059   7.58779708e-057   2.01481734e-054   1.96816348e-052
   7.07281304e-051   9.35036802e-050   4.54747949e-049   8.13613809e-049
   5.35514694e-049   1.29667157e-049   1.15503224e-050   3.78498133e-052
   4.56287840e-054   2.02357621e-056   3.30145784e-059   1.98151545e-062
   3.09621788e-069   1.22017998e-064   1.76897541e-060   9.43463740e-057
   1.85111819e-053   1.33612966e-050   3.54787717e-048   3.46572473e-046
   1.24544649e-044   1.64649949e-043   8.00762347e-043   1.43268663e-042
   9.42983920e-043   2.28329952e-043   2.03388785e-044   6.66494603e-046
   8.03473930e-048   3.56330060e-050   5.81351303e-053   3.48923610e-056
   2.00571826e-063   7.90427988e-059   1.14593559e-054   6.11172250e-051
   1.19914738e-047   8.65539748e-045   2.29830144e-042   2.24508340e-040
   8.06795537e-039   1.06659615e-037   5.18730827e-037   9.28088989e-037
   6.10861424e-037   1.47911281e-037   1.31754487e-038   4.31752689e-040
   5.20487380e-042   2.30829268e-044   3.76597180e-047   2.26031398e-050
   4.77984551e-058   1.88367616e-053   2.73088958e-049   1.45649017e-045
   2.85769908e-042   2.06267568e-039   5.47710315e-037   5.35027876e-035
   1.92268181e-033   2.54181504e-032   1.23619217e-031   2.21173735e-031
   1.45574944e-031   3.52488726e-032   3.13985323e-033   1.02891378e-034
   1.24037823e-036   5.50091338e-039   8.97472180e-042   5.38657489e-045
   4.19047554e-053   1.65141297e-048   2.39416232e-044   1.27690036e-040
   2.50533580e-037   1.80834129e-034   4.80175912e-032   4.69057257e-030
   1.68560910e-028   2.22840125e-027   1.08376579e-026   1.93902318e-026
   1.27625096e-026   3.09025758e-027   2.75269946e-028   9.02045474e-030
   1.08743570e-031   4.82263347e-034   7.86811040e-037   4.72239328e-040
   1.35150693e-048   5.32611646e-044   7.72162237e-040   4.11824307e-036
   8.08017770e-033   5.83223971e-030   1.54865735e-027   1.51279760e-025
   5.43640541e-024   7.18701187e-023   3.49534788e-022   6.25371332e-022
   4.11614864e-022   9.96666011e-023   8.87797187e-024   2.90926579e-025
   3.50718401e-027   1.55538972e-029   2.53761313e-032   1.52306038e-035
   1.60353618e-044   6.31933159e-040   9.16155186e-036   4.88621375e-032
   9.58697065e-029   6.91983679e-026   1.83745124e-023   1.79490436e-021
   6.45018724e-020   8.52724710e-019   4.14716097e-018   7.41990688e-018
   4.88372876e-018   1.18252446e-018   1.05335376e-019   3.45178618e-021
   4.16120430e-023   1.84543906e-025   3.01082767e-028   1.80708094e-031
   6.99914139e-041   2.75827237e-036   3.99884940e-032   2.13274271e-028
   4.18453690e-025   3.02038187e-022   8.02013775e-020   7.83442843e-018
   2.81538846e-016   3.72198700e-015   1.81015972e-014   3.23865330e-014
   2.13165805e-014   5.16150245e-015   4.59769602e-016   1.50664137e-017
   1.81628937e-019   8.05500313e-022   1.31417107e-024   7.88757696e-028
   1.12387054e-037   4.42903049e-033   6.42105766e-029   3.42460107e-025
   6.71922097e-022   4.84990663e-019   1.28781461e-016   1.25799478e-014
   4.52074330e-013   5.97649243e-012   2.90662108e-011   5.20039079e-011
   3.42285941e-011   8.28796026e-012   7.38264145e-013   2.41925369e-014
   2.91646363e-016   1.29341304e-018   2.11019906e-021   1.26652898e-024
   6.63885723e-035   2.61628897e-030   3.79300670e-026   2.02295875e-022
   3.96913585e-019   2.86490627e-016   7.60729731e-014   7.43114746e-012
   2.67046498e-010   3.53039593e-009   1.71698088e-008   3.07194206e-008
   2.02192993e-008   4.89581163e-009   4.36102741e-010   1.42908629e-011
   1.72279501e-013   7.64036797e-016   1.24652348e-018   7.48156015e-022]

r_out:
[-0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0.
 -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0.
 -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0.
 -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0.
 -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0. -0.
 -0. -0. -0. -0. -0. -0. -0. -0. -0. -0.]

h
[ 1050.0166305    866.93090811  1027.20685695   990.48895041   992.30106225
  1049.28043182   984.04890545   443.76382558   365.03870857   405.34269633
   401.93771008  1051.11202206   413.23823089   908.00407035   991.89037395
   124.98433753   905.75669958  1050.53340426   127.34332619   463.49021157
   126.38060611   857.81795468  1051.28442407  1051.48626899  1025.40669479
  1049.32256955   835.52135872   124.2233007    438.96915369  1044.75627226
  1050.50065054   690.87920213   992.35022509   991.93202821  1025.43522395
   811.26441427  1050.8262832   1051.6115977     78.58543759   131.46343186
   126.92243866   116.44618864  1021.5731978   1042.34004791  1051.20243703
   128.50772707   129.41628332   407.96833659   126.31592265   114.1310319
   123.77885767   405.60941101   123.66114984   991.23381567  1050.29800725
  1050.33333355  1050.40656828   437.31333184   117.95054533   124.54387775
   985.54021544  1051.05636139   392.3398251    968.85714291  1033.53031354
  1050.60552246   118.3884153    118.83072336   990.77184562   103.2976951
   118.26855735   990.63117378   762.49548183   437.56463152   388.20326538
   126.43504855   115.54592247   114.90987034   547.69189794    73.66403412
   989.99921873   991.610405     124.5645568    125.67030269   127.62768836
  1051.47204172  1027.25514218  1025.87429406  1050.53260353  1025.80700146
   426.04830492   894.27239528   125.48182775  1051.13468708   125.64734699
   404.38776932  1019.7786772    114.17954815  1041.61096053  1049.95478925]

r_plus:
[  6.83368528e-04   7.54380369e-04   6.90824607e-04   6.95109450e-04
   6.95089571e-04   6.87972148e-04   7.02634655e-04   7.43489439e-04
   7.41663274e-04   7.45189875e-04   7.49209976e-04   6.91047525e-04
   7.49592743e-04   7.55898761e-04   6.94173266e-04   5.98075115e-04
   7.48356979e-04   6.85284496e-04   5.97952076e-04   7.45892071e-04
   5.95238744e-04   7.28695941e-04   6.89759734e-04   6.88302623e-04
   7.01773994e-04   6.92482883e-04   7.55367350e-04   6.02680310e-04
   7.55492186e-04   6.96971439e-04   6.86709899e-04   7.43375049e-04
   6.97707276e-04   6.95063203e-04   6.87613417e-04   7.55385268e-04
   6.83677840e-04   7.00951243e-04   3.88149069e-05   5.95499794e-04
   6.00322432e-04   6.02546426e-04   7.48129045e-04   7.50588853e-04
   6.97557304e-04   5.97431785e-04   5.97542529e-04   7.44257745e-04
   6.00171501e-04   5.97276991e-04   5.97247096e-04   7.39992057e-04
   6.00560553e-04   6.97107579e-04   6.85897257e-04   6.83614265e-04
   6.85915318e-04   7.51955832e-04   6.02261430e-04   6.05251973e-04
   7.03272644e-04   6.86454235e-04   7.28768810e-04   7.11288599e-04
   7.00422691e-04   6.83320838e-04   6.06101603e-04   6.02744967e-04
   6.99353937e-04   4.55240928e-05   6.01089770e-04   6.91004172e-04
   7.34543771e-04   7.45405758e-04   7.51045574e-04   6.03628835e-04
   6.00040724e-04   6.00464501e-04   7.49980857e-04   4.44551986e-05
   6.88849334e-04   6.90620934e-04   6.04506623e-04   5.92364761e-04
   5.97513436e-04   6.85400692e-04   6.96679988e-04   6.92000604e-04
   6.86154244e-04   7.14830072e-04   7.61797292e-04   7.42767346e-04
   6.01014900e-04   6.83328535e-04   5.98807683e-04   7.43994682e-04
   7.46128178e-04   6.03696996e-04   6.95517118e-04   6.92172388e-04]

r_minus:
[ 1050.01617492   866.93040518  1027.2063964    990.488487     992.30059885
  1049.27997317   984.04843702   443.76332991   365.03821412   405.34219954
   401.9372106   1051.11156136   413.23773116   908.00356642   991.88991117
   124.98393881   905.75620067  1050.5329474    127.34292755   463.48971431
   126.38020929   857.81746888  1051.28396423  1051.48581012  1025.40622694
  1049.3221079    835.52085514   124.22289891   438.96865003  1044.75580761
  1050.50019273   690.87870655   992.34975995   991.93156483  1025.43476554
   811.26391067  1050.82582742  1051.6111304     78.58541171   131.46303486
   126.92203844   116.44578694  1021.57269905  1042.33954752  1051.20197199
   128.50732878   129.41588496   407.96784041   126.31552253   114.13063371
   123.77845951   405.60891768   123.66074947   991.23335093  1050.29754998
  1050.3328778   1050.406111     437.31283053   117.95014382   124.54347425
   985.53974659  1051.05590375   392.33933925   968.85666871  1033.52984659
  1050.60506691   118.38801123   118.83032152   990.77137939   103.29766474
   118.26815662   990.63071311   762.49499213   437.56413458   388.20276468
   126.43464613   115.54552244   114.90947003   547.69139795    73.66400448
   989.9987595    991.60994458   124.56415379   125.66990778   127.62729001
  1051.47158479  1027.25467772  1025.87383272  1050.53214609  1025.8065249
   426.04779705   894.2719001    125.48142707  1051.13423153   125.64694779
   404.38727333  1019.77817978   114.17914569  1041.61049685  1049.95432779]

mu: 0.000910752817, g: 730.171779568896
---------------------------------------------------------------------------
ZeroDivisionError                         Traceback (most recent call last)
<ipython-input-256-6ced2b15e64c> in <module>()
      8 
      9 limit_steps = 10000
---> 10 weights = simulate(path[0:limit_steps, :], pref_loc, N_in=400, N_out=100, dt=0.01)

_cython_magic_7828639d2da1f5ba66f5ecf93c2e54b6.pyx in _cython_magic_7828639d2da1f5ba66f5ecf93c2e54b6.simulate (C:\Users\Julius\.ipython\cython\_cython_magic_7828639d2da1f5ba66f5ecf93c2e54b6.c:2359)()

_cython_magic_7828639d2da1f5ba66f5ecf93c2e54b6.pyx in _cython_magic_7828639d2da1f5ba66f5ecf93c2e54b6.c_simulate (C:\Users\Julius\.ipython\cython\_cython_magic_7828639d2da1f5ba66f5ecf93c2e54b6.c:3749)()

ZeroDivisionError: float division

In [250]:
fig, sps = plt.subplots(10, 10)
for i, sp in enumerate(sps.flat):
    sp.imshow(weights.reshape(100,20,20)[i, ...].squeeze(), interpolation="None", cmap="gray")
    sp.set_xlabel("")
    sp.set_ylabel("")
    sp.set_xticklabels("")
    sp.set_yticklabels("")

fig.set_size_inches(10, 10)
plt.show()
weights.sum()


Out[250]:
100.0

In [227]:
fig, sps = plt.subplots(10, 10)
for i, sp in enumerate(sps.flat):
    sp.imshow(weights.reshape(100,20,20)[i, ...].squeeze(), interpolation="None", cmap="gray")
    sp.set_xlabel("")
    sp.set_ylabel("")
    sp.set_xticklabels("")
    sp.set_yticklabels("")

fig.set_size_inches(10, 10)
plt.show()
weights.sum()


Out[227]:
100.00000000000001

In [217]:
import matplotlib.pyplot as plt
fig, sp = plt.subplots(1)
sp.imshow(weights, interpolation="None", cmap="gray")
fig.set_size_inches(10, 10)
plt.show()
weights.sum()


Out[217]:
100.0

In [257]:
path[9530,:]


Out[257]:
array([ 0.55408713,  1.24406657])

In [202]:
1/0.3


Out[202]:
3.3333333333333335