In [2]:
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from math import exp, log
from copy import copy
%matplotlib inline

In [27]:
b1 = 1
b2 = 1.5
w12 = 0.5
N = 10000
lam = np.zeros((2, N))
ts = np.arange(N)
spike = np.zeros((2, N)) != 0
dt = 0.001
t_sp = np.zeros((2, 1000), dtype='int')
i = 0
j = 0
for t in ts:
    lam[0, t] = exp(b1 +  np.sum(w12 / ((t - t_sp[1, :j]) * dt + 1)))
    lam[1, t] =  exp(b2)
    if np.random.random() < dt * lam[0, t]:# * exp(-dt * lam[0, t]):
        t_sp[0, i] = t
        i += 1
    if np.random.random() < dt * lam[1, t]:# * exp(-dt * lam[1, t]):
        t_sp[1, j] = t
        j += 1
    if i == 1000:
        last = t + 1
        break

In [28]:
# Build binary spike trains over the simulated window [0, last).
# Only the first i / j entries of t_sp hold real spikes; the rest of each
# buffer is zero-padding, so slice with :i / :j — indexing the full buffer
# would mark a spurious spike at t = 0 whenever the buffer is not full.
x1 = np.zeros(last, dtype='int')
x1[t_sp[0, :i]] = 1
plt.plot(np.arange(last) * dt, x1, ',')
x2 = np.zeros(last, dtype='int')
x2[t_sp[1, :j]] = 1
#plt.plot(np.arange(last) * dt, x2, 'o')



In [68]:
# Grid search over the coupling weight w12: evaluate the Poisson log-likelihood
# of neuron 1's spike train, holding b1 fixed at its true value.
ts = np.arange(last)
rat = copy(t_sp[1, :j])  # driver (neuron 2) spike times, bin indices
spi = copy(t_sp[0, :i])  # target (neuron 1) spike times, bin indices

# The model rate is lam(t) = exp(b1 + w12 * S(t)) where
#   S(t) = sum over driver spikes s <= t of 1 / ((t - s) * dt + 1).
# S does not depend on w12, so compute it once instead of re-summing the
# kernel inside the grid loop (was O(m * last * j), now O(last * j)).
S = np.array([np.sum(1.0 / ((t - rat[rat <= t]) * dt + 1)) for t in ts])

m = 20
ws = np.linspace(.4, .6, m)  # candidate coupling weights around the true 0.5
LL = np.zeros(m)             # log-likelihood at each candidate
for h, w12 in enumerate(ws):
    lam_test = np.exp(b1 + w12 * S)
    # Discretized Poisson-process log-likelihood: sum of log-rates at the
    # spike bins minus the integrated rate over the whole window.
    LL[h] = np.sum(np.log(lam_test[x1 == 1])) - np.sum(lam_test * dt)

In [69]:
plt.plot(ws, LL)  # log-likelihood profile; should peak near the true w12 = 0.5


Out[69]:
[<matplotlib.lines.Line2D at 0x7f286430a390>]

In [70]:
ws[np.argmax(LL)]  # grid-search maximum-likelihood estimate of the coupling weight


Out[70]:
0.49696969696969695

In [71]:
last  # number of time bins actually simulated before neuron 1 hit its spike cap


Out[71]:
6753

In [87]:
def f(w12):
    """Negative Poisson log-likelihood of neuron 1's spikes given coupling w12.

    Parameters
    ----------
    w12 : float or ndarray of shape (1,)
        Candidate coupling weight (scipy.optimize.minimize passes a
        length-1 array; broadcasting handles both cases).

    Returns
    -------
    float
        Negative log-likelihood, suitable for minimization.

    Reads the globals ts, rat, b1, dt and x1 defined in earlier cells.
    Unlike the grid-search cell it does not mutate any global state.
    """
    # Rate model: lam(t) = exp(b1 + w12 * S(t)); the kernel sums S(t) do not
    # depend on w12 but are recomputed here to keep this cell self-contained.
    S = np.array([np.sum(1.0 / ((t - rat[rat <= t]) * dt + 1)) for t in ts])
    lam = np.exp(b1 + w12 * S)
    # Negative of (sum of log-rates at spike bins minus integrated rate).
    return -(np.sum(np.log(lam[x1 == 1])) - np.sum(lam * dt))

# Refine the grid-search estimate with a quasi-Newton optimizer (BFGS default).
minimize(f, [.5])


Out[87]:
      fun: -4678.693344483803
 hess_inv: array([[  1.43247125e-05]])
      jac: array([ 0.])
  message: 'Optimization terminated successfully.'
     nfev: 21
      nit: 4
     njev: 7
   status: 0
  success: True
        x: array([ 0.49738822])

In [ ]: