In [1]:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
import csv
%matplotlib inline
In [2]:
import sys
import os
sys.path.append(os.path.join(os.getcwd(),".."))
sys.path.append(os.path.join(os.getcwd(),"..","code"))
In [3]:
import filters
import likelihood_functions as lk
import PoissonProcessClasses as PP
import auxiliary_functions as auxfun
In [4]:
import importlib
importlib.reload(filters)
importlib.reload(lk)
importlib.reload(auxfun)
importlib.reload(PP)
Out[4]:
In [5]:
# Number of neurons
nofCells = 2
Reading input-output data:
In [6]:
# creating the path to the data
data_path = os.path.join(os.getcwd(),'..','data')
# reading stimulus
Stim = np.array(pd.read_csv(os.path.join(data_path,'Stim2.csv'),header = None))
# reading location of spikes
# (lengths of tsp sequences are not equal so reading them line by line)
tsp_list = []
with open(os.path.join(data_path,'tsp2.csv')) as csvfile:
    tspreader = csv.reader(csvfile)
    for row in tspreader:
        tsp_list.append(row)
Extracting a spike train from spike positions:
In [7]:
dt = 0.01
y_list = []
# convert each sequence of spike times into a binary spike train on the dt grid
for tsp in tsp_list:
    tsp = np.array(tsp).astype(float)
    tsp_int = np.ceil((tsp - dt*0.001)/dt)
    tsp_int = np.reshape(tsp_int,(tsp_int.shape[0],1))
    tsp_int = tsp_int.astype(int)
    y_list.append(np.array([item in tsp_int for item in np.arange(Stim.shape[0]/dt)+1]).astype(int))
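The per-bin membership test above is simple but slow for long recordings; an essentially equivalent, vectorized construction (a sketch, identical up to edge conventions at the bin boundaries) histograms the spike times onto the same dt grid:
# Vectorized alternative: histogram the spike times onto the dt grid and
# threshold the counts (assumes spike times lie within [0, Stim.shape[0]]).
nbins = int(Stim.shape[0]/dt)
edges = np.arange(nbins + 1)*dt
y_list_alt = [(np.histogram(np.array(tsp).astype(float), bins=edges)[0] > 0).astype(int)
              for tsp in tsp_list]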
Creating filters:
In [8]:
# create a stimulus filter
kpeaks = np.array([0,round(20/3)])
pars_k = {'neye':5,'n':5,'kpeaks':kpeaks,'b':3}
K,K_orth,kt_domain = filters.createStimulusBasis(pars_k, nkt = 20)
In [9]:
# create a post-spike filter
hpeaks = np.array([0.1,2])
pars_h = {'n':5,'hpeaks':hpeaks,'b':.4}
H,H_orth,ht_domain = filters.createPostSpikeBasis(pars_h,dt)
In [10]:
# Interpolate Post Spike Filter
MSP = auxfun.makeInterpMatrix(len(ht_domain),1)
MSP[0,0] = 0
H_orth = np.dot(MSP,H_orth)
Conditional Intensity (spike rate):
$$\lambda_{\beta}(i) = \exp\Big(K(\beta_k)*Stim + H(\beta_h)*y_i + \sum_{j\ne i}I(\beta_{I_j})*y_j + dc\Big)$$
Here $I(\beta_{I_j})$ is an interpolating filter with basis $I$ and coefficients $\beta_{I_j}$ associated with the output $y_j$ of the other neuron. It is assumed that the post-spike history basis and the interpolating basis are the same, $H = I$. The intensity can then be rewritten as a function of a single covariate matrix, constructed below.
Creating a matrix of covariates:
In [11]:
M_k = lk.construct_M_k(Stim,K,dt)
In [12]:
# creating a H-matrix for each response and merging them in one covariate matrix M_h
M_h_list = []
for tsp in tsp_list:
    tsp = np.array(tsp).astype(float)
    M_h_list.append(lk.construct_M_h(tsp,H_orth,dt,Stim))
#M_h_list[1] = np.zeros(M_h_list[0].shape)
M_h = np.hstack(tuple(M_h_list))
In [13]:
# combining all covariate matrices
M = np.hstack((M_k,M_h,np.ones((M_h.shape[0],1))))
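Stacking the stimulus covariates, the post-spike covariates of the two neurons, and a constant column gives the combined matrix, with the coefficient vector ordered to match:
$$M = \big[\,M_k \;\; M_{h,1} \;\; M_{h,2} \;\; \mathbf{1}\,\big],\qquad \beta = \big(\beta_k,\ \beta_{h,1},\ \beta_{h,2},\ dc\big)^T$$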
Conditional intensity as a function of the covariates: $$ \lambda_{\beta} = \exp(M\beta) $$
Create a Poisson process model with this intensity:
In [14]:
model = PP.PPModel(M.T,dt = dt/100)
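The internals of PP.PPModel are not shown in this notebook; as a rough sketch, under the assumption that it implements a standard discrete-time Poisson GLM likelihood, the quantity evaluated by model.negLogL would look something like:
# Minimal sketch of a discrete-time Poisson GLM negative log-likelihood
# (assumption: PP.PPModel does something along these lines; details such as
# binning and constant terms may differ).
def neg_log_likelihood_sketch(beta, M, y, dt):
    lam = np.exp(np.dot(M, beta))              # conditional intensity per bin
    return dt*np.sum(lam) - np.sum(y*np.log(lam*dt))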
Setting initial parameters:
In [15]:
coeff_k0 = np.array([ 0.061453,0.284916,0.860335,1.256983,0.910615,0.488660,-0.887091,0.097441,0.026607,-0.090147])
coeff_h0 = np.zeros((5*nofCells,))
dc0 = 3
pars0 = np.hstack((coeff_k0,coeff_h0,dc0))
In [16]:
print(pars0)
# an alternative starting point (these values are overwritten below)
pars0 = np.array([0.15587, 0.52019, 1.09108, 1.42535, 0.83192, 0.37496, -1.35405,
                  0.45916, -0.01016, -0.03577, -15.18000, 38.24000, -67.58000,
                  -14.06000, -3.36000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 3])
# zero out the last six entries (second neuron's post-spike coefficients and dc)
pars0[15:] = np.zeros((6,))
# the optimization is ultimately started from all zeros
pars0 = np.zeros((21,))
#pars0 = np.hstack((np.ones((20,)),np.ones((1,))))
print(pars0)
Fitting the likelihood for each neuron:
In [17]:
res_list = []
for y in y_list:
    print('Initial likelihood is '+str(model.negLogL(pars0,y)))
    res_list.append(model.fit(y,start_coef = pars0, maxiter = 500, method = 'L-BFGS-B'))
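model.fit belongs to PoissonProcessClasses and its implementation is not shown here; calling SciPy directly with the same optimizer settings would look roughly like the following (a sketch, not the library's actual implementation):
# Hypothetical direct use of scipy.optimize with the same settings;
# PPModel.fit may add extras (gradients, callbacks) not reproduced here.
from scipy.optimize import minimize
res = minimize(model.negLogL, pars0, args=(y_list[0],),
               method='L-BFGS-B', options={'maxiter': 500})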
In [18]:
for res in res_list:
    print('\n Optimization results:\n'+str(res))
In [19]:
k_coeff = np.array([0.061453, 0.284916, 0.860335, 1.256983, 0.910615, 0.488660, -0.887091, 0.097441, 0.026607, -0.090147])
h_coeff = np.array([-15.18,38.24,-67.58,-14.06,-3.36])
In [20]:
for i in range(len(res_list)):
    k_coeff_predicted = res_list[i].x[:10]
    h_coeff_predicted = np.reshape(res_list[i].x[10:-1],(H.shape[1],nofCells),order = 'F')
    print('Estimated dc for neuron '+str(i)+': '+str(res_list[i].x[-1]))
    fig,axs = plt.subplots(1,2,figsize = (10,5))
    fig.suptitle('Neuron%d'%(i+1))
    axs[0].plot(-kt_domain[::-1],np.dot(K,k_coeff_predicted),'r',label = 'predicted')
    axs[0].set_title('Stimulus Filter')
    axs[0].plot(-kt_domain[::-1],np.dot(K,k_coeff),'b',label = 'true')
    axs[0].plot(-kt_domain[::-1],np.dot(K,pars0[:10]),'g',label = 'initial')
    axs[0].set_xlabel('Time')
    axs[0].legend(loc = 'upper left')
    axs[1].plot(ht_domain,np.dot(H_orth,h_coeff_predicted),'r',label = 'predicted')
    axs[1].plot(ht_domain,np.dot(H_orth,h_coeff),'b',label = 'true')
    axs[1].plot(ht_domain,np.dot(H_orth,coeff_h0[:H_orth.shape[1]]),'g',label = 'initial')
    axs[1].set_title('Post-Spike Filter')
    axs[1].set_xlabel('Time')
    axs[1].legend(loc = 'upper right')
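Beyond the visual comparison, a quick numerical check of how close the estimated stimulus-filter coefficients are to the true ones could look like this (a hypothetical addition, not part of the original analysis):
# Relative L2 error between estimated and true k coefficients
# (assumes k_coeff holds the true values used to generate the data, as above).
for i, res in enumerate(res_list):
    rel_err = np.linalg.norm(res.x[:10] - k_coeff)/np.linalg.norm(k_coeff)
    print('Neuron %d: relative error in k coefficients = %.3f' % (i+1, rel_err))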