In [1]:
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Local libraries
import sys
sys.path.append("../")
from signals.spikes import inh_poisson_generator
from signals.spikes import sparse_to_dense
# Scipy signal-processing utilities
from scipy.signal import hamming, boxcar, fftconvolve
from inputs.sensors import Sensor, PerceptualSpace
from inputs.lag_structure import LagStructure
# Visualization libraries
from visualization.sensor_clustering import visualize_cluster_matrix
from visualization.sensors import visualize_SLM
from visualization.sensors import visualize_STDM_seaborn
from visualization.time_cluster import visualize_time_cluster_matrix
from visualization.code_vectors import visualize_code_vectors
%matplotlib inline
In [2]:
# We set the random seed for reproducibility
np.random.seed(100)
In [3]:
Tperiod = 1000.0 # One second
w = 2 * np.pi / Tperiod
t_start = 0.0
t_stop = 5000.0
dt = 1.0
t = np.arange(t_start, t_stop, dt)
A_rate = 100.0
rates = A_rate * np.sin(w * t) + A_rate
plt.plot(t, rates)
Out[3]: [plot: sinusoidal firing rate (Hz) over time (ms)]
Next we generate the spikes twice, each time in both a sparse representation (a list of spike times) and a dense one (a binary vector with one bin per dt).
In [4]:
sparse_spikes = inh_poisson_generator(rates, t, t_stop)
dense_spikes = sparse_to_dense(sparse_spikes, dt, t_stop)
sparse_spikes2 = inh_poisson_generator(rates, t, t_stop)
dense_spikes2 = sparse_to_dense(sparse_spikes2, dt, t_stop)
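To make the relation between the two representations concrete, here is a minimal hand-rolled sketch; it only assumes that the dense vector has t_stop / dt bins with a 1 in bin int(spike_time / dt), which is what sparse_to_dense appears to produce.
In [ ]:
# Hedged sketch: build a tiny dense vector by hand and recover the
# spike times back from its nonzero bins.
example_times = np.array([3.0, 10.0, 42.0])
example_dense = np.zeros(int(t_stop / dt))
example_dense[(example_times / dt).astype(int)] = 1
print(np.flatnonzero(example_dense) * dt)  # -> [  3.  10.  42.]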
As a sanity check we compute the mean firing rate over the whole recording, converting the count per millisecond into spikes per second (Hz).
In [5]:
mean_rate = (1000.0 / t_stop) * sparse_spikes.size  # spike count / duration in seconds -> Hz
print('mean rate', mean_rate)
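Since the sine averages to zero over whole periods, the time average of A_rate * sin(w * t) + A_rate is just A_rate, so the value printed above should land near 100 Hz. The analytic expectation, from the rate profile itself:
In [ ]:
# Time-averaged rate of the profile we sampled from.
print('expected mean rate', rates.mean())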
In [6]:
plt.plot(t, rates, label='firing_rate')
ones = A_rate * np.ones_like(sparse_spikes)
plt.plot(sparse_spikes, ones, '*', label='spikes')
plt.title('Poisson Process (1)')
plt.legend()
plt.ylim([0, 250])
Out[6]: [plot: rate profile and spike raster, realization 1]
In [7]:
plt.plot(t, rates, label='firing_rate')
ones = A_rate * np.ones_like(sparse_spikes2)
plt.plot(sparse_spikes2, ones, '*', label='spikes')
plt.title('Poisson Process (2)')
plt.legend()
plt.ylim([0, 250])
Out[7]: [plot: rate profile and spike raster, realization 2]
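As an extra, optional check of Poisson-like variability: for a homogeneous Poisson process the inter-spike intervals have a coefficient of variation near 1; the sinusoidal rate modulation used here typically pushes it somewhat above that.
In [ ]:
# Coefficient of variation of the inter-spike intervals.
isi = np.diff(sparse_spikes)
print('CV of ISIs', isi.std() / isi.mean())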
In [8]:
kernel_duration = 50.0
kernel_size = int(kernel_duration / dt)
norm = 1000.0 / kernel_duration
kernel = norm * hamming(kernel_size)
plt.plot(kernel)
Out[8]: [plot: normalized Hamming kernel]
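The factor 1000.0 / kernel_duration converts counts per kernel window into spikes per second. That conversion is exact for a rectangular (boxcar) window; the Hamming taper scales the estimate down by the window's mean value, roughly 0.53 here. A quick check, using the boxcar import from the first cell:
In [ ]:
# With this normalization, a kernel whose samples sum to 1000 / dt
# integrates to 1 and so preserves the spike count as a rate in Hz.
box_kernel = norm * boxcar(kernel_size)
print(box_kernel.sum() * dt / 1000.0)  # -> 1.0 (unbiased)
print(kernel.sum() * dt / 1000.0)      # -> ~0.53 (Hamming taper)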
In [9]:
convolution1 = fftconvolve(dense_spikes, kernel, 'valid')
convolution2 = fftconvolve(dense_spikes2, kernel, 'valid')
plt.plot(convolution1, label='convolution 1')
plt.plot(convolution2, label='convolution 2')
plt.plot(rates, label='original')
plt.legend()
Out[9]: [plot: smoothed spike trains compared with the original rate]
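Note that 'valid' mode trims the edges: the smoothed traces have len(dense_spikes) - kernel_size + 1 samples, so they are shorter than rates and shifted by roughly half a kernel on the shared index axis above.
In [ ]:
# Length bookkeeping for 'valid' convolution
# (assuming the dense vectors have t_stop / dt bins: 4951 vs. 5000 here).
print(len(convolution1), len(rates))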
In [10]:
lag_times = np.arange(0, 2 * Tperiod)  # lags spanning two full periods
tau = 2 * Tperiod
window_size = 1 * Tperiod
Nwindowsize = int(window_size / dt)
# weights = np.exp( -np.arange(Nwindowsize) / tau)
weights = None
lag_structure = LagStructure(lag_times=lag_times, weights=weights, window_size=window_size)
sensor1 = Sensor(convolution1, dt, lag_structure)
sensor2 = Sensor(convolution2, dt, lag_structure)
sensors = [sensor1, sensor2]
perceptual_space = PerceptualSpace(sensors, lag_first=True)
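LagStructure's internals are not shown in this notebook, but the idea is that each sensor exposes windows of its signal taken at every lag. A hypothetical numpy sketch of one such lag-by-window matrix (illustration only, not the library's code):
In [ ]:
# Hypothetical sketch: one window of Nwindowsize samples per lag.
lags = lag_times.astype(int)
lagged_windows = np.array([convolution1[l:l + Nwindowsize] for l in lags])
print(lagged_windows.shape)  # -> (2000, 1000)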
In [11]:
from nexa.nexa import Nexa
Nspatial_clusters = 2 # Number of spatial clusters
Ntime_clusters = 4 # Number of time clusters
Nembedding = 3 # Dimension of the embedding space
# Now the Nexa object
nexa_object = Nexa(perceptual_space, Nspatial_clusters,
Ntime_clusters, Nembedding)
# Make all the calculations
nexa_object.calculate_all()
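Nexa's internals are beyond the scope of this notebook, but one natural ingredient for clustering sensors spatially is a pairwise distance between their signals. As a rough illustration (not necessarily Nexa's actual computation), a correlation distance between the two smoothed trains:
In [ ]:
# Illustration only: correlation distance between the sensor signals.
signals = np.vstack([convolution1, convolution2])
print(1.0 - np.corrcoef(signals))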
In [12]:
%matplotlib inline
fig = visualize_SLM(nexa_object)
plt.show()
In [13]:
%matplotlib qt
# fig = visualize_STDM(nexa_object)
fig = visualize_STDM_seaborn(nexa_object)
plt.show()
In [14]:
%matplotlib inline
fig = visualize_cluster_matrix(nexa_object)