Start by making a Gabor filter function.
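For reference, the gabor() function below implements the standard 2-D Gabor filter, a Gaussian envelope multiplied by a sinusoidal carrier (x_offset and y_offset simply shift the centre of the patch before the rotation is applied):

$$
g(x, y) \;=\; \exp\!\left(-\frac{x'^2 + \gamma^2 y'^2}{2\sigma^2}\right)
\cos\!\left(\frac{2\pi x'}{\lambda} + \psi\right),
\qquad
x' = x\cos\theta + y\sin\theta,\quad
y' = -x\sin\theta + y\cos\theta.
$$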
In [9]:
import numpy as np

def gabor(width, height, lambd, theta, psi, sigma, gamma, x_offset, y_offset):
    x = np.linspace(-1, 1, width)
    y = np.linspace(-1, 1, height)
    X, Y = np.meshgrid(x, y)
    X = X - x_offset
    Y = Y + y_offset
    cosTheta = np.cos(theta)
    sinTheta = np.sin(theta)
    xTheta = X * cosTheta + Y * sinTheta
    yTheta = -X * sinTheta + Y * cosTheta
    e = np.exp(-(xTheta**2 + yTheta**2 * gamma**2) / (2 * sigma**2))
    cos = np.cos(2 * np.pi * xTheta / lambd + psi)
    return e * cos
g = gabor(500, 500, lambd=0.6,
          theta=np.pi/4,
          psi=np.pi/2,
          sigma=0.2,
          gamma=0.7,
          x_offset=0,
          y_offset=0)
g = g / np.linalg.norm(g)

import pylab
pylab.imshow(g, extent=(-1, 1, -1, 1), interpolation='none',
             vmin=np.min(g), vmax=np.max(g), cmap='gray')
pylab.show()
Now we make a bunch of these to be our encoders (and sample data)
In [10]:
import random

width = 50
height = 50
N = 100

def make_random_gabor(width, height):
    return gabor(width, height,
                 lambd=random.uniform(0.3, 0.8),
                 theta=random.uniform(0, 2*np.pi),
                 psi=random.uniform(0, 2*np.pi),
                 sigma=random.uniform(0.2, 0.5),
                 gamma=random.uniform(0.4, 0.8),
                 x_offset=random.uniform(-1, 1),
                 y_offset=random.uniform(-1, 1))

encoders = [make_random_gabor(width, height) for i in range(N)]

import pylab
pylab.figure(figsize=(10, 8))
for i in range(N):
    w = i % 12
    h = i // 12    # integer division (row index)
    pylab.imshow(encoders[i], extent=(w, w+0.95, h, h+0.95), interpolation='none',
                 vmin=-1, vmax=1, cmap='gray')
pylab.xticks([])
pylab.yticks([])
pylab.xlim((0, 12))
pylab.ylim((0, N // 12 + 1))   # enough rows to show all N patches
pylab.show()
In [11]:
N = 1000       # number of neurons
S = 500        # number of sample points (eval_points)
K = 3          # number of gabors per sample point
width = 50     # width (and height) of the patch

encoders = np.array([make_random_gabor(width, width) for i in range(N)])
encoders.shape = N, width * width

samples = np.zeros((S, width, width))
for i in range(K):
    samples += np.array([make_random_gabor(width, width) for j in range(S)])
samples.shape = S, width * width
samples = samples / np.linalg.norm(samples, axis=1)[:, None]   # normalize
Now let's create an integrator model
In [16]:
import nengo
neuron_type = nengo.LIFRate()    # default is nengo.LIF()

stim_image = make_random_gabor(width, width)
stim_image.shape = width * width
stim_image = stim_image / np.linalg.norm(stim_image)

g = gabor(width, width, lambd=0.6,
          theta=np.pi/4,
          psi=np.pi/2,
          sigma=0.2,
          gamma=0.7,
          x_offset=0,
          y_offset=0)
g = g / np.linalg.norm(g)

model = nengo.Network()
with model:
    stim = nengo.Node(lambda t: g.flatten() if t < 0.1 else np.zeros(width * width))
    ens = nengo.Ensemble(n_neurons=N, dimensions=width * width,
                         encoders=encoders, eval_points=samples,
                         neuron_type=neuron_type)
    conn = nengo.Connection(ens, ens, synapse=0.1)
    nengo.Connection(stim, ens, synapse=None)
    probe = nengo.Probe(ens, synapse=0.01)
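A brief aside on why this acts as a memory (my gloss; the notebook does not spell this out): the recurrent connection computes the identity function through a first-order low-pass synapse with time constant τ = 0.1 s. By the standard NEF dynamics argument, once the stimulus is switched off the represented value approximately obeys

$$
\dot{x}(t) \;\approx\; \frac{f\big(x(t)\big) - x(t)}{\tau} \;=\; 0 \qquad \text{since } f(x) = x,
$$

so the ensemble holds whatever image was loaded in, degrading only through representation error. The stimulus itself is injected unfiltered (synapse=None) for the first 0.1 s to load the image.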
Build the model and compute its error. This is purely the representation (decoding) error; it does not include spiking noise or the error that accumulates while holding information over time.
In [17]:
sim = nengo.Simulator(model)
print('rmse:', np.linalg.norm(sim.data[conn].solver_info['rmses']))
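The number printed above is the norm over the per-dimension RMSEs (one per represented dimension, 2500 of them here), so its absolute size is hard to interpret on its own. A rough way (my addition, reusing the same solver_info data) to get a relative figure is to compare the mean per-dimension RMSE against the typical magnitude of the target values:

# Rough relative-error check (assumes `conn`, `samples`, and `sim` from the cells above).
rmses = sim.data[conn].solver_info['rmses']      # one RMSE per output dimension
target_rms = np.sqrt(np.mean(samples ** 2))      # typical magnitude of an eval_point entry
print('mean per-dimension rmse:', np.mean(rmses))
print('relative error:', np.mean(rmses) / target_rms)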
In [18]:
sim.run(1)
Plot the represented value over time. t=0 is in the top-left, and time runs left to right along each row, then row by row downward. The decoded image should strengthen over the first 0.1 seconds (the first row) and then decay, since the actual stimulus is only presented for 0.1 seconds. The actual input is shown on the right.
In [20]:
data = sim.data[probe]
index = np.linspace(len(data)-1, 0, 100).astype(int)
vmin = np.min(data)
vmax = np.max(data)

import pylab
pylab.figure(figsize=(20, 8))
for i in range(100):
    w = 9 - i % 10
    h = i // 10    # integer division (row index)
    d = data[index[i]]
    d.shape = width, width
    pylab.imshow(d, extent=(w, w+0.95, h, h+0.95), interpolation='none',
                 vmin=vmin, vmax=vmax, cmap='gray')
pylab.xticks([])
pylab.yticks([])
pylab.xlim((0, 20))
pylab.ylim((0, 10))

d = g
d.shape = width, width
pylab.imshow(d, extent=(10.0, 20.0, 0.0, 10.0), interpolation='none',
             vmin=vmin, vmax=vmax, cmap='gray')
pylab.show()
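As a complementary check that is not in the original notebook, you can also reduce each decoded frame to a single number, its dot product with the (unit-norm) input image, and plot that against time to see the build-up and decay directly:

# Similarity between the decoded image and the input Gabor over time
# (assumes `sim`, `probe`, and `g` from the cells above).
similarity = np.dot(sim.data[probe], g.flatten())
pylab.figure()
pylab.plot(sim.trange(), similarity)
pylab.xlabel('time (s)')
pylab.ylabel('similarity to input image')
pylab.show()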
In theory, the above system should work fine as you increase N and S. However, we start running into performance problems, and they get worse as width increases. So let's also try the SVD trick to reduce the amount of math that needs to be done. Note that this SVD trick does not change anything about the model itself; it just lets us get away with much less memory and computation for all the vectors the system needs to store and process.
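Why this is safe (a short justification the notebook leaves implicit): a neuron's input current depends only on the dot product between its encoder and the represented image. Let B = U[:, :D] be the orthonormal basis kept below. The columns of B span (almost all of) the space the encoders live in, so each encoder satisfies e_i ≈ B Bᵀ e_i, and therefore

$$
e_i^\top x \;\approx\; e_i^\top B B^\top x \;=\; (B^\top e_i)^\top (B^\top x),
$$

i.e. running the model on the D-dimensional compressed vectors Bᵀx, with compressed encoders Bᵀe_i, drives the neurons (almost) exactly as the full 2500-dimensional model would.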
In [21]:
N = 1000       # number of neurons
S = 2000       # number of sample points (eval_points)
K = 3          # number of gabors per sample point
width = 50     # width (and height) of the patch

encoders = np.array([make_random_gabor(width, width) for i in range(N)])
encoders.shape = N, width * width

samples = np.zeros((S, width, width))
for i in range(K):
    samples += np.array([make_random_gabor(width, width) for j in range(S)])
samples.shape = S, width * width
samples = samples / np.linalg.norm(samples, axis=1)[:, None]   # normalize
In [22]:
U, sv, V = np.linalg.svd(encoders.T)   # sv holds the singular values
Figure out how many singular values are needed
In [23]:
pylab.plot(sv)
pylab.show()
Get rid of any singular values below 1% of the largest one.
In [24]:
D = np.where(sv < sv[0] * 0.01)[0][0]
print('number of dimensions to keep:', D)
basis = U[:, :D]
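As a quick sanity check (my addition), you can measure how much of the encoders' total energy the kept basis accounts for, since the squared singular values sum to the squared Frobenius norm:

# Fraction of the encoders' energy captured by the first D singular vectors
# (uses `sv` and `D` from the cell above).
print('energy captured:', np.sum(sv[:D]**2) / np.sum(sv**2))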
Helper functions for converting into and out of the compressed space
In [25]:
def compress(original):
    return np.dot(original, basis)

def uncompress(compressed):
    return np.dot(basis, compressed.T).T
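A small round-trip check (not from the original notebook): compressing and then uncompressing an encoder should give back nearly the same image, since the encoders are exactly what the basis was built from.

# Round-trip check for the helpers (uses `encoders`, `compress`, and `uncompress` from above).
e = encoders[0]
e_roundtrip = uncompress(compress(e))
print('relative round-trip error:', np.linalg.norm(e - e_roundtrip) / np.linalg.norm(e))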
Build and run the model using the compressed version
In [26]:
neuron_type = nengo.LIFRate()    # default is nengo.LIF()

stim_image = make_random_gabor(width, width)
stim_image.shape = width * width
stim_image = stim_image / np.linalg.norm(stim_image)

g = gabor(width, width, lambd=0.6,
          theta=np.pi/4,
          psi=np.pi/2,
          sigma=0.2,
          gamma=0.7,
          x_offset=0,
          y_offset=0)
g = g / np.linalg.norm(g)
g.shape = width * width

import nengo
model = nengo.Network()
with model:
    stim = nengo.Node(lambda t: compress(g) if t < 0.1 else np.zeros(D))
    ens = nengo.Ensemble(n_neurons=N, dimensions=D,
                         encoders=compress(encoders), eval_points=compress(samples),
                         neuron_type=neuron_type)
    conn = nengo.Connection(ens, ens, synapse=0.1)
    nengo.Connection(stim, ens, synapse=None)
    probe = nengo.Probe(ens, synapse=0.01)
In [27]:
sim = nengo.Simulator(model)
print('rmse:', np.linalg.norm(sim.data[conn].solver_info['rmses']))
In [28]:
sim.run(1)
In [30]:
data = uncompress(sim.data[probe])
index = np.linspace(len(data)-1, 0, 100).astype(int)
vmin = np.min(data)
vmax = np.max(data)

import pylab
pylab.figure(figsize=(20, 8))
for i in range(100):
    w = 9 - i % 10
    h = i // 10    # integer division (row index)
    d = data[index[i]]
    d.shape = width, width
    pylab.imshow(d, extent=(w, w+0.95, h, h+0.95), interpolation='none',
                 vmin=vmin, vmax=vmax, cmap='gray')
pylab.xticks([])
pylab.yticks([])
pylab.xlim((0, 20))
pylab.ylim((0, 10))

d = g
d.shape = width, width
pylab.imshow(d, extent=(10.0, 20.0, 0.0, 10.0), interpolation='none',
             vmin=vmin, vmax=vmax, cmap='gray')
pylab.show()