In [11]:
%matplotlib inline
import pylab
import numpy as np

In [18]:
class AdaptiveControl(object):
    def __init__(self, n_inputs, n_outputs, n_neurons, seed=None, learning_rate=1e-3):
        self.rng = np.random.RandomState(seed=seed)
        self.compute_encoders(n_inputs, n_neurons)
        self.initialize_decoders(n_neurons, n_outputs)
        self.learning_rate = learning_rate
        
    def step(self, state, error):
        # feed input over the static synapses
        current = self.compute_neuron_input(state)
        # do the neural nonlinearity
        activity = self.neuron(current)
        # apply the learned synapses
        value = self.compute_output(activity)
        
        # update the learned synapses: only the neurons that spiked this step
        # have their decoder weights nudged in the direction that reduces the error
        active = activity > 0
        self.decoder[:, active] -= error[:, None] * self.learning_rate
        # Note that this multiply can be changed to a shift if the learning_rate
        # is a power of 2 (see the power-of-2 sketch after this cell)
        
        return value
    
    def compute_encoders(self, n_inputs, n_neurons):
        # generate the static synapses
        # NOTE: this algorithm could be changed, and just needs to produce a similar
        # distribution of connection weights.  Changing this distribution slightly
        # changes the class of functions the neural network will be good at learning
        # each neuron gets a random maximum firing rate and a random
        # intercept (the input value at which it starts to respond)
        max_rates = self.rng.uniform(0.5, 1, n_neurons)
        intercepts = self.rng.uniform(-1, 1, n_neurons)
        
        # convert these into a gain and bias on the input current: the current
        # is 0 at the intercept and max_rate at an input of 1
        gain = max_rates / (1 - intercepts)
        bias = -intercepts * gain
        
        enc = self.rng.randn(n_neurons, n_inputs)
        enc /= np.linalg.norm(enc, axis=1)[:,None]
        
        self.encoder = enc * gain[:, None]
        self.bias = bias
                
    def initialize_decoders(self, n_neurons, n_outputs):
        # the learned synapses start at zero and are adjusted online in step()
        self.decoder = np.zeros((n_outputs, n_neurons))
        
    def compute_neuron_input(self, state):
        # there is currently still a multiply here.  But, since self.encoder is
        # randomly generated, we can replace this with any easy-to-compute
        # system.  For example, we could replace the multiplies with shifts
        # by rounding all the numbers to powers of 2 (see the power-of-2
        # sketch after this cell).
        return np.dot(self.encoder, state) + self.bias
    
    def neuron(self, current):
        # a neuron spikes whenever a random draw falls below its input current.
        # The random number generator could be an LFSR, or instead of a random
        # number generator you can keep an internal state and build an
        # accumulator (an integrate-and-fire neuron; see the accumulator
        # sketch after this cell)
        spikes = np.where(self.rng.uniform(0, 1, len(current)) < current, 1, 0)
        return spikes
    
    def compute_output(self, activity):
        # since the spikes are 0 or 1, the output is just the sum of the decoder
        # columns for the neurons that spiked (no multiplies needed here)
        return np.sum(self.decoder[:, activity > 0], axis=1)
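
The comment in neuron suggests two hardware-friendly alternatives to drawing a fresh random number each step: an LFSR, or keeping an internal state and building an accumulator (an integrate-and-fire neuron). Below is a minimal sketch of the accumulator idea; the class name, the threshold of 1, and the subtract-on-spike reset are illustrative assumptions, not part of the model above.

In [ ]:
# a minimal sketch of the accumulator (integrate-and-fire) alternative to the
# random-number neuron; the threshold and reset choices are illustrative
class AccumulatorNeurons(object):
    def __init__(self, n_neurons):
        self.voltage = np.zeros(n_neurons)        # internal state per neuron

    def step(self, current):
        # accumulate the input current each timestep
        self.voltage += current
        # a neuron spikes once its accumulated value crosses the threshold
        spikes = np.where(self.voltage >= 1.0, 1, 0)
        # subtract the threshold for the neurons that spiked
        self.voltage[spikes > 0] -= 1.0
        # don't let the state drift negative when the current is negative
        self.voltage = np.maximum(self.voltage, 0)
        return spikes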

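The comments in step and compute_neuron_input note that the remaining multiplies could be turned into shifts if the learning rate and the connection weights are powers of 2. Here is a rough sketch of what that quantization might look like; the round_to_power_of_2 helper and the particular learning rate are assumptions for illustration, and the log2/exp2 rounding stands in for what would be a shift on fixed-point hardware.

In [ ]:
# a rough sketch of the power-of-2 idea: round the random encoder weights, the
# bias, and the learning rate to powers of 2 so that, on fixed-point hardware,
# every multiply becomes a shift.  This is illustrative, not the original class.
def round_to_power_of_2(x):
    # keep the sign, round the magnitude to the nearest power of 2
    sign = np.sign(x)
    magnitude = np.exp2(np.round(np.log2(np.abs(x) + 1e-12)))
    return sign * magnitude

ac2 = AdaptiveControl(n_inputs=1, n_outputs=1, n_neurons=100, seed=1,
                      learning_rate=2.0**-10)        # power-of-2 learning rate
ac2.encoder = round_to_power_of_2(ac2.encoder)       # shift-friendly weights
ac2.bias = round_to_power_of_2(ac2.bias)

# with the probabilistic neuron, the expected spikes per step equal the input
# current clipped to [0, 1], so this shows the quantized tuning curves
inputs = np.linspace(-1, 1, 100)
currents = np.array([ac2.compute_neuron_input([x]) for x in inputs])
pylab.plot(inputs, np.clip(currents, 0, 1));
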
In [23]:
n_neurons = 100
ac = AdaptiveControl(n_inputs=1, n_outputs=1, n_neurons=n_neurons, seed=1)

# estimate each neuron's tuning curve: its average spiking activity over
# many steps, for each input value
steps = 1000
inputs = np.linspace(-1, 1, 100)
rates = np.zeros((len(inputs), n_neurons))
for j in range(steps):
    for i, input in enumerate(inputs):
        current = ac.compute_neuron_input([input])
        activity = ac.neuron(current)
        rates[i, :] += activity/float(steps)
pylab.plot(inputs, rates);


Now let's try teaching the model the identity function (i.e. the output should be the same as the input). We train it by driving the input with a sine wave.


In [24]:
n_neurons = 100
ac = AdaptiveControl(n_inputs=1, n_outputs=1, n_neurons=n_neurons, seed=1, learning_rate=1e-3)

inputs = []
outputs = []
errors = []
error = np.zeros(1)
for i in range(2000):
    input = np.sin(i*2*np.pi/1000)
    output = ac.step([input], error)
    # the error (output minus target) is fed back on the next call to step,
    # so the learning rule sees the error with a one-timestep delay
    error[:] = output - input
    inputs.append(input)
    outputs.append(output)
    errors.append(output-input)

In [25]:
pylab.plot(inputs, label='input')
pylab.plot(outputs, label='output')
pylab.plot(errors, label='error')
pylab.legend(loc='best')
pylab.show()
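
As a quick sanity check (this cell is an addition, not part of the original run), we can sweep the input range with a zero error signal, so the decoders stay fixed, and average the decoded output over many steps to smooth out the spiking noise. A well-trained model should roughly trace the identity line.

In [ ]:
# evaluate the learned mapping with learning effectively turned off (zero error)
test_inputs = np.linspace(-1, 1, 50)
test_outputs = np.zeros(len(test_inputs))
steps = 500
for i, x in enumerate(test_inputs):
    for _ in range(steps):
        test_outputs[i] += ac.step([x], np.zeros(1))[0] / float(steps)
pylab.plot(test_inputs, test_inputs, '--', label='ideal')
pylab.plot(test_inputs, test_outputs, label='decoded')
pylab.legend(loc='best');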


