Simple 1-layer model with external stimulus

Mimics Ed Large's GrFNN-Toolbox-1.0 example1.m

A one layer network driven with a sinusoidal input. Several parameter sets are provided for experimentation with different types of intrinsic oscillator dynamics.

Preliminaries

Start by importing the necessary modules and packages. Also use some ipython magic for inline plotting.


In [1]:
# 0. Preliminares

%matplotlib inline

import sys
sys.path.append('../')  # needed to run the examples from within the package folder

import numpy as np
import matplotlib.pyplot as plt
import mpld3
mpld3.enable_notebook()


from pygrfnn.network import Model, make_connections
from pygrfnn.oscillator import Zparam
from pygrfnn.grfnn import GrFNN
from pygrfnn.vis import plot_connections
from pygrfnn.vis import tf_detail

Stimulus

We will use a complex sinusoid as an external stimulus.


In [2]:
# 1. Create Stimulus: Complex sinusoid

sr = 40.0  # sample rate
dt = 1.0/sr
t = np.arange(0, 50, dt)
fc = 1.0  # frequency
A = 0.25  # amplitude
s = A * np.exp(1j * 2 * np.pi * fc * t)

# ramp signal linearly up/down
ramp_dur = 0.02  # in secs
ramp = np.arange(0, 1, dt / ramp_dur)
env = np.ones(s.shape, dtype=float)
env[0:len(ramp)] = ramp
env[-len(ramp):] = ramp[::-1]
# apply envelope
s = s * env

# plot stimulus
mpld3.enable_notebook()
plt.plot(t, np.real(s))
plt.plot(t, np.imag(s))
plt.title('Stimulus')


Out[2]:
<matplotlib.text.Text at 0x10d563e50>

Create and Run the Model

Create a simple model consisting of a single unconnected GrFNN layer. Then, process the above stimulus with it and look at the time-frequency representation generated.


In [3]:
# 2. Define GrFNN params

# params = Zparam(-1,  0,  0, 0, 0, 1)  # Linear
params = Zparam( 0, -1, -1, 0, 0, 1)  # Critical
# params = Zparam( 0, -1, -1, 1, 0, 1)  # Critical with detuning
# params = Zparam( 1, -1, -1, 0, 0, 1)  # Limit Cycle
# params = Zparam(-1,  3, -1, 0, 0, 1)  # Double Limit-cycle

stimulus_conn_type = 'linear'
# stimulus_conn_type = 'active'
# stimulus_conn_type = 'allfreq'
# stimulus_conn_type = 'all2freq'

# 3. Create a GrFNN layer
layer = GrFNN(params, 
              frequency_range=(0.5, 2),
              num_oscs=200,
              stimulus_conn_type=stimulus_conn_type)


# 4. Create the model and add the layer
model = Model()
model.add_layer(layer)

# 5. run the model
model.run(s, t, dt)


2000/2000 done!

In [3]:
# 6. Plot results
TF = layer.TF
f = layer.f
T = 1.0 / f

mpld3.disable_notebook()
tf_detail(TF, t, f, None, np.max(t), np.real(s), np.abs)
# plt.show()
mpld3.enable_notebook()


2000/2000 done!
/usr/local/lib/python2.7/site-packages/matplotlib/image.py:349: UserWarning: Images are not supported on non-linear axes.
  warnings.warn("Images are not supported on non-linear axes.")

UPDATE

Taking advantage of JavaScript and mpld3, it is possible to have an interactive display that allows us to explore the TF representation at different instants.

This is my first attempt to do so, so the example is very rough, but it gets the job done. This solution is heavily inspired by this plugin example.


In [4]:
class TFViewPlugin(mpld3.plugins.PluginBase):
    """ TF vis JavaScript / Python interaction. """

    JAVASCRIPT = """
    mpld3.register_plugin("tfview", TFViewPlugin);
    TFViewPlugin.prototype = Object.create(mpld3.Plugin.prototype);
    TFViewPlugin.prototype.constructor = TFViewPlugin;
    TFViewPlugin.prototype.requiredProps = ["id_image", 
                                            "id_detail", 
                                            "id_tf", 
                                            "id_t", 
                                            "initial_t", 
                                            "t_span",
                                            "callback_func"];
    TFViewPlugin.prototype.defaultProps = {}

    function TFViewPlugin(fig, props){
        mpld3.Plugin.call(this, fig, props);
    };

    TFViewPlugin.prototype.draw = function(){
      var image = mpld3.get_element(this.props.id_image),
          detail_line = mpld3.get_element(this.props.id_detail),
          tf_line = mpld3.get_element(this.props.id_tf),
          t_line = mpld3.get_element(this.props.id_t),
          init_t = this.props.initial_t,
          t_span = this.props.t_span,
          callback_func = this.props.callback_func;

      var div = d3.select("#" + this.fig.figid);
                
      // this is way too hacky! Need to figure out a proper way to get the selection
      d3.select(image.image[0][0])
          // why is 'mouseup' not working, while 'mousedown' does!?
          .on("mouseup", function() {
              console.log("up");
          })
          .on("mousedown", function() {
              // console.log("down");
              // console.log(d3.mouse(this));
              // console.log(this.x.baseVal.value);
              // console.log(this.y.baseVal.value);
              // console.log(this.width.baseVal.value);
              // console.log(this.height.baseVal.value);
              var m = d3.mouse(this);
              var w = this.width.baseVal.value;
              var t0 = (t_span[1] - t_span[0]) * m[0]/w + t_span[0];
              var command = callback_func + "(" + t0 + ")";
              //console.log("running "+command);
              var callbacks = { 'iopub' : {'output' : handle_output}};
              var kernel = IPython.notebook.kernel;
              kernel.execute(command, callbacks, {silent:false});
              
              // because of the strage bug (see below), I update this line immediately
              t_line.data[0][0] = t0;
              t_line.data[1][0] = t0;
              t_line.elements()
                  .attr("d", t_line.datafunc(t_line.data));

          });

      function handle_output(out){
        var res = null;
        // if out is a print statement
        if (out.msg_type == "stream"){
          res = out.content.data;
        }
        // if out is a python object
        else if(out.msg_type === "pyout"){
          //console.log(out)
          res = out.content.data["text/plain"];
          //console.log(res)
          //console.log(JSON.parse(res)[0])
          //console.log(JSON.parse(res)[1])
          //console.log(JSON.parse(res)[2])          
        }
        // if out is a python error
        else if(out.msg_type == "pyerr"){
          res = out.content.ename + ": " + out.content.evalue;
          alert(res);
        }
        // if out is something we haven't thought of
        else{
          res = "[out type not implemented]";  
        }

        // Update detail_line data
        detail_line.data = JSON.parse(res)[0];
        detail_line.elements()
          .attr("d", detail_line.datafunc(detail_line.data));
          
        tf_line.data = JSON.parse(res)[1];
        tf_line.elements()
          .attr("d", tf_line.datafunc(tf_line.data));
        
        // Starnge bug: for some reason beyond my comprehension, even though
        // it seems to be exactly the same as `tf_line`, the following doesn't work
        //t_line.data = JSON.parse(res)[2];
        //t_line.elements()
        //  .attr("d", t_line.datafunc(t_line.data));

       }

    };
    """

    def __init__(self, image, detail_line, tf_line, t_line, init_t, t_span, callback_func):
        self.dict_ = {"type": "tfview",
                      "id_image": mpld3.utils.get_id(image),
                      "id_detail": mpld3.utils.get_id(detail_line),
                      "id_tf": mpld3.utils.get_id(tf_line),
                      "id_t": mpld3.utils.get_id(t_line),
                      "initial_t": init_t,
                      "t_span": t_span,
                      "callback_func": callback_func}

In [24]:
from pygrfnn.utils import find_nearest

def updateSlider(t_detail):
    t0, idx = find_nearest(t, t_detail)
    d = np.abs(TF[:, idx])
    detail = map(list, zip(list(d), list(f)))
    tf_detail = map(list, zip(list([t0, t0]), list([np.min(f), np.max(f)])))
    t_detail = map(list, zip(list([t0, t0]), list([np.min(np.real(s)), np.max(np.real(s))])))
    return [detail, tf_detail, t_detail]


t_span = [np.min(t), np.max(t)]
td = 2.0*np.max(t)/3.0
fig, image, tf_line, t_line, detail = tf_detail(TF, t, f, None, td, np.real(s), np.abs)

mpld3.plugins.connect(fig, 
                      TFViewPlugin(image, 
                                   detail[0], 
                                   tf_line[0], 
                                   t_line[0], 
                                   td, 
                                   t_span,
                                   callback_func="updateSlider"))



In [18]: