In [1]:
import os

import numpy as np
import scipy.signal as scisig
from scipy.io import wavfile
import matplotlib.pyplot as plt
import tensorflow as tf

In [2]:
def snr(signal, recon):
    """Return the signal-to-noise ratio of a reconstruction in dB."""
    ratio = np.var(signal) / np.var(signal - recon)
    return 10 * np.log10(ratio)
    
# dynamic compressive gammachirp
def dcGC(t, f):
    """Dynamic compressive gammachirp filter as defined by Irino,
    with parameters from Park as used in Charles, Kressner, & Rozell.
    The log term is regularized to log(t + 1e-6).
    t : time in seconds, greater than 0
    f : characteristic frequency in Hz
    One but not both arguments may be numpy arrays.
    """
    ERB = 0.1039 * f + 24.7
    return (t**3 * np.exp(-2 * np.pi * 1.14 * ERB * t)
            * np.cos(2 * np.pi * f * t + 0.979 * np.log(t + 1e-6)))

# adapted from scipy cookbook
lowcut = 100
highcut = 6000
def butter_bandpass(lowcut, highcut, fs, order=5):
    nyq = 0.5 * fs
    low = lowcut / nyq
    high = highcut / nyq
    b, a = scisig.butter(order, [low, high], btype='band')
    return b, a

def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    b, a = butter_bandpass(lowcut, highcut, fs, order=order)
    y = scisig.lfilter(b, a, data)
    return y
    
def plot_spikegram(spikes, sample_rate, markerSize=0.0001):
    """Adapted from https://github.com/craffel/spikegram-coding/blob/master/plotSpikeGram.py"""
    nkernels = spikes.shape[0]
    indices = np.transpose(np.nonzero(spikes))
    scales_kernels_offsets = [(spikes[idx[0], idx[1]], idx[0], idx[1]) for idx in indices]

    for scale, kernel, offset in scales_kernels_offsets:
        # put a dot at each spike; kernels on the y axis, dot size scales with the coefficient
        plt.plot(offset / sample_rate, nkernels - kernel, 'k.',
                 markersize=markerSize * np.abs(scale))
    plt.title("Spikegram")
    plt.xlabel("Time (s)")
    plt.ylabel("Kernel")
    plt.axis([0.0, spikes.shape[1] / sample_rate, 0.0, nkernels])
    plt.show()
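
A quick sanity check of the helpers above, as a sketch; the 1 kHz kernel, noise inputs, and marker size are illustrative choices, not from the original analysis.

In [ ]:
fs = 16000
t = np.linspace(0, 0.05, int(0.05 * fs))
plt.plot(t, dcGC(t, 1000))   # one 1 kHz gammachirp kernel
plt.show()

noise = np.random.randn(fs)
filtered = butter_bandpass_filter(noise, lowcut, highcut, fs)
print(snr(filtered, filtered + 0.1 * np.random.randn(fs)))  # SNR of a noisy copy

# a toy spikegram: 20 unit spikes scattered over 32 kernels
spikes = np.zeros((32, 2000))
spikes[np.random.randint(32, size=20), np.random.randint(2000, size=20)] = 1.0
plot_spikegram(spikes, fs, markerSize=5)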

In [4]:
class SignalSet:

    def __init__(self, sample_rate=16000, data='../Data/TIMIT/'):
        self.sample_rate = sample_rate
        if isinstance(data, str):
            self.load_from_folder(data)
        else:
            self.data = data
            self.ndata = len(data)
            
    def load_from_folder(self, folder='../Data/TIMIT/'):
        min_length = 800  # TODO: should not be hard-coded
        self.data = []
        for ff in os.listdir(folder):
            if ff.endswith('.wav'):
                file = os.path.join(folder, ff)
                rate, signal = wavfile.read(file)
                if rate != self.sample_rate:
                    raise NotImplementedError('The signal in ' + ff +
                                              ' does not match the given sample rate.')
                if signal.shape[0] > min_length:
                    # normalize, then bandpass
                    signal = signal / signal.std()
                    signal = butter_bandpass_filter(signal, lowcut, highcut,
                                                    self.sample_rate, order=5)
                    self.data.append(signal)
        self.ndata = len(self.data)
        print("Found", self.ndata, "files")
        
    def rand_stim(self):
        """Get one random signal, peak-normalized."""
        which = np.random.randint(low=0, high=self.ndata)
        # copy so the normalization below doesn't mutate the stored data
        signal = self.data[which].copy()
        signal /= np.max(signal)  # as in Smith & Lewicki
        return signal

    def write_sound(self, filename, signal):
        signal = signal / np.max(signal)  # avoid mutating the caller's array
        wavfile.write(filename, self.sample_rate, signal)
        
    def tiled_plot(self, stims):
        """Tiled plots of the given signals. Zeroth index is which signal.
        Kind of slow, expect about 10s for 100 plots."""
        nstim = stims.shape[0]
        plotrows = int(np.sqrt(nstim))
        plotcols = int(np.ceil(nstim/plotrows))
        f, axes = plt.subplots(plotrows, plotcols, sharex=True, sharey=True)
        for ii in range(nstim):
            axes.flatten()[ii].plot(stims[ii])
        f.subplots_adjust(hspace=0, wspace=0)
        plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)
        plt.setp([a.get_yticklabels() for a in f.axes[:-1]], visible=False)
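
A minimal usage sketch for SignalSet: the constructor also accepts a preloaded sequence of arrays, so synthetic noise stands in for TIMIT on disk here.

In [ ]:
# ten one-second noise 'utterances' as a stand-in for TIMIT
fake_data = [np.random.randn(16000) for _ in range(10)]
sigs = SignalSet(sample_rate=16000, data=fake_data)
stim = sigs.rand_stim()
sigs.tiled_plot(np.stack([sigs.rand_stim()[:800] for _ in range(9)]))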

In [13]:
class MatchingPursuer:
    
    def __init__(self,
                 data='../Data/TIMIT/',
                 data_dim=1,
                 nunits=32,
                 filter_time=0.05,
                 learn_rate=0.01,
                 thresh=0.5,
                 normed_thresh=None,
                 max_iter=100,
                 min_spike=0.01,
                 mask_epsilon=None,
                 sample_rate=16000,
                 paramfile='dummy'):

        self.thresh = thresh
        self.min_spike = min_spike
        self.sample_rate = sample_rate
        self.nunits = nunits
        self.lfilter = int(filter_time * self.sample_rate)
        self.normed_thresh = normed_thresh or 2 / np.sqrt(self.lfilter)
        self.mask_epsilon = mask_epsilon or 0.01 * np.sqrt(1 / self.lfilter)
        self.max_iter = max_iter
        self.data_dim = data_dim
        
    def initial_filters(self, gammachirp=False):
        """For 1D data, return either a set of gammachirp filters or random
        (normal) filters, not normalized. Otherwise return Gaussian noise."""
        if self.data_dim == 1:
            if gammachirp:
                gammachirps = np.zeros([self.nunits, self.lfilter], dtype=np.float32)
                freqs = np.logspace(np.log10(100), np.log10(6000), self.nunits)
                times = np.linspace(0, self.lfilter / self.sample_rate, self.lfilter)
                for ii in range(self.nunits):
                    gammachirps[ii] = dcGC(times, freqs[ii])
                filters = gammachirps
            else:
                filters = tf.random_normal([self.nunits, self.lfilter])
            return tf.expand_dims(filters, 2)
        else:
            # multichannel data: one frequency channel per data dimension
            return tf.random_normal([self.nunits, self.lfilter, self.data_dim])
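
Before the TensorFlow graph, a minimal NumPy sketch of the greedy inference loop it is meant to implement: each pass adds one spike, the largest correlation between the residual and any kernel at any offset. The helper name `mp_encode` and the exact alignment convention are our assumptions, not part of the class.

In [ ]:
def mp_encode(signal, filters, max_iter=100, min_spike=0.01):
    """Greedy convolutional matching pursuit (NumPy reference).
    filters : array [nunits, lfilter], rows assumed unit-norm.
    Mirrors the TF loop below: positive-only spikes, stop at max_iter
    or when the best coefficient drops below min_spike."""
    nunits, lfilter = filters.shape
    resid = np.array(signal, dtype=float)
    coeffs = np.zeros((nunits, len(signal)))
    for _ in range(max_iter):
        # cross-correlate the residual with every kernel
        corrs = np.array([np.correlate(resid, f, mode='same') for f in filters])
        k, t = np.unravel_index(np.argmax(corrs), corrs.shape)
        val = corrs[k, t]
        if val < min_spike:
            break
        coeffs[k, t] += val
        # subtract the scaled kernel; 'same' correlation puts its start at t - lfilter//2
        start = t - lfilter // 2
        lo, hi = max(start, 0), min(start + lfilter, len(signal))
        resid[lo:hi] -= val * filters[k, lo - start:hi - start]
    return coeffs, resid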

In [22]:
g = tf.Graph()

# notebook trick: name the instance 'self' so method-style code can be pasted below
self = MatchingPursuer()

with g.as_default():

    x = tf.placeholder(tf.float32, shape=[1, None, self.data_dim, 1])

    phi = tf.Variable(self.initial_filters())
    # filters as [filter_width, 1, in_channels=1, out_channels=nunits]
    phi_for_conv = tf.expand_dims(tf.transpose(phi, [1, 2, 0]), 2)
    # time-reversed filters as [filter_width, 1, in_channels=nunits, out_channels=1]
    rev_phi = tf.reverse(phi, axis=[1])
    rev_phi_for_conv = tf.expand_dims(tf.transpose(rev_phi, [1, 2, 0]), 3)

    with tf.variable_scope('inference'):
        def while_body(kk, winning_val, coeffs, resid, error):
            convs = tf.nn.convolution(resid,
                                      phi_for_conv,
                                      padding="SAME", name='convolutions')
            winning_val = tf.reduce_max(convs)
            # tf.argmax needs an explicit axis, so take it over the flattened tensor
            winner = tf.cast(tf.argmax(tf.reshape(convs, [-1]), axis=0), tf.int32)
            # add the winning coefficient at the winning (time, kernel) position
            update = tf.scatter_nd(tf.reshape(winner, [1, 1]),
                                   tf.reshape(winning_val, [1]),
                                   tf.reshape(tf.size(convs), [1]))
            coeffs = coeffs + tf.reshape(update, tf.shape(convs))
            # restore the static shape lost in the reshape so the loop invariant holds
            coeffs.set_shape([1, None, self.data_dim, self.nunits])
            xhat = tf.nn.convolution(coeffs,
                                     rev_phi_for_conv,
                                     padding="SAME", name='reconstruction')
            resid = x - xhat
            error = tf.reduce_mean(tf.square(resid))
            return tf.add(kk, 1), winning_val, coeffs, resid, error

        def while_cond(kk, winning_val, coeffs, resid, error):
            maxitercheck = kk < self.max_iter
            spikecheck = winning_val > self.min_spike
            return tf.logical_and(maxitercheck, spikecheck)

        kk = tf.constant(0)
        onespike = tf.constant(0.0)
        error = tf.constant(1.0)
        # coefficients live in the convolution's output space
        # (one channel per kernel), not in the shape of x
        coeffs = tf.zeros(tf.stack([1, tf.shape(x)[1], self.data_dim, self.nunits]))
        coeffs.set_shape([1, None, self.data_dim, self.nunits])
        resid = tf.identity(x)
        inf_loop = tf.while_loop(while_cond, while_body,
                                 [kk, onespike, coeffs, resid, error],
                                 back_prop=False)
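
A quick smoke test of the graph, as a sketch: run the inference loop on one random input and inspect the final error. The synthetic stimulus and the TF 1.x Session usage are our assumptions; a real run would feed something like SignalSet().rand_stim().

In [ ]:
with g.as_default():
    init = tf.global_variables_initializer()

with tf.Session(graph=g) as sess:
    sess.run(init)
    sig = np.random.randn(8000).astype(np.float32)  # stand-in stimulus
    niter, last_spike, final_coeffs, final_resid, final_err = sess.run(
        inf_loop, feed_dict={x: sig.reshape(1, -1, 1, 1)})
    print('iterations:', niter, 'final mse:', final_err)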


