In [ ]:
import os, sys
import numpy as np
import matplotlib.pyplot as plt
from TimeFreqAuditoryScene import *
from Chambers import *
from IPython.display import Audio, display, clear_output
import ipywidgets as widgets
from ipywidgets import interactive
%matplotlib inline

Contextual effects on Shepard tones (Chambers & Pressnitzer)


In [ ]:
# Parameterization
# Global parameters
fs = 44100       # sampling rate (Hz)
# Shepard tones
delay = 1./8.    # interval between successive tones, in seconds (inter_tone_interval / delay_sp below)
duration = 1./8. # duration of each tone, in seconds (tone_duration / duration_sp below)
fb1 = 12.        # base frequency used for the first Shepard tone (fb_T1) and the tritone (fb)
# Gaussian envelope on the log-frequency axis
mu_log = np.log(200)  # envelope center (log frequency)
sigma_log = 2.        # envelope width (log frequency)
genv = GaussianSpectralEnvelope(mu_log=mu_log, sigma_log=sigma_log)

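A Shepard tone is a sum of octave-spaced pure tones under a fixed spectral envelope, which makes its chroma well defined while leaving its pitch height ambiguous. The cell below is a minimal NumPy sketch of that construction, independent of the TimeFreqAuditoryScene classes; the function name, partial count and normalization are illustrative assumptions only.


In [ ]:
# Minimal sketch (not the TimeFreqAuditoryScene implementation): sum
# octave-spaced partials weighted by a Gaussian envelope in log frequency.
def shepard_tone_sketch(fb=12., duration=1./8., fs=44100,
                        mu_log=np.log(200), sigma_log=2.):
    t = np.arange(int(duration*fs)) / float(fs)
    x = np.zeros_like(t)
    f = fb
    while f < fs/2.:
        amp = np.exp(-(np.log(f) - mu_log)**2 / (2.*sigma_log**2))
        x += amp * np.sin(2*np.pi*f*t)
        f *= 2.  # next octave
    return x / np.max(np.abs(x))

display(Audio(shepard_tone_sketch(), rate=fs, autoplay=False))
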
Context construction

The context is built relative to the tritone.

The frequency axis is cut into half-octave bands whose boundaries align with the tones of the tritone.

Each band is labeled 'up' or 'down' according to whether it sits directly above or below the tones of the first Shepard tone of the tritone.

The label names should be understood in terms of the perceptual bias the context induces on the tritone, i.e. whether it tends to be heard as ascending or descending.

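As a rough illustration of how a semitone range such as [1, 5] maps to a frequency band: a half octave spans 6 semitones, so components drawn 1 to 5 semitones away from a tone of the first Shepard tone stay strictly between it and the corresponding tone of the tritone partner. The actual placement rules live in the Context class of Chambers.py, so the direction convention in the sketch below is an assumption.

In [ ]:
# Hypothetical illustration of the semitone-to-frequency mapping around one
# component of the first Shepard tone (T1); the real rules are in Chambers.py.
def band_edges(f_component, range_st=(1, 5), label='up'):
    lo, hi = range_st
    if label == 'up':   # assumed: band directly above the component
        return f_component * 2**(lo/12.), f_component * 2**(hi/12.)
    else:               # assumed: mirror-image band directly below
        return f_component / 2**(hi/12.), f_component / 2**(lo/12.)

f0 = fb1 * 2**5  # one component of T1, five octaves above the base frequency
for label in ('up', 'down'):
    print(label, band_edges(f0, range_st=(1, 5), label=label))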

In [ ]:
def up_or_down(type='up', n_context=3, delay_context=0.2):

    range_context = [1, 5]  # context band, in semitones

    scene = Scene()
    run_time = 0

    # Constructing the biasing context: a sequence of Shepard chords drawn
    # from the 'up' or 'down' band around the first tone of the tritone
    context = Context(fb_T1=fb1,
                      n_tones=n_context,
                      tone_duration=duration,
                      inter_tone_interval=delay,
                      env=genv,
                      type='chords',
                      bias=type,
                      range_st=range_context)

    # The ambiguous tritone starts after the context plus a silent gap
    run_time += context.getduration() + delay_context
    trt = Tritone(fb=fb1,
                  duration_sp=duration,
                  delay_sp=delay, env=genv, delay=run_time)

    scene.add([context, trt])

    # Draw the spectrogram of the scene
    sd = SceneDrawer()
    sd.draw(scene)
    plt.show()

    # Generate and play the sound
    x = scene.generate(fs=fs)
    print(x.shape)

    display(Audio(x, rate=fs, autoplay=True))
    return scene

w = interactive(up_or_down,
                type=('up', 'down'), n_context=(0, 10), delay_context=(0.1, 5))
display(w)
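
The function can also be called directly, outside the widget, to render a single condition; the argument values below are only an example.

In [ ]:
# Example: one 'down'-biased scene, rendered without the widget
scene = up_or_down(type='down', n_context=5, delay_context=0.5)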

In [ ]: