In [ ]:
import os, sys
import numpy as np
import matplotlib.pyplot as plt

from TimeFreqAuditoryScene import *
from IPython.display import Audio, display, clear_output
import ipywidgets as widgets
from ipywidgets import interactive
%matplotlib inline

In [ ]:
# Parameterization
# Global parameters
fs = 44100  # sampling rate (Hz)
# Shepard tones
delay = 1. / 8.     # silence between tones (s)
duration = 1. / 8.  # tone duration (s)
mu_log = np.log(200)  # center of the Gaussian spectral envelope (log Hz)
sigma_log = 2.        # width of the Gaussian spectral envelope (log Hz)
fb1 = 12.                # base frequency of the first test tone (Hz)
fb2 = fb1 * np.sqrt(2.)  # second test tone: half an octave (a tritone) above fb1
# Context parameters (step size and number of tones) are set inside up_or_down below
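
A Shepard tone stacks octave-spaced partials whose amplitudes follow the Gaussian envelope on log-frequency set by mu_log and sigma_log. The cell below is a minimal sketch of that construction, assuming this is roughly what TimeFreqAuditoryScene.ShepardTone computes internally; shepard_tone_sketch is a hypothetical helper for illustration only, not part of the library.

In [ ]:
def shepard_tone_sketch(fb, mu_log, sigma_log, duration, fs, n_octaves=12):
    # Sum octave-spaced partials f_k = fb * 2**k, each weighted by a Gaussian
    # envelope on log-frequency (an assumption about the library's construction)
    t = np.arange(int(duration * fs)) / float(fs)
    x = np.zeros_like(t)
    for k in range(n_octaves):
        f = fb * 2.0**k
        if f >= fs / 2.0:  # drop partials above the Nyquist frequency
            break
        amp = np.exp(-0.5 * ((np.log(f) - mu_log) / sigma_log)**2)
        x += amp * np.sin(2 * np.pi * f * t)
    return x / np.max(np.abs(x))  # normalize to avoid clipping

# Example: listen to one such tone built from the parameters above
# display(Audio(shepard_tone_sketch(fb1, mu_log, sigma_log, duration, fs), rate=fs))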

In [ ]:
def up_or_down(direction):
    # Build and play the stimulus: a half-step context followed by the test pair.
    # direction: 0 = descending context, 1 = ascending context
    abs_step = 0.5                      # context step size (semitones)
    n_context = int(1 * 12 / abs_step)  # one octave of half-step context tones

    # Descending context for direction 0, ascending for direction 1
    step = -abs_step if direction == 0 else abs_step  # step in semitones

    scene = Scene()
    # Gaussian amplitude envelope on log-frequency
    genv = GaussianSpectralEnvelope(mu_log=mu_log, sigma_log=sigma_log)


    run_time = 0

    # Constructing the context
    context = Node()

    # Each context tone steps one more half step away from fb1
    for i in range(1, n_context + 1):
        tmp_st = ShepardTone(fb=fb1 * 2.**(step * i / 12.), env=genv, delay=run_time, duration=duration)
        run_time += duration + delay
        context.add(tmp_st)

    run_time += delay

    # Test pair: fb2 = fb1 * sqrt(2) sits half an octave (a tritone) above fb1
    tone1 = ShepardTone(fb=fb1, env=genv, delay=run_time, duration=duration)
    run_time += duration + delay
    tone2 = ShepardTone(fb=fb2, env=genv, delay=run_time, duration=duration)
    scene.add([context, tone1, tone2])


    # draw spectrogram
    sd = SceneDrawer()
    sd.draw(scene)
    plt.show()
    # generate sound
    x = scene.generate(fs=fs)
    display(Audio(x, rate=fs, autoplay=True))
    
w = interactive(up_or_down, direction=(0, 1))
display(w)
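
Moving the slider rebuilds and plays the stimulus: direction 0 gives a context descending in half steps, direction 1 an ascending one, and both end with the same pair of Shepard tones a half octave (a tritone) apart. Because that interval is ambiguous by construction, the preceding context is intended to bias whether the final pair is heard as going up or down.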