In [ ]:
import os, sys
import numpy as np
from TimeFreqAuditoryScene import *
from IPython.display import Audio, display, clear_output
from IPython.html import widgets
from IPython.html.widgets import interactive
%matplotlib inline

In [ ]:
# --- Global configuration (read by every cell below under these names) ---
fs = 44100          # audio sample rate (Hz)
interval=2.         # frequency ratio between adjacent transpositions (2. = one octave)
n_tones=3           # number of tones per sequence
mu_log=np.log(200)  # log-frequency center of the Gaussian spectral envelope
sigma_log=2.        # log-frequency spread of the envelope
genv = GaussianSpectralEnvelope(mu_log=mu_log, sigma_log=sigma_log)
tone_duration=0.2   # duration of each tone (s)

# Base frequency band: half an `interval` wide in log-frequency
# (fmax/fmin = sqrt(interval)).
fmin = 100.
fmax = fmin*np.sqrt(interval)
band_base = [fmin,fmax]

In [ ]:
## Generating Shepard tones from coupled streams
#
# Coherent context: ONE random tone sequence, replicated at 16 transpositions
# (interval**i for i in -8..7), then an ambiguous tritone probe.

scene = Scene(List=[])
band = band_base

context = Node(List=[])
# n_tones frequencies, log-uniform within [band[0], band[1]]
freqs = list(band[0] * (band[1] / band[0]) ** np.random.rand(n_tones,))
for i in range(-8, 8):
    ts = ToneSequence(freqs=freqs, env=genv,
                      intertone_delay=0.01,
                      tone_duration=tone_duration)
    # Transpose the shared sequence — all streams move coherently.
    ts.shift_tones(shift=interval**i)
    context.List.append(ts)

# Probe starts 0.1 s after the context ends; base frequency is one of the two
# band edges, chosen at random (bool index -> 0 or 1).
tritone = Tritone(fb=band_base[np.random.rand() > 0.5],
                  delay=0.1 + context.getduration(),
                  duration_sp=tone_duration,
                  delay_sp=0.05,
                  env=genv)

scene.add(context)
scene.add(tritone)

# Draw the time-frequency layout, then synthesize and play.
sd = SceneDrawer()
sd.draw(scene)
plt.show()
x = scene.generate(fs=fs)
print(x.shape)  # single-arg print() works under both Python 2 and 3

display(Audio(x, rate=fs, autoplay=True))

In [ ]:
## Generating independent Uniform banded streams of tones
#
# Non-coherent context: each transposed band gets its OWN freshly drawn
# uniform tone sequence, then the same ambiguous tritone probe.

context = Node(List=[])

for i in range(-8, 8):
    # Each stream draws independent tones from its own transposed band.
    band = [fmin * interval**i, fmax * interval**i]
    ts = UniformToneSequence(n_tones=n_tones,
                             tone_duration=tone_duration,
                             intertone_delay=0.,
                             band=band,
                             env=genv)
    context.List.append(ts)

# Probe starts 0.1 s after the context ends; base frequency is one of the two
# band edges, chosen at random (bool index -> 0 or 1).
tritone = Tritone(fb=band_base[np.random.rand() > 0.5],
                  delay=0.1 + context.getduration(),
                  duration_sp=tone_duration,
                  delay_sp=0.05,
                  env=genv)

scene = Scene(List=[])
scene.add(context)
scene.add(tritone)

# Draw the time-frequency layout, then synthesize and play.
sd = SceneDrawer()
sd.draw(scene)
plt.show()
x = scene.generate(fs=fs)
print(x.shape)  # single-arg print() works under both Python 2 and 3

display(Audio(x, rate=fs, autoplay=True))

In [ ]:
# Opposing coherent vs non-coherent contexts
#
# Pit a coherent context (one sequence transposed across bands) against a
# non-coherent one (independent sequences, offset by sqrt(2) in frequency),
# then probe with a tritone.

fmin = 100. + 50 * np.random.rand()   # jitter the base band on each run
fmax = fmin * np.sqrt(interval)
band_base = [fmin, fmax]
band = band_base

# ----------------------------- coherent context
context1 = Node(List=[])
freqs = list(band[0] * (band[1] / band[0]) ** np.random.rand(n_tones,))
for i in range(-5, 5):
    ts = ToneSequence(freqs=freqs, env=genv,
                      intertone_delay=0.,
                      tone_duration=tone_duration)
    ts.shift_tones(shift=interval**i)
    context1.List.append(ts)

# ----------------------------- non-coherent context, offset by sqrt(2)
context2 = Node(List=[])
for i in range(-5, 5):
    band = [fmin * np.sqrt(2) * interval**i, fmax * np.sqrt(2) * interval**i]
    ts = UniformToneSequence(n_tones=n_tones,
                             tone_duration=tone_duration,
                             intertone_delay=0.,
                             band=band,
                             env=genv)
    context2.List.append(ts)

# ------------------------------ probe
# NOTE(review): d was once randomized (`d = np.random.rand() > 0.5`) but the
# original immediately overwrote it with 0; the fixed value is kept here.
# TODO: restore randomization if the forced choice was only for debugging.
d = 0
print('d = %s' % d)
# BUGFIX: the original used `context.getduration()`, a variable left over from
# the PREVIOUS cell (hidden state) — this cell defines context1/context2.
# Both contexts play in the same scene, so the probe starts 0.1 s after the
# longer of the two ends (presumably they run concurrently — TODO confirm).
tritone = Tritone(fb=band_base[d],
                  delay=0.1 + max(context1.getduration(), context2.getduration()),
                  duration_sp=tone_duration,
                  delay_sp=0.05,
                  env=genv)

scene = Scene(List=[])
scene.add(context1)
scene.add(context2)
scene.add(tritone)

# Draw the time-frequency layout, then synthesize and play.
sd = SceneDrawer()
sd.draw(scene)
plt.show()

x = scene.generate(fs=fs)
print(x.shape)  # single-arg print() works under both Python 2 and 3

display(Audio(x, rate=fs, autoplay=True))