In [ ]:
import os, sys
import copy
import numpy as np
from TimeFreqAuditoryScene import *
from IPython.display import Audio, display, clear_output
%matplotlib inline

In [ ]:
# Parameterization
# Global parameters
fs = 44100  # audio sample rate in Hz (passed to scene.generate and Audio playback)
# (The delay/duration defaults of 1/8 s that used to live here were dead code:
#  both are overwritten in the next cell before ever being read.)
mu_log = np.log(500)  # center of the spectral envelope, in log-frequency — presumably log(Hz); confirm against library
sigma_log = 2.        # spread of the spectral envelope, in log-frequency units
# Gaussian spectral envelope shared by every chord in the scene
genv = GaussianSpectralEnvelope(mu_log=mu_log, sigma_log=sigma_log)

In [ ]:
# Per-trial experiment parameters
duration = 0.2  # chord duration (s)
delay = 0.1     # gap between successive chords (s)
fb = 5 * np.random.rand()  # random base-frequency factor in [0, 5) — units per library convention

n_tones = 20
# n_tones random intervals, each 6..11 semitones
intervals_st = np.random.randint(6, 12, n_tones)
# midpoints of each pair of consecutive intervals (n_tones - 1 values)
intervals_st2 = (intervals_st[:-1] + intervals_st[1:]) / 2.0


scene = Scene()

run_time = 0.  # running onset time (s) of the next chord in the scene

# Constructing the context: n_context chords, each the full frequency stack
# transposed upward by a random number of semitones.
n_context = 5
context = Node()
# Frequency stack: base frequency scaled by the cumulated intervals (semitones -> octaves)
freqs = fb*2**(np.cumsum(intervals_st/12.))

for i in range(n_context):
    # Random upward transposition of up to half the largest interval.
    # int(...) makes the truncation of the float bound explicit — legacy
    # np.random.randint should not be handed a float `high`.
    shift = np.random.randint(0, int(np.max(intervals_st) / 2))
    tmp_chord = Chord(fb=fb,
                      freqs=freqs * 2 ** (shift / 12.),
                      duration=duration,
                      env=genv,
                      delay=run_time)
    run_time += duration + delay
    context.add(tmp_chord)

# Extra gap separating the context from the test tones
run_time += delay

# Constructing the test tones.
# First test tone: the sampled interval stack rooted directly on fb.
tone1_intervals = 2. ** (intervals_st / 12.)
tone1 = Chord(fb=fb,
              intervals=tone1_intervals,
              duration=duration,
              env=genv,
              delay=run_time)
run_time += duration + delay

# Second test tone: rooted half of the first interval above fb,
# carrying the midpoint-interval stack.
tone2_root = fb * 2. ** (intervals_st[0] / 2. / 12.)
tone2_intervals = 2. ** (intervals_st2 / 12.)
tone2 = Chord(fb=tone2_root,
              intervals=tone2_intervals,
              duration=duration,
              env=genv,
              delay=run_time)

scene.add([context, tone1, tone2])
# Render the assembled scene: spectrogram figure, then audio playback.
# draw spectrogram
sd = SceneDrawer()
sd.draw(scene)
plt.show()  # NOTE(review): plt is never imported explicitly in this notebook — presumably re-exported by the star import; confirm
# generate sound
x = scene.generate(fs=fs)  # synthesize the waveform at the sample rate set in the parameter cell
display(Audio(x, rate=fs, autoplay=True))  # autoplay starts playback as soon as the cell runs