In [6]:
import os, sys
import copy

import numpy as np
import matplotlib.pyplot as plt

from TimeFreqAuditoryScene import *
from IPython.display import Audio, display, clear_output
%matplotlib inline

In [ ]:
# Parameterization
# Global parameters
fs = 44100             # sampling rate (Hz)
delay = 1./8.          # silence between successive chords (s); overridden below
duration = 1./8.       # duration of each chord (s); overridden below
mu_log = np.log(500)   # center of the Gaussian spectral envelope (log-frequency)
sigma_log = 2.         # width of the Gaussian spectral envelope (log-frequency)
genv = GaussianSpectralEnvelope(mu_log=mu_log, sigma_log=sigma_log)

The Huh tone: a sequence of chords in which each chord's tones sit halfway between the tones of the previous chord, so every chord has one tone fewer than the last.
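
A minimal standalone sketch of the interval recursion used in the next cell (independent of TimeFreqAuditoryScene; the starting intervals are just example values): adjacent semitone intervals are repeatedly averaged until a single interval is left.

In [ ]:
import numpy as np

# Toy example of the midpoint recursion: [4, 7, 12] -> [5.5, 9.5] -> [7.5]
steps = [np.array([4., 7., 12.])]
while len(steps[-1]) > 1:
    cur = steps[-1]
    steps.append(0.5*(cur[:-1] + cur[1:]))

for s in steps:
    print(s)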


In [ ]:
duration = 0.5  # chord duration (s), overrides the global value above
delay = 0.1     # gap between chords (s), overrides the global value above

n_tones = 10    # number of intervals making up the first chord
fb = 10         # base frequency of the first chord

# Semitone intervals of the first chord
intervals_st = np.random.randint(1, 20, n_tones)
#intervals_st = 6.*np.ones(n_tones)

intervals_seq = [intervals_st]

# Each subsequent chord takes the midpoints of adjacent intervals,
# so every chord has one interval fewer than the previous one
while len(intervals_seq[-1]) > 1:
    tmp_int = np.asarray(intervals_seq[-1])
    intervals_seq.append(0.5*(tmp_int[:-1] + tmp_int[1:]))

# Base frequency of each chord: shift the previous base up by half of that
# chord's first interval (in semitones); the last entry of fb_seq is unused
fb_seq = [fb]
for i in range(len(intervals_seq)):
    first_interval = intervals_seq[i][0]
    fb_seq.append(fb_seq[-1]*2.**(first_interval/12./2.))

# Build the scene: chords follow one another, separated by `delay` seconds
scene = Scene()
run_time = 0.
for i in range(len(intervals_seq)):
    tone = Chord(fb=fb_seq[i],
                 intervals=2.**(intervals_seq[i]/12.),  # semitones -> frequency ratios
                 duration=duration,
                 delay=run_time,   # onset time of this chord
                 env=genv)
    scene.add(tone)
    run_time += duration + delay


# draw spectrogram
sd = SceneDrawer()
sd.draw(scene)
plt.show()
# generate sound
x = scene.generate(fs=fs)
display(Audio(x, rate=fs, autoplay=True))
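
As a quick optional check (not part of the original notebook), the cell below prints each chord's base frequency together with its semitone intervals, which makes the progressive halving of the chord size visible.

In [ ]:
# Inspect the chord sequence built above (assumes the previous cell has run);
# zip() skips the unused last entry of fb_seq
for f, ints in zip(fb_seq, intervals_seq):
    print("fb = {:.2f}, intervals (st) = {}".format(f, ints))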