In [1]:
# Enable RISE slideshow auto-launch with the "sky" theme by updating the
# notebook frontend config (the "livereveal" section of nbconfig).
from traitlets.config.manager import BaseJSONConfigManager
import os
import sys

# Derive the nbconfig directory from the running environment's prefix instead
# of a hard-coded, user-specific absolute path, so this cell works on any
# machine / conda environment (the original pointed at /Users/scot/...).
path = os.path.join(sys.prefix, "etc", "jupyter", "nbconfig")
cm = BaseJSONConfigManager(config_dir=path)
cm.update("livereveal", {"autolaunch": True,
                         "theme": "sky",
                         })

# The UT Dallas Art Science Lab Training module
print("Sonification Training Module.")
print("\n")
print("Hit the Space Bar or Right Arrow (Bottom Right corner) to go to next section")
print("Press Shift and Enter at Every New Section to initiate display")
In [2]:
# Ask the user's name via a Text widget, then greet them with synthesized
# speech (gTTS -> afplay; afplay is macOS-specific). This section is skipped
# in the JupyterHub version where each user has their own account.
import random
import time
from IPython.display import Image, display, clear_output
from ipywidgets import widgets
from gtts import gTTS
import os

introflag = 0  # becomes 1 once the user submits a name
text = widgets.Text("Enter your name: ")
name = str()
display(text)

def handle_submit(sender):
    """Store the submitted name and speak a navigation prompt."""
    global name
    global introflag
    introflag = 1
    # The widget value contains the "Enter your name: " prompt; take the part
    # after the prompt when present. Guard against the user having deleted the
    # prompt text, which previously raised IndexError on the bare [1] index.
    parts = text.value.split(': ', 1)
    name = parts[1] if len(parts) > 1 else parts[0]
    tts = gTTS(text='Thank you ' + name + ' click the arrows on the lower right to navigate', lang='en')
    tts.save("answer.mp3")
    os.system("afplay answer.mp3")  # afplay: macOS only

text.on_submit(handle_submit)
In [2]:
# Yes/No screen: ask about hearing disability. Either answer speaks a
# response with gTTS and clears the prompt.
import random
import time
from IPython.display import Image, display, clear_output
from ipywidgets import widgets
from gtts import gTTS
import os

print('Do you have a hearing disability?')
yesno = ['Yes', 'No']
button1 = widgets.Button(description=yesno[0])
button2 = widgets.Button(description=yesno[1])
container = widgets.HBox(children=[button1, button2])
display(container)

def on_button1_clicked(b):
    """'Yes' answer: advise consulting a hearing specialist."""
    tts = gTTS(text='Sorry you should consult a professional hearing specialist before using a sonification product. However be assured none of the sounds used in our system are dangerous', lang='en')
    tts.save("answer.mp3")
    os.system("afplay answer.mp3")  # afplay: macOS only
    # Close the widget row so it does not linger after clear_output(); the
    # 'No' handler already did this, the 'Yes' handler previously did not.
    container.close()
    clear_output()
    print("Sorry you should consult a professional hearing specialist before using a sonification product. However be assured none of the sounds used in our system are dangerous")

def on_button2_clicked(b):
    """'No' answer: direct the user to the free online hearing test section."""
    tts = gTTS(text='That is great. However , we advise you take some time in our next sections that includes a free online hearing test', lang='en')
    tts.save("answer.mp3")
    os.system("afplay answer.mp3")
    container.close()
    clear_output()
    print("Proceed onto the next section (Spacebar/Right Arrow) to Listening Test")

button1.on_click(on_button1_clicked)
button2.on_click(on_button2_clicked)
In [4]:
%%HTML
<!-- Stereo listening-test video; Listening_TestV2.mp4 must live next to the notebook. -->
<video width="640" height="480" controls>
<source src="Listening_TestV2.mp4" type="video/mp4">
</video>
<b>Press Space Bar to proceed</b>
In [5]:
# Yes/No screen: confirm the user heard audio in both headphone channels
# during the listening-test video. Relies on widgets/gTTS/os imported by
# earlier cells.
print("Could you hear sound on both headphones?")
yesno = ['Yes', 'No']
button1 = widgets.Button(description=yesno[0])
button2 = widgets.Button(description=yesno[1])
container = widgets.HBox(children=[button1, button2])
display(container)

def on_button1_clicked(b):
    """'Yes' answer: confirm and direct the user to the next section."""
    tts = gTTS(text='Great. Please use the scroll on the lower right corner to proceed on to the next section', lang='en')
    tts.save("answer.mp3")
    os.system("afplay answer.mp3")  # afplay: macOS only
    # Close the widget row so it does not linger after clear_output(); the
    # 'No' handler already did this, the 'Yes' handler previously did not.
    container.close()
    clear_output()
    print("Please Proceed onto the next Section (Press Spacebar or Arrow at Bottom Right corner)")

def on_button2_clicked(b):
    """'No' answer: ask the user to fix their audio setup and start over."""
    tts = gTTS(text='Please check your system sound settings and headphones & try again', lang='en')
    tts.save("answer.mp3")
    os.system("afplay answer.mp3")
    container.close()
    clear_output()
    print("Please check your system sound settings and headphones & start afresh")

button1.on_click(on_button1_clicked)
button2.on_click(on_button2_clicked)
You are about to embark on an adventure in listening. It is important that you take the time to understand your hearing. Everyone's hearing is slightly different. The first part of your path to listening for subtleties in spoken word, music, and other sounds made from data is to self-evaluate your own hearing. We suggest that you grab your best headphones and take an online hearing test.
As you move forward in these sonification training modules, these printouts will aid you in self-evaluating your understanding of sound in relation to your own ears and the way YOU are hearing the world.
ONLINE HEARING TEST link - click here. REMEMBER: print out the results for future reference.
In [16]:
%%HTML
<!-- Intro video explaining the percentage-sonification task; percentages.mp4 must live next to the notebook. -->
<video width="640" height="480" controls>
<source src="percentages.mp4" type="video/mp4">
</video>
In [6]:
# Free-exploration demo: slide a percentage and click "Sonify Me!" to hear it.
# NOTE(review): `f`, `on_button_clicked`, and (presumably) `interact` come from
# pan.py, which is not visible here -- confirm their definitions against that
# script before changing this cell.
%run ./pan.py
print("Use the Slider to set a Value and Click the Sonify Me Button to listen. Try to explore and listen to the variation of as many values in the range")
interact (f, percentage=(0,100,5))
button = widgets.Button(description="Sonify Me!")
display(button)
button.on_click(on_button_clicked)
In [8]:
# Reset button: stops the Csound performance thread `pt`, then swaps the
# button for a "go to next section" label.
from IPython.display import Image, display, clear_output
from ipywidgets import Button, HBox, VBox,Layout,widgets
button = widgets.Button(description="Click here to Reset Sound Engine. Then Use the Right Arrow to go to the next Section", layout=Layout(width='80%', height='50px'))
display(button)
def next_button_clicked(b):
# NOTE(review): `pt` is created in the cell below (In [9]); this handler only
# works after that cell has run -- confirm the intended execution order.
pt.stop()
pt.join()
# NOTE(review): this `button` shadows the outer one and gets no on_click
# handler -- it appears to be display-only; verify that is intentional.
button = widgets.Button(description="Use the Space Bar or Right Arrow to go to the next section", layout=Layout(width='40%', height='50px'))
clear_output()
display(button)
button.on_click(next_button_clicked)
In [9]:
# Five-trial quiz: Csound sonifies a random percentage (frequency 690-710 Hz
# plus stereo pan); the user guesses via a slider and responses are appended
# to responses.csv. Indentation was stripped by the notebook export, so the
# code below is kept byte-identical with review comments only.
import random
import time
from IPython.display import Image, display, clear_output
from ipywidgets import Button, HBox, VBox,Layout
from ipywidgets import widgets
from ipywidgets import interact, interactive, fixed, interact_manual
from gtts import gTTS
import os
import numpy
import ctcsound
print ("Listen to the Sonification, Set the Slider to what Percentage you think it represents and Submit Response. You will undergo 5 trials")
# Trial state shared across the widget callbacks below.
count=0
pan = 0
user_input=0
cs = ctcsound.Csound()
index = 0
# Csound orchestra/score: instr 1 is a two-modulator FM-style oscillator;
# p8 is the stereo pan weight, p9 the envelope level.
csd = '''
<CsoundSynthesizer>
<CsOptions>
-odac -d
</CsOptions>
<CsInstruments>
sr = 44100
ksmps = 32
nchnls = 2
0dbfs = 1
instr 1
;aMod1 poscil 200, 700, 1
aMod1 poscil p4, p5, 1 ; p4 = amp1, p5 = f1, p6 = amp2, p7 = f2
;aMod2 poscil 1800, 290, 1
aMod2 poscil p6, p7, 1
kenv linen p9 , 0.3 , p3, p9
aSig poscil kenv, 440+aMod1+aMod2, 1
outs aSig*(1-p8), aSig*p8
endin
</CsInstruments>
<CsScore>
f 0 14400
f 1 0 1024 10 1
</CsScore>
</CsoundSynthesizer>
'''
cs.compileCsdText(csd)
cs.start()
# Background performance thread; also stopped by the reset cell above.
pt = ctcsound.CsoundPerformanceThread(cs.csound())
pt.play()
# Slider callback from interact(): records the user's current guess.
def f(percentage):
global user_input
user_input = percentage
# Build one trial: pick a random target percentage and lay out the widgets.
def redraw():
global index
sonibutton = widgets.Button(description = 'Listen to Sonification')
answerbutton = widgets.Button(description='Submit Response')
choices = random.sample(range(100), 4)
choices = list(map(str, choices))
correct = random.choice(choices)
index = int(correct)
#display(Image(correct))
#display(correct)
time.sleep(0.5)
#display(button)
#button.on_click(on_button_clicked)
#buttons = [widgets.Button(description = choice) for choice in choices]
#sonibutton = [widgets.Button(description = 'Listen to Sonification')]
interact (f, percentage=(0,100,1))
#answerbutton = [widgets.Button(description='Submit Input')]
#container = widgets.HBox(children=buttons)
right_box = VBox([(sonibutton)])
left_box = VBox([(answerbutton)])
#HBox([left_box, right_box])
container = widgets.HBox([left_box,right_box])
display(container)
# Record the guess: appends "target,guess,error," to responses.csv.
# NOTE(review): index-user_input is a signed error -- confirm a signed
# value (not absolute) is what the analysis downstream expects.
def ans_button_clicked(b):
global count
count = count + 1
tts = gTTS(text='Input Submitted', lang='en')
tts.save("answer.mp3")
os.system("afplay answer.mp3")
print(user_input)
text=list()
text.append(index)
text.append(user_input)
text.append(index-user_input)
with open('responses.csv','a') as file:
file.write('\n')
for line in text:
file.write(str(line))
file.write(',')
time.sleep(2)
container.close()
clear_output()
if count <5:
redraw()
if count == 5:
msg = widgets.Button(description = 'Thank you for finishing this module',layout=Layout(width='50%', height='80px'))
display(msg)
pt.stop()
pt.join()
# NOTE(review): the `with` statement already closed the file, so this
# close() is redundant -- presumably leftover; verify and remove.
file.close()
# Play the sonification: map index 0-100 to 690-710 Hz and pan hard
# left/right/center around 50.
def son_button_clicked(b):
#tts = gTTS(text='Playing Sonification', lang='en')
#tts.save("answer.mp3")
#os.system("afplay answer.mp3")
in_min = 0
in_max = 100
out_min=690
out_max = 710
global index
# NOTE(review): `pan` here is a local that shadows the module-level
# `pan = 0` above -- confirm the global is intentionally unused.
if (index>50):
pan = 0
if (index<50):
pan = 1
if (index==50):
pan = 0.5
freq = (index - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
print(freq)
pt.scoreEvent(False, 'i', (1, 0, 4, 200, 700, 200, freq, pan, 0.5))
time.sleep(4.5)
answerbutton.on_click(ans_button_clicked)
sonibutton.on_click(son_button_clicked)
redraw()
In [20]:
# Interactive pie chart of demo genomic content; clicking a wedge is handled
# by PieEventHandler below. NOTE(review): pichart.py presumably defines
# pan_sonify used by the click handler -- confirm against that script.
%matplotlib notebook
%run ./pichart.py
import ipywidgets as widgets
from gtts import gTTS
import os
from matplotlib import pyplot as plt
# make a square figure and axes
plt.figure(figsize=(6,6))
ax = plt.axes([0.1, 0.1, 0.8, 0.8])
labels = 'Adenine', 'Thymine', 'Guanine', 'Cytosine'
fracs = [30,30,20,20]
explode=(0, 0, 0, 0)
# plt.pie returns (patches, texts); p[0] is the list of wedge patches.
p = plt.pie(fracs, explode=explode, labels=labels, shadow=True)
#autopct='%1.1f%%',
plt.title('ATGC Genomic Content - Demo', bbox={'facecolor':'0.8', 'pad':5})
w = p[0][0]
plt.plot()
class PieEventHandler:
    """Speak the label of a clicked pie wedge and sonify its fraction.

    Connects itself to the wedges' figure on construction; relies on the
    module-level ``fracs``, ``labels``, ``gTTS``, ``os`` and ``pan_sonify``
    (the latter from pichart.py).
    """

    def __init__(self, p):
        # Keep the wedge patches plus their parent figure/axes for hit-testing.
        self.p = p
        self.fig = p[0].figure
        self.ax = p[0].axes
        self.fig.canvas.mpl_connect('button_press_event', self.onpress)

    def onpress(self, event):
        """Mouse-press callback: announce whichever wedge was clicked."""
        # Guard clause: ignore clicks outside the pie's axes.
        if event.inaxes != self.ax:
            return
        for wedge in self.p:
            hit, _ = wedge.contains(event)
            if not hit:
                continue
            wedge.set_picker(True)
            print(wedge.get_label())
            # Speak the wedge label (gTTS -> afplay; afplay is macOS-specific).
            tts = gTTS(text=(str(wedge.get_label())), lang='en')
            tts.save("wordify.mp3")
            os.system("afplay wordify.mp3")
            # Then sonify the wedge's percentage via pichart.py's helper.
            pan_sonify(fracs[labels.index(wedge.get_label())])

handler = PieEventHandler(p[0])