In [1]:
# Orientation cell: imports widget/audio libraries, configures the RISE
# ("livereveal") slideshow extension, then prints basic navigation
# instructions for the training module.
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from gtts import gTTS
import matplotlib
import numpy
from traitlets.config.manager import BaseJSONConfigManager
# NOTE(review): hardcoded, user-specific absolute path — this only works on
# the original author's machine; consider deriving it from the active
# Jupyter environment instead.
path = "/Users/scot/anaconda3/envs/py36/etc/jupyter/nbconfig"
cm = BaseJSONConfigManager(config_dir=path)
# RISE slideshow settings: auto-launch the presentation with the "sky" theme.
cm.update("livereveal", {"autolaunch": True,
                         "theme": "sky",
                         }
          )
# Suppress default INFO logging
# The UT Dallas Art Science Lab Training module
print ("The UTDallas ArtSci Lab Sonification Training Orientation Module.")
print("\n")
print("Here are the instructions for those unfamiliar with these training modules" )
print("\n Basic Instructions for each cell :")
print('1. Press "Shift + Enter" at every new cell to initiate display/execute that cell')
print("2. Hit the Space Bar to go to next section (cell)")
print("\n")
print("These are the two main actions that you need to move forward, cell by cell in each unit")
print("This is how this system functions")
print("Do these now")
In the next cell there will be a video to play to check your headphone and volume level. There is no particular reason for this cell to use "Shift+Enter", since it is just text, but do it NOW anyway to get used to using the "Shift+Enter" action for each cell. Now (as before) use Space Bar or arrows at bottom right to navigate to next section.
In [14]:
%%HTML
<!-- Hit "Shift+Enter" to render the video player below.
     (This must be an HTML comment: a Python-style "#" line inside a
     %%HTML cell is rendered as literal text in the output.) -->
<video width="640" height="480" controls>
<source src="dep/videos/Listening_TestV2.mp4" type="video/mp4">
</video>
<br>
<b> After you watch the video, then press Space Bar to proceed to the next cell</b>
In [15]:
# Headphone check: build a Yes/No button pair and display it.
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
# BUG FIX: was `import gtts as gTTS`, which binds the *module* to the name
# gTTS; the click handlers below call gTTS(text=..., lang=...) and would
# raise "TypeError: 'module' object is not callable". Import the class.
from gtts import gTTS
# `display` and `clear_output` are used by this cell and its click handlers
# but were never imported before this point; bring them in explicitly.
from IPython.display import display, clear_output
# Make sure and hit "Shift + Enter" if you have not yet
print("Could you hear sound on both headphones?")
yesno=['Yes','No']
button1 = widgets.Button(description = yesno[0])
button2 = widgets.Button(description = yesno[1])
#tts = gTTS(text=name+' Do you have a hearing disability? Click Yes or No', lang='en')
#tts.save("answer.mp3")
#os.system("afplay answer.mp3")
container = widgets.HBox(children=[button1,button2])
display(container)
def on_button1_clicked(b):
    """Handle a 'Yes' click: synthesize a confirmation message and tell
    the user to proceed to the next section.

    Parameters
    ----------
    b : ipywidgets.Button
        The clicked button (supplied by ipywidgets' on_click dispatch).
    """
    # Local import so the handler works even if the cell that imports
    # IPython.display has not been executed yet.
    from IPython.display import clear_output
    choice = b.description  # recorded but currently unused
    # BUG FIX: Button has no `color` trait, so `b.color = 'white'` was
    # silently ineffective; the documented styling attribute is
    # `style.button_color`.
    b.style.button_color = 'white'
    tts = gTTS(text='Great. Please use the scroll on the lower right corner to proceed on to the next section', lang='en')
    tts.save("answer.mp3")
    #os.system("afplay answer.mp3")
    clear_output()
    print("Please Proceed onto the next Section (Press Spacebar or Arrow at Bottom Right corner)")
def on_button2_clicked(b):
    """Handle a 'No' click: synthesize a troubleshooting message, close
    the button container, and ask the user to restart.

    Parameters
    ----------
    b : ipywidgets.Button
        The clicked button (supplied by ipywidgets' on_click dispatch).
    """
    # BUG FIX: `clear_output` was never imported before this cell, so the
    # handler raised NameError when clicked. Import it locally so the
    # handler stands alone.
    from IPython.display import clear_output
    # [insert code to record choice]
    tts = gTTS(text='Please check your system sound settings and headphones & try again', lang='en')
    tts.save("answer.mp3")
    #os.system("afplay answer.mp3")
    container.close()
    clear_output()
    print("Please check your system sound settings and headphones & start afresh")
# Attach the Yes/No click handlers to their buttons.
button1.on_click(on_button1_clicked)
button2.on_click(on_button2_clicked)
In [ ]:
# Hearing Test Training Module
_You are about to embark on an adventure in listening_
It is important that you take the time to understand your hearing. Everyone's hearing is slightly different. The first part of your path to listening for subtleties in spoken word, music and other sounds made from data is to self-evaluate your own hearing.
We suggest that you grab your best headphones and that you take an online hearing test
Make sure to hit "Shift + Enter" if you have not yet
_As you move forward in these sonification training modules, these printouts will aid you in self-evaluating your understanding of sound in relation to your own ears and the way YOU are hearing the world._
ONLINE HEARING TEST link - click here
REMEMBER: Print out the results for future reference
Hit space to go to the next slide and final slide for this orientation
In [ ]:
# Sonification trial setup: imports, platform-specific speech playback
# flag, shared trial state, and the Csound synthesizer that renders the
# sonifications.
import random
import time
from IPython.display import Image, display, clear_output
from ipywidgets import Button, HBox, VBox,Layout
from ipywidgets import widgets
from ipywidgets import interact, interactive, fixed, interact_manual
from gtts import gTTS
import os
import numpy
import ctcsound
import platform
# (BUG FIX: removed a duplicate `import os` — it appeared twice in this cell.)

# Speech playback backend: 2 -> Windows (`cmdmp3`), 1 -> everything else
# (`afplay`, i.e. macOS). Was two redundant opposite `if` statements;
# collapsed to a single if/else with identical outcome.
if (platform.system()=='Windows'):
    speechflag = 2
else:
    speechflag = 1

print ("Listen to the Sonification, Set the Slider to what Percentage you think it represents and Submit Response. You will undergo 5 trials")

# Trial state shared (via `global`) with the UI callbacks defined in the
# redraw() cell below.
count=0        # number of completed trials (5 total)
accuracy =0    # accumulated absolute error across trials
pan = 0        # NOTE(review): never read in this notebook — confirm before removing
user_input=0   # latest slider value chosen by the user
cs = ctcsound.Csound()
index = 0      # the "correct" percentage for the current trial

# Inline Csound orchestra/score: instr 1 spawns p4 harmonic partials of a
# 200 Hz base tone by scheduling instances of instr 10 (sine partials with
# a transeg envelope). The string is passed verbatim to Csound.
csd = '''
<CsoundSynthesizer>
<CsOptions>
-o dac
</CsOptions>
<CsInstruments>
;
sr = 44100
ksmps = 32
nchnls = 2
0dbfs = 1
giSine ftgen 0, 0, 2^10, 10, 1
instr 1 ;master instrument
inumparts = p4 ;number of partials
ibasfreq = 200 ;base frequency
ipart = 1 ;count variable for loop
;loop for inumparts over the ipart variable
;and trigger inumpartss instanes of the subinstrument
loop:
ifreq = ibasfreq * ipart
iamp = 1/ipart/inumparts
event_i "i", 10, 0, p3, ifreq, iamp
loop_le ipart, 1, inumparts, loop
endin
instr 10 ;subinstrument for playing one partial
ifreq = p4 ;frequency of this partial
iamp = p5 ;amplitude of this partial
aenv transeg 0, .01, 0, iamp, p3-0.1, -10, 0
apart poscil aenv, ifreq, giSine
outs apart, apart
endin
</CsInstruments>
<CsScore>
f 0 14400
f 1 0 1024 10 1
</CsScore>
</CsoundSynthesizer>
'''
cs.compileCsdText(csd)
cs.start()
# Run Csound on its own performance thread so widget callbacks can send
# score events without blocking the notebook kernel.
pt = ctcsound.CsoundPerformanceThread(cs.csound())
pt.play()
def f(percentage):
    # Callback for `interact(f, percentage=(0,100,1))`: stores the current
    # slider position in the module-level `user_input`, which is read later
    # by ans_button_clicked when the response is submitted.
    # NOTE: the parameter name `percentage` is part of the interface —
    # `interact` passes it by keyword.
    global user_input
    user_input = percentage
def redraw():
    """Build and display one trial: a 'Listen to Sonification' button, a
    0-100 percentage slider (via `interact`), and a 'Submit Response'
    button. Picks a random correct percentage and stores it in the
    module-level `index`. Re-invokes itself after each submission until
    5 trials are done.
    """
    global index
    global accuracy
    sonibutton = widgets.Button(description = 'Listen to Sonification')
    answerbutton = widgets.Button(description='Submit Response')
    # Draw 4 distinct values in [0, 100) and pick one as this trial's
    # correct answer. (The other 3 are leftovers from an earlier
    # multiple-choice design — see the commented-out button code below.)
    choices = random.sample(range(100), 4)
    choices = list(map(str, choices))
    correct = random.choice(choices)
    index = int(correct)
    #display(Image(correct))
    #display(correct)
    time.sleep(0.5)
    #display(button)
    #button.on_click(on_button_clicked)
    #buttons = [widgets.Button(description = choice) for choice in choices]
    #sonibutton = [widgets.Button(description = 'Listen to Sonification')]
    # Slider that records the user's guess into `user_input` via f().
    interact (f, percentage=(0,100,1))
    #answerbutton = [widgets.Button(description='Submit Input')]
    #container = widgets.HBox(children=buttons)
    left_box = VBox([(sonibutton)])
    right_box = VBox([(answerbutton)])
    #HBox([left_box, right_box])
    container = widgets.HBox([left_box,right_box])
    print("Trial " + str(count+1))
    display(container)
    def ans_button_clicked(b):
        # Submit handler: announce the submission, log the trial to CSV,
        # accumulate the absolute error, then either start the next trial
        # or finish after 5 trials.
        global count
        global accuracy
        count = count + 1
        tts = gTTS(text='Input Submitted', lang='en')
        tts.save("answer.mp3")
        # speechflag: 1 = afplay (macOS), 2 = cmdmp3 (Windows); set in the
        # setup cell above.
        if (speechflag==1):
            os.system("afplay answer.mp3")
        if (speechflag==2):
            os.system("cmdmp3 answer.mp3")
        # One CSV row per trial: correct value, user's guess, signed error.
        text=list()
        text.append(index)
        text.append(user_input)
        text.append(index-user_input)
        accuracy = accuracy + abs(index -user_input)
        with open('drop_responses.csv','a') as file:
            file.write('\n')
            for line in text:
                file.write(str(line))
                file.write(',')
        time.sleep(2)
        container.close()
        clear_output()
        if count <5:
            redraw()
        if count == 5:
            msg = widgets.Button(description = 'Thank you for finishing this module',layout=Layout(width='50%', height='80px'))
            display(msg)
            # Reported score: 100 minus the mean absolute error over 5 trials.
            print("Your accuracy of response is " + str(100-(accuracy/5)) + "%")
            pt.stop()
            pt.join()
            # NOTE(review): redundant — the `with` block above already
            # closed the file; close() on a closed file is a no-op.
            file.close()
    def son_button_clicked(b):
        # Listen handler: linearly map the correct percentage (0-100) onto
        # 0-20 and send it to Csound instr 1 as p4. NOTE(review): despite
        # the name `freq`, p4 is the *number of partials* in the CSD above
        # — confirm intended mapping.
        #tts = gTTS(text='Playing Sonification', lang='en')
        #tts.save("answer.mp3")
        #os.system("afplay answer.mp3")
        in_min = 0
        in_max = 100
        out_min=0
        out_max = 20
        global index
        freq = (index - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
        #print(freq)
        # Schedule: instr 1, start now, 3 s duration, p4 = mapped value.
        pt.scoreEvent(False, 'i', (1, 0, 3,freq))
        # Block until the 3 s sonification (plus margin) has played.
        time.sleep(4.5)
    answerbutton.on_click(ans_button_clicked)
    sonibutton.on_click(son_button_clicked)
# Kick off the first trial.
redraw()
Make sure to hit "Shift + Enter" if you have not yet
Now you have an idea of what is involved in using the Training, Exploring and Testing modules. In the upcoming modules you will be interacting with actual code that converts data into sound. This format of educating the listener, then giving a chance to explore and understand, is followed by a set of simple game-like interactions that will help us understand what is and is not working.
Please open the next notebook _Full_or_Empty_TrainingModule in the list you started with.
In [ ]: