In [1]:
import os
import copy
from matplotlib import pyplot as plt
from tomato.audio.audioanalyzer import AudioAnalyzer
In [2]:
# Assemble the path to the sample recording. The directory and file are
# named after the MusicBrainz recording MBID of the piece.
_sample_path_parts = (
    '..',
    'sample-data',
    'ussak--sazsemaisi--aksaksemai----neyzen_aziz_dede',
    'f970f1e0-0be9-4914-8302-709a0eac088e',
    'f970f1e0-0be9-4914-8302-709a0eac088e.mp3',
)
audio_filename = os.path.join(*_sample_path_parts)

# Instantiate the analyzer; verbose=True makes it report progress as it runs.
audioAnalyzer = AudioAnalyzer(verbose=True)
You can use the single-line call "analyze", which runs all of the available analysis steps at once
In [3]:
# Widen the default figure size up front so the feature plot is legible.
plt.rcParams['figure.figsize'] = [20, 8]

# NOTE: this single call performs every available analysis step and can
# take several minutes depending on the performance of your machine.
audio_features = audioAnalyzer.analyze(audio_filename)

# Render all extracted features in one figure.
audioAnalyzer.plot(audio_features)
plt.show()
... or call each method individually
In [4]:
# Step 1: audio metadata extraction -- crawl MusicBrainz for the
# recording's metadata (the filename embeds the recording MBID).
metadata = audioAnalyzer.crawl_musicbrainz_metadata(audio_filename)
# Step 2: predominant melody (pitch track) extraction from the audio
pitch = audioAnalyzer.extract_pitch(audio_filename)
# Step 3: post-filter the raw pitch track (noise/octave-error cleanup --
# exact filtering behavior lives in the AudioAnalyzer implementation)
pitch_filtered = audioAnalyzer.filter_pitch(pitch)
# Step 4: pitch histogram computed from the filtered pitch track
pitch_distribution = audioAnalyzer.compute_pitch_distribution(pitch_filtered)
# Deep-copy before converting: to_pcd() appears to convert the object to a
# pitch-class (octave-folded) distribution in place, and we want to keep
# the octave-wise distribution too -- TODO confirm in-place semantics.
pitch_class_distribution = copy.deepcopy(pitch_distribution)
pitch_class_distribution.to_pcd()
# Step 5: tonic identification from the filtered pitch track
tonic = audioAnalyzer.identify_tonic(pitch_filtered)
# Step 6: get the makam from metadata if possible, else apply automatic
# makam recognition on the audio features
makams = audioAnalyzer.get_makams(metadata, pitch_filtered, tonic)
# NOTE(review): only the first makam is kept; presumably `makams` is
# ordered by relevance -- confirm before relying on this choice
makam = list(makams)[0]  # for now get the first makam
# Step 7: transposition (ahenk) identification from tonic and makam
transposition = audioAnalyzer.identify_transposition(tonic, makam)
# Step 8: stable note extraction (tuning analysis); uses the octave-wise
# distribution, not the pitch-class one
note_models = audioAnalyzer.compute_note_models(pitch_distribution, tonic, makam)
# Step 9: melodic progression model computed from the filtered pitch track
melodic_progression = audioAnalyzer.compute_melodic_progression(pitch_filtered)