In [9]:
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import scipy
import shutil
import seaborn as sns
from statsmodels.stats.multicomp import (pairwise_tukeyhsd,
                                         MultiComparison)
import msaf
# Plotting settings
%matplotlib inline
sns.set_style("dark")
ds_base = "/home/uri/datasets/"
dataset_path = "/home/uri/datasets/BeatlesTUT/"
# dataset_path = "/Users/uriadmin/datasets/BeatlesTUT/"
n_jobs = 8
n_octaves = [4, 5, 6, 7]
f_mins = [27.5 * 2 ** (i / 12.) for i in xrange(0, 12, 2)]
labels_ids = ["scluster", "siplca", "fmc2d", "cnmf", "cc"]
bounds_ids = ["sf", "cnmf", "foote", "cc", "olda", "scluster", "siplca"]
features = ["hpcp", "mfcc", "tonnetz", "cqt"]
n_mfcc_coeffs = range(7, 20)
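The f_mins grid above starts at A0 (27.5 Hz) and climbs in whole-tone steps (two semitones per step), giving six candidate minimum frequencies up to roughly 49 Hz. A minimal sanity check of the grid (no msaf needed):
In [ ]:
# The six f_min candidates: A0 (27.5 Hz) up a whole tone at a time.
for i, f_min in enumerate(f_mins):
    print "semitone offset %d: f_min = %.2f Hz" % (i * 2, f_min)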
In [ ]:
# Boundaries for MFCC
feature = "mfcc"
for n_mfcc_coeff in n_mfcc_coeffs:
    print "MFCC Coeffs: ", n_mfcc_coeff
    msaf.Anal.mfcc_coeff = n_mfcc_coeff
    msaf.featextract.process(dataset_path, n_jobs=n_jobs, overwrite=True)
    for bound_id in bounds_ids:
        print "\t bounds_id:", bound_id
        results = msaf.process(dataset_path, feature=feature, boundaries_id=bound_id, n_jobs=n_jobs)
        results = msaf.eval.process(dataset_path, bound_id, feature=feature, save=True, n_jobs=n_jobs)
    # Archive this sweep's features and results under a per-coefficient key
    key = "mfcc_coeffE%d" % n_mfcc_coeff
    shutil.move(os.path.join(dataset_path, "features"), os.path.join(dataset_path, "features_%s" % key))
    shutil.move("results", "results_%s" % key)
In [12]:
# Labels for MFCC (Assuming features have already been computed)
for n_mfcc_coeff in n_mfcc_coeffs:
    print "MFCC Coeffs: ", n_mfcc_coeff
    msaf.Anal.mfcc_coeff = n_mfcc_coeff
    # Restore the archived features for this coefficient count
    key = "mfcc_coeffE%d" % n_mfcc_coeff
    shutil.move(os.path.join(dataset_path, "features_%s" % key), os.path.join(dataset_path, "features"))
    for label_id in labels_ids:
        print "\t labels_id:", label_id
        results = msaf.process(dataset_path, feature=feature, labels_id=label_id, n_jobs=n_jobs)
        results = msaf.eval.process(dataset_path, "gt", labels_id=label_id, feature=feature, save=True, n_jobs=n_jobs)
    shutil.move(os.path.join(dataset_path, "features"), os.path.join(dataset_path, "features_%s" % key))
    shutil.move("results", "results_%s" % key)
In [ ]:
# Boundaries
for n_octave in n_octaves:
    for f_min in f_mins:
        msaf.Anal.f_min = f_min
        msaf.Anal.n_octaves = n_octave
        msaf.featextract.process(dataset_path, n_jobs=n_jobs, overwrite=True)
        for bound_id in bounds_ids:
            results = msaf.process(dataset_path, feature="hpcp", boundaries_id=bound_id, n_jobs=n_jobs)
            results = msaf.eval.process(dataset_path, bound_id, save=True, n_jobs=n_jobs)
        key = "noctavesE%d_fminE%.1f" % (n_octave, f_min)
        shutil.move(os.path.join(dataset_path, "features"), os.path.join(dataset_path, "features_%s" % key))
        shutil.move("results", "results_%s" % key)
In [4]:
# Labels (Assuming features have already been computed)
for n_octave in n_octaves:
    for f_min in f_mins:
        if n_octave == 7 and f_min > 40:
            continue
        msaf.Anal.f_min = f_min
        msaf.Anal.n_octaves = n_octave
        key = "noctavesE%d_fminE%.1f" % (n_octave, f_min)
        shutil.move(os.path.join(dataset_path, "features_%s" % key), os.path.join(dataset_path, "features"))
        for label_id in labels_ids:
            results = msaf.process(dataset_path, feature="hpcp", labels_id=label_id, n_jobs=n_jobs)
            results = msaf.eval.process(dataset_path, "gt", labels_id=label_id, save=True, n_jobs=n_jobs)
        shutil.move(os.path.join(dataset_path, "features"), os.path.join(dataset_path, "features_%s" % key))
        shutil.move("results", "results_%s" % key)
In [2]:
# Explore different features
# Run boundaries with different features
for bounds_id in bounds_ids:
    print "bounds", bounds_id
    if bounds_id != "siplca":
        continue
    for feature in features:
        results = msaf.process(dataset_path, feature=feature, boundaries_id=bounds_id, n_jobs=n_jobs)
        results = msaf.eval.process(dataset_path, bounds_id, save=True, n_jobs=n_jobs, feature=feature)
In [3]:
# Run labels with different features
for labels_id in labels_ids:
    for feature in features:
        try:
            results = msaf.process(dataset_path, feature=feature, labels_id=labels_id, n_jobs=n_jobs)
            results = msaf.eval.process(dataset_path, "gt", labels_id=labels_id, save=True, n_jobs=n_jobs, feature=feature)
        except RuntimeError as e:
            print "Warning: ", e
In [16]:
# Explore different datasets
dataset_names = ["Cerulean", "Epiphyte", "Isophonics", "SALAMI"]
feature = "hpcp"
dataset_path = "/home/uri/datasets/Segments/"
In [ ]:
# Run boundaries with different datasets
for ds_name in dataset_names:
    print "Computing boundaries for %s" % ds_name
    for bounds_id in bounds_ids:
        results = msaf.process(dataset_path, feature=feature, boundaries_id=bounds_id,
                               n_jobs=n_jobs, ds_name=ds_name)
        results = msaf.eval.process(dataset_path, bounds_id, save=True, n_jobs=n_jobs,
                                    feature=feature, ds_name=ds_name)
In [ ]:
# Run labels with different datasets
for ds_name in dataset_names:
    print "Computing labels for %s" % ds_name
    for labels_id in labels_ids:
        try:
            results = msaf.process(dataset_path, feature=feature, labels_id=labels_id,
                                   n_jobs=n_jobs, ds_name=ds_name)
            results = msaf.eval.process(dataset_path, "gt", labels_id=labels_id, save=True, n_jobs=n_jobs,
                                        feature=feature, ds_name=ds_name)
        except RuntimeError as e:
            print "Warning: ", e
In [7]:
# Do the same for the Beatles-TUT dataset
dataset_path = "/home/uri/datasets/BeatlesTUT/"
# dataset_path = "/home/uri/datasets/Sargon/"
# Run boundaries for the Beatles-TUT
print "Computing boundaries for Beatles-TUT"
for bounds_id in bounds_ids:
    results = msaf.process(dataset_path, feature=feature, boundaries_id=bounds_id,
                           n_jobs=n_jobs)
    results = msaf.eval.process(dataset_path, bounds_id, save=True, n_jobs=n_jobs,
                                feature=feature)
# Run labels for the Beatles-TUT
print "Computing labels for Beatles-TUT"
for labels_id in labels_ids:
    try:
        results = msaf.process(dataset_path, feature=feature, labels_id=labels_id,
                               n_jobs=n_jobs)
        results = msaf.eval.process(dataset_path, "gt", labels_id=labels_id, save=True, n_jobs=n_jobs,
                                    feature=feature)
    except RuntimeError as e:
        print "Warning: ", e
In [8]:
# Explore multiple annotators (SPAM dataset)
ds_name = "*"
feature = "hpcp"
dataset_path = "/home/uri/datasets/SPAM/"
annotators = np.arange(5)
In [3]:
# Run boundaries for all the annotators in SPAM
for annotator_id in annotators:
    for bounds_id in bounds_ids:
        if bounds_id != "cc":
            continue
        print "Bounds", bounds_id, "Annotator", annotator_id
        results = msaf.process(dataset_path, feature=feature, boundaries_id=bounds_id,
                               n_jobs=n_jobs, annotator_id=annotator_id, ds_name=ds_name)
        results = msaf.eval.process(dataset_path, bounds_id, save=True, n_jobs=n_jobs,
                                    feature=feature, annotator_id=annotator_id, ds_name=ds_name)
# Run labels for all the annotators in SPAM
print "Computing labels for multiple annotators"
for annotator_id in annotators:
    for labels_id in labels_ids:
        if labels_id != "cc":
            continue
        try:
            results = msaf.process(dataset_path, feature=feature, labels_id=labels_id,
                                   n_jobs=n_jobs, annotator_id=annotator_id, ds_name=ds_name)
            results = msaf.eval.process(dataset_path, "gt", labels_id=labels_id, save=True, n_jobs=n_jobs,
                                        feature=feature, annotator_id=annotator_id, ds_name=ds_name)
        except RuntimeError as e:
            print "Warning: ", e
In [2]:
# Explore boundary/label algorithm combinations
feature = "hpcp"
dataset_path = "/home/uri/datasets/BeatlesTUT/"
bounds_ids += ["gt"]  # also include the ground-truth boundaries
annotator_id = 0
ds_name = "*"
print bounds_ids
In [3]:
for bounds_id in bounds_ids:
    for labels_id in labels_ids:
        print "Computing: ", bounds_id, labels_id
        results = msaf.process(dataset_path, feature=feature, boundaries_id=bounds_id, labels_id=labels_id,
                               n_jobs=n_jobs, annotator_id=annotator_id, ds_name=ds_name)
        results = msaf.eval.process(dataset_path, bounds_id, labels_id=labels_id, save=True, n_jobs=n_jobs,
                                    feature=feature, annotator_id=annotator_id, ds_name=ds_name)
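The grid above lends itself to a bounds × labels heatmap. A minimal sketch that re-reads the evaluations and plots the mean label score per pair, assuming msaf.eval.process returns a per-track DataFrame; "Sf" (segment-labeling F-measure) is an assumed column name:
In [ ]:
# Bounds x labels grid of mean label F-measures, as a heatmap.
# "Sf" is an assumed column name; inspect the DataFrame to confirm.
grid_scores = {}
for bounds_id in bounds_ids:
    for labels_id in labels_ids:
        df = msaf.eval.process(dataset_path, bounds_id, labels_id=labels_id,
                               feature=feature, annotator_id=annotator_id,
                               ds_name=ds_name, n_jobs=n_jobs)
        grid_scores[(bounds_id, labels_id)] = df["Sf"].mean()
grid = pd.Series(grid_scores)
grid.index = pd.MultiIndex.from_tuples(grid.index)
sns.heatmap(grid.unstack(), annot=True, fmt=".3f")  # rows: bounds, cols: labels
plt.xlabel("labels_id")
plt.ylabel("bounds_id")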
In [ ]: