In [ ]:
import sys, os
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import pandas as pd
import seaborn as sns
from ipywidgets import widgets
import ConfigReader as cr
import itertools as itt

In [ ]:
#Styles
sns.set_style('whitegrid', {'axes.linewidth':1.25, 'axes.edgecolor':'0.15',
                            'grid.linewidth':1.5, 'grid.color':'gray'})
#sns.set_context("notebook", font_scale=1.5, rc={"lines.linewidth": 2.5})
sns.set_color_codes()

In [ ]:
plt.rcParams['figure.figsize'] = (12.0, 9.0)

plt.rc('text', usetex=False)
plt.rc('font', size=20.0, family='serif')

markers = itt.cycle(('o', 's', 'p','H'))

In [ ]:
datasets_list = ["1049", "1111", "1120", "1128", "179", "184", "293", "389", "38", "46", "554", "772", "917"]

In [ ]:
# Data location and scenario
experiments = ['full', 'GPU']
preprocessor='DeepNetIterative'

In [ ]:
prepro_color = itt.cycle(sns.color_palette('Set1',15))

In [ ]:
data_dir = '/mhome/mendozah/autonet_GPU/results/experiment'
plot_dir = '/mhome/mendozah/autonet_GPU/results/plots_distributions/plots_improvement/'

In [ ]:
# Plot improvement over default
dataset_improv = []
for experiment in experiments:
    datadir = "/mhome/mendozah/autonet_" + experiment + "/results/experiment"
    reader = cr.ConfigReader(data_dir=datadir)
    for name in datasets_list:
        dataset = name + '_bac'
        tdf = reader.load_validation_trajectories(dataset=dataset, preprocessor=preprocessor, load_config=False)
        # Default configuration = trajectory entries at SMAC time 0;
        # single best = minimum test error over the whole trajectory
        mask_time = tdf.loc[:, ('smac', 'time')] == 0
        default_perf = tdf[mask_time].loc[:, ('smac', 'test_performance')].mean()
        single_best_perf = tdf['smac']['test_performance'].min()
        improvement = (default_perf - single_best_perf) / default_perf
        dataset_improv.append([name, default_perf, single_best_perf, improvement, experiment])
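
For each dataset, the improvement is the relative gain of the single best configuration over the default (the trajectory entries at SMAC time 0); test_performance is an error to be minimized, so the single best is taken as the minimum:

$\mathrm{improvement} = \dfrac{\mathrm{default} - \mathrm{best}}{\mathrm{default}}$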

In [ ]:
df_improv = pd.DataFrame(dataset_improv, columns=['dataset', 'default_performance', 'best_performance', 'improvement', 'experiment'])
df_improv = df_improv.sort_values(by=['improvement', 'experiment'])
df_improv.loc[df_improv.experiment == 'full', 'experiment'] = 'CPU'
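
For a quick side-by-side view of the two hardware settings, the long-format frame can be pivoted into one row per dataset (a sketch; it only uses the columns built above):

In [ ]:
# Sketch: one improvement column per experiment ('CPU' / 'GPU'), one row per dataset
improv_table = df_improv.pivot(index='dataset', columns='experiment', values='improvement')
improv_table.sort_values(by='GPU', ascending=False)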

In [ ]:
plt.rcdefaults()

In [ ]:
plt.rcParams['figure.figsize'] = (13.0, 8.0)
plt.rc('text', usetex=False)
plt.rc('font', size=14.0, family='serif')
fig_improvement, ax_improv = plt.subplots(1,1)
ax_improv.set_title('Perceived single best improvement over default config')
sns.barplot(x='dataset', y='improvement', data=df_improv, hue='experiment', ax=ax_improv)
ax_improv.set_ylabel('Improvement\n[(Default Config. - Best Config.) / Default Config.]')
ax_improv.set_xlabel('Representative Datasets')
fig_improvement.show()

In [ ]:
# SCATTER plot of improvement over default
# Note: reuses the `reader` built for the last experiment in the loop above
prepro_color = sns.color_palette('Set1', 14)
fig_improvement, ax_improv = plt.subplots(1, 1)
ax_improv.set_title('Perceived improvement over default config\n(bigger marker means more improvement)')
for name in datasets_list:
    dataset = name + '_bac'
    tdf = reader.load_validation_trajectories(dataset=dataset, preprocessor=preprocessor, load_config=False)
    mask_time = tdf.loc[:, ('smac', 'time')] == 0
    default_perf = tdf[mask_time].loc[:, ('smac', 'test_performance')].mean()
    single_best_perf = tdf['smac']['test_performance'].min()
    # Marker size encodes the ratio default / best: larger marker = larger improvement
    ax_improv.scatter(default_perf, single_best_perf, label=name, marker=next(markers),
                      s=70 * default_perf / single_best_perf,
                      color=prepro_color.pop(), edgecolor='k', linewidth=1.8, alpha=0.8)
ax_improv.plot([0,1],[0,1], '--k')
ax_improv.set_xlim(0,2)
ax_improv.set_ylim(0,1)
ax_improv.set_xlabel('Default configuration performance')
ax_improv.set_ylabel('Single best test performance')
ax_improv.legend(ncol=3, loc='best')
ax_improv.text(0.3, 0.6, 'No improvement line', fontsize=12)
#fig_improvement.savefig(plot_dir+'Improvement_scatter_plot_Autonet_NoPreprocessing.png')

Comparison plots: activation functions


In [ ]:
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
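
The figure below compares the logistic sigmoid defined above, $\sigma(x) = 1/(1 + e^{-x})$ (left panel), with the scaled hyperbolic tangent $\beta \tanh(\alpha x)$ for the $(\alpha, \beta)$ pairs listed in the legend (right panel). The two are closely related, since $\tanh(x) = 2\sigma(2x) - 1$.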

In [ ]:
x = np.linspace(-3, 3, 200)

In [ ]:
plt.rcParams['figure.figsize'] = (15.0, 7.0)
#plt.rc('text', usetex=False)
plt.rc('font', size=14.0, family='serif')
fig_tan, axs = plt.subplots(1,2, sharey=True)
ax0, ax1 = axs.flat
#ax_tan.set_title('Perceived single best improvement over default config')
ax0.plot(x, sigmoid(x), label='sigmoid')
ax0.set_ylabel('$\\sigma(x)$', fontsize=16)
ax1.plot(x, np.tanh(x), label=u'$\\alpha = 1., \\beta=1$')
ax1.plot(x, 1.7159 * np.tanh((2./3.) * x), label=u'$\\alpha = 2./3., \\beta=1.7159$')
#plt.plot(x, 2.4 * np.tanh((1./2.) * x))
ax1.plot(x, 1.48 * np.tanh((3./1.) * x), label=u'$\\alpha = 3.$, $\\beta=1.48$')
ax1.set_xlabel('$x$', fontsize=16)
ax0.set_xlabel('$x$', fontsize=16)
ax1.legend(loc='best', fancybox=True, fontsize=16)
ax0.legend(loc='best', fontsize=16, framealpha=1.0)
fig_tan.suptitle('Comparison of sigmoid and tanh functions', fontsize=20)
plt.savefig('/home/mendozah/workspace/thesis_autonet/images/tanh_compare.pdf')

In [ ]:
r = np.linspace(-2, 2, 300)
a = 0.25

In [ ]:
def elu(x):
    # Scalar ELU with alpha = 1.0 (vectorized below with np.vectorize)
    if x > 0:
        return x
    else:
        return 1.0 * (np.exp(x) - 1)

In [ ]:
velu = np.vectorize(elu)
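
An equivalent vectorized form of the ELU, avoiding the Python-level loop that np.vectorize implies, is sketched below with np.where (the name elu_vec and the explicit alpha argument are illustrative; alpha is fixed to 1.0 above):

In [ ]:
def elu_vec(x, alpha=1.0):
    # Vectorized ELU: x where x > 0, alpha * (exp(x) - 1) otherwise
    x = np.asarray(x, dtype=float)
    return np.where(x > 0, x, alpha * (np.exp(x) - 1.0))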

In [ ]:
plt.rcParams['figure.figsize'] = (12.0, 9.0)
#plt.rc('text', usetex=False)
#plt.rc('font', size=14.0, family='serif')
fig_relu, axs = plt.subplots(1,1)
axs.plot(r, np.maximum(0, r), label='ReLU')
axs.plot(r, np.maximum(a*r, r), label='leaky ReLU')
axs.plot(r, velu(r), 'r--', lw=2.4, label='ELU')
axs.set_ylabel('$\\sigma(x)$', fontsize=16)
axs.set_xlabel('$x$', fontsize=16)
axs.legend(loc='best', fancybox=True, fontsize=16)
fig_relu.suptitle('Comparison of ReLU functions', fontsize=20)
plt.savefig('/home/mendozah/workspace/thesis_autonet/images/relu_compare.pdf')

Comparison plots: learning rate policies


In [ ]:
x = np.linspace(0, 100, 101)

In [ ]:
lr = 0.1     # base learning rate
gamma = 0.1  # decay factor
k = 0.8      # power for the inverse policy
s = 25       # step size (in epochs) for the step policy

In [ ]:
def inv_policy(x):
    alpha = (1 + gamma*x)**(-k)
    return alpha

In [ ]:
def exp_policy(x):
    alpha = gamma**x
    return alpha

In [ ]:
def step_policy(x):
    alpha = gamma**(np.floor(x/s))
    return alpha
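
With base rate $\eta_0 = lr$ and $\gamma$, $k$, $s$ as set above, the learning rate at epoch $t$ plotted below is: fixed $\eta_t = \eta_0$; inverse $\eta_t = \eta_0\,(1 + \gamma t)^{-k}$; exponential $\eta_t = \eta_0\,\gamma^{t}$; step $\eta_t = \eta_0\,\gamma^{\lfloor t/s \rfloor}$.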

In [ ]:
plt.rcParams['figure.figsize'] = (12.0, 9.0)

fig_policies, axs = plt.subplots(1,1)
axs.plot(x, np.full_like(x, lr), lw=2.7, label='fixed')
axs.plot(x, lr * inv_policy(x), lw=2.7, label='inverse')
axs.step(x, lr * step_policy(x), lw=2.7, label='step')
axs.plot(x, lr * exp_policy(x), lw=2.7, label='exponential')
axs.set_ylabel('$\\eta_t$', fontsize=16)
axs.set_xlabel('epochs', fontsize=16)
axs.legend(loc='best', fancybox=True, fontsize=16)
fig_policies.suptitle('Comparison of learning rate policies', fontsize=20)
plt.savefig('/home/mendozah/workspace/thesis_autonet/images/policies_compare.pdf')