In [2]:
import pandas as pd
import matplotlib.pyplot as plt

# Baseline run: 75 neurons in the hidden layer, no dropout, RMSprop optimizer
my_data = pd.read_csv('model-75-0.0-rmsprop/history.csv')
epoch = my_data.epoch
loss = my_data.loss
val_loss = my_data.val_loss
plt.plot(epoch, loss, 'g-', label='Training loss')
plt.plot(epoch, val_loss, 'b-', label='Validation loss')
plt.legend(loc='best', fontsize=16)
plt.xlabel('Epoch', fontsize=18)
plt.ylabel('Loss', fontsize=18)
plt.savefig('figs/main.pdf')
plt.show()
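The training script itself is not part of this notebook. As a rough, hypothetical sketch of how a history.csv with epoch, loss, and val_loss columns can be produced, the snippet below uses Keras' CSVLogger callback; the model architecture, data, epoch count, and hidden-layer/dropout settings are placeholders inferred from the directory name, not the original training code.
import os
import numpy as np
from tensorflow import keras

n_hidden = 75        # hidden-layer size (assumed from 'model-75-0.0-rmsprop')
dropout_rate = 0.0   # dropout rate (assumed from the directory name)

# Placeholder data; the real features/targets are not shown in this notebook.
x_train, y_train = np.random.rand(1000, 10), np.random.rand(1000, 1)
x_val, y_val = np.random.rand(200, 10), np.random.rand(200, 1)

model = keras.Sequential([
    keras.layers.Dense(n_hidden, activation='relu', input_shape=(10,)),
    keras.layers.Dropout(dropout_rate),
    keras.layers.Dense(1),
])
model.compile(optimizer='rmsprop', loss='mse')

# CSVLogger writes one row per epoch: epoch, loss, val_loss, ...
os.makedirs('model-75-0.0-rmsprop', exist_ok=True)
logger = keras.callbacks.CSVLogger('model-75-0.0-rmsprop/history.csv')
model.fit(x_train, y_train, validation_data=(x_val, y_val),
          epochs=50, callbacks=[logger], verbose=0)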
In [3]:
#### Changing the number of neurons in the hidden layer
data75 = pd.read_csv('model-75-0.0-rmsprop/history.csv')
epoch_75 = data75.epoch
loss_75 = data75.loss
val_loss_75 = data75.val_loss
data50 = pd.read_csv('model-50-0.0-rmsprop/history.csv')
epoch_50 = data50.epoch
loss_50 = data50.loss
val_loss_50 = data50.val_loss
data100 = pd.read_csv('model-100-0.0-rmsprop/history.csv')
epoch_100 = data100.epoch
loss_100 = data100.loss
val_loss_100 = data100.val_loss
data150 = pd.read_csv('model-150-0.0-rmsprop/history.csv')
epoch_150 = data150.epoch
loss_150 = data150.loss
val_loss_150 = data150.val_loss
plt.plot(epoch_50, loss_50, 'r--', label='N=50, Training loss')
plt.plot(epoch_50, val_loss_50, 'r', label='N=50, Validation loss')
plt.plot(epoch_75, loss_75, 'g--', label='N=75, Training loss')
plt.plot(epoch_75, val_loss_75, 'g', label='N=75, Validation loss')
plt.plot(epoch_100, loss_100, 'b--', label='N=100, Training loss')
plt.plot(epoch_100, val_loss_100, 'b', label='N=100, Validation loss')
plt.plot(epoch_150, loss_150, 'c--', label='N=150, Training loss')
plt.plot(epoch_150, val_loss_150, 'c', label='N=150, Validation loss')
plt.legend(loc='best', fontsize=12)
plt.xlabel('Epoch', fontsize=18)
plt.ylabel('Loss', fontsize=18)
plt.title('Number of neurons in the hidden layer (N)')
plt.savefig('figs/hidden.pdf')
plt.show()
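The comparison cells in this notebook repeat the same load-and-plot pattern; a small helper along the lines of the sketch below could overlay any set of runs. The function name, signature, and (label, directory, colour) layout are my own additions, not part of the original notebook.
def plot_runs(runs, title, outfile):
    """Overlay training (dashed) and validation (solid) loss for each run."""
    for label, run_dir, colour in runs:
        hist = pd.read_csv(f'{run_dir}/history.csv')
        plt.plot(hist.epoch, hist.loss, colour + '--', label=f'{label}, training loss')
        plt.plot(hist.epoch, hist.val_loss, colour, label=f'{label}, validation loss')
    plt.legend(loc='best', fontsize=12)
    plt.xlabel('Epoch', fontsize=18)
    plt.ylabel('Loss', fontsize=18)
    plt.title(title)
    plt.savefig(outfile)
    plt.show()

# Equivalent to the hidden-layer comparison above:
plot_runs([('N=50', 'model-50-0.0-rmsprop', 'r'),
           ('N=75', 'model-75-0.0-rmsprop', 'g'),
           ('N=100', 'model-100-0.0-rmsprop', 'b'),
           ('N=150', 'model-150-0.0-rmsprop', 'c')],
          'Number of neurons in the hidden layer (N)', 'figs/hidden.pdf')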
In [12]:
#### Changing the optimization method (RMSprop vs. Adagrad)
data75 = pd.read_csv('model-75-0.0-rmsprop/history.csv')
epoch_75 = data75.epoch
loss_75 = data75.loss
val_loss_75 = data75.val_loss
data75a = pd.read_csv('model-75-0.0-adagrad/history.csv')
epoch_75a = data75a.epoch
loss_75a = data75a.loss
val_loss_75a = data75a.val_loss
data150a = pd.read_csv('model-150-0.0-adagrad/history.csv')
epoch_150a = data150a.epoch
loss_150a = data150a.loss
val_loss_150a = data150a.val_loss
data150 = pd.read_csv('model-150-0.0-rmsprop/history.csv')
epoch_150 = data150.epoch
loss_150 = data150.loss
val_loss_150 = data150.val_loss
plt.plot(epoch_75, loss_75, 'r--', label='N=75, rmsprop, training loss')
plt.plot(epoch_75, val_loss_75, 'r', label='N=75, rmsprop, validation loss')
plt.plot(epoch_75a, loss_75a, 'g--', label='N=75, adagrad, training loss')
plt.plot(epoch_75a, val_loss_75a, 'g', label='N=75, adagrad, validation loss')
plt.plot(epoch_150, loss_150, 'b--', label='N=150, rmsprop, training loss')
plt.plot(epoch_150, val_loss_150, 'b', label='N=150, rmsprop, validation loss')
plt.plot(epoch_150a, loss_150a, 'c--', label='N=150, adagrad, training loss')
plt.plot(epoch_150a, val_loss_150a, 'c', label='N=150, adagrad, validation loss')
plt.legend(loc='best', fontsize=12)
plt.xlabel('Epoch', fontsize=18)
plt.ylabel('Loss', fontsize=18)
plt.title('Comparison of different optimization methods')
plt.savefig('figs/opt.pdf')
plt.show()
In [11]:
#### Changing the dropout rate
data75 = pd.read_csv('model-75-0.0-rmsprop/history.csv')
epoch_75 = data75.epoch
loss_75 = data75.loss
val_loss_75 = data75.val_loss
data75_1 = pd.read_csv('model-75-0.1-rmsprop/history.csv')
epoch_75_1 = data75_1.epoch
loss_75_1 = data75_1.loss
val_loss_75_1 = data75_1.val_loss
data75_2 = pd.read_csv('model-75-0.2-rmsprop/history.csv')
epoch_75_2 = data75_2.epoch
loss_75_2 = data75_2.loss
val_loss_75_2 = data75_2.val_loss
data75_3 = pd.read_csv('model-75-0.3-rmsprop/history.csv')
epoch_75_3 = data75_3.epoch
loss_75_3 = data75_3.loss
val_loss_75_3 = data75_3.val_loss
plt.plot(epoch_75, loss_75, 'r--', label='Dropout=0, training loss')
plt.plot(epoch_75, val_loss_75, 'r', label='Dropout=0, validation loss')
plt.plot(epoch_75_1, loss_75_1, 'g--', label='Dropout=0.1, training loss')
plt.plot(epoch_75_1, val_loss_75_1, 'g', label='Dropout=0.1, validation loss')
plt.plot(epoch_75_2, loss_75_2, 'b--', label='Dropout=0.2, training loss')
plt.plot(epoch_75_2, val_loss_75_2, 'b', label='Dropout=0.2, validation loss')
plt.plot(epoch_75_3, loss_75_3, 'c--', label='Dropout=0.3, training loss')
plt.plot(epoch_75_3, val_loss_75_3, 'c', label='Dropout=0.3, validation loss')
plt.legend(loc='best', fontsize=12)
plt.xlabel('Epoch', fontsize=18)
plt.ylabel('Loss', fontsize=18)
plt.title('Comparison of different dropout rates')
plt.savefig('figs/drop.pdf')
plt.show()
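A numeric summary can complement the curves above. The sketch below is my own addition: it assumes every run directory follows the model-&lt;neurons&gt;-&lt;dropout&gt;-&lt;optimizer&gt; naming pattern seen in this notebook and tabulates the best validation loss reached by each run.
import glob

rows = []
for path in sorted(glob.glob('model-*/history.csv')):
    run = path.split('/')[0]                      # e.g. 'model-75-0.0-rmsprop'
    _, neurons, dropout, optimizer = run.split('-')
    hist = pd.read_csv(path)
    best = hist.loc[hist.val_loss.idxmin()]       # epoch with lowest validation loss
    rows.append({'neurons': int(neurons), 'dropout': float(dropout),
                 'optimizer': optimizer, 'best_val_loss': best.val_loss,
                 'best_epoch': int(best.epoch)})

summary = pd.DataFrame(rows).sort_values('best_val_loss')
print(summary.to_string(index=False))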