In this notebook, we compare motor and goal babbling strategies on a higher-dimensional arm. We will also show how to specify which environment configuration to use.
First, we reload the arm environment used in the previous tutorial and list its available configurations.
In [1]:
from explauto.environment import environments

# environment class and the dictionary of available configurations for the simple arm
env_cls, env_configs, _ = environments['simple_arm']
print(env_configs.keys())
Due to the arm's higher dimensionality, running each experiment will take a few minutes. Please be patient :-)
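If you want to check the size of the motor and sensory spaces before launching the experiments, you can instantiate the environment directly from its configuration. This is a minimal sketch; it assumes the environment exposes its dimensions through env.conf.m_ndims and env.conf.s_ndims.
env = env_cls(**env_configs['high_dimensional'])
print(env.conf.m_ndims, env.conf.s_ndims)  # number of motor (joint) and sensory (hand position) dimensions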
In [2]:
from explauto.experiment import Experiment, make_settings
s = make_settings(environment='simple_arm',
                  babbling_mode='motor',
                  interest_model='random',
                  sensorimotor_model='nearest_neighbor',
                  environment_config='high_dimensional')
motor_xp = Experiment.from_settings(s)
motor_xp.evaluate_at([1, 10, 20, 30, 100, 200, 300, 400], s.default_testcases)
motor_xp.run()
In [3]:
from explauto.experiment import Experiment
s = make_settings(environment='simple_arm',
                  babbling_mode='goal',
                  interest_model='random',
                  sensorimotor_model='nearest_neighbor',
                  environment_config='high_dimensional')
goal_xp = Experiment.from_settings(s)
goal_xp.evaluate_at([1, 10, 20, 30, 100, 200, 300, 400], s.default_testcases)
goal_xp.run()
In [4]:
%pylab inline
ax_motor = subplot(121)
ax_motor.axis([0, 1, -1, 1])
# hand positions (sensory dimensions 0 and 1) reached during motor babbling
data = motor_xp.log.scatter_plot(ax_motor, (('sensori', [0, 1]), ), color='green')
legend(('motor', ))
ax_goal = subplot(122)
ax_goal.axis([0, 1, -1, 1])
# hand positions reached during goal babbling
data = goal_xp.log.scatter_plot(ax_goal, (('sensori', [0, 1]), ), color='red')
legend(('goal', ))
Out[4]: [figure: reached hand positions during motor babbling (green, left) and goal babbling (red, right)]
In [5]:
ax = axes()
# testing error measured at the evaluation steps defined above in evaluate_at
motor_xp.log.plot_learning_curve(ax)
goal_xp.log.plot_learning_curve(ax)
legend(('motor', 'goal'))
Out[5]: [figure: learning curves (evaluation error over time) for motor and goal babbling]
In [6]:
ax_goal = subplot(122)
ax_goal.axis([0, 1, -1, 1])
# goals chosen by the interest model (green) vs. hand positions actually reached (red) during goal babbling
data = goal_xp.log.scatter_plot(ax_goal, (('choice', [0, 1]), ), color='green')
data = goal_xp.log.scatter_plot(ax_goal, (('sensori', [0, 1]), ), color='red')
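legend(('chosen goals', 'reached hand positions'))  # optional: label the two scatter layers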