In [ ]:
# Pre-configured async PPO launcher
# for an Atari Gym environment.
#
# Point TensorBoard to ~/tmp/test_gym_ppo
#
# Note: this configuration may need fine-tuning.
# 
# Paper: https://arxiv.org/pdf/1707.06347.pdf
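
In [ ]:
# Convenience sketch (not part of btgym): one way to point TensorBoard at the
# log directory configured below. It assumes the `tensorboard` executable is
# on PATH and that log_dir stays at ~/tmp/test_gym_ppo.
import os
import subprocess

tb_logdir = os.path.expanduser('~/tmp/test_gym_ppo')
# Uncomment to launch TensorBoard in the background on its default port:
# subprocess.Popen(['tensorboard', '--logdir', tb_logdir])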

In [ ]:
import os
from btgym.algorithms import AtariRescale42x42, Launcher, PPO, BaseAacPolicy

In [ ]:
cluster_config=dict(
    host='127.0.0.1',
    port=12230,
    num_workers=4,  # Set according to the number of CPU cores available
    num_ps=1,
    num_envs=4,  # Number of environments to run per worker
    log_dir=os.path.expanduser('~/tmp/test_gym_ppo'),
)

env_config = dict(
    class_ref=AtariRescale42x42,  # Gym env. preprocessed to normalized 42x42 grayscale pixels
    kwargs={'gym_id': 'Breakout-v0'}
)
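
# Illustration only, not btgym's actual AtariRescale42x42 implementation:
# a rough sketch of the preprocessing described above, turning a raw RGB Atari
# frame into a normalized 42x42 grayscale observation. The helper name and the
# use of cv2/numpy here are assumptions for demonstration purposes.
def _rescale_42x42_sketch(frame):
    import cv2
    import numpy as np
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)        # RGB -> grayscale
    small = cv2.resize(gray, (42, 42))                     # downsample to 42x42 pixels
    return (small / 255.0).astype(np.float32)[..., None]   # normalize to [0, 1], add channel dim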

policy_config = dict(
    class_ref=BaseAacPolicy,
    kwargs={}
)

trainer_config=dict(
    class_ref=PPO,
    kwargs=dict(
        opt_learn_rate=[1e-4, 1e-4],  # Bounds for random log-uniform sampling (equal here, so effectively fixed)
        opt_end_learn_rate=1e-5,
        opt_decay_steps=100*10**6,
        model_gae_lambda=0.95,
        model_beta=[0.01, 0.001],  # Entropy regularization coefficient, sampled log-uniformly from this range
        pi_prime_update_period=1, 
        replay_memory_size=2000,
        num_epochs=1,  # PPO-specific: number of SGD epochs per training step
        rollout_length=20,
        time_flat=False,
        use_reward_prediction=True,
        use_pixel_control=True,
        use_value_replay=True,
        vr_lambda=[1.0, 0.5],  # Auxiliary task loss weights, sampled log-uniformly
        pc_lambda=[1.0, 0.5],
        rp_lambda=[1.0, 0.1],
        model_summary_freq=100,
        episode_summary_freq=10,
        env_render_freq=100,
    )
)

launcher = Launcher(
    cluster_config=cluster_config,
    env_config=env_config,
    trainer_config=trainer_config,
    policy_config=policy_config,
    test_mode=True,  # Run the Atari Gym environment rather than a BTgym one
    max_env_steps=100*10**6,
    root_random_seed=0,
    verbose=1
)
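
In [ ]:
# Illustration only: how a two-element list such as model_beta=[0.01, 0.001]
# can be resolved into a single hyperparameter value drawn log-uniformly
# between its two bounds. This is a minimal sketch of the idea, not btgym's
# actual sampling code; the function name is hypothetical.
import numpy as np

def sample_log_uniform(bounds, rng=np.random):
    """Draw one value log-uniformly between min(bounds) and max(bounds)."""
    lo, hi = min(bounds), max(bounds)
    return float(np.exp(rng.uniform(np.log(lo), np.log(hi))))

# E.g. each worker could draw its own entropy coefficient from [0.001, 0.01]:
sample_log_uniform([0.01, 0.001])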

In [ ]:
launcher.run()
