In [ ]:
import gc
from itertools import count
import time
import numpy as np
import gym
from agent.agent import Agent
from visdom import Visdom
from config import ACCESS_SIZE

viz = Visdom()
assert viz.check_connection()  # requires a running Visdom server (python -m visdom.server)


MAX_EPISODES = 1000

env = gym.make('BipedalWalker-v2')
print(env.action_space.high)
print(env.action_space.low)
print(env.observation_space.high)
print(env.observation_space.low)

state_size = env.observation_space.shape[0]  # 24-dimensional observation
action_size = env.action_space.shape[0]      # 4-dimensional continuous action
agent = Agent(state_size, action_size, ACCESS_SIZE)
agent.restore_models(1000)  # load previously saved models (checkpoint 1000)
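# agent.get_noise() / agent.noise are not shown in this notebook. For
# continuous-control agents of this kind the exploration noise is commonly an
# Ornstein-Uhlenbeck process; the class below is only a minimal sketch under
# that assumption (hypothetical names and parameters), not the project's
# actual implementation.
class SketchOUNoise:
    def __init__(self, action_size, mu=0.0, theta=0.15, sigma=0.2):
        self.mu = mu * np.ones(action_size)
        self.theta = theta
        self.sigma = sigma
        self.reset()

    def reset(self):
        # Start every episode from the long-run mean.
        self.state = self.mu.copy()

    def sample(self):
        # Mean-reverting random walk: dx = theta * (mu - x) + sigma * N(0, 1).
        dx = self.theta * (self.mu - self.state) + self.sigma * np.random.randn(len(self.mu))
        self.state = self.state + dx
        return self.state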

# Warm-up: collect ACCESS_SIZE transitions with the restored policy plus
# exploration noise before any training updates are made.
state = env.reset()
for _ in range(ACCESS_SIZE):
    action = np.clip(agent(state) + agent.get_noise(), -1, 1)
    next_state, reward, done, info = env.step(action)
    agent.append(state, action, reward, done, next_state)
    state = next_state
    if done:
        state = env.reset()
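# agent.append() is likewise opaque here. A DDPG-style agent typically stores
# transitions in a replay memory and samples random minibatches from it during
# optimize(); the buffer below is a rough sketch under that assumption, with
# illustrative names and capacity rather than the actual implementation.
from collections import deque
import random


class SketchReplayBuffer:
    def __init__(self, capacity=1_000_000):
        self.buffer = deque(maxlen=capacity)  # oldest transitions are dropped once full

    def append(self, state, action, reward, done, next_state):
        self.buffer.append((state, action, reward, done, next_state))

    def sample(self, batch_size):
        # Uniform random minibatch for the actor/critic update.
        return random.sample(self.buffer, batch_size)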


def to_np(scale):
    # Wrap a scalar in a one-element array, the form viz.line() expects for X/Y.
    return np.array([scale])


# Visdom windows for per-episode reward and episode length.
viz_reward = viz.line(X=to_np(0), Y=to_np(0))
time.sleep(1)
viz_length = viz.line(X=to_np(0), Y=to_np(0))

for _ep in range(MAX_EPISODES):
    episode_length = 0
    episode_reward = 0
    state = env.reset()
    agent.noise.reset()  # fresh exploration-noise state at the start of each episode
    for step in count(1):
        # env.render()
        # Act with the current policy plus exploration noise, clipped to the
        # action bounds [-1, 1]; store the transition and run one training step.
        action = np.clip(agent(state) + agent.get_noise(), -1, 1)
        next_state, reward, done, info = env.step(action)
        agent.append(state, action, reward, done, next_state)
        state = next_state
        agent.optimize()

        episode_reward += reward
        # Log the episode to Visdom and stop when the environment terminates
        # or the 1000-step cap is reached.
        if done or step >= 1000:
            viz.line(X=to_np(_ep + 1), Y=to_np(episode_reward), win=viz_reward, update="append")
            time.sleep(0.01)
            viz.line(X=to_np(_ep + 1), Y=to_np(step), win=viz_length, update="append")
            break
    print(_ep, done, step, episode_reward)
    gc.collect()

    if _ep % 1000 == 0:
        # Note: "score" here is the episode length (step count), not the reward;
        # with MAX_EPISODES = 1000 this checkpoint only fires at episode 0.
        print("{} - score: {}".format(_ep, step))
        agent.save_models(_ep)


[1 1 1 1]
[-1 -1 -1 -1]
[inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf
 inf inf inf inf inf inf]
[-inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf
 -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf]
0 True 119 -105.73508366038931
0 - score: 119
Models saved successfully
1 False 1000 -97.78962076844766
2 True 101 -109.33096594442954
3 True 124 -110.8350541820713
4 False 1000 -95.66608028034044
5 False 1000 -95.03100084150942
6 False 1000 -96.98732777140096
7 True 125 -105.07305734532817
8 False 1000 -94.02503646728714
9 False 1000 -97.14691743580622
10 True 117 -104.70388737601519
11 False 1000 -96.3454637041083
12 False 1000 -97.13613829268392
13 False 1000 -96.75461067048347
14 False 1000 -96.97419939989386
15 True 111 -108.02517780331638
16 False 1000 -96.21011059619447
17 True 116 -103.87809168884304
18 True 116 -129.65305478632163
19 True 173 -113.00450275646624
20 True 102 -105.0503903969272
21 True 66 -104.24024699573778
22 True 139 -112.31378371439476
23 False 1000 -92.55380275235497

In [ ]:
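# agent.optimize() is not shown in this notebook either. The sketch below is a
# minimal illustration of the kind of DDPG-style update such an agent commonly
# performs (critic regression to a bootstrapped target, actor ascent on the
# critic, soft target updates), assuming PyTorch and hypothetical stand-in
# actor/critic networks; it is not the project's actual code.
import torch
import torch.nn as nn
import torch.nn.functional as F

GAMMA, TAU = 0.99, 1e-3

actor, actor_target = nn.Linear(24, 4), nn.Linear(24, 4)
critic, critic_target = nn.Linear(24 + 4, 1), nn.Linear(24 + 4, 1)
actor_target.load_state_dict(actor.state_dict())
critic_target.load_state_dict(critic.state_dict())
actor_opt = torch.optim.Adam(actor.parameters(), lr=1e-4)
critic_opt = torch.optim.Adam(critic.parameters(), lr=1e-3)


def sketch_optimize(state, action, reward, done, next_state):
    """One update from a minibatch of transitions (tensors of shape [B, ...])."""
    # Critic: regress Q(s, a) onto r + gamma * (1 - done) * Q'(s', pi'(s')).
    with torch.no_grad():
        next_action = torch.tanh(actor_target(next_state))
        target_q = reward + GAMMA * (1 - done) * critic_target(
            torch.cat([next_state, next_action], dim=1))
    q = critic(torch.cat([state, action], dim=1))
    critic_loss = F.mse_loss(q, target_q)
    critic_opt.zero_grad()
    critic_loss.backward()
    critic_opt.step()

    # Actor: maximise the critic's value of the actor's own actions.
    actor_loss = -critic(torch.cat([state, torch.tanh(actor(state))], dim=1)).mean()
    actor_opt.zero_grad()
    actor_loss.backward()
    actor_opt.step()

    # Soft (Polyak) update of the target networks.
    for net, target in ((actor, actor_target), (critic, critic_target)):
        for p, tp in zip(net.parameters(), target.parameters()):
            tp.data.copy_(TAU * p.data + (1 - TAU) * tp.data)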