In [ ]:
# Make the local cryptotrader package importable from the notebook's parent dir
import sys
sys.path.insert(0, '../')
# Show only ERROR-level logs to keep cell output readable during long backtests
import logging
logging.basicConfig(level=logging.ERROR)

from datetime import datetime, timedelta

# Exchange client, backtest data feed / environment, and a priori (rule-based) agents
from cryptotrader.exchange_api.poloniex import Poloniex
from cryptotrader.envs.trading import BacktestDataFeed, BacktestEnvironment
from cryptotrader.envs.utils import make_balance, convert_to
from cryptotrader.agents import apriori
from cryptotrader.utils import array_normalize

# Plotting setup: bokeh inline output plus jupyterthemes matplotlib styling
from bokeh.io import output_notebook
from jupyterthemes import jtplot
output_notebook()
jtplot.style()
%matplotlib inline

In [ ]:
# Simulation Params
test_name = 'MultiFactor_agent'
obs_steps = 300  # Number of past candles the agent needs for its calculations
period = 120  # Candle period in minutes; also the trading frequency
# Trading universe: every pair is quoted against USDT
pairs = ["USDT_BTC", "USDT_ETH", "USDT_LTC", "USDT_XRP", "USDT_XMR", "USDT_ETC", "USDT_ZEC", "USDT_DASH"]
fiat_symbol = 'USDT'  # Quote symbol
# Initial portfolio: zero balance in every traded crypto, 100 USDT in cash.
# Built from `pairs` so the universe and the balance cannot drift apart.
init_funds = {pair.split('_')[1]: '0.00000000' for pair in pairs}
init_funds[fiat_symbol] = '100.00000000'
# init_funds = make_balance(crypto=1 / len(pairs), fiat=0.0, pairs=pairs)
data_dir = './data'  # Data directory for offline testing

In [ ]:
## Environment setup
# Data feed setup: wrap the live Poloniex client in an offline backtest feed
tapi = Poloniex()
tapi = BacktestDataFeed(tapi, period, pairs=pairs, balance=init_funds, load_dir=data_dir)

# Download training data: the window from 300 to 100 days ago.
# BUGFIX: the original used datetime.timestamp(datetime.utcnow() - ...).
# datetime.timestamp() interprets a *naive* datetime as LOCAL time, so feeding
# it utcnow() shifts the resulting epoch by the local UTC offset. Using naive
# local now() is correct here and consistent with the validation/test cells.
tapi.download_data(end=(datetime.now() - timedelta(days=100)).timestamp(),
                   start=(datetime.now() - timedelta(days=300)).timestamp())

# And save it to disk, if you want to
# tapi.save_data(data_dir + '/train')

# Or load data from disk
# tapi.load_data('/train')

# Environment setup
env = BacktestEnvironment(period, obs_steps, tapi, fiat_symbol, test_name)
obs = env.reset()

# Agent setup
# Momentum factor: adaptive moving-average (KAMA) crossover with a std-dev span
mom = apriori.MomentumTrader(ma_span=[133, 234], std_span=39, mean_type='kama')
# Harmonic (Fibonacci pattern) factor
fib = apriori.HarmonicTrader()

# Combine both signals into a single multi-factor agent
agent = apriori.FactorTrader([mom, fib])

In [ ]:
# Training run
# Train params
nb_steps = 100  # optimization iterations
batch_size = 1
nb_max_episode_steps = 7  # cap on steps per training episode

# Hyperparameter search space: [lower, upper] bounds for each agent parameter
search_space = dict(
    std_window=[2, env.obs_steps],
    std_weight=[0.0001, 3],
    alpha_up=[1e-4, 1],
    alpha_down=[1e-4, 1],
)

# Optimization session, this may take some time
opt_params, info = agent.fit(env, nb_steps, batch_size, search_space,
                             nb_max_episode_steps=nb_max_episode_steps)
print('\n', opt_params, '\n', env.status)

# Run on test data
agent.test(env, verbose=True)
env.plot_results();

In [ ]:
# Validation run
# Download validation data: the window from 100 to 50 days ago
validation_end = datetime.now() - timedelta(days=50)
validation_start = datetime.now() - timedelta(days=100)
tapi.download_data(end=validation_end.timestamp(), start=validation_start.timestamp())

# or load from disk
# env.tapi.load_data('/eval')

# Run evaluation
agent.test(env, verbose=True)
# Show results
env.plot_results();

In [ ]:
# Test run
# Download the most recent 50 days of data, up to right now
test_end = datetime.now()
test_start = datetime.now() - timedelta(days=50)
tapi.download_data(end=test_end.timestamp(), start=test_start.timestamp())
# Or load from disk
# env.tapi.load_data('/test')
# Run test
agent.test(env, verbose=True)
# Show results
env.plot_results();