In [ ]:
import gc
gc.collect()  # Free leftover objects from any previous notebook run
import sys
sys.path.insert(0, '../')  # Make the local cryptotrader package importable
import logging
logging.basicConfig(level=logging.ERROR)  # Silence everything below ERROR
from datetime import datetime, timedelta
from cryptotrader.exchange_api.poloniex import Poloniex
from cryptotrader.envs.trading import BacktestDataFeed, BacktestEnvironment
from cryptotrader.envs.utils import make_balance, convert_to
from cryptotrader.agents import apriori
from cryptotrader.utils import array_normalize, simplex_proj
from bokeh.io import output_notebook
from jupyterthemes import jtplot
output_notebook()  # Render bokeh plots inline in this notebook
jtplot.style()  # Apply jupyterthemes styling to matplotlib plots
%matplotlib inline
In [ ]:
# --- Simulation parameters -------------------------------------------------
test_name = 'Fibonacci_agent'   # Label used to tag this experiment's results
obs_steps = 300                 # Candles the agent needs in its observation window
period = 120                    # Candle period in minutes (also the trading frequency)
# Trading universe: USDT-quoted markets
pairs = ["USDT_BTC", "USDT_ETH", "USDT_LTC", "USDT_XRP",
         "USDT_XMR", "USDT_ETC", "USDT_ZEC", "USDT_DASH"]
fiat_symbol = 'USDT'            # Quote (cash) symbol
data_dir = './data'             # Directory for cached offline data
# Starting portfolio: equal crypto allocation across the universe, no fiat.
init_funds = make_balance(crypto=1 / len(pairs), fiat=0.0, pairs=pairs)
# All-fiat alternative:
# init_funds = make_balance(crypto=0.0, fiat=100.0, pairs=pairs) # Full fiat
In [ ]:
## Environment setup
# Data feed setup: wrap the exchange client in a backtest feed
tapi = Poloniex()
tapi = BacktestDataFeed(tapi, period, pairs=pairs, balance=init_funds, load_dir=data_dir)
# Download new data from the exchange (training window: 300 to 100 days ago).
# FIX: datetime.timestamp() interprets a *naive* datetime as LOCAL time, so
# datetime.timestamp(datetime.utcnow()) was shifted by the machine's UTC offset.
# Use datetime.now() — consistent with the validation/test cells below — so the
# naive datetime and timestamp() agree and the epoch timestamps are correct.
tapi.download_data(end=datetime.timestamp(datetime.now() - timedelta(days=100)),
start=datetime.timestamp(datetime.now() - timedelta(days=300)))
# And save it to disk, if you want to
# tapi.save_data(data_dir + '/train')
# Or load data from disk
# tapi.load_data('/train')
# Environment setup
env = BacktestEnvironment(period, obs_steps, tapi, fiat_symbol, test_name)
obs = env.reset()
# Agent setup: Fibonacci/harmonic-pattern trader with simplex-projection activation
agent = apriori.HarmonicTrader(activation=simplex_proj)
In [ ]:
# --- Training run ----------------------------------------------------------
# Optimization hyper-parameters
nb_steps = 100               # Number of optimization iterations
batch_size = 1               # Episodes per optimization step
nb_max_episode_steps = 66    # Cap on environment steps per episode
# Parameter search space: {param_name: [lower_bound, upper_bound]}
search_space = {
    'err_allowed': [1e-8, 1e-1],
    'peak_order': [1, 20],
    'alpha_up': [1e-1, 1],
    'alpha_down': [1e-1, 1],
    'decay': [0.9, 1.0],
}
# Fit the agent on the training data (this may take a while)
params, info = agent.fit(env, nb_steps, batch_size, search_space,
                         nb_max_episode_steps=nb_max_episode_steps, verbose=True)
print("\n", params, "\n", env.status)
# Re-run the fitted agent on the training data and plot the outcome
agent.test(env, verbose=True)
env.plot_results();
In [ ]:
# --- Validation run --------------------------------------------------------
# Fetch a held-out window: from 100 days ago up to 50 days ago
val_end = datetime.timestamp(datetime.now() - timedelta(days=50))
val_start = datetime.timestamp(datetime.now() - timedelta(days=100))
tapi.download_data(end=val_end, start=val_start)
# Alternatively, load cached data from disk:
# env.tapi.load_data('/eval')
agent.test(env, verbose=True)   # Evaluate the fitted agent on unseen data
env.plot_results();             # Plot the evaluation results
In [ ]:
# --- Test run --------------------------------------------------------------
# Fetch the most recent 50 days, up to now
test_end = datetime.timestamp(datetime.now())
test_start = datetime.timestamp(datetime.now() - timedelta(days=50))
tapi.download_data(end=test_end, start=test_start)
# Alternatively, load cached data from disk:
# env.tapi.load_data('/test')
agent.test(env, verbose=True)   # Final out-of-sample evaluation
env.plot_results();             # Plot the test results