In [ ]:
from cbench.commands import *
from cbench import state
from plumbum.machines.paramiko_machine import ParamikoMachine
from time import sleep
import _thread
In [ ]:
# Run configuration: RUN_NAME labels this result set, WORKLOAD selects the
# YCSB workload file. The name appears to encode
# <phase>_<workload>_<cluster size>_vs_<load-generator type>_<target ops/s>
# — TODO confirm the scheme against the cbench docs.
state.RUN_NAME = "Baseline_READ_MODIFY_WRITE_ALL_5_vs_m4.2xlarge_190"
state.WORKLOAD = "workloads/workload_read_modify_write"
# Alternative workloads used in other experiment runs:
#state.WORKLOAD = "workloads/workload_read"
#state.WORKLOAD = "workloads/workload_scan"
In [ ]:
# Show existing EC2 instances and restore previously saved cluster state
# from AWS, so state.CLUSTER_INSTANCES reflects what is actually running.
print("Instances:")
list_instances()
print("Loading state from AWS..")
load_state()
print("Cluster instances: {0}".format(state.CLUSTER_INSTANCES))
In [ ]:
# Provision the fleet: five storage-optimized i2.xlarge nodes for the
# Cassandra cluster and one m4.2xlarge load generator for YCSB.
# NOTE: `type` here is a keyword parameter of create_instances, not the builtin.
create_instances(5, state.CLUSTER_INSTANCES, type="i2.xlarge")
create_instances(1, state.YCSB_INSTANCES, 'ycsb', type="m4.2xlarge")
In [ ]:
# Show the current cluster membership for a manual sanity check.
cluster_nodes = state.CLUSTER_INSTANCES
print(cluster_nodes)
In [ ]:
# Wait for the freshly created instances to finish booting before we
# try to install anything on them.
boot_wait_seconds = 5 * 60
sleep(boot_wait_seconds)
In [ ]:
# create the cluster
# Bootstraps the database cluster on the provisioned instances.
create_cluster(state.CLUSTER_INSTANCES)
In [ ]:
# Stage the benchmark run: presumably loads the workload's data set and
# registers the run under RUN_NAME — verify in cbench.commands.
prepare_benchmark(workload=state.WORKLOAD, name=state.RUN_NAME)
In [ ]:
# Split the last instance off the cluster list: `temp` holds it for later
# re-use, and CLUSTER_INSTANCES is rebound to a new list without it.
temp = state.CLUSTER_INSTANCES[-1:]
state.CLUSTER_INSTANCES = state.CLUSTER_INSTANCES[:-1]
In [ ]:
# Inspect the node that was split off from the cluster list.
held_out_node = temp
print(held_out_node)
In [ ]:
# Put the split-off node back — apparently so the baseline run below
# uses the full cluster again.
state.CLUSTER_INSTANCES.append(temp[0])
In [ ]:
# Sanity check: the cluster list should be back to its full size.
print(state.CLUSTER_INSTANCES)
In [ ]:
# Baseline benchmark
# start_benchmark runs in a background thread; `bench` holds the raw thread
# id returned by _thread.start_new_thread, not a Thread object.
# maxexecutiontime=300 presumably caps the YCSB run at 300 s — the 5-minute
# sleep waits that cap out before polling for completion.
bench = _thread.start_new_thread(start_benchmark, (190, ["-p","maxexecutiontime=300"]))
sleep(5 * 60)
wait_for_finish()
gather_results()
In [ ]:
# Plot the baseline run at 10-second granularity.
plot(state.RUN_NAME, granularity=10)
In [ ]:
# Max Load benchmark: step the target throughput from 170 to 230 ops/s in
# increments of 10 and run a capped benchmark at each level.
# (Original code iterated range(7) and immediately overwrote the loop
# variable with 170 + num * 10 — range(170, 240, 10) yields the identical
# sequence without reusing the name.)
for target_ops in range(170, 240, 10):
    state.RUN_NAME = "MAX_READ_AGAIN_6_vs_m4.2xlarge_" + str(target_ops)
    # Background thread; maxexecutiontime=300 caps the run at 300 s.
    bench = _thread.start_new_thread(start_benchmark, (target_ops, ["-p","maxexecutiontime=300"]))
    sleep(5 * 60)
    wait_for_finish()
    gather_results()
    # Wait for compaction etc. to finish
    sleep(1 * 60)
In [ ]:
print(state.CLUSTER_INSTANCES)
# Create additional instance
# create_instances presumably appends the new node to the given list, so
# CLUSTER_INSTANCES[-1:] below is the newly created instance — verify.
create_instances(1, state.CLUSTER_INSTANCES, type="i2.xlarge")
print("New instance: {0}".format(state.CLUSTER_INSTANCES[-1:]))
In [ ]:
# Join the newest node into the running cluster (scale-up).
scale_cluster(state.CLUSTER_INSTANCES[-1:])
In [ ]:
sleep(5)
# NOTE(review): temp[0] (the node split off earlier) is appended here even
# though it was already re-appended before the baseline run — this looks
# like it could leave a duplicate entry in CLUSTER_INSTANCES; confirm the
# intent before reusing this notebook.
state.CLUSTER_INSTANCES.append(temp[0])
In [ ]:
# Scaling benchmark
# Scale-up experiment: start the load in a background thread, wait 3 minutes,
# then add the last node mid-run so the results capture rebalancing under load.
state.RUN_NAME = "Scale_READ_MODIFY_WRITE_ALL_5_vs_m4.2xlarge_190"
bench = _thread.start_new_thread(start_benchmark, (190, []))
sleep(3 * 60)
scale_cluster(state.CLUSTER_INSTANCES[-1:])
wait_for_finish()
gather_results()
In [ ]:
# Giving the cluster a short pause
# (lets background work such as compaction settle before the next run).
sleep(2 * 60)
In [ ]:
#Reduce benchmark
# Scale-down experiment: start the load, wait 3 minutes, then remove the
# last Cassandra node mid-run to measure decommissioning under load.
state.RUN_NAME = "Reduce_READ_MODIFY_WRITE_ALL_5_vs_m4.2xlarge_190"
bench = _thread.start_new_thread(start_benchmark, (190, []))
sleep(3 * 60)
remove_cassandra_instance(state.CLUSTER_INSTANCES[-1])
wait_for_finish()
gather_results()
In [ ]:
#state.RUN_NAME = "Reduce_READ_MODIFY_WRITE_4_vs_m4.2xlarge_100_2"
# Manual re-poll, presumably for when the previous cell was interrupted.
wait_for_finish()
#gather_results()
In [ ]:
# Collect the benchmark result files for the current RUN_NAME.
gather_results()
In [ ]:
# Clean up log files — presumably on the remote instances; confirm scope
# in cbench.commands before relying on it.
cleanup_logs()
In [ ]:
# Destructive: terminates all EC2 instances used by the experiment.
terminate_all()
In [ ]:
# Render plots for the scale-up and scale-down runs at 30 s granularity.
for run_name in (
    "Scale_READ_MODIFY_WRITE_ALL_5_vs_m4.2xlarge_190",
    "Reduce_READ_MODIFY_WRITE_ALL_5_vs_m4.2xlarge_190",
):
    plot(run_name, granularity=30)
In [ ]:
#plot("Scale_READ_5_to_6_vs_m4.2xlarge_190", measurements=["avg"], op_types=["READ"], granularity=10)
# Focused plot: average latency of READ operations only.
# NOTE(review): this run name uses "..._5_vs_..." while the runs above used
# "..._ALL_5_vs_..." — confirm it refers to an existing result set.
plot("Reduce_READ_MODIFY_WRITE_5_vs_m4.2xlarge_190", measurements=["avg"], op_types=["READ"], granularity=10)
In [ ]:
# Collect the "lois" artifacts for the scale-up run — TODO confirm what
# gather_lois extracts (defined in cbench.commands).
gather_lois("Scale_READ_5_to_6_vs_m4.2xlarge_190")
In [ ]:
from cbench import util

# Report the docker status of every node currently in the cluster.
for node in state.CLUSTER_INSTANCES:
    util.docker_status(node)
In [ ]:
# End-of-notebook marker cell.
greeting = "Hello World!"
print(greeting)