Working with a static dataset, e.g. loading the previously grabbed spot price data.
In [1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn
%matplotlib inline
import time
import dateutil.parser
In [2]:
# Make sure to use the filtered file
#filename = 'data/aws-recent-filtered.csv'
filename = './data/subset.csv'
In [3]:
start = time.time()
# Parse the data file and extract the results, timing the read
df = pd.read_csv(filename)
print(time.time() - start)
df.head(3)
Out[3]:
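As an aside, the timestamp parsing and indexing done in the next cell can also happen at read time; a minimal sketch, assuming the file has already been stripped of the repeated header rows that the next cell removes (`df2` is just an illustrative name):
In [ ]:
# Sketch only: parse timestamps and set the index at read time
# (assumes a clean file with no repeated header rows)
df2 = pd.read_csv(filename, parse_dates=['TimeStamp'], index_col='TimeStamp')
df2.sort_index().head(3)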
In [4]:
df = df.dropna()
# Drop repeated header rows left over from concatenating CSV files
df = df[df.AvailabilityZone != "AvailabilityZone"]
df['TimeStamp'] = pd.to_datetime(df.TimeStamp)
# Use the timestamp as the index and keep the frame time-ordered
df.index = df.TimeStamp
df = df.drop('TimeStamp', axis=1)
df = df.sort_index()
df.head(3)
Out[4]:
In [5]:
print(len(df))
print(df['InstanceType'].unique())
print(df['AvailabilityZone'].unique())
print(df['SpotPrice'].unique())
For each machine type there exists a region that is more favorable to use: its market volatility is very low and its prices tend to stay cheaper than those of the other regions.
By proving this hypothesis, users will be able to find the best region to bid in, as long as latency is not an issue for them.
Data science tools & techniques: we can use clustering and classification methods; a small clustering sketch follows.
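As a minimal sketch of the clustering idea (not the analysis actually run in this notebook), k-means could group price series by their behavior. It assumes `series` is a numeric (n_series, n_hours) array of hourly prices, such as the per-type columns built later:
In [ ]:
# Sketch only: cluster hourly price series by shape with k-means
from sklearn.cluster import KMeans

def cluster_price_series(series, n_clusters=3):
    X = np.asarray(series, dtype=float)
    # Standardize each row so clusters reflect price behavior, not price level
    X = (X - X.mean(axis=1, keepdims=True)) / (X.std(axis=1, keepdims=True) + 1e-9)
    return KMeans(n_clusters=n_clusters, random_state=0).fit_predict(X)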
In [6]:
def corrGraph(title, df):
    # Heatmap of pairwise correlations; mask the upper triangle (it mirrors the lower)
    corr_df = df.corr()
    mask = np.zeros_like(corr_df)
    mask[np.triu_indices_from(mask)] = True
    seaborn.heatmap(corr_df, cmap='RdYlGn_r', vmax=1.0, vmin=-1.0, mask=mask, linewidths=2.5)
    plt.yticks(rotation=0)
    plt.title(title)
    plt.xticks(rotation=90)
    plt.show()
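A hypothetical call, once the per-zone frames are built a few cells below (the zone key and the TimeStamp handling here are assumptions):
In [ ]:
# Sketch: correlations between instance-type prices within one zone
corrGraph('us-east-1a spot price correlations',
          zonedfs['us-east-1a'].set_index('TimeStamp'))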
In [7]:
# Restrict to a two-month window and show some info about the data
df = df.truncate(before='2016-10-13 00:00:00', after='2016-12-11 00:00:00')
print(df.index.min())
print(df.index.max())
print(df.index.max() - df.index.min())
df.head(3)
Out[7]:
In [8]:
# Group by (zone, instance type) pairs
depa = df.groupby(['AvailabilityZone', 'InstanceType'])
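Each group key is an (AvailabilityZone, InstanceType) pair; a quick peek, as a sketch:
In [ ]:
# Sketch: inspect the first few (zone, instance type) groups and their sizes
for name, group in list(depa)[:3]:
    print(name, len(group))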
In [9]:
# Initialize dictionaries of empty DataFrames: one per zone and one per instance type
zonedfs = {}
typedfs = {}
for item in df['InstanceType'].unique():
    typedfs.update({item: pd.DataFrame()})
for item in df['AvailabilityZone'].unique():
    zonedfs.update({item: pd.DataFrame()})

# Fill zonedfs: one frame per zone, one price column per instance type in that zone
for name, group in depa:
    # Build each frame by merging; the first group seen for a zone seeds its frame
    if zonedfs[name[0]].empty:
        # Resample hourly, taking the mean price within each hour
        zonedfs[name[0]] = group.resample('H').mean()
        zonedfs[name[0]] = zonedfs[name[0]].fillna(method="ffill")
        # Reset the index so later merges can join on TimeStamp
        zonedfs[name[0]] = zonedfs[name[0]].reset_index()
        zonedfs[name[0]].rename(columns={'SpotPrice': name[1]}, inplace=True)
    else:
        # Resample hourly, taking the mean price within each hour
        group1 = group.resample('H').mean()
        group1 = group1.fillna(method="ffill")
        group1.rename(columns={'SpotPrice': name[1]}, inplace=True)
        # Reset the index so the frames merge on TimeStamp
        group1 = group1.reset_index()
        zonedfs[name[0]] = zonedfs[name[0]].merge(group1, how='right')

# Fill typedfs: one frame per instance type, one price column per zone offering that type
for name, group in depa:
    if typedfs[name[1]].empty:
        typedfs[name[1]] = group.resample('H').mean()
        typedfs[name[1]] = typedfs[name[1]].fillna(method="ffill")
        typedfs[name[1]].rename(columns={'SpotPrice': name[0]}, inplace=True)
        typedfs[name[1]] = typedfs[name[1]].reset_index()
    else:
        group1 = group.resample('H').mean()
        group1 = group1.fillna(method="ffill")
        group1.rename(columns={'SpotPrice': name[0]}, inplace=True)
        group1 = group1.reset_index()
        typedfs[name[1]] = typedfs[name[1]].merge(group1, how='right')
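As an aside, the same wide layout could likely be built more concisely with pivot_table; a sketch, assuming the column names above and a numeric SpotPrice (handling of missing hours may differ slightly from the merge loops):
In [ ]:
# Sketch: one row per hour, one (zone, instance type) column per price series
wide = df.pivot_table(values='SpotPrice',
                      index=pd.Grouper(freq='H'),
                      columns=['AvailabilityZone', 'InstanceType'],
                      aggfunc='mean')
wide = wide.fillna(method='ffill')
wide.head(3)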
In [23]:
# Flatten the per-type frames into a list of rows for export
df = typedfs["c4.2xlarge"]
df.head(10)

array = []
def populate_array(df):
    # Append every column of the frame (TimeStamp included) as one row
    for c in df.columns:
        xs = df[c].values
        array.append(xs.tolist())
    print("finished")

for x in ['d2.2xlarge', 'r3.2xlarge', 'c4.4xlarge',
          'd2.4xlarge', 'r3.xlarge', 'm4.10xlarge', 'm4.2xlarge', 'c4.xlarge',
          'm4.xlarge', 'm4.large', 'c4.large', 'c4.8xlarge', 'm4.4xlarge']:
    populate_array(typedfs[x])
In [24]:
# Sanity-check one row's length, then dump all series to CSV (one series per row)
len(array[23])
df = pd.DataFrame(array)
df.to_csv("vmarrays.csv")
df
Out[24]:
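A hedged alternative to the unlabeled row dump above: concatenating the per-type frames keeps zone and type labels in the CSV. The output filename is hypothetical; it assumes the typedfs frames built earlier, each carrying a TimeStamp column:
In [ ]:
# Sketch: labeled export, with instance type as the outer column level
labeled = pd.concat({k: v.set_index('TimeStamp') for k, v in typedfs.items()},
                    axis=1)
labeled.to_csv("vmarrays_labeled.csv")  # hypothetical filename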