In [1]:
#air=0,2
#fridge=0,.12
#install sqlalchemy,pymongo,scikit-learn,update pandas
import sys
sys.path.append('../../') # or non-Unix equivalent (add wikienergy/ to path)
import numpy as np
import pickle
import matplotlib.pyplot as plt
%matplotlib inline
from disaggregator import PecanStreetDatasetAdapter as psda
from disaggregator import utils
from disaggregator import fhmm
from disaggregator import evaluation_metrics as metric
import random
import pandas as pd
reload(metric)
reload(fhmm)
from copy import deepcopy
#pi_prior=np.array([0.5,0.5])
#a_prior=np.array([[0.98,0.02],[0.02,0.98]])
#mean_prior=np.array([[0],[2]])
#cov_prior=np.tile(1, (2, 1, 1))
#gen.generate_and_pickle_models('air1',pi_prior,a_prior,mean_prior,cov_prior,'dataid',2,'D','1T')


/usr/local/lib/python2.7/dist-packages/pandas/io/excel.py:626: UserWarning: Installed openpyxl is not supported at this time. Use >=1.6.1 and <2.0.0.
  .format(openpyxl_compat.start_ver, openpyxl_compat.stop_ver))

In [2]:
#Load Datasets
devices_types={}
devices_types_unsampled={}
ids_for_devices={}
db_url='postgresql://USERNAME:PASSWORD@db.wiki-energy.org:5432/postgres'
psda.set_url(db_url)
schema = 'curated'
tables= psda.get_table_names(schema)
print tables


[u'group1_disaggregated_2013_07', u'group2_disaggregated_2013_05', u'group2_disaggregated_2013_11', u'group2_disaggregated_2013_08', u'group1_disaggregated_2013_01', u'group1_disaggregated_2013_08', u'group3_disaggregated_2013_07', u'group2_disaggregated_2013_03', u'group1_disaggregated_2013_04', u'group3_disaggregated_2013_05', u'west_pv_fall_2013', u'group1_disaggregated_2013_09', u'south_pv_fall_2013', u'pv_summer_2013', u'group1_disaggregated_2013_03', u'southwest_pv_fall_2013', u'group2_disaggregated_2013_10', u'group2_disaggregated_2013_09', u'group3_disaggregated_2013_10', u'group2_disaggregated_2013_07', u'group1_disaggregated_2013_11', u'group1_disaggregated_2012_12', u'group1_disaggregated_2013_02', u'group3_disaggregated_2013_09', u'group2_disaggregated_2013_06', u'group3_disaggregated_2013_06', u'group3_disaggregated_2013_11', u'group2_disaggregated_2013_04', u'group3_disaggregated_2013_08', u'group2_disaggregated_2013_01', u'group1_disaggregated_2013_06', u'group2_disaggregated_2013_02', u'group1_disaggregated_2013_05', u'group1_disaggregated_2013_10', u'ev_fall_2013']

In [3]:
table=tables[1]
ids_device_name='air1'
ids_for_devices[ids_device_name]=psda.get_dataids_with_real_values(schema,table,ids_device_name)
print ids_for_devices[ids_device_name]


[93, 94, 624, 739, 1953, 2818, 2864, 3367, 3723, 5814, 5972, 6101, 6636, 6730, 7531, 7536, 7769, 7800, 9019, 9609, 9922, 9926, 9933, 9982]

In [4]:
ids_for_devices[ids_device_name]=[93,739,1953,2818,2864,3367,3723,5814,5972,6101,6636,7531,7536,7800,9609,9922,9926,9933]
#Try 1: [93,739,1953,2818,2864,3367,3723,5814,5972,6101], Top Models: [2787, 2365, 6836, 7769, 8079, 4922, 2575, 7531, 6910,7617]
#Try 2: [6636,7531,7536,7800,9609,9922,9926,9933], Top Models:[8079,6836,2365,2787,4922,2575,6910,9930,5109,7617]

In [5]:
num_houses=30
device_name='air1'
devices_types_unsampled[device_name]=psda.generate_type_for_appliance_by_dataids(schema,table,device_name,ids_for_devices[ids_device_name][:num_houses])
device_name='use'
devices_types_unsampled[device_name]=psda.generate_type_for_appliance_by_dataids(schema,table,device_name,ids_for_devices[ids_device_name][:num_houses])


select air1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=93
select air1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=739
select air1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=1953
select air1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=2818
select air1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=2864
select air1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=3367
select air1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=3723
select air1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=5814
select air1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=5972
select air1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=6101
select air1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=6636
select air1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=7531
select air1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=7536
select air1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=7800
select air1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=9609
select air1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=9922
select air1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=9926
select air1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=9933
select use,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=93
select use,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=739
select use,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=1953
select use,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=2818
select use,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=2864
select use,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=3367
select use,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=3723
select use,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=5814
select use,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=5972
select use,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=6101
select use,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=6636
select use,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=7531
select use,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=7536
select use,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=7800
select use,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=9609
select use,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=9922
select use,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=9926
select use,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=9933

In [56]:
device_name='refrigerator1'
devices_types_unsampled[device_name]=psda.generate_type_for_appliance_by_dataids(schema,table,device_name,ids_for_devices[ids_device_name][:num_houses])


select refrigerator1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=93
select refrigerator1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=739
select refrigerator1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=1953
select refrigerator1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=2818
select refrigerator1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=2864
select refrigerator1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=3367
select refrigerator1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=3723
select refrigerator1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=5814
select refrigerator1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=5972
select refrigerator1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=6101
select refrigerator1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=6636
select refrigerator1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=7531
select refrigerator1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=7536
select refrigerator1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=7800
select refrigerator1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=9609
select refrigerator1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=9922
select refrigerator1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=9926
select refrigerator1,utc_15min from "PecanStreet_CuratedSets".group2_disaggregated_2013_05 where dataid=9933

In [179]:
devices_models={}

month=5
rate='15T'
with open('../../data/shared/air1/'+str(month)+'/air1_shared_'+str(month)+'_'+str(rate)+'.pkl','rb') as f:
    devices_models['air1']=pickle.load(f)

#means_list=([devices_models['air1'][x].means_[1][0] for x in devices_models['air1']])
i=0
for l,key in enumerate(devices_models['air1']):
    if devices_models['air1'][key]._means_[1]<.1:
        devices_models['air1'].pop(key,None)
        i=i+1
print "Deleted " + str(i) + " of "+str(l+1) +" models due to low on-states."


Deleted 12 of 182 models due to low on-states.

In [81]:
month=5
schema_train='shared'
hmm_models=get_models(schema_train,month,sample_rate)


Deleted 44 of 133 models due to low on-states.

In [8]:
state_0=[]
state_1=[]
remove_zeroes_threshold=0.05
for model_num in devices_models['air1']:
    a=devices_models['air1'][model_num].means_
    if(a[1][0]>remove_zeroes_threshold):
        state_0.append(a[0][0])
        state_1.append(a[1][0])
high_lim=10
bins=80
plt.hist(state_1,bins,range=[0,high_lim])
plt.xlim([0 ,high_lim])
state_1_nump=np.array(state_1)
mean=np.mean(state_1_nump,axis=0)
std=np.std(state_1_nump,axis=0)

print mean
print std
print mean - 2*std
print
state0other=([devices_models['air1'][x].means_[1][0] for x in devices_models['air1']])
mean=np.mean(state0other)
std=np.std(state0other)

print mean
print std
print mean - std


1.47770636637
0.713499482127
0.0507074021113

1.47770636637
0.713499482127
0.764206884238

In [195]:
#Resamples the data
sample_rate='15T'
length='D'
devices_types_unsplit={}
for key in devices_types_unsampled:
    devices_types_unsplit[key]=devices_types_unsampled[key].resample(sample_rate)
    #devices_types[key]=devices_types_unsplit[key].split_by(length)
    devices_types[key]=devices_types_unsplit[key]
    print key


air1
use
refrigerator1

In [10]:
non_zero=0
total=0
for instance in devices_types['air1'].instances:
    for trace in instance.traces:
        non_zero=non_zero+ np.count_nonzero(trace.series)
        total=total+trace.series.count()
print non_zero/float(total)*100


31.4068100358

In [11]:
#Create single FHMM
type_models={}
house_id=26
type_models[ids_device_name]=devices_models[ids_device_name][house_id]
model_fhmm,means_fhmm=fhmm.generate_FHMM_from_HMMs(type_models)

In [12]:
print devices_models[ids_device_name].keys()


[26, 59, 86, 93, 94, 280, 410, 434, 484, 499, 580, 661, 739, 744, 774, 821, 871, 936, 1086, 1167, 1450, 1507, 1617, 1632, 1681, 1696, 1714, 1718, 1782, 1790, 1830, 1953, 1994, 2034, 2094, 2129, 2156, 2158, 2171, 2242, 2365, 2470, 2575, 2638, 2641, 2769, 2787, 2814, 2845, 2864, 2945, 2953, 2974, 3044, 3134, 3192, 3221, 3263, 3367, 3394, 3456, 3482, 3504, 3531, 3649, 3652, 3723, 3736, 3778, 3795, 3893, 4135, 4154, 4298, 4313, 4352, 4373, 4505, 4526, 4641, 4767, 4874, 4922, 4956, 4957, 4998, 5026, 5109, 5209, 5218, 5275, 5357, 5395, 5545, 5568, 5677, 5785, 5814, 5852, 5874, 5889, 5949, 5972, 6139, 6165, 6412, 6636, 6673, 6730, 6826, 6836, 6910, 6941, 7319, 7390, 7531, 7536, 7617, 7731, 7769, 7788, 7800, 7850, 7863, 7875, 7940, 7951, 8046, 8079, 8084, 8188, 8197, 8201, 8218, 8292, 8419, 8645, 8669, 8741, 8956, 9019, 9036, 9121, 9141, 9160, 9343, 9356, 9484, 9488, 9499, 9555, 9578, 9609, 9613, 9643, 9654, 9701, 9729, 9737, 9771, 9830, 9875, 9915, 9922, 9926, 9932, 9934, 9938, 9939, 9982]

In [13]:
#Removes Houses that are in test data from training data
for id_val in ids_for_devices[ids_device_name]:
    devices_models[ids_device_name].pop(id_val,None)

In [14]:
#Generate Test Data
test_data={}
house_num=random.randint(0,9)
trace_num=0
#print 'house num: ' + str(house_num)
#print 'trace num: ' + str(trace_num)
for device_type_name in devices_models:
        test_data[device_type_name]=utils.trace_series_to_numpy_array(devices_types[device_type_name].instances[house_num].traces[trace_num].series)
power_total=utils.trace_series_to_numpy_array(devices_types['use'].instances[house_num].traces[trace_num].series)
power_total_minus_bottom=[]
for i in power_total:
    power_total_minus_bottom.append(i-power_total.min())

In [15]:
#Predict and Plot FHMM Results
plt.plot(power_total_minus_bottom,label='total')
plt.title('Aggregated Energy without constant power')
plt.ylabel('Energy (Wh)')
plt.xlabel('Time')
for i,device_type in enumerate(type_models):
    plt.figure(1)
    plt.plot(test_data[device_type],label=device_type)
    plt.legend(bbox_to_anchor=(0., 1.05, 1, .102), loc=3,
       ncol=2, mode="expand", borderaxespad=1.)
    plt.figure()
[decoded_states, decoded_power]=fhmm.predict_with_FHMM(model_fhmm,means_fhmm,test_data,power_total_minus_bottom,plot=True)


<matplotlib.figure.Figure at 0x7f72afca8390>

In [82]:
#Generate Test Data
test_data={}
#house_num=random.randint(0,9)
bests={}
averages={}
worsts={}
trace_num=0
for house_num,device_instance in enumerate(devices_types[device_type_name].instances):
    for device_type_name in devices_models:
        test_data[device_type_name]=utils.trace_series_to_numpy_array(devices_types[device_type_name].instances[house_num].traces[trace_num].series)
        power_total=utils.trace_series_to_numpy_array(devices_types['use'].instances[house_num].traces[trace_num].series)
        power_total_minus_bottom=[]
    for i in power_total:
        power_total_minus_bottom.append(i-power_total.min())
    dataid=devices_types[device_type_name].instances[house_num].metadata['dataid']
    get_scores(test_data,power_total_minus_bottom,dataid,deepcopy(devices_models))
#Below removes houses with bad data
for house_id in deepcopy(bests):
    if(np.count_nonzero(bests[house_id]['f1_score']['precision'])==0 or len(bests[house_id]['f1_score'])<10):
        bests.pop(house_id,None)
        averages.pop(house_id,None)
        worsts.pop(house_id,None)
get_summary_scores(bests,averages,worsts)


---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-82-4e456c99fe58> in <module>()
     14         power_total_minus_bottom.append(i-power_total.min())
     15     dataid=devices_types[device_type_name].instances[house_num].metadata['dataid']
---> 16     get_scores(test_data,power_total_minus_bottom,dataid,deepcopy(devices_models))
     17 #Below removes houses with bad data
     18 for house_id in deepcopy(bests):

TypeError: get_scores() takes exactly 8 arguments (4 given)

In [178]:
with open('air1_shared_5_15T_best.pkl','w') as f:
    pickle.dump(devices_models['air1'][best_model_for_set(bests)],f)


---------------------------------------------------------------------------
KeyError                                  Traceback (most recent call last)
<ipython-input-178-99438fbf6025> in <module>()
      1 with open('air1_shared_5_15T_best.pkl','w') as f:
----> 2     pickle.dump(devices_models['air1'][best_model_for_set(bests)],f)

KeyError: 142.0

In [106]:
best_model_for_set(bests)


Out[106]:
142.0

In [184]:
device_type_name='air1'
[bests,averages,worsts]=score_models(device_type_name,devices_types,devices_models)


Deleted 23 of 182 models for house 93 due to low number of on-state guesses.
Deleted 81 of 182 models for house 739 due to low number of on-state guesses.
Deleted 21 of 182 models for house 1953 due to low number of on-state guesses.
Deleted 27 of 182 models for house 2818 due to low number of on-state guesses.
Deleted 25 of 182 models for house 2864 due to low number of on-state guesses.
Deleted 15 of 182 models for house 3367 due to low number of on-state guesses.
Deleted 21 of 182 models for house 3723 due to low number of on-state guesses.
Deleted 50 of 182 models for house 5814 due to low number of on-state guesses.
Deleted 20 of 182 models for house 5972 due to low number of on-state guesses.
Deleted 20 of 182 models for house 6101 due to low number of on-state guesses.
Deleted 17 of 182 models for house 6636 due to low number of on-state guesses.
Deleted 15 of 182 models for house 7531 due to low number of on-state guesses.
Deleted 16 of 182 models for house 7536 due to low number of on-state guesses.
Deleted 26 of 182 models for house 7800 due to low number of on-state guesses.
Deleted 24 of 182 models for house 9609 due to low number of on-state guesses.
Deleted 16 of 182 models for house 9922 due to low number of on-state guesses.
Deleted 22 of 182 models for house 9926 due to low number of on-state guesses.
Deleted 18 of 182 models for house 9933 due to low number of on-state guesses.
AVERAGE
Accuracy: 0.667666596494
F1 Score: 0.24184080471
Power Percentage Error: 156.969968757
Power Percentage Error std: 127.347143452
BEST
Power Percentage Error: 9.04772408748
F1 Score: 0.458675643909
Precision: 0.386832499307
Recall: 0.712361062933
WORST
Power Percentage Error: 440.285770995
F1 Score: 0.0732970655464

In [187]:
best_models={}
best_models['air1']=devices_models['air1'][best_model_for_set(bests)]

In [217]:
for model in devices_models['air1']:
    print model._covars_


---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-217-9cb67502f10d> in <module>()
      1 for model in devices_models:
----> 2     print model._covars_

AttributeError: 'str' object has no attribute '_covars_'

In [212]:
best_models['air1']._covars_=[1,1]

In [214]:
best_models['air1'].__dict__


Out[214]:
{'_algorithm': 'viterbi',
 '_covariance_type': 'full',
 '_covars_': [1, 1],
 '_log_startprob': array([ -2.22044605e-16,  -3.60436084e+01]),
 '_log_transmat': array([[-0.01503788, -4.20469262],
        [-2.07944154, -0.13353139]]),
 '_means_': array([[ 0.     ],
        [ 2.36215]]),
 'covars_prior': 0.01,
 'covars_weight': 1,
 'init_params': 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
 'means_prior': None,
 'means_weight': 0,
 'n_components': 2,
 'n_features': 1,
 'n_iter': 10,
 'params': 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
 'random_state': None,
 'startprob_prior': None,
 'thresh': 0.01,
 'transmat_prior': None}

In [213]:
thresh=10
model_eval_dict=evaluate_FHMM_single_type(devices_types,best_models,device_type_name,thresh,plot_sum=True,plot_ind=True)


Average Power of Houses w/ <10% Error: 694.618185484
Average Power of Houses w/ >=10% Error: 753.023018433

Percentage of Houses with <10% Error (Model):    22.2222222222
Percentage of Houses with <10% Error (Baseline): 11.1111111111

In [39]:


In [206]:
def eval_model_with_instance(types_eval,device_type_name,best_models,house_num,model_eval_dict,thresh,plot=False):
    for trace_num,trace in enumerate(types_eval[device_type_name].instances[house_num].traces):
        test_data[device_type_name]=utils.trace_series_to_numpy_array(types_eval[device_type_name].instances[house_num].traces[trace_num].series)
        power_total=utils.trace_series_to_numpy_array(types_eval['use'].instances[house_num].traces[trace_num].series)
        power_total_minus_bottom=[]
        for i in power_total:
            power_total_minus_bottom.append(i-power_total.min())
        model_fhmm,means_fhmm=fhmm.generate_FHMM_from_HMMs(best_models)
        [decoded_states, decoded_power]=fhmm.predict_with_FHMM(model_fhmm,means_fhmm,test_data,power_total_minus_bottom)
        if(plot):
            plt.figure()
            #plt.plot(power_total)
            plt.plot(test_data['air1'])
            plt.plot(decoded_power['air1'],'r')
        truth_states=metric.guess_truth_from_power(test_data[device_type_name],2)
        eval_metrics=metric.get_positive_negative_stats(truth_states,decoded_states[device_type_name])
        diff_power_perc=(metric.sum_error(test_data[device_type_name],decoded_power[device_type_name])*100/np.sum(test_data[device_type_name]))  
        precision_val=(metric.get_precision(eval_metrics['tp'],eval_metrics['fp']))
        recall_val=(metric.get_sensitivity(eval_metrics['tp'],eval_metrics['fn']))
        model_eval_dict['precision'].append(precision_val)
        model_eval_dict['recall'].append(recall_val)
        test_energy=(np.sum(test_data[device_type_name])/len(test_data[device_type_name]))*24*30
        pred_energy=(np.sum(decoded_power[device_type_name])/len(decoded_power[device_type_name]))*24*30
        error_perc=float(test_energy-pred_energy)/test_energy*100
        power_total_sum=np.sum(power_total/len(power_total)*24*30)
        model_eval_dict['diff_power_perc'].append(diff_power_perc)
        model_eval_dict['test_energy'].append(test_energy)
        model_eval_dict['pred_energy'].append(pred_energy)
        model_eval_dict['error_perc'].append(error_perc)
        model_eval_dict['power_total_sums'].append(power_total_sum)
        if(abs(error_perc)<thresh):
            model_eval_dict['num_less_than']=model_eval_dict['num_less_than']+1
            model_eval_dict['power_avg_good']=model_eval_dict['power_avg_good']+power_total_sum
        else:
            model_eval_dict['power_avg_bad']=model_eval_dict['power_avg_bad']+power_total_sum
    return model_eval_dict

In [51]:
def plot_and_sum_model_eval(model_eval_dict):
    #Baseline: Get average energy usage of a house and use that.
    if(model_eval_dict['num_less_than']>0):
        print 'Average Power of Houses w/ <'+str(thresh)+'% Error: ' + str(model_eval_dict['power_avg_good']/float(model_eval_dict['num_less_than']))
        print 'Average Power of Houses w/ >='+str(thresh)+'% Error: '+str(model_eval_dict['power_avg_bad']/float(len(model_eval_dict['error_perc'])-model_eval_dict['num_less_than']))
    #plt.plot(np.array(power_total_sum_list))
    print
    print 'Percentage of Houses with <'+str(thresh)+'% Error (Model):    ' + str(model_eval_dict['num_less_than_perc'])
    print 'Percentage of Houses with <'+str(thresh)+'% Error (Baseline): ' + str(model_eval_dict['baseline_less_than_perc'])
    baseline_val=np.sum(model_eval_dict['test_energy'])/len(model_eval_dict['test_energy'])
    a=np.empty(len(model_eval_dict['test_energy']))
    a[:]=(baseline_val)
    base_diff_power_perc=(metric.sum_error(test_data[device_type_name],a)*100/np.sum(test_data[device_type_name]))           
    plt.figure()
    plt.plot(np.absolute(model_eval_dict['error_perc']),'r')
    plt.plot(model_eval_dict['baseline_perc'],'k')
    plt.title('Percent Error Model (Red), Percent Error Baseline (Black)')
    plt.figure()
    plt.plot(model_eval_dict['test_energy'],'b')
    plt.plot(model_eval_dict['pred_energy'],'r')

    plt.plot(a,'k')
    plt.title('Predicted Energy (Red), Actual Energy (Blue)')

In [204]:
def evaluate_FHMM_single_type(types_eval,best_models,device_type_name,thresh,plot_sum=True,plot_ind=False):
    model_eval_dict={}
    test_data={}
    model_eval_dict['precision']=[]
    model_eval_dict['recall']=[]
    model_eval_dict['test_energy']=[]
    model_eval_dict['pred_energy']=[]
    model_eval_dict['error_perc']=[]
    model_eval_dict['power_total_sums']=[]
    model_eval_dict['diff_power_perc']=[]
    model_eval_dict['power_avg_good']=0
    model_eval_dict['power_avg_bad']=0
    model_eval_dict['num_less_than']=0
    for house_num,instance_test in enumerate(types_eval[device_type_name].instances):
        model_eval_dict=eval_model_with_instance(types_eval,device_type_name,best_models,house_num,model_eval_dict,thresh,plot_ind)
    for house_num,instance_test in enumerate(types_eval[device_type_name].instances):
        for trace_num,trace in enumerate(types_eval[device_type_name].instances[house_num].traces):
            test_data[device_type_name]=utils.trace_series_to_numpy_array(types_eval[device_type_name].instances[house_num].traces[trace_num].series)
            model_eval_dict['baseline_perc']=(model_eval_dict['test_energy']-(np.sum(model_eval_dict['test_energy'])/len(model_eval_dict['test_energy'])))
            model_eval_dict['baseline_less_than']=sum(abs(i) < thresh for i in model_eval_dict['baseline_perc'])
            model_eval_dict['baseline_less_than_perc']=model_eval_dict['baseline_less_than']/float(len(model_eval_dict['baseline_perc']))*100
            model_eval_dict['num_less_than_perc']=model_eval_dict['num_less_than']/float(len(model_eval_dict['error_perc']))*100
    if(plot_sum):
        plot_and_sum_model_eval(model_eval_dict)
    return model_eval_dict

In [510]:
print np.sum(eval_model(8))
print np.sum(eval_model(12))
print np.sum(eval_model(2))
print np.sum(eval_model(4))
print np.sum(eval_model(3))


2832.629
2105.903

1453.159
1724.033
994.33
Out[510]:
<matplotlib.text.Text at 0x7f047f0515d0>
<matplotlib.figure.Figure at 0x7f047ef89a10>
<matplotlib.figure.Figure at 0x7f047f8fd510>
<matplotlib.figure.Figure at 0x7f047f161610>
<matplotlib.figure.Figure at 0x7f047f362050>

In [28]:
def eval_model(house_num,plot=False):
    test_data[device_type_name]=utils.trace_series_to_numpy_array(devices_types[device_type_name].instances[house_num].traces[trace_num].series)
    power_total=utils.trace_series_to_numpy_array(devices_types['use'].instances[house_num].traces[trace_num].series)
    power_total_minus_bottom=[]
    for i in power_total:
        power_total_minus_bottom.append(i-power_total.min())

    type_models[device_name]=best_model
    model_fhmm,means_fhmm=fhmm.generate_FHMM_from_HMMs(type_models)
    [decoded_states, decoded_power]=fhmm.predict_with_FHMM(model_fhmm,means_fhmm,test_data,power_total_minus_bottom)
    if(plot):
        plt.figure()
        plt.plot(power_total)
        #plt.plot(test_data['air1'])
        #plt.plot(decoded_power['air1'],'r')
    truth_states=metric.guess_truth_from_power(test_data[device_name],2)
    eval_metrics=metric.get_positive_negative_stats(truth_states,decoded_states[device_name])
    precision_val=(metric.get_precision(eval_metrics['tp'],eval_metrics['fp']))
    recall_val=(metric.get_sensitivity(eval_metrics['tp'],eval_metrics['fn']))
    diff_power_perc.append(metric.sum_error(test_data[device_name],decoded_power[device_name])*4)
    #plt.title('Precision:' + str(precision_val) + ', Recall: ' + str(recall_val))
    if(precision_val>0 and recall_val>0 and np.sum(decoded_power['air1'])>0):
        precision_list.append(precision_val)
        recall_list.append(recall_val)
        test_energy=(np.sum(test_data['air1'])/len(test_data['air1']))*24*30
        pred_energy=(np.sum(decoded_power['air1'])/len(decoded_power['air1']))*24*30
        error_perc=float(test_energy-pred_energy)/test_energy*100
        test_energy_list.append(test_energy)
        pred_energy_list.append(pred_energy)
        error_perc_list.append(error_perc)
        return np.sum(power_total/len(power_total)*24*30)
    else:
        return 0

In [30]:
device_type_name='air1'
device_name=device_type_name
print best_model_for_set(bests)
print
diff_power_perc=[]
precision_list=[]
recall_list=[]
test_energy_sum=0
pred_energy_sum=0
test_energy_list=[]
pred_energy_list=[]
error_perc_list=[]
power_total_sum_list=[]
for house_num,device_instance in enumerate(devices_types['air1'].instances):
        power=eval_model(house_num)
        if(power>0):
            power_total_sum_list.append(power)


9654.0


In [41]:
#Baseline: Get average energy usage of a house and use that.
baseline_list=test_energy_list-sum(test_energy_list)/len(devices_types['air1'].instances)
baseline_less_25=sum(abs(i) < 25 for i in baseline_list)
plt.figure()
plt.plot(np.absolute(error_perc_list))
#num_less_25=sum(abs(i) < 25 for i in error_perc_list)
power_avg_good=0
power_avg_bad=0
num_less_25=0
for i,val in enumerate(error_perc_list):
    if(abs(val)<25):
        num_less_25=num_less_25+1
        power_avg_good=power_avg_good+power_total_sum_list[i]
    else:
        power_avg_bad=power_avg_bad+power_total_sum_list[i]
print 'Average Power of Houses w/ <25% Error: ' + str(power_avg_good/float(num_less_25))
print 'Average Power of Houses w/ >=25% Error: '+str(power_avg_bad/float(len(error_perc_list)-num_less_25))
#plt.plot(np.absolute(baseline_list),'r')
#plt.plot(np.array(power_total_sum_list))
print
print 'Percentage of Houses with <25% Error (Model):    ' + str(num_less_25/float(len(error_perc_list))*100)
print 'Percentage of Houses with <25% Error (Baseline): ' + str(baseline_less_25/float(len(error_perc_list))*100)
plt.figure()
plt.plot(test_energy_list)
plt.plot(pred_energy_list,'r')
plt.plot(baseline_list,'g')


Average Power of Houses w/ <25% Error: 681.374135945
Average Power of Houses w/ >=25% Error: 830.143245968

Percentage of Houses with <25% Error (Model):    46.6666666667
Percentage of Houses with <25% Error (Baseline): 20.0
Out[41]:
[<matplotlib.lines.Line2D at 0x7f72afef0a90>]

In [18]:
test_data[device_type_name]=utils.trace_series_to_numpy_array(devices_types[device_type_name].instances[8].traces[trace_num].series)
plt.plot(test_data[device_type_name])
plt.figure()
test_data[device_type_name]=utils.trace_series_to_numpy_array(devices_types[device_type_name].instances[12].traces[trace_num].series)
plt.plot(test_data[device_type_name])
plt.figure()
test_data[device_type_name]=utils.trace_series_to_numpy_array(devices_types[device_type_name].instances[2].traces[trace_num].series)
plt.plot(test_data[device_type_name])


Out[18]:
[<matplotlib.lines.Line2D at 0x7f72aedba510>]

In [69]:
def get_scores_old(test_data,power_total_minus_bottom,house_num,devices_models):
    precision=[]
    recall=[]
    f1_score=[]
    diff_power_perc=[]
    diff_power=[]
    accuracy=[]
    device_name='air1'

    i=0
    for l,house_id in enumerate(devices_models[device_name]):
        type_models[device_name]=devices_models[device_name][house_id]
        model_fhmm,means_fhmm=fhmm.generate_FHMM_from_HMMs(type_models)
        [decoded_states, decoded_power]=fhmm.predict_with_FHMM(model_fhmm,means_fhmm,test_data,power_total_minus_bottom)
        truth_states=metric.guess_truth_from_power(test_data[device_name],2)
        eval_metrics=metric.get_positive_negative_stats(truth_states,decoded_states[device_name])
        precision_val=(metric.get_precision(eval_metrics['tp'],eval_metrics['fp']))
        recall_val=(metric.get_sensitivity(eval_metrics['tp'],eval_metrics['fn']))
        if(len([x for x in decoded_states['air1'] if x > 0])>1):
            precision.append(precision_val)
            recall.append(recall_val)
            f1_score.append(metric.get_f1_score(eval_metrics))
            accuracy.append(metric.get_accuracy(eval_metrics))
            diff_power.append(metric.sum_error(test_data[device_name],decoded_power[device_name])/4)
            diff_power_perc.append(metric.sum_error(test_data[device_name],decoded_power[device_name])*100/np.sum(test_data[device_name]))
        else:
            i=i+1
            devices_models[device_name].pop(house_id,None)
    print "Deleted " + str(i) + " of " + str(l+1) + " models for housenum "+str(house_num)+" due to low number of on-state guesses."
    eval_df=pd.DataFrame(np.array([devices_models[device_name].keys(),precision,recall,f1_score,accuracy,diff_power_perc]).T,columns=['house_id','precision','recall','f1_score','accuracy','diff_power_perc'])

    #Best
    best_scores={}
    best_scores['f1_score']=eval_df.sort('f1_score', ascending=0).head(10)
    best_scores['accuracy']=eval_df.sort('accuracy', ascending=0).head(10)
    best_scores['precision']=eval_df.sort('precision', ascending=0).head(10)
    best_scores['recall']=eval_df.sort('recall', ascending=0).head(10)
    best_scores['diff_power_perc']=eval_df.sort(['diff_power_perc'], ascending=1).head(10)
    bests[house_num]=best_scores

    #Average
    scores={}
    scores['precision']=eval_df['precision'].mean()
    scores['precision_std']=eval_df['precision'].std()
    scores['recall']=eval_df['recall'].mean()
    scores['recall_std']=eval_df['recall'].std()
    scores['f1_score']=eval_df['f1_score'].mean()
    scores['f1_score_std']=eval_df['f1_score'].std()
    scores['accuracy']=eval_df['accuracy'].mean()
    scores['accuracy_std']=eval_df['accuracy'].std()
    scores['diff_power_perc']=eval_df['diff_power_perc'].mean()
    scores['diff_power_perc_std']=eval_df['diff_power_perc'].std()
    averages[house_num]=scores

    #Worst
    worst_scores={}
    worst_scores['f1_score']=eval_df.sort('f1_score', ascending=1).head(10)
    worst_scores['accuracy']=eval_df.sort('accuracy', ascending=1).head(10)
    worst_scores['diff_power_perc']=eval_df.sort(['diff_power_perc'], ascending=0).head(10)
    worsts[house_num]=worst_scores
    
    avg_accuracy_sum=[]
    avg_diff_power_perc_sum=[]
    avg_diff_power_perc_std_sum=[]
    avg_f1_score_sum=[]

    best_f1_score_sum=[]
    best_diff_power_perc_sum=[]

    worst_f1_score_sum=[]
    worst_diff_power_perc_sum=[]

In [20]:
def get_summary_scores(bests,averages,worsts):
    avg_accuracy_sum=[]
    avg_diff_power_perc_sum=[]
    avg_diff_power_perc_std_sum=[]
    avg_f1_score_sum=[]
    best_diff_power_perc_sum=[]
    best_precision_sum=[]
    best_recall_sum=[]
    best_f1_score_sum=[]
    worst_diff_power_perc_sum=[]
    worst_f1_score_sum=[]
    for index in bests:
        avg_accuracy_sum.append(averages[index]['accuracy'])
        avg_diff_power_perc_sum.append(averages[index]['diff_power_perc'])
        avg_diff_power_perc_std_sum.append(averages[index]['diff_power_perc_std'])
        avg_f1_score_sum.append(averages[index]['f1_score'])

        best_diff_power_perc_sum.append(np.mean(bests[index]['diff_power_perc']['diff_power_perc']))
        best_f1_score_sum.append(np.mean(bests[index]['f1_score']['f1_score']))
        best_precision_sum.append(np.mean(bests[index]['f1_score']['precision']))
        best_recall_sum.append(np.mean(bests[index]['f1_score']['recall']))
        worst_diff_power_perc_sum.append(np.mean(worsts[index]['diff_power_perc']['diff_power_perc']))
        worst_f1_score_sum.append(np.mean(worsts[index]['f1_score']['f1_score']))

    print "AVERAGE"
    print "Accuracy: " + str(np.mean(avg_accuracy_sum))
    print "F1 Score: " + str(np.mean(avg_f1_score_sum))
    print "Power Percentage Error: " + str(np.mean(avg_diff_power_perc_sum))
    print "Power Percentage Error std: " + str(np.mean(avg_diff_power_perc_std_sum))
    print "BEST"
    print "Power Percentage Error: " + str(np.mean(best_diff_power_perc_sum))
    print "F1 Score: " + str(np.mean(best_f1_score_sum))
    print "Precision: " + str(np.mean(best_precision_sum))
    print "Recall: " + str(np.mean(best_recall_sum))
    print "WORST"
    print "Power Percentage Error: " + str(np.mean(worst_diff_power_perc_sum))
    print "F1 Score: " + str(np.mean(worst_f1_score_sum))

In [120]:
def score_models(device_type_name,devices_types,models_list):
    #Generate Test Data
    test_data={}
    #house_num=random.randint(0,9)
    bests={}
    averages={}
    worsts={}
    trace_num=0
    for house_num,device_instance in enumerate(devices_types[device_type_name].instances):
        test_data[device_type_name]=utils.trace_series_to_numpy_array(devices_types[device_type_name].instances[house_num].traces[trace_num].series)
        power_total=utils.trace_series_to_numpy_array(devices_types['use'].instances[house_num].traces[trace_num].series)
        power_total_minus_bottom=[]
        for i in power_total:
            power_total_minus_bottom.append(i-power_total.min())
            dataid=devices_types[device_type_name].instances[house_num].metadata['dataid']
        [bests,averages,worsts]=get_scores(bests,averages,worsts,device_type_name,test_data,power_total_minus_bottom,dataid,deepcopy(models_list))

    #Below removes houses with bad data
    for house_id in deepcopy(bests):
        if(np.count_nonzero(bests[house_id]['f1_score']['precision'])==0 or len(bests[house_id]['f1_score'])<1):
            bests.pop(house_id,None)
            averages.pop(house_id,None)
            worsts.pop(house_id,None)
    get_summary_scores(bests,averages,worsts)
    return [bests,averages,worsts]

In [ ]:


In [ ]:


In [ ]:


In [21]:
#first_10_top=[2787, 2365, 6836, 7769, 8079, 4922, 2575, 7531, 6910,7617]
#second_10_top=[8079,6836,2365,2787,4922,2575,6910,9930,5109,7617]
#set(first_10_top) & set(second_10_top)

def best_model_for_set(bests):
    best_models=[]
    for house in bests:
        best_id=bests[house]['diff_power_perc']['f1_score'].argmin()
        best_models.append(bests[house]['diff_power_perc']['house_id'][best_id])

    best_dict={}
    for val in best_models:
        best_dict[val]=best_models.count(val)

    #The house that is best in the most models is the best house, 
    #as long as it was best for more than one model.
    best_model_key_f1=list(reversed(sorted(best_dict.keys())))[0]
    return best_model_key_f1

In [ ]:


In [153]:
house_ids_top_10=[]
house_ids_dict={}
i=0
for val in bests:
    for val2 in bests[val]:
        i=i+1
        for val3 in bests[val][val2]['house_id']:
            house_ids_dict[val3]=0

for val in bests:
    for val2 in bests[val]:
        for val3 in bests[val][val2]['house_id']:
            house_ids_top_10.append(val3)
            house_ids_dict[val3]=house_ids_dict[val3]+1   
import operator           
x=house_ids_dict
sorted_x = sorted(x.iteritems(), key=operator.itemgetter(1))
print sorted_x
print i


[(4352.0, 1), (9729.0, 1), (7940.0, 1), (3649.0, 1), (9737.0, 1), (3723.0, 1), (9484.0, 1), (2829.0, 1), (2094.0, 1), (8342.0, 1), (3736.0, 1), (8218.0, 1), (7319.0, 1), (1790.0, 1), (8741.0, 1), (2470.0, 1), (5785.0, 1), (6826.0, 1), (9643.0, 1), (3394.0, 1), (1714.0, 1), (2864.0, 1), (2171.0, 1), (59.0, 1), (5568.0, 1), (1696.0, 1), (9160.0, 1), (2378.0, 1), (9938.0, 1), (5972.0, 1), (2638.0, 1), (4154.0, 1), (6412.0, 1), (5218.0, 1), (3044.0, 1), (9654.0, 1), (5949.0, 1), (2034.0, 1), (9555.0, 1), (6139.0, 1), (5677.0, 1), (7951.0, 2), (4505.0, 2), (2974.0, 2), (2606.0, 2), (4135.0, 2), (4526.0, 2), (434.0, 2), (2242.0, 2), (9915.0, 2), (8645.0, 2), (9356.0, 2), (3531.0, 2), (1617.0, 2), (5852.0, 2), (8188.0, 2), (9578.0, 2), (1994.0, 2), (936.0, 2), (9875.0, 2), (1086.0, 2), (5395.0, 3), (9121.0, 3), (5814.0, 3), (7863.0, 3), (3134.0, 3), (94.0, 3), (5109.0, 3), (8956.0, 3), (9934.0, 4), (871.0, 4), (5209.0, 5), (9701.0, 6), (6910.0, 6), (4298.0, 7), (2575.0, 9), (7617.0, 9), (9939.0, 9), (7769.0, 10), (4922.0, 11), (2787.0, 13), (6836.0, 14), (2365.0, 14), (8079.0, 14)]
24

In [182]:
def get_scores(bests,averages,worsts,device_type_name,test_data,power_total_minus_bottom,house_num,models_list):
    precision=[]
    recall=[]
    f1_score=[]
    diff_power_perc=[]
    diff_power=[]
    accuracy=[]
    house_ids=[]
    i=0
    for model_name in models_list[device_type_name]:
        type_models={}
        type_models[device_type_name]=models_list[device_type_name][model_name]
        model_fhmm,means_fhmm=fhmm.generate_FHMM_from_HMMs(type_models)
        [decoded_states, decoded_power]=fhmm.predict_with_FHMM(model_fhmm,means_fhmm,test_data,power_total_minus_bottom)
        truth_states=metric.guess_truth_from_power(test_data[device_type_name],2)
        eval_metrics=metric.get_positive_negative_stats(truth_states,decoded_states[device_type_name])
        precision_val=(metric.get_precision(eval_metrics['tp'],eval_metrics['fp']))
        recall_val=(metric.get_sensitivity(eval_metrics['tp'],eval_metrics['fn']))
        if(len([x for x in decoded_states[device_type_name] if x > 0])>1):
            house_ids.append(model_name)
            precision.append(precision_val)
            recall.append(recall_val)
            f1_score.append(metric.get_f1_score(eval_metrics))
            accuracy.append(metric.get_accuracy(eval_metrics))
            diff_power.append(metric.sum_error(test_data[device_type_name],decoded_power[device_type_name])/4)
            diff_power_perc.append(metric.sum_error(test_data[device_type_name],decoded_power[device_type_name])*100/np.sum(test_data[device_type_name]))  
        else:
            i=i+1
    print "Deleted " + str(i) + " of " + str(l+1) + " models for house "+str(house_num)+" due to low number of on-state guesses."
    eval_df=pd.DataFrame(np.array([house_ids,precision,recall,f1_score,accuracy,diff_power_perc]).T,
                         columns=['house_id','precision','recall','f1_score','accuracy','diff_power_perc'])
    #Best
    best_scores={}
    best_scores['f1_score']=eval_df.sort('f1_score', ascending=0).head(10)
    best_scores['accuracy']=eval_df.sort('accuracy', ascending=0).head(10)
    best_scores['precision']=eval_df.sort('precision', ascending=0).head(10)
    best_scores['recall']=eval_df.sort('recall', ascending=0).head(10)
    best_scores['diff_power_perc']=eval_df.sort(['diff_power_perc'], ascending=1).head(10)
    bests[house_num]=best_scores

    #Average
    scores={}
    scores['precision']=eval_df['precision'].mean()
    scores['precision_std']=eval_df['precision'].std()
    scores['recall']=eval_df['recall'].mean()
    scores['recall_std']=eval_df['recall'].std()
    scores['f1_score']=eval_df['f1_score'].mean()
    scores['f1_score_std']=eval_df['f1_score'].std()
    scores['accuracy']=eval_df['accuracy'].mean()
    scores['accuracy_std']=eval_df['accuracy'].std()
    scores['diff_power_perc']=eval_df['diff_power_perc'].mean()
    scores['diff_power_perc_std']=eval_df['diff_power_perc'].std()
    averages[house_num]=scores

    #Worst
    worst_scores={}
    worst_scores['f1_score']=eval_df.sort('f1_score', ascending=1).head(10)
    worst_scores['accuracy']=eval_df.sort('accuracy', ascending=1).head(10)
    worst_scores['diff_power_perc']=eval_df.sort(['diff_power_perc'], ascending=0).head(10)
    worsts[house_num]=worst_scores
    return [bests,averages,worsts]

In [ ]:
#Get different model sets for May, possibly some with different transition matrices