In [136]:
    
import xlwings
    
In [137]:
    
xlwings.__version__
    
    Out[137]:
In [116]:
    
import os
import pandas as pd
import scipy.stats as stats
import scipy.optimize as opt
import numpy as np
    
In [117]:
    
raw_data_path = 'C:\\Users\\EEU227\\Documents\\Projects\\Inflation\\SPF\\data'
out_data = 'C:\\Users\\EEU227\\Documents\\Projects\\Inflation\\SPF\\CleanData\\'
    
def fit_normal_to_hist(h):
    # fit a normal density to the reported probability histogram and return its standard deviation
    if not all(h == 0):
        bins = np.array([-2.0,-1.0,-0.5,0.0,0.5,1.0,1.5,2.0,2.5,3.0,3.5,4.0,5.0])
        orig_hist = np.array(h).astype(float)
        norm_hist = orig_hist/float(np.sum(orig_hist))   # normalise so the probabilities sum to 1
        mid_points = (bins[1:] + bins[:-1])/2
        # fit mu and sigma of the normal pdf to the normalised histogram at the bin mid-points
        popt, pcov = opt.curve_fit(lambda x, mu, sig: stats.norm.pdf(x, mu, sig), mid_points, norm_hist)
    else:
        popt = [float('nan'), float('nan')]
    return popt[1]
def ZL_std(h):
    # standard deviation assuming the probability mass is uniform within each interval
    intervals = [[-2.0,-1.1],[-1.0,-0.6],[-0.5,-0.1],[0.0,0.4],[0.5,0.9],[1.0,1.4],
                 [1.5,1.9],[2.0,2.4],[2.5,2.9],[3.0,3.4],[3.5,3.9],[4.0,5.0]]
    if not all(h == 0):
        sum_i1 = 0   # accumulates the second moment
        sum_i2 = 0   # accumulates the first moment
        for i in range(1,len(h)):
            p = h[i]/100
            v1, v2 = intervals[i]
            sum_i1 += p*(v2**3 - v1**3)/(3*(v2 - v1))
            sum_i2 += p*(v2**2 - v1**2)/(2*(v2 - v1))
        zl_std = np.sqrt(sum_i1 - sum_i2**2)
    else:
        zl_std = float('nan')
    return zl_std
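The two accumulators in ZL_std follow from treating the response as a mixture of uniform distributions: for a bin [v1, v2] carrying probability p, the contribution to the mean is p*(v2**2 - v1**2)/(2*(v2 - v1)) = p*(v1 + v2)/2 and the contribution to the second moment is p*(v2**3 - v1**3)/(3*(v2 - v1)), so the standard deviation is sqrt(E[X**2] - E[X]**2).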
In [118]:
    
def Hist_std(h):
    # standard deviation of the histogram itself, using Sheppard's correction for grouping
    if not all(h == 0):
        bins = np.array([-2.0,-1.0,-0.5,0.0,0.5,1.0,1.5,2.0,2.5,3.0,3.5,4.0,5.0])
        orig_hist = np.array(h).astype(float)
        norm_hist = orig_hist/float(np.sum(orig_hist))
        mid_points = (bins[1:] + bins[:-1])/2
        MeanCrude = np.dot(norm_hist, mid_points)
        VarCrude  = np.dot(norm_hist, (mid_points - MeanCrude)**2)
        bin_widths = np.diff(bins)
        BinWidth = bin_widths.mean()
        VarSheppard = VarCrude - (BinWidth**2)/12       # variance with Sheppard's correction
        if VarSheppard > 0:
            hist_std = np.sqrt(VarSheppard)
        else:
            hist_std = np.sqrt(VarCrude)
    else:
        hist_std = float('nan')
    return hist_std
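    
An illustrative check of the three estimators (a sketch, not part of the original notebook): the histogram below is made up purely for illustration and is not SPF data.

In [ ]:
    
# hypothetical 12-bin response, probabilities in percent summing to 100
example_h = np.array([0, 0, 0, 5, 10, 20, 30, 20, 10, 5, 0, 0], dtype=float)
print(fit_normal_to_hist(example_h), ZL_std(example_h), Hist_std(example_h))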
    
In [119]:
    
years = [year for year in range(2010,2017)]
    
In [120]:
    
years
    
    Out[120]:
In [124]:
    
#writer = pd.ExcelWriter(out_data + 'SPF-aggregate-moments.xlsx')
#years = [2015]
years = [year for year in range(2010,2017)]
quarters = [1,2,3,4]
months = ['Dec','Mar','Jun','Sep']
dfList = []
    
In [125]:
    
for year in years:
    for q in quarters:
        f = str(year) + 'Q' + str(q)
        fname = f + '.csv'
        if os.path.isfile(raw_data_path + '\\' + fname):
            raw_df = pd.read_csv(raw_data_path + '\\' + fname,header = 1)
            # find the row where the growth expectations start
            dum = raw_df[raw_df['TARGET_PERIOD'] == 'GROWTH EXPECTATIONS; YEAR-ON-YEAR CHANGE IN REAL GDP'].index[0]
            mask_columns = ~raw_df.columns.str.contains('Unnamed')
            df = raw_df.iloc[0:dum-1,mask_columns]    
            df['source'] = str(year) + '-Q' + str(q)
            df = df.rename(columns={'TARGET_PERIOD':'target','FCT_SOURCE':'id','POINT':'point',
                                   'TN1_0':'[-2.0,-1.1]','FN1_0TN0_6':'[-1.0,-0.6]',
                                   'FN0_5TN0_1':'[-0.5,-0.1]','F0_0T0_4':'[0.0,0.4]',
                                   'F0_5T0_9':'[0.5,0.9]','F1_0T1_4':'[1.0,1.4]',
                                   'F1_5T1_9':'[1.5,1.9]','F2_0T2_4':'[2.0,2.4]',
                                   'F2_5T2_9':'[2.5,2.9]','F3_0T3_4':'[3.0,3.4]',
                                   'F3_5T3_9':'[3.5,3.9]','F4_0':'[4.0,5.0]'})
            df = df[['source','target','id','point','[-2.0,-1.1]','[-1.0,-0.6]','[-0.5,-0.1]',
                     '[0.0,0.4]','[0.5,0.9]','[1.0,1.4]','[1.5,1.9]','[2.0,2.4]','[2.5,2.9]',
                     '[3.0,3.4]','[3.5,3.9]', '[4.0,5.0]']]
            # remove rows where point is missing
            maskNaN = df.point.isnull()
            df = df[~maskNaN]                    
            df.fillna(0,inplace = True)
            for colname in df.columns[3:]:
                df[colname] = df[colname].astype('float')
            mask = df.columns.str.contains(',')
#            df['GA_std'] = df.iloc[:,mask].apply(fit_normal_to_hist,axis=1)
#            df['ZL_std'] = df.iloc[:,mask].apply(ZL_std,axis=1)
            df['Hist_std'] = df.iloc[:,mask].apply(Hist_std,axis=1)
            df = df[['source','target','id','point','Hist_std']]
            df['id'] = df['id'].astype('int')
            df['point'] = df['point'].astype('float32')
            #df.to_excel(writer,f,index=False)             
            # create time series representation of the targets
            mask_t0 = str(year)
            mask_t1 = str(year+1)
            mask_t2 = str(year+2)
            if q<3:
                mask_t4or5 = str(year+4)
            else:
                mask_t4or5 = str(year+5)
            if q==1:
                mask_Rt1 = str(year) + months[q-1]
                mask_Rt2 = str(year+1) + months[q-1]
            else:
                mask_Rt1 = str(year+1) + months[q-1]
                mask_Rt2 = str(year+2) + months[q-1]
                #
            df.loc[df.loc[:,'target'] == mask_t0,'targetNew'] = 't'
            df.loc[df.loc[:,'target'] == mask_t1,'targetNew'] = 't+1'
            df.loc[df.loc[:,'target'] == mask_t2,'targetNew'] = 't+2'
            df.loc[df.loc[:,'target'] == mask_t4or5,'targetNew'] = 't+4'
            df.loc[df.loc[:,'target'] == mask_Rt1,'targetNew'] = 'roll 1'
            df.loc[df.loc[:,'target'] == mask_Rt2,'targetNew'] = 'roll 2'
            
            dfList.append(df) 
#writer.save()
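To make the target mapping concrete: for the 2014-Q3 survey the code above labels the calendar-year targets '2014', '2015' and '2016' as 't', 't+1' and 't+2', labels the longer-term target '2019' (year+5, since q>=3) as 't+4', and maps the rolling horizons '2015Jun' and '2016Jun' to 'roll 1' and 'roll 2'; for a Q1 round the first rolling horizon falls in the survey year itself (e.g. '2014Dec').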
    
In [126]:
    
# Define the aggregation calculations
aggregations = {
    'point': {
        'std': 'std',    # disagreement: cross-sectional std of the point forecasts
    },
    'Hist_std': {
        'mean': 'mean',  # average of the individual histogram standard deviations
    }
}
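Passing a nested {column: {name: function}} dict to .agg produces a two-level column index, which is why the results are addressed below as grouped['point']['std'] and grouped['Hist_std']['mean']. Note that later pandas versions drop support for this nested-dict form in favour of named aggregation, so a newer environment would need to rewrite this spec.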
    
In [127]:
    
dfAll = pd.concat(dfList,axis=0)
    
In [128]:
    
dfAll.head()
    
    Out[128]:
In [129]:
    
dfAll.tail()
    
    Out[129]:
In [130]:
    
grouped = dfAll.groupby(['source','targetNew']).agg(aggregations)
    
In [131]:
    
grouped.head()
    
    Out[131]:
In [132]:
    
grouped['AggVar']= grouped['point']['std'] + grouped['Hist_std']['mean']
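Note that AggVar, as computed here, is the sum of two standard deviations (the cross-sectional std of the point forecasts plus the mean of the individual histogram std's), not a sum of variances.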
    
In [133]:
    
grouped.head(10)
    
    Out[133]:
In [134]:
    
dfAll.loc[:,'targetNew'].unique()
    
    Out[134]:
In [135]:
    
grouped.xs('t+4',level=1).sortlevel(0,axis=1)
    
    Out[135]:
In [ ]:
    
for h in dfAll.loc[:,'targetNew'].unique():
    df_h = grouped.xs(h,level=1).sortlevel(0,axis=1)
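    # a possible continuation (not in the original notebook): write each horizon's
    # cross-section out, mirroring the commented-out per-quarter Excel export above, e.g.
    # df_h.to_excel(writer, sheet_name=h)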