In [322]:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
In [323]:
# Libraries To Use
from __future__ import division
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
import seaborn as sns
In [324]:
%matplotlib inline
In [325]:
%load_ext line_profiler
In [326]:
datapath = 'C:/Users/LZJF_02/Desktop/myjob/FOFNetWorth/'
In [327]:
# Daily net-worth data files (one CSV per fund)
filenameleigen = 'leigen.csv'
filenameqilin = 'qilin.csv'
filenameyinnuo = 'yinnuo.csv'
filenamelanse = 'lanse.csv'
filenameminghong = 'minghong.csv'
In [328]:
# Weekly net-worth data files (one CSV per fund)
filenameaiye = 'aiye.csv'
filenameqianshi = 'qianshi.csv'
In [382]:
from os import walk

# Collect the names of every file found under the data directory
# (recursive walk, same as the original extend-in-a-loop version).
f = [name for _root, _dirs, names in walk(datapath) for name in names]
In [383]:
f
Out[383]:
In [331]:
# Leigen fund: load the daily net-worth series.
# The CSV's first column is parsed as dates, then the 'time' column
# becomes the index after renaming.
dataleigen = pd.read_csv(datapath + filenameleigen, infer_datetime_format=True,
                         parse_dates=[0], index_col=0)
dataleigen.columns = ['time', 'leigen_NW']
dataleigen = dataleigen.reset_index(drop=True).set_index('time')
dataleigen
Out[331]:
In [332]:
# Qianshi fund: load the weekly net-worth series and discard the
# precomputed 'return' column (returns are recomputed later).
dataqianshi = pd.read_csv(datapath + filenameqianshi, infer_datetime_format=True,
                          parse_dates=[0], index_col=0)
dataqianshi = dataqianshi.drop('return', axis=1)
dataqianshi
Out[332]:
In [333]:
# Qilin fund: load the daily net-worth series (used as-is).
dataqilin = pd.read_csv(datapath + filenameqilin, infer_datetime_format=True,
                        parse_dates=[0], index_col=0)
dataqilin
Out[333]:
In [334]:
# Yinnuo fund: load the daily net-worth series (used as-is).
datayinnuo = pd.read_csv(datapath + filenameyinnuo, infer_datetime_format=True,
                         parse_dates=[0], index_col=0)
datayinnuo
Out[334]:
In [335]:
# Lanse Tianji No.1 fund: load the daily net-worth series and drop
# any columns that contain missing values.
datalanse = pd.read_csv(datapath + filenamelanse, infer_datetime_format=True,
                        parse_dates=[0], index_col=0)
datalanse = datalanse.dropna(axis=1)
datalanse
Out[335]:
In [336]:
datalanse.merge(datayinnuo,left_index=True, right_index=True,how ='inner')
Out[336]:
In [337]:
# Minghong Multi-Strategy Hedge No.1 fund: load the daily net-worth series
# and drop columns containing missing values.
dataminghong = pd.read_csv(datapath + filenameminghong, infer_datetime_format=True,
                           parse_dates=[0], index_col=0)
dataminghong = dataminghong.dropna(axis=1)
dataminghong
Out[337]:
In [338]:
dataList = [dataleigen,datayinnuo,datalanse,dataqilin,dataminghong]
In [339]:
mergedNW = reduce(lambda x,y: x.merge(y,left_index=True, right_index=True,how ='inner'),dataList)
In [340]:
# Simple daily returns; the first row of pct_change() is NaN (no prior
# observation) and is dropped.
dailyChange = mergedNW.pct_change()
returndf = dailyChange.iloc[1:]
returndf
Out[340]:
In [341]:
# Annualized downside deviation: standard deviation of only the daily
# returns below each fund's mean, scaled by sqrt(252 trading days).
belowMean = returndf.where(returndf < returndf.mean())
downsideRisk = belowMean.std(skipna=True) * np.sqrt(252)
downsideRisk
Out[341]:
In [342]:
# Rebase every series to 1.0 on the first common date so the funds are comparable.
mergedNW = mergedNW.div(mergedNW.iloc[0])
mergedNW
Out[342]:
In [386]:
# Dates of the last observed trading day in each calendar month:
# the cumulative row count per month minus one is the position of
# that month's final row.
monthCounts = mergedNW.resample('M').size()
endOfMonthList = mergedNW.iloc[monthCounts.cumsum() - 1].index
In [388]:
# Average month-over-month return per fund.
monthlyNW = mergedNW.loc[endOfMonthList]
monthlyNW.pct_change().mean()
Out[388]:
In [394]:
# Dates of the last observed trading day in each week.
# FIX: the original wrapped the index in set(), which destroyed chronological
# order, so pct_change() compared prices in arbitrary order. The set() was
# presumably there to drop duplicate positions produced by empty weeks
# (cumsum repeats); Index.unique() de-duplicates while PRESERVING order.
weekCounts = mergedNW.resample('W').size()
endOfWeekList = mergedNW.iloc[weekCounts.cumsum().sub(1)].index.unique()
# Average week-over-week return per fund.
mergedNW.loc[endOfWeekList].pct_change().mean()
Out[394]:
In [343]:
# Geometric annualization of the mean daily return (252 trading days/year).
meanDaily = returndf.mean()
annualizedReturn = np.power(1 + meanDaily, 252) - 1
annualizedReturn
Out[343]:
In [344]:
# Annualized volatility: daily standard deviation scaled by sqrt(252).
annualizedVol = returndf.std().mul(np.sqrt(252))
annualizedVol
Out[344]:
In [345]:
# Sharpe ratio (risk-free rate implicitly 0).
# NOTE(review): "shape" is a misspelling of "Sharpe"; the variable name is
# kept because later cells (summary table) reference `shapeRatio`.
shapeRatio = annualizedReturn.div(annualizedVol)
shapeRatio
Out[345]:
In [346]:
# Sortino ratio: annualized return over annualized downside deviation.
sortinoRatio = annualizedReturn.div(downsideRisk)
sortinoRatio
Out[346]:
In [367]:
# Max drawdown, vectorized.
# FIX: the original looped over every date and recomputed
# mergedNW.loc[:date].max() each time — O(n^2). The running peak is exactly
# cummax(), so drawdown_t = 1 - NW_t / max(NW_0..NW_t) in one O(n) pass.
# The first row is 0 by construction (NW_0 / NW_0 == 1), matching the
# original's explicit maxdd.iloc[0] = 0.
maxdd = 1 - mergedNW / mergedNW.cummax()
# Per fund: the worst drawdown and the date on which it occurred.
maxddInfo = pd.concat([maxdd.max(), maxdd.idxmax()], axis=1)
maxddInfo.columns = ['Max_drawdown', 'Time']
maxddInfo
Out[367]:
In [348]:
# Calmar ratio: annualized return over maximum drawdown.
calmarRatio = annualizedReturn.div(maxddInfo['Max_drawdown'])
calmarRatio
Out[348]:
In [379]:
# Combine all performance statistics into a single summary table.
infodf = pd.concat([annualizedReturn, annualizedVol, downsideRisk,
                    maxddInfo['Max_drawdown'], shapeRatio, sortinoRatio,
                    calmarRatio], axis=1)
infodf.columns = ['Return', 'Volatility', 'Downside_Risk', 'Max_DD',
                  'Shape_Ratio', 'Sortino_Ratio', 'Calmar_Ratio']
# Sign per metric: +1 where higher is better, -1 where lower is better, so a
# plain ascending rank orders every column in the same "better is higher" sense.
rankdict = dict(zip(infodf.columns, [1, -1, -1, -1, 1, 1, 1]))
# Column-wise multiplication aligned on the column labels
# (same result as the original apply(lambda x: x * rankdict[x.name])).
adjInfodf = infodf.mul(pd.Series(rankdict), axis=1)
adjInfodf.rank()
Out[379]:
In [378]:
infodf
Out[378]:
In [381]:
# Risk-preference weights over the seven metric columns (they sum to 1).
riskPref = np.array([0.2, 0.1, 0.1, 0.2, 0.1, 0.1, 0.2])
# Weighted sum of per-metric ranks, then re-ranked for the final fund ordering.
adjInfodf.rank().mul(riskPref).sum(axis=1).rank()
Out[381]:
In [350]:
# Pearson correlation matrix of the funds' net-worth series, rounded for display.
mergedNW.corr(method='pearson').round(4)
Out[350]:
In [351]:
np.round(mergedNW.corr(method='spearman'),4)
Out[351]: