In [1]:
import utils as ut
from pint import UnitRegistry
import pandas as pd
import seaborn as sns
from tabulate import tabulate
from numpy import average as avg
import numpy as np
from matplotlib import pyplot as plt
from functools import partial
In [2]:
%load_ext autoreload
%autoreload 2
%matplotlib inline
In [3]:
# Open (or create) the project SQLite database; ut.sqlitedb presumably returns
# a dict with a connection under 'cx' and a cursor under 'crs' (used below) --
# TODO confirm against utils.py
sqdb = ut.sqlitedb('fcat_biomass')
In [6]:
# Species-level wood density table (pct of harvest, density in lbs/cuft)
# pulled from a Google Sheet; derived from McIver & Morgan, Table 4
wood_dens = ut.gData('138FWlGeW57MKdcz2UkWxtWV4o50SZO8sduB1R6JOFp8', 1297253755)
In [7]:
# Sathre & O'Connor displacement-factor meta-analysis (their Table 4),
# cached into SQLite as table 'so4' for the filtered queries below
sathre4 = ut.gData('13UQtRfNBSJ81PXxbYSnB2LrjHePNcvhJhrsxRBjHpoY',546564075, hrow =3)
sathre4.to_sql('so4',sqdb['cx'], if_exists = 'replace')
In [8]:
# Split the Sathre & O'Connor rows into two displacement-factor sets:
#   HWu -- studies where harvest slash IS utilized (harvestslash = "X")
#   HWo -- studies where harvest slash is NOT utilized (harvestslash null)
# Both require processing-residue and post-use-product utilization and
# exclude stump utilization.
HWu = pd.read_sql('''SELECT *
FROM so4
WHERE harvestslash = "X"
AND processingresidues = "X"
AND "post-usewoodproduct" = "X"
AND stumps is null''', sqdb['cx'],index_col = 'index')
HWo = pd.read_sql('''SELECT *
FROM so4
WHERE harvestslash is null
AND processingresidues = "X"
AND stumps is null
AND "post-usewoodproduct" = "X"''', sqdb['cx'], index_col = 'index')
In [9]:
#HWo
# Markdown (pipe) table of displacement factors without residual utilization.
# print() with a single argument behaves identically under Python 2 and 3,
# unlike the bare Python 2 `print` statement used originally.
print(tabulate(HWo[['reference','df']], headers = ['index','reference','displacement factor'],tablefmt="pipe"))
In [10]:
#HWu
# Markdown (pipe) table of displacement factors with residual utilization.
# print() with a single argument behaves identically under Python 2 and 3.
print(tabulate(HWu[['reference','df']], headers = ['index','reference','displacement factor'],tablefmt="pipe"))
In [11]:
# Central store of model constants: each entry carries a value, a
# human-readable description, and (where available) a LaTeX citation.
constants = {'me' : {'value':0.5,
# mill efficiency: fraction of roundwood volume that becomes product
'desc': 'Mill Efficiency'},
# mean displacement factor across S&O studies WITH residual utilization
'DFu' : {'value': np.average(HWu.df),
'desc': 'Displacement factor with logging residual utilization',
'source': '''\cite{Sathre2010}'''},
# mean displacement factor across S&O studies WITHOUT residual utilization
'DFo' : {'value': np.average(HWo.df),
'desc': 'Displacement factor without logging residual utilization'},
# harvest-weighted average wood density (pct weights sum over species)
'wDens' : {'value': sum(wood_dens.pct/100 * wood_dens.density_lbscuft),
'units' : 'lbs/cuft',
'desc': 'average harvested wood density weighted by species harvested',
'source': '\cite{Mciver2012}'}
}
In [32]:
constants['wDens']['value']
Out[32]:
In [18]:
# TPO (Timber Product Output) data by ownership: roundwood products and
# logging residues per year; cached to SQLite and shown as a markdown table.
tpoData = ut.gData('1GDdquzrCoq2cxVN2fbCpP4gwi2yrMnONNrWbfhZKZu4', 872275354, hrow=1)
tpoData.to_sql('tpo', sqdb['cx'], if_exists = 'replace')
# print() with one argument works identically under Python 2 and 3
print(tabulate(tpoData, headers = ['Ownership','Roundwood Products','Logging Residues', 'Year'],tablefmt="pipe"))
In [34]:
# Load boe_hh.csv into SQLite table 'boe' (contents not shown here;
# presumably barrel-of-oil-equivalent household heating data -- TODO confirm)
pd.read_csv('boe_hh.csv').to_sql('boe',sqdb['cx'], if_exists = 'replace')
In [35]:
pd.read_sql('select * from boe', sqdb['cx'])
Out[35]:
Figure 2 from Morgan and McIver presents total roundwood harvest from 1947 through 2012 in MMBF. To convert MMBF to MCF we use a sawlog conversion of 5.44. This is an approximation, as the actual sawlog conversion varies with log size, which on average has changed over time.
In [19]:
# Morgan & McIver historical harvest series (MMBF by ownership, 1947-2012);
# missing cells treated as zero harvest
mm_histHarvest = ut.gData('13UQtRfNBSJ81PXxbYSnB2LrjHePNcvhJhrsxRBjHpoY', 2081880100).fillna(value=0)
In [20]:
# Cache the historical harvest series to SQLite for later ratio queries
mm_histHarvest.to_sql('mm_hist', sqdb['cx'], if_exists = 'replace')
In [21]:
mm_histHarvest
Out[21]:
To apply the appropriate DF for harvested wood we need to know what fraction of the logging residues were utilized as bioenergy feedstock. McIver and Morgan (Table 6) report bioenergy consumption from 2000 forward. For earlier years, we use the average bioenergy consumption from 2000 -- 2012.
In [12]:
# McIver & Morgan Table 6: bioenergy share of harvest by year (2000+).
# Transposed so years become the index, then read back as year/biopct pairs.
bioEnergy = ut.gData('138FWlGeW57MKdcz2UkWxtWV4o50SZO8sduB1R6JOFp8', 529610043)
bioEnergy.set_index('producttype').transpose().to_sql('mciver_bio', sqdb['cx'], if_exists = 'replace')
bio_pct = pd.read_sql('select "index" as year,"Bioenergy"/100 as biopct from mciver_bio where "Bioenergy" is not null', sqdb['cx'])
# dict keyed by year -> {'biopct': fraction}, used by bioPct()/WPu()/WPo()
bio_dict = bio_pct.set_index('year').to_dict('index')
# print() with one argument works identically under Python 2 and 3
print(tabulate(bio_pct, headers = ['year', 'bioenergy % of harvest'],tablefmt="pipe"))
In [13]:
def bioPct(year):
    """Bioenergy fraction of harvest for a given year.

    Looks the year up in bio_dict (McIver & Morgan data, 2000+); for years
    with no data, falls back to the 2000-2012 average fraction.
    """
    entry = bio_dict.get(year)
    if entry is not None:
        return entry['biopct']
    return np.average(bio_pct.biopct)
In [22]:
def WPu (rw_harvest, lr, year, mill_efficiency = constants['me']['value'], wdens = constants['wDens']['value'], df = constants['DFu']['value']):
    '''
    Emissions reduction (MT CO2e) from harvested wood WITH utilization of
    logging residuals for bioenergy.

    rw_harvest: roundwood harvest volume (assumed MMCF -- see 1e6 factor below)
    lr: logging residue volume, same units as rw_harvest
    year: harvest year, used to look up the bioenergy consumption share
    mill_efficiency: fraction of roundwood volume that becomes product
    wdens: average wood density, lbs/cuft
    df: displacement factor with residual utilization (Sathre & O'Connor)
    '''
    # Reuse the shared bioPct() helper instead of duplicating its year-lookup
    # logic inline (the original repeated the bio_dict/average fallback here).
    bioe_pct = bioPct(year)
    # total volume consumed as bioenergy feedstock
    bioevol = bioe_pct * rw_harvest
    # fraction of logging residues utilized for bioenergy
    lrUsed = bioevol/lr
    # roundwood harvest volume from which logging residues were utilized
    HWu = lrUsed * rw_harvest
    # Product volume via mill efficiency, then MMCF -> cuft (1e6),
    # lbs -> metric tons (2204.62), and wood mass -> carbon mass (0.5 --
    # per comment from Roger Sathre, the DF applies to tC, not tWood)
    # before applying the displacement factor. Result is in MT.
    WPu = HWu * mill_efficiency * wdens * 1000000 / 2204.62 * 0.5 * df
    return WPu
In [23]:
def WPo (rw_harvest, lr, year, mill_efficiency = constants['me']['value'], wdens = constants['wDens']['value'], df = constants['DFo']['value']):
    '''
    Emissions reduction (MT CO2e) from harvested wood WITHOUT utilization of
    logging residuals for bioenergy.

    rw_harvest: roundwood harvest volume (assumed MMCF -- see 1e6 factor below)
    lr: logging residue volume, same units as rw_harvest
    year: harvest year, used to look up the bioenergy consumption share
    mill_efficiency: fraction of roundwood volume that becomes product
    wdens: average wood density, lbs/cuft
    df: displacement factor without residual utilization (Sathre & O'Connor)
    '''
    # Reuse the shared bioPct() helper instead of duplicating its year-lookup
    # logic inline (the original repeated the bio_dict/average fallback here).
    bioe_pct = bioPct(year)
    # total volume consumed as bioenergy feedstock
    bioevol = bioe_pct * rw_harvest
    # fraction of logging residues utilized for bioenergy
    lrUsed = bioevol/lr
    # harvest volume whose residues were NOT utilized
    HWo = (1-lrUsed) * rw_harvest
    # MMCF -> cuft (1e6), lbs -> metric tons (2204.62), wood -> carbon (0.5,
    # per Roger Sathre) before applying the displacement factor. Result in MT.
    WPo = HWo * mill_efficiency * wdens * 1000000 / 2204.62 * 0.5 * df
    return WPo
In [24]:
# Emissions reduction per TPO row, with-residual-utilization case.
# NOTE(review): positions are taken from .index but used with .iloc -- this is
# only correct while tpoData has a default RangeIndex; verify after any filter.
erWPu = []
for row in tpoData.index:
    rw,lr,yr = tpoData.iloc[row][['roundwoodproducts','loggingresidues', 'year']].tolist()
    erWPu.append(WPu(rw,lr,yr))
tpoData['erWPu'] = erWPu
In [27]:
# Emissions reduction per TPO row, without-residual-utilization case.
# NOTE(review): same .iloc-with-.index pattern as the WPu loop above --
# only valid for a default RangeIndex.
erWPo = []
for row in tpoData.index:
    rw,lr,yr = tpoData.iloc[row][['roundwoodproducts','loggingresidues', 'year']].tolist()
    erWPo.append(WPo(rw,lr,yr))
tpoData['erWPo'] = erWPo
In [31]:
# Combined emissions reduction and bioenergy tonnage for the TPO series
tpoData['erTotal'] = tpoData.erWPo+tpoData.erWPu
tpoData.to_sql('tpo_emreduc', sqdb['cx'], if_exists='replace')
tpoData['bioe_pct'] = tpoData.year.apply(bioPct)
# residues MMCF -> cuft (1e6) -> lbs (wDens) -> metric tons (2204.62 lbs/t)
tpoData['bioe_t'] = tpoData.bioe_pct * tpoData.loggingresidues * 1e6* constants['wDens']['value']/2204.62
In [32]:
# Historical (Morgan & McIver) series: emissions reduction per year without
# residual utilization. Residue volume is estimated from TPO residue/roundwood
# ratios (year-specific where available, otherwise the all-years average).
erWPo = []
for row in mm_histHarvest.index:
    r = mm_histHarvest.iloc[row]
    yr = r['year'] ## year
    # total MMBF across ownerships converted to MMCF via the 5.44 sawlog factor
    rw = (r.state+r.private+r.tribal+r.blm+r.nat_forest)/5.44
    qry = 'select avg(loggingresidues/roundwoodproducts) lr from tpo where year = {}'.format(yr)
    if yr in tpoData.year.tolist():
        lr = pd.read_sql(qry, sqdb['cx'])*rw
    else:
        lr = pd.read_sql('select avg(loggingresidues/roundwoodproducts) lr from tpo', sqdb['cx'])*rw
    # lr is a 1x1 DataFrame, so WPo returns a DataFrame; .lr[0] unwraps the scalar
    erWPo.append(WPo(rw,lr,yr).lr[0])
mm_histHarvest['erWPo'] = erWPo
In [33]:
# Same historical loop for the with-utilization case (WPu); also records the
# estimated residue volume and total harvest per year for later columns.
erWPu = []
lrVect = []
tHarv = []
for row in mm_histHarvest.index:
    r = mm_histHarvest.iloc[row]
    yr = r['year'] ## year
    # total MMBF across ownerships converted to MMCF via the 5.44 sawlog factor
    rw = (r.state+r.private+r.tribal+r.blm+r.nat_forest)/5.44
    qry = 'select avg(loggingresidues/roundwoodproducts) lr from tpo where year = {}'.format(yr)
    if yr in tpoData.year.tolist():
        lr = pd.read_sql(qry, sqdb['cx'])*rw
    else:
        lr = pd.read_sql('select avg(loggingresidues/roundwoodproducts) lr from tpo', sqdb['cx'])*rw
    lrVect.append(lr.lr[0])
    tHarv.append(rw)
    # lr is a 1x1 DataFrame, so WPu returns a DataFrame; .lr[0] unwraps the scalar
    erWPu.append(WPu(rw,lr,yr).lr[0])
mm_histHarvest['erWPu'] = erWPu
mm_histHarvest['loggingresidues'] = lrVect
mm_histHarvest['totalharvest'] = tHarv
In [34]:
# Combined totals and bioenergy tonnage for the historical series
mm_histHarvest['erTotal'] = mm_histHarvest.erWPo+mm_histHarvest.erWPu
mm_histHarvest['bioe_pct'] = mm_histHarvest.year.apply(bioPct)
# residues MMCF -> cuft (1e6) -> lbs (wDens) -> metric tons (2204.62 lbs/t)
mm_histHarvest['bioe_t'] = mm_histHarvest.bioe_pct * mm_histHarvest.loggingresidues * 1e6* constants['wDens']['value']/2204.62
# Persist once, AFTER all derived columns exist. The original saved before
# adding the bioe_* columns and then called .to_sql() with no arguments,
# which raises TypeError (table name and connection are required).
mm_histHarvest.to_sql('mm_emreduc', sqdb['cx'], if_exists='replace')
In [35]:
mm_histHarvest
Out[35]:
In [36]:
# Bar chart: total annual emissions reductions for the historical series,
# x labels rotated since there are ~65 years on the axis
sns.set_style("whitegrid")
fig2, ax2 = plt.subplots(figsize=(12, 10))
ax2 = sns.barplot(x ='year', y='erTotal', data=mm_histHarvest.sort_values('year'))
ax2.set_ylabel('Emissions reduction (MT CO2e)')
ax2.set_title('Emissions reductions resulting \nfrom roundwood harvest in CA')
ax2.set_xticklabels(ax2.get_xticklabels(),rotation=90)
Out[36]:
In [37]:
# Save the historical chart in both formats. A plain loop replaces the
# original list comprehension, which was used only for its side effects
# and displayed a throwaway [None, None] as cell output.
for ext in ['pdf','png']:
    fig2.savefig('graphics/ann_hh_em_reduc.{}'.format(ext))
Out[37]:
In [38]:
# Bar chart: TPO-year emissions reductions, grouped by ownership class
sns.set_style("whitegrid")
fig, ax = plt.subplots()
ax = sns.barplot(x ='year', y='erTotal', hue="ownership", data=tpoData.sort_values('year'))
ax.set_ylabel('Emissions reduction (MT CO2e)')
ax.set_title('Emissions reductions resulting from roundwood harvest in CA')
Out[38]:
In [39]:
# Save the ownership chart in both formats (plain loop instead of a
# side-effect-only list comprehension).
for ext in ['pdf','png']:
    fig.savefig('graphics/harv_em_reductions.{}'.format(ext))
Out[39]:
Total emissions reductions from roundwood harvesting in CA, 2012
In [40]:
# Statewide total for 2012. NOTE(review): the year is compared as the text
# literal "2012" -- this relies on SQLite's numeric affinity coercing it when
# the column is stored as INTEGER; confirm the stored column type.
pd.read_sql('select sum("erTotal") from tpo_emreduc where year = "2012"', sqdb['cx'])
Out[40]:
From logging residuals not used in bioenergy, emissions are produced from combustion of the residual material or from decomposition of the material over time. To calculate the ratio of burned to decomposed logging residues I begin with the CARB estimate of PM2.5 produced from forest management:
In [41]:
# Fetch CARB almanac emissions-by-category CSVs for selected years and append
# them all into one SQLite table (dropped first so re-runs don't duplicate)
tName = 'cpe_allyears'
sqdb['crs'].executescript('drop table if exists {0};'.format(tName))
for y in [2000, 2005, 2010, 2012, 2015]:
    # year is substituted into the F_YR query parameter below
    url = 'http://www.arb.ca.gov/app/emsinv/2013/emsbyeic.csv?F_YR={0}&F_DIV=0&F_SEASON=A&SP=2013&SPN=2013_Almanac&F_AREA=CA'
    df = pd.read_csv(url.format(y))
    df.to_sql(tName, sqdb['cx'], if_exists = 'append')
In [42]:
# Annual PM2.5 from the CARB 'FOREST MANAGEMENT' source category.
# NOTE(review): the *365 implies the almanac reports tons/day -- confirm.
pmAnn = pd.read_sql('''
select year,
eicsoun,
"PM2_5"*365 an_pm25_av
from cpe_allyears
where eicsoun = 'FOREST MANAGEMENT';
''', sqdb['cx'])
pmAnn
Out[42]:
To estimate total biomass from PM2.5 I assume 90% consumption of biomass in piles and use the relationship of pile tonnage to PM emissions calculated using the Piled Fuels Biomass and Emissions Calculator provided by the Washington State Department of Natural Resources. This calculator is based on the Consume fire behavior model published by the US Forest Service.
In [43]:
# Piled Fuels Biomass and Emissions Calculator output (WA DNR / Consume model)
# relating pile biomass tonnage to per-species emission tonnage
pfbec = pd.read_csv('fera_pile_cemissions.csv', header=1)
ward = ut.gData('13UQtRfNBSJ81PXxbYSnB2LrjHePNcvhJhrsxRBjHpoY', 475419971)
def sp2bio(pm, species = 'PM2.5 (tons)'):
    # scale `pm` by the species-per-pile-biomass ratio from the calculator.
    # NOTE(review): pfbec[...] is a Series, so the result's shape depends on
    # pfbec's row count -- presumably a single row; verify.
    return pm * (pfbec[species]/pfbec['Pile Biomass (tons)'])
def bioPm(pm):
    # inverse direction: pile biomass implied by a PM2.5 quantity
    return pm * (pfbec['Pile Biomass (tons)']/pfbec['PM2.5 (tons)'])
# Species-specific wrappers around sp2bio
co2t = lambda x: sp2bio(x,'CO2 (tons)')
ch4t = lambda x: sp2bio(x,'CH4 (tons)')
# Back out pile biomass from PM2.5, then CO2/CH4 tonnage from biomass
pmAnn['biomass_t']=pmAnn.an_pm25_av.apply(bioPm)
pmAnn['co2_t'] = pmAnn.biomass_t.apply(co2t)
pmAnn['ch4_t'] = pmAnn.biomass_t.apply(ch4t)
# 56 appears to be a 20-year GWP for methane (IPCC SAR) -- TODO confirm horizon
pmAnn['ch4_co2e'] = pmAnn.ch4_t * 56
# black-carbon CO2e from PM2.5 via project helper
pmAnn['bc_co2e']= pmAnn.an_pm25_av.apply(ut.pm2bcgwpPiles)
pmAnn['t_co2e']= pmAnn.co2_t + pmAnn.ch4_co2e + pmAnn.bc_co2e
# print() with one argument works identically under Python 2 and 3
print(tabulate(pmAnn[['YEAR','EICSOUN','co2_t','ch4_co2e','bc_co2e','t_co2e']], headers = ['Year','Emissions source','CO2 (t)', 'CH4 (tCO2e)', 'BC (tCO2e)', 'Pile Burn Total (tCO2e)'],tablefmt="pipe"))
To provide a full picture of the emissions from residual material produced from commercial timber harvesting in California, decomposition of unutilized logging residuals left on-site that are not burned must be accounted for. To establish the fraction of logging residue that is left to decompose, residues burned and used in bioenergy are subtracted from the total reported by the TPO:
To calculate the GHG emissions from decomposition of piles we use the following equation:
In [44]:
# Average annual logging-residue volume across TPO years (sums ownerships
# within a year, then averages across years)
annLrAvg = pd.read_sql('''with ann as (select sum(loggingresidues) lr
from tpo
group by year)
select avg(lr) foo
from ann;''', sqdb['cx'])['foo'][0]
# fraction of annual residues accounted for by pile-burn biomass
# (biomass_t is in tons; /1e6 puts it on the same million-unit scale as tpo)
pctLR_bio = (np.average(pmAnn.biomass_t)/1e6)/annLrAvg
In [45]:
annLrAvg
Out[45]:
In [46]:
pmAnn
Out[46]:
In [47]:
# Residue fate accounting: unused, burned, and left-to-decompose fractions.
# lr_t: logging residues converted MMCF -> cuft (1e6) -> lbs -> metric tons
lr_t = 1e6*tpoData.loggingresidues*constants['wDens']['value']/2204.62
# NOTE(review): lr_t already includes the 1e6 MMCF->cuft factor; the extra
# 1e6 multipliers on the next three lines look like double scaling --
# confirm the intended units before relying on these columns.
tpoData['unused_lr'] = 1e6*(lr_t-(pctLR_bio*lr_t))
tpoData['burned_lr'] = 1e6*lr_t*(np.average(pmAnn.biomass_t)/(annLrAvg*1e6))
tpoData['unburned_lr'] = (lr_t*1e6) - tpoData.bioe_t - tpoData.burned_lr
tpoData['unburned_lr_co2e'] = tpoData.unburned_lr.apply(ut.co2eDecomp)
tpoData
Out[47]:
Data from TPO does not account for forest management activities that do not result in commercial products (timber sales, biomass sales). To estimate the amount of residual material produced from non-commercial management activities we use data from the US Forest Service (FACTS) and from CAL FIRE's timber harvest plan data.
Data from TPO does not account for forest management activities that do not result in commercial products (timber sales, biomass sales). We use a range of 10-35 BDT/acre to convert acres reported in FACTS to volume.
In [ ]:
# USFS FACTS exports: activity-category crosswalk and non-timber activities,
# cached to SQLite for the join below
pd.read_excel('lf/FACTS_Tabular_092115.xlsx', sheetname = 'CategoryCrosswalk').to_sql('facts_cat', sqdb['cx'], if_exists = 'replace')
pd.read_csv('pd/facts_notimber.csv').to_sql('facts_notimber', sqdb['cx'], if_exists='replace')
The USFS reports Hazardous Fuels Treatment (HFT) activities as well as Timber Sales (TS) derived from the FACTS database. We use these two datasets to estimate the number of acres treated that did not produce commercial material (sawlogs or biomass) and where burning was not used. The first step is to eliminate all treatments in the HFT dataset that included timber sales. We accomplish this by eliminating all rows in the HFT dataset that have identical FACTS_ID fields in the TS dataset. We further filter the HFT dataset by removing any planned but not executed treatments (nbr_units1 > 0 below -- nbr_units1 references NBR_UNITS_ACCOMPLISHED in the USFS dataset, see metadata for HFT here), and use text matching in the 'ACTIVITY' and 'METHOD' fields to remove any rows that contain reference to 'burning' or 'fire'. Finally, we remove all rows that reference 'Biomass' in the method category, as it is assumed that this means material was removed for bioenergy.
In [ ]:
# Completed, non-burn, non-biomass fuel-treatment acres by year, activity,
# and method (filtering rationale described in the markdown cell above)
usfs_acres = pd.read_sql('''select
sum(nbr_units1) acres,
method,
strftime('%Y',date_compl) year,
cat."ACTIVITY" activity,
cat."TENTATIVE_CATEGORY" r5_cat
from facts_notimber n
join facts_cat cat
on (n.activity = cat."ACTIVITY")
where date_compl is not null
and nbr_units1 > 0
and cat."TENTATIVE_CATEGORY" != 'Burning'
and cat."ACTIVITY" not like '%ire%'
and method not like '%Burn%'
and method != 'Biomass'
group by cat."ACTIVITY",
year,
method,
cat."TENTATIVE_CATEGORY"
order by year;''', con = sqdb['cx'])
In [ ]:
def sumBDT(ac, maxbdt = 35, minbdt = 10):
    '''
    Stochastic bone-dry-ton estimate for `ac` treated acres.

    Each whole acre contributes one draw from N(av, stdev) clipped at zero,
    where av is the midpoint of [minbdt, maxbdt] and stdev spans the range at
    +/- 2 sigma; the fractional acre contributes a proportional share of one
    additional draw.
    '''
    # Cast to float before dividing: under Python 2, (35 + 10)/2 truncated to
    # 22 instead of 22.5 (the stdev line below already carried the casts).
    av = (float(maxbdt) + float(minbdt))/2
    stdev = (float(maxbdt) - float(minbdt))/4
    # fractional-acre share of one draw
    d_frac = (ac-np.floor(ac))*np.random.normal(av, stdev, 1).clip(min=0)[0]
    # int(...) around np.floor: numpy requires an integer size argument
    # (np.floor returns a float, which newer numpy versions reject)
    t_bdt = np.sum(np.random.normal(av,stdev,int(np.floor(ac))).clip(min=0))
    return d_frac+t_bdt
In [ ]:
# Monte-Carlo BDT estimate per activity row, then annual totals
usfs_acres['bdt'] = usfs_acres['acres'].apply(sumBDT)
usfs_an_bdt = usfs_acres.groupby(['year']).sum()
Average wood density weighted by harvested species percent. Derived from McIver and Morgan, Table 4
In [ ]:
# Species-weighted average wood density (lbs/cuft), McIver & Morgan Table 4.
# NOTE(review): duplicates the constants['wDens'] computation earlier in the
# notebook -- consider reusing that value instead of re-fetching.
wood_dens = ut.gData('138FWlGeW57MKdcz2UkWxtWV4o50SZO8sduB1R6JOFp8', 1297253755)
wavg_dens =sum(wood_dens.pct/100 * wood_dens.density_lbscuft)
In [ ]:
# Human-readable descriptions for the residual-source category codes used as
# resid_stats column names below.
cat_codes = {'nf_ncmr': 'Unburned, non-commercial management residuals from National Forest lands',
             'nf_lr': 'Logging residuals generated from timber sales on National Forest lands',
             'opriv_lr': 'Logging residuals generated from timber sales on non-industrial private forest lands',
             'fi_lr': 'Logging residuals generated from timber sales on industrial private lands',
             # fixed copy-paste error: this entry previously repeated the
             # fi_lr text; opub_lr maps to the 'Other Public' ownership class
             'opub_lr': 'Logging residuals generated from timber sales on other public lands'}
In [ ]:
# NOTE(review): multiplying tons (bdt) by density (lbs/cuft) does not yield
# cubic feet -- a volume would divide mass by density; confirm this conversion.
usfs_an_bdt['cuft']= usfs_an_bdt.bdt *wavg_dens
# Summary stats of the cuft column (millions), starting at row position 6.
# NOTE(review): iloc[6:,2] hard-codes both offsets -- TODO confirm which years
# and which column this selects after the groupby above.
resid_stats=pd.DataFrame((usfs_an_bdt.iloc[6:,2]/1000000).describe())
resid_stats.columns = ['nf_ncmr']
# Distribution of TPO logging residues by ownership class (see cat_codes)
resid_stats['nf_lr']=tpoData[tpoData.ownership.str.contains('National Forest')]['loggingresidues'].describe()
resid_stats['opriv_lr']=tpoData[tpoData.ownership.str.contains('Other Private')]['loggingresidues'].describe()
resid_stats['fi_lr']=tpoData[tpoData.ownership.str.contains('Forest Industry')]['loggingresidues'].describe()
resid_stats['opub_lr']=tpoData[tpoData.ownership.str.contains('Other Public')]['loggingresidues'].describe()
resid_stats
In [ ]:
# Markdown table of residual statistics.
# print() with one argument works identically under Python 2 and 3.
print(tabulate(resid_stats, headers = resid_stats.columns.tolist(), tablefmt ='pipe'))
In [ ]:
# Unit registry for wood-product volume conversions (sawlog, veneer,
# bioenergy board-foot factors; bone-dry unit).
# NOTE(review): pint canonical unit names may not contain spaces; these
# 'cubic foot = ...' style definitions likely raise a DefinitionSyntaxError --
# verify they parse under the pint version in use.
ureg = UnitRegistry()
ureg.define('cubic foot = cubic_centimeter/ 3.53147e-5 = cubic_foot' )
ureg.define('million cubic foot = cubic_foot*1000000 = MMCF' )
ureg.define('board foot sawlog = cubic_foot / 5.44 = BF_saw')
ureg.define('board foot veneer = cubic_foot / 5.0 = BF_vo')
ureg.define('board foot bioenergy = cubic_foot / 1.0 = BF_bio')
ureg.define('bone-dry unit = cubic_foot * 96 = BDU')