In [2]:
import utils as ut
from pint import UnitRegistry
import pandas as pd
from ggplot import *
import seaborn as sns
from tabulate import tabulate
from numpy import average as avg
import numpy as np
In [3]:
# Open/create the project SQLite database; sqdb['cx'] is the connection
# used by every to_sql/read_sql call below. (Helper from the local utils module.)
sqdb = ut.sqlitedb('fcat_biomass')
In [4]:
def distFromRange(total, maxr = 32, minr = 2):
    """Draw `total` values from a normal distribution implied by a range.

    The [minr, maxr] range is treated as mean +/- 2 standard deviations:
    mean = (maxr + minr)/2, stdev = (maxr - minr)/4. Negative draws are
    clipped to zero. The fractional part of `total` scales one extra draw
    appended at the end.

    Parameters
    ----------
    total : float
        Number of draws; need not be an integer.
    maxr, minr : float, optional
        Upper and lower bounds of the assumed range.

    Returns
    -------
    numpy.ndarray
        Array of length floor(total) + 1.
    """
    # Force float arithmetic so the midpoint is exact even under Python 2
    # integer division (e.g. (33 + 2)/2 must be 17.5, not 17).
    av = (float(maxr) + float(minr)) / 2
    stdev = (float(maxr) - float(minr)) / 4
    # One partial draw, weighted by the fractional part of `total`.
    d_frac = (total - np.floor(total)) * np.random.normal(av, stdev, 1).clip(min=0)[0]
    # numpy requires an integer sample size; np.floor returns a float.
    t_bdt = np.random.normal(av, stdev, int(np.floor(total))).clip(min=0)
    return np.append(t_bdt, d_frac)
In [5]:
def sumFromDist(total, maxr = 0.32, minr = 0.02):
    """Sum of `total` draws from a normal distribution implied by a range.

    The [minr, maxr] range is treated as mean +/- 2 standard deviations:
    mean = (maxr + minr)/2, stdev = (maxr - minr)/4. Negative draws are
    clipped to zero; the fractional part of `total` contributes a
    proportionally weighted extra draw.

    Parameters
    ----------
    total : float
        Number of draws; need not be an integer.
    maxr, minr : float, optional
        Upper and lower bounds of the assumed range.

    Returns
    -------
    float
        Sum of all (clipped) draws.
    """
    # Float arithmetic guards against Python 2 integer division when the
    # caller passes integer bounds.
    av = (float(maxr) + float(minr)) / 2
    stdev = (float(maxr) - float(minr)) / 4
    d_frac = (total - np.floor(total)) * np.random.normal(av, stdev, 1).clip(min=0)[0]
    # int() cast: numpy rejects a float sample size.
    t_bdt = np.sum(np.random.normal(av, stdev, int(np.floor(total))).clip(min=0))
    return (d_frac + t_bdt)
The CARB criteria pollutant emissions inventory reports Particulate Matter (PM 2.5) emissions from anthropogenic burning of forest residuals. The following estimates elemental carbon (Black Carbon) based on empirically derived relationships between PM2.5 and EC from Ward and Hardy (1989).
In [33]:
# Emission-factor table (Ward & Hardy) pulled from a Google Sheet via the
# local utils helper; rows are burn sources, columns are TC/EC fractions.
ward = ut.gData('13UQtRfNBSJ81PXxbYSnB2LrjHePNcvhJhrsxRBjHpoY', 475419971)
#Units are ratio of EC to PM produced
wardDF = ward[['source','pct_sm','pct_f','tc_f_est','tc_f_cv','tc_s_est','tc_s_cv','ec_f_est','ec_f_cv', 'ec_s_est','ec_s_cv','ecton-1_h_pm','ecton-1_l_pm']].transpose()
# Promote the 'source' row to column labels so to_dict() keys by source name.
wardDF.columns = wardDF.iloc[0]
eFact = wardDF.to_dict()
# Map each source name to its CARB inventory category (eicsoun).
# NOTE(review): the zip pairs categories by dict iteration order, which is
# not guaranteed to match the sheet's row order -- verify the mapping.
d = dict(zip([eFact[i]['source'] for i in eFact.keys()],['ALL VEGETATION','WILDLAND FIRE USE (WFU)','FOREST MANAGEMENT']))
for k in eFact.keys():
    eFact[k]['arblink'] = d[k]
# Persist the factor table for the SQL join against the CARB inventory below.
pd.DataFrame.from_dict(eFact).transpose().to_sql('bc_pm_ratio', sqdb['cx'], if_exists= 'replace')
#print tabulate(pd.DataFrame.from_dict(eFact).transpose(), headers = ['CARB CPE Cat.','BC/t PM 2.5 (high)','BC/t PM 2.5 (low)', 'Source'], tablefmt="pipe")
pd.DataFrame.from_dict(eFact).transpose()
Out[33]:
In [34]:
pd.DataFrame.from_dict(eFact)
Out[34]:
In [35]:
ward
Out[35]:
In [36]:
# Combined EC/PM ratios: total-carbon fraction x EC-of-TC fraction for the
# flaming (f) and smoldering (s) phases, plus their coefficients of variation.
ec_rat_table = pd.DataFrame([ward.source, ward.tc_f_est*ward.ec_f_est, ward.tc_f_cv, ward.ec_f_cv,ward.tc_s_est*ward.ec_s_est, ward.tc_s_cv, ward.ec_s_cv]).transpose()
ec_rat_table.to_sql('ec_ratios', sqdb['cx'], if_exists = 'replace')
In [37]:
ec_rat_table
Out[37]:
In [38]:
# GitHub-flavored markdown rendering of the EC ratio table.
# (Python 2 print statement; wrap in parentheses for Python 3.)
print tabulate(ec_rat_table, headers = ['Source',
'BC/t PM 2.5 (F, est.)',
'TC/t PM 2.5 (F, CV)',
'BC/t TC (F, CV)',
'BC/t PM 2.5 (S, est.)',
'TC/t PM 2.5 (S, CV)',
'BC/t TC (S, CV)'], tablefmt="pipe")
In [39]:
# Per-source dict of TC/EC fraction estimates and CVs, keyed by source name;
# consumed by the Monte Carlo loop below.
ecPct = ward[['ec_f_est', 'pct_f','pct_sm','tc_f_est','tc_f_cv','tc_s_est','tc_s_cv','ec_f_cv','ec_s_est', 'ec_s_cv']].set_index(ward.source).to_dict('index')
Several estimates exist for the GWP of Black Carbon. We use GWP20 estimates for black carbon from the CARB Short-Lived Climate Pollutant Strategy and from Fuglestvedt et al. (2000).
In [40]:
# Black-carbon GWP estimates (20- and 100-year horizons) from a Google Sheet,
# persisted to SQLite for the CO2e query below.
bc_gwp = ut.gData('13UQtRfNBSJ81PXxbYSnB2LrjHePNcvhJhrsxRBjHpoY', 195715938)
bc_gwp.to_sql('bc_gwp', sqdb['cx'], if_exists = 'replace')
#print tabulate(bc_gwp.drop('est_id', 1), headers = [i for i in bc_gwp.drop('est_id', 1).columns],tablefmt="pipe")
# NOTE(review): drop('est_id', 1) uses the deprecated positional axis argument;
# newer pandas expects axis=1 or columns=['est_id'].
bc_gwp = bc_gwp.set_index(bc_gwp.est_id).drop('est_id',1)
In [41]:
bc_gwp
Out[41]:
In [43]:
# CARB 2015 criteria-pollutant inventory (tons/day by emission category),
# fetched directly from the ARB almanac endpoint.
cpe_data = pd.read_csv('http://www.arb.ca.gov/app/emsinv/2013/emsbyeic.csv?F_YR=2015&F_DIV=0&F_SEASON=A&SP=2013&SPN=2013_Almanac&F_AREA=CA')
cpe_data.columns = [i.lower() for i in cpe_data.columns]
cpe_data.to_sql('cpe_2015', sqdb['cx'], if_exists = 'replace')
# Annualize PM2.5 (x365), convert to EC tons via the high/low EC:PM ratios,
# then cross-join every GWP estimate to get CO2e ranges (+/- 1 stdev).
cpe_2015 = pd.read_sql('''SELECT
eicsoun,
pm.source type,
pm2_5 pm25_tpd,
pm2_5*365*"ecton-1_h_pm" as t_ec_high,
pm2_5*365*"ecton-1_l_pm" as t_ec_low,
pm2_5*365*"ecton-1_h_pm"*gwp_20 as co2e_high,
pm2_5*365*"ecton-1_l_pm"*gwp_20 as co2e_low,
pm2_5*365*"ecton-1_l_pm"*(gwp_20-gwp_20_std) as co2e20_low_m1std,
pm2_5*365*"ecton-1_l_pm"*(gwp_20+gwp_20_std) as co2e20_low_p1std,
pm2_5*365*"ecton-1_h_pm"*(gwp_20-gwp_20_std) as co2e20_hi_m1std,
pm2_5*365*"ecton-1_h_pm"*(gwp_20+gwp_20_std) as co2e20_hi_p1std,
pm2_5*365*"ecton-1_l_pm"*(gwp_100-gwp_100_std) as co2e100_low_m1std,
pm2_5*365*"ecton-1_l_pm"*(gwp_100+gwp_100_std) as co2e100_low_p1std,
pm2_5*365*"ecton-1_h_pm"*(gwp_100-gwp_100_std) as co2e100_hi_m1std,
pm2_5*365*"ecton-1_h_pm"*(gwp_100+gwp_100_std) as co2e100_hi_p1std,
gwp.source
FROM cpe_2015
JOIN bc_pm_ratio pm on (arblink = eicsoun)
CROSS JOIN bc_gwp gwp
WHERE eicsoun in ('FOREST MANAGEMENT','WILDLAND FIRE USE (WFU)','ALL VEGETATION')''',
con = sqdb['cx'])
# Long-format for plotting; keep only the CO2e columns.
foo = pd.melt(cpe_2015.drop('eicsoun',1), id_vars = ['type', 'source'])
bar = foo[foo['variable'].str.contains("co2e")].dropna()
# NOTE(review): assigning into a filtered slice can raise
# SettingWithCopyWarning; consider bar = bar.copy() first.
bar['type'] = bar.type.astype('category')
# NOTE(review): self-assignment below is a no-op left over from editing.
bar.value = bar.value
A secondary approach based on Ward (1989) and Jenkins (1996) uses a range of 2-32% of PM as BC.
In [50]:
# Annual PM2.5 tons (tons/day x 365) for the three burn categories.
pm2015=pd.read_sql('''select eicsoun,
pm2_5*365 as pm25
from cpe_2015
WHERE eicsoun in ('FOREST MANAGEMENT','WILDLAND FIRE USE (WFU)','ALL VEGETATION');''', con = sqdb['cx'])
# Relabel the rows positionally.
# NOTE(review): assumes the query returns the three eicsoun categories in a
# fixed order matching Wildfire / Pile Burn / Prescribed -- confirm mapping.
pm2015=pm2015.set_index([['Wildfire','Pile Burn','Prescribed']])
In [51]:
pm2015
Out[51]:
In [52]:
def ecDist(ecEst, ecCV, PM):
    """
    PM is a mass measure (intended to be annual tons of PM2.5); it is used
    as the number of draws, truncated to an integer -- i.e. one draw per ton.
    ecEst is the estimated fraction of the PM that is elemental carbon.
    ecCV is the coefficient of variation around the elemental carbon estimate.
    -----
    Returns int(PM) random draws from a normal distribution centered on
    `ecEst` with standard deviation `ecCV * ecEst`, clipped at zero.
    """
    ecStdev = ecCV * ecEst
    # numpy requires an integer sample size; legacy numpy silently truncated
    # a float here, so int(PM) reproduces the old behavior explicitly.
    t_ec = np.random.normal(ecEst, ecStdev, int(PM)).clip(min=0)
    return t_ec
In [62]:
ecPct
Out[62]:
In [54]:
# Monte Carlo: for each burn category draw 1,000 realizations of total
# elemental carbon (EC) from the phase-specific TC and EC distributions,
# then convert to CO2e with the CARB SLCP 20-year GWP.
# (Indentation reconstructed -- the notebook export flattened it.)
for k in ecPct.keys():
    pm = pm2015.loc[k]['pm25']
    ecPct[k]['t_pm'] = pm
    #PM2.5 smoldering
    pm_sm = ecPct[k]['pct_sm']*pm
    ecPct[k]['pm_sm'] = pm_sm
    #TC smoldering
    tc_sm = ecPct[k]['tc_s_est']*pm_sm
    ecPct[k]['tc_sm'] = tc_sm
    #PM2.5 flaming
    pm_f = ecPct[k]['pct_f']*pm
    ecPct[k]['pm_f'] = pm_f
    #TC flaming
    tc_f = ecPct[k]['tc_f_est']*pm_f
    ecPct[k]['tc_f'] = tc_f
    res1k = []
    for t in range(1000):
        rnd = t
        # Total Carbon in PM
        tc_s = ecDist(ecPct[k]['tc_s_est'], ecPct[k]['tc_s_cv'], pm)
        # NOTE(review): this draw shadows the scalar tc_f stored above.
        tc_f = ecDist(ecPct[k]['tc_f_est'], ecPct[k]['tc_f_cv'], pm)
        #Elemental Carbon in Total Carbon
        ec_s = ecDist(ecPct[k]['ec_s_est'], ecPct[k]['ec_s_cv'], pm)
        ec_f = ecDist(ecPct[k]['ec_f_est'], ecPct[k]['ec_f_cv'], pm)
        # Per-ton EC fraction: TC x EC-of-TC, summed over both combustion phases.
        ec1tpm = (tc_s*ec_s) + (tc_f*ec_f)
        ec_total_rnd = sum(ec1tpm)
        ec_total_gwp = ec_total_rnd * bc_gwp.loc['carb_slcp']['gwp_20']
        res1k.append([k,k+str(rnd),ec_total_rnd, ec_total_gwp])
    ecPct[k]['ec_tdist'] = pd.DataFrame(res1k)
    #ecPct[k]['ecSMDist'] = np.array([(ecDist(ecPct[k]['ec_s_est'], ecPct[k]['ec_s_cv'], pm)) for i in range(1000)])
    #ecPct[k]['ecFDist'] = np.array([(ecDist(ecPct[k]['ec_f_est'], ecPct[k]['ec_f_cv'], pm)) for i in range(1000)])
    #ecPct[k]['tECDist'] = ecPct[k]['ecSMDist']+ecPct[k]['ecFDist']
    #ecPct[k]['df'] = pd.DataFrame(np.column_stack((ecPct[k]['tECDist'], [k]*len(ecPct[k]['tECDist']))))
In [56]:
ecPct['Prescribed']['tc_f']
Out[56]:
In [57]:
(pm_sm*0.2*3200)+(pm_f*0.2*3200)
Out[57]:
In [58]:
# Stack the per-category Monte Carlo results into one long frame.
df=pd.concat([ecPct[i]['ec_tdist'] for i in ecPct.keys()])
df.columns = ['source', 'sourcernd', 'mt_ec','co2e_ec']
In [59]:
# Boxplots of the simulated CO2e distributions, one panel per source;
# figure exported to graphics/ in both raster and vector formats.
t=sns.FacetGrid(df,col='source', sharey=False)
t.map(sns.boxplot, 'source','co2e_ec')
t.set_ylabels('t $CO_2e$')
[c.xaxis.set_visible(False) for c in t.axes[0]]
# NOTE(review): sns.plt was removed in modern seaborn -- use
# matplotlib.pyplot.subplots_adjust directly.
sns.plt.subplots_adjust(top = 0.8)
t.fig.suptitle('Black carbon emissions in CO2 equivalent units from burning in CA, 2015', fontsize= 15)
t.fig.text(0.1, -0.08,'''Sources: CARB Criteria Pollutant Emissions Inventory(2015), Ward and Hardy (1989)''',
fontsize=10)
for f in ['.png','.pdf']:
    t.savefig('graphics/bc_prob_gwp{0}'.format(f))
In [61]:
122*0.66
Out[61]:
In [63]:
pd.read_csv('fera_pile_cemissions.csv', header=1)
Out[63]:
ARB (2007). Technical support document for Land Use, Land Use Change & Forestry - Biodegradable Carbon Emissions & Sinks. Table available as a pdf document at: http://www.arb.ca.gov/cc/inventory/archive/tables/net_co2_flux_2007-11-19.pdf
In [65]:
# NOTE(review): this `t` is recomputed before use in a later cell and also
# shadows the FacetGrid above -- candidate for deletion.
t=foo[foo['variable'].str.contains("co2e20")].dropna()
# ARB LULUC CO2 flux table (Google Sheet); first row holds the header labels.
co2 = ut.gData('1GDdquzrCoq2cxVN2fbCpP4gwi2yrMnONNrWbfhZKZu4', 1636249481)
co2.columns = co2.iloc[0]
co2plot=pd.melt(co2.reindex(co2.index.drop(0)), id_vars = ['Year'])
co2plot.columns = ['sc_cat','year','mmtco2e']
co2plot.to_sql('arb_co2', sqdb['cx'], if_exists = 'replace')
# Mean annual MMT CO2e for the two burn-related source categories.
cdata = pd.read_sql('''select sc_cat, avg(mmtco2e) from arb_co2 where sc_cat in ('Forest and rangeland fires', 'Timber harvest slash') group by sc_cat''', con = sqdb['cx'])
In [136]:
co2
Out[136]:
In [135]:
cdata
#Output GitHub Markdown using the following:
#print tabulate(cdata, headers = ['Source Category','MMTCO2'],tablefmt="pipe")
Out[135]:
To arrive at an estimate of total annual emissions from burning forest management residuals in CO2 equivalent terms from published CARB estimates, we can combine the CO2 emissions reported for 2004 in the LULUC Biodegradable Carbon Emissions and Sinks with black carbon emissions extrapolated from the CARB Criteria Air Pollutant Emissions inventory estimates. The time discrepancy between the 2004 and 2015 data is acknowledged as an irreconcilable source of uncertainty in this estimation, among others. This does, however, reflect that a baseline of substantial emissions from forest management residuals has been reported in CARB emissions inventories and should be recognized as a baseline condition.
In [64]:
# Combine the ARB CO2 estimate for slash burning with the simulated BC CO2e
# for pile burns into a small summary table.
t=foo[foo['variable'].str.contains("co2e20")].dropna()
# 1.10231 converts metric tonnes to short tons -- TODO confirm intended units;
# /1000000 rescales tons to megatons.
tE = pd.DataFrame([cdata['avg(mmtco2e)'][1]*1.10231,
avg(t[t['type'].str.contains('piles')]['value'])/1000000,
(cdata['avg(mmtco2e)'][1]*1.10231)+(avg(t[t['type'].str.contains('piles')]['value'])/1000000)],columns = ['Mt CO2e'])
# NOTE(review): the 'buring' typo in the label below persists into the
# pile_em table (string is runtime data, left unchanged here).
tE['Source']=['CO2 pile buring', 'CO2e BC pile burning', 'Total Mt CO2e']
tE.to_sql('pile_em', sqdb['cx'], if_exists = 'replace')
In [13]:
#print tabulate(tE, headers = tE.columns.tolist(), tablefmt ='pipe')
tE
Out[13]:
In [14]:
# Timber Product Output (TPO) residue data by ownership class
# (Google Sheet; hrow=1 means the header is on the second row).
tpoData = ut.gData('1GDdquzrCoq2cxVN2fbCpP4gwi2yrMnONNrWbfhZKZu4', 872275354, hrow=1)
tpoData
Out[14]:
Data from TPO does not account for forest management activities that do not result in commercial products (timber sales, biomass sales). To estimate the amount of residual material produced from non commercial management activities we use data from the US Forest Service (FACTS) and from CalFires timber harvest plan data.
Data from TPO does not account for forest management activities that do not result in commercial products (timber sales, biomass sales). We use a range of 10-35 BDT/acre to convert acres reported in FACTS to volume.
In [12]:
# Load the FACTS activity-category crosswalk into SQLite.
# NOTE(review): `sheetname` was renamed `sheet_name` in pandas >= 0.21.
pd.read_excel('FACTS_Tabular_092115.xlsx', sheetname = 'CategoryCrosswalk').to_sql('facts_cat', sqdb['cx'], if_exists = 'replace')
In [13]:
# USFS FACTS hazardous-fuels treatments with timber-sale rows already removed
# (see the filtering rationale in the prose below).
pd.read_csv('pd/facts_notimber.csv').to_sql('facts_notimber', sqdb['cx'], if_exists='replace')
The USFS reports Hazardous Fuels Treatment (HFT) activities as well as Timber Sales (TS) derived from the FACTS database. We use these two datasets to estimate the number of acres treated that did not produce commercial material (sawlogs or biomass) and where burning was not used. The first step is to eliminate all treatments in the HFT dataset that included timber sales. We accomplish this by eliminating all rows in the HFT dataset that have identical FACTS_ID fields in the TS dataset. We further filter the HFT dataset by removing any planned but not executed treatments (nbr_units1 > 0 below -- nbr_units1 references NBR_UNITS_ACCOMPLISHED in the USFS dataset, see metadata for HFT here), and use text matching in the 'ACTIVITY' and 'METHOD' fields to remove any rows that contain reference to 'burning' or 'fire'. Finally, we remove all rows that reference 'Biomass' in the method category, as it is assumed that this means material was removed for bioenergy.
In [14]:
# Completed acres of non-burn, non-biomass fuel treatments, grouped by
# activity, method, R5 category, and year (filters described in prose above).
usfs_acres = pd.read_sql('''select
sum(nbr_units1) acres,
method,
strftime('%Y',date_compl) year,
cat."ACTIVITY" activity,
cat."TENTATIVE_CATEGORY" r5_cat
from facts_notimber n
join facts_cat cat
on (n.activity = cat."ACTIVITY")
where date_compl is not null
and nbr_units1 > 0
and cat."TENTATIVE_CATEGORY" != 'Burning'
and cat."ACTIVITY" not like '%ire%'
and method not like '%Burn%'
and method != 'Biomass'
group by cat."ACTIVITY",
year,
method,
cat."TENTATIVE_CATEGORY"
order by year;''', con = sqdb['cx'])
In [15]:
def sumBDT(ac, maxbdt = 35, minbdt = 10):
    """Total bone-dry tons (BDT) for `ac` treated acres.

    Draws one BDT/acre value per whole acre from a normal distribution
    implied by the [minbdt, maxbdt] range (interpreted as mean +/- 2 stdev),
    clipping negative draws to zero. The fractional acre contributes a
    proportionally weighted extra draw.

    Parameters
    ----------
    ac : float
        Treated acres; need not be an integer.
    maxbdt, minbdt : float, optional
        Upper and lower bounds of the assumed BDT/acre range.

    Returns
    -------
    float
        Total BDT.
    """
    # Float division: with the default 35/10 bounds, Python 2 integer
    # division yields 22 instead of the correct midpoint 22.5.
    av = (float(maxbdt) + float(minbdt)) / 2
    stdev = (float(maxbdt) - float(minbdt)) / 4
    d_frac = (ac - np.floor(ac)) * np.random.normal(av, stdev, 1).clip(min=0)[0]
    # int() cast: numpy rejects a float sample size.
    t_bdt = np.sum(np.random.normal(av, stdev, int(np.floor(ac))).clip(min=0))
    return d_frac + t_bdt
In [16]:
# Convert treated acres to bone-dry tons via the stochastic 10-35 BDT/acre
# model, then total by year.
# NOTE(review): groupby().sum() also sums the 'acres' column into the totals.
usfs_acres['bdt'] = usfs_acres['acres'].apply(sumBDT)
usfs_an_bdt = usfs_acres.groupby(['year']).sum()
Average wood density weighted by harvested species percent. Derived from McIver and Morgan, Table 4
In [17]:
# Harvest-weighted average wood density (lbs per cubic foot); weights are
# species harvest percentages from McIver and Morgan, Table 4.
wood_dens = ut.gData('138FWlGeW57MKdcz2UkWxtWV4o50SZO8sduB1R6JOFp8', 1297253755)
wavg_dens =sum(wood_dens.pct/100 * wood_dens.density_lbscuft)
In [60]:
# Human-readable descriptions for the residual-source category codes used in
# resid_stats. Fix: 'opub_lr' previously duplicated the 'fi_lr' description
# ("industrial private lands") instead of describing other public lands.
cat_codes = {'nf_ncmr': 'Unburned, non-commercial management residuals from National Forest lands',
             'nf_lr': 'Logging residuals generated from timber sales on National Forest lands',
             'opriv_lr': 'Logging residuals generated from timber sales on non-industrial private forest lands',
             'fi_lr': 'Logging residuals generated from timber sales on industrial private lands',
             'opub_lr': 'Logging residuals generated from timber sales on other public lands'}
In [48]:
# Convert BDT to a volume column.
# NOTE(review): bdt * (lbs/cuft) is dimensionally suspect for cubic feet;
# cuft = bdt*2000/wavg_dens would be expected -- confirm intent and units.
usfs_an_bdt['cuft']= usfs_an_bdt.bdt *wavg_dens
# Summary statistics (in millions of cubic feet) of non-commercial residuals.
# NOTE(review): iloc[6:,2] positionally selects the third column and skips
# the first six years -- TODO confirm which years are dropped and why.
resid_stats=pd.DataFrame((usfs_an_bdt.iloc[6:,2]/1000000).describe())
resid_stats.columns = ['nf_ncmr']
# Logging-residue summary stats from TPO, one column per ownership class.
resid_stats['nf_lr']=tpoData[tpoData.ownership.str.contains('National Forest')]['loggingresidues'].describe()
resid_stats['opriv_lr']=tpoData[tpoData.ownership.str.contains('Other Private')]['loggingresidues'].describe()
resid_stats['fi_lr']=tpoData[tpoData.ownership.str.contains('Forest Industry')]['loggingresidues'].describe()
resid_stats['opub_lr']=tpoData[tpoData.ownership.str.contains('Other Public')]['loggingresidues'].describe()
resid_stats
Out[48]:
In [63]:
# Markdown table of residual summary stats (Python 2 print statement).
print tabulate(resid_stats, headers = resid_stats.columns.tolist(), tablefmt ='pipe')
In [120]:
import os
In [133]:
# Base names (extension stripped) of the CSV files in lf/.
[os.path.splitext(i)[0] for i in os.listdir('lf/') if os.path.splitext(i)[1] =='.csv']
Out[133]:
In [9]:
# Custom forestry volume units for pint (board-foot conversion factors per
# cubic foot, bone-dry units, etc.).
# NOTE(review): pint unit names must be valid Python identifiers; names
# containing spaces ('cubic foot', 'board foot sawlog', ...) will raise on
# define() -- verify these definitions actually load.
ureg = UnitRegistry()
ureg.define('cubic foot = cubic_centimeter/ 3.53147e-5 = cubic_foot' )
ureg.define('million cubic foot = cubic_foot*1000000 = MMCF' )
ureg.define('board foot sawlog = cubic_foot / 5.44 = BF_saw')
ureg.define('board foot veneer = cubic_foot / 5.0 = BF_vo')
ureg.define('board foot bioenergy = cubic_foot / 1.0 = BF_bio')
ureg.define('bone-dry unit = cubic_foot * 96 = BDU')