In [1]:
%run basics
%matplotlib
sys.path.append('dev/imchugh')
In [2]:
import dark_T_response_functions as dark
import light_and_T_response_functions as light
import Partition_NEE as part
In [3]:
def get_configs_dict(cf,ds):
    """
    Build the configuration dictionary for the partitioning code from the
    control file (cf) and the data structure (ds).
    Integer options are read from the [ER]/[ER_LL]/[ERUsingLasslop] section
    of the control file, falling back to the defaults listed below.
    Returns a dictionary with the keys expected by Partition_NEE.
    """
    # (control file key, configs_dict key, default value) for integer options
    int_option_table = [("minimum_temperature_spread","minimum_temperature_spread",5),
                        ("step_size_days","step_size_days",5),
                        ("window_size_days","window_size_days",15),
                        ("minimum_percent_annual","minimum_pct_annual",30),
                        ("minimum_percent_noct_window","minimum_pct_noct_window",20),
                        ("minimum_percent_day_window","minimum_pct_day_window",50)]
    section_path = ["ER","ER_LL","ERUsingLasslop"]
    configs_dict = {"nan_value": int(c.missing_value)}
    for cf_name,cfg_name,default in int_option_table:
        opt = qcutils.get_keyvaluefromcf(cf,section_path,cf_name,default=default)
        configs_dict[cfg_name] = int(opt)
    # output_plots is a string in the control file; anything other than
    # the literal "True" (including the boolean default) disables plots
    opt = qcutils.get_keyvaluefromcf(cf,section_path,"output_plots",default=False)
    configs_dict["output_plots"] = (opt=="True")
    configs_dict["output_results"] = True
    # measurement interval in hours, from the time step in minutes
    ts = int(ds.globalattributes["time_step"])
    configs_dict["measurement_interval"] = float(ts)/60.0
    configs_dict["QC_accept_code"] = 0
    # plots go in a "respiration" sub-directory of the plot path
    opt = qcutils.get_keyvaluefromcf(cf,["Files"],"plot_path",default="plots/")
    configs_dict["output_path"] = os.path.join(opt,"respiration/")
    return configs_dict
In [4]:
def get_data_dict(ds,config_dict):
    """
    Assemble the data dictionary required by the partitioning code.
    Series are returned as ndarrays (not masked arrays) with rejected
    (flag != 0) or missing (== c.missing_value) points replaced by NaN.
    PAR is estimated from Fsd as Fsd * 0.46 * 4.6.
    """
    cleaned = {}
    for label in ["Fc","Fsd","Ta","VPD","ustar"]:
        series,flag,a = qcutils.GetSeries(ds,label)
        # replace c.missing_value and QC-rejected points with numpy.nan
        cleaned[label] = numpy.where((flag!=0)|(series==c.missing_value),
                                     numpy.nan,series)
    # put the data in the dictionary under the names the partitioning
    # code expects
    data = {"NEE": cleaned["Fc"],
            "PAR": cleaned["Fsd"]*0.46*4.6,
            "TempC": cleaned["Ta"],
            "VPD": cleaned["VPD"],
            "ustar": cleaned["ustar"],
            "date_time": numpy.array(ds.series["DateTime"]["Data"])}
    return data
In [5]:
def cleanup_ustar_dict(ds,ustar_dict):
    """
    Purpose:
     Clean up the ustar dictionary (modified in place);
     - make sure all years spanned by the data are present as keys
     - fill missing "ustar_mean" values with the mean of the valid years
    Usage:
     cleanup_ustar_dict(ds,ustar_dict)
    Author: PRI
    Date: September 2015
    """
    ldt = ds.series["DateTime"]["Data"]
    start_year = ldt[0].year
    end_year = ldt[-1].year
    data_years = range(start_year,end_year+1)
    # list() so the keys can be indexed under both Python 2 and 3
    ustar_years = list(ustar_dict.keys())
    # use the first year's entry as the template for the item names
    ustar_list = ustar_dict[ustar_years[0]]
    missing_value = float(c.missing_value)
    # add an all-missing entry for any data year absent from the dictionary
    for year in data_years:
        if str(year) not in ustar_years:
            ustar_dict[str(year)] = {}
            for item in ustar_list:
                ustar_dict[str(year)][item] = missing_value
    # loop over the list of ustar thresholds
    year_list = sorted(ustar_dict.keys())
    # get the average of good ustar threshold values
    good_values = []
    for year in year_list:
        ustar_threshold = float(ustar_dict[year]["ustar_mean"])
        if ustar_threshold!=missing_value:
            good_values.append(ustar_threshold)
    # guard against no valid thresholds at all; the original code divided
    # by zero here, leave the missing values untouched instead
    if len(good_values)==0:
        return
    ustar_threshold_mean = float(numpy.mean(numpy.array(good_values)))
    # replace missing values with mean of the good years
    for year in year_list:
        if ustar_dict[year]["ustar_mean"]==missing_value:
            ustar_dict[year]["ustar_mean"] = ustar_threshold_mean
In [6]:
def get_turbulence_indicator_ustar(ds,ustar_dict):
    """
    Build an integer turbulence indicator series: 1 where ustar is at or
    above the annual threshold, 0 where it is below.
    ustar_dict maps year strings to dictionaries containing "ustar_mean".
    """
    ldt = ds.series["DateTime"]["Data"]
    ts = int(ds.globalattributes["time_step"])
    ustar,f,a = qcutils.GetSeriesasMA(ds,"ustar")
    # start with everything flagged as turbulent
    indicator = numpy.ones(len(ldt),dtype=numpy.int32)
    # apply each year's ustar threshold to that year's data
    for year in sorted(ustar_dict.keys()):
        # first timestamp of the year depends on the time step
        first_time = "01:00" if ts==60 else "00:30"
        start_date = str(year)+"-01-01 "+first_time
        end_date = str(int(year)+1)+"-01-01 00:00"
        # get the ustar threshold
        threshold = float(ustar_dict[year]["ustar_mean"])
        # get the start and end datetime indices
        si = qcutils.GetDateIndex(ldt,start_date,ts=ts,default=0,match='exact')
        ei = qcutils.GetDateIndex(ldt,end_date,ts=ts,default=len(ldt),match='exact')
        # zero the indicator where ustar is below the threshold
        below_threshold = numpy.ma.where(ustar[si:ei]<threshold)[0]
        indicator[si:ei][below_threshold] = numpy.int32(0)
    return indicator
In [7]:
def apply_turbulence_filter(data_dict,indicator):
    """
    Replace NEE with NaN wherever the turbulence indicator is 0.
    Modifies data_dict["NEE"] in place (a new float array is stored).
    """
    nee = numpy.asarray(data_dict["NEE"],dtype=float).copy()
    nee[indicator==0] = numpy.nan
    data_dict["NEE"] = nee
In [8]:
# read the control file and construct the configuration dictionary
# NOTE(review): qcio/qcutils/qcrp/qcts, the constants module c and
# numpy/os/plt presumably come from the "%run basics" cell - TODO confirm
cf = qcio.load_controlfile(path="controlfiles")
# input netCDF file name taken from the control file
in_name = qcio.get_infilenamefromcf(cf)
# read the netCDF file into the data structure used throughout the notebook
ds = qcio.nc_read_series(in_name)
# time step; compared to 60 and divided by 60.0 elsewhere, so presumably
# minutes - TODO confirm
ts = int(ds.globalattributes["time_step"])
# Python datetime series spanning the whole record
ldt = ds.series["DateTime"]["Data"]
In [9]:
# build the configuration and data dictionaries
configs_dict = get_configs_dict(cf,ds)
data_dict = get_data_dict(ds,configs_dict)
# annual ustar thresholds from the change point detection (CPD) results
ustar_dict = qcrp.get_ustarthreshold_from_cpdresults(cf)
# ensure every data year has a threshold; fill missing years with the mean
cleanup_ustar_dict(ds,ustar_dict)
# indicator is 1 for turbulent periods, 0 where ustar < annual threshold
turbulence_indicator = get_turbulence_indicator_ustar(ds,ustar_dict)
# NaN-out NEE during non-turbulent periods (modifies data_dict in place)
apply_turbulence_filter(data_dict,turbulence_indicator)
In [10]:
# *** start of main code ***
# If user wants individual window plots, check whether output directories
# are present, and create if not
if configs_dict['output_plots']:
    output_path = configs_dict['output_path']
    configs_dict['window_plot_output_path'] = output_path
    if not os.path.isdir(output_path):
        os.makedirs(output_path)
# Get arrays of all datetimes, all dates and stepped dates
# original code
datetime_array = data_dict.pop('date_time')
# step_date_index_dict: indices per stepped window; all_date_index_dict:
# indices per date; year_index_dict: indices per year
# NOTE(review): exact structure is defined by part.get_dates - confirm there
(step_date_index_dict,
 all_date_index_dict,
 year_index_dict) = part.get_dates(datetime_array, configs_dict)
date_array = numpy.array(all_date_index_dict.keys())
date_array.sort()
step_date_array = numpy.array(step_date_index_dict.keys())
step_date_array.sort()
# Create variable name lists for results output
series_rslt_list = ['Nocturnally derived Re', 'GPP from nocturnal derived Re',
                    'Daytime derived Re', 'GPP from daytime derived Re']
new_param_list = ['Eo', 'rb_noct', 'rb_day', 'alpha_fixed_rb',
                  'alpha_free_rb', 'beta_fixed_rb', 'beta_free_rb',
                  'k_fixed_rb', 'k_free_rb', 'Eo error code',
                  'Nocturnal rb error code',
                  'Light response parameters + fixed rb error code',
                  'Light response parameters + free rb error code']
# Create dictionaries for results
# First the parameter estimates and error codes...
empty_array = numpy.empty([len(date_array)])
empty_array[:] = numpy.nan
# .copy() so each parameter gets its own array rather than a shared reference
opt_params_dict = {var: empty_array.copy() for var in new_param_list}
opt_params_dict['date'] = date_array
# Then the time series estimation
empty_array = numpy.empty([len(datetime_array)])
empty_array[:] = numpy.nan
series_est_dict = {var: empty_array.copy() for var in series_rslt_list}
series_est_dict['date_time'] = datetime_array
# Create a dictionary containing initial guesses for each parameter
params_dict = part.make_initial_guess_dict(data_dict)
In [11]:
# *** start of annual estimates of E0 code ***
# Get the annual estimates of Eo
print 'Optimising fit for Eo for each year...'
Eo_dict, EoQC_dict = part.optimise_annual_Eo(data_dict,params_dict,configs_dict,year_index_dict)
print 'Done!'
# Write to result arrays
year_array = numpy.array([i.year for i in date_array])
# NOTE(review): this iterates over every date's year, so each year is
# visited many times; the assignments are idempotent so the result is
# correct, but looping over the unique years would avoid the redundancy
for yr in year_array:
    index = numpy.where(year_array == yr)
    opt_params_dict['Eo'][index] = Eo_dict[yr]
    opt_params_dict['Eo error code'][index] = EoQC_dict[yr]
# *** end of annual estimates of E0 code ***
In [12]:
# *** start of estimating rb code for each window ***
# Rewrite the parameters dictionary so that there will be one set of
# defaults for the free and one set of defaults for the fixed parameters
params_dict = {'fixed_rb': part.make_initial_guess_dict(data_dict),
               'free_rb': part.make_initial_guess_dict(data_dict)}
# Do nocturnal optimisation for each window
print 'Optimising fit for rb using nocturnal data...'
for date in step_date_array:
    # Get Eo for the relevant year and write to the parameters dictionary
    param_index = numpy.where(date_array == date)
    params_dict['fixed_rb']['Eo_default'] = opt_params_dict['Eo'][param_index]
    # Subset the data and check length
    sub_dict = part.subset_window(data_dict, step_date_index_dict[date])
    # Subset again to remove daytime and then nan
    noct_dict = part.subset_daynight(sub_dict, noct_flag = True)
    len_all_noct = len(noct_dict['NEE'])
    noct_dict = part.subset_nan(noct_dict)
    len_valid_noct = len(noct_dict['NEE'])
    # Do optimisation only if data passes minimum threshold
    # (percentage of valid nocturnal points in the window)
    if round(float(len_valid_noct) / len_all_noct * 100) > \
    configs_dict['minimum_pct_noct_window']:
        params, error_state = dark.optimise_rb(noct_dict,params_dict['fixed_rb'])
    else:
        # error code 10: insufficient valid nocturnal data in this window
        params, error_state = [numpy.nan], 10
    # Send data to the results dict
    opt_params_dict['rb_noct'][param_index] = params
    opt_params_dict['Nocturnal rb error code'][param_index] = error_state
    # Estimate time series and plot if requested
    if error_state == 0 and configs_dict['output_plots']:
        this_params_dict = {'Eo': opt_params_dict['Eo'][param_index],
                            'rb': opt_params_dict['rb_noct'][param_index]}
        est_series_dict = part.estimate_Re_GPP(sub_dict, this_params_dict)
        combine_dict = dict(sub_dict, **est_series_dict)
        part.plot_windows(combine_dict, configs_dict, date, noct_flag = True)
# Interpolate
# fill windows where the fit failed by interpolating between neighbours
opt_params_dict['rb_noct'] = part.interp_params(opt_params_dict['rb_noct'])
print 'Done!'
# *** end of estimating rb code for each window ***
In [17]:
def ER_LloydTaylor(T,E0,rb):
    """
    Lloyd & Taylor (1994) ecosystem respiration model:
        ER = rb * exp(E0 * (1/(Tref - T0) - 1/(T - T0)))
    T: temperature driver (same units as c.Tref and c.T0 - TODO confirm
       whether these constants are in K or degrees C)
    E0: temperature sensitivity parameter
    rb: base respiration at the reference temperature Tref
    Fix: the original body used the global name 'Ts' instead of the
    argument T, so it only worked when a global Ts happened to exist.
    """
    t1 = 1/(c.Tref-c.T0)
    t2 = 1/(T-c.T0)
    ER = rb*numpy.exp(E0*(t1-t2))
    return ER
In [19]:
# Broadcast each day's fitted E0 and rb to every time step on that day,
# then calculate ER with the Lloyd-Taylor model
E0 = numpy.zeros(len(ldt))
rb = numpy.zeros(len(ldt))
# pre-compute year/month/day arrays for fast per-day matching
ldt_year = numpy.array([dt.year for dt in ldt])
ldt_month = numpy.array([dt.month for dt in ldt])
ldt_day = numpy.array([dt.day for dt in ldt])
for date,E0_val,rb_val in zip(opt_params_dict["date"],opt_params_dict["Eo"],opt_params_dict["rb_noct"]):
    param_year = date.year
    param_month = date.month
    param_day = date.day
    # indices of all time steps falling on this parameter date
    idx = numpy.where((ldt_year==param_year)&(ldt_month==param_month)&(ldt_day==param_day))[0]
    E0[idx] = E0_val
    rb[idx] = rb_val
# soil temperature (masked array) used as the temperature driver
Ts,f,a = qcutils.GetSeriesasMA(ds,"Ts")
ER_LT = ER_LloydTaylor(Ts,E0,rb)
In [22]:
# read the L6 output file to get the SOLO-derived ER for comparison
# NOTE(review): hardcoded site-specific path - breaks for other sites or
# machines; consider taking this from the control file instead
ds_l6 = qcio.nc_read_series("../Sites/Whroo/Data/Processed/all/Whroo_L6.nc")
ER_SOLO,f,a = qcutils.GetSeriesasMA(ds_l6,"ER_SOLO")
In [24]:
# compare Lloyd-Taylor ER (blue dots) against SOLO ER (red crosses)
fig=plt.figure()
plt.plot(ldt,ER_LT,'b.')
plt.plot(ldt,ER_SOLO,'r+')
plt.show()
In [25]:
# same comparison, smoothed with a 240-point window
# NOTE(review): 240 points is 5 days at a 30 minute time step - confirm
# this is the intended averaging period for other time steps
fig=plt.figure()
plt.plot(ldt,qcts.smooth(ER_LT,window_len=240),'b.')
plt.plot(ldt,qcts.smooth(ER_SOLO,window_len=240),'r+')
plt.show()
In [34]:
# *** start of code to estimate LRC parameters using fixed and free rb ***
print 'Optimising fit for light response parameters using fixed and free rb...'
# Now do daytime
for date in step_date_array:
# Get Eo for the relevant year and write to the parameters dictionary
param_index = np.where(date_array == date)
# Subset the data and check length
sub_dict = subset_window(data_dict, step_date_index_dict[date])
# Subset the data and check length
sub_dict = subset_window(data_dict, step_date_index_dict[date])
day_dict = subset_daynight(sub_dict, noct_flag = False)
len_all_day = len(day_dict['NEE'])
day_dict = subset_nan(day_dict)
len_valid_day = len(day_dict['NEE'])
In [ ]:
# *** start of code to estimate GPP and ER from Lloyd-Taylor
print 'Calculating time series from interpolated model parameters and drivers...'
# Loop through interpolated data and construct the time series
for ind, date in enumerate(date_array):
# Subset the data (single day)
sub_dict = part.subset_window(data_dict, all_date_index_dict[date])
# Get the indices for inserting the calculated data into the array
start_ind = all_date_index_dict[date][0]
end_ind = all_date_index_dict[date][1]
# For each parameter set, write current parameters to estimated
# parameters dictionary, then estimate the time series
swap_dict = {'Eo': 'Eo',
'rb': 'rb_noct',