In [ ]:
from __future__ import print_function
%pylab notebook
import datetime as dt
import glob
import matplotlib.pyplot as plt
import pdb
from charistools.hypsometry import Hypsometry
from charistools.meltModels import TriSurfTempIndexMelt
from charistools.meltModels import ImshowTriSurfMelt
from charistools.meltModels import PlotTriSurfInput
from charistools.meltModels import PlotTriSurfMelt
from charistools.modelEnv import ModelEnv
from charistools.timeSeries import TimeSeries
import numpy as np
import pandas as pd
import re
import os
import sys
In [ ]:
calibration_dir = '/work/charis/ti_model/calibrations_correct_cost'
nstrikes = 2
params = "DDFnbr=10mm_N100_M050"
calibrationID = "AM_Vakhsh_at_Komsomolabad"
best_models_file = "%s/%s.%dstr_%s.stats.best20.dat" % (calibration_dir, calibrationID, nstrikes, params)
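# The .best20.dat file is a pickled pandas DataFrame of the 20 best calibration models; row 0 is the best overall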
best_df = pd.read_pickle(best_models_file)
print("best model 0 : %s" % best_df.iloc[0].model)
print("best model 10: %s" % best_df.iloc[10].model)
print("best model 19: %s" % best_df.iloc[19].model)
best_df
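In [ ]:
# Optional check (a minimal sketch): list all 20 best models by rank,
# assuming the best_df loaded above keeps them in ranked order in its 'model' column
for rank, model in enumerate(best_df.model):
    print("rank %2d : %s" % (rank, model))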
In [ ]:
def prep_config_for_model_type(drainageID, nstrikes):
    '''
    Sets up the model config for the best calibration model associated with this basin and nstrikes.
    drainageID : name of drainage where the model will be run, XX_river_at_pourpoint
    nstrikes : 2 or 3 (MODICE strikes)
    Returns a dict with:
    env : ModelEnv object built from the charis model config file
    majorID : 2-character major basin ID parsed from drainageID
    calibrationID : calibration basin whose best model will be used
    model_str : string with the best overall model (DDF parameters) to run
    label : string to use for output filenames
    '''
#on summit for partner basins
configFile = '/projects/brodzik/charis_ti_melt/partner_basins_modelEnv_config.ini'
#on summit for all basins used in REEC paper
#configFile = '/projects/brodzik/ipython_notebooks/charis/calibration_modelEnv_config.ini'
myEnv = ModelEnv(tileConfigFile=configFile)
# parse the drainageID to decide on the best calibration basin models to use
majorID = drainageID[:2]
if majorID == "IN":
calibrationID = "IN_Hunza_at_DainyorBridge"
elif majorID == "AM":
calibrationID = "AM_Vakhsh_at_Komsomolabad"
elif majorID == "SY":
calibrationID = "SY_Naryn_at_NarynTown"
elif drainageID == "GA_Narayani_at_Devghat":
calibrationID = drainageID
elif drainageID == "GA_Karnali_at_Benighat":
calibrationID = drainageID
elif drainageID == "GA_SaptaKosi_at_Chatara":
calibrationID = drainageID
elif majorID == "GA":
calibrationID = "GA_Karnali_at_Benighat"
elif majorID == "BR":
calibrationID = "GA_SaptaKosi_at_Chatara"
elif majorID == "BL":
calibrationID = "SY_Naryn_at_NarynTown"
else:
raise ValueError("ERROR: Skipping drainageID=%s, unknown majorID=%s" % (drainageID, majorID))
# Parse for the calibration basin short name
p = re.compile(r"[A-Z]{2}_([A-Za-z]+)_")
m = p.search(calibrationID)
shortName = m.group(1)
label = "best_%s_SA_model" % shortName
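    # e.g. calibrationID "AM_Vakhsh_at_Komsomolabad" -> shortName "Vakhsh" -> label "best_Vakhsh_SA_model"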
melt_type = ['snow_on_land_melt_by_elevation',
'snow_on_ice_melt_by_elevation',
'exposed_glacier_ice_melt_by_elevation']
# Set calibration_dir to the one in /pl/active/charis/
calibration_dir = "/pl/active/charis/ti_model/calibrations_correct_cost"
# Use the best overall model, get it from the models file with best models
# by drainage and nstrikes
print("Prepping for drainageID=%s" % (drainageID), file=sys.stderr)
params = "DDFnbr=10mm_N100_M050"
models_file = "%s/%s.%dstr_%s.stats.best20.dat" % (calibration_dir,
calibrationID, nstrikes, params)
best_df = pd.read_pickle(models_file)
# index 0 in this file is the best overall model
model_str = best_df.iloc[0].model
print("Using models from %s" % models_file, file=sys.stderr)
print("best model 0 : %s" % model_str)
#print("Output locations will be:", file=sys.stderr)
#for i in melt_type:
# print("%s: %s" % (i, myEnv.tileConfig['hypsometry'][i]['dir']), file=sys.stderr)
out = {'env':myEnv,
'majorID':majorID,
'calibrationID':calibrationID,
'model_str':model_str,
'label':label}
return out
In [ ]:
ids = ['SY_Naryn', 'AM_Vakhsh', 'IN_Hunza', 'GA_Karnali', 'BR_SaptaKosi']
for id in ids:
out = prep_config_for_model_type(id, 2)
print(out)
In [ ]:
# Generate a list of all OBJECTID basins
#%cd /Users/brodzik/projects/CHARIS/derived_hypsometries/MODSCAG_GF_v09_fromFile_MERRA_less_ET/
%cd /work/charis/ti_model/derived_hypsometries/MODSCAG_GF_v09_fromFile/
#majorBasinIDs = ['AM', 'BR','GA_v01', 'IN_v01', 'SY_v01']
majorBasinIDs = ['BR']
drainageIDs = []
for id in majorBasinIDs:
ids = glob.glob("%s_OBJECTID*" % id)
ids.sort()
drainageIDs = drainageIDs + ids
print("There are %d sub-basins for the %s major basin" % (len(ids), id))
len(drainageIDs)
drainageIDs
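# Keep only a subset of the sub-basins for this run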
drainageIDs = drainageIDs[-4:-1]
drainageIDs
In [ ]:
#names = ['Astore', 'DrasNala', 'Gilgit', 'Hunza', 'Kharmong', 'Shigar', 'Shyok', 'Tarbela', 'Zanskar']
#names = ['10']
#names = ['Tarbela']
#drainageIDs = ["IN_OBJECTID%s" % name for name in names]
#drainageIDs = ["IN_Hunza_at_DainyorBridge",
# "AM_Vakhsh_at_Komsomolabad",
# "SY_Naryn_at_NarynTown",
# "GA_SaptaKosi_at_Chatara",
# #"GA_Narayani_at_Devghat",
# "GA_Karnali_at_Benighat"]
drainageIDs = ["AM_Vakhsh_at_Komsomolabad"]
In [ ]:
# Generate a list of all partner basins
%cd /scratch/summit/brodzik/ti_model/basins/partner_basins
drainageIDs = glob.glob("AM_*_at_*")
drainageIDs.sort()
print("There are %d partner basins in this batch" % (len(drainageIDs)))
drainageIDs = drainageIDs[-6:-5]
drainageIDs
In [ ]:
ablation_method = 'grsize_scag'
threshold = 'fromFile'
nstrikes = 2
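# Model years 2001 through 2014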
years = np.arange(14) + 2001
#years = np.arange(1) + 2001
#years = np.arange(7) + 2001
#years = np.arange(7) + 2008
DDF_annotation = True
show_rainfall = False
rainfall_col = 'diff_km3'
show_runoff = False
closePlot = True
In [ ]:
def run_model(myEnv, drainageID, year=2001, nstrikes=3, ablation_method='grsize_scag', threshold=205,
model_str="8.67_8.67_10.0_17.0", label='best_model'):
input = myEnv.model_inputs(drainageID=drainageID,
year=year,
modice_nstrikes=nstrikes,
ablation_method=ablation_method,
threshold=threshold)
(min_snow_ddf, max_snow_ddf, min_ice_ddf, max_ice_ddf) = model_str.split("_")
min_snow_ddf = float(min_snow_ddf)
max_snow_ddf = float(max_snow_ddf)
min_ice_ddf = float(min_ice_ddf)
max_ice_ddf = float(max_ice_ddf)
(SOLmelt, SOImelt, EGImelt) = TriSurfTempIndexMelt(
input['snow_on_land_by_elevation_filename'],
input['snow_on_ice_by_elevation_filename'],
input['exposed_glacier_ice_by_elevation_filename'],
input['temperature_by_elevation_filename'],
min_snow_ddf=min_snow_ddf,
max_snow_ddf=max_snow_ddf,
min_ice_ddf=min_ice_ddf,
max_ice_ddf=max_ice_ddf)
SOLmeltfile = myEnv.hypsometry_filename(
type='snow_on_land_melt_by_elevation',
drainageID=drainageID,
year=year,
modice_nstrikes=nstrikes,
ablation_method=ablation_method,
threshold=threshold)
SOImeltfile = myEnv.hypsometry_filename(
type='snow_on_ice_melt_by_elevation',
drainageID=drainageID,
year=year,
modice_nstrikes=nstrikes,
ablation_method=ablation_method,
threshold=threshold)
EGImeltfile = myEnv.hypsometry_filename(
type='exposed_glacier_ice_melt_by_elevation',
drainageID=drainageID,
year=year,
modice_nstrikes=nstrikes,
ablation_method=ablation_method,
threshold=threshold)
SOLmeltfile = SOLmeltfile.replace('_by_elev.', '_by_elev.' + label + '.')
SOImeltfile = SOImeltfile.replace('_by_elev.', '_by_elev.' + label + '.')
EGImeltfile = EGImeltfile.replace('_by_elev.', '_by_elev.' + label + '.')
columns = [float(i) for i in SOLmelt.data.columns]
SOLmelt.data.columns = columns
    SOLmelt.data = SOLmelt.data[np.sort(columns)]
# Temporary fix for SOLmelt.data NaNs
SOLmelt.data = SOLmelt.data.fillna(value=0.)
columns = [float(i) for i in SOImelt.data.columns]
SOImelt.data.columns = columns
    SOImelt.data = SOImelt.data[np.sort(columns)]
columns = [float(i) for i in EGImelt.data.columns]
EGImelt.data.columns = columns
    EGImelt.data = EGImelt.data[np.sort(columns)]
SOLmelt.write(filename=SOLmeltfile, decimal_places=6)
SOImelt.write(filename=SOImeltfile, decimal_places=6)
EGImelt.write(filename=EGImeltfile, decimal_places=6)
print("%s : %d : model=%s" % (drainageID, year, model_str))
baseFilename = SOImeltfile
p = re.compile(r'snow_on_ice.+')
baseFilename = p.sub('', baseFilename)
return(baseFilename, input, model_str, SOLmelt, SOImelt, EGImelt)
In [ ]:
def add_isotherm(ax, x, y, color):
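    '''Overlays the (x, y) isotherm on ax in the given color without changing the y-limits.'''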
orig_ylim = ax.get_ylim()
ax.plot(x, y, c=color)
ax.set_ylim(orig_ylim)
In [ ]:
def show_melt_hyps(drainageID, year, baseFilename, temperatureFilename,
SOLmelt, SOImelt, EGImelt, label='best_model', closePlot=True):
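    '''
    Plots the three melt-by-elevation hypsometries and the temperature hypsometry
    for this drainage/year, overlays the zero-degree isotherm, and saves the
    figure to <baseFilename><label>.melt_hyps.png.
    '''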
year_str = str(year)
fig, axes = plt.subplots(4,1, figsize=(8,10))
    # If SOLmelt is non-empty but SOImelt and EGImelt are empty,
    # then make zero-filled copies of SOLmelt's dimensions for them
if not SOLmelt.data.empty and SOImelt.data.empty and EGImelt.data.empty:
SOImelt.data = SOLmelt.data.copy()
SOImelt.data[:] = 0.
EGImelt.data = SOLmelt.data.copy()
EGImelt.data[:] = 0.
if not SOLmelt.data.empty and not SOImelt.data.empty and not EGImelt.data.empty:
ImshowTriSurfMelt(axes[:3], SOLmelt, SOImelt, EGImelt)
else:
axes[0].annotate('no melt data', xy=(0.5, 0.5), xytext=(0.5, 0.5))
axes[0].set_title('Snow-on-Ice Melt')
axes[1].annotate('no melt data', xy=(0.5, 0.5), xytext=(0.5, 0.5))
axes[1].set_title('Exposed-Glacier-Ice Melt')
axes[2].annotate('no melt data', xy=(0.5, 0.5), xytext=(0.5, 0.5))
axes[2].set_title('Snow-on-Land Melt')
# Fetch the temperature data hypsometry
temperatureHyps = Hypsometry(temperatureFilename)
if not temperatureHyps.data.empty:
temperatureHyps.data.replace(to_replace='--', value=0.0, inplace=True)
axes[3] = temperatureHyps.imshow(ax=axes[3], title='Temperature',
vmin=-45, vmax=45, cmap='RdGy_r'
)
else:
axes[3].annotate('no data', xy=(0.5, 0.5), xytext=(0.5, 0.5))
axes[3].set_title('Temperature')
for ax in axes:
ax.set_title(drainageID + " (" + year_str + ") " + ax.get_title())
if not temperatureHyps.data.empty:
        # Add a zero-degree isotherm line to each of the melt plots.
        # For each day, the isotherm elevation is the lowest band with a
        # sub-zero temperature; if no band is below zero, default to the
        # highest elevation band.
zero_isotherm_elevations = np.full(len(temperatureHyps.data.index),
float(temperatureHyps.data.columns[-1]))
for i, d in enumerate(temperatureHyps.data.index):
neg_temps = temperatureHyps.data.loc[d][temperatureHyps.data.loc[d] < 0]
if len(neg_temps) > 0:
zero_isotherm_elevations[i] = float(neg_temps.index[0])
# zero_isotherm_elevations = [
# float(temperatureHyps.data.loc[d][temperatureHyps.data.loc[d] < 0].index[0])
# for d in temperatureHyps.data.index]
zero_isotherm_x = temperatureHyps.data.index
isotherm_color = (0.8, 0.8, 0.8)
add_isotherm(axes[0], zero_isotherm_x, zero_isotherm_elevations, isotherm_color)
add_isotherm(axes[1], zero_isotherm_x, zero_isotherm_elevations, isotherm_color)
add_isotherm(axes[2], zero_isotherm_x, zero_isotherm_elevations, isotherm_color)
add_isotherm(axes[3], zero_isotherm_x, zero_isotherm_elevations, isotherm_color)
fig.tight_layout()
outfile = baseFilename + label + '.melt_hyps.png'
fig.savefig(outfile)
print("Wrote melt_hyps to %s" % outfile)
if (closePlot):
plt.close('all')
In [ ]:
def show_melt_tseries(myEnv, drainageID, year, baseFilename, input, model_str,
SOLmelt, SOImelt, EGImelt,
label='best_model',
DDF_annotation=True, show_rainfall=False, rainfall_col=None,
show_runoff=False,
closePlot=True):
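    '''
    Plots model inputs, modelled melt by surface type, and monthly melt
    (optionally with rainfall, ET and runoff) for this drainage/year, and
    saves the figure to <baseFilename><label>.melt_tseries.png.
    '''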
year_str = str(year)
(min_snow_ddf, max_snow_ddf, min_ice_ddf, max_ice_ddf) = model_str.split("_")
min_snow_ddf = float(min_snow_ddf)
max_snow_ddf = float(max_snow_ddf)
min_ice_ddf = float(min_ice_ddf)
max_ice_ddf = float(max_ice_ddf)
melt_by_doy = (SOLmelt.data_by_doy() +
SOImelt.data_by_doy() +
EGImelt.data_by_doy())
total_melt = melt_by_doy.sum()
print("total melt = %.2f" % total_melt, file=sys.stderr)
fig, ax = plt.subplots(3,1,figsize=(9,9))
if melt_by_doy.empty:
ax[0].annotate('no data', xy=(0.5, 0.5), xytext=(0.5, 0.5))
ax[1].annotate('no data', xy=(0.5, 0.5), xytext=(0.5, 0.5))
ax[2].annotate('no data', xy=(0.5, 0.5), xytext=(0.5, 0.5))
else:
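        # Aggregate daily melt to monthly totals, indexed at mid-month for plotting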
        melt_by_month = melt_by_doy.groupby(pd.Grouper(freq='M')).sum().to_frame(name='melt')
        yyyymm_index = pd.to_datetime(melt_by_month.index.map(lambda x: x.strftime('%Y-%m-15')))
        df = pd.DataFrame(data=melt_by_month.values, index=yyyymm_index, columns=['melt'])
        df['SOLmelt'] = SOLmelt.data_by_doy().groupby(pd.Grouper(freq='M')).sum().values
        df['SOImelt'] = SOImelt.data_by_doy().groupby(pd.Grouper(freq='M')).sum().values
        df['EGImelt'] = EGImelt.data_by_doy().groupby(pd.Grouper(freq='M')).sum().values
left_ax, right_ax = PlotTriSurfInput(
ax[0],
input['snow_on_land_by_elevation_hyps'],
input['snow_on_ice_by_elevation_hyps'],
input['exposed_glacier_ice_by_elevation_hyps'],
input['temperature_by_elevation_hyps'],
temperature_color=(0.7, 0.7, 0.7),
title="Inputs for %s (%d)" % (drainageID, year))
h, l = left_ax.get_legend_handles_labels()
h1, l1 = right_ax.get_legend_handles_labels()
left_ax.legend(h+h1, l+l1, framealpha=0.5, loc='upper right')
#left_ax.legend(framealpha=0.5)
ax[1] = PlotTriSurfMelt(
ax[1],
SOLmelt,
SOImelt,
EGImelt,
title="Modelled melt for %s (%d)" % (drainageID, year))
ax[1].legend(framealpha=0.5, loc='upper right')
if DDF_annotation:
ax[1].text(ax[1].get_xlim()[0] + (0.03 * (ax[1].get_xlim()[1] - ax[1].get_xlim()[0])),
ax[1].get_ylim()[1] * 0.7,
'snow DDF = %.2f - %.2f $mm/C/day$\nice DDF = %.2f - %.2f $mm/C/day$' %
(min_snow_ddf, max_snow_ddf, min_ice_ddf, max_ice_ddf),
style='italic',
bbox={'facecolor':'gray', 'alpha':0.1, 'pad':10})
# Get the line colors used by PlotTriSurfMelt
lines = ax[1].get_lines()
SOIcolor = lines[1].get_color()
EGIcolor = lines[2].get_color()
SOLcolor = lines[3].get_color()
title = "Melt"
right_bar_list = ['EGImelt', 'SOImelt', 'SOLmelt']
right_bar_colors = [ EGIcolor, SOIcolor, SOLcolor ]
right_bar_title = "melt"
right_bar_sum = df["melt"].sum()
ylim = np.amax(df["melt"])
ylim_min = 0.
# Fetch rainfall and/or runoff, if requested
have_et = False
if show_rainfall:
rainfallFile = myEnv.calibration_filename(type="rainfall", drainageID=drainageID)
rainfall = TimeSeries(rainfallFile)
monthly_rainfall = rainfall.data['rainfall'][year_str + '-01-01':year_str + '-12-01']
monthly_et = rainfall.data['et_km3'][year_str + '-01-01':year_str + '-12-01']
rainfall_label = "rainfall"
et_label = "ET"
if 0 < len(monthly_rainfall.index):
df[rainfall_label] = monthly_rainfall.values
else:
df[rainfall_label] = np.nan
if 0 < len(monthly_et.index):
df[et_label] = -1 * monthly_et.values
have_et = True
else:
df[et_label] = np.nan
rainfallcolor = 'c'
right_bar_list = [rainfall_label] + right_bar_list
right_bar_colors = [ rainfallcolor ] + right_bar_colors
# The following will ignore any NaNs and just return
# the operation using the non-NaN values:
ylim = np.amax(df[["melt", rainfall_label]].sum(axis=1))
title = rainfall_label + " + Melt"
right_bar_title = 'melt + ' + rainfall_label
right_bar_sum = df[["melt", rainfall_label]].sum(axis=1).sum()
if have_et:
etcolor = (0.8, 0.8, 0.8)
et_bar_list = ["ET"]
et_bar_colors = [ etcolor ]
ylim_min = df.ET.min()
title = "%s - ET" % title
right_bar_title = "%s - ET" % right_bar_title
right_bar_sum = right_bar_sum + df[[et_label]].sum(axis=1).sum()
monthly_annotation = '%s = %.2f $km^3$' % (right_bar_title, right_bar_sum)
if show_runoff:
runoffFile = myEnv.calibration_filename(type="runoff", drainageID=drainageID)
runoff = TimeSeries(runoffFile)
monthly_runoff = runoff.data['runoff'][year_str + '-01-01':year_str + '-12-01']
if 0 < len(monthly_runoff.index) and np.amax(monthly_runoff.values) >= 0.:
df["runoff"] = monthly_runoff.values
max_runoff = np.amax(df["runoff"])
else:
df["runoff"] = np.nan
max_runoff = 0
runoffcolor = (0.4, 0.4, 0.4)
ylim = np.amax([ylim, max_runoff])
title = "Runoff vs. " + title
df[["runoff"]].plot(ax=ax[2], kind="bar",
edgecolor=(0.9, 0.9, 0.9),
color=[runoffcolor])
monthly_annotation = '%s = %.2f $km^3$\nrunoff = %.2f $km^3$' % (
right_bar_title, right_bar_sum, df["runoff"].sum())
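        # Stacked bars of melt by surface (plus rainfall when requested), drawn over any runoff bars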
df[right_bar_list].plot(ax=ax[2],
stacked=True, kind="bar",
position=0.,
edgecolor=(0.9, 0.9, 0.9),
color=right_bar_colors)
if have_et:
df[et_bar_list].plot(ax=ax[2],
kind="bar",
position=0.0,
edgecolor=(0.9, 0.9, 0.9),
color=et_bar_colors)
ax[2].axhline(c=(0.7, 0.7, 0.7))
ax[2].set_title("Monthly " + title)
ax[2].set_ylabel('Volume (' + r'$km^3$' + ')')
ax[2].xaxis.set_major_formatter(plt.NullFormatter())
for container in ax[2].containers:
plt.setp(container, width=0.25)
ax[2].set_ylim([1.1 * ylim_min, 1.1 * ylim])
ax[2].text(ax[2].get_xlim()[0] + (0.03 * (ax[2].get_xlim()[1] - ax[2].get_xlim()[0])),
ax[2].get_ylim()[1] * 0.7,
monthly_annotation,
style='italic',
bbox={'facecolor':'gray', 'alpha':0.1, 'pad':10})
handles, labels = ax[2].get_legend_handles_labels()
# ax[2].legend(reversed(handles), reversed(labels), loc='upper right')
ax[2].legend(loc='upper right')
fig.tight_layout()
outfile = baseFilename + label + '.melt_tseries.png'
fig.savefig(outfile)
print("Wrote melt_tseries to %s" % outfile)
if (closePlot):
plt.close('all')
In [ ]:
drainageIDs, years, nstrikes
In [ ]:
for drainageID in drainageIDs:
out = prep_config_for_model_type(drainageID, nstrikes)
print("drainageID=%s, majorID=%s, nstrikes=%d, model=%s, label=%s\n" % (
drainageID, out['majorID'], nstrikes, out['model_str'], out['label']),
file=sys.stderr)
for year in years:
(baseFilename, input, out['model_str'], SOLmelt, SOImelt, EGImelt) = run_model(
out['env'],
drainageID=drainageID, year=year, nstrikes=nstrikes,
ablation_method=ablation_method, threshold=threshold,
model_str=out['model_str'], label=out['label'])
show_melt_hyps(drainageID, year, baseFilename, input['temperature_by_elevation_filename'],
SOLmelt, SOImelt, EGImelt, label=out['label'], closePlot=closePlot)
show_melt_tseries(out['env'], drainageID, year, baseFilename, input, out['model_str'],
SOLmelt, SOImelt, EGImelt, label=out['label'],
DDF_annotation=DDF_annotation,
show_rainfall=show_rainfall,
rainfall_col=rainfall_col,
show_runoff=show_runoff,
closePlot=closePlot)
In [ ]:
hyps = Hypsometry(filename="/Users/brodzik/projects/CHARIS/derived_hypsometries/modscag_gf_grsize_scag/IN_OBJECTID10/IN_OBJECTID10.2001.0100m.ERA_Interim_downscale_uncorrected_tsurf.v0.2_by_elev.txt")
In [ ]:
def prep_reec_config_for_model_type(drainageID, nstrikes, ncycle=None, rank=0):
    '''
    Sets up the model config, changing the melt output file directories if needed.
    drainageID : name of drainage where the model will be run, XX_river_at_pourpoint
    nstrikes : 2 or 3 (MODICE strikes)
    rank : when ncycle is None, the model rank to use from the best20 file (0 = best overall model)
    ncycle : if not None, the SA cycle number whose best model will be used
    Returns a dict with:
    env : ModelEnv object, with melt output directories set for this run
    majorID : 2-character major basin ID parsed from drainageID
    calibrationID : calibration basin whose models will be used
    model_str : string with the model to run, either the ranked best model or the best model from the requested SA cycle
    label : string to use for output filenames
    '''
#on summit
#configFile = '/projects/brodzik/charis_ti_melt/calibration_modelEnv_config.ini'
# on worsley with sshfs
configFile = '/Users/brodzik/ipython_notebooks/charis/calibration_modelEnv_config.ini'
myEnv = ModelEnv(tileConfigFile=configFile)
# parse the drainageID to decide on the best calibration basin models to use
majorID = drainageID[:2]
if majorID == "IN":
calibrationID = "IN_Hunza_at_DainyorBridge"
elif majorID == "AM":
calibrationID = "AM_Vakhsh_at_Komsomolabad"
elif majorID == "SY":
calibrationID = "SY_Naryn_at_NarynTown"
elif drainageID == "GA_Narayani_at_Devghat":
calibrationID = drainageID
elif drainageID == "GA_Karnali_at_Benighat":
calibrationID = drainageID
elif drainageID == "GA_SaptaKosi_at_Chatara":
calibrationID = drainageID
elif majorID == "GA":
calibrationID = "GA_Karnali_at_Benighat"
elif majorID == "BR":
calibrationID = "GA_SaptaKosi_at_Chatara"
else:
raise ValueError("ERROR: Skipping drainageID=%s, unknown majorID=%s" % (drainageID, majorID))
# Parse for the calibration basin short name
p = re.compile(r"[A-Z]{2}_([A-Za-z]+)_")
m = p.search(calibrationID)
shortName = m.group(1)
label = "best_%s_SA_model" % shortName
# For CycleXXX runs, change the location of output melt files to include the Cycle number
# Filenames will be exactly the same, just the output directories will be different
# For BestXXX runs, change the location of output melt files to include the Best model number
# Filenames will be exactly the same, just the output directories will be different
# This only works for REECv0 runs, should be generalized
p = re.compile(r'REECv0')
melt_type = ['snow_on_land_melt_by_elevation',
'snow_on_ice_melt_by_elevation',
'exposed_glacier_ice_melt_by_elevation']
calibration_dir = "%s/%s" % (myEnv.tileConfig['model_top_dir'], "calibrations_correct_cost")
if not ncycle:
# Use the best overall model, get it from the models file with best models
# by drainage and nstrikes
print("Prepping for model choice %d for drainageID=%s" % (rank, drainageID),
file=sys.stderr)
new_location = "REECv0_ModelRank%03d" % rank
for i in melt_type:
myEnv.tileConfig['hypsometry'][i]['dir'] = p.sub(
new_location, myEnv.tileConfig['hypsometry'][i]['dir'])
params = "DDFnbr=10mm_N100_M050"
models_file = "%s/%s.%dstr_%s.stats.best20.dat" % (
calibration_dir, calibrationID, nstrikes, params)
models_df = pd.read_pickle(models_file)
model_str = models_df.iloc[rank].model
else:
# Use the best model for this SA cycle number
# edit the output directory locations to match the cycle number
print("Prepping for Best model from cycle=%03d for drainageID=%s" % (ncycle, drainageID),
file=sys.stderr)
new_location = "REECv0_Cycle%03d" % ncycle
for i in melt_type:
myEnv.tileConfig['hypsometry'][i]['dir'] = p.sub(
new_location, myEnv.tileConfig['hypsometry'][i]['dir'])
# Read the SA cycle model results to get the best model for this cycle
params = "DDFnbr=10mm_N100_M050"
models_file = "%s/%s.%dstr_%s.SA_summary.best20.dat" % (
calibration_dir, drainageID, nstrikes, params)
models_df = pd.read_pickle(models_file)
model_str = models_df.at[ncycle, "model"]
print("Using models from %s" % models_file,
file=sys.stderr)
print("Output locations will be:", file=sys.stderr)
for i in melt_type:
print("%s: %s" % (i, myEnv.tileConfig['hypsometry'][i]['dir']), file=sys.stderr)
out = {'env':myEnv,
'majorID':majorID,
'calibrationID':calibrationID,
'model_str':model_str,
'label':label}
return out
In [ ]:
%cd BL_ChongKyzylSuu_at_KaraBatkak
%ls
In [ ]:
file = "BL_ChongKyzylSuu_at_KaraBatkak.2001.0100m.modicev04_1strike.snow_on_land_area_by_elev.txt"
In [ ]:
sol = Hypsometry(filename)
In [ ]:
sol.comments
In [ ]:
sol.data
In [ ]: