SECOORA Notebook 2

Sea Surface Salinity time-series model skill

This notebook calculates several skill scores for the SECOORA models' weekly time-series saved by 00-fetch_data.ipynb.

Load configuration


In [1]:
import os
try:
    import cPickle as pickle  # Python 2: faster C implementation.
except ImportError:
    import pickle  # Python 3.


# Configuration for this run, produced by 00-fetch_data.ipynb.
run_name = '2014-07-07'
config_path = os.path.join(run_name, 'config.pkl')
with open(config_path, 'rb') as stream:
    config = pickle.load(stream)

In [2]:
import numpy as np
from pandas import DataFrame, read_csv
from utilities import (load_secoora_ncs, to_html,
                       save_html, apply_skill)


# Station metadata/observations table saved by 00-fetch_data.ipynb,
# indexed by station name.
fname = '{}-all_obs.csv'.format(run_name)
all_obs = read_csv(os.path.join(run_name, fname), index_col='name')

Skill 1: Model Bias (or Mean Bias)

The bias skill compares the model mean salinity against the observations. It is possible to introduce a Mean Bias in the model due to a mismatch of the boundary forcing and the model interior.

$$ \text{MB} = \mathbf{\overline{m}} - \mathbf{\overline{o}}$$

In [3]:
from utilities import mean_bias

dfs = load_secoora_ncs(run_name)

# Mean bias of each model time-series against the observations.
df = apply_skill(dfs, mean_bias, remove_mean=False, filter_tides=False)
skill_score = dict(mean_bias=df.copy())

# Filter out stations with no valid comparison.
df = df.dropna(how='all', axis=1)
# Format for display: two decimals, '--' where there is no comparison.
df = df.applymap('{:.2f}'.format).replace('nan', '--')

html = to_html(df.T)
# NOTE: the filename is a plain literal; the previous
# ``'mean_bias.html'.format(run_name)`` was a no-op format call.
fname = os.path.join(run_name, 'mean_bias.html')
save_html(fname, html)
html


Out[3]:
COAWST_4 HYCOM USF_ROMS
carocoops.cap2.buoy 1.12 1.03 --
carocoops.frp2.buoy 1.01 1.13 --
carocoops.sun2.buoy 0.81 0.61 --
cormp.ilm2.buoy 0.22 -- --
fau.lobo.1 8.34 -- --
fldep.binneydock. 35.96 -- --
fldep.stlucieinlet. 4.11 -- --
lbhmc.2ndave.pier 31.43 -- --
sccf.gulfofmexico.wq 1.50 -- 1.81
sccf.redfishpass.wq 0.16 -- 0.07
sccf.shellpoint.wq 7.97 -- 7.76
sccf.tarponbay.wq 2.76 -- 3.07
usf.c10.imet 1035.81 1035.81 1035.85
usf.shp.ngwlms 134.57 -- 134.16

Skill 2: Central Root Mean Squared Error

Root Mean Squared Error of the deviations from the mean.

$$ \text{CRMS} = \sqrt{\overline{\left(\mathbf{m'} - \mathbf{o'}\right)^2}}$$

where: $\mathbf{m'} = \mathbf{m} - \mathbf{\overline{m}}$ and $\mathbf{o'} = \mathbf{o} - \mathbf{\overline{o}}$


In [4]:
from utilities import rmse

dfs = load_secoora_ncs(run_name)

# Centered RMSE: the means are removed before computing the error.
df = apply_skill(dfs, rmse, remove_mean=True, filter_tides=False)
skill_score['rmse'] = df.copy()

# Filter out stations with no valid comparison.
df = df.dropna(how='all', axis=1)
# Format for display: two decimals, '--' where there is no comparison.
df = df.applymap('{:.2f}'.format).replace('nan', '--')

html = to_html(df.T)
# Plain literal filename (the previous ``.format(run_name)`` was a no-op).
fname = os.path.join(run_name, 'rmse.html')
save_html(fname, html)
html


Out[4]:
COAWST_4 HYCOM USF_ROMS
carocoops.cap2.buoy 0.40 0.34 --
carocoops.frp2.buoy 0.40 0.37 --
carocoops.sun2.buoy 0.17 0.15 --
cormp.ilm2.buoy 0.09 -- --
fau.lobo.1 0.78 -- --
fldep.binneydock. 0.04 -- --
fldep.stlucieinlet. 7.08 -- --
lbhmc.2ndave.pier 151.64 -- --
sccf.gulfofmexico.wq 0.57 -- 0.49
sccf.redfishpass.wq 0.13 -- 0.08
sccf.shellpoint.wq 2.52 -- 1.31
sccf.tarponbay.wq 0.40 -- 0.36
usf.c10.imet 0.03 0.06 0.03
usf.shp.ngwlms 0.03 -- 0.00

Skill 3: Squared Correlation (R$^2$)

In [5]:
from utilities import r2

dfs = load_secoora_ncs(run_name)

# R^2 of the mean-removed series, tides left in.
df = apply_skill(dfs, r2, remove_mean=True, filter_tides=False)
skill_score['r2'] = df.copy()

# Filter out stations with no valid comparison.
df = df.dropna(how='all', axis=1)
# Format for display: two decimals, '--' where there is no comparison.
df = df.applymap('{:.2f}'.format).replace('nan', '--')

html = to_html(df.T)
# Plain literal filename (the previous ``.format(run_name)`` was a no-op).
fname = os.path.join(run_name, 'r2.html')
save_html(fname, html)
html


Out[5]:
COAWST_4 HYCOM USF_ROMS
carocoops.cap2.buoy 0.01 -0.01 --
carocoops.frp2.buoy 0.01 -0.02 --
carocoops.sun2.buoy 0.08 -2.66 --
cormp.ilm2.buoy -0.01 -- --
fau.lobo.1 -0.05 -- --
fldep.binneydock. 0.00 -- --
fldep.stlucieinlet. 0.00 -- --
lbhmc.2ndave.pier 0.00 -- --
sccf.gulfofmexico.wq 0.01 -- 0.04
sccf.redfishpass.wq 0.08 -- 0.46
sccf.shellpoint.wq 0.00 -- 0.00
sccf.tarponbay.wq 0.02 -- 0.12
usf.c10.imet 0.00 0.00 0.00
usf.shp.ngwlms 0.00 -- 1.00

In [6]:
from utilities import r2

dfs = load_secoora_ncs(run_name)

# Same R^2 skill, but with the tidal signal filtered out first.
df = apply_skill(dfs, r2, remove_mean=True, filter_tides=True)
skill_score['low_pass_r2'] = df.copy()

# Filter out stations with no valid comparison.
df = df.dropna(how='all', axis=1)
# Format for display: two decimals, '--' where there is no comparison.
df = df.applymap('{:.2f}'.format).replace('nan', '--')

html = to_html(df.T)
# Plain literal filename (the previous ``.format(run_name)`` was a no-op).
fname = os.path.join(run_name, 'low_pass_r2.html')
save_html(fname, html)
html


Out[6]:
COAWST_4 HYCOM USF_ROMS
carocoops.cap2.buoy 0.02 -0.19 --
carocoops.frp2.buoy 0.05 0.02 --
carocoops.sun2.buoy 0.18 -1.48 --
cormp.ilm2.buoy -0.02 -- --
fau.lobo.1 -0.13 -- --
fldep.binneydock. 0.00 -- --
fldep.stlucieinlet. -0.00 -- --
lbhmc.2ndave.pier 0.00 -- --
sccf.gulfofmexico.wq 0.02 -- -0.05
sccf.redfishpass.wq 0.15 -- 0.40
sccf.shellpoint.wq -0.01 -- --
sccf.tarponbay.wq 0.02 -- 0.25
usf.c10.imet 0.00 0.00 0.00
usf.shp.ngwlms 0.00 -- --

Skill 4: Low passed and re-sampled (3H) R$^2$

https://github.com/ioos/secoora/issues/183


In [7]:
from utilities import r2

dfs = load_secoora_ncs(run_name)

# SABGOM dt = 3 hours: put every series on a common 3-hour grid before
# scoring.  NOTE(review): Panel-era pandas -- the axes are swapped so
# resample acts along the time axis, then swapped back.
dfs = dfs.swapaxes('items', 'major').resample('3H').swapaxes('items', 'major')

df = apply_skill(dfs, r2, remove_mean=True, filter_tides=False)
skill_score['low_pass_resampled_3H_r2'] = df.copy()

# Filter out stations with no valid comparison.
df = df.dropna(how='all', axis=1)
# Format for display: two decimals, '--' where there is no comparison.
df = df.applymap('{:.2f}'.format).replace('nan', '--')

html = to_html(df.T)
# Plain literal filename (the previous ``.format(run_name)`` was a no-op).
fname = os.path.join(run_name, 'low_pass_resampled_3H_r2.html')
save_html(fname, html)
html


Out[7]:
COAWST_4 HYCOM USF_ROMS
carocoops.cap2.buoy 0.01 -0.02 --
carocoops.frp2.buoy 0.01 -0.03 --
carocoops.sun2.buoy 0.09 -1.31 --
cormp.ilm2.buoy -0.01 -- --
fau.lobo.1 -0.06 -- --
fldep.binneydock. 0.00 -- --
fldep.stlucieinlet. 0.00 -- --
lbhmc.2ndave.pier 0.00 -- --
sccf.gulfofmexico.wq 0.01 -- 0.06
sccf.redfishpass.wq 0.10 -- 0.51
sccf.shellpoint.wq 0.00 -- 0.00
sccf.tarponbay.wq 0.01 -- 0.14
usf.c10.imet 0.00 0.00 0.00
usf.shp.ngwlms 0.00 -- 1.00

Save scores


In [8]:
# Persist the accumulated skill scores for downstream notebooks.
fname = os.path.join(run_name, 'skill_score.pkl')
with open(fname, 'wb') as stream:
    pickle.dump(skill_score, stream)

Normalized Taylor diagrams

The radius is the model standard deviation normalized by the observed standard deviation, the azimuthal angle is the arc-cosine of the cross-correlation (R), and the distance to the point (1, 0) on the abscissa is the centered RMS error.


In [9]:
%matplotlib inline
import matplotlib.pyplot as plt
from utilities.taylor_diagram import TaylorDiagram


def make_taylor(samples):
    """Draw a Taylor diagram for one station.

    Parameters
    ----------
    samples : DataFrame indexed by series name ('OBS_DATA' plus the
        model names), with columns 'std' (standard deviation) and
        'corr' (correlation against the observations).

    Returns
    -------
    fig : matplotlib Figure holding the diagram.

    Note: the 'OBS_DATA' row is dropped from ``samples`` in place.
    """
    fig = plt.figure(figsize=(9, 9))
    # The reference point is the observed standard deviation.
    dia = TaylorDiagram(samples['std']['OBS_DATA'],
                        fig=fig,
                        label="Observation")
    # Add the model samples to the Taylor diagram.
    # (An unused jet-colormap array was removed here: ``colors`` was
    # computed but never passed to add_sample.)
    samples.drop('OBS_DATA', inplace=True)
    for model, row in samples.iterrows():
        dia.add_sample(row['std'], row['corr'], marker='s', ls='',
                       label=model)
    # Add RMS contours, and label them.
    contours = dia.add_contours(colors='0.5')
    plt.clabel(contours, inline=1, fontsize=10)
    # Add a figure legend (the return value is not needed).
    kw = dict(prop=dict(size='small'), loc='upper right')
    fig.legend(dia.samplePoints,
               [p.get_label() for p in dia.samplePoints],
               numpoints=1, **kw)
    return fig

In [10]:
dfs = load_secoora_ncs(run_name)

# Bin and interpolate all series onto a common 3-hour grid.
# (The previous comment said "1 hour", but ``freq`` is '3H'.)
freq = '3H'
for station, df in list(dfs.iteritems()):
    # Resample, fill gaps by interpolation, and drop columns that are
    # all-NaN for this station.
    df = df.resample(freq).interpolate().dropna(axis=1)
    if 'OBS_DATA' in df:
        # Std of each series and its correlation with the observations.
        samples = DataFrame.from_dict(dict(std=df.std(),
                                           corr=df.corr()['OBS_DATA']))
    else:
        continue  # No observations at this station; nothing to compare.
    # Negative values cannot be placed on the diagram.
    samples[samples < 0] = np.NaN
    samples.dropna(inplace=True)
    if len(samples) <= 2:  # 1 obs 1 model.
        continue
    fig = make_taylor(samples)
    fig.savefig(os.path.join(run_name, '{}.png'.format(station)))
    plt.close(fig)  # Avoid memory blowup across many stations.