Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
default: True
** metric_kwargs (dict): additional keywords to be passed to metric
(see the arguments required for a given metric in :ref:`Metrics`).
Returns:
pers (xarray object): Results of persistence forecast with the input metric
applied.
Reference:
* Chapter 8 (Short-Term Climate Prediction) in Van den Dool, Huug.
Empirical methods in short-term climate prediction.
Oxford University Press, 2007.
"""
# Check that init is int, cftime, or datetime; convert ints or cftime to datetime.
hind = convert_time_index(hind, 'init', 'hind[init]')
verif = convert_time_index(verif, 'time', 'verif[time]')
# Put this after `convert_time_index` since it assigns 'years' attribute if the
# `init` dimension is a `float` or `int`.
has_valid_lead_units(hind)
# get metric/comparison function name, not the alias
metric = METRIC_ALIASES.get(metric, metric)
# get class metric(Metric)
metric = get_metric_class(metric, DETERMINISTIC_HINDCAST_METRICS)
if metric.probabilistic:
raise ValueError(
'probabilistic metric ',
metric.name,
'cannot compute persistence forecast.',
)
def __init__(self, xobj):
    """Build a prediction ensemble around an initialized forecast dataset.

    Args:
        xobj (xarray object): Dataset/DataArray carrying ``init`` and
            ``lead`` dimensions with the initialized forecasts.
    """
    # Promote a DataArray to a Dataset so every downstream prediction
    # function can assume a Dataset interface.
    if isinstance(xobj, xr.DataArray):
        xobj = xobj.to_dataset()
    has_dims(xobj, ['init', 'lead'], 'PredictionEnsemble')
    # Normalize `init` (int/cftime/datetime) to datetime. Must run before
    # the lead-unit check, which relies on the 'years' attribute this call
    # assigns when `init` arrives as float/int.
    xobj = convert_time_index(xobj, 'init', 'xobj[init]')
    has_valid_lead_units(xobj)
    # Seed the dataset registry; the 'uninitialized' slot is populated later.
    self._datasets = {'initialized': xobj, 'uninitialized': {}}
    self.kind = 'prediction'
    self._temporally_smoothed = None
    self._is_annual_lead = None
def add_observations(self, xobj, name):
    """Add a verification data with which to verify the initialized ensemble.

    Args:
        xobj (xarray object): Dataset/DataArray to append to the
            ``HindcastEnsemble`` object.
        name (str): Short name for referencing the verification data.

    Returns:
        HindcastEnsemble: a new object with the observations attached
        (``self`` is not mutated).
    """
    if isinstance(xobj, xr.DataArray):
        xobj = xobj.to_dataset()
    match_initialized_dims(self._datasets['initialized'], xobj)
    match_initialized_vars(self._datasets['initialized'], xobj)
    # Check that time is int, cftime, or datetime; convert ints or cftime to
    # datetime. Label fixed from 'xobj[init]' to 'xobj[time]': the dimension
    # being converted here is 'time', and the label only feeds error messages.
    xobj = convert_time_index(xobj, 'time', 'xobj[time]')
    # Copy the nested dictionaries before updating so the new object does not
    # share (or mutate) state with self._datasets.
    datasets_obs = self._datasets['observations'].copy()
    datasets = self._datasets.copy()
    datasets_obs.update({name: xobj})
    datasets.update({'observations': datasets_obs})
    return self._construct_direct(datasets, kind='hindcast')
- same_verif: slice to a common/consistent verification time frame prior to
computing metric. This philosophy follows the thought that each lead
should be based on the same set of verification dates.
add_attrs (bool): write climpred compute args to attrs. default: True
**metric_kwargs (dict): additional keywords to be passed to metric
(see the arguments required for a given metric in :ref:`Metrics`).
Returns:
result (xarray object):
Verification metric over ``lead`` reduced by dimension(s) ``dim``.
"""
metric, comparison, dim = _get_metric_comparison_dim(
metric, comparison, dim, kind='hindcast'
)
hind = convert_time_index(hind, 'init', 'hind[init]')
verif = convert_time_index(verif, 'time', 'verif[time]')
has_valid_lead_units(hind)
forecast, verif = comparison.function(hind, verif, metric=metric)
# think in real time dimension: real time = init + lag
forecast = forecast.rename({'init': 'time'})
inits, verif_dates = return_inits_and_verif_dates(
forecast, verif, alignment=alignment
)
log_compute_hindcast_header(metric, comparison, dim, alignment)
metric_over_leads = [
_apply_metric_at_given_lead(
verif,
- same_inits: slice to a common init frame prior to computing
metric. This philosophy follows the thought that each lead should be based
on the same set of initializations.
- same_verif: slice to a common/consistent verification time frame prior to
computing metric. This philosophy follows the thought that each lead
should be based on the same set of verification dates.
add_attrs (bool): write climpred compute args to attrs. default: True
** metric_kwargs (dict): additional keywords to be passed to metric
Returns:
u (xarray object): Results from comparison at the first lag.
"""
# Check that init is int, cftime, or datetime; convert ints or cftime to datetime.
hind = convert_time_index(hind, 'init', 'hind[init]')
uninit = convert_time_index(uninit, 'time', 'uninit[time]')
verif = convert_time_index(verif, 'time', 'verif[time]')
has_valid_lead_units(hind)
# get metric/comparison function name, not the alias
metric = METRIC_ALIASES.get(metric, metric)
comparison = COMPARISON_ALIASES.get(comparison, comparison)
comparison = get_comparison_class(comparison, HINDCAST_COMPARISONS)
metric = get_metric_class(metric, DETERMINISTIC_HINDCAST_METRICS)
forecast, verif = comparison.function(uninit, verif, metric=metric)
hind = hind.rename({'init': 'time'})
_, verif_dates = return_inits_and_verif_dates(hind, verif, alignment=alignment)
plag = []
``verif`` to a common time frame at each lead.
- same_inits: slice to a common init frame prior to computing
metric. This philosophy follows the thought that each lead should be based
on the same set of initializations.
- same_verif: slice to a common/consistent verification time frame prior to
computing metric. This philosophy follows the thought that each lead
should be based on the same set of verification dates.
add_attrs (bool): write climpred compute args to attrs. default: True
** metric_kwargs (dict): additional keywords to be passed to metric
Returns:
u (xarray object): Results from comparison at the first lag.
"""
# Check that init is int, cftime, or datetime; convert ints or cftime to datetime.
hind = convert_time_index(hind, 'init', 'hind[init]')
uninit = convert_time_index(uninit, 'time', 'uninit[time]')
verif = convert_time_index(verif, 'time', 'verif[time]')
has_valid_lead_units(hind)
# get metric/comparison function name, not the alias
metric = METRIC_ALIASES.get(metric, metric)
comparison = COMPARISON_ALIASES.get(comparison, comparison)
comparison = get_comparison_class(comparison, HINDCAST_COMPARISONS)
metric = get_metric_class(metric, DETERMINISTIC_HINDCAST_METRICS)
forecast, verif = comparison.function(uninit, verif, metric=metric)
hind = hind.rename({'init': 'time'})
_, verif_dates = return_inits_and_verif_dates(hind, verif, alignment=alignment)
** metric_kwargs (dict): additional keywords to be passed to metric
(see the arguments required for a given metric in :ref:`Metrics`).
Returns:
pers (xarray object): Results of persistence forecast with the input metric
applied.
Reference:
* Chapter 8 (Short-Term Climate Prediction) in Van den Dool, Huug.
Empirical methods in short-term climate prediction.
Oxford University Press, 2007.
"""
# Check that init is int, cftime, or datetime; convert ints or cftime to datetime.
hind = convert_time_index(hind, 'init', 'hind[init]')
verif = convert_time_index(verif, 'time', 'verif[time]')
# Put this after `convert_time_index` since it assigns 'years' attribute if the
# `init` dimension is a `float` or `int`.
has_valid_lead_units(hind)
# get metric/comparison function name, not the alias
metric = METRIC_ALIASES.get(metric, metric)
# get class metric(Metric)
metric = get_metric_class(metric, DETERMINISTIC_HINDCAST_METRICS)
if metric.probabilistic:
raise ValueError(
'probabilistic metric ',
metric.name,
'cannot compute persistence forecast.',
)
# If lead 0, need to make modifications to get proper persistence, since persistence
metric. This philosophy follows the thought that each lead should be based
on the same set of initializations.
- same_verif: slice to a common/consistent verification time frame prior to
computing metric. This philosophy follows the thought that each lead
should be based on the same set of verification dates.
add_attrs (bool): write climpred compute args to attrs. default: True
** metric_kwargs (dict): additional keywords to be passed to metric
Returns:
u (xarray object): Results from comparison at the first lag.
"""
# Check that init is int, cftime, or datetime; convert ints or cftime to datetime.
hind = convert_time_index(hind, 'init', 'hind[init]')
uninit = convert_time_index(uninit, 'time', 'uninit[time]')
verif = convert_time_index(verif, 'time', 'verif[time]')
has_valid_lead_units(hind)
# get metric/comparison function name, not the alias
metric = METRIC_ALIASES.get(metric, metric)
comparison = COMPARISON_ALIASES.get(comparison, comparison)
comparison = get_comparison_class(comparison, HINDCAST_COMPARISONS)
metric = get_metric_class(metric, DETERMINISTIC_HINDCAST_METRICS)
forecast, verif = comparison.function(uninit, verif, metric=metric)
hind = hind.rename({'init': 'time'})
_, verif_dates = return_inits_and_verif_dates(hind, verif, alignment=alignment)
plag = []
# TODO: Refactor this, getting rid of `compute_uninitialized` completely.
def add_uninitialized(self, xobj):
    """Add a companion uninitialized ensemble for comparison to verification data.

    Args:
        xobj (xarray object): Dataset/DataArray of the uninitialzed
            ensemble.

    Returns:
        HindcastEnsemble: a new object with the uninitialized ensemble
        attached (``self`` is not mutated).
    """
    if isinstance(xobj, xr.DataArray):
        xobj = xobj.to_dataset()
    match_initialized_dims(self._datasets['initialized'], xobj, uninitialized=True)
    match_initialized_vars(self._datasets['initialized'], xobj)
    # Check that time is int, cftime, or datetime; convert ints or cftime to
    # datetime. Label fixed from 'xobj[init]' to 'xobj[time]': the dimension
    # being converted here is 'time', and the label only feeds error messages.
    xobj = convert_time_index(xobj, 'time', 'xobj[time]')
    # Shallow-copy so the returned object does not share the registry dict.
    datasets = self._datasets.copy()
    datasets.update({'uninitialized': xobj})
    return self._construct_direct(datasets, kind='hindcast')
def add_control(self, xobj):
    """Add the control run that initialized the climate prediction
    ensemble.

    Args:
        xobj (xarray object): Dataset/DataArray of the control run.

    Returns:
        PerfectModelEnsemble: a new object with the control run attached
        (``self`` is not mutated).
    """
    # NOTE: These should all be decorators.
    if isinstance(xobj, xr.DataArray):
        xobj = xobj.to_dataset()
    match_initialized_dims(self._datasets['initialized'], xobj)
    match_initialized_vars(self._datasets['initialized'], xobj)
    # Check that time is int, cftime, or datetime; convert ints or cftime to
    # datetime. Label fixed from 'xobj[init]' to 'xobj[time]': the dimension
    # being converted here is 'time', and the label only feeds error messages.
    xobj = convert_time_index(xobj, 'time', 'xobj[time]')
    # Shallow-copy so the returned object does not share the registry dict.
    datasets = self._datasets.copy()
    datasets.update({'control': xobj})
    return self._construct_direct(datasets, kind='perfect')