# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_interface_network_init():
    """
    test limitation of interface to certain networks
    """
    # Location of the multinetwork header_values test dataset.
    data_path = os.path.join(
        os.path.dirname(__file__),
        '..', 'test-data', 'ismn', 'multinetwork', 'header_values')

    # Limiting to one network must expose exactly that network.
    single = interface.ISMN_Interface(data_path, network=['SCAN'])
    networks = single.list_networks()
    assert networks.size == 1
    assert networks[0] == 'SCAN'

    # Limiting to two networks must expose both of them.
    double = interface.ISMN_Interface(data_path, network=['SCAN', 'MAQU'])
    assert double.list_networks().size == 2
def test_df_tcol_old_vs_new():
    """Old and new triple-collocation error implementations must agree."""
    n = 3
    frame = create_testdata(n, np.array(list(range(n))))
    new_err = df_metrics.tcol_error(frame)
    frame = frame.iloc[:, :n]
    err_x, err_y, err_z = df_metrics.old_tcol_error(frame)
    print(new_err)
    print(err_x, err_y, err_z)
    # The first triple of the new result must match the old per-dataset errors.
    new_vals = (new_err[0].ds0, new_err[0].ds1, new_err[0].ds2)
    for old_val, new_val in zip((err_x, err_y, err_z), new_vals):
        assert old_val == new_val
def test_apply():
    """pairwise_apply and nwise_apply(n=2) must produce identical results."""
    # Daily index over the (leap) year 2000 -> 366 observations.
    idx = pd.date_range(start='2000-01-01', end='2000-12-31', freq='D')
    df = pd.DataFrame(
        index=idx,
        data={'ds0': np.repeat(0, 366), 'ds1': np.repeat(1, 366)})

    old_matrix = df_metrics.pairwise_apply(df, bias)
    new_matrix = df_metrics.nwise_apply(df, bias, n=2, as_df=True)
    assert old_matrix.equals(new_matrix)

    # check if dict implementation and matrix implementation have same result
    as_dict = df_metrics.nwise_apply(df, bias, n=2, as_df=False)
    for key, value in as_dict.items():
        assert new_matrix.loc[key] == value
def df_snr(realdata=False, n=3):
    """
    Compute and print triple-collocation SNR, error standard deviation and
    scaling factors (beta) for a test DataFrame, and — for the 3-dataset
    case — the results of the original array-based implementation for
    comparison.

    Parameters
    ----------
    realdata : bool, optional
        If True, read real test data (NaNs dropped); otherwise generate
        synthetic test data.
    n : int, optional
        Number of datasets (columns) in the test DataFrame.
    """
    if realdata:
        df = read_realdata(n=n).dropna()
    else:
        df = create_testdata(n, np.array(list(range(n))))

    snr, err, beta = df_metrics.tcol_snr(df, ref_ind=0)
    print('snr')
    pprint(snr)
    print('err')
    pprint(err)
    print('beta')
    pprint(beta)
    print('------------------')

    # The original implementation only supports exactly three datasets.
    if n == 3:
        old_snr, old_err, old_beta = \
            metrics.tcol_snr(df.iloc[:, 0].values,
                             df.iloc[:, 1].values,
                             df.iloc[:, 2].values)
        print('old_snr')
        pprint(old_snr)
        print('old_err')
        # BUG FIX: the 'old_err' header was previously printed without its
        # value, and old_beta was computed but never shown at all.
        pprint(old_err)
        print('old_beta')
        pprint(old_beta)
def test_df_snr_err():
    """Check field names and selected values of the n-wise tcol_snr output."""
    n = 4
    df = create_testdata(n, np.array(list(range(n))))
    snr, err, beta = df_metrics.tcol_snr(df, ref_ind=0)
    assert len(snr) == len(err) == len(beta) == n

    # Each triple carries the same field names across snr, err and beta.
    expected_fields = [('ds0', 'ds1', 'ds2'),
                       ('ds0', 'ds1', 'ds3'),
                       ('ds0', 'ds2', 'ds3'),
                       ('ds1', 'ds2', 'ds3')]
    for i, fields in enumerate(expected_fields):
        assert snr[i]._fields == err[i]._fields == beta[i]._fields == fields

    # Spot-check a few values.
    np.testing.assert_almost_equal(snr[0].ds0, -7.9553239335)
    np.testing.assert_almost_equal(err[1].ds0, 0.2511626266)
    np.testing.assert_almost_equal(beta[2].ds0, 1.)  # must be 1 as there is no bias
    np.testing.assert_almost_equal(snr[3].ds3, np.nan)
def test_n_combinations():
    """n_combinations must yield sorted combinations, optionally filtered."""
    items = [1, 2, 3, 4]

    # Restricting to pairs that must include element 1.
    pairs = df_metrics.n_combinations(
        items, n=2, must_include=[1], permutations=False)
    assert pairs == [(1, 2), (1, 3), (1, 4)]

    # All unordered triples.
    triples = df_metrics.n_combinations(items, n=3, permutations=False)
    assert triples == [(1, 2, 3), (1, 2, 4), (1, 3, 4), (2, 3, 4)]
def test_BasicSeasonalMetrics():
    """
    Test BasicSeasonalMetrics.
    """
    data = make_some_data()[['ref', 'k1']]
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")  # many warnings due to test data
        calculator = MonthsMetricsAdapter(BasicMetrics(other_name='k1'))
        res = calculator.calc_metrics(data, gpi_info=(0, 0, 0))
    # All 366 observations are counted; rho is NaN on this test data.
    assert res['ALL_n_obs'] == np.array([366])
    assert np.isnan(res['ALL_rho'])
def test_BasicMetrics_calculator_metadata():
    """
    Test BasicMetrics with metadata.
    """
    data = make_some_data()[['ref', 'k1']]
    template = {'network': np.array(['None'], dtype='U256')}
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")  # many warnings due to test data
        calculator = BasicMetrics(other_name='k1', calc_tau=False,
                                  metadata_template=template)
        res = calculator.calc_metrics(
            data, gpi_info=(0, 0, 0, {'network': 'SOILSCAPE'}))

    expected = {
        'network': np.array(['SOILSCAPE'], dtype='U256'),
        'n_obs': np.array([366]),
        'RMSD': np.array([0.2], dtype='float32'),
        'BIAS': np.array([-0.2], dtype='float32'),
    }
    for key in ('n_obs', 'RMSD', 'BIAS', 'network'):
        assert res[key] == expected[key]
    assert np.isnan(res['rho'])
    assert np.isnan(res['R'])
# depends on scipy version changed after v1.2.1
def test_BasicSeasonalMetrics_metadata():
    """
    Test BasicSeasonalMetrics with metadata.
    """
    data = make_some_data()[['ref', 'k1']]
    template = {'network': np.array(['None'], dtype='U256')}
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")  # many warnings due to test data
        calculator = MonthsMetricsAdapter(BasicMetrics(
            other_name='k1', metadata_template=template))
        res = calculator.calc_metrics(
            data, gpi_info=(0, 0, 0, {'network': 'SOILSCAPE'}))
    # The metadata from gpi_info must be propagated into the result.
    assert res['network'] == np.array(['SOILSCAPE'], dtype='U256')
# NOTE(review): the lines below are a truncated fragment — the enclosing
# test-function definition and the opening of the `tst_results` dict
# (whose tail starts at the 'rho' entry) are missing from this chunk,
# so the code cannot be repaired without the original context.
# Left byte-identical; TODO restore the missing function header.
'rho': np.array([1.], dtype=np.float32),
'lat': np.array([4.]),
'R': np.array([1.], dtype=np.float32),
'p_R': np.array([0.], dtype=np.float32)}}
datasets = setup_TestDatasets()
dm = DataManager(datasets, 'DS1', read_ts_names={d: 'read' for d in ['DS1', 'DS2', 'DS3']})
process = Validation(
dm, 'DS1',
temporal_matcher=temporal_matchers.BasicTemporalMatching(
window=1 / 24.0).combinatory_matcher,
scaling='lin_cdf_match',
metrics_calculators={
(2, 2): metrics_calculators.BasicMetrics(other_name='k1').calc_metrics})
jobs = process.get_processing_jobs()
for job in jobs:
results = process.calc(*job)
assert sorted(list(results)) == sorted(list(tst_results))