How to use the pytesmo.metrics module in pytesmo

To help you get started, we've selected a few pytesmo examples based on popular ways the library is used in public projects.
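
As a minimal, self-contained sketch (synthetic arrays, not taken from the selected examples below), the pytesmo.metrics functions operate on two paired numpy arrays:

import numpy as np
from pytesmo import metrics

# two small paired samples, values are illustrative only
x = np.array([0.22, 0.31, 0.25, 0.27, 0.30])
y = np.array([0.20, 0.33, 0.24, 0.29, 0.28])

print("RMSD:", metrics.rmsd(x, y))                # root-mean-square difference
print("Bias:", metrics.bias(x, y))                # mean difference between x and y
print("Pearson (R, p):", metrics.pearsonr(x, y))  # correlation and p-value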


github TUW-GEO/pytesmo: tests/tc_metrics.py
import numpy as np
from pytesmo import df_metrics, metrics

def old_pairwise_apply():
    # create_testdata is a helper defined earlier in the same test module
    df = create_testdata(3, np.array([1., 2., 3.]))
    method = metrics.pearsonr
    result = df_metrics.pairwise_apply(df, method, comm=False)
    named_tuple = df_metrics._to_namedtuple(result[0], 'R')
    return named_tuple
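
The test above goes through the pairwise machinery directly; the more common entry point is to call a df_metrics function on a pandas DataFrame, which evaluates the metric for every combination of columns. A hedged sketch with synthetic data (column names are illustrative):

import numpy as np
import pandas as pd
from pytesmo import df_metrics

# three overlapping "datasets" as columns of one DataFrame
rng = np.random.default_rng(0)
base = rng.normal(size=100)
df = pd.DataFrame({
    'ds1': base,
    'ds2': base + rng.normal(scale=0.1, size=100),
    'ds3': base + rng.normal(scale=0.3, size=100),
})

# Pearson correlation for every column pair, returned as named tuples
print(df_metrics.pearsonr(df))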
github TUW-GEO/pytesmo: docs/examples/00_more_examples/compare_ISMN_ASCAT.py
        plt.ylabel(label_insitu)
        plt.show()
        
        # calculate correlation coefficients, RMSD, bias, Nash Sutcliffe
        x, y = scaled_data[label_ascat].values, scaled_data[label_insitu].values
        
        print("ISMN time series:", ISMN_time_series)
        print("compared to")
        print(ascat_time_series)
        print("Results:")
        
        # df_metrics takes a DataFrame as input and automatically
        # calculates the metric on all combinations of columns
        # returns a named tuple for easy printing
        print(df_metrics.pearsonr(scaled_data))
        print("Spearman's (rho,p_value)", metrics.spearmanr(x, y))
        print("Kendalls's (tau,p_value)", metrics.kendalltau(x, y))
        print(df_metrics.kendalltau(scaled_data))
        print(df_metrics.rmsd(scaled_data))
        print("Bias", metrics.bias(x, y))
        print("Nash Sutcliffe", metrics.nash_sutcliffe(x, y))
        
        
    i += 1

    # only show the first 2 stations, otherwise this program would run a long time
    # and produce a lot of plots
    if i >= 2:
        break
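
The example above depends on ISMN and ASCAT readers, but the metric calls themselves work on any two aligned series. A self-contained sketch with synthetic data (the column names and values are illustrative, not from the tutorial):

import numpy as np
import pandas as pd
from pytesmo import df_metrics, metrics

# stand-ins for the scaled satellite and in-situ soil moisture series
rng = np.random.default_rng(42)
insitu = rng.uniform(0.1, 0.4, size=365)
ascat = insitu + rng.normal(scale=0.02, size=365)
scaled_data = pd.DataFrame({'ASCAT': ascat, 'insitu': insitu})
x, y = scaled_data['ASCAT'].values, scaled_data['insitu'].values

print(df_metrics.pearsonr(scaled_data))
print("Spearman's (rho, p_value)", metrics.spearmanr(x, y))
print("Kendall's (tau, p_value)", metrics.kendalltau(x, y))
print(df_metrics.rmsd(scaled_data))
print("Bias", metrics.bias(x, y))
print("Nash Sutcliffe", metrics.nash_sutcliffe(x, y))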
github TUW-GEO/pytesmo: src/pytesmo/validation_framework/metric_calculators.py
    def calc_metrics(self, data, gpi_info):
        dataset = super(BasicMetricsPlusMSE, self).calc_metrics(data, gpi_info)
        if len(data) < 10:
            return dataset
        x, y = data['ref'].values, data[self.other_name].values
        mse, mse_corr, mse_bias, mse_var = metrics.mse(x, y)
        dataset['mse'][0] = mse
        dataset['mse_corr'][0] = mse_corr
        dataset['mse_bias'][0] = mse_bias
        dataset['mse_var'][0] = mse_var

        return dataset
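
As used in the snippet above, metrics.mse returns the total mean square error together with its correlation, bias, and variance components, and the three components sum to the total. A quick check on synthetic arrays (illustrative only; depending on your pytesmo version this function may emit a deprecation warning):

import numpy as np
from pytesmo import metrics

rng = np.random.default_rng(1)
x = rng.normal(size=200)
y = 0.8 * x + rng.normal(scale=0.5, size=200)

mse, mse_corr, mse_bias, mse_var = metrics.mse(x, y)
# the decomposition should add up to the total MSE
print(mse, mse_corr + mse_bias + mse_var)
print(np.isclose(mse, mse_corr + mse_bias + mse_var))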
github TUW-GEO/pytesmo: src/pytesmo/validation_framework/metric_calculators.py
    def calc_metrics(self, data, gpi_info):
        """
        Parameters
        ----------
        data : pandas.DataFrame
        gpi_info : tuple
            of (gpi, lon, lat)

        Notes
        -----
        Kendall's tau calculation is optional at the moment
        because the scipy implementation is very slow, which is
        problematic for global comparisons.
        """
        dataset = super(BasicMetrics, self).calc_metrics(data, gpi_info)

        if len(data) < 10:
            return dataset

        x, y = data['ref'].values, data[self.other_name].values
        R, p_R = metrics.pearsonr(x, y)
        rho, p_rho = metrics.spearmanr(x, y)
        RMSD = metrics.rmsd(x, y)
        BIAS = metrics.bias(x, y)

        dataset['R'][0], dataset['p_R'][0] = R, p_R
        dataset['rho'][0], dataset['p_rho'][0] = rho, p_rho
        dataset['RMSD'][0] = RMSD
        dataset['BIAS'][0] = BIAS
        dataset['n_obs'][0] = len(data)

        if self.calc_tau:
            tau, p_tau = metrics.kendalltau(x, y)
            dataset['tau'][0], dataset['p_tau'][0] = tau, p_tau

        return dataset
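
A hedged sketch of driving this calculator outside the validation framework: assuming the BasicMetrics constructor accepts other_name and calc_tau keyword arguments (as the attributes used above suggest), it can be fed a DataFrame with a 'ref' column plus the other dataset's column, and a (gpi, lon, lat) tuple. The column names and coordinates below are illustrative:

import numpy as np
import pandas as pd
from pytesmo.validation_framework.metric_calculators import BasicMetrics

rng = np.random.default_rng(7)
ref = rng.uniform(0.1, 0.4, size=100)
other = ref + rng.normal(scale=0.02, size=100)
data = pd.DataFrame({'ref': ref, 'k1': other})

# assumed keyword arguments; check the class signature in your pytesmo version
calc = BasicMetrics(other_name='k1', calc_tau=True)
result = calc.calc_metrics(data, gpi_info=(0, 16.37, 48.21))
print(result['R'], result['RMSD'], result['tau'])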