How to use the pyhf.infer module in pyhf

To help you get started, we’ve selected a few pyhf.infer examples based on popular ways it is used in public projects.

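Before the project examples, here is a minimal, self-contained sketch of a hypothesis test with pyhf.infer.hypotest. The one-bin model and observed count are invented for illustration, and the simplemodels helper name reflects older pyhf releases; check your version's documentation for the current API:

import pyhf

# One-bin counting experiment: signal expectation, background expectation,
# and the absolute uncertainty on the background
model = pyhf.simplemodels.hepdata_like(
    signal_data=[12.0], bkg_data=[50.0], bkg_uncerts=[3.0]
)
observations = [51.0]
data = observations + model.config.auxdata  # observed counts plus auxiliary data

# CLs for the signal-strength hypothesis mu = 1.0, with the expected band
CLs_obs, CLs_exp_set = pyhf.infer.hypotest(
    1.0, data, model, return_expected_set=True
)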

From scikit-hep/pyhf, tests/test_validation.py:

def validate_hypotest(pdf, data, mu_test, expected_result, tolerance=1e-6):
    init_pars = pdf.config.suggested_init()
    par_bounds = pdf.config.suggested_bounds()

    CLs_obs, CLs_exp_set = pyhf.infer.hypotest(
        mu_test,
        data,
        pdf,
        init_pars,
        par_bounds,
        return_expected_set=True,
        qtilde=False,
    )

    assert abs(CLs_obs - expected_result['obs']) / expected_result['obs'] < tolerance
    for result, expected in zip(CLs_exp_set, expected_result['exp']):
        assert abs(result - expected) / expected < tolerance
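
With return_expected_set=True, hypotest returns the observed CLs together with the five expected CLs values at the -2σ, -1σ, nominal, +1σ, and +2σ points of the background-only band, which is what the loop above compares against the reference results.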

From scikit-hep/pyhf, tests/benchmarks/test_benchmark.py:

def hypotest(pdf, data):
    return pyhf.infer.hypotest(
        1.0,
        data,
        pdf,
        pdf.config.suggested_init(),
        pdf.config.suggested_bounds(),
        return_tail_probs=True,
        return_expected=True,
        return_expected_set=True,
        return_test_statistics=True,
    )
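
Each return_* flag adds another element to hypotest's return value: the tail probabilities CLs+b and CLb, the median expected CLs, the expected CLs band, and the underlying test statistics. Requesting everything in one call makes this a convenient benchmark of the full inference path.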

From scikit-hep/pyhf, tests/test_public_api.py:

def test_hypotest(backend, model_setup):
    model, data, init_pars = model_setup
    mu = 1.0
    pyhf.infer.hypotest(
        mu,
        data,
        model,
        init_pars,
        model.config.suggested_bounds(),
        return_expected_set=True,
        return_test_statistics=True,
    )
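
This public-API test runs the same hypotest call under each tensor backend supplied by the backend fixture; the extra keyword flags only extend the returned tuple.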

From scikit-hep/pyhf, tests/test_regression.py:

def calculate_CLs(bkgonly_json, signal_patch_json):
    """
    Calculate the observed CLs value and the expected CLs band.

    Args:
        bkgonly_json: The JSON for the background-only model
        signal_patch_json: The JSON Patch for the signal model

    Returns:
        CLs_obs: The observed CLs value
        CLs_exp: List of the expected CLs value band
    """
    workspace = pyhf.workspace.Workspace(bkgonly_json)
    model = workspace.model(
        measurement_name=None,
        patches=[signal_patch_json],
        modifier_settings={
            'normsys': {'interpcode': 'code4'},
            'histosys': {'interpcode': 'code4p'},
        },
    )
    result = pyhf.infer.hypotest(
        1.0, workspace.data(model), model, qtilde=True, return_expected_set=True
    )
    return result[0].tolist()[0], result[-1].ravel().tolist()
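
Here the model is built from a serialized background-only workspace plus a JSON Patch that adds the signal, with modifier_settings selecting the HistFactory interpolation codes (code4 for normsys, code4p for histosys). The test statistic is switched to q-tilde via qtilde=True.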

From scikit-hep/pyhf, tests/test_backend_consistency.py:

import numpy as np
import tensorflow as tf
import pyhf

# `source` and `pdf` are defined earlier in the test
data = source['bindata']['data'] + pdf.config.auxdata

backends = [
    pyhf.tensor.numpy_backend(),
    pyhf.tensor.tensorflow_backend(session=tf.compat.v1.Session()),
    pyhf.tensor.pytorch_backend(),
]

test_statistic = []
for backend in backends:
    if backend.name == 'tensorflow':
        tf.compat.v1.reset_default_graph()
        backend.session = tf.compat.v1.Session()
    pyhf.set_backend(backend)

    q_mu = pyhf.infer.hypotest(
        1.0,
        data,
        pdf,
        pdf.config.suggested_init(),
        pdf.config.suggested_bounds(),
        return_test_statistics=True,
    )[-1][0]
    test_statistic.append(pyhf.tensorlib.tolist(q_mu))

# compare each tensor backend to the NumPy/SciPy result
test_statistic = np.array(test_statistic)
numpy_ratio = np.divide(test_statistic, test_statistic[0])
numpy_ratio_delta_unity = np.absolute(np.subtract(numpy_ratio, 1))

# compare the tensor libraries (TensorFlow, PyTorch) to each other
tensors_ratio = np.divide(test_statistic[1], test_statistic[2])
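
The session bookkeeping above is specific to the graph-based TensorFlow 1.x backend. As a sketch, in more recent pyhf releases backends are eager and can be selected by name (the exact names and versions are an assumption to verify against your release):

import pyhf

# Select a tensor backend by name; no session management is required
pyhf.set_backend("numpy")       # the default backend
pyhf.set_backend("tensorflow")  # or "pytorch", "jax"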

From scikit-hep/pyhf, tests/test_validation.py:

init_pars_before = pdf_before.config.suggested_init()
init_pars_after = pdf_after.config.suggested_init()
assert init_pars_before == init_pars_after

par_bounds_before = pdf_before.config.suggested_bounds()
par_bounds_after = pdf_after.config.suggested_bounds()
assert par_bounds_before == par_bounds_after

CLs_obs_before, CLs_exp_set_before = pyhf.infer.hypotest(
    1,
    data_before,
    pdf_before,
    init_pars_before,
    par_bounds_before,
    return_expected_set=True,
)
CLs_obs_after, CLs_exp_set_after = pyhf.infer.hypotest(
    1,
    data_after,
    pdf_after,
    init_pars_after,
    par_bounds_after,
    return_expected_set=True,
)

tolerance = 1e-6
assert abs(CLs_obs_after - CLs_obs_before) / CLs_obs_before < tolerance
for result, expected_result in zip(CLs_exp_set_after, CLs_exp_set_before):
    assert abs(result - expected_result) / expected_result < tolerance
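
pdf_before and pdf_after are two constructions of the same model, for example before and after a serialization round trip; the test first checks that the suggested initial parameters and bounds agree, then that the observed and expected CLs values match to a relative tolerance of 1e-6.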

From scikit-hep/pyhf, tests/test_optim.py:

def test_optim_uncerts(backend, source, spec, mu):
    pdf = pyhf.Model(spec)
    data = source['bindata']['data'] + pdf.config.auxdata

    init_pars = pdf.config.suggested_init()
    par_bounds = pdf.config.suggested_bounds()

    optim = pyhf.optimizer

    result = optim.minimize(pyhf.infer.mle.twice_nll, data, pdf, init_pars, par_bounds)
    assert pyhf.tensorlib.tolist(result)

    result = optim.minimize(
        pyhf.infer.mle.twice_nll,
        data,
        pdf,
        init_pars,
        par_bounds,
        [(pdf.config.poi_index, mu)],
        return_uncertainties=True,
    )
    assert result.shape[1] == 2
    assert pyhf.tensorlib.tolist(result)
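
With return_uncertainties=True the optimizer returns a (value, uncertainty) pair for every parameter, which is why the result is asserted to have a second dimension of size 2.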

From scikit-hep/pyhf, tests/test_optim.py:

def test_optim_with_value(backend, source, spec, mu):
    pdf = pyhf.Model(spec)
    data = source['bindata']['data'] + pdf.config.auxdata

    init_pars = pdf.config.suggested_init()
    par_bounds = pdf.config.suggested_bounds()

    optim = pyhf.optimizer

    result = optim.minimize(pyhf.infer.mle.twice_nll, data, pdf, init_pars, par_bounds)
    assert pyhf.tensorlib.tolist(result)

    result, fitted_val = optim.minimize(
        pyhf.infer.mle.twice_nll,
        data,
        pdf,
        init_pars,
        par_bounds,
        [(pdf.config.poi_index, mu)],
        return_fitted_val=True,
    )
    assert pyhf.tensorlib.tolist(result)
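
The twice_nll calls above drive the optimizer directly. Recent pyhf releases also expose higher-level maximum-likelihood helpers in pyhf.infer.mle that wrap these minimizations; a minimal sketch, reusing the toy model from the first example (treat the helper names as version-dependent assumptions):

import pyhf

model = pyhf.simplemodels.hepdata_like(
    signal_data=[12.0], bkg_data=[50.0], bkg_uncerts=[3.0]
)
data = [51.0] + model.config.auxdata

# Unconditional maximum-likelihood fit of all parameters
bestfit_pars = pyhf.infer.mle.fit(data, model)

# Conditional fit with the parameter of interest fixed to mu = 1.0
constrained_pars = pyhf.infer.mle.fixed_poi_fit(1.0, data, model)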