How to use elfi.Summary in elfi

To help you get started, we've selected a few elfi.Summary examples, drawn from the ways it is used in public projects; all of the snippets below come from the elfi-dev/elfi repository itself.

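Before the project snippets, here is a minimal, self-contained sketch of where elfi.Summary fits in an ELFI model. The toy Gaussian simulator, the parameter name mu and the prior bounds are illustrative choices rather than code from the projects below; the API calls follow the same patterns you will see in the snippets.

from functools import partial

import numpy as np
import scipy.stats as ss
import elfi

def gauss_simulator(mu, batch_size=1, random_state=None):
    # Draw batch_size datasets of 50 observations from N(mu, 1).
    mu = np.asanyarray(mu).reshape((-1, 1))
    return ss.norm.rvs(mu, 1, size=(batch_size, 50), random_state=random_state)

# "Observed" data generated with a known mu, for this toy example only.
y_obs = gauss_simulator([2.0], random_state=np.random.RandomState(0))

m = elfi.ElfiModel()
elfi.Prior('uniform', 0, 5, model=m, name='mu')
elfi.Simulator(gauss_simulator, m['mu'], observed=y_obs, name='sim')

# elfi.Summary wraps a plain Python function; ELFI applies it to both the
# simulated batches and the observed data.
elfi.Summary(partial(np.mean, axis=1), m['sim'], name='mean')
elfi.Summary(partial(np.var, axis=1), m['sim'], name='var')

# The summaries feed a distance node, which an inference method then minimises.
elfi.Distance('euclidean', m['mean'], m['var'], name='d')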

github elfi-dev / elfi / tests / old_unit / test_core_persistence.py
def run_local_object_cache_test(self, local_store):
    sleep_time = .2
    simfn = get_sleep_simulator(sleep_time)
    sim = elfi.Simulator("sim", simfn, observed=0, store=local_store)
    run_cache_test(sim, sleep_time)
    assert local_store._read_data(sim.id, 0)[0] == 1

    # Test that nodes derived from `sim` benefit from the storing
    summ = elfi.Summary("sum", lambda x: x, sim)
    t0 = timeit.default_timer()
    res = summ.acquire(1).compute()
    td = timeit.default_timer() - t0
    assert td < sleep_time
    assert res[0][0] == 1

    elfi.env.client().shutdown()
github elfi-dev / elfi / tests / old_unit / test_core.py
# summary
def mock_summary(x):
    exp_in_dims = in_dims
    if len(exp_in_dims) == 0:
        exp_in_dims = (1,)
    if x.shape == (n_samples,) + exp_in_dims:
        # simulation data
        return np.zeros((n_samples,) + out_dims)
    elif x.shape == (1,) + exp_in_dims:
        # observation data
        return np.zeros((1,) + out_dims)
    assert False
# model
mock = MockSimulator(ret)
si = elfi.Simulator("si", mock, None, observed=obs)
su = elfi.Summary("su", mock_summary, si)
res = su.generate(n_samples).compute()
exp_out_dims = out_dims
if len(exp_out_dims) == 0:
    exp_out_dims = (1,)
assert res.shape == (n_samples,) + exp_out_dims
elfi.new_inference_task()
github elfi-dev / elfi / elfi / examples / ricker.py
    if true_params is None:
        true_params = [3.8]

    m = elfi.ElfiModel()
    y_obs = simulator(*true_params, n_obs=n_obs, random_state=np.random.RandomState(seed_obs))
    sim_fn = partial(simulator, n_obs=n_obs)
    sumstats = []

    if stochastic:
        elfi.Prior(ss.expon, np.e, 2, model=m, name='t1')
        elfi.Prior(ss.truncnorm, 0, 5, model=m, name='t2')
        elfi.Prior(ss.uniform, 0, 100, model=m, name='t3')
        elfi.Simulator(sim_fn, m['t1'], m['t2'], m['t3'], observed=y_obs, name='Ricker')
        sumstats.append(elfi.Summary(partial(np.mean, axis=1), m['Ricker'], name='Mean'))
        sumstats.append(elfi.Summary(partial(np.var, axis=1), m['Ricker'], name='Var'))
        sumstats.append(elfi.Summary(num_zeros, m['Ricker'], name='#0'))
        elfi.Discrepancy(chi_squared, *sumstats, name='d')

    else:  # very simple deterministic case
        elfi.Prior(ss.expon, np.e, model=m, name='t1')
        elfi.Simulator(sim_fn, m['t1'], observed=y_obs, name='Ricker')
        sumstats.append(elfi.Summary(partial(np.mean, axis=1), m['Ricker'], name='Mean'))
        elfi.Distance('euclidean', *sumstats, name='d')

    return m
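
The model-building helpers in these example modules return an ElfiModel in which the Summary nodes feed the discrepancy node 'd'. Below is a hedged sketch of how such a model is typically used afterwards; the helper name get_model() (standard in elfi example modules), the batch size, seed and simulation count are assumptions, not part of the excerpt above.

import elfi
from elfi.examples import ricker  # assuming the excerpt above lives in its get_model() helper

# Build the model; the Summary nodes defined above feed the discrepancy node 'd'.
m = ricker.get_model()

# Rejection ABC over 'd', using the same API as the diagnostics snippet further down.
rej = elfi.Rejection(m['d'], batch_size=1000, seed=0)
result = rej.sample(100, n_sim=10000)
print(result)
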
github elfi-dev / elfi / elfi / examples / daycare.py
    if true_params is None:
        true_params = [3.6, 0.6, 0.1]

    m = elfi.ElfiModel()
    y_obs = daycare(*true_params, random_state=np.random.RandomState(seed_obs), **kwargs)
    sim_fn = partial(daycare, **kwargs)
    priors = []
    sumstats = []

    priors.append(elfi.Prior('uniform', 0, 11, model=m, name='t1'))
    priors.append(elfi.Prior('uniform', 0, 2, model=m, name='t2'))
    priors.append(elfi.Prior('uniform', 0, 1, model=m, name='t3'))

    elfi.Simulator(sim_fn, *priors, observed=y_obs, name='DCC')

    sumstats.append(elfi.Summary(ss_shannon, m['DCC'], name='Shannon'))
    sumstats.append(elfi.Summary(ss_strains, m['DCC'], name='n_strains'))
    sumstats.append(elfi.Summary(ss_prevalence, m['DCC'], name='prevalence'))
    sumstats.append(elfi.Summary(ss_prevalence_multi, m['DCC'], name='multi'))

    elfi.Discrepancy(distance, *sumstats, name='d')

    logger.info("Generated observations with true parameters "
                "t1: %.1f, t2: %.3f, t3: %.1f, ", *true_params)

    return m
github elfi-dev / elfi / elfi / examples / gnk.py
    # Initialising the prior settings as in Allingham et al. (2009).
    priors = []
    priors.append(elfi.Prior('uniform', 0, 10, model=m, name='A'))
    priors.append(elfi.Prior('uniform', 0, 10, model=m, name='B'))
    priors.append(elfi.Prior('uniform', 0, 10, model=m, name='g'))
    priors.append(elfi.Prior('uniform', 0, 10, model=m, name='k'))

    # Obtaining the observations.
    y_obs = GNK(*true_params, n_obs=n_obs, random_state=np.random.RandomState(seed))

    # Defining the simulator.
    fn_simulator = partial(GNK, n_obs=n_obs)
    elfi.Simulator(fn_simulator, *priors, observed=y_obs, name='GNK')

    # Initialising the summary statistics as in Allingham et al. (2009).
    default_ss = elfi.Summary(ss_order, m['GNK'], name='ss_order')

    # Using the multi-dimensional Euclidean distance function as
    # the summary statistics' implementations are designed for multi-dimensional cases.
    elfi.Discrepancy(euclidean_multiss, default_ss, name='d')
    return m
github elfi-dev / elfi / elfi / methods / diagnostics.py
        n_acc : int
            Number of the accepted parameters.
        batch_size : int
            Number of samples per batch.

        Returns
        -------
        array_like
            Accepted parameters.

        """
        # Initialise the distance function.
        m = self.simulator.model.copy()
        list_ss = []
        for ss in set_ss:
            list_ss.append(elfi.Summary(ss, m[self.simulator.name], model=m))
        if isinstance(self.fn_distance, str):
            d = elfi.Distance(self.fn_distance, *list_ss, model=m)
        else:
            d = elfi.Discrepancy(self.fn_distance, *list_ss, model=m)

        # Run the simulations.
        # TODO: include different distance functions in the summary-statistics combinations.
        sampler_rejection = elfi.Rejection(d, batch_size=batch_size,
                                           seed=self.seed, pool=self.pool)
        result = sampler_rejection.sample(n_acc, n_sim=n_sim)

        # Extract the accepted parameters.
        thetas_acc = result.samples_array
        return thetas_acc
github elfi-dev / elfi / elfi / examples / gauss.py
    if nd_mean:
        n_dim = len(true_params)
        for i in range(n_dim):
            name_prior = 'mu_{}'.format(i)
            prior_mu = elfi.Prior('uniform', true_params[i] - eps_prior,
                                  2 * eps_prior, model=m, name=name_prior)
            priors.append(prior_mu)
    else:
        priors.append(elfi.Prior('uniform', true_params[0] - eps_prior,
                                 2 * eps_prior, model=m, name='mu'))
        priors.append(elfi.Prior('truncnorm', np.amax([.01, true_params[1] - eps_prior]),
                                 2 * eps_prior, model=m, name='sigma'))
    elfi.Simulator(fn_simulator, *priors, observed=y_obs, name='gauss')

    # Initialising the summary statistics.
    sumstats = []
    sumstats.append(elfi.Summary(ss_mean, m['gauss'], name='ss_mean'))
    sumstats.append(elfi.Summary(ss_var, m['gauss'], name='ss_var'))

    # Choosing the discrepancy metric.
    if nd_mean:
        elfi.Discrepancy(euclidean_multidim, *sumstats, name='d')
    else:
        elfi.Distance('euclidean', *sumstats, name='d')
    return m
github elfi-dev / elfi / elfi / examples / lorenz.py
    simulator = partial(forecast_lorenz, initial_state=initial_state, f=f, n_obs=n_obs, phi=phi,
                        total_duration=total_duration)

    if not true_params:
        true_params = [2.0, 0.1]

    m = elfi.ElfiModel()

    y_obs = simulator(*true_params, random_state=np.random.RandomState(seed_obs))
    sumstats = []

    elfi.Prior('uniform', 0.5, 3., model=m, name='theta1')
    elfi.Prior('uniform', 0, 0.3, model=m, name='theta2')
    elfi.Simulator(simulator, m['theta1'], m['theta2'], observed=y_obs, name='Lorenz')

    sumstats.append(elfi.Summary(mean, m['Lorenz'], name='Mean'))

    sumstats.append(elfi.Summary(var, m['Lorenz'], name='Var'))

    sumstats.append(elfi.Summary(autocov, m['Lorenz'], name='Autocov'))

    sumstats.append(elfi.Summary(cov, m['Lorenz'], name='Cov'))

    sumstats.append(elfi.Summary(xcov, m['Lorenz'], True, name='CrosscovPrev'))

    sumstats.append(elfi.Summary(xcov, m['Lorenz'], False, name='CrosscovNext'))

    elfi.Distance('euclidean', *sumstats, name='d')

    return m
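
The two cross-covariance summaries above also show that extra positional arguments given to elfi.Summary (here True and False) are forwarded to the wrapped function. Below is a small self-contained sketch of the same idea; the quantile summary, toy simulator and node names are hypothetical and not part of the Lorenz example.

import numpy as np
import scipy.stats as ss
import elfi

def toy_sim(mu, batch_size=1, random_state=None):
    # Illustrative simulator: batch_size rows of 100 draws from N(mu, 1).
    mu = np.asanyarray(mu).reshape((-1, 1))
    return ss.norm.rvs(mu, 1, size=(batch_size, 100), random_state=random_state)

def quantile_ss(y, q):
    # q arrives through the extra positional argument passed to elfi.Summary.
    return np.quantile(y, q, axis=1)

m = elfi.ElfiModel()
elfi.Prior('uniform', -5, 10, model=m, name='mu')
elfi.Simulator(toy_sim, m['mu'], observed=toy_sim([1.0]), name='sim')
elfi.Summary(quantile_ss, m['sim'], 0.1, name='q10')
elfi.Summary(quantile_ss, m['sim'], 0.9, name='q90')
elfi.Distance('euclidean', m['q10'], m['q90'], name='d')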