# Fragment of a two-model test: two competing Gaussian models with an adaptive
# population size. The enclosing test function's signature is not part of this
# excerpt; db_path and sampler come from the surrounding test setup, st is
# scipy.stats, and the remaining helpers (SimpleModel, RV, Distribution, ...)
# are the pyabc classes used throughout this page.
sigma = .5

def model(args):
    return {"y": st.norm(args['x'], sigma).rvs()}

# We define two models, but they are identical so far
models = [model, model]
models = list(map(SimpleModel, models))
# The prior over the model classes is uniform
model_prior = RV("randint", 0, 2)
# However, our models' priors are not the same. Their means differ.
mu_x_1, mu_x_2 = 0, 1
parameter_given_model_prior_distribution = [Distribution(x=RV("norm", mu_x_1, sigma)),
                                            Distribution(x=RV("norm", mu_x_2, sigma))]
# Particles are perturbed in a Gaussian fashion
parameter_perturbation_kernels = [MultivariateNormalTransition() for _ in range(2)]
# We plug the whole ABC setup together
nr_populations = 3
population_size = AdaptivePopulationStrategy(400, nr_populations, mean_cv=0.05)
abc = ABCSMC(models, model_prior, ModelPerturbationKernel(2, probability_to_stay=.7),
             parameter_given_model_prior_distribution, parameter_perturbation_kernels,
             PercentileDistanceFunction(measures_to_use=["y"]), MedianEpsilon(.2),
             population_size,
             sampler=sampler)
# Finally we add meta data such as model names and define where to store the results
options = {'db_path': db_path}
# y_observed is the important piece here: our actual observation.
y_observed = 1
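# A minimal sketch (not part of the excerpt above) of how such a setup is
# driven, mirroring the set_data/run/get_results_distribution pattern used by
# the other snippets on this page.
abc.do_not_stop_when_only_single_model_alive()
abc.set_data({"y": y_observed}, 0, {}, options)
minimum_epsilon = -1
history = abc.run(minimum_epsilon)
# Per-model posteriors over x; indices 0 and 1 refer to the two competing models.
posterior_x_0, posterior_w_0 = history.get_results_distribution(0, "x")
posterior_x_1, posterior_w_1 = history.get_results_distribution(1, "x")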
def test_gaussian_single_population(db_path, sampler):
    # db_path and sampler are provided by the surrounding test setup (fixtures).
    sigma_prior = 1
    sigma_ground_truth = 1
    observed_data = 1

    def model(args):
        return {"y": st.norm(args['x'], sigma_ground_truth).rvs()}

    models = [model]
    models = list(map(SimpleModel, models))
    model_prior = RV("randint", 0, 1)
    nr_populations = 1
    population_size = ConstantPopulationStrategy(600, nr_populations)
    parameter_given_model_prior_distribution = [Distribution(x=RV("norm", 0, sigma_prior))]
    parameter_perturbation_kernels = [MultivariateNormalTransition()]
    abc = ABCSMC(models, model_prior, ModelPerturbationKernel(1, probability_to_stay=1),
                 parameter_given_model_prior_distribution, parameter_perturbation_kernels,
                 PercentileDistanceFunction(measures_to_use=["y"]), MedianEpsilon(.1),
                 population_size,
                 sampler=sampler)
    options = {'db_path': db_path}
    abc.set_data({"y": observed_data}, 0, {}, options)
    minimum_epsilon = -1
    abc.do_not_stop_when_only_single_model_alive()
    history = abc.run(minimum_epsilon)
    posterior_x, posterior_weight = history.get_results_distribution(0, "x")
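# Side note, a hedged sketch (not from the original test): with a N(0, 1) prior
# on x, unit observation noise and observed_data = 1, the exact conjugate
# posterior is N(0.5, 0.5), so the weighted sample mean of the ABC posterior
# should land near 0.5. The helper below only assumes posterior_x and
# posterior_weight are arrays of samples and normalized weights as returned above.
import numpy as np

def weighted_mean(samples, weights):
    samples = np.asarray(samples, dtype=float)
    weights = np.asarray(weights, dtype=float)
    return float(np.sum(samples * weights) / np.sum(weights))

# e.g. (inside the test): assert abs(weighted_mean(posterior_x, posterior_weight) - 0.5) < 0.1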
def test_gaussian_multiple_populations_adpative_population_size(db_path, sampler):
    # db_path and sampler are provided by the surrounding test setup (fixtures).
    sigma_x = 1
    sigma_y = .5
    y_observed = 2

    def model(args):
        return {"y": st.norm(args['x'], sigma_y).rvs()}

    models = [model]
    models = list(map(SimpleModel, models))
    model_prior = RV("randint", 0, 1)
    nr_populations = 4
    # Start with 600 particles and let the strategy adapt the size over the populations.
    population_size = AdaptivePopulationStrategy(600, nr_populations)
    parameter_given_model_prior_distribution = [Distribution(x=RV("norm", 0, sigma_x))]
    parameter_perturbation_kernels = [MultivariateNormalTransition()]
    abc = ABCSMC(models, model_prior, ModelPerturbationKernel(1, probability_to_stay=1),
                 parameter_given_model_prior_distribution, parameter_perturbation_kernels,
                 PercentileDistanceFunction(measures_to_use=["y"]), MedianEpsilon(.2),
                 population_size,
                 sampler=sampler)
    options = {'db_path': db_path}
    abc.set_data({"y": y_observed}, 0, {}, options)
    minimum_epsilon = -1
    abc.do_not_stop_when_only_single_model_alive()
    history = abc.run(minimum_epsilon)
    posterior_x, posterior_weight = history.get_results_distribution(0, "x")
    sort_indices = sp.argsort(posterior_x)  # sp is scipy (import scipy as sp)
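# Hedged sketch (the continuation of this test is not part of the excerpt):
# sort_indices is the kind of ordering used to build a weighted empirical CDF
# of the posterior samples, e.g. for comparison against an analytic posterior CDF.
import numpy as np

def weighted_empirical_cdf(samples, weights):
    """Return the samples in ascending order together with their weighted empirical CDF."""
    samples = np.asarray(samples, dtype=float)
    weights = np.asarray(weights, dtype=float)
    order = np.argsort(samples)
    cdf = np.cumsum(weights[order])
    cdf /= cdf[-1]  # normalize in case the weights do not sum exactly to one
    return samples[order], cdf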
import os
import tempfile

import matplotlib.pyplot as plt
import numpy as np
import pyabc


# create and run some model
def model(p):
    return {'ss0': p['p0'] + 0.1 * np.random.uniform(),
            'ss1': p['p1'] + 0.1 * np.random.uniform()}


p_true = {'p0': 3, 'p1': 4}
observation = {'ss0': p_true['p0'], 'ss1': p_true['p1']}
limits = {'p0': (0, 5), 'p1': (1, 8)}
prior = pyabc.Distribution(**{
    key: pyabc.RV('uniform', limits[key][0], limits[key][1] - limits[key][0])
    for key in p_true.keys()})

db_path = "sqlite:///" \
    + os.path.join(tempfile.gettempdir(), "test_visualize.db")
distance = pyabc.PNormDistance(p=2)
n_history = 2
sampler = pyabc.sampler.MulticoreEvalParallelSampler(n_procs=2)

# Run the same ABC analysis twice so the database contains two histories.
for _ in range(n_history):
    abc = pyabc.ABCSMC(model, prior, distance, 20, sampler=sampler)  # 20 particles per population
    abc.new(db_path, observation)
    abc.run(minimum_epsilon=.1, max_nr_populations=3)
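# Hedged follow-up (not part of the excerpt above): the stored runs can be
# inspected with pyabc's History and visualization helpers. Opening the
# database via pyabc.History gives access to the stored results; by default
# the most recent run is selected.
import pyabc.visualization

history = pyabc.History(db_path)
pyabc.visualization.plot_epsilons([history])
plt.show()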
# Noise level of the models; the value matches the two-model fragment above,
# since the excerpt itself does not define sigma.
sigma = .5


def model(parameters):
    # sample from a gaussian
    y = st.norm(parameters.x, sigma).rvs()
    # return the sample as a dictionary
    return {"y": y}


# We define two models, but they are identical so far
models = [model, model]
# However, our models' priors are not the same. Their means differ.
mu_x_1, mu_x_2 = 0, 1
parameter_priors = [
    Distribution(x=RV("norm", mu_x_1, sigma)),
    Distribution(x=RV("norm", mu_x_2, sigma))
]
# We plug the whole ABC setup together, with at most 2 populations
population_strategy = ConstantPopulationStrategy(100, 2)
abc = ABCSMC(models, parameter_priors,
             PercentileDistanceFunction(measures_to_use=["y"]),
             population_strategy)
# Finally we add meta data such as model names
# and define where to store the results
db_path = ("sqlite:///" +
           os.path.join(tempfile.gettempdir(), "test.db"))
# y_observed is the important piece here: our actual observation.
y_observed = 1
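# A minimal sketch of launching the setup above, mirroring the
# abc.new(...)/abc.run(...) calls from the visualization snippet on this page;
# exact run() arguments depend on the installed pyabc version (here the number
# of populations is already fixed by ConstantPopulationStrategy(100, 2)).
abc.new(db_path, {"y": y_observed})
history = abc.run(minimum_epsilon=0.1)
# history now holds the per-model posteriors and model probabilities.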