def test(self):
    # build a hierarchical model with two root models sharing the parent N2
    B1 = Binomial([10, 0.2])
    N1 = Normal([0.03, 0.01])
    N2 = Normal([0.1, N1])
    graph1 = Normal([B1, N2])
    graph2 = Normal([1, N2])

    statistics_calculator = Identity(degree=2, cross=False)
    distance_calculator = LogReg(statistics_calculator)
    backend = Backend()

    sampler = RejectionABC([graph1, graph2], [distance_calculator, distance_calculator], backend)

    rng = np.random.RandomState(1)
    sampler.sample_from_prior(rng=rng)
    y_sim = sampler.simulate(1, rng=rng)

    # simulate() should return a list with one simulated dataset per root model
    self.assertTrue(isinstance(y_sim, list))
    self.assertTrue(len(y_sim) == 2)
    self.assertTrue(isinstance(y_sim[0][0], np.ndarray))
def test(self):
    model = Normal([1, 0.1])
    Manager = AcceptedParametersManager([model])
    backend = Backend()

    # updated kernel values should be broadcast and retrievable afterwards
    Manager.update_kernel_values(backend, [1])
    self.assertEqual(Manager.kernel_parameters_bds.value(), [1])
def setUpModule():
    '''
    If an exception is raised in setUpModule, then none of the tests in the
    module will be run.
    This is useful because, on initialization, the slaves enter a while loop
    in which they only respond to the master's commands and never execute
    anything else. On termination of the master, the slaves call quit(),
    which raises a SystemExit().
    Because of this behaviour of setUpModule, no unit tests are run on the
    slaves, and we only need to write unit tests from the master's point of
    view.
    '''
    global rank, backend_mpi
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    backend_mpi = BackendMPI()
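The sketch below illustrates this pattern in a self-contained way; it is a hypothetical minimal example, not the ABCpy BackendMPI implementation: the command loop stands in for the backend's internal worker loop, and the names MasterOnlyTest and "quit" are illustrative.

import atexit
import unittest
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

def setUpModule():
    if rank != 0:
        # Workers block here, serving the master's broadcasts, until the
        # master tells them to quit; they never reach the test runner.
        while comm.bcast(None, root=0) != "quit":
            pass
        # Raising SkipTest in setUpModule skips every test in this module,
        # so no unit test ever runs on a worker rank.
        raise unittest.SkipTest("tests run on the master only")
    # The master releases the workers when its process exits.
    atexit.register(lambda: comm.bcast("quit", root=0))

class MasterOnlyTest(unittest.TestCase):
    def test_runs_on_master_only(self):
        self.assertEqual(rank, 0)

if __name__ == '__main__':
    unittest.main()  # e.g. mpirun -np 3 python this_file.py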
def setUpModule():
    '''
    If an exception is raised in setUpModule, then none of the tests in the
    module will be run.
    This is useful because, on initialization, the teams enter a while loop
    in which they only respond to the scheduler's commands and never execute
    anything else. On termination of the scheduler, the teams call quit(),
    which raises a SystemExit().
    Because of this behaviour of setUpModule, no unit tests are run on the
    teams, and we only need to write unit tests from the scheduler's point
    of view.
    '''
    global rank, backend_mpi
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    backend_mpi = BackendMPI(process_per_model=2)
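The grouping of worker ranks into teams of process_per_model processes can be pictured with a communicator split, as in the short sketch below; the color assignment and variable names are assumptions for illustration only, not BackendMPI's internals.

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
process_per_model = 2

# Rank 0 acts as the scheduler and joins no team; the remaining ranks are
# grouped into teams of `process_per_model` processes.
color = MPI.UNDEFINED if rank == 0 else (rank - 1) // process_per_model
team_comm = comm.Split(color=color, key=rank)

if rank != 0:
    print("rank %d is member %d of team %d" % (rank, team_comm.Get_rank(), color))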
def test_Raises(self):
    N1 = Normal([0.1, 0.01])
    N2 = Normal([0.3, N1])
    # N1 appears twice in the kernel's model list, so constructing the
    # joint kernel should raise a ValueError
    kernel = MultivariateNormalKernel([N1, N2, N1])
    with self.assertRaises(ValueError):
        JointPerturbationKernel([kernel])
def setUp(self):
    self.stat_calc1 = Identity(degree=1, cross=0)
    self.stat_calc2 = Identity(degree=1, cross=0)
    self.likfun1 = SynLikelihood(self.stat_calc1)
    self.likfun2 = SynLikelihood(self.stat_calc2)

    ## Define Models
    # define uniform prior distributions
    self.mu = Uniform([[-5.0], [5.0]], name='mu')
    self.sigma = Uniform([[0.0], [10.0]], name='sigma')
    # define two Gaussian models
    self.model1 = Normal([self.mu, self.sigma])
    self.model2 = Normal([self.mu, self.sigma])

    # check whether a wrong-sized list of likelihood functions raises an error
    self.assertRaises(ValueError, ProductCombination, [self.model1, self.model2], [self.likfun1])

    self.jointapprox_lhd = ProductCombination([self.model1, self.model2], [self.likfun1, self.likfun2])
def test_sample(self):
    # setup backend
    dummy = BackendDummy()

    # define a uniform prior distribution
    mu = Uniform([[-5.0], [5.0]], name='mu')
    sigma = Uniform([[0.0], [10.0]], name='sigma')
    # define a Gaussian model
    self.model = Normal([mu, sigma])

    # define sufficient statistics for the model
    stat_calc = Identity(degree=2, cross=0)

    # define a distance function
    dist_calc = Euclidean(stat_calc)

    # create fake observed data
    y_obs = [np.array(9.8)]

    # use the rejection sampling scheme
    sampler = RejectionABC([self.model], [dist_calc], dummy, seed=1)
    journal = sampler.sample([y_obs], 10, 1, 10)
    mu_sample = np.array(journal.get_parameters()['mu'])
def setUp(self):
    # define prior and model
    sigma = Uniform([[10], [20]])
    mu = Normal([0, 1])
    self.Y = Normal([mu, sigma])

    # define backend
    self.backend = Backend()

    # define statistics
    self.statistics_cal = Identity(degree=3, cross=False)

    if has_torch:
        # initialize statistics learning
        self.statisticslearning = TripletDistanceLearning([self.Y], self.statistics_cal, self.backend,
                                                          n_samples=100, n_samples_per_param=1, seed=1, n_epochs=10)
def setUp(self):
    # setup backend
    self.backend = BackendDummy()

    # define a uniform prior distribution
    mu = Uniform([[-5.0], [5.0]], name='mu')
    sigma = Uniform([[0.0], [10.0]], name='sigma')
    # define a Gaussian model
    self.model = Normal([mu, sigma])

    # define a distance function
    stat_calc = Identity(degree=2, cross=0)
    self.dist_calc = Euclidean(stat_calc)

    # create fake observed data
    # self.observation = self.model.forward_simulate(1, np.random.RandomState(1))[0].tolist()
    self.observation = [np.array(9.8)]