def __init__(self, model, data):
    self.vts = data.pop("vt")
    self.data = data
    # Wrap bare functions or lists of functions in a Model instance.
    if isinstance(model, list):
        model = Model(model)
    elif not isinstance(model, Model):
        model = Model([model])
    self.model = model
    # Infer the grid structure: the unique values of each parameter, and
    # which array axis corresponds to which parameter (matched by length).
    self.values = {key: xp.unique(self.data[key]) for key in self.data}
    shape = np.array(list(self.data.values())[0].shape)
    lens = {key: len(self.values[key]) for key in self.data}
    self.axes = {int(np.where(shape == lens[key])[0]): key for key in self.data}
    self.ndim = len(self.axes)
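
# The axis-matching trick above pairs each array dimension with the parameter
# whose number of unique values equals that dimension's length. A standalone
# sketch of the same logic (the parameter names "a" and "b" are hypothetical):
import numpy as np

a_grid, b_grid = np.meshgrid(
    np.linspace(0, 1, 5), np.linspace(0, 1, 7), indexing="ij"
)
data = {"a": a_grid, "b": b_grid}
values = {key: np.unique(data[key]) for key in data}
shape = np.array(list(data.values())[0].shape)  # (5, 7)
lens = {key: len(values[key]) for key in data}  # {"a": 5, "b": 7}
axes = {int(np.where(shape == lens[key])[0]): key for key in data}
print(axes)  # {0: "a", 1: "b"}
# Note: this mapping is ambiguous if two parameters share the same grid length.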
# `weights` (per-sample) and `event_weights` (their per-event sums) are
# computed earlier in this method and are not shown in this snippet.
# Draw new sample indices for each event, weighted by the population model.
new_idxs = list()
for ii in range(self.n_posteriors):
    new_idxs.append(
        np.random.choice(
            range(self.samples_per_posterior),
            size=self.samples_per_posterior,
            replace=True,
            p=to_numpy(weights[ii]),
        )
    )
new_samples = {
    key: xp.vstack(
        [self.data[key][ii, new_idxs[ii]] for ii in range(self.n_posteriors)]
    )
    for key in self.data
}
event_weights = list(event_weights)
weight_string = " ".join([f"{float(weight):.1f}" for weight in event_weights])
logger.info(f"Resampling done, sum of weights for events are {weight_string}")
if return_weights:
    return new_samples, weights
else:
    return new_samples
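
# A self-contained illustration of the weighted bootstrap above, on toy data
# (all names here are hypothetical):
import numpy as np

rng = np.random.default_rng(42)
n_events, n_samples = 3, 1000
samples = rng.normal(size=(n_events, n_samples))  # toy posterior samples
weights = rng.random((n_events, n_samples))
weights /= weights.sum(axis=-1, keepdims=True)  # normalise per event

new_idxs = [
    rng.choice(n_samples, size=n_samples, replace=True, p=weights[ii])
    for ii in range(n_events)
]
new_samples = np.vstack([samples[ii][new_idxs[ii]] for ii in range(n_events)])
print(new_samples.shape)  # (3, 1000)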
Parameters
----------
posteriors: list
    List of pandas DataFrame objects.
max_samples: int, optional
    Maximum number of samples to take from each posterior;
    the default is the length of the shortest posterior chain.

Returns
-------
data: dict
    Dictionary of arrays with shape (n_posteriors, max_samples),
    one entry for each key shared across the posteriors.
"""
# Cap max_samples at the length of the shortest posterior chain.
for posterior in posteriors:
    max_samples = min(len(posterior), max_samples)
data = {key: [] for key in posteriors[0]}
logger.debug(f"Downsampling to {max_samples} samples per posterior.")
self.samples_per_posterior = max_samples
# Randomly downsample each posterior and stack the draws for each key.
for posterior in posteriors:
    temp = posterior.sample(self.samples_per_posterior)
    for key in data:
        data[key].append(temp[key])
for key in data:
    data[key] = xp.array(data[key])
return data
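
# The downsampling above, expressed standalone with plain numpy/pandas
# (toy event posteriors with different chain lengths):
import numpy as np
import pandas as pd

posteriors = [
    pd.DataFrame({"mass": np.random.normal(30, 2, 800)}),
    pd.DataFrame({"mass": np.random.normal(35, 2, 1200)}),
]
max_samples = min(len(p) for p in posteriors)  # 800, the shortest chain
data = {
    key: np.array([p.sample(max_samples)[key] for p in posteriors])
    for key in posteriors[0]
}
print(data["mass"].shape)  # (2, 800)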
    evidences will be set to 0. This produces a Bayes factor between
    the sampling prior and the hyperparameterised model.
selection_function: func
    Function which evaluates your population selection function.
conversion_function: func
    Function which converts a dictionary of sampled parameters to a
    dictionary of parameters of the population model.
max_samples: int, optional
    Maximum number of samples to use from each set.
cupy: bool
    If True and a compatible CUDA environment is available,
    cupy will be used for performance.
    Note: this requires setting up your hyper_prior properly.
"""
if cupy and not CUPY_LOADED:
    logger.warning("Cannot import cupy, falling back to numpy.")
self.samples_per_posterior = max_samples
self.data = self.resample_posteriors(posteriors, max_samples=max_samples)
# Wrap a bare population function in a Model instance.
if not isinstance(hyper_prior, Model):
    hyper_prior = Model([hyper_prior])
self.hyper_prior = hyper_prior
Likelihood.__init__(self, hyper_prior.parameters)
if sampling_prior is not None:
    raise ValueError(
        "Passing a sampling_prior is deprecated and will be removed "
        "in the next release. This should be passed as a 'prior' "
        "column in the posteriors."
    )
elif "prior" in self.data:
if not isinstance(hyper_prior, Model):
hyper_prior = Model([hyper_prior])
self.hyper_prior = hyper_prior
Likelihood.__init__(self, hyper_prior.parameters)
if sampling_prior is not None:
raise ValueError(
"Passing a sampling_prior is deprecated and will be removed "
"in the next release. This should be passed as a 'prior' "
"column in the posteriors."
)
elif "prior" in self.data:
self.sampling_prior = self.data.pop("prior")
else:
logger.info("No prior values provided, defaulting to 1.")
self.sampling_prior = 1
if ln_evidences is not None:
self.total_noise_evidence = np.sum(ln_evidences)
else:
self.total_noise_evidence = np.nan
self.conversion_function = conversion_function
self.selection_function = selection_function
self.n_posteriors = len(posteriors)
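
# A minimal instantiation sketch for the constructor above. It assumes
# HyperparameterLikelihood is in scope and that the constructor arguments not
# shown here have defaults; the Gaussian model and its hyperparameters mu and
# sigma are hypothetical.
import numpy as np
import pandas as pd

def gaussian_mass(dataset, mu, sigma):
    # Model-wrapped functions are called as f(dataset, **hyperparameters).
    return np.exp(-((dataset["mass"] - mu) ** 2) / (2 * sigma ** 2)) / (
        (2 * np.pi) ** 0.5 * sigma
    )

# Toy single-event posteriors; the "prior" column holds the value of the
# original sampling prior at each sample.
posteriors = [
    pd.DataFrame(
        {"mass": np.random.uniform(5, 50, 1000), "prior": np.ones(1000) / 45}
    )
    for _ in range(3)
]

likelihood = HyperparameterLikelihood(
    posteriors=posteriors,
    hyper_prior=gaussian_mass,  # a bare function; __init__ wraps it in Model([...])
)
likelihood.parameters.update({"mu": 30.0, "sigma": 5.0})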
import numpy as np
import pandas as pd
from tqdm import tqdm
from bilby.core.utils import logger
from bilby.core.likelihood import Likelihood
from bilby.hyper.model import Model
from .cupy_utils import CUPY_LOADED, to_numpy, xp
INF = xp.nan_to_num(xp.inf)  # largest finite float; a safe stand-in for infinity
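
# The cupy_utils module imported above is not shown on this page. A common
# shape for such a backend shim, offered only as a sketch of the likely
# pattern (not the actual module):
try:
    import cupy as xp
    CUPY_LOADED = True
except ImportError:
    import numpy as xp
    CUPY_LOADED = False

def to_numpy(array):
    # Move an array back to host memory if it lives on the GPU.
    if CUPY_LOADED:
        return xp.asnumpy(array)
    return array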
class HyperparameterLikelihood(Likelihood):
    """
    A likelihood for inferring hyperparameter posterior distributions,
    including selection effects.

    See Eq. (34) of https://arxiv.org/abs/1809.02293 for a definition.

    Parameters
    ----------
    posteriors: list
        A list of pandas DataFrames, each containing a set of samples.
        Each set may have a different size.
    hyper_prior: `bilby.hyper.model.Model`
        The population model; this can alternatively be a function.
    sampling_prior: `bilby.hyper.model.Model`
        The sampling prior; this can alternatively be a function.
    ln_evidences: list, optional
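
# For reference, the docstring above cites Eq. (34) of
# https://arxiv.org/abs/1809.02293. Schematically (a paraphrase, with
# selection effects omitted), the likelihood this class evaluates is
#
#   L({d_i} | Lambda) ∝ prod_i (1/n_i) sum_k pi(theta_i^k | Lambda) / pi(theta_i^k | ∅)
#
# i.e. for each event, the Monte Carlo average over its n_i posterior samples
# of the population model divided by the original sampling prior, multiplied
# across events.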