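# Shared imports assumed by the standalone zfit snippets below. The import
# paths follow zfit's public layout but are assumptions, not the original
# file headers; internal helpers referenced by the snippets (ztyping,
# ZfitParameter, zcontainer, Space, Integration, UniformSampleAndWeights)
# come from zfit's internals and are left unresolved here.
from typing import Callable, Dict, Union

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

from zfit.settings import ztypes
from zfit.settings import ztypes as _ztypes  # alias style used in the first snippet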
def constant(x, dtype=_ztypes.float):
    return tf.constant(x, dtype)
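# Usage sketch for the wrapper above (assumption: ztypes.float aliases
# tf.float64, zfit's default precision):
c = constant([1.0, 2.0, 3.0])
assert c.dtype == tf.float64  # upcast to 64-bit by default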
def __init__(self, name, value, dtype=ztypes.float):
    super().__init__(name=name, params={}, dtype=dtype)
    # Fold `value` to a concrete NumPy value at construction time; fail
    # loudly if it cannot be resolved statically.
    static_value = tf.get_static_value(value, partial=True)
    if static_value is None:
        raise RuntimeError("Cannot convert input to static value. If you encounter this, please open a bug report"
                           " on Github: https://github.com/zfit/zfit")
    self._value_np = static_value
    self._value = tf.guarantee_const(tf.convert_to_tensor(value, dtype=dtype))
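# Why the static-value check above can fail: tf.get_static_value only folds
# tensors whose value is known at trace time. A minimal sketch:
assert tf.get_static_value(tf.constant([1.0, 2.0])) is not None  # constant: resolvable

@tf.function
def _traced():
    dynamic = tf.random.uniform((2,))
    static = tf.get_static_value(dynamic)  # None: value unknown at trace time,
    assert static is None                  # the case the RuntimeError guards against
    return dynamic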
def to_complex(number, dtype=ztypes.complex):
    return tf.cast(number, dtype=dtype)
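# Usage sketch: tf.cast embeds a real tensor into the complex plane with zero
# imaginary part (assumption: ztypes.complex aliases tf.complex128):
z = to_complex(tf.constant(1.5, dtype=tf.float64))
# z is (1.5+0j) with dtype complex128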
def __init__(self, obs: ztyping.ObsTypeInput, params: Union[Dict[str, ZfitParameter], None] = None,
             name: str = "BaseModel", dtype=ztypes.float,
             **kwargs):
    """The base model to inherit from and overwrite `_unnormalized_pdf`.

    Args:
        dtype (DType): The dtype of the model.
        name (str): The name of the model.
        params (Dict(str, :py:class:`~zfit.Parameter`)): A dictionary mapping the internal name of each
            parameter to the parameter itself that the model depends on.
    """
    super().__init__(name=name, dtype=dtype, params=params, obs=obs, **kwargs)
    # self._check_set_space(obs)
    self._integration = zcontainer.DotDict()
    self._integration.auto_numeric_integrator = self._DEFAULTS_integration.auto_numeric_integrator
    self.integration = Integration(mc_sampler=self._DEFAULTS_integration.mc_sampler,
                                   draws_per_dim=self._DEFAULTS_integration.draws_per_dim)
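# Illustrative-only sketch of the subclassing pattern the docstring describes:
# overwrite `_unnormalized_pdf` in a child class. The Gaussian below and the
# way `x` is treated as a plain tensor are assumptions for demonstration,
# not zfit's exact data-handling API.
class ToyGauss(BaseModel):
    def _unnormalized_pdf(self, x):
        mu = self.params["mu"]
        sigma = self.params["sigma"]
        return tf.exp(-0.5 * ((x - mu) / sigma) ** 2)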
def lower(self, value):
    if value is None and self._lower_limit_neg_inf is None:
        self._lower_limit_neg_inf = tf.cast(-np.inf, dtype=ztypes.float)
    self._lower_limit = value
def constant(value, dtype=ztypes.float, shape=None, name="Const", verify_shape=None):
    # TODO(tf2): remove this legacy check below
    if verify_shape is not None:
        raise RuntimeError("'verify_shape' is no longer a valid argument; it is always True. Please remove it.")
    return tf.constant(value, dtype=dtype, shape=shape, name=name)
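# The legacy TF1 keyword now fails fast rather than being silently ignored:
try:
    constant(1.0, verify_shape=True)
except RuntimeError as err:
    print(err)  # points the caller at removing the argument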
def _auto_upcast(tensor: tf.Tensor):
    if isinstance(tensor, tf.Tensor):
        # Look up the preferred dtype for this tensor's dtype and cast if needed.
        new_dtype = ztypes[tensor.dtype]
        if new_dtype != tensor.dtype:
            tensor = tf.cast(x=tensor, dtype=new_dtype)
    return tensor
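# Sketch of the upcast in action (assumption: zfit's ztypes maps tf.float32
# to tf.float64, its preferred precision); non-Tensor inputs pass through:
t32 = tf.constant(1.0, dtype=tf.float32)
t64 = _auto_upcast(t32)
assert t64.dtype == tf.float64
assert _auto_upcast(5) == 5  # plain Python values are returned unchanged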
def accept_reject_sample(prob: Callable, n: int, limits: Space,
                         sample_and_weights_factory: Callable = UniformSampleAndWeights,
                         dtype=ztypes.float, prob_max: Union[None, int] = None,
                         efficiency_estimation: float = 1.0) -> tf.Tensor:
    """Accept-reject sample from a probability distribution.

    Args:
        prob (function): A function taking x (a Tensor) as an argument and returning the probability
            (or anything that is proportional to the probability).
        n (int): Number of samples to produce.
        limits (:py:class:`~zfit.Space`): The limits to sample from.
        sample_and_weights_factory (Callable): A factory function that returns the following function:
            a function that returns the sample to insert into `prob` and the weights
            (probability density) of each sample together with the random thresholds. The API looks as follows:

            - Parameters:

                - n_to_produce (int, tf.Tensor): The number of events to produce (not exactly).
                - limits (Space): The limits in which the samples will be.
    """
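# A hedged sketch of the sample-and-weights function the docstring describes,
# specialized to a 1-D uniform box with a plain-int count; the name, the
# flat-weight choice and the return layout are assumptions drawn from the
# parameter list above (zfit's real UniformSampleAndWeights also returns
# additional bookkeeping values):
def uniform_sample_and_weights(n_to_produce, lower, upper):
    # Uniform candidates inside the box [lower, upper] ...
    sample = tf.random.uniform((n_to_produce,), minval=lower, maxval=upper, dtype=tf.float64)
    # ... each carrying the constant density 1/volume as its weight ...
    weights = tf.ones((n_to_produce,), dtype=tf.float64) / (upper - lower)
    # ... plus the uniform random thresholds for the accept/reject comparison.
    thresholds = tf.random.uniform((n_to_produce,), dtype=tf.float64)
    return sample, weights, thresholds

# e.g. sample, weights, thresholds = uniform_sample_and_weights(1000, -1.0, 1.0)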
def body(batch_num, mean):
    # Closure over `mc_sampler`, `batch_size`, `space`, `lower`, `upper` and
    # `func` from the enclosing Monte Carlo integration routine.
    if mc_sampler == tfp.mcmc.sample_halton_sequence:
        start_idx = batch_num * batch_size
        end_idx = start_idx + batch_size
        indices = tf.range(start_idx, end_idx, dtype=tf.int32)
        sample = mc_sampler(space.n_obs, sequence_indices=indices,
                            dtype=ztypes.float, randomized=False)
    else:
        sample = mc_sampler(shape=(batch_size, space.n_obs), dtype=ztypes.float)
    sample = tf.guarantee_const(sample)
    # Rescale the unit-hypercube draws to the integration limits.
    sample = (np.array(upper[0]) - np.array(lower[0])) * sample + lower[0]
    sample = tf.transpose(a=sample)
    sample = func(sample)
    sample = tf.guarantee_const(sample)

    batch_mean = tf.reduce_mean(input_tensor=sample)
    batch_mean = tf.guarantee_const(batch_mean)
    # with tf.control_dependencies([batch_mean]):
    err_weight = 1 / tf.cast(batch_num + 1, dtype=tf.float64)
    # err_weight /= err_weight + 1
    # print_op = tf.print(batch_mean)
    do_print = False
    if do_print:
        tf.print(batch_mean)  # reconstructed from the commented-out print above