How to use the pgmpy.utils._check_1d_array_object function in pgmpy

To help you get started, we’ve selected a few pgmpy examples based on popular ways this function is used in public projects.

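All of the snippets below share the same two-step validation pattern: the raw argument goes through _check_1d_array_object, which hands it back as a 1-d numpy array, and the result is then checked against the model's variables with _check_length_equal. Here is a minimal sketch of that pattern; the import path for _check_length_equal is assumed to mirror the one in the page title, and the variables list is a made-up stand-in for model.variables.

    import numpy as np
    from pgmpy.utils import _check_1d_array_object, _check_length_equal

    variables = ["x", "y"]  # stand-in for model.variables

    # Normalise/validate the user-supplied vector; the snippets below rely on the
    # returned value being a 1-d numpy array (they use .shape and vector arithmetic on it).
    initial_pos = _check_1d_array_object([1, 1], "initial_pos")

    # Reject vectors whose length does not match the number of model variables.
    _check_length_equal(initial_pos, variables, "initial_pos", "variables")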

github pgmpy / pgmpy / pgmpy / sampling / HMC.py (View on GitHub)
        >>> mean = np.array([1, 1])
        >>> covariance = np.array([[1, 0.7], [0.7, 3]])
        >>> model = JGD(['x', 'y'], mean, covariance)
        >>> sampler = HMCda(model=model, grad_log_pdf=GLPG, simulate_dynamics=LeapFrog)
        >>> samples = sampler.sample(np.array([1, 1]), num_adapt=10000, num_samples = 10000,
        ...                          trajectory_length=2, stepsize=None, return_type='recarray')
        >>> samples_array = np.array([samples[var_name] for var_name in model.variables])
        >>> np.cov(samples_array)
        array([[ 0.98432155,  0.66517394],
               [ 0.66517394,  2.95449533]])

        """

        self.accepted_proposals = 1.0

        initial_pos = _check_1d_array_object(initial_pos, "initial_pos")
        _check_length_equal(
            initial_pos, self.model.variables, "initial_pos", "model.variables"
        )

        if stepsize is None:
            stepsize = self._find_reasonable_stepsize(initial_pos)

        if num_adapt <= 1:  # Return samples generated using the simple HMC algorithm
            return HamiltonianMC.sample(
                self, initial_pos, num_samples, trajectory_length, stepsize
            )

        # stepsize is epsilon
        # mu is a freely chosen point that the (log) stepsize is shrunk towards
        # after each adaptation iteration
        mu = np.log(10.0 * stepsize)
        # log(10 * stepsize) biases adaptation towards larger stepsizes, which cost less computation
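
The num_adapt <= 1 branch is worth noting: with at most one adaptation step there is nothing for dual averaging to do, so the call falls back to plain HamiltonianMC.sample, and only initial_pos, num_samples, trajectory_length and stepsize are forwarded. A small usage sketch of that fallback, assuming the sampler built in the docstring above is still in scope:

    # With num_adapt=1 the call above delegates straight to HamiltonianMC.sample,
    # so no stepsize adaptation takes place and the given stepsize is used as-is.
    samples = sampler.sample(np.array([1, 1]), num_adapt=1, num_samples=1000,
                             trajectory_length=2, stepsize=0.25)
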
github pgmpy / pgmpy / pgmpy / models / JointGaussian.py (View on GitHub)
    def __init__(self, variables, mean, covariance):

        mean = _check_1d_array_object(mean, 'mean')
        _check_length_equal(mean, variables, 'mean', 'variables')

        if not isinstance(covariance, (np.matrix, np.ndarray, list)):
            raise TypeError(
                "covariance must be a 2d array type object")
        covariance = np.array(covariance)
        if covariance.shape[0] != covariance.shape[1]:
            raise ValueError(
                "covariance must be a square in shape")

        if mean.shape[0] != covariance.shape[0]:
            raise ValueError("shape of mean vector should be d X 1 and" +
                             " shape of covariance matrix should be d X d")
        self.variables = variables
        self.mean = mean
        self.covariance = covariance
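
The constructor above shows what the checks buy you: the mean may be any 1-d array-like (it is normalised by _check_1d_array_object), while the covariance may be a list, numpy.ndarray or numpy.matrix, provided it is square and its dimension matches the length of the mean. The sketch below restates those constraints with plain numpy, independently of pgmpy, purely to illustrate which inputs would pass:

    import numpy as np

    variables = ['x', 'y']
    mean = [1, 1]                        # any 1-d array-like is accepted
    covariance = [[1, 0.7], [0.7, 3]]    # list / ndarray / matrix, must be square

    mean = np.asarray(mean)
    covariance = np.array(covariance)

    # The same three conditions the constructor enforces.
    assert mean.ndim == 1 and len(mean) == len(variables)
    assert covariance.shape[0] == covariance.shape[1]
    assert mean.shape[0] == covariance.shape[0]
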
github pgmpy / pgmpy / pgmpy / sampling / NUTS.py (View on GitHub)
        >>> samples = sampler.sample(initial_pos=np.array([12, -4]), num_adapt=10, num_samples=10,
        ...                          stepsize=0.1, return_type='dataframe')
        >>> samples
                   x          y
        0  12.000000  -4.000000
        1  11.864821  -3.696109
        2  10.546986  -4.892169
        3   8.526596 -21.555793
        4   8.526596 -21.555793
        5  11.343194  -6.353789
        6  -1.583269 -12.802931
        7  12.411957 -11.704859
        8  13.253336 -20.169492
        9  11.295901  -7.665058
        """
        initial_pos = _check_1d_array_object(initial_pos, "initial_pos")
        _check_length_equal(
            initial_pos, self.model.variables, "initial_pos", "model.variables"
        )

        if stepsize is None:
            stepsize = self._find_reasonable_stepsize(initial_pos)

        if num_adapt <= 1:
            return NoUTurnSampler(
                self.model, self.grad_log_pdf, self.simulate_dynamics
            ).sample(initial_pos, num_samples, stepsize)

        mu = np.log(10.0 * stepsize)
        stepsize_bar = 1.0
        h_bar = 0.0
github pgmpy / pgmpy / pgmpy / sampling / base.py (View on GitHub)
    def __init__(
        self, model, position, momentum, stepsize, grad_log_pdf, grad_log_position=None
    ):

        position = _check_1d_array_object(position, "position")

        momentum = _check_1d_array_object(momentum, "momentum")

        if not issubclass(grad_log_pdf, BaseGradLogPDF):
            raise TypeError(
                "grad_log_pdf must be an instance"
                + " of pgmpy.inference.continuous.base.BaseGradLogPDF"
            )

        _check_length_equal(position, momentum, "position", "momentum")
        _check_length_equal(position, model.variables, "position", "model.variables")

        if grad_log_position is None:
            grad_log_position, _ = grad_log_pdf(position, model).get_gradient_log_pdf()

        else:
github pgmpy / pgmpy / pgmpy / sampling / base.py (View on GitHub)
        momentum = _check_1d_array_object(momentum, "momentum")

        if not issubclass(grad_log_pdf, BaseGradLogPDF):
            raise TypeError(
                "grad_log_pdf must be an instance"
                + " of pgmpy.inference.continuous.base.BaseGradLogPDF"
            )

        _check_length_equal(position, momentum, "position", "momentum")
        _check_length_equal(position, model.variables, "position", "model.variables")

        if grad_log_position is None:
            grad_log_position, _ = grad_log_pdf(position, model).get_gradient_log_pdf()

        else:
            grad_log_position = _check_1d_array_object(
                grad_log_position, "grad_log_position"
            )
            _check_length_equal(
                grad_log_position, position, "grad_log_position", "position"
            )

        self.position = position
        self.momentum = momentum
        self.stepsize = stepsize
        self.model = model
        self.grad_log_pdf = grad_log_pdf
        self.grad_log_position = grad_log_position

        # new_position is the new proposed position, new_momentum is the new proposed momentum,
        # and new_grad_logp is the value of the gradient of the log pdf at new_position
        self.new_position = self.new_momentum = self.new_grad_logp = None
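
Note that grad_log_pdf is validated with issubclass rather than isinstance, so callers pass the class itself, not an instance; it is instantiated internally as grad_log_pdf(position, model) when the gradient is needed. The sampler docstrings elsewhere on this page follow the same convention, for example (model being the GaussianDistribution built in those docstrings):

    # The class, not an instance, is handed in; instantiation happens internally
    # via grad_log_pdf(position, model).get_gradient_log_pdf().
    sampler = NUTS(model=model, grad_log_pdf=GradLogPDFGaussian)
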
github pgmpy / pgmpy / pgmpy / sampling / NUTS.py (View on GitHub)
        >>> samples = sampler.sample(initial_pos=np.array([1, 1, 1]), num_samples=10,
        ...                          stepsize=0.4, return_type='dataframe')
        >>> samples
                  x         y         z
        0  1.000000  1.000000  1.000000
        1  1.760756  0.271543 -0.613309
        2  1.883387  0.990745 -0.611720
        3  0.980812  0.340336 -0.916283
        4  0.781338  0.647220 -0.948640
        5  0.040308 -1.391406  0.412201
        6  1.179549 -1.450552  1.105216
        7  1.100320 -1.313926  1.207815
        8  1.484520 -1.349247  0.768599
        9  0.934942 -1.894589  0.471772
        """
        initial_pos = _check_1d_array_object(initial_pos, "initial_pos")
        _check_length_equal(
            initial_pos, self.model.variables, "initial_pos", "model.variables"
        )

        if stepsize is None:
            stepsize = self._find_reasonable_stepsize(initial_pos)

        types = [(var_name, "float") for var_name in self.model.variables]
        samples = np.zeros(num_samples, dtype=types).view(np.recarray)

        samples[0] = tuple(initial_pos)
        position_m = initial_pos

        for i in tqdm(range(1, num_samples)):
            # Generating sample
            position_m = self._sample(position_m, stepsize)
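
The recarray return path above allocates a structured array with one float field per model variable, views it as a numpy recarray, and stores each drawn position as a tuple. A standalone sketch of that storage pattern with made-up variable names:

    import numpy as np

    variables = ['x', 'y', 'z']  # stand-in for model.variables
    num_samples = 5

    # One float field per variable; the recarray view makes columns addressable
    # as samples['x'], samples['y'], ...
    types = [(var_name, "float") for var_name in variables]
    samples = np.zeros(num_samples, dtype=types).view(np.recarray)

    # Each row is assigned as a tuple, mirroring samples[0] = tuple(initial_pos) above.
    samples[0] = tuple(np.array([1.0, 1.0, 1.0]))
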
github pgmpy / pgmpy / pgmpy / sampling / NUTS.py (View on GitHub)
        >>> sampler = NUTS(model=model, grad_log_pdf=GradLogPDFGaussian)
        >>> samples = sampler.generate_sample(initial_pos=np.array([1, 1]), num_samples=10, stepsize=0.4)
        >>> samples = np.array([sample for sample in samples])
        >>> samples
        array([[ 10.26357538,   0.10062725],
               [ 12.70600336,   0.63392499],
               [ 10.95523217,  -0.62079273],
               [ 10.66263031,  -4.08135962],
               [ 10.59255762,  -8.48085076],
               [  9.99860242,  -9.47096032],
               [ 10.5733564 ,  -9.83504745],
               [ 11.51302059,  -9.49919523],
               [ 11.31892143,  -8.5873259 ],
               [ 11.29008667,  -0.43809674]])
        """
        initial_pos = _check_1d_array_object(initial_pos, "initial_pos")
        _check_length_equal(
            initial_pos, self.model.variables, "initial_pos", "model.variables"
        )

        if stepsize is None:
            stepsize = self._find_reasonable_stepsize(initial_pos)

        position_m = initial_pos

        for _ in range(0, num_samples):

            position_m = self._sample(position_m, stepsize)

            yield position_m
github pgmpy / pgmpy / pgmpy / sampling / HMC.py (View on GitHub)
        3   1.608700e+00   1.315349e+00
        4   6.843856e-01   6.237043e-01
        >>> mean = np.array([4, 1, -1])
        >>> covariance = np.array([[1, 0.7 , 0.8], [0.7, 1, 0.2], [0.8, 0.2, 1]])
        >>> model = JGD(['x', 'y', 'z'], mean, covariance)
        >>> sampler = HMC(model=model, grad_log_pdf=GLPG)
        >>> samples = sampler.sample(np.array([1, 1, 1]), num_samples = 10000,
        ...                          trajectory_length=6, stepsize=0.25, return_type='dataframe')
        >>> np.cov(samples.values.T)
        array([[ 1.00795398,  0.71384233,  0.79802097],
               [ 0.71384233,  1.00633524,  0.21313767],
               [ 0.79802097,  0.21313767,  0.98519017]])
        """

        self.accepted_proposals = 1.0
        initial_pos = _check_1d_array_object(initial_pos, "initial_pos")
        _check_length_equal(
            initial_pos, self.model.variables, "initial_pos", "model.variables"
        )

        if stepsize is None:
            stepsize = self._find_reasonable_stepsize(initial_pos)

        types = [(var_name, "float") for var_name in self.model.variables]
        samples = np.zeros(num_samples, dtype=types).view(np.recarray)

        # Assign after converting into a tuple, because the value was being changed after assignment;
        # the reason for this is unknown
        samples[0] = tuple(initial_pos)
        position_m = initial_pos

        lsteps = int(max(1, round(trajectory_length / stepsize, 0)))
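
The last line above turns the requested trajectory length into a whole number of leapfrog steps by dividing it by the stepsize and rounding, never going below one step. With the docstring values used above (trajectory_length=6, stepsize=0.25) that works out to 24 steps per proposal:

    trajectory_length, stepsize = 6, 0.25
    lsteps = int(max(1, round(trajectory_length / stepsize, 0)))
    assert lsteps == 24
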
github pgmpy / pgmpy / pgmpy / models / JointGaussian.py (View on GitHub)
        grad_log_pdf: A subclass of pgmpy.inference.continuous.BaseGradLogPDF, defaults to None
            A custom class for computing the gradient of the log and the log of the
            distribution for a given assignment. If None, they will be computed directly.

        Example
        -------

        Returns
        -------
        A tuple of the following types (in order)

        numpy.array: A 1d numpy.array representing the value of the gradient of the log of the JointGaussianDistribution

        float: A floating point value representing the log of the JointGaussianDistribution
        """
        variable_assignment = _check_1d_array_object(variable_assignment, 'variable_assignment')
        _check_length_equal(variable_assignment, self.variables, 'variable_assignment', 'variables')

        if grad_log_pdf is not None:
            if not issubclass(grad_log_pdf, BaseGradLogPDF):
                raise TypeError("grad_log_pdf must be an instance" +
                                " of pgmpy.inference.continuous.base.BaseGradLogPDF")
            return grad_log_pdf(variable_assignment, self).get_gradient_log_pdf()

        sub_vec = variable_assignment - self.mean
        grad = - np.dot(self.precision_matrix, sub_vec)
        log_pdf = 0.5 * np.dot(sub_vec, grad)

        return grad, log_pdf
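
When no grad_log_pdf class is supplied, the method falls back to the closed form: grad = -P(x - mu) and log_pdf = 0.5 * dot(x - mu, grad), i.e. the quadratic term -0.5 (x - mu)^T P (x - mu) of the Gaussian log density (the normalising constant is dropped, which is harmless for MCMC acceptance ratios). Below is a small numeric sketch of those two lines, using the 2-d mean and covariance from the docstrings above and a made-up assignment; the precision matrix is simply the inverse of the covariance here:

    import numpy as np

    mean = np.array([1.0, 1.0])
    covariance = np.array([[1.0, 0.7], [0.7, 3.0]])
    precision_matrix = np.linalg.inv(covariance)

    variable_assignment = np.array([2.0, 0.0])  # made-up query point

    # Same arithmetic as the fallback branch above.
    sub_vec = variable_assignment - mean
    grad = -np.dot(precision_matrix, sub_vec)
    log_pdf = 0.5 * np.dot(sub_vec, grad)  # -0.5 * (x - mu)^T P (x - mu)
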
github pgmpy / pgmpy / pgmpy / sampling / HMC.py (View on GitHub)
        >>> from pgmpy.sampling import HamiltonianMCDA as HMCda, GradLogPDFGaussian as GLPG, LeapFrog
        >>> from pgmpy.factors.continuous import GaussianDistribution as JGD
        >>> import numpy as np
        >>> mean = np.array([1, 1])
        >>> covariance = np.array([[1, 0.7], [0.7, 3]])
        >>> model = JGD(['x', 'y'], mean, covariance)
        >>> sampler = HMCda(model=model, grad_log_pdf=GLPG, simulate_dynamics=LeapFrog)
        >>> gen_samples = sampler.generate_sample(np.array([1, 1]), num_adapt=10000,
        ...                                       num_samples = 10000, trajectory_length=2, stepsize=None)
        >>> samples_array = np.array([sample for sample in gen_samples])
        >>> np.cov(samples_array.T)
        array([[ 0.98432155,  0.69517394],
               [ 0.69517394,  2.95449533]])
        """
        self.accepted_proposals = 0
        initial_pos = _check_1d_array_object(initial_pos, "initial_pos")
        _check_length_equal(
            initial_pos, self.model.variables, "initial_pos", "model.variables"
        )

        if stepsize is None:
            stepsize = self._find_reasonable_stepsize(initial_pos)

        if num_adapt <= 1:  # Yield samples generated using the simple HMC algorithm
            for sample in HamiltonianMC.generate_sample(
                self, initial_pos, num_samples, trajectory_length, stepsize
            ):
                yield sample
            return
        mu = np.log(10.0 * stepsize)

        stepsize_bar = 1.0
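
mu, stepsize_bar and h_bar are the state of the dual-averaging stepsize adaptation from Hoffman and Gelman's NUTS paper: mu is the value the log stepsize is shrunk towards, stepsize_bar is the running averaged stepsize, and h_bar accumulates the gap between the target and observed acceptance probabilities. pgmpy's own update helper is not shown on this page, so the sketch below follows the published scheme instead; the constants delta, gamma, t0 and kappa are the paper's suggested defaults, not values taken from pgmpy.

    import numpy as np

    def adapt_stepsize(alpha, m, mu, stepsize_bar, h_bar,
                       delta=0.65, gamma=0.05, t0=10.0, kappa=0.75):
        """One dual-averaging update (Hoffman & Gelman, 2014)."""
        # Running average of (target acceptance - observed acceptance).
        h_bar = (1.0 - 1.0 / (m + t0)) * h_bar + (delta - alpha) / (m + t0)
        # Propose a new log stepsize, shrunk towards mu.
        log_stepsize = mu - (np.sqrt(m) / gamma) * h_bar
        # Weighted running average of the log stepsize.
        log_stepsize_bar = (
            (m ** -kappa) * log_stepsize + (1.0 - m ** -kappa) * np.log(stepsize_bar)
        )
        return np.exp(log_stepsize), np.exp(log_stepsize_bar), h_bar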