# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
values = np.array(arr)
values = values.reshape(states, values.size // states)
tabular_cpds.append(TabularCPD(child_var, states, values))
model.add_cpds(*tabular_cpds)
return model
elif self.network_type == "MARKOV":
model = MarkovModel(self.edges)
factors = []
for table in self.tables:
variables = table[0]
cardinality = [int(self.domain[var]) for var in variables]
value = list(map(float, table[1]))
factor = DiscreteFactor(
variables=variables, cardinality=cardinality, values=value
)
factors.append(factor)
model.add_factors(*factors)
return model
# If false, then it is not used to create any clique potential
is_used = {factor: False for factor in self.factors}
for node in clique_trees.nodes():
clique_factors = []
for factor in self.factors:
# If the factor is not used in creating any clique potential as
# well as has any variable of the given clique in its scope,
# then use it in creating clique potential
if not is_used[factor] and set(factor.scope()).issubset(node):
clique_factors.append(factor)
is_used[factor] = True
# To compute clique potential, initially set it as unity factor
var_card = [self.get_cardinality()[x] for x in node]
clique_potential = DiscreteFactor(
node, var_card, np.ones(np.product(var_card))
)
# multiply it with the factors associated with the variables present
# in the clique (or node)
# Checking if there's clique_factors, to handle the case when clique_factors
# is empty, otherwise factor_product will throw an error [ref #889]
if clique_factors:
clique_potential *= factor_product(*clique_factors)
clique_trees.add_factors(clique_potential)
if not all(is_used.values()):
raise ValueError(
"All the factors were not used to create Junction Tree."
"Extra factors are defined."
)
factor_values = {}
for time_slice in range(1, time_range + 1):
evidence_time = self._get_evidence(evidence, time_slice, 1)
if interface_nodes_dict:
evidence_time.update(interface_nodes_dict)
if variable_dict[time_slice]:
variable_time = self._shift_nodes(variable_dict[time_slice], 1)
new_values = mid_bp.query(
variable_time, evidence=evidence_time, joint=False
)
changed_values = {}
for key in new_values.keys():
new_key = (key[0], time_slice)
new_factor = DiscreteFactor(
[new_key], new_values[key].cardinality, new_values[key].values
)
changed_values[new_key] = new_factor
factor_values.update(changed_values)
clique_phi = self._get_factor(mid_bp, evidence_time)
out_clique_phi = self._marginalize_factor(
self.interface_nodes_1, clique_phi
)
new_factor = self._shift_factor(out_clique_phi, 0)
potential_dict[time_slice] = new_factor
mid_bp = BeliefPropagation(self.one_and_half_junction_tree)
self._update_belief(mid_bp, self.in_clique, new_factor)
if evidence_time:
interface_nodes_dict = {
#!/usr/bin/env python3
"""Contains the different formats of CPDs used in PGM"""
from __future__ import division
from itertools import product
from warnings import warn
import numbers
import numpy as np
from pgmpy.factors.discrete import DiscreteFactor
from pgmpy.extern import tabulate
class TabularCPD(DiscreteFactor):
"""
Defines the conditional probability distribution table (cpd table)
Parameters
----------
variable: int, string (any hashable python object)
The variable whose CPD is defined.
variable_card: integer
cardinality of variable
values: 2d array, 2d list or 2d tuple
values of the cpd table
evidence: array-like
evidences(if any) w.r.t. which cpd is defined
import itertools
from operator import mul
from functools import reduce
import numpy as np
from pgmpy.factors.discrete import DiscreteFactor
from pgmpy.independencies import Independencies
class JointProbabilityDistribution(DiscreteFactor):
"""
Base class for Joint Probability Distribution
"""
def __init__(self, variables, cardinality, values):
"""
Initialize a Joint Probability Distribution class.
Defined above, we have the following mapping from variable
assignments to the index of the row vector in the value field:
+-----+-----+-----+-------------------------+
| x1  | x2  | x3  |    P(x1, x2, x3)        |
+-----+-----+-----+-------------------------+
| x1_0| x2_0| x3_0| P(x1_0, x2_0, x3_0) |
+-----+-----+-----+-------------------------+
... evidence=['diff', 'intel'],
... evidence_card=[2, 3])
>>> bm.add_cpds(diff_cpd, intel_cpd, grade_cpd)
>>> val = [0.01, 0.01, 0.08, 0.006, 0.006, 0.048, 0.004, 0.004, 0.032,
... 0.04, 0.04, 0.32, 0.024, 0.024, 0.192, 0.016, 0.016, 0.128]
>>> JPD = JointProbabilityDistribution(['diff', 'intel', 'grade'], [2, 3, 3], val)
>>> JPD.is_imap(bm)
True
"""
from pgmpy.models import BayesianModel
if not isinstance(model, BayesianModel):
raise TypeError("model must be an instance of BayesianModel")
factors = [cpd.to_factor() for cpd in model.get_cpds()]
factor_prod = reduce(mul, factors)
JPD_fact = DiscreteFactor(self.variables, self.cardinality, self.values)
if JPD_fact == factor_prod:
return True
else:
return False
errors. At the same time it also updates the cardinalities of all the
random variables.
* Check whether bipartite property of factor graph is still maintained
or not.
* Check whether factors are associated for all the random variables or not.
* Check if factors are defined for each factor node or not.
* Check if cardinality information for all the variables is available or not.
* Check if cardinality of random variable remains same across all the
factors.
"""
variable_nodes = set([x for factor in self.factors for x in factor.scope()])
factor_nodes = set(self.nodes()) - variable_nodes
if not all(
isinstance(factor_node, DiscreteFactor) for factor_node in factor_nodes
):
raise ValueError("Factors not associated for all the random variables")
if not (bipartite.is_bipartite(self)) or not (
bipartite.is_bipartite_node_set(self, variable_nodes)
or bipartite.is_bipartite_node_set(self, variable_nodes)
):
raise ValueError("Edges can only be between variables and factors")
if len(factor_nodes) != len(self.factors):
raise ValueError("Factors not associated with all the factor nodes.")
cardinalities = self.get_cardinality()
if len(variable_nodes) != len(cardinalities):
raise ValueError("Factors for all the variables not defined")
These clusters are then appended to the model.
Parameters
----------
triangles_list : list
The list of variables forming the triangles to be updated. It is of the form of
[['var_5', 'var_8', 'var_7'], ['var_4', 'var_5', 'var_7']]
"""
new_intersection_set = []
for triangle_vars in triangles_list:
cardinalities = [self.cardinality[variable] for variable in triangle_vars]
current_intersection_set = [
frozenset(intersect) for intersect in it.combinations(triangle_vars, 2)
]
current_factor = DiscreteFactor(
triangle_vars, cardinalities, np.zeros(np.prod(cardinalities))
)
self.cluster_set[frozenset(triangle_vars)] = self.Cluster(
current_intersection_set, current_factor
)
# add new factors
self.model.factors.append(current_factor)
# add new intersection sets
new_intersection_set.extend(current_intersection_set)
# add new factors in objective
self.objective[frozenset(triangle_vars)] = current_factor