# record the MissedDetection hypothesis for this track
prob_misdetect = Probability.sum(
    joint_hypothesis.probability
    for joint_hypothesis in joint_hypotheses
    if not joint_hypothesis.hypotheses[track].measurement)
single_measurement_hypotheses.append(
    SingleProbabilityHypothesis(
        hypotheses[track][0].prediction,
        MissedDetection(timestamp=time),
        measurement_prediction=hypotheses[track][0].measurement_prediction,
        probability=prob_misdetect))

# record hypothesis for any given Detection being associated with
# this track
for detection in detections:
    pro_detect_assoc = Probability.sum(
        joint_hypothesis.probability
        for joint_hypothesis in joint_hypotheses
        if joint_hypothesis.hypotheses[track].measurement is detection)

    single_measurement_hypotheses.append(
        SingleProbabilityHypothesis(
            hypotheses[track][0].prediction,
            detection,
            measurement_prediction=hypotheses[track][0].measurement_prediction,
            probability=pro_detect_assoc))
result = MultipleHypothesis(single_measurement_hypotheses, True, 1)
new_hypotheses[track] = result
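
# The loop above marginalises joint-hypothesis probabilities into per-track
# association probabilities. A minimal standalone sketch of that idea, using
# plain dicts and floats rather than the Stone Soup types (all names below are
# illustrative assumptions, not library API):
def marginal_association_probabilities(joint_hypotheses, track, detections):
    # joint_hypotheses: list of (assignment, probability) pairs, where
    # assignment maps each track to a detection object or None (missed detection)
    prob_missed = sum(prob for assignment, prob in joint_hypotheses
                      if assignment[track] is None)
    prob_detect = {
        detection: sum(prob for assignment, prob in joint_hypotheses
                       if assignment[track] is detection)
        for detection in detections}
    return prob_missed, prob_detect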
weights : np.array of shape (num_components,)
    The weights of the GM components

Returns
-------
np.array of shape (num_dims, 1)
    The mean of the reduced/single Gaussian
np.array of shape (num_dims, num_dims)
    The covariance of the reduced/single Gaussian
"""
# Compute dimensionality variables
num_components, num_dims = np.shape(means)

# Normalise weights such that they sum to 1
weights = weights/Probability.sum(weights)

# Calculate mean
mean = np.average(means, axis=0, weights=weights).astype(np.float_)
mean.shape = (1, num_dims)

# Calculate covar
covar = np.zeros((num_dims, num_dims))
for i in range(num_components):
    v = means[i, :] - mean
    a = np.add(covars[i], v.T@v)
    b = weights[i]
    covar = np.add(covar, b*a)

return mean.transpose(), covar
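
# A rough NumPy-only restatement of the moment-matching reduction above: the
# weighted mean of the component means, plus the weighted sum of component
# covariances and spreads of the means. It assumes plain float weights instead
# of Probability objects and is an illustrative sketch, not part of the library:
import numpy as np

def reduce_mixture(means, covars, weights):
    # means: (num_components, num_dims); covars: (num_components, num_dims, num_dims)
    means = np.asarray(means, dtype=float)
    covars = np.asarray(covars, dtype=float)
    weights = np.asarray(weights, dtype=float)
    weights = weights / weights.sum()
    mean = weights @ means                   # weighted average of component means
    diffs = means - mean                     # spread of each component mean
    covar = sum(w * (P + np.outer(d, d))
                for w, P, d in zip(weights, covars, diffs))
    return mean, covar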
def normalise_probabilities(self, total_weight):
    # verify that SingleHypotheses composing this MultipleHypothesis
    # all have Probabilities
    if any(not hasattr(hypothesis, 'probability')
           for hypothesis in self.single_hypotheses):
        raise ValueError("MultipleHypothesis not composed of Probability"
                         " hypotheses!")

    sum_weights = Probability.sum(
        hypothesis.probability for hypothesis in self.single_hypotheses)

    for hypothesis in self.single_hypotheses:
        hypothesis.probability = \
            (hypothesis.probability * total_weight)/sum_weights
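
# Tiny illustration of the rescaling performed by normalise_probabilities,
# with plain floats and a hypothetical helper name: divide each weight by the
# current sum and multiply by the desired total weight.
def rescale_weights(weights, total_weight=1.0):
    current_sum = sum(weights)
    return [w * total_weight / current_sum for w in weights]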
enum_JPDA_hypotheses = (
    joint_hypothesis
    for joint_hypothesis in itertools.product(*possible_assoc)
    if cls.isvalid(joint_hypothesis))

# turn the valid JPDA joint hypotheses into 'JointHypothesis'
joint_hypotheses = list()
for joint_hypothesis in enum_JPDA_hypotheses:
    local_hypotheses = {}
    for track, hypothesis in zip(tracks, joint_hypothesis):
        local_hypotheses[track] = \
            multihypths[track][hypothesis.measurement]

    joint_hypotheses.append(
        ProbabilityJointHypothesis(local_hypotheses))

# normalize ProbabilityJointHypotheses relative to each other
sum_probabilities = Probability.sum(hypothesis.probability
                                    for hypothesis in joint_hypotheses)
for hypothesis in joint_hypotheses:
    hypothesis.probability /= sum_probabilities

return joint_hypotheses
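
# Standalone sketch of the same enumeration idea (not the library's
# enumerate_JPDA_hypotheses): take the Cartesian product of each track's
# candidate assignments, keep only joint assignments in which no detection is
# claimed by two tracks, and normalise the joint probabilities to sum to 1.
import itertools

def enumerate_joint_assignments(candidates_per_track):
    # candidates_per_track: one list per track of (detection_or_None, probability)
    joint = []
    for combo in itertools.product(*candidates_per_track):
        used = [det for det, _ in combo if det is not None]
        if len(used) == len(set(used)):      # each detection used at most once
            prob = 1.0
            for _, p in combo:
                prob *= p
            joint.append((combo, prob))
    total = sum(p for _, p in joint)
    return [(combo, p / total) for combo, p in joint]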
    for track in tracks}

# enumerate the Joint Hypotheses of track/detection associations
joint_hypotheses = \
    self.enumerate_JPDA_hypotheses(tracks, hypotheses, self.gate_ratio)

# Calculate MultiMeasurementHypothesis for each Track over all
# available Detections with probabilities drawn from JointHypotheses
new_hypotheses = dict()

for track in tracks:
    single_measurement_hypotheses = list()

    # record the MissedDetection hypothesis for this track
    prob_misdetect = Probability.sum(
        joint_hypothesis.probability
        for joint_hypothesis in joint_hypotheses
        if not joint_hypothesis.hypotheses[track].measurement)
    single_measurement_hypotheses.append(
        SingleProbabilityHypothesis(
            hypotheses[track][0].prediction,
            MissedDetection(timestamp=time),
            measurement_prediction=hypotheses[track][0].measurement_prediction,
            probability=prob_misdetect))

    # record hypothesis for any given Detection being associated with
    # this track
    for detection in detections:
        pro_detect_assoc = Probability.sum(
            joint_hypothesis.probability
            for joint_hypothesis in joint_hypotheses
            if joint_hypothesis.hypotheses[track].measurement is detection)

self.weighted_measurements = list()

# verify that 'measurements' and 'weights' are the same size and the
# correct data types
if any(not isinstance(measurement, Detection)
       for measurement in measurements):
    raise Exception('measurements must all be of type Detection!')
if any(not isinstance(weight, Probability) for weight in weights):
    raise Exception('weights must all be of type Probability!')
if len(measurements) != len(weights):
    raise Exception('There must be the same number of weights '
                    'and measurements!')

# normalize the weights to sum up to 1 if indicated
if normalize is True:
    sum_weights = Probability.sum(weights)
    for index in range(0, len(weights)):
        weights[index] /= sum_weights

# store probabilities and measurements in 'weighted_measurements'
for index in range(0, len(measurements)):
    self.weighted_measurements.append(
        {"measurement": measurements[index],
         "weight": weights[index]})
Returns
-------
: :class:`~.ParticleState`
    The state posterior
"""
if hypothesis.measurement.measurement_model is None:
    measurement_model = self.measurement_model
else:
    measurement_model = hypothesis.measurement.measurement_model

for particle in hypothesis.prediction.particles:
    particle.weight *= measurement_model.pdf(
        hypothesis.measurement.state_vector, particle.state_vector,
        **kwargs)

# Normalise the weights
sum_w = Probability.sum(
    i.weight for i in hypothesis.prediction.particles)
for particle in hypothesis.prediction.particles:
    particle.weight /= sum_w

# Resample
new_particles = self.resampler.resample(
    hypothesis.prediction.particles)

return ParticleStateUpdate(new_particles,
                           hypothesis,
                           timestamp=hypothesis.measurement.timestamp)
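
# NumPy-only sketch of the same reweight/normalise/resample step for a
# bootstrap particle filter. The likelihood callable and the array-based
# particle representation are assumptions made for illustration, not the
# Stone Soup API.
import numpy as np

def particle_update(particles, weights, measurement, likelihood, rng=None):
    # particles: (num_particles, state_dim) array; weights: (num_particles,) array
    rng = np.random.default_rng() if rng is None else rng
    weights = weights * np.array(
        [likelihood(measurement, state) for state in particles])
    weights = weights / weights.sum()        # normalise the weights
    idx = rng.choice(len(particles), size=len(particles), p=weights)  # resample
    return particles[idx], np.full(len(particles), 1.0 / len(particles))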