def test_estimate_default_gradient_descent_so3(self):
    points = self.so3.random_uniform(2)

    mean_vec = FrechetMean(
        metric=self.so3.bi_invariant_metric, method='default')
    mean_vec.fit(points)

    logs = self.so3.bi_invariant_metric.log(points, mean_vec.estimate_)
    result = gs.sum(logs, axis=0)
    expected = gs.zeros_like(points[0])
    self.assertAllClose(result, expected)
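# The test above checks the first-order condition of a Frechet (Karcher)
# mean: the Riemannian logs of the samples, taken at the mean, sum to zero.
# A minimal sketch of the same condition in the Euclidean case, where
# log(point, base_point) reduces to point - base_point (plain numpy,
# illustrative only):
import numpy as np

points = np.random.rand(10, 3)
mean = points.mean(axis=0)          # Euclidean Frechet mean
logs = points - mean                # Euclidean log map at the mean
assert np.allclose(logs.sum(axis=0), 0.0, atol=1e-12)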
# check mean value for concentrated distribution
kappa = 1000000
points = sphere.random_von_mises_fisher(kappa, n_points)
sum_points = gs.sum(points, axis=0)
mean = gs.array([0., 0., 1.])
mean_estimate = sum_points / gs.linalg.norm(sum_points)
expected = mean
result = mean_estimate
self.assertTrue(
    gs.allclose(result, expected, atol=MEAN_ESTIMATION_TOL)
)
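# The mean direction of a von Mises-Fisher sample is estimated by the
# normalized resultant (sum) vector, as above. A self-contained sketch
# with a crude stand-in for vMF sampling (Gaussian noise around a mean
# direction, re-projected onto the sphere; illustrative only):
import numpy as np

rng = np.random.default_rng(0)
mu = np.array([0., 0., 1.])
samples = mu + 0.01 * rng.standard_normal((1000, 3))   # concentrated cloud
samples /= np.linalg.norm(samples, axis=1, keepdims=True)
resultant = samples.sum(axis=0)
mu_hat = resultant / np.linalg.norm(resultant)
assert np.allclose(mu_hat, mu, atol=1e-2)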
# check concentration parameter for dispersed distribution
kappa = 1
points = sphere.random_von_mises_fisher(kappa, n_points)
sum_points = gs.sum(points, axis=0)
mean_norm = gs.linalg.norm(sum_points) / n_points
kappa_estimate = (mean_norm * (dim + 1. - mean_norm ** 2)
                  / (1. - mean_norm ** 2))
p = dim + 1
n_steps = 100
for _ in range(n_steps):
    bessel_func_1 = scipy.special.iv(p / 2., kappa_estimate)
    bessel_func_2 = scipy.special.iv(p / 2. - 1., kappa_estimate)
    ratio = bessel_func_1 / bessel_func_2
    denominator = 1. - ratio ** 2 - (p - 1.) * ratio / kappa_estimate
    kappa_estimate = kappa_estimate - (ratio - mean_norm) / denominator
expected = kappa
result = kappa_estimate
self.assertTrue(
    gs.allclose(result, expected, atol=KAPPA_ESTIMATION_TOL)
)
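# The loop above is Newton's method for solving
#     A_p(kappa) = I_{p/2}(kappa) / I_{p/2 - 1}(kappa) = mean_norm,
# starting from the closed-form approximation
#     kappa_0 = r * (p - r**2) / (1 - r**2).
# A self-contained sketch with assumed inputs (illustrative only):
import scipy.special

def estimate_kappa(mean_norm, p, n_steps=100):
    """Newton iteration for the vMF concentration on the (p-1)-sphere."""
    kappa = mean_norm * (p - mean_norm ** 2) / (1. - mean_norm ** 2)
    for _ in range(n_steps):
        ratio = (scipy.special.iv(p / 2., kappa)
                 / scipy.special.iv(p / 2. - 1., kappa))
        # d A_p / d kappa = 1 - A_p**2 - (p - 1) * A_p / kappa
        slope = 1. - ratio ** 2 - (p - 1.) * ratio / kappa
        kappa -= (ratio - mean_norm) / slope
    return kappa

print(estimate_kappa(mean_norm=0.5, p=3))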
def objective(velocity):
    """Define the objective function."""
    velocity = velocity.reshape(base_point.shape)
    delta = self.exp(velocity, base_point, n_steps, step) - point
    loss = 1. / self.dim * gs.sum(delta ** 2, axis=-1)
    return 1. / n_samples * gs.sum(loss)

objective_grad = autograd.elementwise_grad(objective)

def objective_with_grad(velocity):
    """Return the objective and its autograd gradient, for L-BFGS-B."""
    return objective(velocity), objective_grad(velocity)

# Random initial velocity; gs.sum over the shape assumes a 1-D base_point,
# where the sum of the shape tuple equals the number of components.
tangent_vec = gs.random.rand(gs.sum(gs.shape(base_point)))

res = minimize(
    objective_with_grad, tangent_vec, method='L-BFGS-B', jac=True,
    options={'disp': False, 'maxiter': 25})

tangent_vec = res.x
tangent_vec = gs.reshape(tangent_vec, base_point.shape)
return tangent_vec
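# The block above computes a Riemannian log by "shooting": it searches for
# the initial velocity whose exponential lands on the target point. A
# self-contained sketch in flat space, where exp(v, p) = p + v and the
# recovered velocity should equal point - base_point (autograd + scipy;
# illustrative only):
import autograd
import autograd.numpy as anp
from scipy.optimize import minimize

base_point = anp.array([0., 1., 2.])
point = anp.array([1., 1., 0.])

def objective(velocity):
    delta = (base_point + velocity) - point   # flat-space exp map
    return anp.sum(delta ** 2)

res = minimize(
    autograd.value_and_grad(objective), anp.zeros(3),
    method='L-BFGS-B', jac=True)
assert anp.allclose(res.x, point - base_point, atol=1e-6)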
    Point in hyperbolic space.

Returns
-------
mobius_add : array-like, shape=[..., dim]
    Result of the Mobius addition.
"""
ball_manifold = PoincareBall(self.dim, scale=self.scale)
point_a_belong = ball_manifold.belongs(point_a)
point_b_belong = ball_manifold.belongs(point_b)
if not gs.all(point_a_belong) or not gs.all(point_b_belong):
    raise ValueError("Points do not belong to the Poincare ball")

norm_point_a = gs.sum(point_a ** 2, axis=-1, keepdims=True)
norm_point_b = gs.sum(point_b ** 2, axis=-1, keepdims=True)
sum_prod_a_b = gs.einsum('...i,...i->...', point_a, point_b)
sum_prod_a_b = gs.expand_dims(sum_prod_a_b, axis=-1)

# Mobius addition:
# a (+) b = ((1 + 2<a, b> + |b|^2) a + (1 - |a|^2) b)
#           / (1 + 2<a, b> + |a|^2 |b|^2)
add_num_1 = 1 + 2 * sum_prod_a_b + norm_point_b
add_num_1 = gs.einsum('...i,...k->...k', add_num_1, point_a)
add_num_2 = gs.einsum('...i,...k->...k', 1 - norm_point_a, point_b)
add_numerator = add_num_1 + add_num_2
add_denominator = 1 + 2 * sum_prod_a_b + norm_point_a * norm_point_b

mobius_add = gs.einsum(
    '...i,...k->...i', add_numerator, 1 / add_denominator)
return mobius_add
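# A plain-numpy sketch of the same Mobius addition formula, with sanity
# checks: a (+) (-a) = 0 for any point a in the open unit ball, and the
# origin is the identity element (illustrative only):
import numpy as np

def mobius_add(a, b):
    dot = np.dot(a, b)
    na2, nb2 = np.dot(a, a), np.dot(b, b)
    num = (1 + 2 * dot + nb2) * a + (1 - na2) * b
    return num / (1 + 2 * dot + na2 * nb2)

a = np.array([0.3, -0.2, 0.1])
assert np.allclose(mobius_add(a, -a), 0.0)
assert np.allclose(mobius_add(np.zeros(3), a), a)   # 0 is the identity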
def __init__(self, group,
             inner_product_mat_at_identity=None,
             left_or_right='left', **kwargs):
    super(InvariantMetric, self).__init__(dim=group.dim, **kwargs)
    self.group = group
    if inner_product_mat_at_identity is None:
        inner_product_mat_at_identity = gs.eye(self.group.dim)

    geomstats.errors.check_parameter_accepted_values(
        left_or_right, 'left_or_right', ['left', 'right'])

    # The metric's signature is read off the eigenvalues of the
    # inner-product matrix at the identity.
    eigenvalues = gs.linalg.eigvalsh(inner_product_mat_at_identity)
    mask_pos_eigval = gs.greater(eigenvalues, 0.)
    n_pos_eigval = gs.sum(gs.cast(mask_pos_eigval, gs.int32))
    mask_neg_eigval = gs.less(eigenvalues, 0.)
    n_neg_eigval = gs.sum(gs.cast(mask_neg_eigval, gs.int32))
    mask_null_eigval = gs.isclose(eigenvalues, 0.)
    n_null_eigval = gs.sum(gs.cast(mask_null_eigval, gs.int32))

    self.inner_product_mat_at_identity = inner_product_mat_at_identity
    self.left_or_right = left_or_right
    self.signature = (n_pos_eigval, n_null_eigval, n_neg_eigval)
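# A minimal numpy sketch of the signature computation above: count the
# positive, null, and negative eigenvalues of a symmetric matrix
# (illustrative only):
import numpy as np

mat = np.diag([2., 1., 0., -3.])
eigvals = np.linalg.eigvalsh(mat)
n_pos = int(np.sum(eigvals > 0))
n_null = int(np.sum(np.isclose(eigvals, 0.)))
n_neg = int(np.sum(eigvals < 0))
print((n_pos, n_null, n_neg))   # (2, 1, 1)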
if self.point_type == 'extrinsic':
    sq_norm_a = self.embedding_metric.squared_norm(point_a)
    sq_norm_b = self.embedding_metric.squared_norm(point_b)
    inner_prod = self.embedding_metric.inner_product(point_a, point_b)

    cosh_angle = - inner_prod / gs.sqrt(sq_norm_a * sq_norm_b)
    cosh_angle = gs.clip(cosh_angle, 1.0, 1e24)
    dist = gs.arccosh(cosh_angle)
    return self.scale * dist

elif self.point_type == 'ball':
    point_a_norm = gs.clip(gs.sum(point_a ** 2, -1), 0., 1 - EPSILON)
    point_b_norm = gs.clip(gs.sum(point_b ** 2, -1), 0., 1 - EPSILON)
    diff_norm = gs.sum((point_a - point_b) ** 2, -1)
    norm_function = 1 + 2 * diff_norm / (
        (1 - point_a_norm) * (1 - point_b_norm))
    dist = gs.log(norm_function + gs.sqrt(norm_function ** 2 - 1))
    return self.scale * dist

else:
    raise NotImplementedError(
        'dist is only implemented for ball and extrinsic')
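# A self-contained numpy sketch of the extrinsic (hyperboloid) distance
# above. For points on the unit hyperboloid the Minkowski squared norms
# are -1, so dist = arccosh(-<a, b>_Minkowski); two points at hyperbolic
# "angles" t_a and t_b should be |t_a - t_b| apart (illustrative only):
import numpy as np

def minkowski_inner(a, b):
    return -a[0] * b[0] + np.dot(a[1:], b[1:])

def hyperboloid_point(t):
    return np.array([np.cosh(t), np.sinh(t), 0.])

a, b = hyperboloid_point(0.5), hyperboloid_point(2.0)
dist = np.arccosh(-minkowski_inner(a, b))
assert np.isclose(dist, 1.5)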
def dist(self, point_a, point_b):
    """Compute the geodesic distance between two points.

    Parameters
    ----------
    point_a : array-like, shape=[..., dim]
        First point in hyperbolic space.
    point_b : array-like, shape=[..., dim]
        Second point in hyperbolic space.

    Returns
    -------
    dist : array-like, shape=[...,]
        Geodesic distance between the two points.
    """
    point_a_norm = gs.clip(gs.sum(point_a ** 2, -1), 0., 1 - EPSILON)
    point_b_norm = gs.clip(gs.sum(point_b ** 2, -1), 0., 1 - EPSILON)
    diff_norm = gs.sum((point_a - point_b) ** 2, -1)
    norm_function = 1 + 2 * diff_norm / (
        (1 - point_a_norm) * (1 - point_b_norm))
    dist = gs.log(norm_function + gs.sqrt(norm_function ** 2 - 1))
    dist *= self.scale
    return dist
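# The body above is the Poincare ball distance
#     dist(a, b) = arccosh(1 + 2 |a - b|^2 / ((1 - |a|^2)(1 - |b|^2))),
# written via the identity arccosh(x) = log(x + sqrt(x^2 - 1)).
# A plain-numpy sketch checking the two forms agree (illustrative only):
import numpy as np

a = np.array([0.1, 0.2])
b = np.array([-0.3, 0.4])
x = 1 + 2 * np.sum((a - b) ** 2) / (
    (1 - np.sum(a ** 2)) * (1 - np.sum(b ** 2)))
assert np.isclose(np.log(x + np.sqrt(x ** 2 - 1)), np.arccosh(x))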
grad_term_1 = 1 / variances

grad_term_21 = 1 / gs.sum(term_2, axis=-1, keepdims=True)

grad_term_211 = gs.exp(prod_alpha_sigma ** 2) \
    * (1 + gs.erf(prod_alpha_sigma)) \
    * gs.einsum('ij,j->ij', sigma_repeated, alpha ** 2) * 2

grad_term_212 = gs.repeat(
    gs.expand_dims(2 / gs.sqrt(gs.pi) * alpha, axis=0),
    variances.shape[0], axis=0)

grad_term_22 = grad_term_211 + grad_term_212
grad_term_22 = gs.einsum('ij,j->ij', grad_term_22, beta)
grad_term_22 = gs.sum(grad_term_22, axis=-1, keepdims=True)

norm_factor_gradient = grad_term_1 + (grad_term_21 * grad_term_22)

return gs.squeeze(norm_factor), gs.squeeze(norm_factor_gradient)
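# The einsum pattern 'ij,j->ij' used above scales each column j of a
# matrix by the j-th entry of a vector, i.e. plain broadcasting along
# rows. A minimal numpy sketch (illustrative only):
import numpy as np

mat = np.arange(6.).reshape(2, 3)
vec = np.array([10., 100., 1000.])
assert np.allclose(np.einsum('ij,j->ij', mat, vec), mat * vec)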