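# The snippets below are test methods excerpted from GPyTorch's test suite; they assume a
# unittest.TestCase context (self.assertTrue, setUp fixtures, etc.). The import block below is a
# sketch of what they collectively rely on -- the exact module paths are an assumption and may
# differ between GPyTorch versions; the original test files carry their own import blocks.
import math
from math import exp

import torch

from gpytorch import settings
from gpytorch.kernels import RBFKernel
from gpytorch.lazy import NonLazyTensor
from gpytorch.priors import LKJCholeskyFactorPrior, LKJCovariancePrior, LKJPrior, SmoothedBoxPrior
from gpytorch.utils import approx_equal, pivoted_cholesky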

                [-0.0000, 0.0000, 0.0039, -0.0352, -0.0352, 0.0039, 0.0000, -0.0000, -0.0000, 0.0000],
                [0.0000, 0.0000, -0.0000, -0.0542, -0.0000, -0.0000, -0.0000, -0.0142, -0.0000, -0.0000],
            ]
        ),
        torch.tensor(
            [
                [-0.0002, 0.0022, 0.0022, -0.0002],
                [0.0000, 0.0044, 0.0000, 0.0000],
                [0.0000, -0.0000, -0.0000, 0.0000],
                [0.0000, 0.0015, 0.0000, 0.0000],
            ]
        ),
    ],
    1,
)
self.assertTrue(approx_equal(values, actual_values))

def test_solve_qr(self, dtype=torch.float64, tol=1e-8):
    size = 50
    X = torch.rand((size, 2)).to(dtype=dtype)
    y = torch.sin(torch.sum(X, 1)).unsqueeze(-1).to(dtype=dtype)
    with settings.min_preconditioning_size(0):
        noise = torch.DoubleTensor(size).uniform_(math.log(1e-3), math.log(1e-1)).exp_().to(dtype=dtype)
        lazy_tsr = RBFKernel().to(dtype=dtype)(X).evaluate_kernel().add_diag(noise)
        precondition_qr, _, logdet_qr = lazy_tsr._preconditioner()
        F = lazy_tsr._piv_chol_self
        M = noise.diag() + F.matmul(F.t())
        x_exact = torch.solve(y, M)[0]
        x_qr = precondition_qr(y)
        self.assertTrue(approx_equal(x_exact, x_qr, tol))
        logdet = 2 * torch.cholesky(M).diag().log().sum(-1)
        self.assertTrue(approx_equal(logdet, logdet_qr, tol))
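
# A standalone sketch of the identity the test above relies on (plain torch, no GPyTorch
# internals): for a symmetric positive-definite M, log|M| equals twice the sum of the log of the
# diagonal of its Cholesky factor, which is how `logdet` is formed from M = diag(noise) + F F^T.
def _logdet_via_cholesky_sketch():
    M = torch.tensor([[4.0, 1.0], [1.0, 3.0]], dtype=torch.float64)
    logdet_chol = 2 * torch.cholesky(M).diag().log().sum(-1)
    assert torch.allclose(logdet_chol, torch.logdet(M))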

def test_lkj_prior_log_prob(self, cuda=False):
    device = torch.device("cuda") if cuda else torch.device("cpu")
    prior = LKJPrior(2, torch.tensor(0.5, device=device))
    S = torch.eye(2, device=device)
    self.assertAlmostEqual(prior.log_prob(S).item(), -1.86942, places=4)
    S = torch.stack([S, torch.tensor([[1.0, 0.5], [0.5, 1]], device=S.device)])
    self.assertTrue(approx_equal(prior.log_prob(S), torch.tensor([-1.86942, -1.72558], device=S.device)))
    with self.assertRaises(ValueError):
        prior.log_prob(torch.eye(3, device=device))
    # For eta=1.0 log_prob is flat over all covariance matrices
    prior = LKJPrior(2, torch.tensor(1.0, device=device))
    self.assertTrue(torch.all(prior.log_prob(S) == prior.C))
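
# Background sketch (an assumption about LKJPrior's parameterization, consistent with the checks
# above rather than taken from the library docs): the LKJ density is proportional to
# det(S)^(eta - 1), so log_prob(S) = C + (eta - 1) * logdet(S) with C the log normalizer exposed
# here as `prior.C`. For eta = 1 the density is constant, which the final assertion verifies.
def _lkj_unnormalized_log_prob_sketch(S, eta):
    # Unnormalized LKJ log-density for a correlation matrix S with concentration eta.
    return (eta - 1) * torch.logdet(S)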

def test_smoothed_box_prior_log_prob_log_transform(self, cuda=False):
    device = torch.device("cuda") if cuda else torch.device("cpu")
    a, b = torch.zeros(2, device=device), torch.ones(2, device=device)
    sigma = 0.1
    prior = SmoothedBoxPrior(a, b, sigma, transform=torch.exp)
    t = torch.tensor([0.5, 1.1], device=device).log()
    self.assertAlmostEqual(prior.log_prob(t).item(), -0.9473, places=4)
    t = torch.tensor([[0.5, 1.1], [0.1, 0.25]], device=device).log()
    log_prob_expected = torch.tensor([-0.947347, -0.447347], device=t.device)
    self.assertTrue(torch.all(approx_equal(prior.log_prob(t), log_prob_expected)))
    with self.assertRaises(RuntimeError):
        prior.log_prob(torch.ones(3, device=device))
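
# Sketch of the convention the test above exercises (stated as an assumption, not taken from the
# library docs): passing `transform=torch.exp` means the prior evaluates its density on
# transform(t), so feeding log-space values reproduces the untransformed prior's log_prob.
def _smoothed_box_transform_sketch(a, b, sigma, x):
    plain = SmoothedBoxPrior(a, b, sigma)
    logspace = SmoothedBoxPrior(a, b, sigma, transform=torch.exp)
    # Under the stated assumption these two evaluations should agree (up to numerics).
    return plain.log_prob(x), logspace.log_prob(x.log())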

def test_lkj_prior_batch_log_prob(self, cuda=False):
    device = torch.device("cuda") if cuda else torch.device("cpu")
    prior = LKJPrior(2, torch.tensor([0.5, 1.5], device=device))
    S = torch.eye(2, device=device)
    self.assertTrue(approx_equal(prior.log_prob(S), torch.tensor([-1.86942, -0.483129], device=S.device)))
    S = torch.stack([S, torch.tensor([[1.0, 0.5], [0.5, 1]], device=S.device)])
    self.assertTrue(approx_equal(prior.log_prob(S), torch.tensor([-1.86942, -0.62697], device=S.device)))
    with self.assertRaises(ValueError):
        prior.log_prob(torch.eye(3, device=device))

        torch.linspace(0, 1.5, size).unsqueeze(0),
        torch.linspace(0, 1, size).unsqueeze(0),
        torch.linspace(0, 0.5, size).unsqueeze(0),
        torch.linspace(0, 0.25, size).unsqueeze(0),
        torch.linspace(0, 1.25, size).unsqueeze(0),
        torch.linspace(0, 1.25, size).unsqueeze(0),
        torch.linspace(0, 1.5, size).unsqueeze(0),
        torch.linspace(0, 1, size).unsqueeze(0),
    ],
    0,
).unsqueeze(-1)
covar_matrix = RBFKernel()(train_x, train_x).evaluate().view(2, 2, 3, size, size)
piv_chol = pivoted_cholesky.pivoted_cholesky(covar_matrix, 10)
covar_approx = piv_chol @ piv_chol.transpose(-1, -2)
self.assertTrue(approx_equal(covar_approx, covar_matrix, 2e-4))
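
# Standalone sketch of the property checked above: a (pivoted) Cholesky factor L of a positive
# semi-definite matrix K reconstructs K as L @ L^T. The test uses a rank-10 pivoted factor; this
# toy check uses an exact factor, so the identity holds to machine precision.
def _low_rank_reconstruction_sketch():
    K = torch.full((4, 4), 0.1, dtype=torch.float64) + torch.eye(4, dtype=torch.float64)
    L = torch.cholesky(K)
    assert torch.allclose(L @ L.transpose(-1, -2), K)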

def test_lkj_covariance_prior_batch_log_prob(self, cuda=False):
    device = torch.device("cuda") if cuda else torch.device("cpu")
    v = torch.ones(2, 1, device=device)
    sd_prior = SmoothedBoxPrior(exp(-1) * v, exp(1) * v)
    prior = LKJCovariancePrior(2, torch.tensor([0.5, 1.5], device=device), sd_prior)
    S = torch.eye(2, device=device)
    self.assertTrue(approx_equal(prior.log_prob(S), torch.tensor([-3.59981, -2.21351], device=S.device)))
    S = torch.stack([S, torch.tensor([[1.0, 0.5], [0.5, 1]], device=S.device)])
    self.assertTrue(approx_equal(prior.log_prob(S), torch.tensor([-3.59981, -2.35735], device=S.device)))
    with self.assertRaises(ValueError):
        prior.log_prob(torch.eye(3, device=device))
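
# Note the relation to test_lkj_prior_batch_log_prob above: each expected value here differs from
# the corresponding LKJPrior value by the same constant
# (-3.59981 - (-1.86942) = -2.21351 - (-0.483129) = -2.35735 - (-0.62697) ~ -1.73038),
# consistent with LKJCovariancePrior adding the log_prob of the standard-deviation prior (unit
# standard deviations under sd_prior here) to the LKJ correlation term.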

def test_lkj_cholesky_factor_prior_batch_log_prob(self, cuda=False):
    device = torch.device("cuda") if cuda else torch.device("cpu")
    prior = LKJCholeskyFactorPrior(2, torch.tensor([0.5, 1.5], device=device))
    S = torch.eye(2, device=device)
    S_chol = torch.cholesky(S)
    self.assertTrue(approx_equal(prior.log_prob(S_chol), torch.tensor([-1.86942, -0.483129], device=S_chol.device)))
    S = torch.stack([S, torch.tensor([[1.0, 0.5], [0.5, 1]], device=S.device)])
    S_chol = torch.stack([torch.cholesky(Si) for Si in S])
    self.assertTrue(approx_equal(prior.log_prob(S_chol), torch.tensor([-1.86942, -0.62697], device=S_chol.device)))
    with self.assertRaises(ValueError):
        prior.log_prob(torch.eye(3, device=device))

def test_matmul_vec(self):
    # Forward
    res = NonLazyTensor(self.mat).matmul(self.vec)
    actual = self.mat_copy.matmul(self.vec_copy)
    self.assertTrue(approx_equal(res, actual))
    # Backward
    grad_output = torch.randn(3)
    res.backward(gradient=grad_output)
    actual.backward(gradient=grad_output)
    self.assertTrue(approx_equal(self.mat_copy.grad, self.mat.grad))
    self.assertTrue(approx_equal(self.vec_copy.grad, self.vec.grad))
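
# The fixtures self.mat / self.vec (and their copies) come from a setUp method not shown here. A
# minimal, self-contained version of the same forward/backward comparison pattern with plain torch
# tensors (hypothetical shapes, chosen to match the randn(3) gradient above):
def _matmul_backward_pattern_sketch():
    mat = torch.randn(3, 3, requires_grad=True)
    vec = torch.randn(3, requires_grad=True)
    out = mat.matmul(vec)
    # Backpropagate an arbitrary gradient for the non-scalar output.
    out.backward(gradient=torch.randn(3))
    return mat.grad, vec.grad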

def test_lkj_cholesky_factor_prior_log_prob(self, cuda=False):
    device = torch.device("cuda") if cuda else torch.device("cpu")
    prior = LKJCholeskyFactorPrior(2, torch.tensor(0.5, device=device))
    S = torch.eye(2, device=device)
    S_chol = torch.cholesky(S)
    self.assertAlmostEqual(prior.log_prob(S_chol).item(), -1.86942, places=4)
    S = torch.stack([S, torch.tensor([[1.0, 0.5], [0.5, 1]], device=S_chol.device)])
    S_chol = torch.stack([torch.cholesky(Si) for Si in S])
    self.assertTrue(approx_equal(prior.log_prob(S_chol), torch.tensor([-1.86942, -1.72558], device=S_chol.device)))
    with self.assertRaises(ValueError):
        prior.log_prob(torch.eye(3, device=device))
    # For eta=1.0 log_prob is flat over all covariance matrices
    prior = LKJCholeskyFactorPrior(2, torch.tensor(1.0, device=device))
    self.assertTrue(torch.all(prior.log_prob(S_chol) == prior.C))
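
# The expected values here match test_lkj_prior_log_prob above (-1.86942, -1.72558), consistent
# with LKJCholeskyFactorPrior evaluating the same LKJ density but parameterized by the Cholesky
# factor L of S = L @ L^T rather than by S itself. A tiny sketch of that relationship:
def _chol_factor_roundtrip_sketch():
    S = torch.tensor([[1.0, 0.5], [0.5, 1.0]])
    L = torch.cholesky(S)
    assert torch.allclose(L @ L.t(), S)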