def __init__(self, inducing_points, learn_locs=True):
    variational_distribution = CholeskyVariationalDistribution(inducing_points.size(-1))
    variational_strategy = WhitenedVariationalStrategy(
        self, inducing_points, variational_distribution, learn_inducing_locations=learn_locs
    )
    super(SVGPRegressionModel, self).__init__(variational_strategy)
    self.mean_module = gpytorch.means.ConstantMean()
    self.covar_module = gpytorch.kernels.ScaleKernel(
        gpytorch.kernels.RBFKernel(lengthscale_prior=gpytorch.priors.SmoothedBoxPrior(0.001, 1.0, sigma=0.1))
    )
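
# A minimal usage sketch (not from the source): it assumes SVGPRegressionModel also defines
# the usual `forward` returning a gpytorch MultivariateNormal, and that
# WhitenedVariationalStrategy is available (it comes from older GPyTorch releases).
# The data below is synthetic.
import torch
import gpytorch

train_x = torch.linspace(0, 1, 100)
train_y = torch.sin(train_x * 6.28) + 0.1 * torch.randn(100)

model = SVGPRegressionModel(inducing_points=torch.linspace(0, 1, 25))
likelihood = gpytorch.likelihoods.GaussianLikelihood()
mll = gpytorch.mlls.VariationalELBO(likelihood, model, num_data=train_y.numel())
optimizer = torch.optim.Adam(list(model.parameters()) + list(likelihood.parameters()), lr=0.01)

model.train()
likelihood.train()
for _ in range(200):
    optimizer.zero_grad()
    loss = -mll(model(train_x), train_y)
    loss.backward()
    optimizer.step()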
def test_computes_radial_basis_function_gradient(self):
    softplus = torch.nn.functional.softplus
    a = torch.tensor([4, 2, 8], dtype=torch.float).view(3, 1)
    b = torch.tensor([0, 2, 2], dtype=torch.float).view(3, 1)
    lengthscale = 2
    kernel = RBFKernel().initialize(lengthscale=lengthscale)
    kernel.eval()
    param = math.log(math.exp(lengthscale) - 1) * torch.ones(3, 3)
    param.requires_grad_()
    diffs = a.expand(3, 3) - b.expand(3, 3).transpose(0, 1)
    actual_output = (-0.5 * (diffs / softplus(param)) ** 2).exp()
    actual_output.backward(gradient=torch.eye(3))
    actual_param_grad = param.grad.sum()
    output = kernel(a, b).evaluate()
    output.backward(gradient=torch.eye(3))
    res = kernel.raw_lengthscale.grad
    self.assertLess(torch.norm(res - actual_param_grad), 1e-5)
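
# Why `math.log(math.exp(lengthscale) - 1)`: GPyTorch stores positive hyperparameters as
# unconstrained raw values mapped through softplus, so the test builds the raw parameter
# with the inverse softplus and differentiates with respect to it. A quick round-trip
# check of that transform (illustrative only):
import math
import torch

lengthscale = 2.0
raw = math.log(math.exp(lengthscale) - 1)  # inverse softplus
recovered = torch.nn.functional.softplus(torch.tensor(raw))
assert torch.isclose(recovered, torch.tensor(lengthscale))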
def test_diag(self):
    AddK = NewtonGirardAdditiveKernel(RBFKernel(ard_num_dims=3), 3, 2)
    self.assertEqual(AddK.base_kernel.lengthscale.numel(), 3)
    self.assertEqual(AddK.outputscale.numel(), 2)
    testvals = torch.tensor([[1, 2, 3], [7, 5, 2]], dtype=torch.float)
    add_k_val = AddK(testvals, testvals).diag()
    manual_k1 = ScaleKernel(
        AdditiveKernel(RBFKernel(active_dims=[0]), RBFKernel(active_dims=[1]), RBFKernel(active_dims=[2]))
    )
    manual_k1.initialize(outputscale=1 / 2)
    manual_k2 = ScaleKernel(
        AdditiveKernel(RBFKernel(active_dims=[0, 1]), RBFKernel(active_dims=[1, 2]), RBFKernel(active_dims=[0, 2]))
    )
    manual_k2.initialize(outputscale=1 / 2)
    manual_k = AdditiveKernel(manual_k1, manual_k2)
    manual_add_k_val = manual_k(testvals, testvals).diag()
    self.assertTrue(torch.allclose(add_k_val, manual_add_k_val, atol=1e-5))
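
# The kernel is named for the Newton-Girard identities, which build the elementary
# symmetric polynomials e_k of the per-dimension kernel values from power sums p_k;
# for the degree-2 term checked above, e2 = (e1**2 - p2) / 2. A scalar sanity check
# of that identity (hypothetical kernel values, illustrative only):
import torch

k = torch.tensor([0.9, 0.5, 0.7])  # per-dimension kernel values k_i(x, x')
e1 = k.sum()                       # sum over single dimensions
p2 = (k ** 2).sum()                # second power sum
e2_newton = (e1 ** 2 - p2) / 2
e2_direct = k[0] * k[1] + k[1] * k[2] + k[0] * k[2]  # sum over pairs
assert torch.isclose(e2_newton, e2_direct)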
def test_solve_qr_constant_noise(self, dtype=torch.float64, tol=1e-8):
    size = 50
    X = torch.rand((size, 2)).to(dtype=dtype)
    y = torch.sin(torch.sum(X, 1)).unsqueeze(-1).to(dtype=dtype)
    with settings.min_preconditioning_size(0):
        noise = 1e-2 * torch.ones(size, dtype=dtype)
        lazy_tsr = RBFKernel().to(dtype=dtype)(X).evaluate_kernel().add_diag(noise)
        precondition_qr, _, logdet_qr = lazy_tsr._preconditioner()
        F = lazy_tsr._piv_chol_self
        M = noise.diag() + F.matmul(F.t())
        # torch.solve(y, M)[0] was removed from PyTorch; torch.linalg.solve is the replacement
        x_exact = torch.linalg.solve(M, y)
        x_qr = precondition_qr(y)
        self.assertTrue(approx_equal(x_exact, x_qr, tol))
        logdet = 2 * torch.linalg.cholesky(M).diagonal().log().sum(-1)
        self.assertTrue(approx_equal(logdet, logdet_qr, tol))
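
# Shape of the test above: the QR preconditioner targets M = diag(noise) + F @ F.T, where
# F is a low-rank pivoted-Cholesky factor. A standalone check (random F, not gpytorch
# internals) that the logdet-via-Cholesky step matches a direct computation:
import torch

torch.manual_seed(0)
F = torch.randn(50, 5, dtype=torch.float64)
M = 1e-2 * torch.eye(50, dtype=torch.float64) + F @ F.T
logdet_chol = 2 * torch.linalg.cholesky(M).diagonal().log().sum()
assert torch.isclose(logdet_chol, torch.logdet(M))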
def test_computes_sum_radial_basis_function_gradient(self):
    softplus = torch.nn.functional.softplus
    a = torch.tensor([4, 2, 8], dtype=torch.float).view(3, 1)
    b = torch.tensor([0, 2, 2], dtype=torch.float).view(3, 1)
    lengthscale = 2
    param = math.log(math.exp(lengthscale) - 1) * torch.ones(3, 3)
    param.requires_grad_()
    diffs = a.expand(3, 3) - b.expand(3, 3).transpose(0, 1)
    actual_output = (-0.5 * (diffs / softplus(param)) ** 2).exp()
    actual_output.backward(torch.eye(3))
    actual_param_grad = param.grad.sum() * 2
    kernel_1 = RBFKernel().initialize(lengthscale=lengthscale)
    kernel_2 = RBFKernel().initialize(lengthscale=lengthscale)
    kernel = kernel_1 + kernel_2
    kernel.eval()
    output = kernel(a, b).evaluate()
    output.backward(gradient=torch.eye(3))
    res = kernel.kernels[0].raw_lengthscale.grad + kernel.kernels[1].raw_lengthscale.grad
    self.assertLess(torch.norm(res - actual_param_grad), 2e-5)
def __init__(self, train_inputs, train_targets, likelihood):
    super(ExactGPModel, self).__init__(train_inputs, train_targets, likelihood)
    self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1, 1))
    self.covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-3), exp(3), sigma=0.1)))
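
# For context, a hedged training sketch (not from the source): it assumes ExactGPModel also
# defines the standard ExactGP `forward` returning a MultivariateNormal, and uses synthetic
# data. ExactGP registers the likelihood as a submodule, so model.parameters() covers it.
import torch
import gpytorch

train_x = torch.linspace(0, 1, 50)
train_y = torch.sin(train_x * 6.28)
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = ExactGPModel(train_x, train_y, likelihood)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)

model.train()
likelihood.train()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
for _ in range(50):
    optimizer.zero_grad()
    loss = -mll(model(train_x), train_y)
    loss.backward()
    optimizer.step()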
def test_degree1(self):
    AddK = NewtonGirardAdditiveKernel(RBFKernel(ard_num_dims=3), 3, 1)
    self.assertEqual(AddK.base_kernel.lengthscale.numel(), 3)
    self.assertEqual(AddK.outputscale.numel(), 1)
    testvals = torch.tensor([[1, 2, 3], [7, 5, 2]], dtype=torch.float)
    add_k_val = AddK(testvals, testvals).evaluate()
    manual_k = ScaleKernel(
        AdditiveKernel(RBFKernel(active_dims=[0]), RBFKernel(active_dims=[1]), RBFKernel(active_dims=[2]))
    )
    manual_k.initialize(outputscale=1.0)
    manual_add_k_val = manual_k(testvals, testvals).evaluate()
    self.assertTrue(torch.allclose(add_k_val, manual_add_k_val, atol=1e-5))
def __init__(self, train_x, train_y, likelihood):
    super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
    self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1, 1))
    self.base_covar_module = ScaleKernel(RBFKernel())
    self.covar_module = ProductStructureKernel(
        GridInterpolationKernel(self.base_covar_module, grid_size=100, num_dims=1), num_dims=2
    )
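
# Hedged shape check (not from the source): ProductStructureKernel applies the 1D SKI
# kernel to each of the num_dims=2 input dimensions and multiplies the results, so one
# 100-point grid per dimension stands in for a 100**2 joint grid. Standalone construction:
import torch
import gpytorch

base = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
covar = gpytorch.kernels.ProductStructureKernel(
    gpytorch.kernels.GridInterpolationKernel(base, grid_size=100, num_dims=1), num_dims=2
)
x = torch.rand(10, 2)
print(covar(x, x).evaluate().shape)  # expected: torch.Size([10, 10])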
def __init__(self, train_x, train_y, likelihood, kernel='linear'):
    super(ExactGPLayer, self).__init__(train_x, train_y, likelihood)
    self.mean_module = gpytorch.means.ConstantMean()
    ## RBF kernel
    if kernel in ('rbf', 'RBF'):
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
    ## Spectral kernel
    elif kernel == 'spectral':
        self.covar_module = gpytorch.kernels.SpectralMixtureKernel(num_mixtures=4, ard_num_dims=2916)
    else:
        raise ValueError(f"[ERROR] the kernel '{kernel}' is not supported for regression, use 'rbf' or 'spectral'.")
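
# Hedged usage sketch (not from the source): constructing the layer with each supported
# kernel string. Assumes ExactGPLayer also defines the standard ExactGP `forward`; the
# tensors are placeholders sized to match the spectral kernel's ard_num_dims=2916. Note
# that the default kernel='linear' falls through to the ValueError, so callers must pass
# 'rbf' or 'spectral' explicitly.
import torch
import gpytorch

likelihood = gpytorch.likelihoods.GaussianLikelihood()
train_x = torch.randn(8, 2916)
train_y = torch.randn(8)
gp_rbf = ExactGPLayer(train_x, train_y, likelihood, kernel='rbf')
gp_spectral = ExactGPLayer(train_x, train_y, likelihood, kernel='spectral')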