# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def __init__(self, train_x):
    """Variational GP classifier: Cholesky variational posterior placed at the training inputs."""
    var_dist = CholeskyVariationalDistribution(train_x.size(0))
    var_strat = VariationalStrategy(self, train_x, var_dist)
    super(GPClassificationModel, self).__init__(var_strat)
    # Constant mean with a scaled RBF covariance.
    self.mean_module = gpytorch.means.ConstantMean()
    self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
def __init__(self, train_x, train_y, likelihood):
    """Exact GP regression model: constant mean, scaled RBF covariance."""
    super(ExactGPModel, self).__init__(train_x, train_y, likelihood)
    base_kernel = gpytorch.kernels.RBFKernel()
    self.mean_module = gpytorch.means.ConstantMean()
    self.covar_module = gpytorch.kernels.ScaleKernel(base_kernel)
def __init__(self):
    """Variational GP over a 2-D domain with per-dimension bounds (-3, 3).

    NOTE(review): relies on enclosing-scope names supplied by the test factory
    (distribution_cls, strategy_cls, num_inducing, batch_shape, constant_mean).
    """
    var_dist = distribution_cls(num_inducing ** 2, batch_shape=batch_shape)
    var_strat = strategy_cls(self, num_inducing, [(-3, 3), (-3, 3)], var_dist)
    super().__init__(var_strat)
    # Optionally use a constant mean initialized to 1.0; otherwise a zero mean.
    if constant_mean:
        mean = gpytorch.means.ConstantMean()
        mean.initialize(constant=1.0)
    else:
        mean = gpytorch.means.ZeroMean()
    self.mean_module = mean
    self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
def sq_dist_func(x1, x2):
    """Return the squared-distance matrix between x1 and x2 via gpytorch's Distance helper."""
    # postprocess=torch.tensor(False) mirrors gpytorch's internal calling convention.
    return gpytorch.kernels.kernel.Distance()._sq_dist(x1, x2, postprocess=torch.tensor(False))
def __init__(self, inducing_points, learn_locs=True):
    """SVGP regression model with a whitened variational strategy.

    NOTE(review): the variational distribution is sized from
    inducing_points.size(-1) — confirm that matches the expected layout.
    """
    var_dist = CholeskyVariationalDistribution(inducing_points.size(-1))
    var_strat = WhitenedVariationalStrategy(
        self, inducing_points, var_dist, learn_inducing_locations=learn_locs
    )
    super(SVGPRegressionModel, self).__init__(var_strat)
    # RBF kernel with a smoothed-box prior on the lengthscale.
    lengthscale_prior = gpytorch.priors.SmoothedBoxPrior(0.001, 1.0, sigma=0.1)
    self.mean_module = gpytorch.means.ConstantMean()
    self.covar_module = gpytorch.kernels.ScaleKernel(
        gpytorch.kernels.RBFKernel(lengthscale_prior=lengthscale_prior)
    )
def __init__(self, train_x, train_y, likelihood):
    """Exact GP with constant mean and scale-wrapped RBF kernel."""
    super().__init__(train_x, train_y, likelihood)
    rbf = gpytorch.kernels.RBFKernel()
    self.mean_module = gpytorch.means.ConstantMean()
    self.covar_module = gpytorch.kernels.ScaleKernel(rbf)
def __init__(self, inducing_points):
    """Variational GP with learnable inducing locations.

    NOTE(review): distribution_cls, strategy_cls, num_inducing, batch_shape and
    constant_mean are free variables from the enclosing test-factory scope.
    """
    var_dist = distribution_cls(num_inducing, batch_shape=batch_shape)
    var_strat = strategy_cls(
        self, inducing_points, var_dist, learn_inducing_locations=True
    )
    super().__init__(var_strat)
    # Constant mean (initialized to 1.0) or zero mean, per the factory flag.
    if constant_mean:
        mean = gpytorch.means.ConstantMean()
        mean.initialize(constant=1.0)
    else:
        mean = gpytorch.means.ZeroMean()
    self.mean_module = mean
    self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
def __init__(self, train_x, train_y, likelihood):
    """Exact GP whose covariance is the sum of a scaled RBF and a scaled Matern-1/2 kernel."""
    super().__init__(train_x, train_y, likelihood)
    self.mean_module = gpytorch.means.ConstantMean()
    rbf_part = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
    matern_part = gpytorch.kernels.ScaleKernel(gpytorch.kernels.MaternKernel(nu=0.5))
    self.covar_module = rbf_part + matern_part
def __init__(self, train_x, train_y, num_functions=2):
    """Multitask variational GP with num_functions latent functions on 64 inducing points in [0, 1]."""
    # Variational setup: 64 inducing points shared across a batch of latent functions.
    inducing = torch.linspace(0, 1, 64).unsqueeze(-1)
    var_dist = gpytorch.variational.CholeskyVariationalDistribution(
        num_inducing_points=inducing.size(-2), batch_shape=torch.Size([num_functions])
    )
    base_strategy = gpytorch.variational.VariationalStrategy(self, inducing, var_dist)
    var_strat = gpytorch.variational.MultitaskVariationalStrategy(
        base_strategy, num_tasks=num_functions
    )
    super().__init__(var_strat)

    # Zero mean with a scaled RBF covariance.
    self.mean_module = gpytorch.means.ZeroMean()
    self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

    # Bookkeeping used elsewhere in the model.
    self.name_prefix = "llcmgp"
    self.num_data, self.num_tasks = train_y.shape
    self.num_functions = num_functions

    # Likelihood parameters: per-(task, function) mixing logits and a raw noise scalar.
    self.register_parameter(
        "variational_logits", torch.nn.Parameter(torch.randn(self.num_tasks, self.num_functions))
    )
    self.register_parameter("raw_noise", torch.nn.Parameter(torch.tensor(0.0)))
def __init__(self, train_x, train_y, likelihood):
    """Exact GP over function values and gradients (derivative-aware mean and kernel).

    NOTE(review): num_params is a free variable from the enclosing scope — confirm it
    equals the input dimensionality expected by ard_num_dims.
    """
    super(GPModelWithDerivatives, self).__init__(train_x, train_y, likelihood)
    self.mean_module = gpytorch.means.ConstantMeanGrad()
    # Keep a handle on the unscaled kernel so tests can inspect/initialize it directly.
    self.base_kernel = gpytorch.kernels.RBFKernelGrad(ard_num_dims=num_params)
    self.covar_module = gpytorch.kernels.ScaleKernel(self.base_kernel)