How to use the gpytorch.mlls module in gpytorch

To help you get started, we’ve selected a few gpytorch.mlls examples, based on popular ways the module is used in public projects.

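All of the examples below follow the same pattern: build a model and a likelihood, wrap them in an objective from gpytorch.mlls, and minimize the negative marginal log likelihood (or ELBO) with a standard PyTorch optimizer. The following is a minimal, self-contained sketch of that pattern; the toy data and the ExactGPModel class are illustrative assumptions, not taken from the projects below.

import math

import torch
import gpytorch


# Toy 1-D regression data (purely illustrative)
train_x = torch.linspace(0, 1, 100)
train_y = torch.sin(train_x * (2 * math.pi)) + 0.1 * torch.randn(100)


class ExactGPModel(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

    def forward(self, x):
        # Return the GP prior evaluated at the inputs x
        return gpytorch.distributions.MultivariateNormal(self.mean_module(x), self.covar_module(x))


likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = ExactGPModel(train_x, train_y, likelihood)

# The MLL object turns model output + targets into a differentiable objective
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)

model.train()
likelihood.train()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)  # includes likelihood parameters

for _ in range(50):
    optimizer.zero_grad()
    output = model(train_x)
    loss = -mll(output, train_y)  # maximize the MLL by minimizing its negative
    loss.backward()
    optimizer.step()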

github cornellius-gp / gpytorch / test / examples / test_kissgp_additive_regression.py
def test_kissgp_gp_mean_abs_error(self):
        likelihood = GaussianLikelihood()
        gp_model = GPRegressionModel(train_x, train_y, likelihood)
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

        with gpytorch.settings.max_preconditioner_size(10), gpytorch.settings.max_cg_iterations(50):
            with gpytorch.settings.fast_pred_var():
                # Optimize the model
                gp_model.train()
                likelihood.train()

                optimizer = optim.Adam(gp_model.parameters(), lr=0.01)
                optimizer.n_iter = 0
                for _ in range(15):
                    optimizer.zero_grad()
                    output = gp_model(train_x)
                    loss = -mll(output, train_y)
                    loss.backward()
                    optimizer.n_iter += 1
                    optimizer.step()
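
The snippet above references a GPRegressionModel defined elsewhere in the test file. For orientation only, a minimal KISS-GP style exact-GP model might look like the sketch below; the grid size and kernel choice are assumptions, not the exact definitions from the test.

import gpytorch


class GPRegressionModel(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        # KISS-GP: interpolate an RBF kernel onto a regular grid for fast matrix-vector multiplies
        self.covar_module = gpytorch.kernels.GridInterpolationKernel(
            gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel()), grid_size=100, num_dims=1
        )

    def forward(self, x):
        return gpytorch.distributions.MultivariateNormal(self.mean_module(x), self.covar_module(x))
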
github cornellius-gp / gpytorch / test / examples / test_batch_whitened_svgp_gp_regression.py
def test_regression_error_shared_inducing_locations(self):
        train_x, train_y = train_data()
        likelihood = GaussianLikelihood()
        inducing_points = torch.linspace(0, 1, 25).unsqueeze(-1)
        model = SVGPRegressionModel(inducing_points)
        mll = gpytorch.mlls.VariationalELBO(likelihood, model, num_data=train_y.size(-1))

        # Find optimal model hyperparameters
        model.train()
        likelihood.train()
        optimizer = optim.Adam([{"params": model.parameters()}, {"params": likelihood.parameters()}], lr=0.01)
        for _ in range(200):
            optimizer.zero_grad()
            output = model(train_x)
            loss = -mll(output, train_y)
            loss = loss.sum()
            loss.backward()
            optimizer.step()

        for param in model.parameters():
            self.assertTrue(param.grad is not None)
            self.assertGreater(param.grad.norm().item(), 0)
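
The SVGP tests here use an SVGPRegressionModel defined elsewhere in the test files. A minimal approximate-GP model that works with gpytorch.mlls.VariationalELBO might look like the following sketch; the variational strategy and kernel are illustrative assumptions, not the test's own definition.

import gpytorch


class SVGPRegressionModel(gpytorch.models.ApproximateGP):
    def __init__(self, inducing_points):
        # One variational parameter set per inducing point
        variational_distribution = gpytorch.variational.CholeskyVariationalDistribution(inducing_points.size(0))
        variational_strategy = gpytorch.variational.VariationalStrategy(
            self, inducing_points, variational_distribution, learn_inducing_locations=True
        )
        super().__init__(variational_strategy)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

    def forward(self, x):
        return gpytorch.distributions.MultivariateNormal(self.mean_module(x), self.covar_module(x))
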
github cornellius-gp / gpytorch / test / examples / test_whitened_svgp_gp_regression.py
def test_regression_error(self, cuda=False, skip_logdet_forward=False, cholesky=False):
        train_x, train_y = train_data(cuda=cuda)
        likelihood = GaussianLikelihood()
        inducing_points = torch.linspace(0, 1, 25)
        model = SVGPRegressionModel(inducing_points=inducing_points, learn_locs=False)
        if cuda:
            likelihood.cuda()
            model.cuda()
        mll = gpytorch.mlls.VariationalELBO(likelihood, model, num_data=len(train_y))

        # Find optimal model hyperparameters
        model.train()
        likelihood.train()
        optimizer = optim.Adam([{"params": model.parameters()}, {"params": likelihood.parameters()}], lr=0.01)

        _wrapped_cg = MagicMock(wraps=gpytorch.utils.linear_cg)
        with gpytorch.settings.max_cholesky_size(math.inf if cholesky else 0), \
                gpytorch.settings.skip_logdet_forward(skip_logdet_forward), \
                warnings.catch_warnings(record=True) as w, \
                patch("gpytorch.utils.linear_cg", new=_wrapped_cg) as linear_cg_mock:
            for _ in range(200):
                optimizer.zero_grad()
                output = model(train_x)
                loss = -mll(output, train_y)
                loss.backward()
                optimizer.step()
github cornellius-gp / gpytorch / test / examples / test_kissgp_variational_regression.py
def test_kissgp_gp_mean_abs_error(self):
        train_x, train_y, test_x, test_y = make_data()
        train_dataset = TensorDataset(train_x, train_y)
        train_loader = DataLoader(train_dataset, shuffle=True, batch_size=64)

        model = GPRegressionModel()
        likelihood = GaussianLikelihood()
        # Our loss object
        # We're using the VariationalMarginalLogLikelihood object
        mll = gpytorch.mlls.VariationalMarginalLogLikelihood(likelihood, model, num_data=train_y.size(0))

        # Optimize with Adam; a learning rate scheduler (defined below) lowers the learning rate during training
        optimizer = torch.optim.Adam([{"params": model.parameters()}, {"params": likelihood.parameters()}], lr=0.01)

        # The training loop
        def train(n_epochs=15):
            # We use a learning rate scheduler from PyTorch to lower the learning rate during optimization
            # We're going to drop the learning rate by 1/10 after 3/4 of training
            # This helps the model converge to a minimum
            scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(0.75 * n_epochs)], gamma=0.1)

            for _ in range(n_epochs):
                for x_batch, y_batch in train_loader:
                    x_batch = x_batch.float()
                    y_batch = y_batch.float()
                    optimizer.zero_grad()
                    output = model(x_batch)
                    loss = -mll(output, y_batch)
                    loss.backward()
                    optimizer.step()
                # Step the scheduler once per epoch, after the optimizer updates
                scheduler.step()
github cornellius-gp / gpytorch / test / examples / test_kissgp_additive_classification.py
def test_kissgp_classification_error(self):
        with gpytorch.settings.use_toeplitz(False), gpytorch.settings.max_preconditioner_size(5):
            model = GPClassificationModel()
            likelihood = BernoulliLikelihood()
            mll = gpytorch.mlls.VariationalELBO(likelihood, model, num_data=len(train_y))

            # Find optimal model hyperparameters
            model.train()
            likelihood.train()

            optimizer = optim.Adam(model.parameters(), lr=0.15)
            optimizer.n_iter = 0
            for _ in range(25):
                optimizer.zero_grad()
                # Get predictive output
                output = model(train_x)
                # Calc loss and backprop gradients
                loss = -mll(output, train_y)
                loss.backward()
                optimizer.n_iter += 1
                optimizer.step()
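
After training, a classifier like this is usually put into eval mode and its thresholded predictive probabilities are compared against the labels. The following is a hedged sketch of that evaluation step, continuing from the snippet above and assuming test_x and test_y (with 0/1 labels) exist.

model.eval()
likelihood.eval()
with torch.no_grad():
    # BernoulliLikelihood maps the latent GP output to per-point class probabilities
    probs = likelihood(model(test_x)).mean
    preds = probs.ge(0.5).float()
    error = (preds - test_y).abs().mean()
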
github getkeops / keops / pykeops / tutorials / backends / plot_gpytorch.py
# GP training
# -----------------
# The code below is now a direct copy-paste from the
# `GPyTorch 101 tutorial `_:

# Find optimal model hyperparameters
model.train()
likelihood.train()

# Use the adam optimizer
optimizer = torch.optim.Adam([
    {'params': model.parameters()},  # Includes GaussianLikelihood parameters
], lr=0.1)

# "Loss" for GPs - the marginal log likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)

training_iter = 50
for i in range(training_iter):
    # Zero gradients from previous iteration
    optimizer.zero_grad()
    # Output from model
    output = model(train_x)
    # Calc loss and backprop gradients
    loss = -mll(output, train_y)
    loss.backward()
    if i % 10 == 0 or i == training_iter - 1:
        print('Iter %d/%d - Loss: %.3f   lengthscale: %.3f   noise: %.3f' % (
            i + 1, training_iter, loss.item(),
            model.covar_module.base_kernel.lengthscale.item(),
            model.likelihood.noise.item()
        ))
    optimizer.step()
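
Once the loop above finishes, the trained model is typically switched to eval mode for prediction. A short sketch of that follow-up step, continuing from the tutorial code above (the test inputs here are an illustrative assumption):

model.eval()
likelihood.eval()
with torch.no_grad(), gpytorch.settings.fast_pred_var():
    test_x = torch.linspace(0, 1, 51)
    observed_pred = likelihood(model(test_x))
    mean = observed_pred.mean
    lower, upper = observed_pred.confidence_region()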