How to use the gpytorch.settings.debug context manager in gpytorch

To help you get started, we’ve selected a few gpytorch examples, based on popular ways gpytorch.settings.debug is used in public projects.

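Before diving into the examples, here is a minimal sketch of the basic pattern (written for illustration, not taken from any of the projects below): gpytorch.settings.debug is a feature flag that is on by default, is typically toggled for the duration of a with block, and can be queried with gpytorch.settings.debug.on().

import gpytorch

# debug is on by default; it enables GPyTorch's extra runtime validation
# (argument/shape checks and related warnings).
assert gpytorch.settings.debug.on()

with gpytorch.settings.debug(False):
    # Inside the block the extra checks are skipped, which is why several
    # of the training loops below disable it for speed.
    assert not gpytorch.settings.debug.on()

# The previous state is restored on exit from the with block.
assert gpytorch.settings.debug.on()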

github cornellius-gp / gpytorch / test / examples / test_white_noise_regression.py
        gp_model = ExactGPModel(train_x, train_y, likelihood)
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)
        gp_model.rbf_covar_module.initialize(lengthscale=exp(1))
        gp_model.mean_module.initialize(constant=0)

        if cuda:
            gp_model.cuda()
            likelihood.cuda()

        # Find optimal model hyperparameters
        gp_model.train()
        likelihood.train()

        optimizer = optim.Adam(list(gp_model.parameters()) + list(likelihood.parameters()), lr=0.1)
        optimizer.n_iter = 0
        with gpytorch.settings.debug(False):
            for _ in range(75):
                optimizer.zero_grad()
                output = gp_model(train_x)
                loss = -mll(output, train_y)
                loss.backward()
                optimizer.n_iter += 1
                optimizer.step()

            for param in gp_model.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)
            for param in likelihood.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)
            optimizer.step()

github cornellius-gp / gpytorch / test / examples / test_grid_gp_regression.py
        train_x, train_y, test_x, test_y = make_data(grid, cuda=cuda)
        likelihood = gpytorch.likelihoods.GaussianLikelihood()
        gp_model = GridGPRegressionModel(grid, train_x, train_y, likelihood)
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

        if cuda:
            gp_model.cuda()
            likelihood.cuda()

        # Optimize the model
        gp_model.train()
        likelihood.train()

        optimizer = optim.Adam(list(gp_model.parameters()) + list(likelihood.parameters()), lr=0.1)
        optimizer.n_iter = 0
        with gpytorch.settings.debug(True):
            for _ in range(20):
                optimizer.zero_grad()
                output = gp_model(train_x)
                loss = -mll(output, train_y)
                loss.backward()
                optimizer.n_iter += 1
                optimizer.step()

            for name, param in gp_model.named_parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)
            for param in likelihood.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)

            # Test the model

github cornellius-gp / gpytorch / test / examples / test_white_noise_regression.py
def test_posterior_latent_gp_and_likelihood_without_optimization(self, cuda=False):
        train_x, test_x, train_y, test_y = self._get_data(cuda=cuda)
        with gpytorch.settings.debug(False):
            # We're manually going to set the hyperparameters to be ridiculous
            likelihood = FixedNoiseGaussianLikelihood(torch.ones(11) * 1e-8)
            gp_model = ExactGPModel(train_x, train_y, likelihood)
            # Update lengthscale prior to accommodate extreme parameters
            gp_model.rbf_covar_module.initialize(lengthscale=exp(-6))
            gp_model.mean_module.initialize(constant=0)

            if cuda:
                gp_model.cuda()
                likelihood.cuda()

            # Compute posterior distribution
            gp_model.eval()
            likelihood.eval()

            # Let's see how our model does, conditioned with weird hyperparams

github cornellius-gp / gpytorch / test / examples / test_simple_gp_regression.py
        likelihood = GaussianLikelihood()
        gp_model = ExactGPModel(train_x, train_y, likelihood)
        gp_model.covar_module.base_kernel.initialize(lengthscale=exp(-15))
        likelihood.initialize(noise=exp(-15))

        if cuda:
            gp_model.cuda()
            likelihood.cuda()

        # Compute posterior distribution
        gp_model.eval()
        likelihood.eval()

        # Let's see how our model does, conditioned with weird hyperparams
        # The posterior should fit all the data
        with gpytorch.settings.debug(False):
            function_predictions = likelihood(gp_model(train_x))

        self.assertAllClose(function_predictions.mean, train_y)
        self.assertAllClose(function_predictions.variance, torch.zeros_like(function_predictions.variance))

        # It shouldn't fit much else though
        test_function_predictions = gp_model(torch.tensor([1.1]).type_as(test_x))

        self.assertAllClose(test_function_predictions.mean, torch.zeros_like(test_function_predictions.mean))
        self.assertAllClose(
            test_function_predictions.variance,
            gp_model.covar_module.outputscale.expand_as(test_function_predictions.variance)
        )

github cornellius-gp / gpytorch / test / examples / test_fixed_noise_fanatasy_updates.py
        gp_model = ExactGPModel(train_x, train_y, likelihood)
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)
        gp_model.covar_module.base_kernel.initialize(lengthscale=exp(1))
        gp_model.mean_module.initialize(constant=0)

        if cuda:
            gp_model.cuda()
            likelihood.cuda()

        # Find optimal model hyperparameters
        gp_model.train()
        likelihood.train()
        optimizer = optim.Adam(list(gp_model.parameters()) + list(likelihood.parameters()), lr=0.15)
        for _ in range(50):
            optimizer.zero_grad()
            with gpytorch.settings.debug(False):
                output = gp_model(train_x)
            loss = -mll(output, train_y)
            loss.backward()
            optimizer.step()

        for param in gp_model.parameters():
            self.assertTrue(param.grad is not None)
            self.assertGreater(param.grad.norm().item(), 0)
        optimizer.step()

        train_x.requires_grad = True
        gp_model.set_train_data(train_x, train_y)
        with gpytorch.settings.fast_pred_var(), gpytorch.settings.detach_test_caches(False):
            # Test the model
            gp_model.eval()
            likelihood.eval()

github cornellius-gp / gpytorch / gpytorch / lazy / lazy_tensor.py
def __getitem__(self, index):
        """
        Supports subindexing of the matrix this LazyTensor represents. This may return either another
        :obj:`gpytorch.lazy.LazyTensor` or a :obj:`torch.tensor` depending on the exact implementation.
        """
        ndimension = self.ndimension()

        # Process the index
        index = index if isinstance(index, tuple) else (index,)
        index = tuple(torch.tensor(idx) if isinstance(idx, list) else idx for idx in index)
        index = tuple(idx.item() if torch.is_tensor(idx) and not len(idx.shape) else idx for idx in index)

        # Handle the ellipsis
        # Find the index of the ellipsis
        ellipsis_locs = tuple(index for index, item in enumerate(index) if item is Ellipsis)
        if settings.debug.on():
            if len(ellipsis_locs) > 1:
                raise RuntimeError(
                    "Cannot have multiple ellipsis in a __getitem__ call. LazyTensor {} "
                    " received index {}.".format(self, index)
                )
        if len(ellipsis_locs) == 1:
            ellipsis_loc = ellipsis_locs[0]
            num_to_fill_in = ndimension - (len(index) - 1)
            index = index[:ellipsis_loc] + tuple(_noop_index for _ in range(num_to_fill_in)) + index[ellipsis_loc + 1 :]

        # Pad the index with empty indices
        index = index + tuple(_noop_index for _ in range(ndimension - len(index)))

        # Make the index a tuple again
        *batch_indices, row_index, col_index = index
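
The debug flag gates the validation above: with debug on, an index containing more than one ellipsis is rejected up front. A small sketch of that behaviour, assuming a GPyTorch version where gpytorch.lazy.NonLazyTensor is still available and __getitem__ behaves as in this excerpt:

import torch
import gpytorch
from gpytorch.lazy import NonLazyTensor

lazy_tensor = NonLazyTensor(torch.randn(2, 4, 4))

with gpytorch.settings.debug(True):
    try:
        # Two ellipses in one index: rejected by the check shown above.
        lazy_tensor[..., 0, ..., 0]
    except RuntimeError as err:
        print(err)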

github pytorch / botorch / botorch / utils / transforms.py
from contextlib import ExitStack, contextmanager

from botorch import settings
from gpytorch import settings as gpt_settings


@contextmanager
def gpt_posterior_settings():
    r"""Context manager for settings used for computing model posteriors."""
    with ExitStack() as es:
        es.enter_context(gpt_settings.debug(False))
        es.enter_context(gpt_settings.fast_pred_var())
        es.enter_context(
            gpt_settings.detach_test_caches(settings.propagate_grads.off())
        )
        yield
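
For context, a sketch of how a combined settings stack like this might be entered (the model and inputs here are hypothetical placeholders, not part of the BoTorch source):

# Hypothetical usage: `model` is a fitted GPyTorch model in eval mode and
# `test_X` is a tensor of test inputs. Everything inside the block runs
# with debug off, fast predictive variances on, and test caches detached
# unless botorch.settings.propagate_grads is enabled.
with gpt_posterior_settings():
    posterior_mvn = model(test_X)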

github cornellius-gp / gpytorch / gpytorch / lazy / block_diag_lazy_tensor.py
def _get_indices(self, left_indices, right_indices, *batch_indices):
        if self.num_blocks is None:
            if settings.debug.on():
                assert len(batch_indices) == 0
            block_size = self.base_lazy_tensor.size(-1)
            left_batch_indices = left_indices.div(block_size).long()
            right_batch_indices = right_indices.div(block_size).long()
            left_indices = left_indices.fmod(block_size)
            right_indices = right_indices.fmod(block_size)

            res = self.base_lazy_tensor._get_indices(left_indices, right_indices, left_batch_indices)
            res = res * torch.eq(left_batch_indices, right_batch_indices).type_as(res)
            return res
        else:
            if settings.debug.on():
                assert len(batch_indices) == 1
            batch_indices = batch_indices[0]
            block_size = self.base_lazy_tensor.size(-1)
            left_batch_indices = left_indices.div(block_size).long()

github cornellius-gp / gpytorch / gpytorch / likelihoods / noise_models.py
def forward(
        self,
        *params: Any,
        batch_shape: Optional[torch.Size] = None,
        shape: Optional[torch.Size] = None,
        noise: Optional[Tensor] = None,
    ) -> DiagLazyTensor:
        if noise is not None:
            return DiagLazyTensor(noise)
        training = self.noise_model.training  # keep track of mode
        self.noise_model.eval()  # we want the posterior prediction of the noise model
        with settings.detach_test_caches(False), settings.debug(False):
            if len(params) == 1 and not torch.is_tensor(params[0]):
                output = self.noise_model(*params[0])
            else:
                output = self.noise_model(*params)
        self.noise_model.train(training)
        if not isinstance(output, MultivariateNormal):
            raise NotImplementedError("Currently only noise models that return a MultivariateNormal are supported")
        # note: this also works with MultitaskMultivariateNormal, where this
        # will return a batched DiagLazyTensor of size n x num_tasks x num_tasks
        noise_diag = output.mean if self._noise_indices is None else output.mean[..., self._noise_indices]
        return DiagLazyTensor(self._noise_constraint.transform(noise_diag))