# test extra param that does not affect loss
options["disp"] = False
mll = self._getModel(double=double)
mll.register_parameter(
"dummy_param",
torch.nn.Parameter(
torch.tensor(
[5.0],
dtype=torch.double if double else torch.float,
device=self.device,
)
),
)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
mll = fit_gpytorch_model(
mll, optimizer=optimizer, options=options, max_retries=1
)
if optimizer == fit_gpytorch_scipy:
self.assertEqual(len(ws), 1)
self.assertTrue(MAX_RETRY_MSG in str(ws[0].message))
self.assertTrue(mll.dummy_param.grad is None)
# test excluding a parameter
mll = self._getModel(double=double)
original_raw_noise = mll.model.likelihood.noise_covar.raw_noise.item()
original_mean_module_constant = mll.model.mean_module.constant.item()
options["exclude"] = [
"model.mean_module.constant",
"likelihood.noise_covar.raw_noise",
]
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
    mll = fit_gpytorch_model(
        mll, optimizer=optimizer, options=options, max_retries=1
    )
    if optimizer == fit_gpytorch_scipy:
        self.assertEqual(len(ws), 1)
        self.assertTrue(MAX_RETRY_MSG in str(ws[0].message))
model = mll.model
# Make sure the excluded parameters did not change
self.assertEqual(
    model.likelihood.noise_covar.raw_noise.item(), original_raw_noise
)
self.assertEqual(
    model.mean_module.constant.item(), original_mean_module_constant
)
# Make sure the remaining parameters changed
self.assertGreater(
model.covar_module.base_kernel.raw_lengthscale.abs().item(), 0.1
)
self.assertGreater(model.covar_module.raw_outputscale.abs().item(), 1e-3)
# test overriding the default bounds with user supplied bounds
mll = self._getModel(double=double)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
mll = fit_gpytorch_model(
mll,
optimizer=optimizer,
options=options,
max_retries=1,
bounds={"likelihood.noise_covar.raw_noise": (1e-1, None)},
)
if optimizer == fit_gpytorch_scipy:
self.assertEqual(len(ws), 1)
self.assertTrue(MAX_RETRY_MSG in str(ws[0].message))
model = mll.model
self.assertGreaterEqual(model.likelihood.raw_noise.abs().item(), 1e-1)
self.assertLess(model.mean_module.constant.abs().item(), 0.1)
self.assertGreater(
model.covar_module.base_kernel.raw_lengthscale.abs().item(), 0.1
)
def test_fit_gpytorch_model_sequential(self):
options = {"disp": False, "maxiter": 1}
for double in (False, True):
for kind in ("SingleTaskGP", "FixedNoiseGP", "HeteroskedasticSingleTaskGP"):
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
mll = self._getBatchedModel(kind=kind, double=double)
mll = fit_gpytorch_model(mll, options=options, max_retries=1)
mll = self._getBatchedModel(kind=kind, double=double)
mll = fit_gpytorch_model(
mll, options=options, sequential=True, max_retries=1
)
mll = self._getBatchedModel(kind=kind, double=double)
mll = fit_gpytorch_model(
mll, options=options, sequential=False, max_retries=1
)
if kind == "HeteroskedasticSingleTaskGP":
self.assertTrue(
any(issubclass(w.category, BotorchWarning) for w in ws)
)
self.assertTrue(
    any(
        "Failed to convert ModelList to batched model"
        in str(w.message)
        for w in ws
    )
)
# We are now ready to run the BO loop (this may take a few minutes, depending on your machine).
# In[11]:
import warnings
warnings.filterwarnings("ignore")
print("\nRunning BO ", end="")
from matplotlib import pyplot as plt
# run N_BATCH rounds of BayesOpt after the initial random batch
for iteration in range(N_BATCH):
# fit the model
fit_gpytorch_model(mll)
# define the qEI acquisition module using a QMC sampler
qmc_sampler = SobolQMCNormalSampler(num_samples=MC_SAMPLES, seed=seed)
qEI = qExpectedImprovement(model=model, sampler=qmc_sampler, best_f=best_value)
# optimize and get new observation
new_x, new_obj = optimize_acqf_and_get_observation(qEI)
# update training points
train_x = torch.cat((train_x, new_x))
train_obj = torch.cat((train_obj, new_obj))
# update progress
best_value = score_image_recognition(decode(train_x)).max().item()
best_observed.append(best_value)
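# For reference, a minimal sketch of the `optimize_acqf_and_get_observation`
# helper invoked in the loop above. Its actual definition lives in an earlier
# cell; `bounds` and `BATCH_SIZE` are assumed to be defined there, and the
# restart/sample counts below are illustrative, not the tutorial's exact values.
from botorch.optim import optimize_acqf

def optimize_acqf_and_get_observation(acq_func):
    """Optimize the acquisition function; return a new candidate and observation."""
    # multi-start optimization of the acquisition function over the search bounds
    candidates, _ = optimize_acqf(
        acq_function=acq_func,
        bounds=bounds,  # assumed defined earlier in the notebook
        q=BATCH_SIZE,  # assumed defined earlier in the notebook
        num_restarts=10,  # illustrative value
        raw_samples=256,  # illustrative value
    )
    new_x = candidates.detach()
    # score the decoded candidates with the (noisy) objective used above
    new_obj = score_image_recognition(decode(new_x))
    return new_x, new_obj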
def test_fit_gpytorch_model_singular(self):
options = {"disp": False, "maxiter": 5}
for dtype in (torch.float, torch.double):
X_train = torch.rand(2, 2, device=self.device, dtype=dtype)
Y_train = torch.zeros(2, 1, device=self.device, dtype=dtype)
test_likelihood = GaussianLikelihood(
noise_constraint=GreaterThan(-1.0, transform=None, initial_value=0.0)
)
gp = SingleTaskGP(X_train, Y_train, likelihood=test_likelihood)
mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
mll.to(device=self.device, dtype=dtype)
# this will do multiple retries (and emit warnings, which is desired)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
fit_gpytorch_model(mll, options=options, max_retries=2)
self.assertTrue(
any(issubclass(w.category, OptimizationWarning) for w in ws)
)
train_x_nei, train_obj_nei, train_con_nei = train_x_ei, train_obj_ei, train_con_ei
best_observed_value_nei = best_observed_value_ei
mll_nei, model_nei = initialize_model(train_x_nei, train_obj_nei, train_con_nei)
best_observed_ei.append(best_observed_value_ei)
best_observed_nei.append(best_observed_value_nei)
best_random.append(best_observed_value_ei)
# run N_BATCH rounds of BayesOpt after the initial random batch
for iteration in range(1, N_BATCH + 1):
t0 = time.time()
# fit the models
fit_gpytorch_model(mll_ei)
fit_gpytorch_model(mll_nei)
# define the qEI and qNEI acquisition modules using a QMC sampler
qmc_sampler = SobolQMCNormalSampler(num_samples=MC_SAMPLES)
# for best_f, we use the best observed noisy values as an approximation
qEI = qExpectedImprovement(
model=model_ei,
best_f=(train_obj_ei * (train_con_ei <= 0).to(train_obj_ei)).max(),
sampler=qmc_sampler,
objective=constrained_obj,
)
qNEI = qNoisyExpectedImprovement(
    model=model_nei,
    X_baseline=train_x_nei,
    sampler=qmc_sampler,
    objective=constrained_obj,
)
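# For context, `constrained_obj` is defined earlier in the tutorial. A minimal
# sketch, assuming the model's first output is the objective and the second is
# the constraint (feasible when <= 0):
from botorch.acquisition.objective import ConstrainedMCObjective

def obj_callable(Z):
    return Z[..., 0]  # first outcome is the objective

def constraint_callable(Z):
    return Z[..., 1]  # second outcome is the constraint (feasible if <= 0)

constrained_obj = ConstrainedMCObjective(
    objective=obj_callable,
    constraints=[constraint_callable],
)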
tkwargs = {"device": self.device, "dtype": dtype}
octf = Standardize(m=m, batch_shape=batch_shape) if use_octf else None
model, _ = _get_model_and_data(
iteration_fidelity=iteration_fidelity,
data_fidelity=data_fidelity,
batch_shape=batch_shape,
m=m,
lin_truncated=lin_trunc,
outcome_transform=octf,
**tkwargs,
)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
mll.to(**tkwargs)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
fit_gpytorch_model(mll, sequential=False, options={"maxiter": 1})
# test init
self.assertIsInstance(model.mean_module, ConstantMean)
self.assertIsInstance(model.covar_module, ScaleKernel)
if use_octf:
self.assertIsInstance(model.outcome_transform, Standardize)
# test param sizes
params = dict(model.named_parameters())
for p in params:
self.assertEqual(
params[p].numel(), m * torch.tensor(batch_shape).prod().item()
)
# test posterior
# test non batch evaluation
train_Yvar: A `n x (t)` (or `b x n x (t)`) Tensor of observed
    measurement noise for each outcome.
model: An initialized Model. This model must have a likelihood attribute.
options: Dictionary of solver options, passed along to scipy.optimize.minimize.
warm_start: If True, start optimizing the hyperparameters from their
previous values without resetting them.
Returns:
Model: A fitted model.
"""
model.reinitialize(
train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar, keep_params=warm_start
)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
mll.to(dtype=train_X.dtype, device=train_X.device)
mll = fit_gpytorch_model(mll, options=options)
return model
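# The enclosing helper's name is not shown in this excerpt; assuming it is
# exposed as, say, `fit_model` (a hypothetical name), a call site would look
# like the sketch below. `SomeModel` stands in for any model class that
# implements `reinitialize` and exposes a `likelihood` attribute, per the
# docstring above.
#
#     train_X = torch.rand(20, 2, dtype=torch.double)
#     train_Y = train_X.sum(dim=-1, keepdim=True)
#     train_Yvar = torch.full_like(train_Y, 0.01)  # known observation noise
#     model = SomeModel()  # must implement `reinitialize`
#     fitted_model = fit_model(
#         train_X=train_X,
#         train_Y=train_Y,
#         train_Yvar=train_Yvar,
#         model=model,
#         options={"maxiter": 50},  # forwarded to scipy.optimize.minimize
#         warm_start=False,  # reset hyperparameters before fitting
#     )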