# Snippet: joint objective/constraint model construction (BoTorch tutorial style).
# NOTE: `train_yvar` is assumed to be a module-level tensor of known observation
# noise, as in the tutorial this snippet comes from.
from botorch.models import FixedNoiseGP, ModelListGP
from gpytorch.mlls import SumMarginalLogLikelihood


def initialize_model(train_x, train_obj, train_con, state_dict=None):
    # define models for objective and constraint, each with fixed observation noise
    model_obj = FixedNoiseGP(train_x, train_obj, train_yvar.expand_as(train_obj)).to(train_x)
    model_con = FixedNoiseGP(train_x, train_con, train_yvar.expand_as(train_con)).to(train_x)
    # combine into a multi-output GP model
    model = ModelListGP(model_obj, model_con)
    mll = SumMarginalLogLikelihood(model.likelihood, model)
    # load state dict if it is passed (e.g., to warm-start from a previous fit)
    if state_dict is not None:
        model.load_state_dict(state_dict)
    return mll, model
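# Hedged usage sketch (not from the original source): assuming `train_x`,
# `train_obj`, `train_con`, and `train_yvar` are defined as above, the returned
# mll is typically fit before optimizing an acquisition function.
from botorch.fit import fit_gpytorch_model

mll, model = initialize_model(train_x, train_obj, train_con)
fit_gpytorch_model(mll)  # fits both sub-models via the summed marginal log likelihood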
# Snippet: converter checks from a BoTorch test (fragment of a larger test
# method; `train_X`, `train_Y1`, `train_Y2`, and `dtype` are defined earlier
# in that method).
# test HeteroskedasticSingleTaskGP
gp2 = HeteroskedasticSingleTaskGP(
    train_X, train_Y1, torch.ones_like(train_Y1)
)
with self.assertRaises(NotImplementedError):
    model_list_to_batched(ModelListGP(gp2))
# test custom likelihood
gp2 = SingleTaskGP(train_X, train_Y2, likelihood=GaussianLikelihood())
with self.assertRaises(NotImplementedError):
    model_list_to_batched(ModelListGP(gp2))
# test FixedNoiseGP
train_X = torch.rand(10, 2, device=self.device, dtype=dtype)
train_Y1 = train_X.sum(dim=-1, keepdim=True)
train_Y2 = (train_X[:, 0] - train_X[:, 1]).unsqueeze(-1)
gp1_ = FixedNoiseGP(train_X, train_Y1, torch.rand_like(train_Y1))
gp2_ = FixedNoiseGP(train_X, train_Y2, torch.rand_like(train_Y2))
list_gp = ModelListGP(gp1_, gp2_)
batch_gp = model_list_to_batched(list_gp)
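# Hedged sketch (not from the original source): `model_list_to_batched` and its
# inverse live in botorch.models.converter; the conversion is lossless, which
# the round-trip test below relies on.
from botorch.models.converter import batched_to_model_list, model_list_to_batched

recovered = batched_to_model_list(batch_gp)  # back to a ModelListGP
assert isinstance(recovered, ModelListGP)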
def test_roundtrip(self):
    for dtype in (torch.float, torch.double):
        train_X = torch.rand(10, 2, device=self.device, dtype=dtype)
        train_Y1 = train_X.sum(dim=-1)
        train_Y2 = train_X[:, 0] - train_X[:, 1]
        train_Y = torch.stack([train_Y1, train_Y2], dim=-1)
        # SingleTaskGP
        batch_gp = SingleTaskGP(train_X, train_Y)
        list_gp = batched_to_model_list(batch_gp)
        batch_gp_recov = model_list_to_batched(list_gp)
        sd_orig = batch_gp.state_dict()
        sd_recov = batch_gp_recov.state_dict()
        self.assertTrue(set(sd_orig) == set(sd_recov))
        self.assertTrue(all(torch.equal(sd_orig[k], sd_recov[k]) for k in sd_orig))
        # FixedNoiseGP
        batch_gp = FixedNoiseGP(train_X, train_Y, torch.rand_like(train_Y))
        list_gp = batched_to_model_list(batch_gp)
        batch_gp_recov = model_list_to_batched(list_gp)
        sd_orig = batch_gp.state_dict()
        sd_recov = batch_gp_recov.state_dict()
        self.assertTrue(set(sd_orig) == set(sd_recov))
        self.assertTrue(all(torch.equal(sd_orig[k], sd_recov[k]) for k in sd_orig))
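# Hedged helper sketch (hypothetical name, not from the source): the test's
# state-dict comparison, factored out for reuse.
def state_dicts_equal(sd_a, sd_b):
    # same keys, and every tensor exactly equal
    return set(sd_a) == set(sd_b) and all(
        torch.equal(sd_a[k], sd_b[k]) for k in sd_a
    )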
# Snippet: test setUp fragment; `NOISE`, `train_x`, `train_y`, and `train_yvar`
# are defined earlier in the enclosing test case.
noise = torch.tensor(NOISE, device=self.device, dtype=dtype)
self.train_x = train_x
self.train_y = train_y + noise
self.train_yvar = train_yvar
self.bounds = torch.tensor([[0.0], [1.0]], device=self.device, dtype=dtype)
model_st = SingleTaskGP(self.train_x, self.train_y)
self.model_st = model_st.to(device=self.device, dtype=dtype)
self.mll_st = ExactMarginalLogLikelihood(
    self.model_st.likelihood, self.model_st
)
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=OptimizationWarning)
    self.mll_st = fit_gpytorch_model(
        self.mll_st, options={"maxiter": 5}, max_retries=1
    )
model_fn = FixedNoiseGP(
    self.train_x, self.train_y, self.train_yvar.expand_as(self.train_y)
)
self.model_fn = model_fn.to(device=self.device, dtype=dtype)
self.mll_fn = ExactMarginalLogLikelihood(
    self.model_fn.likelihood, self.model_fn
)
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=OptimizationWarning)
    self.mll_fn = fit_gpytorch_model(
        self.mll_fn, options={"maxiter": 5}, max_retries=1
    )
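# Hedged sketch (hypothetical helper, not from the source): the quick-fit
# pattern above, capped at a few optimizer iterations with OptimizationWarning
# suppressed; suitable for tests, not for production fitting.
def quick_fit(mll, maxiter=5):
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=OptimizationWarning)
        return fit_gpytorch_model(mll, options={"maxiter": maxiter}, max_retries=1)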
"covar_module.base_kernel.raw_lengthscale": torch.tensor([[-0.99]]),
"covar_module.base_kernel.lengthscale_prior.concentration": torch.tensor(
3.0
),
"covar_module.base_kernel.lengthscale_prior.rate": torch.tensor(6.0),
"covar_module.outputscale_prior.concentration": torch.tensor(2.0),
"covar_module.outputscale_prior.rate": torch.tensor(0.1500),
}
train_x = torch.linspace(0, 1, 10, device=self.device, dtype=dtype).unsqueeze(
-1
)
train_y = torch.sin(train_x * (2 * math.pi))
noise = torch.tensor(NEI_NOISE, device=self.device, dtype=dtype)
train_y += noise
train_yvar = torch.full_like(train_y, 0.25 ** 2)
model = FixedNoiseGP(train_X=train_x, train_Y=train_y, train_Yvar=train_yvar)
model.load_state_dict(state_dict)
model.to(train_x)
model.eval()
return model
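# Hedged usage sketch (not from the source; assumes the reconstructed helper
# above, called from inside the test class):
model = self._get_model(dtype=torch.double)
test_x = torch.rand(5, 1, device=self.device, dtype=torch.double)
with torch.no_grad():
    posterior = model.posterior(test_x)  # model is already in eval mode
print(posterior.mean.shape)  # expected: torch.Size([5, 1])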