# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def _create_svgp_model(kernel, likelihood, q_mu, q_sqrt, whiten):
    """Build an SVGP on DatumVGP's inputs and seed its variational parameters.

    The inducing points are a copy of ``DatumVGP.X``; the variational
    distribution ``(q_mu, q_sqrt)`` is assigned right after construction.
    """
    svgp = gpflow.models.SVGP(
        kernel,
        likelihood,
        DatumVGP.X.copy(),
        whiten=whiten,
        q_diag=False,
        num_latent=DatumVGP.DY,
    )
    # Overwrite the freshly-initialised variational parameters with the
    # caller-supplied ones so different model variants start identically.
    svgp.q_mu.assign(q_mu)
    svgp.q_sqrt.assign(q_sqrt)
    return svgp
# NOTE(review): fragment — the enclosing comparison helper's `def` line is not
# visible in this chunk; m1_params, m2_params and tolerance come from that
# missing scope. Returns True iff every shared parameter matches value-wise.
for key in m1_params:
    p1 = m1_params[key]
    p2 = m2_params[key]
    # Any single mismatched parameter fails the whole comparison.
    if not np.allclose(p1.read_value(), p2.read_value(), rtol=tolerance, atol=tolerance):
        return False
return True
# Models that carry their training data, built eagerly at import time against
# the shared `default_datum` fixture (defined elsewhere in this file).
_gp_models = [
    gpflow.models.VGP((default_datum.X, default_datum.Y), default_datum.kernel, default_datum.lik),
    gpflow.models.GPMC((default_datum.X, default_datum.Y), default_datum.kernel, default_datum.lik),
    gpflow.models.SGPMC((default_datum.X, default_datum.Y),
                        default_datum.kernel,
                        default_datum.lik,
                        inducing_variable=default_datum.Z),
    gpflow.models.SGPR((default_datum.X, default_datum.Y), default_datum.kernel, inducing_variable=default_datum.Z),
    gpflow.models.GPR((default_datum.X, default_datum.Y), default_datum.kernel),
    gpflow.models.GPRFITC((default_datum.X, default_datum.Y), default_datum.kernel, inducing_variable=default_datum.Z)
]
# SVGP is constructed without training data here, hence "state-less".
_state_less_gp_models = [gpflow.models.SVGP(default_datum.kernel, default_datum.lik, inducing_variable=default_datum.Z)]
@pytest.mark.parametrize('model', _state_less_gp_models + _gp_models)
def test_methods_predict_f(model):
    """predict_f yields matching (10, 1) mean/variance with non-negative variance."""
    mean, variance = model.predict_f(default_datum.Xs)
    assert_array_equal(mean.shape, variance.shape)
    assert_array_equal(mean.shape, (10, 1))
    # Allow a tiny negative numerical jitter: variance must exceed -1e-6.
    assert_array_less(np.full_like(variance, -1e-6), variance)
@pytest.mark.parametrize('model', _state_less_gp_models + _gp_models)
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
from numpy.testing import assert_almost_equal, assert_allclose
import gpflow
from gpflow.test_util import GPflowTestCase
from gpflow.core import AutoFlow
class DumbModel(gpflow.models.Model):
    """Minimal concrete Model with one scalar parameter; likelihood is -a**2."""

    def __init__(self):
        super().__init__()
        self.a = gpflow.Param(3.)

    @gpflow.params_as_tensors
    def _build_likelihood(self):
        # Simple concave objective (maximised at a == 0) for exercising the
        # model/optimizer plumbing without any data.
        return -tf.square(self.a)
class NoArgsModel(DumbModel):
    # Autoflow-wrapped accessor taking no arguments: compiles a TF function
    # that returns the current value of parameter `a`.
    @gpflow.autoflow()
    @gpflow.params_as_tensors
    def function1(self):
        return self.a
@gpflow.autoflow()
# Two groups of 2-D inputs with noisy sinusoidal targets. A 0/1 group label is
# appended as an extra column so a coregionalised model can tell them apart.
# Note: draw order from `rng` is fixed so the data is reproducible (seed 0).
rng = np.random.RandomState(0)
X = [10 * rng.rand(n, 2) for n in (10, 20)]
Y = [np.sin(x) + 0.9 * np.cos(1.6 * x) + 0.8 * rng.randn(*x.shape) for x in X]
label = [np.zeros((10, 1)), np.ones((20, 1))]
perm = list(range(30))
rng.shuffle(perm)
Xtest = 10 * rng.rand(10, 2)
# Stack both groups and attach the group label as the last column.
X_augumented = np.hstack([np.concatenate(X), np.concatenate(label)])
Y_augumented = np.hstack([np.concatenate(Y), np.concatenate(label)])
# 1. Two independent VGPs for two sets of data
k0 = gpflow.kernels.RBF(2)
# Lengthscales are frozen — presumably so both formulations below optimise the
# same free parameters and can be compared; TODO confirm against the full test.
k0.lengthscales.trainable = False
vgp0 = gpflow.models.VGP(
    X[0], Y[0], kern=k0,
    mean_function=gpflow.mean_functions.Constant(),
    likelihood=gpflow.likelihoods.Gaussian())
k1 = gpflow.kernels.RBF(2)
k1.lengthscales.trainable = False
vgp1 = gpflow.models.VGP(
    X[1], Y[1], kern=k1,
    mean_function=gpflow.mean_functions.Constant(),
    likelihood=gpflow.likelihoods.Gaussian())
# 2. Coregionalized GPR
# SwitchedLikelihood routes each row to one of two Gaussian likelihoods based
# on the label column appended in X_augumented/Y_augumented.
lik = gpflow.likelihoods.SwitchedLikelihood(
    [gpflow.likelihoods.Gaussian(), gpflow.likelihoods.Gaussian()])
kc = gpflow.kernels.RBF(2)
def test_few_inducing_points(self):
    """With few inducing points the bounds must sandwich the exact marginal
    likelihood: ELBO < exact LML < upper bound."""
    with self.test_context() as session:
        # Sparse model on the first 10 inputs as inducing points, optimised.
        sparse = gpflow.models.SGPR(self.X, self.Y, gpflow.kernels.RBF(1), self.X[:10, :].copy())
        gpflow.train.ScipyOptimizer().minimize(sparse)
        # Exact GPR sharing the hyperparameters the sparse model learned.
        exact = gpflow.models.GPR(self.X, self.Y, gpflow.kernels.RBF(1))
        exact.kern.lengthscales = sparse.kern.lengthscales.read_value()
        exact.kern.variance = sparse.kern.variance.read_value()
        exact.likelihood.variance = sparse.likelihood.variance.read_value()
        upper = sparse.compute_upper_bound()
        elbo = -session.run(sparse.objective)
        exact_lml = -session.run(exact.objective)
        self.assertTrue(upper > exact_lml > elbo)
See github issue #277. This is a regression test.
"""
model1, model2 = Linear(), Linear()
# Fresh instances must start with independent but equal parameters.
assert model1.b.numpy() == model2.b.numpy()
model2.b.assign([1.])
# Mutating one instance must not leak into the other (regression for #277).
assert not model1.b.numpy() == model2.b.numpy()
# TODO: (@sergio.pasc) finish tests below once GP models are ready for TF2.0
# Model classes exercised by the generic parametrised tests below.
_model_classes = [
    gpflow.models.GPR,
    gpflow.models.SGPR,
    gpflow.models.GPRFITC,
    gpflow.models.SVGP,
    gpflow.models.VGP,
    gpflow.models.GPMC,
    gpflow.models.SGPMC
]
@pytest.mark.parametrize('model_class', _model_classes)
def test_models_with_mean_functions_changes(model_class):
    """
    Simply check that all models have a higher prediction with a constant mean
    function than with a zero mean function.
    For compositions of mean functions check that multiplication/ addition of
    a constant results in a higher prediction, whereas addition of zero/
    multiplication with one does not.
    """
    data = rng.randn(Datum.N, Datum.input_dim), rng.randn(Datum.N, 1)
    predict_at = rng.randn(Datum.Ntest, Datum.input_dim)
    # NOTE(review): the body appears truncated in this chunk — no model is
    # built and nothing is asserted past this point; confirm against the
    # full file before relying on this test.
def test_constrained_ei(domain):
    """Joint EI * PoF acquisition: check output index bookkeeping and that the
    incumbent fmin is taken over feasible points only."""
    design = gpflowopt.design.LatinHyperCube(16, domain)
    points = design.generate()
    y_objective = parabola2d(points)
    y_constraint = -parabola2d(points) + 0.5
    # Near-noiseless GPRs with fixed, pre-tuned hyperparameters.
    objective_model = gpflow.models.GPR(points, y_objective, gpflow.kernels.RBF(2, ARD=False, lengthscales=37.7554549981, variance=845886.3367827121))
    objective_model.likelihood.variance = 1e-6
    constraint_model = gpflow.models.GPR(points, y_constraint, gpflow.kernels.RBF(2, ARD=False, lengthscales=0.851406328779, variance=845886.3367827121))
    constraint_model.likelihood.variance = 1e-6
    ei = gpflowopt.acquisition.ExpectedImprovement(objective_model)
    pof = gpflowopt.acquisition.ProbabilityOfFeasibility(constraint_model)
    joint = ei * pof
    # Output 0 is the objective, output 1 the constraint.
    np.testing.assert_allclose(joint.objective_indices(), np.array([0], dtype=int))
    np.testing.assert_allclose(joint.constraint_indices(), np.array([1], dtype=int))
    # Force a fresh _setup() and verify the feasibility-aware incumbent.
    joint._needs_setup = False
    joint._setup()
    assert ei.fmin.read_value() > np.min(ei.data[1])
    np.testing.assert_allclose(ei.fmin.read_value(), np.min(ei.data[1][pof.feasible_data_index(), :]), atol=1e-3)
# NOTE(review): fragment — the enclosing factory method's `def` line is not in
# this chunk; `params` is assembled above the visible region.
if self.requires_likelihood:
    params.update(dict(likelihood=self.likelihood))
return self.model_class(**params)
def __repr__(self):
    """Concise summary: model class name plus the whiten and q_diag flags."""
    cls_name = self.model_class.__name__
    return f"ModelSetup({cls_name}, {self.whiten}, {self.q_diag})"
# Parametrised configurations: all four SVGP whiten/q_diag combinations plus
# the data-carrying model classes. NOTE(review): the commented-out "GPRF"
# entry looks like an unfinished GPRFITC setup — confirm before re-enabling.
model_setups = [
    ModelSetup(model_class=gpflow.models.SVGP, whiten=False, q_diag=True),
    ModelSetup(model_class=gpflow.models.SVGP, whiten=True, q_diag=False),
    ModelSetup(model_class=gpflow.models.SVGP, whiten=True, q_diag=True),
    ModelSetup(model_class=gpflow.models.SVGP, whiten=False, q_diag=False),
    ModelSetup(model_class=gpflow.models.SGPR, requires_data=True, requires_likelihood=False),
    ModelSetup(model_class=gpflow.models.VGP, requires_inducing_variables=False, requires_data=True),
    # ModelSetup(model_class=gpflow.models.GPRF),
    ModelSetup(model_class=gpflow.models.GPMC, requires_data=True, requires_inducing_variables=False),
    ModelSetup(model_class=gpflow.models.SGPMC, requires_data=True, requires_inducing_variables=True)
]
@pytest.mark.parametrize('Ntrain, Ntest, D', [[100, 10, 2]])
def test_gaussian_mean_and_variance(Ntrain, Ntest, D):
    # Random regression data; exact GPR with a Matern32 + White kernel.
    data = rng.randn(Ntrain, D), rng.randn(Ntrain, 1)
    Xtest, _ = rng.randn(Ntest, D), rng.randn(Ntest, 1)
    kernel = Matern32() + gpflow.kernels.White()
    model_gp = gpflow.models.GPR(data, kernel=kernel)
    mu_f, var_f = model_gp.predict_f(Xtest)
    mu_y, var_y = model_gp.predict_y(Xtest)
    # NOTE(review): the assertions relating predict_f and predict_y appear to
    # be truncated in this chunk — confirm against the full file.
def __getstate__(self):
    """Pickle support: drop the '_Kuf' entry from the base-class state —
    presumably a cached/non-picklable tensor rebuilt elsewhere; TODO confirm."""
    state = gpflow.models.Model.__getstate__(self)
    del state['_Kuf']
    return state
# NOTE(review): fragment — tail of a KL-divergence helper whose `def` line is
# outside this chunk; KL and WTKiW are computed above the visible region.
# extra trace terms
KL += 0.5 * tf.reduce_sum(tf.diag_part(WTKiW))
return KL
@gpflow.params_as_tensors
def _build_likelihood(self):
    """Variational bound: expected log-likelihood under q(f) minus the KL term."""
    # Mean and variance of the latent function at the training inputs.
    mean, variance = self._build_predict_train()
    expectations = self.likelihood.variational_expectations(mean, variance, self.Y)
    return tf.reduce_sum(expectations) - self.build_KL()
class VGP_kron_anyvar(gpflow.models.GPModel):
    # Variational GP built from per-dimension Matern kernels on an interval;
    # `ms` presumably indexes spectral/feature frequencies — TODO confirm.
    def __init__(self, X, Y, ms, a, b, kerns, likelihood):
        """
        Here we assume the interval is [a,b].
        We do *not* assume that the variance of q(u) has a kronecker structure.
        This can get very computationally heavy very quickly, use with caution!
        """
        # One kernel and one [a, b] bound per input dimension.
        assert a.size == b.size == len(kerns) == X.shape[1]
        # Only half-integer Matern kernels are supported.
        for kern in kerns:
            assert isinstance(kern, (gpflow.kernels.Matern12,
                                     gpflow.kernels.Matern32,
                                     gpflow.kernels.Matern52))
        mf = gpflow.mean_functions.Zero()
        # kern=None: the covariance is assembled from `kerns`, not a single kernel.
        gpflow.models.GPModel.__init__(self, X, Y, kern=None,
                                       likelihood=likelihood, mean_function=mf)
        self.num_latent = 1  # multiple columns not supported in this version