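The snippets below are drawn from the celerite test suite and library source. They assume NumPy, pytest, and celerite are importable, plus input arrays along the following lines. This is a hedged sketch of the missing test fixture, not the exact values used by the tests:

import numpy as np
import pytest
import celerite
from celerite import terms, GP

np.random.seed(42)                          # hypothetical seed
x = np.sort(np.random.uniform(0, 5, 50))   # sample coordinates (assumed)
yerr = np.random.uniform(0.1, 0.5, len(x))  # per-point uncertainties (assumed)
y = np.sin(x)                               # observations (assumed)
with_general = False                        # toggles the general low-rank block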
if with_general:
    U = np.vander(x - np.mean(x), 4).T
    V = U * np.random.rand(4)[:, None]
    A = np.sum(U * V, axis=0) + 1e-8
else:
    A = np.empty(0)
    U = np.empty((0, 0))
    V = np.empty((0, 0))

# Check quiet argument with a non-positive definite kernel.
class NPDTerm(terms.Term):
    parameter_names = ("par1", )

    def get_real_coefficients(self, params):  # NOQA
        return [params[0]], [0.1]

gp = GP(NPDTerm(-1.0))
with pytest.raises(celerite.solver.LinAlgError):
    gp.compute(x, 0.0)
with pytest.raises(celerite.solver.LinAlgError):
    gp.log_likelihood(y)
assert np.isinf(gp.log_likelihood(y, quiet=True))
if terms.HAS_AUTOGRAD:
    assert np.isinf(gp.grad_log_likelihood(y, quiet=True)[0])

# Calling log_likelihood before compute raises a RuntimeError.
kernel = terms.RealTerm(0.1, 0.5)
gp = GP(kernel)
with pytest.raises(RuntimeError):
    gp.log_likelihood(y)

termlist = [(0.1 + 10./j, 0.5 + 10./j) for j in range(1, 4)]
termlist += [(1.0 + 10./j, 0.01 + 10./j, 0.5, 0.01) for j in range(1, 10)]
termlist += [(0.6, 0.7, 1.0), (0.3, 0.05, 0.5, 0.6)]
for term in termlist:
    # ComplexTerm for 3- and 4-parameter tuples; RealTerm otherwise
    # (the RealTerm branch is an assumed completion of the truncated loop).
    if len(term) > 2:
        kernel += terms.ComplexTerm(*term)
    else:
        kernel += terms.RealTerm(*term)
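In the full test, each accumulated kernel is checked against a direct dense-matrix evaluation; the loop body is truncated here. A minimal sketch of such a check, assuming the fixtures above and celerite's GP.get_matrix (not the test's exact code):

import numpy as np
import scipy.linalg as sl
from celerite import GP

gp = GP(kernel)
gp.compute(x, yerr)
fast_ll = gp.log_likelihood(y)

# Dense reference: build the full covariance and evaluate the Gaussian
# log likelihood with a standard Cholesky factorization.
K = gp.get_matrix(include_diagonal=True)
factor = sl.cho_factor(K)
slow_ll = -0.5 * (y.dot(sl.cho_solve(factor, y))
                  + 2.0 * np.sum(np.log(np.diag(factor[0])))
                  + len(x) * np.log(2.0 * np.pi))
assert np.allclose(fast_ll, slow_ll)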
def logprob(self):
    # Assumes a model class providing update_kernel_params(), errorbars(),
    # _resids(), and the number of data points N.
    self.update_kernel_params()
    try:
        solver = self.kernel.compute_covmatrix(self.errorbars())
        # Gaussian log likelihood:
        # -0.5 * (r^T K^{-1} r + log|K| + N log(2*pi))
        lnlike = -0.5 * (solver.dot_solve(self._resids())
                         + solver.log_determinant()
                         + self.N * np.log(2. * np.pi))
        return lnlike
    except celerite.solver.LinAlgError:
        warnings.warn("Non-positive definite kernel detected.",
                      RuntimeWarning)
        return -np.inf
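The same guard can be expressed directly against celerite's public GP API; the quiet flag documented below folds the try/except into a single call. A minimal sketch, assuming a computed gp and observations y:

import numpy as np
import celerite

def safe_logprob(gp, y):
    # Map a failed (non-positive definite) factorization to -inf so that
    # samplers and optimizers simply reject the step.
    try:
        return gp.log_likelihood(y)
    except celerite.solver.LinAlgError:
        return -np.inf

# Equivalent built-in behavior:
# lnlike = gp.log_likelihood(y, quiet=True)   # returns -numpy.inf on failure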
# When this file is executed during the build, __CELERITE_SETUP__ is injected
# (typically via builtins) by setup.py so that the not-yet-compiled extension
# modules are not imported.
try:
    __CELERITE_SETUP__
except NameError:
    __CELERITE_SETUP__ = False

if not __CELERITE_SETUP__:
    __all__ = [
        "terms", "solver", "modeling", "GP", "CholeskySolver",
        "__library_version__",
    ]

    from . import terms, solver, modeling
    from .celerite import GP
    from .solver import CholeskySolver

    __library_version__ = solver.get_library_version()
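With the package installed, those names then resolve at the top level, for example:

import celerite
from celerite import terms

print(celerite.__library_version__)
gp = celerite.GP(terms.RealTerm(0.1, 0.5))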
@property
def solver(self):
    # Lazily construct and cache the Cholesky solver on first access
    # (the `solver` name inside the body refers to the imported module).
    if self._solver is None:
        self._solver = solver.CholeskySolver()
    return self._solver
def log_likelihood(self, y, quiet=False):
    """
    Compute the marginalized likelihood of the GP model

    Args:
        y (array[n]): The observations at coordinates ``x`` from
            :func:`GP.compute`.
        quiet (bool): If true, return ``-numpy.inf`` for non-positive
            definite matrices instead of throwing an error.

    Returns:
        float: The marginalized likelihood of the GP model.

    Raises:
        ValueError: For mismatched dimensions.
        solver.LinAlgError: For non-positive definite matrices.

    """
    y = self._process_input(y)
    resid = y - self.mean.get_value(self._t)
    try:
        self._recompute()
    except solver.LinAlgError:
        if quiet:
            return -np.inf
        raise
    if len(y.shape) > 1:
        raise ValueError("dimension mismatch")
    logdet = self.solver.log_determinant()
    if not np.isfinite(logdet):
        return -np.inf
    # _const is log(2*pi), bound once outside this snippet in the source.
    loglike = -0.5 * (self.solver.dot_solve(resid) + logdet + len(y) * _const)
    if not np.isfinite(loglike):
        return -np.inf
    return loglike
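For reference, a minimal hedged usage sketch of this method (kernel and data as in the test fixture above, not taken from the source):

from celerite import terms, GP

gp = GP(terms.RealTerm(0.1, 0.5))
gp.compute(x, yerr)                       # factorize; x, yerr assumed defined
print(gp.log_likelihood(y))               # raises solver.LinAlgError if the
                                          # factorization is not positive definite
print(gp.log_likelihood(y, quiet=True))   # returns -numpy.inf in that case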
def grad_log_likelihood(self, y, quiet=False):
    """
    Compute the gradient of the marginalized likelihood of the GP model

    Args:
        y (array[n]): The observations at coordinates ``x`` from
            :func:`GP.compute`.
        quiet (bool): If true, return ``-numpy.inf`` and a gradient vector
            of zeros for non-positive definite matrices instead of
            throwing an error.

    Returns:
        The gradient of the marginalized likelihood with respect to the
        parameter vector.

    Raises:
        ValueError: For mismatched dimensions.
        solver.LinAlgError: For non-positive definite matrices.

    """
    if not solver.has_autodiff():
        raise RuntimeError("celerite must be compiled with autodiff "
                           "support to use the gradient methods")
    if not self.kernel.vector_size:
        return self.log_likelihood(y, quiet=quiet), np.empty(0)
    y = self._process_input(y)
    if len(y.shape) > 1:
        raise ValueError("dimension mismatch")
    resid = y - self.mean.get_value(self._t)
    (alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,
     beta_complex_real, beta_complex_imag) = self.kernel.coefficients
    try:
        val, grad = self.solver.grad_log_likelihood(
            self.kernel.jitter,