@pytest.mark.parametrize("k", [
    terms.ComplexTerm(log_a=0.1, log_b=-0.2, log_c=0.5, log_d=0.1),
terms.SHOTerm(log_S0=0.1, log_Q=-1, log_omega0=0.5),
terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5),
terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5) +
terms.RealTerm(log_a=0.1, log_c=0.4),
terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5) *
terms.RealTerm(log_a=0.1, log_c=0.4),
]
)
def test_jacobian(k, eps=1.34e-7):
if not terms.HAS_AUTOGRAD:
with pytest.raises(ImportError):
jac = k.get_coeffs_jacobian()
return
v = k.get_parameter_vector()
c = np.concatenate(k.coefficients)
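    # Hedged sketch of how this truncated check could continue: compare the
    # analytic coefficient Jacobian against a central finite difference with
    # step `eps`. The (n_coeffs, n_params) orientation of the returned array
    # is an assumption, not confirmed by the snippet above.
    jac = k.get_coeffs_jacobian()
    jac_num = np.empty((len(c), len(v)))
    for i in range(len(v)):
        v[i] += eps
        k.set_parameter_vector(v)
        plus = np.concatenate(k.coefficients)
        v[i] -= 2 * eps
        k.set_parameter_vector(v)
        minus = np.concatenate(k.coefficients)
        v[i] += eps
        k.set_parameter_vector(v)
        jac_num[:, i] = 0.5 * (plus - minus) / eps
    assert np.allclose(jac, jac_num, atol=1e-5)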
def test_build_gp(seed=42):
kernel = terms.RealTerm(0.5, 0.1)
kernel += terms.ComplexTerm(0.6, 0.7, 1.0)
gp = GP(kernel)
assert gp.vector_size == 5
p = gp.get_parameter_vector()
assert np.allclose(p, [0.5, 0.1, 0.6, 0.7, 1.0])
gp.set_parameter_vector([0.5, 0.8, 0.6, 0.7, 2.0])
p = gp.get_parameter_vector()
assert np.allclose(p, [0.5, 0.8, 0.6, 0.7, 2.0])
with pytest.raises(ValueError):
gp.set_parameter_vector([0.5, 0.8, -0.6])
with pytest.raises(ValueError):
gp.set_parameter_vector("face1")
gp.log_likelihood(y)
assert np.isinf(gp.log_likelihood(y, quiet=True))
if terms.HAS_AUTOGRAD:
assert np.isinf(gp.grad_log_likelihood(y, quiet=True)[0])
kernel = terms.RealTerm(0.1, 0.5)
gp = GP(kernel)
with pytest.raises(RuntimeError):
gp.log_likelihood(y)
termlist = [(0.1 + 10./j, 0.5 + 10./j) for j in range(1, 4)]
termlist += [(1.0 + 10./j, 0.01 + 10./j, 0.5, 0.01) for j in range(1, 10)]
termlist += [(0.6, 0.7, 1.0), (0.3, 0.05, 0.5, 0.6)]
for term in termlist:
if len(term) > 2:
kernel += terms.ComplexTerm(*term)
else:
kernel += terms.RealTerm(*term)
gp = GP(kernel)
assert gp.computed is False
with pytest.raises(ValueError):
gp.compute(np.random.rand(len(x)), yerr)
gp.compute(x, yerr, A=A, U=U, V=V)
assert gp.computed is True
assert gp.dirty is False
ll = gp.log_likelihood(y)
K = gp.get_matrix(include_diagonal=True)
ll0 = -0.5 * np.dot(y, np.linalg.solve(K, y))
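        # Plausible completion of this cross-check (hedged): add the
        # log-determinant and normalization terms of the Gaussian
        # log-likelihood and compare against celerite's value.
        ll0 -= 0.5 * np.linalg.slogdet(K)[1]
        ll0 -= 0.5 * len(y) * np.log(2 * np.pi)
        assert np.allclose(ll, ll0)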
@lapack_switch
def test_nyquist_singularity(solver, seed=4220):
solver = solver()
np.random.seed(seed)
kernel = terms.ComplexTerm(1.0, np.log(1e-6), np.log(1.0))
gp = get_gp(solver, kernel)
# Samples are very close to Nyquist with f = 1.0
ts = np.array([0.0, 0.5, 1.0, 1.5])
ts[1] = ts[1]+1e-9*np.random.randn()
ts[2] = ts[2]+1e-8*np.random.randn()
ts[3] = ts[3]+1e-7*np.random.randn()
yerr = np.random.uniform(low=0.1, high=0.2, size=len(ts))
y = np.random.randn(len(ts))
gp.compute(ts, yerr)
llgp = gp.log_likelihood(y)
K = gp.get_matrix(ts)
K[np.diag_indices_from(K)] += yerr**2.0
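    # Hedged completion: check the celerite likelihood against a dense
    # multivariate-normal evaluation of the same covariance matrix.
    from scipy.stats import multivariate_normal
    ll_dense = multivariate_normal.logpdf(y, mean=np.zeros_like(y), cov=K)
    assert np.allclose(llgp, ll_dense)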
@pytest.mark.parametrize("k", [
    terms.ComplexTerm(log_a=0.1, log_c=0.5, log_d=0.1),
terms.ComplexTerm(log_a=0.1, log_b=-0.2, log_c=0.5, log_d=0.1),
terms.SHOTerm(log_S0=0.1, log_Q=-1, log_omega0=0.5),
terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5),
terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5) +
terms.RealTerm(log_a=0.1, log_c=0.4),
terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5) *
terms.RealTerm(log_a=0.1, log_c=0.4),
]
)
def test_jacobian(k, eps=1.34e-7):
if not terms.HAS_AUTOGRAD:
with pytest.raises(ImportError):
jac = k.get_coeffs_jacobian()
return
v = k.get_parameter_vector()
def test_nyquist_singularity(seed=4220):
np.random.seed(seed)
kernel = terms.ComplexTerm(1.0, np.log(1e-6), np.log(1.0))
gp = GP(kernel)
# Samples are very close to Nyquist with f = 1.0
ts = np.array([0.0, 0.5, 1.0, 1.5])
ts[1] = ts[1]+1e-9*np.random.randn()
ts[2] = ts[2]+1e-8*np.random.randn()
ts[3] = ts[3]+1e-7*np.random.randn()
yerr = np.random.uniform(low=0.1, high=0.2, size=len(ts))
y = np.random.randn(len(ts))
gp.compute(ts, yerr)
llgp = gp.log_likelihood(y)
K = gp.get_matrix(ts)
K[np.diag_indices_from(K)] += yerr**2.0
rcParams["font.sans-serif"] = ["Computer Modern Sans"]
rcParams["text.usetex"] = True
rcParams["text.latex.preamble"] = r"\usepackage{cmbright}"
rcParams["axes.prop_cycle"] = cycler("color", (
"#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd", "#8c564b",
"#e377c2", "#7f7f7f", "#bcbd22", "#17becf",
)) # d3.js color cycle
import time
import numpy as np
import matplotlib.pyplot as pl
from scipy.linalg import cho_factor
from celerite import terms, GP
kernel = terms.RealTerm(1.0, 0.1) + terms.ComplexTerm(0.1, 2.0, 1.6)
gp = GP(kernel)
# benchmark setup: grid of problem sizes and number of timing repetitions per size
N = 2**np.arange(6, 20)
K = np.maximum((2 * N.max() / N), 5*np.ones_like(N)).astype(int)
K_chol = np.maximum((4096 / N), 5*np.ones_like(N)).astype(int)
times = np.empty((len(N), 3))
times[:] = np.nan
t = np.sort(np.random.rand(np.max(N)))
yerr = np.random.uniform(0.1, 0.2, len(t))
y = np.sin(t)
for i, n in enumerate(N):
strt = time.time()
for k in range(K[i]):
gp.compute(t[:n], yerr[:n])
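    # Plausible continuation of the timing loop (hedged): record the mean time
    # per call for the celerite factorization and log-likelihood, and, for the
    # smaller problems, for a dense Cholesky factorization of the same matrix.
    # The column assignment and the 4096-point cutoff are assumptions here.
    times[i, 0] = (time.time() - strt) / K[i]

    strt = time.time()
    for k in range(K[i]):
        gp.log_likelihood(y[:n])
    times[i, 1] = (time.time() - strt) / K[i]

    if n <= 4096:
        strt = time.time()
        for k in range(K_chol[i]):
            cho_factor(gp.get_matrix(t[:n]))
        times[i, 2] = (time.time() - strt) / K_chol[i]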
def get_terms(self):
coeffs = self.get_complex_coefficients()
return [terms.ComplexTerm(*(np.log(args))) for args in zip(*coeffs)]
B = self.kernel.hparams['gp_B'].value
C = self.kernel.hparams['gp_C'].value
L = self.kernel.hparams['gp_L'].value
Prot = self.kernel.hparams['gp_Prot'].value
# build celerite kernel with current values of hparams
kernel = celerite.terms.JitterTerm(
log_sigma = np.log(self.params[self.jit_param].value)
)
kernel += celerite.terms.RealTerm(
log_a=np.log(B*(1+C)/(2+C)),
log_c=np.log(1/L)
)
kernel += celerite.terms.ComplexTerm(
log_a=np.log(B/(2+C)),
log_b=-np.inf,
log_c=np.log(1/L),
log_d=np.log(2*np.pi/Prot)
)
gp = celerite.GP(kernel)
gp.compute(self.x, self.yerr)
mu, var = gp.predict(self._resids(), xpred, return_var=True)
stdev = np.sqrt(var)
return mu, stdev
kernel = 0.5*kernels.ExpSquaredKernel(2.0)
kernel += 1.0*kernels.ExpSquaredKernel(true_q) * \
kernels.CosineKernel(period=1.0 / true_freq)
true_gp = george.GP(kernel)
K = true_gp.get_matrix(t)
K[np.diag_indices_from(K)] += yerr**2
y = np.random.multivariate_normal(np.zeros_like(t), K)
# Set up the fit
# bounds = [(-8.0, 8.0), (-8.0, 8.0), (-8.0, 8.0)]
# kernel = SHOTerm(0.0, -0.5 * np.log(2), 0.0, bounds=bounds)
# kernel.freeze_parameter("log_Q")
# kernel += SHOTerm(0.0, 0.0, 0.0, bounds=bounds)
# kernel += SHOTerm(0.0, 0.0, 0.0, bounds=bounds)
kernel = terms.RealTerm(0.0, 0.0, bounds=[(-8, 8), (-8, 8)])
kernel += terms.ComplexTerm(0.0, 0.0, 0.0, 0.0,
bounds=[(-8, 8), (-8, 8), (-8, 8), (-8, 8)])
fit_gp = celerite.GP(kernel)
fit_gp.compute(t, yerr)
def nll(params):
fit_gp.set_parameter_vector(params)
if not np.isfinite(fit_gp.log_prior()):
return 1e10
return -fit_gp.log_likelihood(y)
npars = len(fit_gp.get_parameter_vector())
parameter_bounds = fit_gp.get_parameter_bounds()
# Optimize with random restarts
best = (np.inf, fit_gp.get_parameter_vector())
for i in range(10):
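    # Hedged sketch of a restart body; the optimizer (scipy's L-BFGS-B), the
    # perturbation scale, and the bookkeeping are illustrative assumptions.
    # Requires: from scipy.optimize import minimize
    p0 = best[1] + 1e-3 * np.random.randn(npars)
    soln = minimize(nll, p0, method="L-BFGS-B", bounds=parameter_bounds)
    if soln.fun < best[0]:
        best = (soln.fun, soln.x)

# adopt the best parameter vector found across the restarts
fit_gp.set_parameter_vector(best[1])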