# NOTE(review): web-scrape banner, not part of the program — commented out so the file can parse.
# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# Simulated benchmark "dataset": sorted random observation times, uniform
# per-point uncertainties, and a noise-free sinusoidal signal.
# NOTE(review): `N` is defined elsewhere in the file (not visible here), and
# no RNG seed precedes these calls in this chunk, so these draws are not
# reproducible — the seeded copy further down uses np.random.seed(42).
t = np.sort(np.random.rand(np.max(N)))
yerr = np.random.uniform(0.1, 0.2, len(t))
y = np.sin(t)
def numpy_compute(kernel, x, yerr):
K = kernel.get_value(x[:, None] - x[None, :])
K[np.diag_indices_from(K)] += yerr ** 2
return cho_factor(K)
def numpy_log_like(factor, y):
    """Reference (negative, unnormalized) log-likelihood pieces from a dense
    Cholesky factorization.

    BUG FIX: the original computed this expression and discarded it — the
    `return` was missing, so the function always returned None.
    NOTE(review): constant terms (e.g. N*log(2*pi)/2) are omitted, exactly as
    in the original expression — presumably fine for benchmarking purposes.
    """
    # y^T K^{-1} y plus the half log-determinant (sum of the log of the
    # diagonal entries of the Cholesky factor).
    return np.dot(y, cho_solve(factor, y)) + np.sum(np.log(np.diag(factor[0])))
# For each benchmark size j in J, build a celerite kernel with 2*j total
# coefficients (J, args, terms, george, CeleriteKernel come from earlier
# in the file, outside this chunk).
for xi, j in enumerate(J):
    # Start from one real term, then add real/complex terms until the
    # coefficient count reaches exactly 2*j.
    kernel = terms.RealTerm(1.0, 0.1)
    for k in range((2*j - 1) % 2):
        kernel += terms.RealTerm(1.0, 0.1)
    for k in range((2*j - 1) // 2):
        kernel += terms.ComplexTerm(0.1, 2.0, 1.6)
    coeffs = kernel.coefficients
    assert 2*j == len(coeffs[0]) + 2*len(coeffs[2]), "Wrong number of terms"

    if args.george:
        # Mirror the celerite kernel with george's CeleriteKernel terms.
        george_kernel = None
        for a, c in zip(*(coeffs[:2])):
            # Real terms carry no oscillatory part: b = d = 0.
            k = CeleriteKernel(a=a, b=0.0, c=c, d=0.0)
            george_kernel = k if george_kernel is None else george_kernel + k
        for a, b, c, d in zip(*(coeffs[2:])):
            # BUG FIX: b and d were previously hard-coded to 0.0 even though
            # the complex coefficients were unpacked here, silently turning
            # every complex term into a real one.
            k = CeleriteKernel(a=a, b=b, c=c, d=d)
            george_kernel = k if george_kernel is None else george_kernel + k
        solver = george.GP(george_kernel, solver=george.HODLRSolver)
    elif args.carma:
        # CARMA comparison: random AR parameters of order 2*j.
        # NOTE(review): this branch appears truncated at the chunk boundary.
        arparams = np.random.randn(2*j)
# Simulate a "dataset"
# Fixed seed makes the simulated dataset reproducible across runs.
np.random.seed(42)
# Sorted random observation times; N is defined elsewhere in the file.
t = np.sort(np.random.rand(np.max(N)))
# Per-point measurement uncertainties drawn from [0.1, 0.2).
yerr = np.random.uniform(0.1, 0.2, len(t))
# Noise-free sinusoidal signal evaluated at the observation times.
y = np.sin(t)
def numpy_compute(kernel, x, yerr):
K = kernel.get_value(x[:, None] - x[None, :])
K[np.diag_indices_from(K)] += yerr ** 2
return cho_factor(K)
def numpy_log_like(factor, y):
    """Reference (negative, unnormalized) log-likelihood pieces from a dense
    Cholesky factorization.

    BUG FIX: the original computed this expression and discarded it — the
    `return` was missing, so the function always returned None.
    NOTE(review): normalization constants are omitted, exactly as in the
    original expression — presumably fine for benchmarking purposes.
    """
    # y^T K^{-1} y plus the sum of the log of the diagonal entries of the
    # Cholesky factor (half the log-determinant of K).
    return np.dot(y, cho_solve(factor, y)) + np.sum(np.log(np.diag(factor[0])))
# For each benchmark size j in J, build a celerite kernel with 2*j total
# coefficients (J, args, terms, george, CeleriteKernel come from earlier
# in the file, outside this chunk).
for xi, j in enumerate(J):
    kernel = terms.RealTerm(1.0, 0.1)
    for k in range((2*j - 1) % 2):
        kernel += terms.RealTerm(1.0, 0.1)
    for k in range((2*j - 1) // 2):
        kernel += terms.ComplexTerm(0.1, 2.0, 1.6)
    coeffs = kernel.coefficients
    assert 2*j == len(coeffs[0]) + 2*len(coeffs[2]), "Wrong number of terms"

    if args.george:
        # Mirror the celerite kernel with george's CeleriteKernel terms.
        george_kernel = None
        for a, c in zip(*(coeffs[:2])):
            # Real terms carry no oscillatory part: b = d = 0.
            k = CeleriteKernel(a=a, b=0.0, c=c, d=0.0)
            george_kernel = k if george_kernel is None else george_kernel + k
        for a, b, c, d in zip(*(coeffs[2:])):
            # BUG FIX: b and d were previously hard-coded to 0.0 even though
            # the complex coefficients were unpacked here, silently turning
            # every complex term into a real one.
            k = CeleriteKernel(a=a, b=b, c=c, d=d)
            george_kernel = k if george_kernel is None else george_kernel + k
        solver = george.GP(george_kernel, solver=george.HODLRSolver)
def test_build_gp(seed=42):
    """Check parameter-vector round-tripping and input validation on a GP."""
    kernel = terms.RealTerm(0.5, 0.1) + terms.ComplexTerm(0.6, 0.7, 1.0)
    gp = GP(kernel)
    assert gp.vector_size == 5

    # The initial vector reflects the constructor arguments, in order.
    expected = [0.5, 0.1, 0.6, 0.7, 1.0]
    assert np.allclose(gp.get_parameter_vector(), expected)

    # Setting a new full-length vector round-trips exactly.
    updated = [0.5, 0.8, 0.6, 0.7, 2.0]
    gp.set_parameter_vector(updated)
    assert np.allclose(gp.get_parameter_vector(), updated)

    # A wrong-length vector and a non-numeric input are both rejected.
    with pytest.raises(ValueError):
        gp.set_parameter_vector([0.5, 0.8, -0.6])
    with pytest.raises(ValueError):
        gp.set_parameter_vector("face1")
def get_gp(solver, kernel):
    """Build a GP configured to match the given solver instance.

    Sparse solvers map to a sparse GP; anything else passes through its
    `use_lapack` flag. Uses isinstance rather than an exact `type(...) ==`
    comparison (the original form) so SparseSolver subclasses are routed to
    the sparse GP as well — the idiomatic Python type check.
    """
    if isinstance(solver, SparseSolver):
        return GP(kernel, sparse=True)
    return GP(kernel, use_lapack=solver.use_lapack)
# NOTE(review): the line below is truncated mid-expression by the chunk
# boundary — the start of the enclosing pickling test (and the full
# statement this fragment belongs to) is not visible here.
solver2.dot_solve(y))
# Round-trip the solver through pickle and check it compares equal.
s = pickle.dumps(solver, -1)
solver2 = pickle.loads(s)
compare(solver, solver2)
# Re-compute with explicit coefficients, then confirm a *computed* solver
# also survives a pickle round-trip.
solver.compute(
    0.0, alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,
    beta_complex_real, beta_complex_imag,
    A, U, V, t, diag
)
solver2 = pickle.loads(pickle.dumps(solver, -1))
compare(solver, solver2)
# Test that models can be pickled too.
kernel = terms.RealTerm(0.5, 0.1)
kernel += terms.ComplexTerm(0.6, 0.7, 1.0)
gp1 = GP(kernel)
gp1.compute(t, diag)
s = pickle.dumps(gp1, -1)
gp2 = pickle.loads(s)
# A pickled-and-restored GP must give the same log-likelihood.
assert np.allclose(gp1.log_likelihood(y), gp2.log_likelihood(y))
def test_predict(seed=42):
    """Compare GP.predict against a dense linear-algebra reference."""
    np.random.seed(seed)
    x = np.linspace(1, 59, 300)
    t = np.sort(np.random.uniform(10, 50, 100))
    yerr = np.random.uniform(0.1, 0.5, len(t))
    y = np.sin(t)

    # One real term plus two complex terms (3- and 4-argument forms).
    kernel = terms.RealTerm(0.1, 0.5)
    for params in [(0.6, 0.7, 1.0), (0.1, 0.05, 0.5, -0.1)]:
        kernel += terms.ComplexTerm(*params)

    gp = GP(kernel)
    gp.compute(t, yerr)

    # Dense reference: mu* = K_* K^-1 y,  cov* = K_** - K_* K^-1 K_*^T.
    K = gp.get_matrix(include_diagonal=True)
    Ks = gp.get_matrix(x, t)
    mu_ref = np.dot(Ks, np.linalg.solve(K, y))
    cov_ref = gp.get_matrix(x, x) - np.dot(Ks, np.linalg.solve(K, Ks.T))

    mu, cov = gp.predict(y, x)
    _, var = gp.predict(y, x, return_var=True)
    assert np.allclose(mu, mu_ref)
    assert np.allclose(cov, cov_ref)
    assert np.allclose(var, np.diag(cov_ref))
# NOTE(review): this chunk starts mid-list — the entries below are the
# kernel cases of a pytest.mark.parametrize("k", [...]) decorator whose
# opening line is not visible here.
terms.RealTerm(log_a=0.1, log_c=0.5) +
terms.RealTerm(log_a=-0.1, log_c=0.7),
terms.ComplexTerm(log_a=0.1, log_c=0.5, log_d=0.1),
terms.ComplexTerm(log_a=0.1, log_b=-0.2, log_c=0.5, log_d=0.1),
terms.SHOTerm(log_S0=0.1, log_Q=-1, log_omega0=0.5),
terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5),
terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5) +
terms.RealTerm(log_a=0.1, log_c=0.4),
terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5) *
terms.RealTerm(log_a=0.1, log_c=0.4),
]
)
def test_jacobian(k, eps=1.34e-7):
    # Without autograd the coefficient-jacobian helper must raise ImportError.
    if not terms.HAS_AUTOGRAD:
        with pytest.raises(ImportError):
            jac = k.get_coeffs_jacobian()
        return
    # NOTE(review): the finite-difference comparison that presumably follows
    # is cut off at the chunk boundary.
# NOTE(review): mid-list fragment — parametrize cases for the jitter
# jacobian test; the decorator's opening line is not visible here.
terms.RealTerm(log_a=0.5, log_c=0.1),
terms.RealTerm(log_a=0.5, log_c=0.1) + terms.JitterTerm(log_sigma=0.3),
terms.JitterTerm(log_sigma=0.5) + terms.JitterTerm(log_sigma=0.1),
]
)
def test_jitter_jacobian(k, eps=1.34e-7):
    # Without autograd the jitter-jacobian helper must raise ImportError.
    if not terms.HAS_AUTOGRAD:
        with pytest.raises(ImportError):
            jac = k.get_jitter_jacobian()
        return
    # The analytic jacobian must have one entry per parameter.
    v = k.get_parameter_vector()
    jac = k.get_jitter_jacobian()
    assert len(jac) == len(v)
    # Finite-difference check of each parameter's derivative.
    jac0 = np.empty_like(jac)
    for i, pval in enumerate(v):
        v[i] = pval + eps
        # NOTE(review): the rest of the finite-difference loop is cut off at
        # the chunk boundary.
def test_bounds(seed=42):
    """Check that parameter bounds round-trip through both the list and the
    dict forms of the `bounds` constructor argument."""
    expected = [(-1.0, 0.3), (-2.0, 5.0)]

    # List form: bounds come back in parameter order.
    kernel = terms.RealTerm(log_a=0.1, log_c=0.5, bounds=expected)
    b0 = kernel.get_parameter_bounds()
    assert all(np.allclose(got, want) for got, want in zip(b0, expected))

    # Dict form (keyed by parameter name) must produce the same bounds.
    kernel = terms.RealTerm(log_a=0.1, log_c=0.5,
                            bounds=dict(zip(["log_a", "log_c"], expected)))
    assert all(np.allclose(got, want)
               for got, want in zip(b0, kernel.get_parameter_bounds()))
# NOTE(review): mid-list fragment — parametrize cases for a second copy of
# the jacobian test; the decorator's opening line is not visible here.
terms.ComplexTerm(log_a=0.1, log_b=-0.2, log_c=0.5, log_d=0.1),
terms.SHOTerm(log_S0=0.1, log_Q=-1, log_omega0=0.5),
terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5),
terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5) +
terms.RealTerm(log_a=0.1, log_c=0.4),
terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5) *
terms.RealTerm(log_a=0.1, log_c=0.4),
]
)
def test_jacobian(k, eps=1.34e-7):
    # Without autograd the coefficient-jacobian helper must raise ImportError.
    if not terms.HAS_AUTOGRAD:
        with pytest.raises(ImportError):
            jac = k.get_coeffs_jacobian()
        return
    v = k.get_parameter_vector()
    c = np.concatenate(k.coefficients)
    # NOTE(review): the function continues past the end of this chunk.