import numpy as np
from itertools import product

from celerite import terms


def test_product(seed=42):
    """Check that sums and products of celerite terms match the dense kernels."""
    np.random.seed(seed)
    t = np.sort(np.random.uniform(0, 5, 100))
    tau = t[:, None] - t[None, :]
    k1 = terms.RealTerm(log_a=0.1, log_c=0.5)
    k2 = terms.ComplexTerm(0.2, -3.0, 0.5, 0.01)
    k3 = terms.SHOTerm(1.0, 0.2, 3.0)
    K1 = k1.get_value(tau)
    K2 = k2.get_value(tau)
    K3 = k3.get_value(tau)

    assert np.allclose((k1 + k2).get_value(tau), K1 + K2)
    assert np.allclose((k3 + k2).get_value(tau), K3 + K2)
    assert np.allclose((k1 + k2 + k3).get_value(tau), K1 + K2 + K3)

    for (a, b), (A, B) in zip(product((k1, k2, k3, k1+k2, k1+k3, k2+k3),
                                      (k1, k2, k3)),
                              product((K1, K2, K3, K1+K2, K1+K3, K2+K3),
                                      (K1, K2, K3))):
        assert np.allclose((a * b).get_value(tau), A*B)
if config.BASEMENT.settings['baseline_'+key+'_'+inst] == 'sample_GP_real':
    kernel = terms.RealTerm(log_a=params['baseline_gp_real_lna_'+key+'_'+inst],
                            log_c=params['baseline_gp_real_lnc_'+key+'_'+inst])
elif config.BASEMENT.settings['baseline_'+key+'_'+inst] == 'sample_GP_complex':
    kernel = terms.ComplexTerm(log_a=params['baseline_gp_complex_lna_'+key+'_'+inst],
                               log_b=params['baseline_gp_complex_lnb_'+key+'_'+inst],
                               log_c=params['baseline_gp_complex_lnc_'+key+'_'+inst],
                               log_d=params['baseline_gp_complex_lnd_'+key+'_'+inst])
elif config.BASEMENT.settings['baseline_'+key+'_'+inst] == 'sample_GP_Matern32':
    kernel = terms.Matern32Term(log_sigma=params['baseline_gp_matern32_lnsigma_'+key+'_'+inst],
                                log_rho=params['baseline_gp_matern32_lnrho_'+key+'_'+inst])
elif config.BASEMENT.settings['baseline_'+key+'_'+inst] == 'sample_GP_SHO':
    kernel = terms.SHOTerm(log_S0=params['baseline_gp_sho_lnS0_'+key+'_'+inst],
                           log_Q=params['baseline_gp_sho_lnQ_'+key+'_'+inst],
                           log_omega0=params['baseline_gp_sho_lnomega0_'+key+'_'+inst])
else:
    raise KeyError('GP settings and params do not match.')

#::: GP and mean (simple offset)
if 'baseline_gp_offset_'+key+'_'+inst in params:
    gp = celerite.GP(kernel, mean=params['baseline_gp_offset_'+key+'_'+inst], fit_mean=True)
else:
    gp = celerite.GP(kernel, mean=0.)

return gp
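# Sketch (assumption): typical use of the builder above inside a model
# evaluation; baseline_get_gp is a hypothetical name for the enclosing
# function, and x, y, yerr stand in for the instrument's (sorted) time series.
gp = baseline_get_gp(params, key, inst)
gp.compute(x, yerr)                            # factorize the celerite matrix
baseline = gp.predict(y, x, return_cov=False)  # GP mean = baseline model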
#::: fname
if fname is not None:
    fname += '_gp_decor_'
else:
    fname = 'gp_decor_'
#::: MCMC plot settings
if kernel=='Matern32':
    keys = ['gp_log_sigma', 'gp_log_rho', 'log_y_err']
    names = [r'gp: $\log{\sigma}$', r'gp: $\log{\rho}$', r'$\log{(y_\mathrm{err})}$']
elif kernel=='SHO':
    keys = ['gp_log_S0', 'gp_log_Q', 'gp_log_omega0', 'log_y_err']
    names = [r'gp: $\log{S_0}$', r'gp: $\log{Q}$', r'gp: $\log{\omega_0}$', r'$\log{(y_\mathrm{err})}$']
discard = int(1.*burn_steps/thin_by)
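# Sketch (assumption): with emcee 3 the chain is stored once every thin_by
# steps, so the burn-in measured in stored samples is burn_steps/thin_by and
# can be dropped via e.g. sampler.get_chain(discard=discard, flat=True).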
#::: phase-plot settings
dt = 1./1000.
ferr_type = 'meansig'
ferr_style = 'sem'
sigmaclip = True
logprint('\nStarting...')
#::: guess yerr if not given
if yerr is None:
    yerr = np.nanstd(y) * np.ones_like(y)
alpha_error = np.empty((K, len(J), len(N)))
logdet_error = np.empty((K, len(J), len(N)))
logdet_error[:, :, :] = np.nan
for k in range(K):
    t = np.sort(np.random.uniform(0, N.max() * 0.8, N.max()))
    yerr = np.random.uniform(1.0, 1.5, len(t))
    for ix, j in enumerate(J):
        kernel = terms.RealTerm(np.random.uniform(-1, 1),
                                np.random.uniform(-5, -1))
        kernel += terms.RealTerm(np.random.uniform(-1, 1),
                                 np.random.uniform(-5, -1))
        while (len(kernel.coefficients[0]) + 2*len(kernel.coefficients[2]) <
               2*j):
            kernel += terms.SHOTerm(
                log_S0=np.random.uniform(-1, 1),
                log_omega0=np.random.uniform(-5, 0),
                log_Q=np.random.uniform(0, 1),
            )
        kernel += terms.JitterTerm(np.random.uniform(-1, 1))
        assert (
            len(kernel.coefficients[0]) + 2*len(kernel.coefficients[2]) == 2*j
        )
        gp = celerite.GP(kernel)
        for iy, n in enumerate(N):
            gp.compute(t[:n], yerr[:n])
            alpha_true = np.random.randn(n)
            args = [kernel.jitter] + list(kernel.coefficients)
            args += [t[:n], alpha_true]
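# Sketch (assumption): why the count above is len(coefficients[0]) +
# 2*len(coefficients[2]). In celerite, kernel.coefficients is the 6-tuple
# (a_real, c_real, a_comp, b_comp, c_comp, d_comp); a real term contributes
# one exponential and a complex term (e.g. an SHOTerm with Q > 1/2) two.
demo = terms.RealTerm(log_a=0.0, log_c=0.0) + \
    terms.SHOTerm(log_S0=0.0, log_Q=0.0, log_omega0=0.0)
a_real, c_real, a_comp, b_comp, c_comp, d_comp = demo.coefficients
print(len(a_real) + 2*len(a_comp))  # -> 3: one real plus one complex pair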
    # Jitter term:
    kernel_jitter = terms.JitterTerm(np.log(100*1e-6))
    # Wrap GP kernel and object:
    if self.instrument in ['rv','lc']:
        self.kernel = exp_kernel*matern_kernel
    else:
        self.kernel = exp_kernel*matern_kernel + kernel_jitter
    # We add a phantom variable because we want to leave index 2 without a
    # value ON PURPOSE: that entry is always 0 here, because it defines the
    # log(sigma) of the Matern kernel in the multiplication, which we fix to 1.
    phantomvariable = 1
    # We are using celerite:
    self.use_celerite = True
elif self.kernel_name == 'CeleriteSHOKernel':
    # Generate kernel:
    sho_kernel = terms.SHOTerm(log_S0=np.log(10.), log_Q=np.log(10.), log_omega0=np.log(10.))
    # Jitter term:
    kernel_jitter = terms.JitterTerm(np.log(100*1e-6))
    # Wrap GP kernel and object:
    if self.instrument in ['rv','lc']:
        self.kernel = sho_kernel
    else:
        self.kernel = sho_kernel + kernel_jitter
    # We are using celerite:
    self.use_celerite = True
# Check if use_celerite is True; if so, check that the regressor is ordered.
# If it is not, skip the self.init_GP():
if self.use_celerite:
    idx_sorted = np.argsort(self.X)
    diff1 = np.sum(self.X - self.X[idx_sorted])
    diff2 = np.sum(self.X - self.X[idx_sorted[::-1]])
    if diff1 == 0 or diff2 == 0:
        self.init_GP()
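# Sketch (assumption): celerite's solver assumes sorted inputs, which is why
# the check above accepts self.X only in forward or reverse order. With a
# generic regressor one would sort explicitly (X, y, yerr are hypothetical):
idx = np.argsort(X)
gp.compute(X[idx], yerr[idx])        # factorize on the sorted inputs
lnlike = gp.log_likelihood(y[idx])   # reorder the observations the same way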
from __future__ import division, print_function
import emcee
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
import celerite
from celerite import terms
from celerite import plot_setup
np.random.seed(42)
plot_setup.setup(auto=True)
# Simulate some data
kernel = terms.SHOTerm(log_S0=0.0, log_omega0=2.0, log_Q=2.0,
                       bounds=[(-10, 10), (-10, 10), (-10, 10)])
gp = celerite.GP(kernel)
true_params = np.array(gp.get_parameter_vector())
omega = 2*np.pi*np.exp(np.linspace(-np.log(10.0), -np.log(0.1), 5000))
true_psd = gp.kernel.get_psd(omega)
N = 200
t = np.sort(np.random.uniform(0, 10, N))
yerr = 2.5
y = gp.sample(t, diag=yerr**2)
# Find the maximum likelihood model
gp.compute(t, yerr)
def nll(params, gp, y):
    gp.set_parameter_vector(params)
    if not np.isfinite(gp.log_prior()):
        return 1e10
    ll = gp.log_likelihood(y)
    return -ll if np.isfinite(ll) else 1e10
# Simulate a dataset from the true model
np.random.seed(42)
N = 100
t = np.sort(np.random.uniform(0, 20, N))
yerr = 0.5
K = true_model.get_K(t)
K[np.diag_indices_from(K)] += yerr**2
y = np.random.multivariate_normal(np.zeros(N), K)
# Set up the celerite model that we will use to fit - product of two SHOs
log_Q = 1.0
kernel = terms.SHOTerm(log_S0=np.log(np.var(y))-2*log_Q, log_Q=log_Q,
                       log_omega0=np.log(2*np.pi))
kernel *= terms.SHOTerm(log_S0=0.0, log_omega0=0.0, log_Q=-0.5*np.log(2))
kernel.freeze_parameter("k2:log_S0")
kernel.freeze_parameter("k2:log_Q")
gp = celerite.GP(kernel)
gp.compute(t, yerr)
# Fit for the maximum likelihood
def nll(params, gp, y):
    gp.set_parameter_vector(params)
    if not np.isfinite(gp.log_prior()):
        return 1e10
    ll = gp.log_likelihood(y)
    return -ll if np.isfinite(ll) else 1e10
p0 = gp.get_parameter_vector()
soln = minimize(nll, p0, method="L-BFGS-B", args=(gp, y))
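# Sketch (assumption): push the maximum-likelihood parameters back into the GP
# and compare its PSD with the truth (omega and true_psd as computed in the
# first snippet above).
gp.set_parameter_vector(soln.x)
ml_psd = gp.kernel.get_psd(omega)
plt.loglog(omega/(2*np.pi), true_psd, label="truth")
plt.loglog(omega/(2*np.pi), ml_psd, label="max. likelihood")
plt.legend()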
if config.BASEMENT.settings['stellar_var_'+key] == 'sample_GP_real':
    kernel = terms.RealTerm(log_a=params['stellar_var_gp_real_lna_'+key],
                            log_c=params['stellar_var_gp_real_lnc_'+key])
elif config.BASEMENT.settings['stellar_var_'+key] == 'sample_GP_complex':
    kernel = terms.ComplexTerm(log_a=params['stellar_var_gp_complex_lna_'+key],
                               log_b=params['stellar_var_gp_complex_lnb_'+key],
                               log_c=params['stellar_var_gp_complex_lnc_'+key],
                               log_d=params['stellar_var_gp_complex_lnd_'+key])
elif config.BASEMENT.settings['stellar_var_'+key] == 'sample_GP_Matern32':
    kernel = terms.Matern32Term(log_sigma=params['stellar_var_gp_matern32_lnsigma_'+key],
                                log_rho=params['stellar_var_gp_matern32_lnrho_'+key])
elif config.BASEMENT.settings['stellar_var_'+key] == 'sample_GP_SHO':
    kernel = terms.SHOTerm(log_S0=params['stellar_var_gp_sho_lnS0_'+key],
                           log_Q=params['stellar_var_gp_sho_lnQ_'+key],
                           log_omega0=params['stellar_var_gp_sho_lnomega0_'+key])
else:
    raise KeyError('GP settings and params do not match.')

#::: GP and mean (simple offset)
if 'stellar_var_gp_offset_'+key in params:
    gp = celerite.GP(kernel, mean=params['stellar_var_gp_offset_'+key], fit_mean=True)
else:
    gp = celerite.GP(kernel, mean=0.)

return gp
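# Sketch (assumption): inside a likelihood evaluation the returned gp would be
# conditioned on the data and scored; stellar_var_get_gp is a hypothetical name
# for the enclosing function, and x, y, yerr stand in for the stellar
# variability time series.
gp = stellar_var_get_gp(params, key)
gp.compute(x, yerr)
lnlike = gp.log_likelihood(y)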