# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
celerite_solver.compute(*params)
celerite_solver.dot_solve(y)
""", "from __main__ import params, celerite_solver, y")
# NOTE(review): fragment — the two lines above are the body of a triple-quoted
# snippet passed to benchmark(); the opening of that call is not visible in
# this chunk. `p`, `js`, `params`, `times` and `i` come from the enclosing
# loop defined elsewhere in this concatenated file.
print(p, js[i], len(params[0]), len(params[2]), times[i])
# NOTE(review): fragment — `ps`, `js`, `times`, `plt` and `plot_setup` are
# defined in other parts of this concatenated file.
# Fit straight lines in log-log space to estimate the scaling exponents of
# the two timing curves.
a = np.log(ps)
b = np.log(times[:, 0])
print(np.polyfit(a, b, 1))
# Some runs were skipped (NaN timings): keep only finite entries.
m = np.isfinite(times[:, 1])
a = np.log(js[m])
b = np.log(times[m, 1])
print(np.polyfit(a, b, 1))
# Log-log plot of both timing curves against a quadratic reference line.
fig, ax = plt.subplots(1, 1, figsize=plot_setup.get_figsize(1, 1))
ax.plot(ps, 1e-4 * ps**2, "k")  # O(p^2) reference curve
ax.plot(ps, times[:, 0], ".--")
ax.plot(js[m], times[m, 1], ".-")
ax.set_xlim(ps.min(), max(ps.max(), js.max()))
ax.set_ylim(4e-4, 4)
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlabel("number of terms")
ax.set_ylabel("computational cost [seconds]")
fig.savefig("carma_comp.pdf", bbox_inches="tight")
"from __main__ import gp, y")
# NOTE(review): fragment — the line above is the setup-string argument and
# closing paren of a benchmark() call whose opening is elsewhere in this
# concatenated file; `n`, `i` and `times` come from the enclosing loop.
# Dense Cholesky baseline: only run for n <= 4096 because its O(n^3) cost
# becomes prohibitive beyond that (larger sizes stay NaN and are masked
# when plotting).
if n <= 4096:
times[i, 2] = benchmark("""
C = gp.get_matrix(t[:{0}])
C[np.diag_indices_from(C)] += yerr[:{0}]**2
cho_factor(C)
""".format(n), """
from __main__ import gp, t, yerr
import numpy as np
from scipy.linalg import cho_factor
""")
# Progress report: timings measured so far for this problem size.
print(n, times[i])
# NOTE(review): fragment — `N`, `times`, `plt` and `plot_setup` are defined
# in other parts of this concatenated file.
# Plot measured celerite scaling against an O(N) reference line and the
# dense-Cholesky baseline (only computed for N <= 4096).
fig, ax = plt.subplots(1, 1, figsize=plot_setup.get_figsize(1, 1))
# Fix: use a raw string for the TeX label — "\m" is an invalid escape
# sequence in a normal string literal (SyntaxWarning since Python 3.12,
# slated to become an error).
ax.plot(N, N / N[-1] * 2.0, "k", label=r"$\mathcal{O}(N)$")
ax.plot(N, times[:, 0], ".-", label="compute")
ax.plot(N, times[:, 1], ".--", label="log likelihood")
# Cholesky timings are NaN for skipped sizes; mask them out before plotting.
m = np.isfinite(times[:, 2])
ax.plot(N[m], times[:, 2][m], ".:", label="Cholesky")
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlim(N.min(), N.max())
ax.set_ylim(2e-5, 3.0)
ax.set_xlabel("number of data points")
ax.set_ylabel("computational cost [seconds]")
fig.savefig("benchmark.pdf", bbox_inches="tight")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Benchmark: timing of the direct CARMA solver vs. the celerite solver as
# the number of model terms grows.
# NOTE(review): this file appears to be several scripts concatenated out of
# order; the loop at the bottom is truncated here and its body (which uses
# the Solver/CARMASolver imports) continues in another fragment.
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
from timer import benchmark
from celerite.solver import Solver, CARMASolver
from celerite import plot_setup
plot_setup.setup()
# Simulate a "dataset"
np.random.seed(42)
t = np.sort(np.random.rand(2**11))
yerr = np.random.uniform(0.1, 0.2, len(t))
y = np.sin(t)
# Model orders to sweep: p = 1, 2, 4, ..., 128.
ps = 2 ** np.arange(8)
js = np.empty_like(ps)
# One row per order, two timing columns; NaN marks not-yet-measured entries.
times = np.empty((len(ps), 2))
times[:] = np.nan
for i, p in enumerate(ps):
# Random coefficients for this order: p AR-style terms, p-1 MA-style terms.
arparams = np.random.randn(p)
maparams = np.random.randn(p - 1)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Benchmark: cost of the celerite GP as a function of dataset size N.
# NOTE(review): fragment — the timing loop that fills `times` is not visible
# here; it appears elsewhere in this concatenated file (its columns are
# labelled compute / log likelihood / Cholesky when plotted).
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
from timer import benchmark
from celerite import GP, terms
from celerite import plot_setup
plot_setup.setup()
# Set up the dimensions of the problem
N = 2**np.arange(6, 20)
# Three timing columns per size; NaN marks measurements that were skipped.
times = np.empty((len(N), 3))
times[:] = np.nan
# Simulate a "dataset"
np.random.seed(42)
t = np.sort(np.random.rand(np.max(N)))
yerr = np.random.uniform(0.1, 0.2, len(t))
y = np.sin(t)
# Set up the GP model
kernel = terms.RealTerm(1.0, 0.1) + terms.ComplexTerm(0.1, 2.0, 1.6)
gp = GP(kernel)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Experiment: simulate data from a known SHOTerm kernel, then recover it.
# NOTE(review): fragment — the optimization/MCMC code that follows
# `gp.compute` lives in another part of this concatenated file.
from __future__ import division, print_function
import emcee
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
import celerite
from celerite import terms
from celerite import plot_setup
np.random.seed(42)
plot_setup.setup(auto=True)
# Simulate some data
kernel = terms.SHOTerm(log_S0=0.0, log_omega0=2.0, log_Q=2.0,
bounds=[(-10, 10), (-10, 10), (-10, 10)])
gp = celerite.GP(kernel)
true_params = np.array(gp.get_parameter_vector())
# Log-spaced angular-frequency grid spanning 2*pi/10 to 2*pi/0.1.
omega = 2*np.pi*np.exp(np.linspace(-np.log(10.0), -np.log(0.1), 5000))
true_psd = gp.kernel.get_psd(omega)
N = 200
t = np.sort(np.random.uniform(0, 10, N))
yerr = 2.5
# Draw one realization from the GP with white-noise variance yerr**2.
y = gp.sample(t, diag=yerr**2)
# Find the maximum likelihood model
gp.compute(t, yerr)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Numerical-accuracy experiment over K random repetitions, a grid of term
# counts J, and dataset sizes N.
# NOTE(review): fragment — the inner loop is truncated here; the code that
# fills alpha_error / logdet_error (and uses the h5py import) continues in
# another part of this concatenated file.
from __future__ import division, print_function
import h5py
import numpy as np
import celerite
from celerite import terms, plot_setup
np.random.seed(42)
plot_setup.setup(auto=True)
K = 10
J = np.arange(2, 64, 8)
N = 2**np.arange(6, 16)
# Error arrays indexed as [repetition, term-count, dataset-size].
alpha_error = np.empty((K, len(J), len(N)))
logdet_error = np.empty((K, len(J), len(N)))
logdet_error[:, :, :] = np.nan
for k in range(K):
# Fresh random time stamps and error bars for each repetition.
t = np.sort(np.random.uniform(0, N.max() * 0.8, N.max()))
yerr = np.random.uniform(1.0, 1.5, len(t))
for ix, j in enumerate(J):
# Start from a single RealTerm with random parameters.
kernel = terms.RealTerm(np.random.uniform(-1, 1),
np.random.uniform(-5, -1))
# NOTE(review): fragment — the import block of yet another script in this
# concatenated file (its shebang/encoding header is missing).
from __future__ import division, print_function
import emcee
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from scipy.linalg import cho_factor, cho_solve
import celerite
from celerite import terms
from celerite.modeling import Model
from celerite import plot_setup
plot_setup.setup(auto=True)
# Dense-covariance model whose kernel is an exponentiated-quadratic envelope
# times a cosine (see get_K); all parameters are stored in log space.
class TrueModel(Model):
parameter_names = ("log_amp", "log_ell", "log_period")
def get_K(self, x):
    """Build the dense covariance matrix for input coordinates ``x``.

    The kernel is an exponentiated-quadratic envelope (amplitude and
    length scale taken from ``log_amp`` / ``log_ell``) modulated by a
    cosine with period ``exp(log_period)``.
    """
    # Matrix of pairwise lags tau[i, j] = x[i] - x[j].
    tau = x[:, None] - x[None, :]
    envelope = np.exp(self.log_amp - 0.5 * tau**2 * np.exp(-2.0*self.log_ell))
    carrier = np.cos(2*np.pi*tau*np.exp(-self.log_period))
    return envelope * carrier
def __call__(self, params, x, y, yerr):
# Log-probability: install the proposed parameter vector and reject
# out-of-bounds draws via the prior.
# NOTE(review): truncated — the likelihood evaluation that should follow the
# prior check is not visible in this chunk.
self.set_parameter_vector(params)
lp = self.log_prior()
if not np.isfinite(lp):
return -np.inf
# NOTE(review): fragment — `ax1`, `ax2`, `fig`, `sampler`, `true_sampler`
# and `plot_setup` are created elsewhere in this concatenated file.
ax1.set_ylim(-3.25, 3.25)
ax1.set_xlim(0, 20)
ax1.set_xlabel("time [day]")
ax1.set_ylabel("relative flux [ppm]")
ax1.annotate("simulated data", xy=(0, 0), xycoords="axes fraction",
xytext=(5, 5), textcoords="offset points",
ha="left", va="bottom")
# Posterior histogram of the implied period (2*pi*exp(-chain column);
# presumably the column holds a log-frequency — confirm against the model).
# Fix: `normed` was deprecated in matplotlib 2.1 and removed in 3.1;
# `density=True` is the drop-in replacement.
n, b, p = ax2.hist(np.exp(-sampler.flatchain[:, -2])*(2*np.pi), 20,
color="k", histtype="step", lw=2, density=True)
# Overlay the reference sampler's period posterior on the same bins.
ax2.hist(np.exp(true_sampler.flatchain[:, -1]), b,
color=plot_setup.COLORS["MODEL_1"],
lw=2, histtype="step", density=True, ls="dashed")
ax2.yaxis.set_major_locator(plt.NullLocator())
ax2.set_xlim(b.min(), b.max())
ax2.axvline(1.0, color=plot_setup.COLORS["MODEL_2"], lw=2)
ax2.set_xlabel("period [day]")
fig.savefig("wrong-qpo.pdf", bbox_inches="tight")
# NOTE(review): fragment — `nwalkers`, `ndim`, `log_probability`, `coords`,
# `gp` and `omega` are defined elsewhere in this concatenated file.
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability)
# Burn-in, then reset and draw the production chain (emcee 2.x-style
# run_mcmc return signature — presumably; confirm installed emcee version).
coords, _, _ = sampler.run_mcmc(coords, 500)
sampler.reset()
coords, _, _ = sampler.run_mcmc(coords, 2000)
# Compute the posterior PSD inference
# Thin the flattened chain before evaluating the model PSD per sample.
samples = sampler.flatchain[::15, :]
post_psd = np.empty((len(samples), len(omega)))
for i, s in enumerate(samples):
gp.set_parameter_vector(s)
post_psd[i] = gp.kernel.get_psd(omega)
# Median and 16th/84th percentiles of the posterior PSD at each frequency.
q = np.percentile(post_psd, [16, 50, 84], axis=0)
# Plot the results
# NOTE(review): fragment — `t`, `y`, `yerr`, `omega`, `q`, `true_psd` and
# `plot_setup` come from earlier parts of this concatenated file.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=plot_setup.get_figsize(1, 2))
ax1.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
ax1.set_ylim(-26, 26)
ax1.set_xlim(0, 10)
ax1.set_xlabel("time [day]")
ax1.set_ylabel("relative flux [ppm]")
ax1.annotate("simulated data", xy=(0, 0), xycoords="axes fraction",
xytext=(5, 5), textcoords="offset points",
ha="left", va="bottom")
# Convert angular frequency to ordinary frequency for the PSD panel.
factor = 1.0 / (2*np.pi)
f = omega * factor
ax2.plot(f, q[1] * factor)  # posterior median PSD
ax2.fill_between(f, q[0] * factor, q[2] * factor, alpha=0.3)  # 16-84% band
ax2.plot(f, true_psd * factor, "--k")  # PSD of the simulating kernel
ax2.set_xlim(f[0], f[-1])