ax = axes[1]
q = np.percentile(psds, [16, 50, 84], axis=0)
ax.fill_between(f, q[0], q[2], alpha=0.5, color=color, edgecolor="none")
ax.plot(f, q[1], color=color, lw=1.5)
ax = axes[2]
q = np.percentile(acfs, [16, 50, 84], axis=0)
ax.fill_between(tau, q[0], q[2], alpha=0.5, color=color, edgecolor="none")
ax.plot(tau, q[1], color=color, lw=1.5)
fig.savefig("rotation.pdf", bbox_inches="tight", dpi=300)
plt.close(fig)
# Plot the period constraint
ind = gp.get_parameter_names().index("kernel:log_period")
period_samps = np.exp(samples[:, ind])
fig, ax = plt.subplots(1, 1, figsize=get_figsize())
ax.hist(period_samps, 40, histtype="step", color=color)
ax.yaxis.set_major_locator(plt.NullLocator())
mu, std = np.mean(period_samps), np.std(period_samps)
ax.set_xlim(mu - 3.5*std, mu + 3.5*std)
ax.set_xlabel("rotation period [days]")
ax.axvline(3.80 + 0.15, color="k", ls="dashed")
ax.axvline(3.80 - 0.15, color="k", ls="dashed")
fig.savefig("rotation-period.pdf", bbox_inches="tight", dpi=300)
q = np.percentile(period_samps, [16, 50, 84])
print(q, np.diff(q), np.mean(period_samps), np.std(period_samps))
with open("rotation.tex", "w") as f:
    f.write("% Automatically generated\n")
    f.write(("\\newcommand{{\\rotationperiod}}{{\\ensuremath{{{{"
             "{0:.2f} \\pm {1:.2f}}}}}}}\n").format(mu, std))
def band_e(l, u, mat_full):
    # Pack a square matrix into rectangular band storage: element (j + i, j)
    # of the full matrix is stored at row u + i, column j of the band array.
    assert u >= 0
    assert mat_full.shape[1] == mat_full.shape[0]
    size = mat_full.shape[0]
    mat_rect = np.zeros((l + u + 1, size), dtype=int)
    for i in range(-u, l + 1):
        row = u + i
        for j in range(size):
            if j + i < 0:
                continue
            mat_rect[row, j] = mat_full[j + i, j] if j + i < size else 0
    return mat_rect
band = band_e(2*J+2, 2*J+2, full)
print(band.shape)
fig, ax = plt.subplots(1, 1, figsize=get_figsize(2.3, 2.3))
ax.pcolor(band, cmap=cmap, vmin=0, vmax=len(c))
ax.set_ylim(band.shape[0], 0)
ax.set_xlim(0, band.shape[1])
ax.set_frame_on(False)
ax.set_xticks([])
ax.set_yticks([])
ax.set_aspect("equal")
fig.savefig("matrix-band.pdf", bbox_inches="tight")
time_grid = np.linspace(0, 1.4, 5000)
n = 1000
psds = np.empty((n, len(freq)))
acors = np.empty((n, len(time_grid)))
for i, j in enumerate(np.random.randint(len(samples), size=n)):
    s = samples[j]
    gp.set_parameter_vector(s)
    psds[i] = gp.kernel.get_psd(2*np.pi*freq)
    acors[i] = gp.kernel.get_value(time_grid)
# Get the mode frequencies at the maximum-probability sample
gp.set_parameter_vector(samples[np.argmax(log_probs)])
peak_freqs = gp.kernel.get_freqs()
factor = uHz_conv/(2*np.pi)
fig, ax = plt.subplots(1, 1, figsize=get_figsize(1, 2))
for t in gp.kernel.get_terms():
    p = t.get_psd(2*np.pi*freq)*factor
    ax.plot(freq_uHz, p, "k", lw=0.5, alpha=0.8)
p = gp.kernel.get_psd(2*np.pi*freq)*factor
ax.plot(freq_uHz, p, "k")
ax.plot(freq_uHz, gp.kernel.get_envelope(2*np.pi*freq)*factor, "--k", lw=0.75)
ax.axvline(np.exp(gp.kernel.log_nu_max) / uHz_conv, color="k", ls="dashed",
           lw=0.75)
ax.set_ylabel(r"power [$\mathrm{ppm}^2\,\mu\mathrm{Hz}^{-1}$]")
ax.set_xlabel(r"frequency [$\mu$Hz]")
ax.set_xlim(freq_uHz.min(), freq_uHz.max())
ax.set_ylim(2e-2, 2e2)
ax.set_yscale("log")
fig.savefig("astero-sketch.pdf", bbox_inches="tight")
# Find the maximum-likelihood parameters from a grid of nu_max restarts
def get_ml_params(log_nu_max):
    # Assumed reconstruction: start from the current parameters with log_nu_max reset
    gp.kernel.log_nu_max = log_nu_max
    p0 = gp.get_parameter_vector()
    bounds = gp.get_parameter_bounds()
    r = minimize(nll, p0, method="L-BFGS-B", bounds=bounds)
    gp.set_parameter_vector(r.x)
    return r.fun, r.x
with emcee3.pools.InterruptiblePool() as pool:
    results = list(sorted(pool.map(
        get_ml_params, gp.kernel.log_nu_max + np.linspace(-0.05, 0.05, 5)
    ), key=lambda o: o[0]))
gp.set_parameter_vector(results[0][1])
print(gp.get_parameter_dict(include_frozen=True))
# Use more modes in the MCMC:
gp.kernel.nterms = 3
fig, ax = plt.subplots(1, 1, figsize=get_figsize())
ax.plot(freq_uHz, power_all, "k", alpha=0.8, rasterized=True)
ax.plot(freq_uHz, gp.kernel.get_psd(2*np.pi*freq) * uHz_conv / (2*np.pi),
        alpha=0.5, rasterized=True)
ax.set_xlabel(r"frequency [$\mu$Hz]")
ax.set_ylabel(r"power [$\mathrm{ppm}^2\,\mu\mathrm{Hz}^{-1}$]")
ax.set_yscale("log")
fig.savefig(format_filename("initial_psd"), bbox_inches="tight", dpi=300)
# Set up the probabilistic model for sampling
def log_prob(p):
    gp.set_parameter_vector(p)
    lp = gp.log_prior()
    if not np.isfinite(lp):
        return -np.inf
    ll = gp.log_likelihood(y)
    return ll + lp if np.isfinite(ll) else -np.inf
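# NOTE: `soln` below does not appear in this excerpt; a minimal sketch,
# assuming it is the result of maximizing the log probability defined above:
from scipy.optimize import minimize
soln = minimize(lambda p: -log_prob(p), gp.get_parameter_vector(),
                method="L-BFGS-B")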
ndim = len(soln.x)
nwalkers = 32
coords = soln.x + 1e-4 * np.random.randn(nwalkers, ndim)
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob)
coords, _, _ = sampler.run_mcmc(coords, 500)
sampler.reset()
coords, _, _ = sampler.run_mcmc(coords, 2000)
# Plot the results
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=plot_setup.get_figsize(1, 2))
ax1.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
ax1.set_ylim(-3.25, 3.25)
ax1.set_xlim(0, 20)
ax1.set_xlabel("time [day]")
ax1.set_ylabel("relative flux [ppm]")
ax1.annotate("simulated data", xy=(0, 0), xycoords="axes fraction",
xytext=(5, 5), textcoords="offset points",
ha="left", va="bottom")
n, b, p = ax2.hist(np.exp(-sampler.flatchain[:, -2])*(2*np.pi), 20,
                   color="k", histtype="step", lw=2, density=True)
ax2.hist(np.exp(true_sampler.flatchain[:, -1]), b,
         color=plot_setup.COLORS["MODEL_1"],
         lw=2, histtype="step", density=True, ls="dashed")
ax2.yaxis.set_major_locator(plt.NullLocator())
ml_params = np.array(r.x)
print("Maximum log-likelihood: {0}".format(gp.log_likelihood(y)))
# Compute the maximum likelihood predictions
x = np.linspace(t.min(), t.max(), 5000)
trend = gp.predict(y, t, return_cov=False)
trend -= gp.mean.get_value(t) - gp.mean.mean_flux
mu, var = gp.predict(y, x, return_var=True)
std = np.sqrt(var)
mean_mu = gp.mean.get_value(x)
mu -= mean_mu
wn = np.exp(gp.log_white_noise.value)
ml_yerr = np.sqrt(yerr**2 + wn)
# Plot the maximum likelihood predictions
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=get_figsize(1, 2))
ax1.errorbar(t - t.min(), y, yerr=ml_yerr, fmt=".k", capsize=0, zorder=-1)
ax1.plot(x - t.min(), mu, zorder=100)
ax1.set_ylim(-0.72, 0.72)
ax1.yaxis.set_major_locator(plt.MaxNLocator(5))
ax1.set_ylabel("raw [ppt]")
ax1.yaxis.set_label_coords(-0.1, 0.5)
ax2.errorbar(t - t.min(), y-trend, yerr=ml_yerr, fmt=".k", capsize=0,
             zorder=-1)
ax2.plot(x - t.min(), mean_mu - gp.mean.mean_flux, zorder=100)
ax2.set_xlim(0, t.max()-t.min())
ax2.set_ylim(-0.41, 0.1)
ax2.yaxis.set_major_locator(plt.MaxNLocator(5))
ax2.set_ylabel("de-trended [ppt]")
ax2.set_xlabel("time [days]")
ax2.yaxis.set_label_coords(-0.1, 0.5)
def sho_acf(Q, tau):
    # Autocorrelation of the stochastically-driven damped SHO; Q = 1/2 is the
    # critically damped special case.
    t = np.abs(tau)
    if np.allclose(Q, 0.5):
        return np.exp(-t) * (1.0 + t)
    b = 1.0 / np.sqrt(4*Q**2 - 1)
    c = 0.5 / Q
    d = 0.5 * np.sqrt(4*Q**2 - 1) / Q
    return np.exp(-c * t) * (np.cos(d*t) + b*np.sin(d*t))
def lorentz_psd(Q, x):
    return Q**2 * (1.0 / ((x - 1)**2 * (2*Q)**2 + 1) +
                   1.0 / ((x + 1)**2 * (2*Q)**2 + 1))

def lorentz_acf(Q, tau):
    t = np.abs(tau)
    return np.exp(-0.5*t/Q) * np.cos(t)
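# NOTE: `sho_psd` is called below but not defined in this excerpt; a minimal
# sketch of the power spectrum of a stochastically-driven damped simple
# harmonic oscillator with quality factor Q, written in units of the
# oscillator frequency and up to an overall normalization:
def sho_psd(Q, x):
    x2 = x*x
    return 1.0 / ((x2 - 1.0)**2 + x2 / Q**2)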
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=get_figsize(1, 3))
x = 10**np.linspace(-1.1, 1.1, 5000)
tau = np.linspace(0, 20, 1000)
for i, (Q_name, Q) in enumerate(
        [("1/2", 0.5), ("1/\\sqrt{2}", 1./np.sqrt(2)), ("2", 2.0),
         ("10", 10.0)]):
    l, = ax1.plot(x, sho_psd(Q, x), label="$Q = {0}$".format(Q_name), lw=1.5)
    c = l.get_color()
    ax2.plot(tau, sho_acf(Q, tau), label="$Q = {0}$".format(Q_name), lw=1.5,
             color=c)
    K = sho_acf(Q, tau[:, None] - tau[None, :])
    y = np.random.multivariate_normal(np.zeros(len(tau)), K, size=3)
    ax3.axhline(-5*i, color="k", lw=0.75)
    ax3.plot(tau, -5*i + (y - np.mean(y, axis=1)[:, None]).T, color=c,
             lw=1)