ndim = chain.shape[2]
fig,axes = matplotlib.pyplot.subplots(ndim+1, 1, sharex=True, figsize=(20,15))
for i in range(ndim):
    axes[i].plot(chain[:,:,i].T, color='k', alpha=0.5)
    axes[i].set_ylabel(self.model.labels[i])
# last panel shows the evolution of log-likelihood for the ensemble of walkers
axes[-1].plot(loglike.T, color='k', alpha=0.5)
axes[-1].set_ylabel('log(L)')
maxloglike = numpy.max(loglike)
axes[-1].set_ylim(maxloglike-3*ndim, maxloglike)  # restrict the range of log-likelihood around its maximum
fig.tight_layout(h_pad=0.)
matplotlib.pyplot.savefig(self.filename+"_chain.png")
try:
    corner.corner(chain[-nsteps_mcmc:].reshape((-1, chain.shape[2])),
                  quantiles=[0.16, 0.5, 0.84], labels=labels)
    matplotlib.pyplot.savefig(self.filename+"_posterior.png")
except ValueError as err:
    print("Can't plot posterior distribution:", err)
labels = [r'$\ln \left(\frac{P}{\rm day}\right)$', '$e$', r'$\omega$ [deg]', r'$\phi_0$ [deg]',
r'$\ln \left(\frac{s}{\rm m\,s^{-1}}\right)$', r'$K$ [km s$^{-1}$]', '$v_0$ [km s$^{-1}$]']
corner_style = dict(truth_color=_truth_color, data_kwargs=dict(alpha=0.5, markersize=2.),
plot_contours=False, plot_density=False, bins=32, color='#555555')
# remove jitter from top plots
s_idx = 4
fig1 = corner.corner(np.delete(samples1, s_idx, axis=1), range=np.delete(ranges, s_idx, axis=0),
truths=np.delete(truth_vec, s_idx), labels=np.delete(labels, s_idx),
**corner_style)
fig1.subplots_adjust(left=0.08, bottom=0.08)
fig1.suptitle("Experiment 1a: fixed jitter", fontsize=26)
fig1.savefig(join(paths.figures, 'exp1-corner-a.pdf'), dpi=128)
fig2 = corner.corner(samples2, range=ranges, truths=truth_vec, labels=labels, **corner_style)
fig2.subplots_adjust(left=0.08, bottom=0.08)
fig2.suptitle("Experiment 1b: sample jitter", fontsize=26)
fig2.savefig(join(paths.figures, 'exp1-corner-b.pdf'), dpi=128)
plt.figure()
plt.plot(x, y, 'b')
plt.plot(x, residual(mi.params) + y, 'r', label='best fit')
plt.legend(loc='best')
plt.show()
# Place bounds on the __lnsigma parameter that emcee automatically adds to
# estimate the true uncertainty in the data when is_weighted=False
mi.params.add('__lnsigma', value=np.log(0.1), min=np.log(0.001), max=np.log(2))
res = lmfit.minimize(residual, method='emcee', nan_policy='omit', burn=300,
steps=1000, thin=20, params=mi.params, is_weighted=False,
progress=False)
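# A hedged follow-up sketch (not from the original source): with is_weighted=False,
# the fitted noise level lives in the '__lnsigma' parameter added above, so the
# inferred per-point uncertainty can be read back as:
print('inferred data uncertainty:', np.exp(res.params['__lnsigma'].value))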
if HASPYLAB and HASCORNER:
    emcee_corner = corner.corner(res.flatchain, labels=res.var_names,
                                 truths=list(res.params.valuesdict().values()))
    plt.show()
if HASPYLAB:
    plt.plot(res.acceptance_fraction)
    plt.xlabel('walker')
    plt.ylabel('acceptance fraction')
    plt.show()
if hasattr(res, "acor"):
print("Autocorrelation time for the parameters:")
print("----------------------------------------")
for i, par in enumerate(p):
print(par, res.acor[i])
print("\nmedian of posterior probability distribution")
nsteps = chain.shape[1]
# Is there any hydrogen, anywhere?
if blobs[:,:,-3].max() > blobs[:,:,-3].min():
    labels = [r"RG Dur. (Myr)", r"Hydrogen (M$_\oplus$)", r"Water (TO)", r"Oxygen (bar)"]
    blobs = blobs[nburn:,:,np.array([-4,-3,-2,-1])].reshape(nwalk * (nsteps - nburn), 4)
else:
    labels = [r"RG Dur. (Myr)", r"Water (TO)", r"Oxygen (bar)"]
    blobs = blobs[nburn:,:,np.array([-4,-2,-1])].reshape(nwalk * (nsteps - nburn), 3)
# Fix units
blobs[:,0] /= 1.e6
# Plot
matplotlib.rcParams['lines.linewidth'] = 1
fig = corner.corner(blobs, labels = labels, bins = 50)
fig.savefig(os.path.join(PATH, 'output', '%s.corner.pdf' % name), bbox_inches = 'tight')
#::: plot all chains of parameters
for i in range(ndim):
    ax = axes[i+1]
    ax.set(ylabel=names[i], xlabel='steps')
    for j in range(nwalkers):
        ax.plot(steps, sampler.chain[j,:,i], '-')
    ax.axvline(burn_steps, color='k', linestyle='--')
plt.tight_layout()
fig.savefig( os.path.join(outdir,fname+'mcmc_chains.pdf'), bbox_inches='tight')
plt.close(fig)
#::: plot corner
fig = corner.corner(samples,
                    labels=names,
                    show_titles=True, title_kwargs={"fontsize": 12})
fig.savefig( os.path.join(outdir,fname+'mcmc_corner.pdf'), bbox_inches='tight')
plt.close(fig)
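#::: a hedged sketch (not from the original source): 'samples' used for the corner
#::: plot above would typically be the post-burn-in chain flattened over all walkers
samples = sampler.chain[:, burn_steps:, :].reshape((-1, ndim))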
#::: Calculate the detrended data
logprint('\nRetrieve samples for detrending...')
sys.stdout.flush()
if method == 'mean_curve':
    mu_all_samples = []
    std_all_samples = []
    for s in tqdm(samples[np.random.randint(len(samples), size=Nsamples_detr)]):
        gp = call_gp(s)
        # mu, var = gp.predict(yy, x, return_var=True)
        mu, var = gp_predict_in_chunks(yy, x)
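# A hedged sketch (not from the original source) of what a chunked GP predictor
# like the gp_predict_in_chunks() call above might look like: evaluating
# gp.predict() piecewise keeps the memory footprint bounded for long light curves.
# The actual helper's signature may differ.
def gp_predict_in_chunks_sketch(y, x, chunk_size=5000):
    mu, var = [], []
    for i in range(0, len(x), chunk_size):
        m, v = gp.predict(y, x[i:i+chunk_size], return_var=True)  # celerite-style API
        mu.append(m)
        var.append(v)
    return np.concatenate(mu), np.concatenate(var)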
# Indices of events involving this planet
inds = np.where((pairs[:,0] == k) | (pairs[:,1] == k))[0]
# Again, for the 30,000 simulations we ran...
print("%s: %.3f" % (system.bodies[k].name, len(pairs[inds]) / 30000.))
# Duration
dt = durs[inds] / MINUTE
# Depth
d = depths[inds] * 1e2
# Corner plot
samples = np.vstack((dt, d)).T
fig = corner.corner(samples, plot_datapoints=False,
                    range=[(0, 60), (0, 1)],
                    labels=["Duration [min]", "Depth [%]"],
                    bins=30,
                    hist_kwargs={'color': 'w'})
# Indices of events involving each of the planets
pinds = [[] for j in range(1, 8)]
for j in range(1, 8):
    if j != k:
        pinds[j - 1] = np.where((pairs[inds,0] == j) | (pairs[inds,1] == j))[0]
# Duration stacked histogram
n, _, _ = fig.axes[0].hist([dt[p] for p in pinds], bins=30,
                           range=(0, 60),
                           stacked=True)
# weighted average and covariance:
p, cov = nestle.mean_and_cov(res.samples, res.weights)
print("m = {0:5.2f} +/- {1:5.2f}".format(p[0], np.sqrt(cov[0, 0])))
print("b = {0:5.2f} +/- {1:5.2f}".format(p[1], np.sqrt(cov[1, 1])))
plt.figure()
plt.errorbar(x, y, yerr=yerr, capsize=0, fmt='k.', ecolor='.7')
plt.plot([0., 10.], model(p, np.array([0., 10.])), c='k')
plt.show()
###############################################################################
# Plot samples to see the full posterior surface.
fig = corner.corner(res.samples, weights=res.weights, labels=['m', 'b'],
range=[0.99999, 0.99999], truths=theta_true, bins=30)
plt.show()
def plot_corner(xs, ps=None, ms=None, filename=None, **kwargs):
"""
Produce a corner plot
"""
import corner
mask = [i for i in range(xs.shape[-1]) if not all(xs[:,i]==xs[0,i]) ]
fig = corner.corner(xs[:,mask], color='k', hist_kwargs={'density':True}, **kwargs)
if ps is not None:
mask = [i for i in range(ps.shape[-1]) if not all(ps[:,i]==ps[0,i]) ]
corner.corner(ps[:,mask], fig = fig, color='g', hist_kwargs={'density':True}, **kwargs)
if ms is not None:
mask = [i for i in range(ms.shape[-1]) if not all(ms[:,i]==ms[0,i]) ]
corner.corner(ms[:,mask], fig = fig, color='r', hist_kwargs={'density':True}, **kwargs)
if filename is not None:
plt.savefig(filename,bbox_inches='tight')
plt.close()
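# Hypothetical usage of plot_corner (the variable names below are illustrative,
# not from the original source): overlay posterior and prior samples, with any
# extra keyword arguments passed straight through to corner.corner.
plot_corner(posterior_samples, ps=prior_samples, filename='corner_overlay.png')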
# make the output corner plot, and lightcurve plot if desired
if plotcorner:
    if isinstance(trueparams, dict):
        trueparamkeys = np.sort(list(trueparams.keys()))
        truelist = [trueparams[k] for k in trueparamkeys]
        fig = corner.corner(
            samples,
            labels=trueparamkeys,
            truths=truelist,
            quantiles=[0.1585, 0.5, 0.8415], show_titles=True
        )
    else:
        fig = corner.corner(samples,
                            labels=fitparamnames,
                            quantiles=[0.1585, 0.5, 0.8415],
                            show_titles=True)
    plt.savefig(plotcorner, dpi=300)
    if verbose:
        LOGINFO('saved {:s}'.format(plotcorner))
if plotfit and isinstance(plotfit, str):
    f, ax = plt.subplots(figsize=(8,6))
    ax.scatter(stimes, smags, c='k', alpha=0.5, label='observed',
               zorder=1, s=1.5, rasterized=True, linewidths=0)
    ax.scatter(stimes, init_flux, c='r', alpha=1,
               s=3.5, zorder=2, rasterized=True, linewidths=0,
               label='initial guess')