Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def output_values(param1, iteration, residual):
    """Per-iteration callback (presumably lmfit's ``iter_cb`` signature).

    Parameters
    ----------
    param1 : lmfit.Parameters
        Current parameter set at this iteration.
    iteration : int
        Iteration counter supplied by the minimizer.
    residual : array-like
        Residual at this iteration (unused here).
    """
    # Print a report of the current parameter values.
    # NOTE(review): ``report_fit`` must come from an lmfit import elsewhere
    # in the file -- confirm.
    report_fit(param1)
# NOTE(review): truncated fragment -- the assignment target and leading
# term of this expression (presumably ``data = gaussian(...) + ...``) are
# missing from this excerpt.
lorentzian(x, 10, 9.6, 1.3) +
random.normal(scale=0.1, size=n))

# Build the fit parameters: a Gaussian and a Lorentzian peak whose centers
# are tied together through the bounded ``peak_split`` parameter, and whose
# widths are constrained to be equal.
pfit = Parameters()
pfit.add(name='amp_g', value=10)
pfit.add(name='amp_l', value=10)
pfit.add(name='cen_g', value=5)
pfit.add(name='peak_split', value=2.5, min=0, max=5, vary=True)
# Lorentzian center is derived, not free: cen_l = cen_g + peak_split
pfit.add(name='cen_l', expr='peak_split+cen_g')
pfit.add(name='wid_g', value=1)
# force both peaks to share one width
pfit.add(name='wid_l', expr='wid_g')

# Levenberg-Marquardt fit of ``residual`` against (x, data)
mini = Minimizer(residual, pfit, fcn_args=(x, data))
out = mini.leastsq()
report_fit(out.params)

# best fit = data + residual (residual is model - data convention here --
# NOTE(review): confirm the sign convention used by ``residual``)
best_fit = data + out.residual
if HASPYLAB:
    plt.plot(x, data, 'bo')
    plt.plot(x, best_fit, 'r--')
    plt.show()
# NOTE(review): truncated fragment -- these ``fit_params.add`` calls use a
# loop variable ``iy`` whose enclosing ``for`` loop (over the data sets)
# is missing from this excerpt.
fit_params.add('amp_%i' % (iy+1), value=0.5, min=0.0, max=200)
fit_params.add('cen_%i' % (iy+1), value=0.4, min=-2.0, max=2.0)
fit_params.add('sig_%i' % (iy+1), value=0.3, min=0.01, max=3.0)

###############################################################################
# Constrain the values of sigma to be the same for all peaks by assigning
# sig_2, ..., sig_5 to be equal to sig_1.
for iy in (2, 3, 4, 5):
    fit_params['sig_%i' % iy].expr = 'sig_1'

###############################################################################
# Run the global fit and show the fitting result
out = minimize(objective, fit_params, args=(x, data))
report_fit(out.params)

###############################################################################
# Plot the data sets and fits
plt.figure()
for i in range(5):
    # evaluate the fitted Gaussian for data set ``i`` on the shared x grid
    y_fit = gauss_dataset(out.params, i, x)
    plt.plot(x, data[i, :], 'o', x, y_fit, '-')
plt.show()
# NOTE(review): truncated fragment -- these two lines are the tail of a
# model-construction call (sample/beam geometry keyword arguments) whose
# start is missing from this excerpt.
sample_width=6, beam_width=0.25,
background=81, I0=6.35e9)

# embed model in fit code
fitm = xu.simpack.FitModel(m, plot=True, verbose=True)

# set some parameter limitations: fix the SiO2 density, and allow the
# Al2O3 density to vary only within +/-20% of the bulk value
fitm.set_param_hint('SiO2_density', vary=False)
fitm.set_param_hint('Al2O3_density', min=0.8*xu.materials.Al2O3.density,
                    max=1.2*xu.materials.Al2O3.density)
p = fitm.make_params()
# restrict the fitted x-range to [0.05, 8.0]
fitm.set_fit_limits(xmin=0.05, xmax=8.0)

# perform the fit, weighting points by the inverse of their errors
res = fitm.fit(edata, p, ai, weights=1/eps)
lmfit.report_fit(res, min_correl=0.5)

# density depth profile of the refined model (500 sampling points)
m.densityprofile(500, plot=True)
show()
def residual(p):
    """Residual of a two-term exponential-decay model against the data.

    Uses the module-level arrays ``x`` and ``y``; ``p`` supplies the
    amplitudes ``a1``/``a2`` and decay constants ``t1``/``t2``. The second
    term is offset by 0.1 along x.
    """
    term1 = p['a1'] * np.exp(-x / p['t1'])
    term2 = p['a2'] * np.exp(-(x - 0.1) / p['t2'])
    return term1 + term2 - y
# create Minimizer
mini = lmfit.Minimizer(residual, p, nan_policy='propagate')

# first solve with Nelder-Mead algorithm
out1 = mini.minimize(method='Nelder')

# then solve with Levenberg-Marquardt using the
# Nelder-Mead solution as a starting point
out2 = mini.minimize(method='leastsq', params=out1.params)
lmfit.report_fit(out2.params, min_correl=0.5)

# compute 1- and 2-sigma confidence intervals; keep the probability trace
# for the 2-D maps below
ci, trace = lmfit.conf_interval(mini, out2, sigmas=[1, 2], trace=True)
lmfit.printfuncs.report_ci(ci)

# plot data and best fit (best fit = residual + data by the residual's
# model-minus-data convention)
plt.figure()
plt.plot(x, y, 'b')
plt.plot(x, residual(out2.params) + y, 'r-')

# plot confidence intervals (a1 vs t2 and a2 vs t2)
# NOTE(review): only the a1-vs-t2 panel is visible in this excerpt; the
# a2-vs-t2 panel presumably follows below.
fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
cx, cy, grid = lmfit.conf_interval2d(mini, out2, 'a1', 't2', 30, 30)
# filled contours of the 2-D confidence grid, levels 0..1 in steps of 0.1
ctp = axes[0].contourf(cx, cy, grid, np.linspace(0, 1, 11))
fig.colorbar(ctp, ax=axes[0])
axes[0].set_xlabel('a1')
axes[0].set_ylabel('t2')
# NOTE(review): truncated fragment of a (Python 2) method body -- the
# enclosing ``def`` line is missing from this excerpt, and ``xrange`` /
# the ``print`` statement below will not run under Python 3.
hdr_written = False
# fit each "fitgroup" of rows independently
for group in np.unique(df_sorted["fitgroup"]):
    dfr = df_sorted[df_sorted["fitgroup"]==group]
    dfr.index = range(len(dfr))  # need to reindex slice
    (params, dfr) = self.setup_model_params(dfr)

    # Test sensitivity, are we falling into local mins?
    # Track the best (lowest-RMSE) restart across ``num_iter`` attempts.
    lowest_rmse = self.high_number
    # NOTE(review): ``enumerate(xrange(n))`` yields (i, i); the second
    # variable shadows the ``iter`` builtin and is never used.
    for i, iter in enumerate(xrange(self.num_iter)):
        print group, i
        # pick new initial parameter guesses, but dont rebuild
        # params object
        if i > 0:
            params = self.change_param_values(dfr, params)
        result = minimize(self.residual, params, args=(dfr,))
        # NOTE(review): newer lmfit versions do not update the input
        # ``params`` in place -- ``result.params`` may be what should be
        # reported here; confirm against the lmfit version in use.
        report_fit(params, show_correl=False)
        #ci = conf_interval(result, trace=False)
        #report_ci(ci)
        (An, Anc, Anj) = self.forward_run(result, dfr)

        # Successful fit?
        # See comment above about why errorbars might not be
        # resolved.
        #if result.errorbars and self.check_params(result):
        if result.errorbars:
            # RMSE of modelled photosynthesis ``An`` vs the "Photo" column
            rmse = np.sqrt(np.mean((dfr["Photo"] - An)**2))
            if rmse < lowest_rmse:
                lowest_rmse = rmse
                best_result = result
# NOTE(review): this ``return`` belongs to a different, unseen function --
# the excerpt splices two fragments together at this point.
return 1e3/(1+score)
# Build lmfit parameters from a previous indexing ``result``. Center and
# scale can be frozen via ``vary_center`` / ``vary_scale`` and are bounded
# close to their current values; the orientation angles are always free.
params = lmfit.Parameters()
params.add("center_x", value=result.center_x, vary=vary_center, min=result.center_x - 2.0, max=result.center_x + 2.0)
params.add("center_y", value=result.center_y, vary=vary_center, min=result.center_y - 2.0, max=result.center_y + 2.0)
params.add("alpha", value=result.alpha, vary=True)
params.add("beta", value=result.beta, vary=True)
params.add("gamma", value=result.gamma, vary=True)
params.add("scale", value=result.scale, vary=vary_scale, min=result.scale*0.8, max=result.scale*1.2)

# one-element tuple: the image is the only extra argument to the objective
args = img,
res = lmfit.minimize(objfunc, params, args=args, method=method, tol=self.fit_tol)
if verbose:
    lmfit.report_fit(res)

# round the refined values for reporting/scoring
p = res.params
alpha, beta, gamma = [round(p[key].value, 4) for key in ("alpha", "beta", "gamma")]
scale, center_x, center_y = [round(p[key].value, 2) for key in ("scale", "center_x", "center_y")]

# re-project with the refined orientation and score the match
proj = projector.get_projection(alpha, beta, gamma)
pks = proj[:,3:5]            # columns 3:5 -- presumably peak positions; confirm
shape_factor = proj[:,5]
score = round(self.get_score(img, pks, shape_factor, scale, center_x, center_y), 2)
# print "Score: {} -> {}".format(int(score), int(score))
# NOTE(review): truncated -- the IndexingResult(...) call continues past
# the end of this excerpt.
refined = IndexingResult(score=score,
                         number=result.number,
# NOTE(review): truncated fragment -- these two lines are the tail of a
# residual function (damped-sine model) whose ``def`` line is missing
# from this excerpt.
y_model = offset + amp * np.sin(x*omega) * np.exp(-x/decay)
return y_model - ydata

# starting values for the damped-sine parameters; decay kept positive
params = lmfit.Parameters()
params.add('offset', 2.0)
params.add('omega', 3.3)
params.add('amp', 2.5)
params.add('decay', 1.0, min=0)

method='L-BFGS-B'

# ordinary least-squares fit
o1 = lmfit.minimize(resid, params, args=(x, yn), method=method)
print("# Fit using sum of squares:")
lmfit.report_fit(o1)

# robust fit: minimize the negative log-likelihood assuming Cauchy-
# distributed residuals, which down-weights outliers
o2 = lmfit.minimize(resid, params, args=(x, yn), method=method, reducefcn='neglogcauchy')
print("# Robust Fit, using log-likelihood with Cauchy PDF:")
lmfit.report_fit(o2)

if HAS_PYLAB:
    plt.plot(x, y, 'ko', lw=2)
    plt.plot(x, yn, 'k--*', lw=1)
    # fitted curves reconstructed as data + residual
    plt.plot(x, yn+o1.residual, 'r-', lw=2)
    plt.plot(x, yn+o2.residual, 'b-', lw=2)
    plt.legend(['True function',
                'with noise+outliers',
                'sum of squares fit',
                'robust fit'], loc='upper left')
    plt.show()
# NOTE(review): truncated fragment of an objective function -- the
# enclosing ``def`` and the loop headers over ``i``/``j`` are missing
# from this excerpt.
if i in skip:
    continue
if j in skip:
    continue
diffij = diff_vects[i, j]
# weighted mismatch between the measured pairwise difference vector and
# the difference of the current coordinate estimates
x = V[j] - V[i] - diffij
weight = weights[i, j]
out.append(x * weight)
return np.array(out)

args = (difference_vectors, weights)
res = lmfit.minimize(obj_func, params, args=args, method=method)
lmfit.report_fit(res, show_correl=verbose, min_correl=0.8)
params = res.params

# collect refined coordinates from parameters named 'C...' (x/y pairs)
# and shift them so the minimum x and y sit at the origin
Vn = np.array([v.value for k, v in params.items() if k.startswith('C')]).reshape(-1, 2)
offset = min(Vn[:, 0]), min(Vn[:, 1])
coords = Vn - offset
self.optimized_coords = coords

if plot:
    # center on average coordinate to better show displacement
    c1 = self.coords - np.mean(self.coords, axis=0)
    c2 = self.optimized_coords - np.mean(self.optimized_coords, axis=0)
    plt.title(f'Shifts from minimization (`{method}`)\nCentered on average position')
    plt.scatter(*c1.T, label='Original', marker='+')
    plt.scatter(*c2.T, label='Optimized', marker='+')
# NOTE(review): truncated fragment -- the loop over parameter names
# (``pn``) and their limits enclosing this first line is missing from
# this excerpt.
p[pn].set(vary=True, min=limit[0], max=limit[1])
# if needed one can also set relations between parameters:
# p['axial_angD_deg'].set(expr='axial_angI_deg')

# refine the powder model against the masked data, weighted by ``sig``
fitres2 = pm.fit(p, tt[mask], det[mask], std=sig[mask])

##############################
# final calculation and plotting/printing of the results
sim = pm.simulate(tt[mask])
xu.simpack.plot_powder(tt, det, pm, scale='sqrt', mask=mask)
# goodness-of-fit metrics for the masked region, using the number of
# varied parameters from the fit result
xu.simpack.Rietveld_error_metrics(det[mask], sim, std=sig[mask],
                                  Nvar=fitres2.nvarys, disp=True)
lmfit.report_fit(fitres2)