else:
    smooth_1D = smooth_scale_1D * width / fine_width

if smooth_1D < 2:
    logging.warning('fine_bins not large enough to well sample smoothing scale - ' + par.name)

smooth_1D = min(max(1., smooth_1D), fine_bins // 2)

logging.debug("%s 1D sigma_range, std: %s, %s; smooth_1D_bins: %s ", par.name, par.sigma_range, par.err,
              smooth_1D)

winw = min(int(round(2.5 * smooth_1D)), fine_bins // 2 - 2)
kernel = Kernel1D(winw, smooth_1D)
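# kernel.x holds integer bin offsets in [-winw, winw] and kernel.Win the matching
# smoothing weights (nominally a Gaussian of width smooth_1D fine bins, truncated
# at the window half-width winw).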
cache = {}
conv = convolve1D(bins, kernel.Win, 'same', cache=cache)
fine_x = np.linspace(binmin, binmax, fine_bins)
density1D = Density1D(fine_x, P=conv, view_ranges=[par.range_min, par.range_max])

if meanlikes: rawbins = conv.copy()
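# density1D.P is now the raw kernel density estimate: the fine-binned sample
# counts convolved with the smoothing window, before any edge corrections.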
if par.has_limits and boundary_correction_order >= 0:
    # correct for cuts allowing for normalization over window
    prior_mask = np.ones(fine_bins + 2 * winw)
    if par.has_limits_bot:
        prior_mask[winw] = 0.5
        prior_mask[: winw] = 0
    if par.has_limits_top:
        prior_mask[-(winw + 1)] = 0.5
        prior_mask[-winw:] = 0
    a0 = convolve1D(prior_mask, kernel.Win, 'valid', cache=cache)
    ix = np.nonzero(a0 * density1D.P)
    a0 = a0[ix]
    normed = density1D.P[ix] / a0
    if boundary_correction_order == 0:
        density1D.P[ix] = normed
    elif boundary_correction_order <= 2:
        # linear boundary kernel, e.g. Jones 1993, Jones and Foster 1996
        # www3.stat.sinica.edu.tw/statistica/oldpdf/A6n414.pdf after Eq 1b, expressed for general prior mask
        # cf arXiv:1411.5528
        xWin = kernel.Win * kernel.x
        a1 = convolve1D(prior_mask, xWin, 'valid', cache=cache)[ix]
        a2 = convolve1D(prior_mask, xWin * kernel.x, 'valid', cache=cache, cache_args=[1])[ix]
        xP = convolve1D(bins, xWin, 'same', cache=cache)[ix]
        if boundary_correction_order == 1:
            corrected = (density1D.P[ix] * a2 - xP * a1) / (a0 * a2 - a1 ** 2)
        else:
            # quadratic correction
            a3 = convolve1D(prior_mask, xWin * kernel.x ** 2, 'valid', cache=cache, cache_args=[1])[ix]
            a4 = convolve1D(prior_mask, xWin * kernel.x ** 3, 'valid', cache=cache, cache_args=[1])[ix]
            x2P = convolve1D(bins, xWin * kernel.x, 'same', cache=cache, cache_args=[1])[ix]
            denom = a4 * a2 * a0 - a4 * a1 ** 2 - a2 ** 3 - a3 ** 2 * a0 + 2 * a1 * a2 * a3
            A = a4 * a2 - a3 ** 2
            B = a2 * a3 - a4 * a1
            C = a3 * a1 - a2 ** 2
            corrected = (density1D.P[ix] * A + xP * B + x2P * C) / denom
        density1D.P[ix] = normed * np.exp(np.minimum(corrected / normed, 4) - 1)
    else:
        raise SettingError('Unknown boundary_correction_order (expected 0, 1, 2)')
elif boundary_correction_order == 2:
    # higher order kernel
    # eg. see http://www.jstor.org/stable/2965571
    xWin2 = kernel.Win * kernel.x ** 2
    x2P = convolve1D(bins, xWin2, 'same', cache=cache)
    a2 = np.sum(xWin2)
    a4 = np.dot(xWin2, kernel.x ** 2)
    corrected = (density1D.P * a4 - a2 * x2P) / (a4 - a2 ** 2)
    ix = density1D.P > 0
    density1D.P[ix] *= np.exp(np.minimum(corrected[ix] / density1D.P[ix], 2) - 1)
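# Illustrative note (not part of the pipeline): near a hard prior cut a plain
# kernel estimate leaks weight outside the allowed range, biasing the density
# low at the edge. Order 0 just renormalizes by the window mass a0; orders 1-2
# fit a local linear/quadratic model through the window moments a1..a4 to also
# remove the gradient-induced edge bias, with the exp(np.minimum(...)) form
# keeping the corrected density positive.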

if mult_bias_correction_order:
    prior_mask = np.ones(fine_bins)
    if par.has_limits_bot:
        prior_mask[0] *= 0.5
    if par.has_limits_top:
        prior_mask[-1] *= 0.5
    a0 = convolve1D(prior_mask, kernel.Win, 'same', cache=cache, cache_args=[2])
    for _ in range(mult_bias_correction_order):
        # estimate using flattened samples to remove second order biases
        # mostly good performance, see http://www.jstor.org/stable/2965571 method 3,1 for first order
        prob1 = density1D.P.copy()
        prob1[prob1 == 0] = 1
        fine = bins / prob1
        conv = convolve1D(fine, kernel.Win, 'same', cache=cache, cache_args=[2])
        density1D.setP(density1D.P * conv)
        density1D.P /= a0
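        # If the current estimate were exact, bins / prob1 would be flat inside
        # the range and each convolution would return ~a0, leaving P unchanged;
        # iterating therefore shrinks the residual smoothing bias multiplicatively.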

density1D.normalize('max', in_place=True)
if not kwargs: self.density1D[par.name] = density1D

if meanlikes:
    ix = density1D.P > 0
    finebinlikes[ix] /= density1D.P[ix]
    binlikes = convolve1D(finebinlikes, kernel.Win, 'same', cache=cache, cache_args=[2])
    binlikes[ix] *= density1D.P[ix] / rawbins[ix]
    if self.shade_likes_is_mean_loglikes:
        maxbin = np.min(binlikes)
        binlikes = np.where((binlikes - maxbin) < 30, np.exp(-(binlikes - maxbin)), 0)
        binlikes[rawbins == 0] = 0
    else:
        binlikes /= np.max(binlikes)
    density1D.likes = binlikes
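    # binlikes now holds the window-smoothed mean likelihood (or the exponential
    # of the shifted mean log-likelihood) per fine bin, peaking at 1; presumably
    # it is consumed downstream for likelihood shading of 1D plots.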