import numpy as np


def test_clopper_pearson_interval():
    from coffea.hist.plot import clopper_pearson_interval

    # Reference values for CL=0.68 calculated with ROOT's TEfficiency
    num = np.array([1., 5., 10., 10.])
    denom = np.array([10., 10., 10., 437.])
    ref_hi = np.array([0.293313782248242, 0.6944224231766912, 1.0, 0.032438865381336446])
    ref_lo = np.array([0.01728422272382846, 0.3055775768233088, 0.8325532074018731, 0.015839046981153772])

    interval = clopper_pearson_interval(num, denom, coverage=0.68)

    # compare the relative deviation from the reference on both sides
    threshold = 1e-6
    assert np.all(np.abs(interval[1, :] / ref_hi - 1) < threshold)
    assert np.all(np.abs(interval[0, :] / ref_lo - 1) < threshold)
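
# For reference, a minimal sketch of how a Clopper-Pearson interval can be
# computed from beta-distribution quantiles (assumes scipy is available; the
# function name is an illustration, coffea's clopper_pearson_interval is the
# supported implementation):
from scipy.stats import beta


def clopper_pearson_sketch(num, denom, coverage=0.68):
    alpha = 1 - coverage
    lo = beta.ppf(alpha / 2, num, denom - num + 1)
    hi = beta.ppf(1 - alpha / 2, num + 1, denom - num)
    # the exact interval is pinned to the boundary when num is 0 or denom
    lo = np.where(num == 0, 0.0, lo)
    hi = np.where(num == denom, 1.0, hi)
    return np.vstack([lo, hi])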
def test_hist_serdes():
    import pickle
    from coffea import hist

    h_regular_bins = hist.Hist("regular joe",
                               hist.Bin("x", "x", 20, 0, 200),
                               hist.Bin("y", "why", 20, -3, 3))
    h_regular_bins.fill(x=np.array([1., 2., 3., 4., 5.]),
                        y=np.array([-2., 1., 0., 1., 2.]))
    # touch the identifiers so any lazily-built axis state exists before pickling
    h_regular_bins.sum('x').identifiers('y')

    spkl = pickle.dumps(h_regular_bins)
    hnew = pickle.loads(spkl)
    hnew.sum('x').identifiers('y')

    assert h_regular_bins._dense_shape == hnew._dense_shape
    assert h_regular_bins._axes == hnew._axes
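
# The same serialization path works through a file as well; a minimal usage
# sketch (the file name is an assumption):
import pickle

with open("hists.pkl", "wb") as f:
    pickle.dump(h_regular_bins, f)
with open("hists.pkl", "rb") as f:
    h_roundtrip = pickle.load(f)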
def test_normal_interval():
    from coffea.hist.plot import normal_interval

    # Reference weighted efficiency and error from ROOT's TEfficiency
    denom = np.array([89.01457591590004, 2177.066076428943, 6122.5256890981855,
                      0., 100.27757990710668])
    num = np.array([75.14287743709515, 2177.066076428943, 5193.454723043864,
                    0., 84.97723540536361])
    denom_sumw2 = np.array([94.37919737476827, 10000., 6463.46795877633,
                            0., 105.90898005417333])
    num_sumw2 = np.array([67.2202147680005, 10000., 4647.983931785646,
                          0., 76.01275761253757])
    ref_hi = np.array([0.0514643476600107, 0., 0.0061403263960343,
                       np.nan, 0.0480731185500146])
    ref_lo = np.array([0.0514643476600107, 0., 0.0061403263960343,
                       np.nan, 0.0480731185500146])

    interval = normal_interval(num, denom, num_sumw2, denom_sumw2)
    threshold = 1e-6
    lo, hi = interval

    assert len(ref_hi) == len(hi)
    # compare the finite entries against the reference; the zero-denominator
    # bin has no defined efficiency
    valid = ~np.isnan(ref_hi)
    assert np.all(np.abs(hi[valid] - ref_hi[valid]) < threshold)
    assert np.all(np.abs(lo[valid] - ref_lo[valid]) < threshold)
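
# A hedged sketch of the weighted normal (Wald) approximation these reference
# values follow, in the form used by ROOT's TEfficiency; normal_interval in
# coffea.hist.plot is the supported implementation:
def normal_interval_sketch(num, denom, num_sumw2, denom_sumw2):
    # weighted efficiency and its normal-approximation variance
    eff = num / denom
    var = np.abs(((1.0 - 2.0 * eff) * num_sumw2 + eff ** 2 * denom_sumw2)
                 / denom ** 2)
    err = np.sqrt(var)
    return err, err  # symmetric lower/upper uncertainties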
def test_lumimask():
    from coffea.lumi_tools import LumiMask

    lumimask = LumiMask("tests/samples/Cert_294927-306462_13TeV_EOY2017ReReco_Collisions17_JSON.txt")
    runs = np.array([303825, 123], dtype=np.uint32)
    lumis = np.array([115, 123], dtype=np.uint32)
    mask = lumimask(runs, lumis)
    print("mask:", mask)
    # the first (run, lumi) pair is certified, the second is not
    assert mask[0]
    assert not mask[1]

    # test the underlying py_func directly, bypassing numba compilation
    py_mask = np.zeros(dtype='bool', shape=runs.shape)
    LumiMask._apply_run_lumi_mask_kernel.py_func(lumimask._masks,
                                                 runs, lumis,
                                                 py_mask)
    assert np.all(mask == py_mask)
self._eval_clamp_mins = clamps_and_vars[0]
self._eval_clamp_maxs = clamps_and_vars[1]
self._parm_order = parms_and_orders[1]
self._parms = parms_and_orders[0]
self._formula_str = formula
self._formula = None
if formula != 'None':
    raise Exception('jet energy resolution scale factors have no formula!')

for binname in self._dim_order[1:]:
    binsaslists = self._bins[binname].tolist()
    self._bins[binname] = [np.array(bins) for bins in binsaslists]

# trigger jit compilation now if we have more than one binned dimension
if len(self._dim_order) > 1:
    masked_bin_eval(np.array([0]), self._bins[self._dim_order[1]], np.array([0.0]))

# the call signature is the binned dimensions followed by any additional
# evaluation variables
self._signature = deepcopy(self._dim_order)
for eval_var in self._eval_vars:
    if eval_var not in self._signature:
        self._signature.append(eval_var)

# map each argument name to its position in the call signature; an evaluation
# variable that is also a binned dimension reuses the binned argument's slot
self._dim_args = {self._dim_order[i]: i for i in range(len(self._dim_order))}
self._eval_args = {}
for i, argname in enumerate(self._eval_vars):
    self._eval_args[argname] = i + len(self._dim_order)
    if argname in self._dim_args.keys():
        self._eval_args[argname] = self._dim_args[argname]
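
# Hypothetical illustration of the argument-index mapping built above, with
# made-up variable names: JetEta is both binned and evaluated, so it shares
# one input column, while JetPt gets a fresh slot after the binned arguments.
dim_order = ['JetEta']
eval_vars = ['JetEta', 'JetPt']
dim_args = {name: i for i, name in enumerate(dim_order)}
eval_args = {name: dim_args.get(name, i + len(dim_order))
             for i, name in enumerate(eval_vars)}
assert eval_args == {'JetEta': 0, 'JetPt': 2}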
parmsFromColumns=True,
jme_f=uncFile)
temp = _build_standard_jme_lookup(name, layout, pars, nBinnedVars, nBinColumns,
                                  nEvalVars, formula, nParms, columns, dtypes,
                                  interpolatedFunc=True)
wrapped_up = {}
for key, val in temp.items():
    newkey = (key[0], 'jec_uncertainty_lookup')
    vallist = list(val)
    vals, names = vallist[-1]
    # the parameters come interleaved as (knot, down, up) triplets
    knots = vals[0:len(vals):3]
    downs = vals[1:len(vals):3]
    ups = vals[2:len(vals):3]
    downs = np.array([down.flatten() for down in downs])
    ups = np.array([up.flatten() for up in ups])
    # each knot column must hold a single unique value
    for knotv in knots:
        knot = np.unique(knotv.flatten())
        if knot.size != 1:
            raise Exception('Multiple bin low edges found')
    knots = np.array([np.unique(k.flatten())[0] for k in knots])
    vallist[2] = ({'knots': knots, 'ups': ups.T, 'downs': downs.T}, vallist[2][-1])
    vallist = vallist[:-1]
    wrapped_up[newkey] = tuple(vallist)
return wrapped_up
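
# A hedged sketch of how the wrapped-up {'knots', 'ups', 'downs'} payload can
# be evaluated for a single bin: linear interpolation of the fractional
# uncertainties between the knot positions (the function name is an
# assumption; the real lookup also handles binning and clamping):
def eval_junc_sketch(knots, ups, downs, pt):
    up = np.interp(pt, knots, ups)
    down = np.interp(pt, knots, downs)
    return up, down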
def __setstate__(self, d):
    if '_intervals' in d:  # convert old hists to the new serialization format
        _old_intervals = d.pop('_intervals')
        interval_bins = [i._lo for i in _old_intervals] + [_old_intervals[-1]._hi]
        d['_interval_bins'] = np.array(interval_bins)
        d['_bin_names'] = np.array([interval._label for interval in _old_intervals])
    if '_interval_bins' in d and '_bin_names' not in d:
        d['_bin_names'] = np.full(d['_interval_bins'][:-1].size, None)
    self.__dict__ = d
import json
from numba import types
from numba.typed import Dict


def __init__(self, jsonfile):
    with open(jsonfile) as fin:
        goldenjson = json.load(fin)

    self._masks = Dict.empty(
        key_type=types.uint32,
        value_type=types.uint32[:]
    )

    for run, lumilist in goldenjson.items():
        mask = np.array(lumilist, dtype=np.uint32).flatten()
        # shift the inclusive range starts down by one, for use by the
        # searchsorted-based lookup kernel
        mask[::2] -= 1
        self._masks[np.uint32(run)] = mask
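
# A minimal sketch of the lookup this mask layout enables (the supported path
# is the numba-compiled LumiMask._apply_run_lumi_mask_kernel): with the range
# starts shifted down by one, a lumi section is certified exactly when
# searchsorted lands at an odd index, i.e. between a start and an end.
def lumi_is_good_sketch(masks, run, lumi):
    if run not in masks:
        return False
    return bool(np.searchsorted(masks[run], lumi) % 2)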
if setn in A:
    A[setn][membern][t][b] = [x * 0.01 for x in values]
else:
    raise ValueError(line)

# now build the lookup tables
# for the data scale this is simple: just M and A in bins of (eta, phi)
_scaleedges = (np.array(etaedges), np.array(phiedges))
_Mvalues = {s: {m: {t: np.array([M[s][m][t][b] for b in range(neta)])
                    for t in M[s][m]} for m in M[s]} for s in M}
_Avalues = {s: {m: {t: np.array([A[s][m][t][b] for b in range(neta)])
                    for t in A[s][m]} for m in A[s]} for s in A}

# the MC scale is more complicated
# version 1, if gen pt is available: only requires the kRes lookup
_resedges = np.array(absetaedges)
_kResvalues = {s: {m: {t: np.array(kRes[s][m][t]) for t in kRes[s][m]}
                   for m in kRes[s]} for s in kRes}

# version 2, if gen pt is not available: needs the rsPars and cb* tables
trkedges = [0] + [nmin + x + 0.5 for x in range(ntrk)]
_cbedges = (np.array(absetaedges), np.array(trkedges))
_rsParsvalues = {s: {m: {t: np.array([rsPars[s][m][t][b] for b in range(nabseta)])
                         for t in rsPars[s][m]} for m in rsPars[s]} for s in rsPars}
_cbSvalues = {s: {m: np.array([cbS[s][m][b] for b in range(nabseta)]) for m in cbS[s]} for s in cbS}
_cbAvalues = {s: {m: np.array([cbA[s][m][b] for b in range(nabseta)]) for m in cbA[s]} for s in cbA}
_cbNvalues = {s: {m: np.array([cbN[s][m][b] for b in range(nabseta)]) for m in cbN[s]} for s in cbN}
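
# Hedged note on how these tables are consumed downstream: in the Rochester
# method the data scale correction is k = 1 / (M + Q * A * pT), with M and A
# looked up in the (eta, phi) bins built above and Q the muon charge; the
# kRes and cb* tables serve the two MC variants described above.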
wrapped_up = {
'nsets': nsets,
'members': members,
'edges': {'scales': _scaleedges, 'res': _resedges, 'cb': _cbedges, },
'values': {'M': _Mvalues, 'A': _Avalues,
'kRes': _kResvalues,