Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def __init__(self, columns=None):
    """Set up the processor: record requested columns and build accumulators.

    Parameters
    ----------
    columns : list of str, optional
        Branch/column names this processor reads. Defaults to an empty list.
    """
    # Fix: `columns=[]` as a default is a single list shared by every
    # instance (mutable-default pitfall); use None as the sentinel instead.
    self._columns = [] if columns is None else columns
    dataset_axis = hist.Cat("dataset", "Primary dataset")
    # Very fine binning: 30000 bins over 0.25-300 GeV.
    mass_axis = hist.Bin("mass", r"$m_{\mu\mu}$ [GeV]", 30000, 0.25, 300)
    pt_axis = hist.Bin("pt", r"$p_{T}$ [GeV]", 30000, 0.25, 300)
    self._accumulator = processor.dict_accumulator({
        'mass': hist.Hist("Counts", dataset_axis, mass_axis),
        'pt': hist.Hist("Counts", dataset_axis, pt_axis),
        'cutflow': processor.defaultdict_accumulator(int),
    })
def __init__(self, columns=None):
    """Set up the processor: record requested columns and build accumulators.

    Parameters
    ----------
    columns : list of str, optional
        Branch/column names this processor reads. Defaults to an empty list.
    """
    # Fix: a `columns=[]` default is one shared mutable list across all
    # instances; map a None sentinel to a fresh list instead.
    self._columns = [] if columns is None else columns
    dataset_axis = hist.Cat("dataset", "Primary dataset")
    # Very fine binning: 30000 bins over 0.25-300 GeV.
    mass_axis = hist.Bin("mass", r"$m_{\mu\mu}$ [GeV]", 30000, 0.25, 300)
    pt_axis = hist.Bin("pt", r"$p_{T}$ [GeV]", 30000, 0.25, 300)
    self._accumulator = processor.dict_accumulator({
        'mass': hist.Hist("Counts", dataset_axis, mass_axis),
        'pt': hist.Hist("Counts", dataset_axis, pt_axis),
        'cutflow': processor.defaultdict_accumulator(int),
    })
# NOTE(review): fragment — `p4` and `arrays` consumed on the next line are the
# electron collections built on lines above this excerpt; the enclosing
# function definition is not visible here, so the code is left byte-identical.
electrons = awkward.JaggedArray.zip(p4=p4, **arrays)
# Read every Muon_* branch, stripping the "Muon_" prefix from the keys.
arrays = {k.replace('Muon_', ''): v for k, v in tree.arrays("Muon_*", namedecode='ascii').items()}
# Build muon four-vectors from cartesian components; pop() removes those keys
# so only the remaining branches are zipped in as extra columns below.
p4 = uproot_methods.TLorentzVectorArray.from_cartesian(
arrays.pop('Px'),
arrays.pop('Py'),
arrays.pop('Pz'),
arrays.pop('E'),
)
muons = awkward.JaggedArray.zip(p4=p4, **arrays)
# Two types of axes exist presently: bins and categories
lepton_kinematics = hist.Hist("Events",
hist.Cat("flavor", "Lepton flavor"),
hist.Bin("pt", "$p_{T}$", 19, 10, 100),
hist.Bin("eta", r"$\eta$", [-2.5, -1.4, 0, 1.4, 2.5]),
)
# Pass keyword arguments to fill, all arrays must be flat numpy arrays
# User is responsible for ensuring all arrays have same jagged structure!
lepton_kinematics.fill(flavor="electron", pt=electrons['p4'].pt.flatten(), eta=electrons['p4'].eta.flatten())
lepton_kinematics.fill(flavor="muon", pt=muons['p4'].pt.flatten(), eta=muons['p4'].eta.flatten())
return lepton_kinematics
# NOTE(review): stray closing parenthesis — looks like a truncation/duplication
# artifact from scraping; confirm against the original example before removing.
)
# NOTE(review): duplicated fragment of the lepton-kinematics example — `p4`
# and `arrays` used on the next line come from electron-building lines above
# this excerpt; the enclosing def is not visible, so code is left byte-identical.
electrons = awkward.JaggedArray.zip(p4=p4, **arrays)
# Read every Muon_* branch, stripping the "Muon_" prefix from the keys.
arrays = {k.replace('Muon_', ''): v for k, v in tree.arrays("Muon_*", namedecode='ascii').items()}
# Build muon four-vectors from cartesian components; pop() removes those keys
# so only the remaining branches are zipped in as extra columns below.
p4 = uproot_methods.TLorentzVectorArray.from_cartesian(
arrays.pop('Px'),
arrays.pop('Py'),
arrays.pop('Pz'),
arrays.pop('E'),
)
muons = awkward.JaggedArray.zip(p4=p4, **arrays)
# Two types of axes exist presently: bins and categories
lepton_kinematics = hist.Hist("Events",
hist.Cat("flavor", "Lepton flavor"),
hist.Bin("pt", "$p_{T}$", 19, 10, 100),
hist.Bin("eta", r"$\eta$", [-2.5, -1.4, 0, 1.4, 2.5]),
)
# Pass keyword arguments to fill, all arrays must be flat numpy arrays
# User is responsible for ensuring all arrays have same jagged structure!
lepton_kinematics.fill(flavor="electron", pt=electrons['p4'].pt.flatten(), eta=electrons['p4'].eta.flatten())
lepton_kinematics.fill(flavor="muon", pt=muons['p4'].pt.flatten(), eta=muons['p4'].eta.flatten())
return lepton_kinematics
def __init__(self, columns=None, canaries=None):
    """Set up the processor: record columns/canaries and build accumulators.

    Parameters
    ----------
    columns : list of str, optional
        Branch/column names this processor reads. Defaults to an empty list.
    canaries : list, optional
        Canary values tracked by this processor. Defaults to an empty list.
    """
    # Fix 1: both `columns=[]` and `canaries=[]` were shared mutable
    # defaults; use None sentinels mapped to fresh lists.
    self._columns = [] if columns is None else columns
    self._canaries = [] if canaries is None else canaries
    dataset_axis = hist.Cat("dataset", "Primary dataset")
    # Very fine binning: 30000 bins over 0.25-300 GeV.
    mass_axis = hist.Bin("mass", r"$m_{\mu\mu}$ [GeV]", 30000, 0.25, 300)
    pt_axis = hist.Bin("pt", r"$p_{T}$ [GeV]", 30000, 0.25, 300)
    # Fix 2: the original call to dict_accumulator was missing its closing
    # parenthesis (truncated), leaving the method syntactically invalid.
    self._accumulator = processor.dict_accumulator(
        {
            'mass': hist.Hist("Counts", dataset_axis, mass_axis),
            'pt': hist.Hist("Counts", dataset_axis, pt_axis),
            'cutflow': processor.defaultdict_accumulator(int),
            'worker': processor.set_accumulator(),
        }
    )
def test_hist_serdes():
    """Round-trip a 2D histogram through pickle and verify it is preserved."""
    import pickle

    original = hist.Hist(
        "regular joe",
        hist.Bin("x", "x", 20, 0, 200),
        hist.Bin("y", "why", 20, -3, 3),
    )
    original.fill(
        x=np.array([1., 2., 3., 4., 5.]),
        y=np.array([-2., 1., 0., 1., 2.]),
    )
    # Mirror the same identifier access on both objects, before and after
    # serialization.
    original.sum('x').identifiers('y')

    restored = pickle.loads(pickle.dumps(original))
    restored.sum('x').identifiers('y')

    assert original._dense_shape == restored._dense_shape
    assert original._axes == restored._axes
def __init__(self, columns=None):
    """Set up the processor: record requested columns and build accumulators.

    Parameters
    ----------
    columns : list of str, optional
        Branch/column names this processor reads. Defaults to an empty list.
    """
    # Fix: `columns=[]` is a single mutable default list shared across
    # instances; use a None sentinel and allocate a fresh list per call.
    self._columns = [] if columns is None else columns
    dataset_axis = hist.Cat("dataset", "Primary dataset")
    # Very fine binning: 30000 bins over 0.25-300 GeV.
    mass_axis = hist.Bin("mass", r"$m_{\mu\mu}$ [GeV]", 30000, 0.25, 300)
    pt_axis = hist.Bin("pt", r"$p_{T}$ [GeV]", 30000, 0.25, 300)
    self._accumulator = processor.dict_accumulator({
        'mass': hist.Hist("Counts", dataset_axis, mass_axis),
        'pt': hist.Hist("Counts", dataset_axis, pt_axis),
        'cutflow': processor.defaultdict_accumulator(int),
    })
# NOTE(review): fragment — the `animal` and `vocalization` axes referenced
# below are defined elsewhere (not visible in this excerpt), and the
# h_mascots_4 call at the bottom is cut off before its closing parenthesis,
# so the code is left byte-identical.
height = hist.Bin("height", "height [m]", 10, 0, 5)
# Four constructions of the same histogram: positional axes ...
h_mascots_1 = hist.Hist("fermi mascot showdown",
animal,
vocalization,
height,
# weight is a reserved keyword
hist.Bin("mass", "weight (g=9.81m/s**2) [kg]", np.power(10., np.arange(5)-1)),
)
# ... axes passed as an `axes=` tuple ...
h_mascots_2 = hist.Hist("fermi mascot showdown",
axes=(animal,
vocalization,
height,
# weight is a reserved keyword
hist.Bin("mass", "weight (g=9.81m/s**2) [kg]", np.power(10., np.arange(5)-1)),)
)
# ... axes as an `axes=` list with the title given via `label=` ...
h_mascots_3 = hist.Hist(
axes=[animal,
vocalization,
height,
# weight is a reserved keyword
hist.Bin("mass", "weight (g=9.81m/s**2) [kg]", np.power(10., np.arange(5)-1)),],
label="fermi mascot showdown"
)
# ... and positional again (truncated below — closing arguments missing).
h_mascots_4 = hist.Hist(
"fermi mascot showdown",
animal,
vocalization,
height,
def test_hist_serdes_labels():
    """Verify per-identifier labels survive a pickle round trip."""
    import pickle

    axis = hist.Bin('asdf', 'asdf', 3, 0, 3)
    axis.identifiers()[0].label = 'type 1'
    original = hist.Hist('a', axis)
    # Same identifier access performed on the original as on the copy below.
    original.identifiers('asdf')

    restored = pickle.loads(pickle.dumps(original))

    for before, after in zip(original.identifiers('asdf'),
                             restored.identifiers('asdf')):
        assert before.label == after.label
    assert original._dense_shape == restored._dense_shape
    assert original._axes == restored._axes