def get_box_plot_html(self, base_file_name: str) -> str:  # a box plot over the mean scores per sub program
    scores_per_impl = self.get_scores_per_impl()
    singles = []
    for impl in scores_per_impl:
        scores = scores_per_impl[impl]
        name = "mean score"
        data = RunData({name: scores}, {"description": impl})
        singles.append(SingleProperty(Single(data), data, name))
    return self.boxplot_html(base_file_name, singles)
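The loop above assumes that get_scores_per_impl() returns a mapping from implementation name to a list of mean scores, one list per implementation. A hedged illustration of that assumed shape (names and numbers are made up):

# Assumed result shape of get_scores_per_impl(); purely illustrative values.
scores_per_impl = {
    "gcc -O2":   [0.98, 1.01, 0.97],
    "clang -O2": [1.12, 1.08, 1.10],
}
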
def clone(self, data: t.Dict[str, t.List[Number]] = None, attributes: t.Dict[str, str] = None,
          recorded_error: RecordedError = None, external: bool = None) -> 'RunData':
    """
    Clone this instance, optionally replacing some of its properties.

    :param data: optional dictionary mapping each property to a list of actual values
    :param attributes: dictionary of optional attributes that describe its program block
    :param recorded_error: optional recorded error that replaces the current one
    :param external: does the data come from a prior benchmarking?
    :return: new instance
    """
    def alt(new, old):
        return new if new is not None else old
    return RunData(data=alt(data, self.data), attributes=alt(attributes, self.attributes),
                   recorded_error=alt(recorded_error, self.recorded_error),
                   external=alt(external, self.external))
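A hedged usage sketch of clone(): only the arguments that are passed replace the corresponding fields, everything left as None is carried over from the original instance (the attribute keys below are invented for the example):

# Hypothetical usage: keep the measured data, swap out only the attributes.
original = RunData({"time": [1.2, 1.3, 1.1]}, {"description": "baseline"})
relabeled = original.clone(attributes={"description": "baseline, relabelled"})
# relabeled shares the data, recorded_error and external flag of the original.
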
runs = runs or []  # type: t.List[dict]
prop_descrs = {}  # type: t.Dict[str, str]
for run in runs:
    props = {}
    if "property_descriptions" in run:
        prop_descrs.update(run["property_descriptions"])
    else:
        if "data" not in run:
            run["data"] = {}
        error = None
        if "error" in run:
            error = RecordedProgramError(run["error"]["message"], run["error"]["output"],
                                         run["error"]["error_output"], run["error"]["return_code"])
        elif "internal_error" in run:
            error = RecordedInternalError(run["internal_error"]["message"])
        run_datas.append(RunData(run["data"], run["attributes"] if "attributes" in run else {},
                                 recorded_error=error, external=external))
return RunDataStatsHelper(run_datas, external_count=len(run_datas) if external else 0,
                          property_descriptions=prop_descrs, included_blocks=included_blocks)
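For illustration, a sketch of run dictionaries that init_from_dicts could consume, with the keys inferred from the code above ("attributes", "data", "run_config", "error", "property_descriptions"); the values are made up and the exact result-file format is defined elsewhere in temci:

# Hypothetical input for RunDataStatsHelper.init_from_dicts; keys inferred from the snippet above.
runs = [
    {
        "attributes": {"description": "program A"},
        "run_config": {},
        "data": {"task-clock": [2003.1, 1998.7, 2001.4]},
    },
    {
        "attributes": {"description": "program B"},
        "run_config": {},
        "data": {},
        "error": {"message": "program crashed", "output": "",
                  "error_output": "segmentation fault", "return_code": 139},
    },
    {"property_descriptions": {"task-clock": "task clock in milliseconds"}},
]
helper = RunDataStatsHelper.init_from_dicts(runs)
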
self.append = Settings().default(append, "run/append")  # type: bool
""" Append to the old benchmarks if there are any in the result file? """
self.show_report = Settings().default(show_report, "run/show_report")  # type: bool
""" Show a short report after finishing the benchmarking? """
self.stats_helper = None  # type: RunDataStatsHelper
""" Used stats helper to help with measurements """
typecheck(Settings()["run/out"], FileName())
if self.append:
    run_data = []
    try:
        if os.path.exists(Settings()["run/out"]):
            with open(Settings()["run/out"], "r") as f:
                run_data = yaml.safe_load(f)
        self.stats_helper = RunDataStatsHelper.init_from_dicts(run_data, external=True)
        for run in runs:
            self.stats_helper.runs.append(RunData(attributes=run["attributes"]))
    except:
        self.teardown()
        raise
else:
    self.stats_helper = RunDataStatsHelper.init_from_dicts(copy.deepcopy(runs),
                                                           included_blocks=Settings()["run/included_blocks"])
    #if Settings()["run/remote"]:
    #    self.pool = RemoteRunWorkerPool(Settings()["run/remote"], Settings()["run/remote_port"])
    if os.path.exists(Settings()["run/out"]):
        os.remove(Settings()["run/out"])
self.start_time = time.time()  # type: float
""" Unix time stamp of the start of the benchmarking """
self.end_time = -1  # type: float
""" Unix time stamp of the latest point in time that the benchmarking may reach """
try:
    max_time = parse_timespan(Settings()["run/max_time"])
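For context, the append branch above reads the previous result file with yaml.safe_load and feeds the parsed list of run dictionaries to init_from_dicts with external=True. A standalone, hedged sketch of that pattern (the file name and the assumption that the file holds a YAML list of run dictionaries are mine, not temci's fixed format):

# Hypothetical standalone version of the append-branch pattern shown above.
import yaml

with open("run_output.yaml", "r") as f:      # assumed result file name
    old_runs = yaml.safe_load(f) or []       # list of run dictionaries, or empty
stats_helper = RunDataStatsHelper.init_from_dicts(old_runs, external=True)
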
"""
:param runs: list of dictionaries representing the benchmarking runs for each program block
:param external: do the passed runs come from another benchmarking session rather than this one?
:param included_blocks: include query
:raises ValueError: if the runs parameter has an incorrect structure
:return: created stats helper
"""
typecheck(runs, List(
    Dict({
        "data": Dict(key_type=Str(), value_type=List(Int() | Float()), unknown_keys=True) | NonExistent(),
        "run_config": Dict(unknown_keys=True)
    }, unknown_keys=True) |
    RunData.block_type_scheme |
    RunData.property_descriptions_scheme),
    value_name="runs parameter")
run_datas = []
runs = runs or []  # type: t.List[dict]
prop_descrs = {}  # type: t.Dict[str, str]
for run in runs:
    props = {}
    if "property_descriptions" in run:
        prop_descrs.update(run["property_descriptions"])
    else:
        if "data" not in run:
            run["data"] = {}
        error = None
        if "error" in run:
            error = RecordedProgramError(run["error"]["message"], run["error"]["output"],
                                         run["error"]["error_output"], run["error"]["return_code"])
        elif "internal_error" in run:
             included_blocks: str = None):
    """
    Don't use the constructor, use init_from_dicts if possible.

    :param runs: list of run data objects
    :param tester: used tester or the tester that is set in the settings
    :param external_count: number of external program blocks (blocks for which the data was obtained in a
                           different benchmarking session)
    :param property_descriptions: mapping of some properties to their descriptions or longer versions
    :param errorneous_runs: runs that resulted in errors
    :param included_blocks: include query
    """
    self.tester = tester or TesterRegistry.get_for_name(TesterRegistry.get_used(),  # type: Tester
                                                        Settings()["stats/uncertainty_range"])
    """ Used statistical tester """
    typecheck(runs, List(T(RunData)))
    self.runs = filter_runs(runs, included_blocks or Settings()["report/included_blocks"])  # type: t.List[RunData]
    self.errorneous_runs = errorneous_runs or [r for r in self.runs if r.has_error()]
    self.runs = [r for r in self.runs if not r.has_error() or any(len(v) > 0 for v in r.data.values())]
    """ Data of several runs from several measured program blocks """
    self.external_count = external_count  # type: int
    """
    Number of external program blocks (blocks for which the data was obtained in a different benchmarking session)
    """
    self.property_descriptions = property_descriptions or {}  # type: t.Dict[str, str]
def get_single(self):
    data = InsertionTimeOrderedDict()
    for impl in self.impls:
        data[impl] = self.impls[impl]
    return Single(RunData(data))
def __init__(self, data: t.Union[RunData, 'Single']):
    """
    Create an instance.

    :param data: run data wrapped by this instance or another instance of which the run data is used
    """
    super().__init__()
    self.rundata = None  # type: RunData
    """ Run data wrapped by this instance """
    if isinstance(data, RunData):
        self.rundata = data
    else:
        self.rundata = data.rundata
    self.attributes = self.rundata.attributes  # type: t.Dict[str, str]
    """ Attributes for this instance """
    self.properties = {}  # type: t.Dict[str, SingleProperty]
    """ SingleProperty objects for each property """
    for prop in data.properties:
        self.properties[prop] = SingleProperty(self, self.rundata, prop)
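A hedged usage sketch of Single: wrap a RunData instance and access one SingleProperty per measured property (the property name and values are invented for the example):

# Hypothetical usage of Single; property names and numbers are examples only.
run = RunData({"task-clock": [2003.1, 1998.7, 2001.4]}, {"description": "program A"})
single = Single(run)
task_clock = single.properties["task-clock"]  # SingleProperty for the "task-clock" property
print(single.attributes["description"])       # attributes are taken from the wrapped RunData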