print_func("""\t{d}:\n\t\t{m}""".format(d=run.description(), m="\n\t\t".join(str(run.recorded_error).split("\n"))))
@register(ReporterRegistry, "html", Dict(unknown_keys=True) // Default({}) // Description("Deprecated setting"),
deprecated=True)
class HTMLReporter(AbstractReporter):
"""
Deprecated reporter that now exists only as an empty shell.
It might be useful to revive it as a basic reporter without JavaScript.
"""
def report(self):
raise NotImplementedError("The html reporter is broken. Consider using the html2 reporter.")
@register(ReporterRegistry, "html2", Dict({
"out": Str() // Default("report") // Description("Output directory"),
"html_filename": Str() // Default("report.html") // Description("Name of the HTML file"),
"fig_width_small": Float() // Default(15.0) // Description("Width of all small plotted figures"),
"fig_width_big": Float() // Default(25.0) // Description("Width of all big plotted figures"),
"boxplot_height": Float() // Default(2.0) // Description("Height per run block for the big comparison box plots"),
"alpha": Float() // Default(0.05) // Description("Alpha value for confidence intervals"),
"gen_tex": Bool() // Default(True) // Description("Generate simple latex versions of the plotted figures?"),
"gen_pdf": Bool() // Default(False) // Description("Generate pdf versions of the plotted figures?"),
"gen_xls": Bool() // Default(False) // Description("Generate excel files for all tables"),
"show_zoomed_out": Bool() // Default(False)
// Description("Show zoomed out (x min = 0) figures in the extended summaries?"),
"percent_format": Str() // Default("{:5.2%}") // Description("Format string used to format floats as percentages"),
"float_format": Str() // Default("{:5.2e}") // Description("Format string used to format floats"),
"min_in_comparison_tables": Bool() // Default(False)
// Description("Show the mininmum related values in the big comparison table"),
"mean_in_comparison_tables": Bool() // Default(True)
def setup(self):
self._set_nice(self.misc_settings["nice"])
self._set_io_nice(self.misc_settings["io_nice"])
def _set_nice(self, nice: int):
self._exec_command("renice -n {} -p {}".format(nice, os.getpid()))
def _set_io_nice(self, nice: int):
self._exec_command("ionice -n {} -p {}".format(nice, os.getpid()))
def teardown(self):
self._set_nice(self._old_nice)
self._set_io_nice(self._old_io_nice)
@register(ExecRunDriver, "env_randomize", Dict({
"min": NaturalNumber() // Default(4) // Description("Minimum number of added random environment variables"),
"max": PositiveInt() // Default(4) // Description("Maximum number of added random environment variables"),
"var_max": PositiveInt() // Default(get_memory_page_size()) // Description("Maximum length of each random value"),
"key_max": PositiveInt() // Default(get_memory_page_size()) // Description("Maximum length of each random key")
}))
class EnvRandomizePlugin(AbstractRunDriverPlugin):
"""
Adds random environment variables.
"""
def setup_block_run(self, block: RunProgramBlock, runs: int = 1):
env = {}
for i in range(random.randint(self.misc_settings["min"], self.misc_settings["max"])):  # randint is inclusive, so "max" itself is the upper bound
env["a" * random.randint(0, self.misc_settings["key_max"])] \
= "a" * random.randint(0, self.misc_settings["var_max"])
block["env"] = env
"""
pass
def _exec_command(self, cmd: str) -> str:
proc = subprocess.Popen(["/bin/sh", "-c", cmd], stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
out, err = proc.communicate()
if proc.returncode != 0:  # communicate() has already waited; this also catches signal-caused (negative) exit codes
msg = "Error executing '" + cmd + "' in {}: ".format(type(self)) + str(err) + " " + str(out)
#logging.error(msg)
raise EnvironmentError(msg)
return str(out)
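# Hypothetical usage of _exec_command (illustration only): the helper returns the command's
# stdout and raises EnvironmentError on a non-zero exit status, so callers can do e.g.:
# governor = self._exec_command("cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor").strip()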
@register(ExecRunDriver, "nice", Dict({
"nice": Int(range=range(-20, 20)) // Description("Niceness values range from -20 (most favorable "
"to the process) to 19 (least favorable to the process).")
// Default(-15),
"io_nice": Int(range=range(0, 4)) // Description("Specify the name or number of the scheduling class to use;"
"0 for none, 1 for realtime, 2 for best-effort, 3 for idle.")
// Default(1)
}))
class NicePlugin(AbstractRunDriverPlugin):
"""
Allows the setting of the nice and ionice values of the benchmarking process.
"""
needs_root_privileges = True
def __init__(self, misc_settings):
super().__init__(misc_settings)
"max": lambda single: single.max(),
"stddev per mean": lambda single: single.std_dev_per_mean()
}
num = mod[modifier](single)
if baseline:
num = num / baseline.mean()
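# Format flags in `opts` (reflected in the arguments below): "%" formats the number as a percentage,
# "s" uses scientific notation, and "p"/"o" put the deviation in parentheses
# (digit-change mode for "p", order-of-magnitude mode for "o").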
return FNumber(num,
abs_deviation=single.std_dev(),
is_percent=("%" in opts),
scientific_notation=("s" in opts),
parentheses=("o" in opts or "p" in opts),
parentheses_mode=ParenthesesMode.DIGIT_CHANGE if "p" in opts else \
(ParenthesesMode.ORDER_OF_MAGNITUDE if "o" in opts else None)).format()
@register(ReporterRegistry, "codespeed", Dict({
"project": Str() // Default("") // Description("Project name reported to codespeed."),
"executable": Str() // Default("") // Description("Executable name reported to codespeed. Defaults to the project name."),
"environment": Str() // Default("") // Description("Environment name reported to codespeed. Defaults to current host name."),
"branch": Str() // Default("") // Description("Branch name reported to codespeed. Defaults to current branch or else 'master'."),
"commit_id": Str() // Default("") // Description("Commit ID reported to codespeed. Defaults to current commit."),
}))
class CodespeedReporter(AbstractReporter):
"""
Reporter that outputs JSON as expected by `codespeed <https://github.com/tobami/codespeed>`_.
Branch name and commit ID are taken from the current directory.
Use it like this:
.. code:: sh
temci report --reporter codespeed ... | curl --data-urlencode json@- http://localhost:8000/result/add/json/
#logging.info(err)
pass
def _set_nice(self, pid: int, nice: int):
self._exec_command("renice -n {} -p {}".format(nice, pid))
def teardown(self):
for pid in self._old_nices:
try:
self._set_nice(pid, self._old_nices[pid])
except EnvironmentError as err:
#logging.info(err)
pass
@register(ExecRunDriver, "stop_start", Dict({
"min_nice": Int(range=range(-15, 20)) // Default(-10)
// Description("Processes with lower nice values are ignored."),
"min_id": PositiveInt() // Default(1500)
// Description("Processes with lower id are ignored."),
"comm_prefixes": ListOrTuple(Str()) // Default(["ssh", "xorg", "bluetoothd"])
// Description("Each process which name (lower cased) starts with one of the prefixes is not ignored. "
"Overrides the decision based on the min_id."),
"comm_prefixes_ignored": ListOrTuple(Str()) // Default(["dbus", "kworker"])
// Description("Each process which name (lower cased) starts with one of the prefixes is ignored. "
"It overrides the decisions based on comm_prefixes and min_id."),
"subtree_suffixes": ListOrTuple(Str()) // Default(["dm", "apache"])
// Description("Suffixes of processes names which are stopped."),
"dry_run": Bool() // Default(False)
// Description("Just output the to be stopped processes but don't actually stop them?")
}))
class StopStartPlugin(AbstractRunDriverPlugin):
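# Rough sketch (illustration only, not the plugin's actual implementation) of the selection
# heuristic described by the options above; `pid`, `comm` and `nice` are assumed to come from
# a process listing such as `ps`:
def _should_stop(pid: int, comm: str, nice: int, settings: dict) -> bool:
    comm = comm.lower()
    if nice < settings["min_nice"]:
        return False  # processes with low (favorable) nice values are always ignored
    if any(comm.startswith(p) for p in settings["comm_prefixes_ignored"]):
        return False  # explicitly ignored; overrides comm_prefixes and min_id
    if any(comm.startswith(p) for p in settings["comm_prefixes"]):
        return True   # explicitly included; overrides the min_id decision
    return pid >= settings["min_id"]  # otherwise only processes with a high enough id are stopped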
self.assertListEqual(MockRegistry.get_used(), ["plugin2", "plugin"])
Settings().modify_setting("abc", Dict(all_keys=False), {})
class MockRegistryNoList(AbstractRegistry):
settings_key_path = "abc"
use_key = "test"
use_list = False
default = "plugin"
registry = {}
with self.assertRaises(ValueError):
MockRegistryNoList.get_for_name("asd")
@register(MockRegistryNoList, "plugin", Dict(), {})
class Plugin:
def __init__(self, a):
self.a = a
@register(MockRegistryNoList, "plugin2", Dict(), {})
class Plugin2:
def __init__(self, a):
self.a = a
self.assertEqual(MockRegistryNoList.get_used(), "plugin")
Settings()["abc/test"] = "plugin2"
self.assertEqual(MockRegistryNoList.get_used(), "plugin2")
settings_key_path = "abcd"
use_key = "test"
use_list = True
default = ["plugin"]
registry = {}
with self.assertRaises(ValueError):
MockRegistry.get_for_name("asd")
@register(MockRegistry, "plugin", Dict(), {})
class Plugin:
def __init__(self, a):
self.a = a
@register(MockRegistry, "plugin2", Dict(), {})
class Plugin2:
def __init__(self, a):
self.a = a
self.assertListEqual(MockRegistry.get_used(), ["plugin"])
Settings()["abcd/test"] = ["plugin2", "plugin"]
self.assertListEqual(MockRegistry.get_used(), ["plugin2", "plugin"])
Settings().modify_setting("abc", Dict(all_keys=False), {})
class MockRegistryNoList(AbstractRegistry):
settings_key_path = "abc"
use_key = "test"
use_list = False
default = "plugin"
settings_key_path = "abc"
use_key = "test"
use_list = False
default = "plugin"
registry = {}
with self.assertRaises(ValueError):
MockRegistryNoList.get_for_name("asd")
@register(MockRegistryNoList, "plugin", Dict(), {})
class Plugin:
def __init__(self, a):
self.a = a
@register(MockRegistryNoList, "plugin2", Dict(), {})
class Plugin2:
def __init__(self, a):
self.a = a
self.assertEqual(MockRegistryNoList.get_used(), "plugin")
Settings()["abc/test"] = "plugin2"
self.assertEqual(MockRegistryNoList.get_used(), "plugin2")
@register(ExecRunDriver, "disable_ht", Dict({}))
class DisableHyperThreading(AbstractRunDriverPlugin):
"""
Disable hyper-threading
"""
needs_root_privileges = True
def setup(self):
AbstractRunWorkerPool.disable_hyper_threading()
def teardown(self):
AbstractRunWorkerPool.enable_hyper_threading()
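# The actual work is delegated to AbstractRunWorkerPool; as a standalone illustration only,
# the usual approach on Linux is to offline all but the first thread of every physical core
# via sysfs (hypothetical helper, not part of temci):
def _offline_sibling_threads():
    from pathlib import Path
    for cpu in Path("/sys/devices/system/cpu").glob("cpu[0-9]*"):
        siblings_file = cpu / "topology" / "thread_siblings_list"
        if not siblings_file.exists():
            continue
        # e.g. "0,4" or "0-1": keep the first listed thread online, offline the rest
        ids = siblings_file.read_text().strip().replace("-", ",").split(",")
        for cpu_id in ids[1:]:
            (Path("/sys/devices/system/cpu") / "cpu{}".format(cpu_id) / "online").write_text("0")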
@register(ExecRunDriver, "disable_intel_turbo", Dict({}))
class DisableIntelTurbo(AbstractRunDriverPlugin):
"""
Disable Intel turbo mode
"""
needs_root_privileges = True
def setup(self):
self._exec_command("echo 1 > /sys/devices/system/cpu/intel_pstate/no_turbo")
def teardown(self):
self._exec_command("echo 0 > /sys/devices/system/cpu/intel_pstate/no_turbo")
@register(ExecRunDriver, "cpuset", Dict({}))
class CPUSet(AbstractRunDriverPlugin):
for run in self.stats_helper.runs
for prop in sorted(run.properties)]
json.dump(data, sys.stdout)
def _report_prop(self, run: RunData, prop: str) -> dict:
return {
**self.meta,
"benchmark": "{}: {}".format(run.description(), prop),
"result_value": np.mean(run[prop]),
"std_dev": np.std(run[prop]),
"min": min(run[prop]),
"max": max(run[prop]),
}
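# For illustration, a single record produced by _report_prop might look like this
# (all values are made up; self.meta carries the reporter settings configured above):
# {
#     **self.meta,                            # project/executable/environment/branch/commit_id
#     "benchmark": "sleep 0.1: task-clock",   # "<run description>: <property>"
#     "result_value": 0.1042,
#     "std_dev": 0.0031,
#     "min": 0.1003,
#     "max": 0.1101,
# }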
@register(ReporterRegistry, "codespeed2", Dict({}))
class Codespeed2Reporter(AbstractReporter):
"""
Reporter that outputs JSON as specified by
the codespeed runner spec.
"""
def report(self):
"""
Create a report and output it as configured.
"""
import json
res = {}
for run in self.stats_helper.errorneous_runs:
bench_res = {}
for prop in run.properties:
bench_res[prop] = {