# imports assumed from the surrounding test module (not shown in the excerpt):
import os
import shlex
import sys
from typing import Dict, Union

import yaml
from click.testing import CliRunner, Result

from temci.scripts.cli import cli
from temci.utils.settings import Settings


def run_temci_click(args: str, settings: dict = None, files: Dict[str, Union[dict, list, str]] = None,
                    expect_success: bool = True, misc_env: Dict[str, str] = None, raise_exc: bool = False) \
        -> Result:
    """
    Run temci with the passed arguments

    :param args: arguments for temci
    :param settings: settings dictionary, stored in a file called `settings.yaml` and appended to the arguments
    :param files: {file name: content as string or dictionary that is converted into YAML first}
    :param expect_success: expect a zero return code
    :param misc_env: additional environment variables
    :param raise_exc: re-raise exceptions that occur instead of only recording them
    :return: result of the call
    """
    runner = CliRunner()
    set = Settings().type_scheme.get_default().copy()
    prior = set.copy()
    set.update(settings or {})
    with runner.isolated_filesystem():
        cmd = args
        _store_files(files)  # helper from the same test module (not shown)
        with open("settings.yaml", "w") as f:
            yaml.dump(set, f)
        env = os.environ.copy()
        env["LC_ALL"] = "en_US.utf-8"
        env.update(misc_env or {})
        args = sys.argv.copy()
        sys.argv = shlex.split("temci " + cmd)
        err_code = None
        exc = None
        try:
            result = runner.invoke(cli, cmd.replace(" ", " --settings settings.yaml ", 1), env=env,
                                   catch_exceptions=True)
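            # -- the excerpt ends here; below is a minimal, assumed sketch of
            # how the function plausibly continues (restore argv, honor
            # raise_exc and expect_success), not the verbatim temci source:
            err_code = result.exit_code
        except BaseException as e:
            exc = e
            if raise_exc:
                raise
        finally:
            sys.argv = args  # restore the argv saved above
        if expect_success:
            assert err_code == 0, "temci failed with: {}".format(exc or result.output)
        return result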

    def setUp(self):
        Settings().reset()

    def test_plugin_selection(self):  # hypothetical name; the excerpt omits the method header
        with self.assertRaises(ValueError):
            MockRegistryNoList.get_for_name("asd")

        @register(MockRegistryNoList, "plugin", Dict(), {})
        class Plugin:
            def __init__(self, a):
                self.a = a

        @register(MockRegistryNoList, "plugin2", Dict(), {})
        class Plugin2:
            def __init__(self, a):
                self.a = a

        self.assertEqual(MockRegistryNoList.get_used(), "plugin")
        Settings()["abc/test"] = "plugin2"
        self.assertEqual(MockRegistryNoList.get_used(), "plugin2")

    def _fail(self, message: str):
        """
        Fail with the given error message and send an error mail if configured to do so

        :param message: given error message
        """
        logging.error(message)
        send_mail(Settings()["package/send_mail"], "Error", message)
        exit(1)

def temci__short__shell(command: str, **kwargs):
    Settings()["run/driver"] = "shell"
    Settings()["run/runs"] = 1
    Settings()["run/discarded_runs"] = 0
    Settings()["run/cpuset/parallel"] = 0
    benchmark_and_exit([{
        "run_config": {
            "run_cmd": command
        },
        "attributes": {
            "description": command
        }
    }])
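
# Given temci's temci__<cmd>__<subcmd> naming convention (see temci__setup
# below), this function presumably backs an invocation like
#     temci short shell "make build"
# i.e. a single benchmarked run of the shell command, with no discarded
# warm-up runs and no cpuset parallelism.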
@cli.command(short_help=command_docs["setup"])
def setup():
temci__setup()
@document_func(command_docs["setup"])
def temci__setup():
from temci.setup.setup import make_scripts
make_scripts()

if sphinx_doc():
    Settings.__doc__ += """
    The whole configuration file has the following structure:

    """ + get_doc_for_type_scheme(Settings().type_scheme)


if __name__ == "__main__":
    # for testing purposes only
    cli()

        self.description = self.type_scheme.description.strip().split("\n")[0]  # type: str
        """ Description of this option """
        self.has_description = self.description not in [None, ""]  # type: bool
        """ Does this option have a description? """
        if not self.has_description:
            warnings.warn("Option {} is without documentation.".format(option_name))
        self.has_default = True  # type: bool
        """ Does this option have a default value? """
        self.default = None  # type: t.Any
        """ Default value of this option """
        try:
            self.default = self.type_scheme.get_default()
        except ValueError:
            self.has_default = False
        if settings_key:
            self.default = Settings()[settings_key]
        if hasattr(self.type_scheme, "completion_hints") and self.completion_hints is None:
            self.completion_hints = self.type_scheme.completion_hints
        self.is_flag = is_flag is True or (is_flag is None and type(self.type_scheme) in [Bool, BoolOrNone])  # type: bool
        """ Is this option flag-like? """
        if self.is_flag:
            self.completion_hints = None
            self.short = None

        def callback(context: Context, param, val):
            if val is not None and context.get_parameter_source(param.name) != ParameterSource.DEFAULT:
                try:
                    Settings().set(settings_key, val, validate=False)
                except SettingsError as err:
                    logging.error("Error while processing the passed value ({val}) of option {opt}: {msg}".format(
                        val=val,
                        opt=option_name,
                        msg=str(err)))  # assumed completion: the excerpt breaks off mid-call
            return val  # assumed: click callbacks must return the (possibly converted) value
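
# A self-contained sketch of the same pattern (assumed wiring; "--runs",
# the "run/runs" key and _push_to_settings are made-up names, not temci
# code): forward an option's value into Settings() only when the user
# explicitly passed it on the command line.
import click
from click.core import ParameterSource

def _push_to_settings(ctx: click.Context, param: click.Parameter, val):
    # skip click defaults so they cannot overwrite values from settings.yaml
    if val is not None and ctx.get_parameter_source(param.name) != ParameterSource.DEFAULT:
        Settings().set("run/runs", val, validate=False)  # hypothetical settings key
    return val  # click passes the returned value on to the command

@click.command()
@click.option("--runs", type=int, default=None, callback=_push_to_settings)
def bench(runs):
    pass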

def get_bench_user() -> str:
    """ User that runs the benchmarks: the `env/USER` setting, falling back to the `USER` environment variable if the setting is empty """
    user = Settings()["env"]["USER"]
    return os.getenv("USER", "") if user == "" else user

    def is_unequal(self, p_val: float) -> bool:
        """ Is the passed p value below the uncertainty range for null-hypothesis probabilities, i.e. is equality clearly rejected? """
        return p_val < min(*Settings()["stats/uncertainty_range"])
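
    # Worked example, assuming (purely for illustration) that
    # Settings()["stats/uncertainty_range"] is (0.05, 0.15):
    #   is_unequal(0.01) -> True   # p below the range: equality rejected
    #   is_unequal(0.10) -> False  # p inside the range: result is uncertain
    #   is_unequal(0.40) -> False  # p above the range: no significant difference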