elif not test_extracors:
return api_extrators
else:
extractor_dict = OrderedDict()
for api_extrator in api_extrators:
if len(api_extrator) != 1:
logger.log_warning("incorrect extractor: {}".format(api_extrator))
continue
var_name = list(api_extrator.keys())[0]
extractor_dict[var_name] = api_extrator[var_name]
for test_extrator in test_extracors:
if len(test_extrator) != 1:
logger.log_warning("incorrect extractor: {}".format(test_extrator))
continue
var_name = list(test_extrator.keys())[0]
extractor_dict[var_name] = test_extrator[var_name]
extractor_list = []
for key, value in extractor_dict.items():
extractor_list.append({key: value})
return extractor_list
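# --- standalone sketch of the same merge rule (illustrative only; the helper name
# and signature are assumptions, the enclosing function's signature is not shown above) ---
from collections import OrderedDict

def merge_extractors_sketch(api_extractors, test_extractors):
    """Test-level extractors override api-level ones that bind the same variable name."""
    merged = OrderedDict()
    for entry in api_extractors + test_extractors:
        if len(entry) != 1:
            continue  # malformed entry: exactly one {var_name: field} pair expected
        var_name = list(entry.keys())[0]
        merged[var_name] = entry[var_name]
    return [{key: value} for key, value in merged.items()]

# merge_extractors_sketch([{"token": "content.token"}, {"uid": "content.uid"}],
#                         [{"token": "headers.X-Token"}])
# -> [{"token": "headers.X-Token"}, {"uid": "content.uid"}]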
def load_file(file_path):
if not os.path.isfile(file_path):
raise exception.FileNotFoundError("{} does not exist.".format(file_path))
file_suffix = os.path.splitext(file_path)[1].lower()
if file_suffix == '.json':
return _load_json_file(file_path)
elif file_suffix in ['.yaml', '.yml']:
return _load_yaml_file(file_path)
elif file_suffix == ".csv":
return _load_csv_file(file_path)
else:
# '' or other suffix
err_msg = u"Unsupported file format: {}".format(file_path)
logger.log_warning(err_msg)
return []
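# --- usage sketch (hypothetical file names, for illustration only) ---
# load_file dispatches on the file suffix, so JSON, YAML and CSV test data can
# all be loaded through one entry point:
#
#   testcases = load_file("demo_testset.yml")   # handled by _load_yaml_file
#   user_rows = load_file("user_data.csv")      # handled by _load_csv_file
#   config    = load_file("config.json")        # handled by _load_json_file
#
# An unknown suffix only logs a warning and returns [], while a missing path
# raises exception.FileNotFoundError.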
extend_test_api(test_block_dict)
testset["testcases"].append(test_block_dict)
elif "suite" in test_block_dict:
ref_name = test_block_dict["suite"]
test_info = get_testinfo_by_reference(ref_name, "suite")
testset["testcases"].extend(test_info["testcases"])
else:
testset["testcases"].append(test_block_dict)
elif key == "api":
api_def = item["api"].pop("def")
function_meta = parse_function(api_def)
func_name = function_meta["func_name"]
if func_name in testset["api"]:
logger.log_warning("api definition duplicated: {}".format(func_name))
api_info = {}
api_info["function_meta"] = function_meta
api_info.update(item["api"])
testset["api"][func_name] = api_info
else:
logger.log_warning(
"unexpected block: {}. block should only be 'config', 'test' or 'api'.".format(key)
)
return testset
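# --- data sketch (illustrative YAML block, not copied from the project docs) ---
# An "api" item is registered under the function name parsed from its "def"
# field: parse_function("get_user($uid)") yields func_name "get_user", so a
# block like the one below would be stored as testset["api"]["get_user"]; a
# repeated name only triggers the "api definition duplicated" warning before
# being overwritten.
#
#   - api:
#       def: get_user($uid)
#       request:
#         url: /api/users/$uid
#         method: GET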
""" locusts -f locustfile.py --processes 4
"""
if "--no-web" in sys.argv:
logger.log_error("conflict parameter args: --processes & --no-web. \nexit.")
sys.exit(1)
processes_index = sys.argv.index('--processes')
processes_count_index = processes_index + 1
if processes_count_index >= len(sys.argv):
""" do not specify processes count explicitly
locusts -f locustfile.py --processes
"""
processes_count = multiprocessing.cpu_count()
logger.log_warning("processes count not specified, use {} by default.".format(processes_count))
else:
try:
""" locusts -f locustfile.py --processes 4 """
processes_count = int(sys.argv[processes_count_index])
sys.argv.pop(processes_count_index)
except ValueError:
""" locusts -f locustfile.py --processes -P 8888 """
processes_count = multiprocessing.cpu_count()
logger.log_warning("processes count not specified, use {} by default.".format(processes_count))
sys.argv.pop(processes_index)
locusts.run_locusts_with_processes(sys.argv, processes_count)
else:
locusts.main()
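# --- invocation sketch (assumes the block above runs only when "--processes"
# appears in sys.argv; that surrounding check is not shown here) ---
#
#   locusts -f locustfile.py --processes 4           # spawn 4 locust processes
#   locusts -f locustfile.py --processes             # fall back to multiprocessing.cpu_count()
#   locusts -f locustfile.py --no-web --processes 4  # conflicting flags, exits with code 1
#
# Any other argument list is passed straight through to locusts.main().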
def main_locust():
""" Performance test with locust: parse command line options and run commands.
"""
logger.setup_logger("INFO")
try:
from httprunner import locusts
except ImportError:
msg = "Locust is not installed, install first and try again.\n"
msg += "install command: pip install locustio"
logger.log_warning(msg)
exit(1)
sys.argv[0] = 'locust'
if len(sys.argv) == 1:
sys.argv.extend(["-h"])
if sys.argv[1] in ["-h", "--help", "-V", "--version"]:
locusts.main()
sys.exit(0)
try:
testcase_index = sys.argv.index('-f') + 1
assert testcase_index < len(sys.argv)
except (ValueError, AssertionError):
logger.log_error("Testcase file is not specified, exit.")
sys.exit(1)
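# --- behaviour sketch ---
# main_locust() rewrites argv so downstream tooling sees a plain "locust" call:
#
#   locusts                # argv becomes ['locust', '-h'] and is handed to locusts.main()
#   locusts --version      # forwarded to locusts.main(), then sys.exit(0)
#   locusts --processes 4  # no "-f <testcase>" given -> error logged, sys.exit(1)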
def prettify_json_file(file_list):
""" prettify JSON testset format
"""
for json_file in set(file_list):
if not json_file.endswith(".json"):
logger.log_warning("Only JSON file format can be prettified, skip: {}".format(json_file))
continue
logger.color_print("Start to prettify JSON file: {}".format(json_file), "GREEN")
dir_path = os.path.dirname(json_file)
file_name, file_suffix = os.path.splitext(os.path.basename(json_file))
outfile = os.path.join(dir_path, "{}.pretty.json".format(file_name))
with io.open(json_file, 'r', encoding='utf-8') as stream:
try:
obj = json.load(stream)
except ValueError as e:
raise SystemExit(e)
with io.open(outfile, 'w', encoding='utf-8') as out:
json.dump(obj, out, indent=4, separators=(',', ': '))
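# --- usage sketch (hypothetical paths) ---
#   prettify_json_file(["testcases/demo.json", "testcases/demo.yml"])
# writes testcases/demo.pretty.json with 4-space indentation, skips the YAML
# file with a warning, and aborts via SystemExit if the JSON cannot be parsed.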
def validate_json_file(file_list):
""" validate JSON testset format
"""
for json_file in set(file_list):
if not json_file.endswith(".json"):
logger.log_warning("Only JSON file format can be validated, skip: {}".format(json_file))
continue
logger.color_print("Start to validate JSON file: {}".format(json_file), "GREEN")
with io.open(json_file) as stream:
try:
json.load(stream)
except ValueError as e:
raise SystemExit(e)
print("OK")
def extract_output(self, output_variables_list):
""" extract output variables
"""
variables_mapping = self.context.testcase_variables_mapping
output = {}
for variable in output_variables_list:
if variable not in variables_mapping:
logger.log_warning(
"variable '{}' can not be found in variables mapping, failed to output!"\
.format(variable)
)
continue
output[variable] = variables_mapping[variable]
return output
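# --- worked example (illustrative variable names) ---
# With self.context.testcase_variables_mapping == {"token": "abc123", "uid": 1001}:
#
#   self.extract_output(["token", "status_code"])
#   -> {"token": "abc123"}   # "status_code" is absent, so it is only warned about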
teststep_dict.get("variables", []),
teststep_dict.get("parameters", [])
)
for testcase_variables in testcase_parametered_variables_list:
teststep_dict["variables"] = testcase_variables
# eval teststep name with bind variables
variables = utils.override_variables_binds(
config_variables,
testcase_variables
)
self.testcase_parser.update_binded_variables(variables)
try:
testcase_name = self.testcase_parser.eval_content_with_bindings(teststep_dict["name"])
except (AssertionError, exceptions.ParamsError):
logger.log_warning("failed to eval teststep name: {}".format(teststep_dict["name"]))
testcase_name = teststep_dict["name"]
self.test_runner_list.append((test_runner, variables))
self._add_test_to_suite(testcase_name, test_runner, teststep_dict)
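# --- behaviour sketch (assumes teststep_dict carries optional "variables" and
# "parameters" lists, as the .get() calls above suggest) ---
# Each parameterized variable set produces one run: the teststep's variables
# are replaced with that set, the step name is re-evaluated against the merged
# config/testcase bindings (falling back to the raw name if evaluation fails),
# and a (test_runner, variables) pair is queued before the test is added to
# the suite.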