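# Shared imports for the Python fragments below (bzt is the Taurus load-testing
# tool these snippets come from; lxml and terminaltables are the libraries the
# original modules use).
import sys
import time
from collections import Counter

import yaml
from lxml import etree
from terminaltables import AsciiTable, SingleTable

from bzt.modules.aggregator import DataPoint, KPISet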
self.assertEqual(values.get("")[0].get("msg"), "message")
self.assertEqual(values.get("")[1].get("msg"), "FOUND")
self.assertEqual(values.get("")[2].get("msg"), "second message")
self.assertEqual(values.get("")[3].get("msg"), "NOT FOUND")
self.assertEqual(values.get("")[3].get("cnt"), 2)
self.assertEqual(values.get("tc1")[0].get("msg"), "FOUND")
self.assertEqual(values.get("tc1")[0].get("type"), KPISet.ERRTYPE_SUBSAMPLE)
self.assertEqual(values.get("tc3")[0].get("msg"), "message")
self.assertEqual(values.get("tc3")[0].get("type"), KPISet.ERRTYPE_ERROR)
self.assertEqual(values.get("tc3")[1].get("msg"), "second message")
self.assertEqual(values.get("tc3")[1].get("type"), KPISet.ERRTYPE_ERROR)
self.assertEqual(values.get("tc4")[0].get("msg"), "NOT FOUND")
self.assertEqual(values.get("tc4")[0].get("type"), KPISet.ERRTYPE_SUBSAMPLE)
self.assertEqual(values.get("tc5")[0].get("msg"), "NOT FOUND")
self.assertEqual(values.get("tc5")[0].get("type"), KPISet.ERRTYPE_SUBSAMPLE)
for point in mock.datapoints():
    self.assertNotEqual(0, point[DataPoint.CUMULATIVE][""][KPISet.CONCURRENCY])
mock.data.append((2, "", 1, r(), r(), r(), 200, None, '', 0))
mock.data.append((2, "", 1, r(), r(), r(), 200, None, '', 0))
for point in mock.datapoints():
    pass
for point in mock.datapoints(True):
    pass
for point in mock.results:
    overall = point[DataPoint.CURRENT][""]
    self.assertTrue(len(overall[KPISet.PERCENTILES]) > 0)
def test_from_extension(self):
    with open(RESOURCES_DIR + "yaml/selenium_from_extension.yml") as config_file:
        self.configure(yaml.full_load(config_file))  # explicit loader; with-block closes the file
    self.obj.prepare()
    self.obj.get_widget()
    self.obj.startup()
    while not self.obj.check():
        time.sleep(self.obj.engine.check_interval)
    self.obj.shutdown()
    results = list(self.obj.runner.reader.datapoints(final_pass=True))
    self.obj.runner._tailer.close()
    self.obj.runner.reader.underlings[0].csvreader.file.close()
    self.assertEqual(1, len(results))
    self.assertFalse(results[0][DataPoint.CUMULATIVE][""][KPISet.ERRORS])  # no error messages expected
def aggregated_second(self, data):
    for x in data[DataPoint.CURRENT].values():
        ratio = x[KPISet.FAILURES] / x[KPISet.SAMPLE_COUNT]  # fails fast on an empty KPISet
        self.log.debug("TS: %s %s (failure ratio %s)", data[DataPoint.TIMESTAMP], x[KPISet.SAMPLE_COUNT], ratio)
    for x in data[DataPoint.CUMULATIVE].values():
        ratio = x[KPISet.FAILURES] / x[KPISet.SAMPLE_COUNT]
        self.log.debug("TS: %s %s (failure ratio %s)", data[DataPoint.TIMESTAMP], x[KPISet.SAMPLE_COUNT], ratio)
def __get_datapoint(self, ts=0):
    datapoint = DataPoint(ts, None)
    cumul_data = datapoint[DataPoint.CUMULATIVE]
    cumul_data[""] = KPISet.from_dict(
        {KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
         KPISet.RESP_TIMES: Counter(
             {0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224, 0.005: 125,
              0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20, 0.011: 8, 0.01: 8, 0.017: 3,
              0.016: 3, 0.014: 3, 0.013: 3, 0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1,
              0.019: 1, 0.015: 1}),
         KPISet.ERRORS: [{'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
                          'urls': Counter({'http://192.168.1.1/anotherquery': 7373}),
                          KPISet.RESP_CODES: '403'}],
         KPISet.STDEV_RESP_TIME: 0.04947974228872108,
         KPISet.AVG_LATENCY: 0.0002825639815220692,
         KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
         KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001,
                              '100.0': 0.081, '99.0': 0.003, '50.0': 0.0},
         KPISet.SUCCESSES: 29658,
         KPISet.SAMPLE_COUNT: 59314,
         KPISet.CONCURRENCY: 0,
         KPISet.AVG_RESP_TIME: 0.0005716549770704078,
         KPISet.FAILURES: 29656})
    cumul_data["http://192.168.100.100/somequery"] = KPISet.from_dict(
        {KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
         KPISet.RESP_TIMES: Counter(
             {0.0: 17219, 0.001: 11246, 0.002: 543, 0.003: 341, 0.004: 121,
              0.005: 66, 0.006: 36, 0.007: 33, 0.008: 18, 0.009: 12, 0.011: 6,
              0.01: 5, 0.013: 2, 0.017: 2, 0.012: 2, 0.079: 1, 0.016: 1,
              0.014: 1, 0.019: 1, 0.04: 1, 0.081: 1}),
         KPISet.ERRORS: [],
         KPISet.STDEV_RESP_TIME: 0.04073402130687656,
         KPISet.AVG_LATENCY: 1.7196034796682178e-06,
         KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
         KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.009, '90.0': 0.001,
                              '100.0': 0.081, '99.0': 0.004, '50.0': 0.0},
         KPISet.SUCCESSES: 29658,
         KPISet.SAMPLE_COUNT: 29658,
         KPISet.CONCURRENCY: 0,
         KPISet.AVG_RESP_TIME: 0.0005164542450603551,
         KPISet.FAILURES: 0})
    return datapoint
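# Usage sketch (assumptions: `self` is the test case defining __get_datapoint,
# and KPISet.recalculate refreshes derived stats; merge_kpis is the same call
# the reader code below uses, with sid=None standing in for a source id):
dp_one = self.__get_datapoint(ts=0)
dp_two = self.__get_datapoint(ts=1)
overall = KPISet()
overall.merge_kpis(dp_one[DataPoint.CUMULATIVE][""], None)
overall.merge_kpis(dp_two[DataPoint.CUMULATIVE][""], None)
overall.recalculate()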
"""
:type xunit: XUnitFileWriter
"""
xunit.report_test_suite('sample_labels')
labels = self.last_second[DataPoint.CUMULATIVE]
for key in sorted(labels.keys()):
if key == "": # skip total label
continue
errors = []
for er_dict in labels[key][KPISet.ERRORS]:
rc = str(er_dict["rc"])
msg = str(er_dict["msg"])
cnt = str(er_dict["cnt"])
if er_dict["type"] == KPISet.ERRTYPE_ASSERT:
err_element = etree.Element("failure", message=msg, type="Assertion Failure")
else:
err_element = etree.Element("error", message=msg, type="Error")
err_desc = "%s\n(status code is %s)\n(total errors of this type: %s)" % (msg, rc, cnt)
err_element.text = err_desc
errors.append(err_element)
xunit.report_test_case('sample_labels', key, errors)
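# Illustration (assumes lxml.etree, which is the etree used above): the loop
# emits JUnit-style <failure>/<error> elements shaped like this sample.
sample = etree.Element("failure", message="expected 200", type="Assertion Failure")
sample.text = "expected 200\n(status code is 503)\n(total errors of this type: 4)"
print(etree.tostring(sample, pretty_print=True).decode())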
def _extract_common(self, elem, label, r_code, t_stamp, r_msg):
    f_msg, f_url, f_rc, f_tag, f_type = self.find_failure(elem, r_msg, r_code)
    if f_type == KPISet.ERRTYPE_SUBSAMPLE:
        url_counts = Counter({f_url: 1})
    else:
        urls = elem.xpath(self.url_xpath)
        if urls:
            url_counts = Counter({urls[0].text: 1})
        else:
            url_counts = Counter()
    err_item = KPISet.error_item_skel(f_msg, f_rc, 1, f_type, url_counts, f_tag)
    buf = self.buffer.get(t_stamp, force_set=True)
    KPISet.inc_list(buf.get(label, [], force_set=True), ("msg", f_msg), err_item)
    KPISet.inc_list(buf.get('', [], force_set=True), ("msg", f_msg), err_item)
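# Note (assumption): self.buffer behaves like bzt's BetterDict, whose
# get(key, default, force_set=True) stores and returns the default when the
# key is missing, so the per-timestamp and per-label error lists above are
# created lazily on first access.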
overall = KPISet()
for item in data['stats']:
    if timestamp not in item['num_reqs_per_sec']:
        continue
    kpiset = KPISet()
    kpiset[KPISet.SAMPLE_COUNT] = item['num_reqs_per_sec'][timestamp]
    kpiset[KPISet.CONCURRENCY] = data['user_count']
    kpiset[KPISet.BYTE_COUNT] = item['total_content_length']
    if item['num_requests']:
        avg_rt = (item['total_response_time'] / 1000.0) / item['num_requests']
        kpiset.sum_rt = item['num_reqs_per_sec'][timestamp] * avg_rt
    for err in data['errors'].values():
        if err['name'] == item['name']:
            new_err = KPISet.error_item_skel(err['error'], None, err['occurences'], KPISet.ERRTYPE_ERROR,
                                             Counter(), None)
            KPISet.inc_list(kpiset[KPISet.ERRORS], ("msg", err['error']), new_err)
            kpiset[KPISet.FAILURES] += err['occurences']
    kpiset[KPISet.SUCCESSES] = kpiset[KPISet.SAMPLE_COUNT] - kpiset[KPISet.FAILURES]
    point[DataPoint.CURRENT][item['name']] = kpiset
    overall.merge_kpis(kpiset, sid)
point[DataPoint.CURRENT][''] = overall
point.recalculate()
return point
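# Hypothetical input shape for the loop above (key names are exactly the ones
# the code reads, including Locust's 'occurences' spelling; values are
# illustrative only):
sample_data = {
    "user_count": 10,
    "stats": [{"name": "/api",
               "num_requests": 100,
               "num_reqs_per_sec": {1609459200: 5},
               "total_response_time": 12000.0,  # milliseconds, hence the /1000.0 above
               "total_content_length": 51200}],
    "errors": {"e1": {"name": "/api", "error": "timeout", "occurences": 2}},
}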
def __report_percentiles(self, summary_kpi_set):
    """
    Logs average times and a percentile/response-time table.
    """
    fmt = "Average times: total %.3f, latency %.3f, connect %.3f"
    self.log.info(fmt, summary_kpi_set[KPISet.AVG_RESP_TIME], summary_kpi_set[KPISet.AVG_LATENCY],
                  summary_kpi_set[KPISet.AVG_CONN_TIME])
    data = [("Percentile, %", "Resp. Time, s")]
    for key in sorted(summary_kpi_set[KPISet.PERCENTILES].keys(), key=float):
        data.append((float(key), summary_kpi_set[KPISet.PERCENTILES][key]))
    table = SingleTable(data) if sys.stdout.isatty() else AsciiTable(data)
    table.justify_columns[0] = 'right'
    table.justify_columns[1] = 'right'
    self.log.info("Percentiles:\n%s", table.table)