How to use the stestr.output module in stestr

To help you get started, we’ve selected a few stestr.output examples based on popular ways it is used in public projects.

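The module exposes small helpers for writing tables, test lists, and run summaries to a stream. As a quick orientation, the sketch below calls output_table directly with a header tuple followed by data rows, mirroring the pattern the excerpts below use; the rows themselves are invented for illustration, and the keyword default is assumed from those excerpts.

import sys

from stestr import output

# Hypothetical rows: a header tuple followed by data tuples, following the
# pattern used by stestr's own commands in the examples below.
rows = [
    ('Test id', 'Runtime (s)'),
    ('myproject.tests.test_slow.TestSlow.test_wait', '3.214'),
]
output.output_table(rows, output=sys.stdout)
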

From stestr/commands/slowest.py in mtreinish/stestr on GitHub:
    try:
        latest_id = repo.latest_id()
    except KeyError:
        return 3
    # what happens when there is no timing info?
    test_times = repo.get_test_times(repo.get_test_ids(latest_id))
    known_times = list(test_times['known'].items())
    known_times.sort(key=itemgetter(1), reverse=True)
    if len(known_times) > 0:
        # By default show 10 rows
        if not show_all:
            known_times = known_times[:10]
        known_times = format_times(known_times)
        header = ('Test id', 'Runtime (s)')
        rows = [header] + known_times
        output.output_table(rows, output=stdout)
    return 0
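
This excerpt sorts the known test times in descending order, trims the list to the ten slowest entries unless show_all is set, and prepends a header tuple before handing the rows to output.output_table, which writes the table to the stream passed as output.
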
From stestr/results.py in mtreinish/stestr on GitHub:
        if failures:
            if previous_summary:
                num_failures_delta = failures - \
                    previous_summary.get_num_failures()
            values.append(('failures', failures, num_failures_delta))
        if previous_summary:
            num_tests_run_delta = self._summary.testsRun - \
                previous_summary.testsRun
            if time:
                previous_time_taken = previous_summary.get_time_taken()
                if previous_time_taken:
                    time_delta = time - previous_time_taken
        skips = len(self._summary.skipped)
        if skips:
            values.append(('skips', skips, None))
        output.output_summary(
            not bool(failures), self._summary.testsRun, num_tests_run_delta,
            time, time_delta, values, output=self.stream)
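
Here output.output_summary receives, in order, a success flag, the number of tests run, the delta against the previous run, the wall-clock time, the time delta, a list of (name, count, delta) tuples for failures and skips, and the target stream. A minimal standalone sketch, with the argument meanings inferred from that call rather than from the function's documentation (the counts are invented):

import sys

from stestr import output

# Extra counters as (name, count, delta) tuples; the None deltas assume
# there is no previous run to compare against, as in the excerpt above.
values = [('failures', 2, None), ('skips', 1, None)]
output.output_summary(False, 50, None, 12.3, None, values, output=sys.stdout)
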
From stestr/commands/failing.py in mtreinish/stestr on GitHub:
    failed = False
    result, summary = _make_result(repo, list_tests=list_tests)
    result.startTestRun()
    try:
        case.run(result)
    finally:
        result.stopTestRun()
    failed = not results.wasSuccessful(summary)
    if failed:
        result = 1
    else:
        result = 0
    if list_tests:
        failing_tests = [
            test for test, _ in summary.errors + summary.failures]
        output.output_tests(failing_tests, output=stdout)
    return result
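
Note that the failing tests are passed to output.output_tests as test objects (the first element of each (test, traceback) pair in summary.errors and summary.failures), not as bare id strings. A minimal standalone sketch, assuming the helper asks each element for its test id, as those objects would provide:

import sys
import unittest

from stestr import output

class ExampleTest(unittest.TestCase):
    # Placeholder test used only to have an object with a test id.
    def test_one(self):
        pass

output.output_tests([ExampleTest('test_one')], output=sys.stdout)
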
From stestr/bisect_tests.py in mtreinish/stestr on GitHub:
                    bottom = bottom + check_width
                    if width == 1:
                        # there will be no more to check, so we didn't
                        # reproduce the failure.
                        width = 0
                    else:
                        width = top - bottom
            if spurious_failure not in test_conflicts:
                # Could not determine cause
                test_conflicts[spurious_failure] = 'unknown - no conflicts'
        if test_conflicts:
            table = [('failing test', 'caused by test')]
            for failure in sorted(test_conflicts):
                causes = test_conflicts[failure]
                table.append((failure, causes))
            output.output_table(table)
            return 3
        return 0
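
Unlike the earlier excerpts, this call to output.output_table passes no output argument, so the helper presumably falls back to a default stream; the other examples pass output=stdout explicitly.
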
From stestr/commands/load.py in mtreinish/stestr on GitHub:
def _load_case(inserter, repo, case, subunit_out, pretty_out,
               color, stdout, abbreviate, suppress_attachments,
               all_attachments, show_binary_attachments):
    if subunit_out:
        output_result, summary_result = output.make_result(inserter.get_id,
                                                           output=stdout)
    elif pretty_out:
        outcomes = testtools.StreamToDict(
            functools.partial(subunit_trace.show_outcome, stdout,
                              enable_color=color, abbreviate=abbreviate,
                              suppress_attachments=suppress_attachments,
                              all_attachments=all_attachments,
                              show_binary_attachments=show_binary_attachments))
        summary_result = testtools.StreamSummary()
        output_result = testtools.CopyStreamResult([outcomes, summary_result])
        output_result = testtools.StreamResultRouter(output_result)
        cat = subunit.test_results.CatFiles(stdout)
        output_result.add_rule(cat, 'test_id', test_id=None)
    else:
        try:
            previous_run = repo.get_latest_run()