How to use the pyperf.BenchmarkSuite.load function in pyperf

To help you get started, we’ve selected a few pyperf examples based on popular ways it is used in public projects.

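At its simplest, pyperf.BenchmarkSuite.load() takes the path of a JSON file previously written by pyperf and returns a BenchmarkSuite whose benchmarks can be iterated and inspected. A minimal sketch, where results.json is only a placeholder for a file produced by, for example, `python -m pyperf timeit -o results.json "sorted(range(100))"`:

import pyperf

# Load a suite written earlier by pyperf (for example via `pyperf timeit -o results.json ...`).
suite = pyperf.BenchmarkSuite.load("results.json")

for bench in suite.get_benchmarks():
    # mean() and stdev() are computed over the benchmark's values, in seconds.
    print("%s: mean=%.6f s stdev=%.6f s" % (bench.get_name(), bench.mean(), bench.stdev()))

The examples below show the same call as it appears in real projects.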

github cropsinsilico / yggdrasil / yggdrasil / timing.py
"""
        if not os.path.isfile(self.filename):
            return None
        if self.dont_use_pyperf:
            with open(self.filename, 'rb') as fd:
                if backwards.PY2:  # pragma: Python 2
                    out = backwards.pickle.load(fd)
                else:  # pragma: Python 3
                    out = backwards.pickle.load(fd, encoding='latin1')
        else:
            assert(self.filename.endswith('.json'))
            if as_json:
                with open(self.filename, 'r') as fd:
                    out = json.load(fd)
            else:
                out = pyperf.BenchmarkSuite.load(self.filename)
        return out
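
The yggdrasil snippet above chooses between pickle, raw JSON and pyperf depending on configuration; when the pyperf branch is taken, BenchmarkSuite.load() reads the same JSON file that json.load() would. A hedged sketch of just that dual path, where load_results and its arguments are illustrative rather than part of yggdrasil:

import json
import pyperf

def load_results(filename, as_json=False):
    # A suite saved by pyperf is plain JSON on disk, so it can be read either way.
    if as_json:
        with open(filename, 'r') as fd:
            return json.load(fd)                  # plain dict, exactly as stored
    return pyperf.BenchmarkSuite.load(filename)   # full BenchmarkSuite object
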
github vstinner / pyperf / pyperf / __main__.py
def cmd_convert(args):
    suite = pyperf.BenchmarkSuite.load(args.input_filename)

    if args.add:
        suite2 = pyperf.BenchmarkSuite.load(args.add)
        for bench in suite2.get_benchmarks():
            suite._add_benchmark_runs(bench)

    if args.include_benchmarks:
        names = args.include_benchmarks
        try:
            suite._convert_include_benchmark(names)
        except KeyError:
            fatal_missing_benchmarks(suite, names)

    elif args.exclude_benchmarks:
        names = args.exclude_benchmarks
        try:
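
cmd_convert leans on private BenchmarkSuite helpers (_add_benchmark_runs, _convert_include_benchmark). Roughly the same merge-and-filter behaviour can be sketched with the public API, with the caveat that add_benchmark() requires unique benchmark names rather than merging runs of same-named benchmarks; merge_and_filter below is illustrative:

import pyperf

def merge_and_filter(input_file, extra_file=None, include=None):
    suite = pyperf.BenchmarkSuite.load(input_file)

    if extra_file:
        # Copy benchmarks from a second file; add_benchmark() rejects duplicate names.
        extra = pyperf.BenchmarkSuite.load(extra_file)
        for bench in extra.get_benchmarks():
            suite.add_benchmark(bench)

    if include:
        # Keep only the requested benchmarks; get_benchmark() raises KeyError if missing.
        suite = pyperf.BenchmarkSuite([suite.get_benchmark(name) for name in include])

    return suite
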
github python / pyperformance / pyperformance / run.py
def run_perf_script(python, options, name, extra_args=[]):
    bm_path = Relative("bm_%s.py" % name)
    cmd = list(python)
    cmd.append('-u')
    cmd.append(bm_path)
    cmd.extend(extra_args)
    copy_perf_options(cmd, options)

    with temporary_file() as tmp:
        cmd.extend(('--output', tmp))
        run_command(cmd, hide_stderr=not options.verbose)
        return pyperf.BenchmarkSuite.load(tmp)
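
The same pattern, spawning a child interpreter that writes a pyperf JSON file and then loading it, can be sketched with the standard library alone. script_path below is a placeholder for any script built on pyperf.Runner, which accepts --output:

import os
import subprocess
import sys
import tempfile

import pyperf

def run_benchmark_script(script_path):
    with tempfile.TemporaryDirectory() as tmpdir:
        out = os.path.join(tmpdir, "bench.json")
        # Run the benchmark in a fresh interpreter; -u keeps its output unbuffered.
        subprocess.run([sys.executable, "-u", script_path, "--output", out], check=True)
        return pyperf.BenchmarkSuite.load(out)
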
github python / pyperformance / pyperformance / compare.py
def compare_results(options):
    base_label, changed_label = get_labels(options.baseline_filename,
                                           options.changed_filename)

    base_suite = pyperf.BenchmarkSuite.load(options.baseline_filename)
    changed_suite = pyperf.BenchmarkSuite.load(options.changed_filename)

    results = []
    common = set(base_suite.get_benchmark_names()) & set(
        changed_suite.get_benchmark_names())
    for name in sorted(common):
        base_bench = base_suite.get_benchmark(name)
        changed_bench = changed_suite.get_benchmark(name)
        result = BenchmarkResult(base_bench, changed_bench)
        results.append(result)

    hidden = []
    shown = []
    for result in results:
        name = result.base.get_name()

        significant = significant_msg(result.base, result.changed)
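
The BenchmarkResult and significance helpers above belong to pyperformance itself. A reduced comparison using only the public pyperf API (common names via set intersection, per-benchmark mean()) could look like this, with compare_suites being illustrative:

import pyperf

def compare_suites(baseline_file, changed_file):
    base = pyperf.BenchmarkSuite.load(baseline_file)
    changed = pyperf.BenchmarkSuite.load(changed_file)

    common = sorted(set(base.get_benchmark_names())
                    & set(changed.get_benchmark_names()))
    for name in common:
        base_mean = base.get_benchmark(name).mean()
        changed_mean = changed.get_benchmark(name).mean()
        # A ratio above 1.0 means the changed run is slower than the baseline.
        print("%s: %.6f s -> %.6f s (x%.2f)"
              % (name, base_mean, changed_mean, changed_mean / base_mean))
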
github python / pyperformance / pyperformance / compare.py
def cmd_show(options):
    suite = pyperf.BenchmarkSuite.load(options.filename)
    display_benchmark_suite(suite)
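
A loaded suite can also be written back out. The sketch below filters a suite and saves the result with dump(); the file names are placeholders, and replace=True is assumed to be needed to overwrite an existing output file:

import pyperf

suite = pyperf.BenchmarkSuite.load("results.json")

# Keep only benchmarks whose mean is under one second and save them to a new file.
fast = [bench for bench in suite.get_benchmarks() if bench.mean() < 1.0]
pyperf.BenchmarkSuite(fast).dump("fast-results.json", replace=True)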