How to use the pyperf.Runner class in pyperf

To help you get started, we’ve selected a few pyperf.Runner examples, drawn from the ways it is commonly used in public projects.

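Before diving into the real-world examples, here is a minimal, self-contained sketch of the pattern they all share (the workload function and benchmark name below are placeholders, not taken from any of the projects): create a Runner, record metadata, and register the function with bench_func().

import pyperf


def workload():
    # Placeholder workload; substitute the code you want to measure.
    return sum(i * i for i in range(1000))


if __name__ == "__main__":
    runner = pyperf.Runner()
    runner.metadata['description'] = "Minimal pyperf.Runner example"
    # pyperf calibrates the loop count and spawns worker processes
    # to collect stable timings.
    runner.bench_func('workload', workload)

Run the script directly: pyperf adds its own command-line options, such as -o to write the results to a JSON file.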

From python/pyperformance: pyperformance/benchmarks/bm_json_dumps.py
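This example passes an add_cmdline_args callback to pyperf.Runner so the custom --cases option is forwarded to the worker processes pyperf spawns, and extends runner.argparser with that benchmark-specific option before calling parse_args().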
def main():
    runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)
    runner.argparser.add_argument("--cases",
                                  help="Comma separated list of cases. Available cases: %s. By default, run all cases."
                                       % ', '.join(CASES))
    runner.metadata['description'] = "Benchmark json.dumps()"

    args = runner.parse_args()
    if args.cases:
        cases = []
        for case in args.cases.split(','):
            case = case.strip()
            if case:
                cases.append(case)
        if not cases:
            print("ERROR: empty list of cases")
            sys.exit(1)
    else:
        cases = CASES  # default: run all cases
From python/pyperformance: pyperformance/benchmarks/bm_deltablue.py
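The simplest pattern: create a Runner, set a human-readable description in runner.metadata, then register the function to measure with bench_func(), passing its argument after the benchmark name.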
    edit.destroy_constraint()


# HOORAY FOR GLOBALS... Oh wait.
# In spirit of the original, we'll keep it, but ugh.
planner = None


def delta_blue(n):
    chain_test(n)
    projection_test(n)


if __name__ == "__main__":
    runner = pyperf.Runner()
    runner.metadata['description'] = "DeltaBlue benchmark"

    n = 100
    runner.bench_func('deltablue', delta_blue, n)
From python/pyperformance: pyperformance/benchmarks/bm_django_template.py
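Here the framework is configured (django.conf.settings.configure() and django.setup()) before the Runner is created; a --table-size option is added to runner.argparser and its value is recorded in runner.metadata so each result carries its parameters.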
    context = Context({"table": table})

    runner.bench_func('django_template', template.render, context)


def prepare_cmd(runner, cmd):
    cmd.append("--table-size=%s" % runner.args.table_size)


if __name__ == "__main__":
    django.conf.settings.configure(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
    }])
    django.setup()

    runner = pyperf.Runner()
    cmd = runner.argparser
    cmd.add_argument("--table-size",
                     type=int, default=DEFAULT_SIZE,
                     help="Size of the HTML table, height and width "
                          "(default: %s)" % DEFAULT_SIZE)

    args = runner.parse_args()
    runner.metadata['description'] = "Django template"
    runner.metadata['django_version'] = django.__version__
    runner.metadata['django_table_size'] = args.table_size

    bench_django_template(runner, args.table_size)
From python/pyperformance: pyperformance/benchmarks/bm_pidigits.py
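This example pairs a --digits option with an add_cmdline_args callback so worker processes receive the same value, stores it in runner.metadata, and passes it to the benchmarked function through bench_func().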
            z = compose(z, next(x))
            y = extract(z, 3)
        z = compose((10, -10 * y, 0, 1), z)
        yield y


def calc_ndigits(n):
    return list(islice(gen_pi_digits(), n))


def add_cmdline_args(cmd, args):
    cmd.extend(("--digits", str(args.digits)))


if __name__ == "__main__":
    runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)

    cmd = runner.argparser
    cmd.add_argument("--digits", type=int, default=DEFAULT_DIGITS,
                     help="Number of computed pi digits (default: %s)"
                          % DEFAULT_DIGITS)

    args = runner.parse_args()
    runner.metadata['description'] = "Compute digits of pi."
    runner.metadata['pidigits_ndigit'] = args.digits
    runner.bench_func('pidigits', calc_ndigits, args.digits)
From python/pyperformance: pyperformance/benchmarks/bm_spambayes.py
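bench_func() accepts extra positional arguments after the function, which are passed through on each call: here the classifier and the list of messages.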
import os.path

import pyperf

from spambayes import hammie, mboxutils


__author__ = "skip.montanaro@gmail.com (Skip Montanaro)"
__contact__ = "collinwinter@google.com (Collin Winter)"


def bench_spambayes(ham_classifier, messages):
    for msg in messages:
        ham_classifier.score(msg)


if __name__ == "__main__":
    runner = pyperf.Runner()
    runner.metadata['description'] = "Run the SpamBayes benchmark."

    data_dir = os.path.join(os.path.dirname(__file__), "data")
    mailbox = os.path.join(data_dir, "spambayes_mailbox")
    ham_data = os.path.join(data_dir, "spambayes_hammie.pkl")
    messages = list(mboxutils.getmbox(mailbox))
    ham_classifier = hammie.open(ham_data, "pickle", "r")

    runner.bench_func('spambayes', bench_spambayes, ham_classifier, messages)
From giampaolo/psutil: scripts/internal/bench_oneshot_2.py
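runner.parse_args() returns the parsed options, including pyperf's own args.worker flag, which is set in the spawned worker processes; the example uses it to print informational output only once, from the main process.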
def main():
    runner = pyperf.Runner()

    args = runner.parse_args()
    if not args.worker:
        print("%s methods involved on platform %r (psutil %s):" % (
            len(names), sys.platform, psutil.__version__))
        for name in sorted(names):
            print("    " + name)

    runner.bench_func("normal", call_normal)
    runner.bench_func("oneshot", call_oneshot)
From zopefoundation/zope.interface: benchmarks/micro.py
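When the function under test takes the loop count itself and returns the elapsed time (measured here with pyperf.perf_counter()), use bench_time_func() instead of bench_func(); inner_loops tells pyperf how many iterations each timed loop actually performs, so per-iteration results are scaled correctly.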
    for _ in range(loops):
        for _ in range(INNER):
            for iface in ifaces:
                iface(inst)
    return pyperf.perf_counter() - t0


def bench_iface_call_no_conform_provided_wide(loops):
    return _bench_iface_call_simple(loops, WideInheritance())


def bench_iface_call_no_conform_provided_deep(loops):
    return _bench_iface_call_simple(loops, DeepestInheritance())


runner = pyperf.Runner()

runner.bench_time_func(
    'call interface (provides; deep)',
    bench_iface_call_no_conform_provided_deep,
    inner_loops=INNER * len(ifaces)
)

runner.bench_time_func(
    'call interface (provides; wide)',
    bench_iface_call_no_conform_provided_wide,
    inner_loops=INNER * len(ifaces)
)

runner.bench_time_func(
    'call interface (no alternate, no conform, not provided)',
    bench_iface_call_no_conform_no_alternate_not_provided,
    inner_loops=INNER * len(ifaces)
)
From python/pyperformance: pyperformance/benchmarks/bm_sqlalchemy_imperative.py
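Another bench_time_func() example: the timed function accumulates only the query portion of each iteration with pyperf.perf_counter(), while the --rows option is forwarded to worker processes through add_cmdline_args.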
        # do 'npeople' queries per insert
        for i in range(npeople):
            cur = Person.select()
            cur.execute()

        total_dt += (pyperf.perf_counter() - t0)

    return total_dt


def add_cmdline_args(cmd, args):
    cmd.extend(("--rows", str(args.rows)))


if __name__ == "__main__":
    runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)
    runner.metadata['description'] = ("SQLAlchemy Imperative benchmark "
                                      "using SQLite")
    runner.argparser.add_argument("--rows", type=int, default=100,
                                  help="Number of rows (default: 100)")

    args = runner.parse_args()
    runner.bench_time_func('sqlalchemy_imperative',
                           bench_sqlalchemy, args.rows)
From python/pyperformance: pyperformance/benchmarks/bm_sympy.py
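An optional positional argument restricted by choices selects a single sub-benchmark; when it is omitted, every entry in BENCHMARKS is registered in a loop. Note that the garbage collector is disabled before parse_args() to reduce timing noise.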
        func()
        dt += (timer() - t0)

    return dt


BENCHMARKS = ("expand", "integrate", "sum", "str")


def add_cmdline_args(cmd, args):
    if args.benchmark:
        cmd.append(args.benchmark)


if __name__ == "__main__":
    runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)
    runner.metadata['description'] = "SymPy benchmark"
    runner.argparser.add_argument("benchmark", nargs='?',
                                  choices=BENCHMARKS)

    import gc
    gc.disable()

    args = runner.parse_args()
    if args.benchmark:
        benchmarks = (args.benchmark,)
    else:
        benchmarks = BENCHMARKS

    for bench in benchmarks:
        name = 'sympy_%s' % bench
        func = globals()['bench_' + bench]
        runner.bench_time_func(name, func)
From python/pyperformance: pyperformance/benchmarks/bm_spectral_norm.py
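A minimal bench_time_func() benchmark: the function receives the loop count, runs the workload, and returns the total elapsed time measured with pyperf.perf_counter().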
        for dummy in range(10):
            v = eval_AtA_times_u(u)
            u = eval_AtA_times_u(v)

        vBv = vv = 0

        for ue, ve in zip(u, v):
            vBv += ue * ve
            vv += ve * ve

    return pyperf.perf_counter() - t0


if __name__ == "__main__":
    runner = pyperf.Runner()
    runner.metadata['description'] = (
        'MathWorld: "Hundred-Dollar, Hundred-Digit Challenge Problems", '
        'Challenge #3.')
    runner.bench_time_func('spectral_norm', bench_spectral_norm)