How to use the pyinstrument.Profiler class in pyinstrument

To help you get started, we’ve selected a few pyinstrument.Profiler examples based on popular ways it is used in public projects.
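All of the examples below build on the same basic pattern: create a Profiler, start it, run the code under test, stop it, and render the recorded samples. Here is a minimal sketch of that pattern (busy_work is a placeholder for your own code):

from pyinstrument import Profiler

def busy_work():
    # placeholder workload: anything CPU-bound works
    total = 0
    for i in range(1_000_000):
        total += i * i
    return total

profiler = Profiler()
profiler.start()

busy_work()

profiler.stop()

# render the sampled call tree as a console-friendly report
print(profiler.output_text(unicode=True, color=True))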

github joerick / pyinstrument / test / test_overflow.py
def deep_profiler_session():
    profiler = Profiler()
    profiler.start()

    # leave 120 frames of headroom for pyinstrument to do its own work
    recursion_depth = sys.getrecursionlimit() - current_stack_depth() - 120
    recurse(recursion_depth)

    profiler.stop()
    return profiler.last_session
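The session object returned by last_session can be rendered later with any of pyinstrument's renderers. A sketch of that pattern, assuming a recent pyinstrument (3.x or later, where renderers live in pyinstrument.renderers):

from pyinstrument import Profiler
from pyinstrument.renderers import ConsoleRenderer

profiler = Profiler()
profiler.start()
sum(i * i for i in range(100_000))  # placeholder workload
profiler.stop()

# last_session holds the recorded samples; render them after the fact
session = profiler.last_session
print(ConsoleRenderer(unicode=True, color=False).render(session))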
github mikedh / trimesh / tests / regression.py
    # ... (snippet truncated: the lines above this point in the original
    # regression.py are the tail of a commented-out timing block held in a
    # triple-quoted string, which assembled a `result` dict of cpu_info,
    # baseline, timestamp and timings) ...

    import pyinstrument

    profiler = pyinstrument.Profiler()
    profiler.start()

    typical_application()

    profiler.stop()
    print(profiler.output_text(unicode=True, color=True))
github joerick / pyinstrument / test / test_profiler.py
def test_json_output():
    with Profiler() as profiler:
        long_function_a()
        long_function_b()

    output_data = profiler.output(renderers.JSONRenderer(), root=True)

    output = json.loads(output_data)

    assert output['function'] == 'test_json_output'
    assert len(output['children']) == 2
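This test shows two idioms worth copying: Profiler works as a context manager (in pyinstrument 3.x and later), and output() accepts any renderer. The same approach can produce an HTML report instead; a sketch, with an arbitrary output file name:

from pyinstrument import Profiler

with Profiler() as profiler:
    sorted(range(100_000), key=lambda i: -i)  # placeholder workload

# output_html() returns the report as a string; write it wherever you like
with open('profile_report.html', 'w') as f:
    f.write(profiler.output_html())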
github pywbem / pywbem / tests / manualtest / run_response_performance.py
def execute_test_code(xml_string, profiler):
    """
    The test code to be executed. If a profiler is defined it is enabled
    just before the test code is executed and disabled just after the
    code is executed.
    """
    if profiler:
        if isinstance(profiler, cProfile.Profile):
            profiler.enable()
        elif isinstance(profiler, Profiler):
            profiler.start()

    # The code to be tested
    tt = _tupletree.xml_to_tupletree_sax(xml_string, "TestData")

    parse_cim(tt)

    if profiler:
        if isinstance(profiler, cProfile.Profile):
            profiler.disable()
        elif isinstance(profiler, Profiler):
            profiler.stop()
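The enable/disable versus start/stop branching above is easy to factor out. A hypothetical helper (not part of pywbem) that wraps either profiler type in a single context manager:

import cProfile
from contextlib import contextmanager

from pyinstrument import Profiler

@contextmanager
def profiling(profiler):
    # start whichever profiler we were given
    if isinstance(profiler, cProfile.Profile):
        profiler.enable()
    elif isinstance(profiler, Profiler):
        profiler.start()
    try:
        yield profiler
    finally:
        if isinstance(profiler, cProfile.Profile):
            profiler.disable()
        elif isinstance(profiler, Profiler):
            profiler.stop()

# usage: with profiling(Profiler()): run_the_code_under_test()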
github joerick / pyinstrument / metrics / multi_overhead.py
from timeit import Timer

import pyinstrument

def time_pyinstrument_signal(function, repeats):
    timer = Timer(stmt=function)
    p = pyinstrument.Profiler()
    p.start()
    result = timer.repeat(number=repeats)
    p.stop()
    return result
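To turn those timings into an overhead figure, time the same workload with and without the profiler attached and compare the two. A sketch, using a placeholder workload:

from timeit import Timer

import pyinstrument

def workload():
    return sum(i * i for i in range(10_000))

baseline = min(Timer(stmt=workload).repeat(repeat=5, number=100))

p = pyinstrument.Profiler()
p.start()
profiled = min(Timer(stmt=workload).repeat(repeat=5, number=100))
p.stop()

print('slowdown while profiling: {:.2f}x'.format(profiled / baseline))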
github heronsystems / adeptRL / adept / scripts / p2p.py
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    torch.backends.cudnn.benchmark = True
    agent = make_agent(network, device, env.engine, env.gpu_preprocessor, args)
    # construct container
    container = P2PWorker(agent, env, make_optimizer, args.nb_env, logger, summary_writer, args.summary_frequency,
                          save_interval=args.epoch_len, saver=saver, shared_seed=p2pseed,
                          synchronize_step_interval=args.synchronize_step_interval,
                          share_optimizer_params=args.share_optimizer_params)

    # Run the container
    if args.profile:
        try:
            from pyinstrument import Profiler
        except ImportError:
            raise ImportError('You must install pyinstrument to use profiling.')
        profiler = Profiler()
        profiler.start()
        container.run(10e3)
        profiler.stop()
        print(profiler.output_text(unicode=True, color=True))
    else:
        container.run(args.max_train_steps)
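Gating the profiler behind a --profile flag, as here, keeps sampling overhead out of normal training runs; the next two examples use the same pattern.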
github heronsystems / adeptRL / scripts / attention.py
def _weight_losses(self, loss_dict):
        return loss_dict['policy_loss'] + 0.5 * loss_dict['value_loss']


if __name__ == '__main__':
    os.environ["OMP_NUM_THREADS"] = "1"
    parser = base_parser()
    parser.add_argument('--name', default='attention', help='logdir/tensorboard name')
    parser.add_argument('--nb-head', default=1, type=int, help='number of attention heads')
    args = parser.parse_args()
    training_loop = TrainingLoop(args)

    if args.profile:
        from pyinstrument import Profiler

        profiler = Profiler()
        profiler.start()
        training_loop.run()
        profiler.stop()
        print(profiler.output_text(unicode=True, color=True))
    else:
        training_loop.run()
github joerick / pyinstrument / metrics / overflow.py
from pyinstrument import Profiler

# use_signal is a legacy option from early pyinstrument releases; False
# selects the setprofile-based sampler rather than the signal-based one
p = Profiler(use_signal=False)

p.start()

def func(num):
    # burn some CPU at this level, then recurse until num reaches zero
    if num == 0:
        return
    b = 0
    for x in range(1,100000):
        b += x

    return func(num - 1)

func(900)

p.stop()
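This metrics script deliberately recurses hundreds of frames deep; compare the first example above, which reserves extra stack frames so that pyinstrument itself does not trip Python's recursion limit.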
github heronsystems / adeptRL / scripts / cnn.py
def _weight_losses(self, loss_dict):
        return loss_dict['policy_loss'] + 0.5 * loss_dict['value_loss']


if __name__ == '__main__':
    os.environ["OMP_NUM_THREADS"] = "1"
    parser = base_parser()
    parser.add_argument('--name', default='cnn', help='logdir/tensorboard name')
    args = parser.parse_args()
    training_loop = TrainingLoop(args)

    if args.profile:
        from pyinstrument import Profiler

        profiler = Profiler()
        profiler.start()
        training_loop.run()
        profiler.stop()
        print(profiler.output_text(unicode=True, color=True))
    else:
        training_loop.run()