How to use the pyprind.ProgBar class in PyPrind

To help you get started, we’ve selected a few PyPrind examples, based on popular ways it is used in public projects.
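
pyprind.ProgBar is constructed with the total number of iterations and advanced by calling update() once per step, as the examples below show. A minimal sketch of the basic pattern:

import pyprind

n = 100
bar = pyprind.ProgBar(n, title='processing items')
for i in range(n):
    # ... one unit of work per iteration ...
    bar.update()  # advance the bar by one step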


github anhaidgroup / deepmatcher / deepmatcher / runner.py
        print('* Number of trainable parameters:', tally_parameters(model))

        epoch_str = 'Epoch {0:d}'.format(epoch + 1)
        print('===> ', run_type, epoch_str)
        batch_end = time.time()

        # The tqdm-bar for Jupyter notebook is under development.
        if progress_style == 'tqdm-bar':
            pbar = tqdm(
                total=len(run_iter) // log_freq,
                bar_format='{l_bar}{bar}{postfix}',
                file=sys.stdout)

        # Use the pyprind bar as the default progress bar.
        if progress_style == 'bar':
            pbar = pyprind.ProgBar(len(run_iter) // log_freq, bar_char='█', width=30)

        for batch_idx, batch in enumerate(run_iter):
            batch_start = time.time()
            datatime += batch_start - batch_end

            output = model(batch)

            # from torchviz import make_dot, make_dot_from_trace
            # dot = make_dot(output.mean(), params=dict(model.named_parameters()))
            # pdb.set_trace()

            loss = float('NaN')
            if criterion:
                loss = criterion(output, getattr(batch, label_attr))

            if hasattr(batch, label_attr):
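
The excerpt above sizes the bar to len(run_iter) // log_freq, i.e. one tick per logging interval rather than per batch. The matching update call is cut off; a sketch of how the loop likely continues (the modulo guard is an assumption, not deepmatcher's exact code):

for batch_idx, batch in enumerate(run_iter):
    # ... forward pass and loss computation ...
    if progress_style == 'bar' and (batch_idx + 1) % log_freq == 0:
        pbar.update()  # one tick per log interval, matching the bar's total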
github jonasrothfuss / ProMP / maml_zoo / samplers / maml_sampler.py
            log (boolean): whether to log sampling times
            log_prefix (str) : prefix for logger

        Returns: 
            (dict) : A dict of paths of size [meta_batch_size] x (batch_size) x [5] x (max_path_length)
        """

        # initial setup / preparation
        paths = OrderedDict()
        for i in range(self.meta_batch_size):
            paths[i] = []

        n_samples = 0
        running_paths = [_get_empty_running_paths_dict() for _ in range(self.vec_env.num_envs)]

        pbar = ProgBar(self.total_samples)
        policy_time, env_time = 0, 0

        policy = self.policy
        policy.reset(dones=[True] * self.meta_batch_size)

        # initial reset of envs
        obses = self.vec_env.reset()
        
        while n_samples < self.total_samples:
            
            # execute policy
            t = time.time()
            obs_per_task = np.split(np.asarray(obses), self.meta_batch_size)
            actions, agent_infos = policy.get_actions(obs_per_task)
            policy_time += time.time() - t
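
Here the bar's total is self.total_samples and the loop runs until that many environment transitions have been collected, so each update should advance by the number of new samples rather than by one. The update itself falls outside the excerpt; a hedged sketch of the pattern (collect_step() is a hypothetical helper):

from pyprind import ProgBar

pbar = ProgBar(total_samples)
n_samples = 0
while n_samples < total_samples:
    new_samples = collect_step()   # hypothetical: one vectorized env step
    pbar.update(len(new_samples))  # ProgBar.update() accepts an iteration count
    n_samples += len(new_samples)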
github rlworkgroup / garage / garage / tf / optimizers / first_order_optimizer.py
            if extra_inputs is None:
                extra_inputs = tuple()

            last_loss = f_loss(*(tuple(inputs) + extra_inputs))

            start_time = time.time()

            dataset = BatchDataset(
                inputs, self._batch_size, extra_inputs=extra_inputs)

            sess = tf.get_default_session()

            for epoch in range(self._max_epochs):
                if self._verbose:
                    logger.log("Epoch %d" % (epoch))
                    progbar = pyprind.ProgBar(len(inputs[0]))

                for batch in dataset.iterate(update=True):
                    sess.run(self._train_op,
                             dict(list(zip(self._input_vars, batch))))
                    if self._verbose:
                        progbar.update(len(batch[0]))

                if self._verbose:
                    if progbar.active:
                        progbar.stop()

                new_loss = f_loss(*(tuple(inputs) + extra_inputs))

                if self._verbose:
                    logger.log("Epoch: %d | Loss: %f" % (epoch, new_loss))
                if self._callback or callback:
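
Note the progbar.active check before progbar.stop(): a pyprind bar exposes an active flag and a stop() method that finishes the bar early, even if the full iteration count was never reached. A small sketch (should_abort() is a hypothetical early-exit check):

import pyprind

progbar = pyprind.ProgBar(1000)
for i in range(1000):
    progbar.update()
    if should_abort():  # hypothetical early-exit condition
        break
if progbar.active:
    progbar.stop()      # close the bar cleanly before its total is reached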
github anhaidgroup / py_stringsimjoin / py_stringsimjoin / matcher / apply_matcher.py
    rtable_dict = build_dict_from_table(rtable, r_key_attr_index,
                                        r_match_attr_index, remove_null=False)

    # Find indices of l_key_attr and r_key_attr in candset
    candset_columns = list(candset.columns.values)
    candset_l_key_attr_index = candset_columns.index(candset_l_key_attr)
    candset_r_key_attr_index = candset_columns.index(candset_r_key_attr)

    comp_fn = COMP_OP_MAP[comp_op]
    has_output_attributes = (l_out_attrs is not None or
                             r_out_attrs is not None) 

    output_rows = []

    if show_progress:
        prog_bar = pyprind.ProgBar(len(candset))

    tokenize_flag = False
    if tokenizer is not None:
        tokenize_flag = True
        use_cache = False
        # check if we have cached the tokens.
        if l_tokens is not None and r_tokens is not None:
            use_cache = True

    for candset_row in candset.itertuples(index=False):
        l_id = candset_row[candset_l_key_attr_index]
        r_id = candset_row[candset_r_key_attr_index]

        l_row = ltable_dict[l_id]
        r_row = rtable_dict[r_id]
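
A pattern worth copying from this example: the bar is created only when show_progress is true, so progress reporting stays opt-in for library callers. The update call falls outside the excerpt, but it would carry the same guard; condensed from the code above:

if show_progress:
    prog_bar = pyprind.ProgBar(len(candset))

for candset_row in candset.itertuples(index=False):
    # ... compare the candidate pair ...
    if show_progress:
        prog_bar.update()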
github rasbt / pyprind / examples / ex1_progress_bar_stderr.py
import pyprind


def example_1():
    n = 1000000
    my_bar = pyprind.ProgBar(n, width=40, stream=2)
    for i in range(n):
        my_bar.update()
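
The stream=2 argument sends the bar to standard error, which is also pyprind's default; stream=1 writes to standard output instead, and an object with a write() method can be passed as a custom stream:

import pyprind

n = 1000
bar_err = pyprind.ProgBar(n, stream=2)  # progress on stderr (the default)
bar_out = pyprind.ProgBar(n, stream=1)  # progress on stdout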
github rlworkgroup / garage / src / garage / tf / optimizers / first_order_optimizer.py
        if extra_inputs is None:
            extra_inputs = tuple()

        last_loss = f_loss(*(tuple(inputs) + extra_inputs))

        start_time = time.time()

        dataset = BatchDataset(inputs,
                               self._batch_size,
                               extra_inputs=extra_inputs)

        sess = tf.compat.v1.get_default_session()

        for epoch in range(self._max_epochs):
            if self._verbose:
                logger.log('Epoch {}'.format(epoch))
                progbar = pyprind.ProgBar(len(inputs[0]))

            for batch in dataset.iterate(update=True):
                sess.run(self._train_op,
                         dict(list(zip(self._input_vars, batch))))
                if self._verbose:
                    progbar.update(len(batch[0]))

            if self._verbose:
                if progbar.active:
                    progbar.stop()

            new_loss = f_loss(*(tuple(inputs) + extra_inputs))

            if self._verbose:
                logger.log('Epoch: {} | Loss: {}'.format(epoch, new_loss))
            if self._callback or callback:
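
Both garage variants size the bar by the number of training examples (len(inputs[0])) and then advance it a whole minibatch at a time, keeping the bar in example units even though the loop iterates over batches. The core of that pattern (train_step() is a hypothetical training step):

import pyprind

progbar = pyprind.ProgBar(num_examples)
for batch in dataset.iterate(update=True):
    train_step(batch)              # hypothetical training step
    progbar.update(len(batch[0]))  # advance by the batch size, not by 1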
github mplewis / expose.py / expose.py
"""
    if not jobs:
        return
    if is_video:
        wrapper = convert_video_wrap
        media_type = 'video'
    else:
        wrapper = convert_image_wrap
        media_type = 'image'
    l.info('Processing {}s...'.format(media_type))
    with Pool() as pool:
        manager = Manager()
        queue = manager.Queue()
        wrapped_jobs = [(queue, j) for j in jobs]
        total = len(wrapped_jobs)
        bar = pyprind.ProgBar(total)
        pool.map_async(wrapper, wrapped_jobs)
        while total > 0:
            queue.get()
            bar.update()
            total -= 1
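
The design choice here is worth noting: only the parent process touches the bar. Workers signal completion by putting a token on a Manager queue, and the parent consumes one token per bar.update(), so progress output never interleaves across processes.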
github lepisma / blackbird / utils / beetsplug / blackbird.py
        def features_func(lib, opts, args):
            filtered_items = lib.items(decargs(args))

            if len(filtered_items) == 0:
                print("Query didn't match any item")
                return

            seq_features_file = config["blackbird"]["features"].get("unicode")
            seq_features = cPickle.load(open(seq_features_file, "rb"))

            print("Finding features...")
            bar = pyprind.ProgBar(len(filtered_items))
            for item in filtered_items:
                if item.id not in seq_features:
                    try:
                        data = get_mfcc(item.path)
                        seq_features[item.id] = data
                    except Exception as e:
                        print(e)
                bar.update()

            print("Saving data...")
            cPickle.dump(seq_features,
                         open(seq_features_file, "wb"),
                         protocol=cPickle.HIGHEST_PROTOCOL)
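
One detail from this last example: bar.update() sits after the try/except rather than inside it, so the bar still advances when feature extraction fails for an item and always reaches its total. As a reusable pattern (process() is a hypothetical per-item step):

import pyprind

bar = pyprind.ProgBar(len(items))
for item in items:
    try:
        process(item)   # hypothetical per-item work
    except Exception as e:
        print(e)        # log the failure and keep going
    bar.update()        # count the item either way, so the bar completes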