How to use the pyprind.prog_bar function in PyPrind

To help you get started, we’ve selected a few PyPrind examples based on popular ways it is used in public projects.
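Before the project snippets, here is a minimal, self-contained sketch of the basic pattern (the iterable, sleep time, and stream choice are placeholders, not taken from any of the projects below): wrap a sized iterable in pyprind.prog_bar and iterate as usual while the bar tracks progress.

import sys
import time

import pyprind

# Wrap any sized iterable; the bar renders on the chosen stream as you iterate.
for i in pyprind.prog_bar(range(100), stream=sys.stdout):
    time.sleep(0.01)  # placeholder for real work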


github rasbt / pyprind / tests / test_progress_bar.py View on Github
def test_generator():
    # n, sleeptime, and the imports come from the test module's top-level setup
    for i in pyprind.prog_bar(range(n), stream=sys.stdout):
        time.sleep(sleeptime)
github rasbt / pyprind / test / custom_stream.py View on Github
import sys

import pyprind

n = 1000000
mbar = pyprind.ProgBar(n, stream=sys.stdout)
for i in range(n):
    mbar.update()

mper = pyprind.ProgPercent(n, stream=sys.stdout)
for i in range(n):
    mper.update()

# 'test' is not a valid stream object, so pyprind should warn and fall back
mbar2 = pyprind.ProgBar(n, stream='test')
for i in range(n):
    mbar2.update()

for i in pyprind.prog_bar(range(n), stream=sys.stdout):
    # do something
    pass

for i in pyprind.prog_percent(range(n), stream=sys.stdout):
    # do something
    pass

# same invalid stream value, exercised through the prog_bar generator
for i in pyprind.prog_bar(range(n), stream='test'):
    # do something
    pass
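The custom_stream.py test above sends the bar to sys.stdout and also passes an invalid stream value on purpose. Beyond the integer streams (1 for stdout, 2 for stderr), recent PyPrind releases also accept any object with a write() method, so the bar output can be captured rather than printed; a small sketch of that, using an io.StringIO buffer of my own choosing rather than anything from the test file:

import io

import pyprind

buffer = io.StringIO()  # any object exposing write() should be usable as a stream

for i in pyprind.prog_bar(range(1000), stream=buffer):
    pass

print(len(buffer.getvalue()))  # the rendered bar text ends up in the buffer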
github mplewis / expose.py / expose.py View on Github
    # excerpt begins inside an if/else that selects the media type being processed
    else:
        media_lc = 'image'
        media_uc = 'Image'
        src_media = src_images
        media_targets = img_targets

    l.info('Generating {} jobs...'.format(media_lc))
    jobs = []
    skipped = 0

    si = src_media(cfg)
    if not si:
        l.debug('No source {}s'.format(media_lc))
        return

    for src in pyprind.prog_bar(si):
        j, s = media_targets(cfg, src, dry_run)
        jobs.extend(j)
        skipped += s

    l.info('{} jobs: running {}, skipped {}, total {}'
           .format(media_uc, len(jobs), skipped, len(jobs) + skipped))

    return jobs
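The expose.py excerpt wraps a list of source media files, which is probably the most common real-world use: iterate over a known collection of work items and let the bar report progress. A stripped-down sketch of that pattern, with hypothetical items standing in for src_media(cfg) and media_targets(...), and using the optional title keyword that prog_bar forwards to the underlying bar in current releases:

import pyprind

items = ['a.jpg', 'b.jpg', 'c.jpg']  # hypothetical stand-in for src_media(cfg)

jobs = []
for src in pyprind.prog_bar(items, title='Generating image jobs'):
    jobs.append(src.upper())  # placeholder for media_targets(cfg, src, dry_run)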
github Breakend / RLSSContinuousControlTutorial / code / ddpg / ddpg.py View on Github
            sess.run(tf.global_variables_initializer())
            itr = 0
            path_length = 0
            path_return = 0
            terminal = False
            initial = False
            observation = self.env.reset()

            with tf.variable_scope("sample_policy"):
                sample_policy = Serializable.clone(self.policy)

            for epoch in range(self.n_epochs):
                logger.push_prefix('epoch #%d | ' % epoch)
                logger.log("Training started")
                train_qf_itr, train_policy_itr = 0, 0
                for epoch_itr in pyprind.prog_bar(range(self.epoch_length)):
                    # Execute policy
                    if terminal:
                        # Note that if the last time step ends an episode, the very
                        # last state and observation will be ignored and not added
                        # to the replay pool
                        observation = self.env.reset()
                        sample_policy.reset()
                        self.es_path_returns.append(path_return)
                        path_length = 0
                        path_return = 0
                        initial = True
                    else:
                        initial = False
                    action = self.es.get_action(itr, observation, policy=sample_policy)  # qf=qf)

                    next_observation, reward, terminal, _ = self.env.step(action)
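The reinforcement-learning examples in this section all share one structure: an outer epoch loop, with pyprind.prog_bar wrapped around the inner per-epoch iteration so each epoch renders its own bar. A framework-free sketch of just that structure (the epoch counts and the loop body are placeholders):

import pyprind

n_epochs, epoch_length = 3, 1000  # placeholder sizes

for epoch in range(n_epochs):
    print('epoch #%d' % epoch)
    for itr in pyprind.prog_bar(range(epoch_length)):
        pass  # placeholder for a single environment/training step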
github rlworkgroup / garage / garage / theano / optimizers / first_order_optimizer.py View on Github
        if extra_inputs is None:
            extra_inputs = tuple()

        last_loss = f_loss(*(tuple(inputs) + extra_inputs))

        start_time = time.time()

        dataset = BatchDataset(
            inputs,
            self._batch_size,
            extra_inputs=extra_inputs
            #, randomized=self._randomized
        )

        itr = 0
        for epoch in pyprind.prog_bar(list(range(self._max_epochs))):
            for batch in dataset.iterate(update=True):
                f_opt(*batch)
                if yield_itr is not None and (itr % (yield_itr + 1)) == 0:
                    yield
                itr += 1

            new_loss = f_loss(*(tuple(inputs) + extra_inputs))
            if self._verbose:
                logger.log("Epoch %d, loss %s" % (epoch, new_loss))

            if self._callback or callback:
                elapsed = time.time() - start_time
                callback_args = dict(
                    loss=new_loss,
                    params=self._target.get_param_values(trainable=True)
                    if self._target else None,
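Note the list(range(...)) wrapper in the garage optimizer above: prog_bar needs the total number of iterations up front (it effectively takes len() of the iterable), so unsized generators have to be materialized with list() first; a Python 3 range already has a length and works directly. A short sketch of both cases, with a hypothetical generator:

import pyprind

def work_items():  # hypothetical generator: has no len()
    for i in range(50):
        yield i

# A sized iterable such as range can be wrapped directly.
for i in pyprind.prog_bar(range(50)):
    pass

# An unsized generator is materialized so the bar knows its total length.
for i in pyprind.prog_bar(list(work_items())):
    pass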
github hpi-xnor / BMXNet-v2 / example / reinforcement-learning / ddpg / ddpg.py View on Github
        memory = ReplayMem(
            obs_dim=self.env.observation_space.flat_dim,
            act_dim=self.env.action_space.flat_dim,
            memory_size=self.memory_size)

        itr = 0
        path_length = 0
        path_return = 0
        end = False
        obs = self.env.reset()

        for epoch in range(self.n_epochs):
            logger.push_prefix("epoch #%d | " % epoch)
            logger.log("Training started")
            for epoch_itr in pyprind.prog_bar(range(self.epoch_length)):
                # run the policy
                if end:
                    # reset the environment and strategy when an episode ends
                    obs = self.env.reset()
                    self.strategy.reset()
                    # self.policy.reset()
                    self.strategy_path_returns.append(path_return)
                    path_length = 0
                    path_return = 0
                # note action is sampled from the policy not the target policy
                act = self.strategy.get_action(obs, self.policy)
                nxt, rwd, end, _ = self.env.step(act)

                path_length += 1
                path_return += rwd
github Riashat / Bayesian-Exploration-Deep-RL / ddpg_bayesian_thompson.py View on Github
            terminal = False
            initial = False
            observation = self.env.reset()

            with tf.variable_scope("sample_policy"):
                sample_policy = Serializable.clone(self.policy)

            for epoch in range(self.n_epochs):
                logger.push_prefix('epoch #%d | ' % epoch)
                logger.log("Training started")
                train_qf_itr, train_policy_itr = 0, 0

                #sample a policy function from the posterior at every episode
                #move in the entire episode with the sampled policy function?

                for epoch_itr in pyprind.prog_bar(range(self.epoch_length)):
                    # Execute policy
                    if terminal:  # or path_length > self.max_path_length:
                        # Note that if the last time step ends an episode, the very
                        # last state and observation will be ignored and not added
                        # to the replay pool
                        observation = self.env.reset()
                        self.es.reset()
                        sample_policy.reset()
                        self.es_path_returns.append(path_return)
                        path_length = 0
                        path_return = 0
                        initial = True
                    else:
                        initial = False
                        
                    action = self.es.get_action(itr, observation, policy=sample_policy)  # qf=qf)
github OcelotProject / Ocelot / draft_spold2_writer.py View on Github
                    except ValueError:
                        signature = []
                    except:
                        1/0  # fix me now: deliberate crash placeholder
                    if 'is_child' in signature:
                        d[field] = constructors[sel['Python type']][0](c, is_child = is_child)
                    else:
                        d[field] = constructors[sel['Python type']][0](c)
            
    return d

if __name__ == '__main__':
    folder = r'C:\Dropbox (ecoinvent)\ei-int\technical\releases\3.4\Undefined\datasets'
    result_folder = r'C:\Dropbox (ecoinvent)\ei-int\technical\releases\3.4\Undefined\test'
    filelist = utils.build_file_list(folder)
    for filename in pyprind.prog_bar(filelist):
        f = TestObject('', '', folder = folder, filename = filename)
#        f.write_to_spold(result_folder)
        break
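The Ocelot excerpt wraps a list of dataset file names, the same collection-of-work-items pattern again. As a closing sketch, here it is in isolation; the file names and the per-file step are placeholders, and since PyPrind writes the bar to stderr by default (stream 2), it should not interleave with normal stdout output:

import pyprind

filelist = ['a.spold', 'b.spold', 'c.spold']  # placeholder file names

for filename in pyprind.prog_bar(filelist):
    _ = filename.upper()  # placeholder for parsing/validating each dataset file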