How to use the lab.logger.write function in lab

To help you get started, we've selected a few examples of how lab's logger.write function is used in public projects.

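All of the examples below follow the same store-then-write pattern: metric values are queued with logger.store (or collected inside logger.section blocks), and logger.write flushes them to a single, continuously updated console line, while logger.new_line keeps the current line instead of overwriting it. Here is a minimal sketch of that pattern; the from lab import logger import path is an assumption based on the lab.logger name above.

import time

from lab import logger  # assumed import path

for step in range(100):
    logger.add_global_step()             # advance the global step counter
    logger.store(loss=100 / (step + 1))  # queue a value; write() reports the mean
    logger.write()                       # flush queued indicators to the console
    if (step + 1) % 10 == 0:
        logger.new_line()                # keep every tenth line on screen
    time.sleep(0.01)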

vpj/lab · lab/training_loop.py (View on GitHub)
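The training loop's cleanup handler: it restores the previous SIGINT handler, flushes the logger with a final write() and new_line(), and saves a checkpoint if model saving is enabled.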
def __finish(self):
    # Restore the original SIGINT handler; signal.signal raises
    # ValueError when called outside the main thread.
    try:
        signal.signal(signal.SIGINT, self.old_handler)
    except ValueError:
        pass
    # Flush any queued indicators and keep the final console line.
    logger.write()
    logger.new_line()
    if self.__is_save_models:
        logger.save_checkpoint()
vpj/lab · backend/samples/getting_started.py (View on GitHub)
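An excerpt from the getting-started sample's main loop: values collected inside logger.section blocks are flushed by write(), which prints to the console and writes TensorBoard summaries; new_line() and save_progress() run every tenth step, and Ctrl-C is handled by the except block at the end.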
                with logger.section("process_samples", is_silent=True):
                    time.sleep(0.5)

                # A third section with an inner loop
                with logger.section("train", total_steps=100):
                    # Let it run for multiple iterations.
                    # We'll track the progress of that too
                    for i in range(100):
                        time.sleep(0.01)
                        # Progress is tracked manually, unlike in the top-level iterator.
                        # The progress updates do not have to be sequential.
                        logger.progress(i + 1)

                # Log stored values.
                # This will output to the console and write TensorBoard summaries.
                logger.write()

                # Store progress in the trials file and in the python code as a comment
                if (global_step + 1) % 10 == 0:
                    logger.save_progress()

                # By default we will overwrite the same console line.
                # `new_line` makes it go to the next line.
                # This helps keep the console output concise.
                if (global_step + 1) % 10 == 0:
                    logger.new_line()
        except KeyboardInterrupt:
            logger.finish_loop()
            logger.new_line()
            logger.log(f"Stopping the training at {global_step} and saving checkpoints")
            break
vpj/lab · samples/mnist_loop.py (View on GitHub)
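A training step from the MNIST sample: the batch loss is queued with logger.store, and write() prints the mean every train_log_interval batches.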
            data, target = data.to(self.device), target.to(self.device)
            self.optimizer.zero_grad()
            output = self.model(data)
            loss = F.nll_loss(output, target)
            loss.backward()
            self.optimizer.step()

            # Add training loss to the logger.
            # The logger will queue the values and output the mean
            logger.store(train_loss=loss.item())
            logger.add_global_step()

            # Print output to the console
            if i % self.train_log_interval == 0:
                # Output the indicators
                logger.write()
vpj/lab · samples/tutorial/a_logger.py (View on GitHub)
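A small tutorial loop: loss_ma is registered as a queue indicator of size 10, write() is called on every step, and new_line() keeps every second console line.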
def loop():
    # Log a couple of key-value pairs.
    logger.info(a=2, b=1)

    # `loss_ma` is a queue indicator: write() reports the mean of
    # the last 10 stored values.
    logger.add_indicator('loss_ma', IndicatorType.queue, IndicatorOptions(queue_size=10))
    for i in range(10):
        logger.add_global_step(1)
        logger.store(loss=100 / (i + 1), loss_ma=100 / (i + 1))
        logger.write()
        if (i + 1) % 2 == 0:
            logger.new_line()

        time.sleep(2)
vpj/lab · samples/mnist_configs.py (View on GitHub)
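The configs-based MNIST sample uses the same store-then-write pattern, with the output interval taken from self.log_interval.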
            data, target = data.to(self.device), target.to(self.device)
            self.optimizer.zero_grad()
            output = self.model(data)
            loss = F.nll_loss(output, target)
            loss.backward()
            self.optimizer.step()

            # Add training loss to the logger.
            # The logger will queue the values and output the mean
            logger.store(train_loss=loss.item())
            logger.add_global_step()

            # Print output to the console
            if i % self.log_interval == 0:
                # Output the indicators
                logger.write()
vpj/lab · lab/training_loop.py (View on GitHub)
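The loop's __next__ method: write(), new_line(), and save_checkpoint() each run on their own epoch interval, and a received SIGINT ends the loop cleanly on the next iteration.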
def __next__(self):
    # A SIGINT was caught during the previous iteration:
    # finish cleanly instead of dying mid-step.
    if self.__signal_received is not None:
        logger.log('\nKilling Loop.', color=colors.Color.red)
        logger.finish_loop()
        self.__finish()
        raise StopIteration("SIGINT")

    try:
        epoch = next(self.__loop)
    except StopIteration as e:
        self.__finish()
        raise e

    # Flush indicators and keep the console line on their
    # respective intervals.
    if self.__is_interval(epoch, self.__log_write_interval):
        logger.write()
    if self.__is_interval(epoch, self.__log_new_line_interval):
        logger.new_line()

    if (self.__is_save_models and
            self.__is_interval(epoch, self.__save_models_interval)):
        logger.save_checkpoint()

    return epoch
vpj/lab · backend/samples/mnist_pytorch.py (View on GitHub)
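In the backend MNIST sample the global step is set explicitly from the epoch and batch index with set_global_step, progress() updates the in-place display, and write() runs every args.log_interval batches.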
            optimizer.zero_grad()
            output = model(data)
            loss = F.nll_loss(output, target)
            loss.backward()
            optimizer.step()

            # Add training loss to the logger.
            # The logger will queue the values and output the mean
            logger.store(train_loss=loss.item())
            logger.progress(batch_idx + 1)
            logger.set_global_step(epoch * len(train_loader) + batch_idx)

            # Print output to the console
            if batch_idx % args.log_interval == 0:
                # Output the indicators
                logger.write()