How to use the poutyne.framework.callbacks.CSVLogger class in Poutyne

To help you get started, we've selected a few Poutyne examples based on popular ways it is used in public projects.
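
Before diving into the examples, here is a minimal sketch of the basic pattern: construct a CSVLogger with an output path and pass it in the callbacks list of a training call. The network, data, and file name below are illustrative placeholders, not taken from the examples that follow.

import torch
import torch.nn as nn
import torch.optim as optim
from poutyne.framework import Model
from poutyne.framework.callbacks import CSVLogger

# Toy data and network, for illustration only.
x = torch.randn(128, 10)
y = torch.randn(128, 1)
network = nn.Linear(10, 1)
optimizer = optim.SGD(network.parameters(), lr=0.01)

model = Model(network, optimizer, nn.MSELoss())

# CSVLogger writes one row of metrics per epoch to the given file.
logger = CSVLogger('training_log.csv')
model.fit(x, y, epochs=5, batch_size=32, callbacks=[logger])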


From GRAAL-Research/poutyne: tests/framework/callbacks/test_logger.py (view on GitHub)
def test_logging_append(self):
    train_gen = some_data_generator(20)
    valid_gen = some_data_generator(20)

    # First run: write a fresh log for epochs 1 to 10.
    logger = CSVLogger(self.csv_filename)
    history = self.model.fit_generator(train_gen, valid_gen, epochs=10, steps_per_epoch=5, callbacks=[logger])

    # Second run: append to the same file, continuing from epoch 10 up to epoch 20.
    logger = CSVLogger(self.csv_filename, append=True)
    history2 = self.model.fit_generator(train_gen,
                                        valid_gen,
                                        epochs=20,
                                        steps_per_epoch=5,
                                        initial_epoch=10,
                                        callbacks=[logger])
    self._test_logging(history + history2)
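
Note the pattern: the second CSVLogger is created with append=True so that rows from the resumed run are added to the same file instead of overwriting it, while initial_epoch=10 keeps the epoch numbers contiguous across the two fit_generator calls.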
From GRAAL-Research/poutyne: tests/framework/callbacks/test_logger.py (view on GitHub)
def test_logging(self):
    train_gen = some_data_generator(20)
    valid_gen = some_data_generator(20)
    logger = CSVLogger(self.csv_filename)
    history = self.model.fit_generator(train_gen, valid_gen, epochs=10, steps_per_epoch=5, callbacks=[logger])
    self._test_logging(history)
From GRAAL-Research/poutyne: tests/framework/callbacks/test_logger.py (view on GitHub)
def test_logging_with_batch_granularity(self):
    train_gen = some_data_generator(20)
    valid_gen = some_data_generator(20)
    logger = CSVLogger(self.csv_filename, batch_granularity=True)

    # A History callback collects the epoch logs so they can be
    # compared against the CSV written by the logger.
    history = History()
    self.model.fit_generator(train_gen, valid_gen, epochs=10, steps_per_epoch=5, callbacks=[logger, history])
    self._test_logging(history.history)
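
With batch_granularity=True, the logger records metrics at the batch level rather than only at the end of each epoch. As a rough sketch of how you might inspect the resulting file afterwards (the file name is an assumption, and the exact columns depend on which metrics you track):

import pandas as pd

# 'log.csv' stands in for whatever path was given to CSVLogger.
df = pd.read_csv('log.csv')
print(df.head())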
From GRAAL-Research/poutyne: poutyne/framework/experiment.py (view on GitHub)
callbacks = [] if callbacks is None else callbacks
lr_schedulers = [] if lr_schedulers is None else lr_schedulers

# Copy the callback list so the caller's list is not mutated.
callbacks = list(callbacks)

tensorboard_writer = None
initial_epoch = 1
if self.logging:
    if not os.path.exists(self.directory):
        os.makedirs(self.directory)

    # Restart the optimization from the last saved epoch if needed.
    initial_epoch = self._load_epoch_state(lr_schedulers)

    # Append to the log only when resuming, i.e. not starting at epoch 1.
    callbacks += [CSVLogger(self.log_filename, separator='\t', append=initial_epoch != 1)]

    callbacks += self._init_model_restoring_callbacks(initial_epoch, save_every_epoch)
    callbacks += [
        ModelCheckpoint(self.model_checkpoint_filename,
                        verbose=False,
                        temporary_filename=self.model_checkpoint_tmp_filename)
    ]
    callbacks += [
        OptimizerCheckpoint(self.optimizer_checkpoint_filename,
                            verbose=False,
                            temporary_filename=self.optimizer_checkpoint_tmp_filename)
    ]

    # We save the last epoch number after the end of the epoch so that
    # _load_epoch_state() knows from which epoch to restart the optimization.
    callbacks += [
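
The idiom worth copying from this excerpt is append=initial_epoch != 1: the experiment appends to its tab-separated log only when it is resuming a previous run. Below is a standalone sketch of the same idea, with an assumed file path and a simple file-existence check standing in for _load_epoch_state():

import os
from poutyne.framework.callbacks import CSVLogger

log_filename = 'log.tsv'  # assumed path for this sketch

# Append to an existing log when resuming; otherwise start a fresh file.
resuming = os.path.exists(log_filename)
logger = CSVLogger(log_filename, separator='\t', append=resuming)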