# Imports assumed for these training-script excerpts; recipe-local helpers
# (conf, data loaders, make_model_and_optimizer, take_mag, ...) are defined elsewhere.
import os
import json
import torch
import yaml
import pytorch_lightning as pl
from torch.optim.lr_scheduler import ReduceLROnPlateau
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from asteroid.engine.system import System
from asteroid.engine.optimizers import make_optimizer
from asteroid.losses import PITLossWrapper, pairwise_neg_sisdr

trainer = pl.Trainer(max_nb_epochs=conf['training']['epochs'],
                     checkpoint_callback=checkpoint,
                     early_stop_callback=early_stopping,
                     default_save_path=exp_dir,
                     gpus=gpus,
                     distributed_backend='dp',
                     train_percent_check=1.0,  # Useful for fast experiments.
                     gradient_clip_val=200)
trainer.fit(system)

with open(os.path.join(exp_dir, "best_k_models.json"), "w") as f:
    json.dump(checkpoint.best_k_models, f, indent=0)
# Save the last model for convenience.
torch.save(system.model.state_dict(),
           os.path.join(exp_dir, 'checkpoints/final.pth'))
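# Hedged sketch (not in the original excerpt): picking the best checkpoint back
# from best_k_models.json, assuming the JSON maps checkpoint paths to the
# monitored val_loss values written above.
with open(os.path.join(exp_dir, "best_k_models.json"), "r") as f:
    best_k = json.load(f)
best_ckpt_path = min(best_k, key=best_k.get)  # Lowest val_loss wins (mode='min').
# PyTorch Lightning checkpoints keep the model weights under the 'state_dict' key.
best_ckpt = torch.load(best_ckpt_path, map_location='cpu')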
class ChimeraSystem(System):
    """System subclass for Chimera-style training: the model returns both
    embeddings (deep clustering head) and estimated masks, and the loss
    combines the two."""

    def __init__(self, *args, mask_mixture=True, **kwargs):
        super().__init__(*args, **kwargs)
        self.mask_mixture = mask_mixture

    def common_step(self, batch, batch_nb, train=False):
        inputs, targets, masks = self.unpack_data(batch)
        embeddings, est_masks = self(inputs)
        # Magnitude spectrogram of the mixture, used to weight the masks.
        spec = take_mag(self.model.encoder(inputs.unsqueeze(1)))
        if self.mask_mixture:
            est_masks = est_masks * spec.unsqueeze(1)
            masks = masks * spec.unsqueeze(1)
        loss, loss_dic = self.loss_func(embeddings, targets, est_src=est_masks,
                                        target_src=masks, mix_spec=spec)
        return loss, loss_dic
    def training_step(self, batch, batch_nb):
        # Minimal sketch (assumed): delegate to common_step with train=True;
        # the returned dict follows the old PL logging convention.
        loss, loss_dic = self.common_step(batch, batch_nb, train=True)
        return {'loss': loss, 'log': loss_dic}
exp_dir = conf['main_args']['exp_dir']
os.makedirs(exp_dir, exist_ok=True)
conf_path = os.path.join(exp_dir, 'conf.yml')
with open(conf_path, 'w') as outfile:
    yaml.safe_dump(conf, outfile)
# Define the loss function.
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from='pw_mtx')
# loss_class = PITLossContainer(pairwise_neg_sisdr, n_src=train_set.n_src)
# The checkpointing callback can monitor any quantity returned by the
# validation step; it defaults to val_loss here (see System).
checkpoint_dir = os.path.join(exp_dir, 'checkpoints/')
checkpoint = ModelCheckpoint(checkpoint_dir, monitor='val_loss',
                             mode='min', save_best_only=False)
# The next PL release (due December 7th) will add save_top_k.
system = System(model=model, loss_func=loss_func, optimizer=optimizer,
                train_loader=train_loader, val_loader=val_loader,
                config=conf)
trainer = pl.Trainer(max_nb_epochs=conf['training']['epochs'],
                     checkpoint_callback=checkpoint,
                     default_save_path=exp_dir,
                     gpus=conf['main_args']['gpus'],
                     distributed_backend='dp')
trainer.fit(system)
# make_model_and_optimizer is assumed to be defined in the recipe's local model.py.
model, optimizer = make_model_and_optimizer(conf)
# Define the scheduler.
scheduler = None
if conf['training']['half_lr']:
    scheduler = ReduceLROnPlateau(optimizer=optimizer, factor=0.5,
                                  patience=5)
# Just after instantiating, save the args for easy loading in the future.
exp_dir = conf['main_args']['exp_dir']
os.makedirs(exp_dir, exist_ok=True)
conf_path = os.path.join(exp_dir, 'conf.yml')
with open(conf_path, 'w') as outfile:
    yaml.safe_dump(conf, outfile)
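# Hedged sketch (not in the original excerpt): the saved conf.yml can later be
# loaded back to rebuild the model, e.g. for evaluation.
with open(conf_path) as f:
    loaded_conf = yaml.safe_load(f)
model_for_eval, _ = make_model_and_optimizer(loaded_conf)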
# Define the loss function.
loss_func = PITLossWrapper(pairwise_neg_sisdr, mode='pairwise')
system = System(model=model, loss_func=loss_func, optimizer=optimizer,
                train_loader=train_loader, val_loader=val_loader,
                scheduler=scheduler, config=conf)
# Define callbacks.
checkpoint_dir = os.path.join(exp_dir, 'checkpoints/')
checkpoint = ModelCheckpoint(checkpoint_dir, monitor='val_loss',
                             mode='min', save_top_k=5, verbose=1)
early_stopping = False
if conf['training']['early_stop']:
    early_stopping = EarlyStopping(monitor='val_loss', patience=10,
                                   verbose=1)
# Don't request GPUs if none are available.
if not torch.cuda.is_available():
    print('No available GPU was found, setting gpus to None')
    conf['main_args']['gpus'] = None
# Define the scheduler.
scheduler = None
if conf['training']['half_lr']:
    scheduler = ReduceLROnPlateau(optimizer=optimizer, factor=0.5,
                                  patience=5)
# Just after instantiating, save the args for easy loading in the future.
exp_dir = conf['main_args']['exp_dir']
os.makedirs(exp_dir, exist_ok=True)
conf_path = os.path.join(exp_dir, 'conf.yml')
with open(conf_path, 'w') as outfile:
    yaml.safe_dump(conf, outfile)
# Define the loss function.
loss_func = PITLossWrapper(lambda x, y: pairwise_neg_sisdr(x, y).mean(-1),
                           pit_from='pw_mtx')
system = System(model=model, loss_func=loss_func, optimizer=optimizer,
                train_loader=train_loader, val_loader=val_loader,
                scheduler=scheduler, config=conf)
# Define callbacks.
checkpoint_dir = os.path.join(exp_dir, 'checkpoints/')
checkpoint = ModelCheckpoint(checkpoint_dir, monitor='val_loss',
                             mode='min', save_top_k=5, verbose=1)
early_stopping = False
if conf['training']['early_stop']:
    early_stopping = EarlyStopping(monitor='val_loss', patience=10,
                                   verbose=1)
# Don't request GPUs if none are available.
gpus = -1 if torch.cuda.is_available() else None
# Arguments after checkpoint_callback are assumed to follow the same pattern
# as the first excerpt above.
trainer = pl.Trainer(max_nb_epochs=conf['training']['epochs'],
                     checkpoint_callback=checkpoint,
                     early_stop_callback=early_stopping,
                     default_save_path=exp_dir,
                     gpus=gpus,
                     distributed_backend='dp')
trainer.fit(system)
from asteroid.engine.system import System as SystemCore


class SystemTwoStep(SystemCore):
    """Inherits from the core System class and overrides the methods for the
    common steps as well as the train and evaluation steps for two-step
    source separation.

    Args:
        model (torch.nn.Module): Instance of model.
        optimizer (torch.optim.Optimizer): Instance or list of optimizers.
        loss_func (callable): Loss function with signature
            (est_targets, targets).
        train_loader (torch.utils.data.DataLoader): Training dataloader.
        val_loader (torch.utils.data.DataLoader): Validation dataloader.
        scheduler (torch.optim.lr_scheduler._LRScheduler): Instance, or list
            of learning rate schedulers.
        config: Anything to be saved with the checkpoints during training,
            e.g. the config dictionary needed to re-instantiate the run.
    """
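# Hedged usage sketch (not in the original excerpt): SystemTwoStep is built
# like the base System; its two-step behaviour lives in its overridden step
# methods.
two_step_system = SystemTwoStep(model=model, loss_func=loss_func,
                                optimizer=optimizer,
                                train_loader=train_loader,
                                val_loader=val_loader,
                                scheduler=scheduler, config=conf)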
optimizer = make_optimizer(model.parameters(), **conf['optim'])
# Define the scheduler.
scheduler = None
if conf['training']['half_lr']:
    scheduler = ReduceLROnPlateau(optimizer=optimizer, factor=0.5,
                                  patience=5)
# Just after instantiating, save the args for easy loading in the future.
exp_dir = conf['main_args']['exp_dir']
os.makedirs(exp_dir, exist_ok=True)
conf_path = os.path.join(exp_dir, 'conf.yml')
with open(conf_path, 'w') as outfile:
    yaml.safe_dump(conf, outfile)
# Define the loss function.
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from='pw_mtx')
system = System(model=model, loss_func=loss_func, optimizer=optimizer,
                train_loader=train_loader, val_loader=val_loader,
                scheduler=scheduler, config=conf)
# Define callbacks.
checkpoint_dir = os.path.join(exp_dir, 'checkpoints/')
checkpoint = ModelCheckpoint(checkpoint_dir, monitor='val_loss',
                             mode='min', save_top_k=5, verbose=1)
early_stopping = False
if conf['training']['early_stop']:
    early_stopping = EarlyStopping(monitor='val_loss', patience=10,
                                   verbose=1)
# Don't request GPUs if none are available.
gpus = -1 if torch.cuda.is_available() else None
trainer = pl.Trainer(max_epochs=conf['training']['epochs'],
                     checkpoint_callback=checkpoint,