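# All of the snippets below read their hyperparameters from an argparse namespace
# that is unpacked with **vars(args). A minimal sketch of such a parser is given
# here for context only; the option names match the attributes used below, but the
# parser itself and the default values are assumptions, not part of the original code.
import argparse

parser = argparse.ArgumentParser(description="options assumed by the training snippets below")
parser.add_argument("--batch-size", dest="batch_size", type=int, default=16)
parser.add_argument("--num-workers", dest="num_workers", type=int, default=4)
parser.add_argument("--num-epochs", dest="num_epochs", type=int, default=100)
parser.add_argument("--use-cuda", dest="use_cuda", action="store_true")
parser.add_argument("--visualize", action="store_true")
parser.add_argument("--log-dir", dest="log_dir", type=str, default="logs")
args = parser.parse_args()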
model = CapsuleModel(x_dim=p.NUM_PIXELS, y_dim=p.NUM_LABELS, **vars(args))
# initialize local variables to maintain the best validation accuracy
# seen across epochs over the supervised training set, the corresponding
# test accuracy, and the state of the networks
best_valid_acc, corresponding_test_acc = 0.0, 0.0
# train for the requested number of epochs
for i in range(model.epoch, args.num_epochs):
    # optionally limit the number of entries used from each dataset split
sizes = { "train": 1000, "dev": 100 }
# prepare data loaders
datasets, data_loaders = dict(), dict()
for mode in ["train", "dev"]:
datasets[mode] = Aspire(mode=mode, data_size=sizes[mode])
data_loaders[mode] = AudioDataLoader(datasets[mode], batch_size=args.batch_size,
num_workers=args.num_workers, shuffle=True,
use_cuda=args.use_cuda, pin_memory=True)
# train an epoch
model.train_epoch(data_loaders["train"])
logger.info(f"epoch {model.epoch:03d}: "
f"training loss {model.meter_loss.value()[0]:5.3f} "
f"training accuracy {model.meter_accuracy.value()[0]:6.3f}")
# validate
model.test(data_loaders["dev"])
logger.info(f"epoch {model.epoch:03d}: "
f"validating loss {model.meter_loss.value()[0]:5.3f} "
f"validating accuracy {model.meter_accuracy.value()[0]:6.3f}")
    # update the best validation accuracy and the corresponding
    # test accuracy and the state of the networks
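    # The snippet is cut off at this point. A minimal sketch of how the update
    # announced by the comment above could look, assuming the validation accuracy
    # is read from model.meter_accuracy as in the log line above; the save() call
    # and its filename are assumptions, not shown in the original code.
    valid_acc = model.meter_accuracy.value()[0]
    if best_valid_acc < valid_acc:
        best_valid_acc = valid_acc
        # model.save("capsule_best.pth")  # assumed checkpoint hook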
# randomly sample at most data_size entries from the manifest
size = min(self.data_size, len(manifest))
self.entries = random.sample(self.entries, size)
if self.mode == "train_unsup":
self.entry_frames = [_samples2frames(int(e[2])) for e in self.entries]
else:
self.entry_frames = [int(e[4]) for e in self.entries]
logger.info(f"{len(self.entries)} entries, {sum(self.entry_frames)} frames are loaded.")
if __name__ == "__main__":
    # manual toggles for quick local checks
    if False:
        reconstruct_manifest(DATA_ROOT)
    if True:
train_dataset = Aspire(mode="test")
loader = AudioDataLoader(train_dataset, batch_size=10, num_workers=4, shuffle=True)
print(f"num_workers={loader.num_workers}")
for i, data in enumerate(loader):
tensors, targets = data
#for tensors, targets in data:
print(tensors, targets)
if False:
import matplotlib
matplotlib.use('TkAgg')
matplotlib.interactive(True)
import matplotlib.pyplot as plt
for tensor, target in zip(tensors, targets):
tensor = tensor.view(-1, p.CHANNEL, p.WIDTH, p.HEIGHT)
t = np.arange(0, tensor.size(3)) / 8000
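            # The plotting block is truncated here. A hedged sketch of how it might
            # continue, showing the first channel of the reshaped tensor against the
            # time axis computed above; the imshow arguments are illustrative only.
            plt.imshow(tensor[0, 0].numpy(), aspect="auto", origin="lower",
                       extent=[t[0], t[-1], 0, tensor.size(2)])
            plt.title(f"target: {target}")
            plt.show()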
def train_ssvae(args):
if args.visualize:
from plot import visualize_setup, plot_samples, plot_tsne
visualize_setup(args.log_dir)
    # batch_size: number of samples (and labels) to be considered in a batch
ss_vae = SsVae(x_dim=p.NUM_PIXELS, y_dim=p.NUM_LABELS, **vars(args))
    # optionally limit the number of entries used from each dataset split
sizes = { "train_unsup": 200000, "train_sup": 1000, "dev": 1000 }
# prepare data loaders
datasets, data_loaders = dict(), dict()
for mode in ["train_unsup", "train_sup", "dev"]:
datasets[mode] = Aspire(mode=mode, data_size=sizes[mode])
data_loaders[mode] = AudioDataLoader(datasets[mode], batch_size=args.batch_size,
num_workers=args.num_workers, shuffle=True,
use_cuda=args.use_cuda, pin_memory=True)
    # initialize local variables to maintain the best validation accuracy
    # seen across epochs over the supervised training set, the corresponding
    # test accuracy, and the state of the networks
best_valid_acc, corresponding_test_acc = 0.0, 0.0
# run inference for a certain number of epochs
for i in range(ss_vae.epoch, args.num_epochs):
# get the losses for an epoch
avg_losses_sup, avg_losses_unsup = ss_vae.train_epoch(data_loaders)
# validate
validation_accuracy = ss_vae.get_accuracy(data_loaders["dev"], desc="validating")
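        # The function body is cut off here. A minimal sketch of how the accuracy
        # bookkeeping initialized above could continue; the log format and the
        # ss_vae.save() call are assumptions, not shown in the original snippet.
        logger.info(f"epoch {ss_vae.epoch:03d}: "
                    f"avg_loss_sup {avg_losses_sup} avg_loss_unsup {avg_losses_unsup} "
                    f"val_accuracy {validation_accuracy:5.3f}")
        if best_valid_acc < validation_accuracy:
            best_valid_acc = validation_accuracy
            # ss_vae.save("ss_vae_best.pth")  # assumed checkpoint hook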
conv_am = ConvAM(x_dim=p.NUM_PIXELS, y_dim=p.NUM_LABELS, **vars(args))
# initialize local variables to maintain the best validation accuracy
# seen across epochs over the supervised training set, the corresponding
# test accuracy, and the state of the networks
best_valid_acc, corresponding_test_acc = 0.0, 0.0
# train for the requested number of epochs
for i in range(conv_am.epoch, args.num_epochs):
    # optionally limit the number of entries used from each dataset split
sizes = { "train": 10000, "dev": 100 }
# prepare data loaders
datasets, data_loaders = dict(), dict()
for mode in ["train", "dev"]:
datasets[mode] = Aspire(mode=mode, data_size=sizes[mode])
data_loaders[mode] = AudioDataLoader(datasets[mode], batch_size=args.batch_size,
num_workers=args.num_workers, shuffle=True,
use_cuda=args.use_cuda, pin_memory=True)
# get the losses for an epoch
avg_loss = conv_am.train_epoch(data_loaders["train"])
# validate
validation_accuracy = conv_am.get_accuracy(data_loaders["dev"], desc="validating")
logger.info(f"epoch {conv_am.epoch:03d}: "
f"avg_loss {avg_loss:7.3f} "
f"val_accuracy {validation_accuracy:5.3f}")
# update the best validation accuracy and the corresponding
# testing accuracy and the state of the parent module (including the networks)
if best_valid_acc < validation_accuracy:
best_valid_acc = validation_accuracy
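        # The snippet ends mid-update. A hedged sketch of how the rest of the
        # bookkeeping described by the comment above could look; the test split
        # and the conv_am.save() call are assumptions, since neither appears
        # in this snippet.
        if "test" in data_loaders:
            corresponding_test_acc = conv_am.get_accuracy(data_loaders["test"], desc="testing")
        # conv_am.save("conv_am_best.pth")  # assumed checkpoint hook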