"""train loop"""
device = torch.device('cpu' if not use_cuda else 'cuda')
model, optimizer = manager.model, manager.optimizer
logger.info('Model parameters: {}'.format(get_model_parameters_count(model)))
if use_cuda:
    # CUDA memory currently allocated on the device (in bytes)
    model_mem_allocation = torch.cuda.memory_allocated(device)
    logger.info('Model memory allocation: {}'.format(model_mem_allocation))
else:
    model_mem_allocation = None

writer = SummaryWriter(manager.log_dir)

# running meters for timing, loss, top-1 accuracy and activation memory
data_time = AverageMeter()
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
act_mem_activations = AverageMeter()

ceriterion = loss

# ensure train_loader enumerates to max_epoch
max_iterations = train_loader.sampler.nsamples // train_loader.batch_size
train_loader.sampler.nsamples = train_loader.sampler.nsamples - start_iter

end = time.time()
for ind, (x, label) in enumerate(train_loader):
    iteration = ind + 1 + start_iter
    if iteration > max_iterations:
        logger.info('maximum number of iterations reached: {}/{}'.format(iteration, max_iterations))
        break
    # step-wise learning-rate decay: multiply the learning rate by 0.1
    # at iterations 40000 and 60000
    if iteration == 40000 or iteration == 60000:
        for param_group in optimizer.param_groups:
            param_group['lr'] *= 0.1
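
# NOTE: AverageMeter is provided by the project's own utilities and is not shown
# in this snippet. The class below is only a sketch of the kind of running-average
# helper assumed here, inferred from the meter.update(value, n) calls and the use
# of a running average; the project's actual implementation may differ.
class AverageMeter:
    """Tracks the last value, a weighted sum, and a running average of a scalar."""

    def __init__(self):
        self.val = 0.0    # most recent value
        self.sum = 0.0    # weighted sum of all values seen so far
        self.count = 0    # total weight (e.g. number of samples)
        self.avg = 0.0    # running average

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count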
"""train loop"""
device = torch.device('cpu' if not use_cuda else 'cuda')
model, optimizer = manager.model, manager.optimizer
logger.info('Model parameters: {}'.format(get_model_parameters_count(model)))
if use_cuda:
model_mem_allocation = torch.cuda.memory_allocated(device)
logger.info('Model memory allocation: {}'.format(model_mem_allocation))
else:
model_mem_allocation = None
writer = SummaryWriter(manager.log_dir)
data_time = AverageMeter()
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
act_mem_activations = AverageMeter()
ceriterion = loss
# ensure train_loader enumerates to max_epoch
max_iterations = train_loader.sampler.nsamples // train_loader.batch_size
train_loader.sampler.nsamples = train_loader.sampler.nsamples - start_iter
end = time.time()
for ind, (x, label) in enumerate(train_loader):
iteration = ind + 1 + start_iter
if iteration > max_iterations:
logger.info('maximum number of iterations reached: {}/{}'.format(iteration, max_iterations))
break
device = torch.device('cpu' if not use_cuda else 'cuda')
model, optimizer = manager.model, manager.optimizer
logger.info('Model parameters: {}'.format(get_model_parameters_count(model)))
if use_cuda:
model_mem_allocation = torch.cuda.memory_allocated(device)
logger.info('Model memory allocation: {}'.format(model_mem_allocation))
else:
model_mem_allocation = None
writer = SummaryWriter(manager.log_dir)
data_time = AverageMeter()
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
act_mem_activations = AverageMeter()
ceriterion = loss
# ensure train_loader enumerates to max_epoch
max_iterations = train_loader.sampler.nsamples // train_loader.batch_size
train_loader.sampler.nsamples = train_loader.sampler.nsamples - start_iter
end = time.time()
for ind, (x, label) in enumerate(train_loader):
iteration = ind + 1 + start_iter
if iteration > max_iterations:
logger.info('maximum number of iterations reached: {}/{}'.format(iteration, max_iterations))
break
if iteration == 40000 or iteration == 60000:
for param_group in optimizer.param_groups:
device = torch.device('cpu' if not use_cuda else 'cuda')
model, optimizer = manager.model, manager.optimizer
logger.info('Model parameters: {}'.format(get_model_parameters_count(model)))
if use_cuda:
model_mem_allocation = torch.cuda.memory_allocated(device)
logger.info('Model memory allocation: {}'.format(model_mem_allocation))
else:
model_mem_allocation = None
writer = SummaryWriter(manager.log_dir)
data_time = AverageMeter()
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
act_mem_activations = AverageMeter()
ceriterion = loss
# ensure train_loader enumerates to max_epoch
max_iterations = train_loader.sampler.nsamples // train_loader.batch_size
train_loader.sampler.nsamples = train_loader.sampler.nsamples - start_iter
end = time.time()
for ind, (x, label) in enumerate(train_loader):
iteration = ind + 1 + start_iter
if iteration > max_iterations:
logger.info('maximum number of iterations reached: {}/{}'.format(iteration, max_iterations))
break
if iteration == 40000 or iteration == 60000:
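
# The manual learning-rate drops at iterations 40000 and 60000 in the loop above
# could equivalently be expressed with torch.optim.lr_scheduler.MultiStepLR. This
# is only a sketch of an equivalent setup, not what the snippet itself does; it
# assumes the same optimizer and that scheduler.step() is called once per iteration.
from torch.optim.lr_scheduler import MultiStepLR

scheduler = MultiStepLR(optimizer, milestones=[40000, 60000], gamma=0.1)
# inside the training loop, after optimizer.step():
#     scheduler.step()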
def validate(model, ceriterion, val_loader, device):
    """validation sub-loop"""
    model.eval()
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    end = time.time()
    with torch.no_grad():
        for x, label in val_loader:
            x, label = x.to(device), label.to(device)
            vx, vl = x, label
            score = model(vx)
            loss = ceriterion(score, vl)
            prec1 = accuracy(score.data, label)
            # update running meters with the per-batch loss, top-1 accuracy and timing
            losses.update(loss.item(), x.size(0))
            top1.update(prec1[0][0], x.size(0))
            batch_time.update(time.time() - end)
            end = time.time()
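
# accuracy() is another project helper that is not part of this snippet. Since the
# code above reads prec1[0][0], it presumably returns a list of 1-element tensors,
# one per requested top-k value, in the style of the classic PyTorch ImageNet
# example. The function below is a sketch under that assumption, not the project's
# actual implementation.
def accuracy(output, target, topk=(1,)):
    """Compute top-k precision (in percent) for the given logits and labels."""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # indices of the top-k predictions: (batch, maxk) -> (maxk, batch)
        _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res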