# (Injected tool banner, not project code) Secure your code as it's written.
# Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# Tail of a training loop whose `for` header precedes this chunk:
# count the step so the running loss can be averaged.
steps += 1
# Report the mean training loss for this pass (Python 2 print statement).
print "Train Loss", sum_loss/steps
#print "Real Epoch", train_dataloader.epoch
# --- Validation pass: evaluate the SOL (start-of-line) network on the
# test set and track the best (lowest) mean validation loss. ---
# NOTE(review): original indentation was lost in extraction; loop/if
# bodies below are shown flat.
sol.eval()
sum_loss = 0.0
steps = 0.0
for step_i, x in enumerate(test_dataloader):
# volatile=True (pre-0.4 PyTorch): inference-only Variables, so no
# autograd graph is built during validation.
img = Variable(x['img'].type(dtype), requires_grad=False, volatile=True)
sol_gt = Variable(x['sol_gt'].type(dtype), requires_grad=False, volatile=True)
predictions = sol(img)
# Convert predicted (x, y, rotation, scale) boxes to corner-point (xyxy) form.
predictions = transformation_utils.pt_xyrs_2_xyxy(predictions)
loss = alignment_loss(predictions, sol_gt, x['label_sizes'], alpha_alignment, alpha_backprop)
### Write images to file for visualization (kept disabled)
#org_img = img[0].data.cpu().numpy().transpose([2,1,0])
#org_img = ((org_img + 1)*128).astype(np.uint8)
#org_img = org_img.copy()
#org_img = drawing.draw_sol_torch(predictions, org_img)
# cv2.imwrite("data/sol_val_2/{}.png".format(step_i), org_img)
# loss.data[0]: pre-0.4 PyTorch scalar extraction from a 1-element tensor.
sum_loss += loss.data[0]
steps += 1
# Count validation rounds since the last improvement; reset when the
# mean validation loss improves. "Saving Best" suggests a snapshot is
# written here, but the save call is not visible in this chunk -- TODO confirm.
cnt_since_last_improvement += 1
if lowest_loss > sum_loss/steps:
cnt_since_last_improvement = 0
lowest_loss = sum_loss/steps
print "Saving Best"
# NOTE(review): this `break` exits an enclosing loop that starts before
# this chunk; whether it belongs inside the improvement branch cannot be
# determined from the flattened indentation -- verify against the original.
break
# --- Training pass: one sweep over the training dataloader with
# standard zero_grad / backward / step updates. ---
sol.train()
sum_loss = 0.0
steps = 0.0
start_time = time.time()
for step_i, x in enumerate(train_dataloader):
img = Variable(x['img'].type(dtype), requires_grad=False)
# sol_gt stays None when the image carries no ground-truth start-of-line
# positions; alignment_loss accepts a None target (per the comment at the
# sibling loop later in this file).
sol_gt = None
if x['sol_gt'] is not None:
sol_gt = Variable(x['sol_gt'].type(dtype), requires_grad=False)
predictions = sol(img)
# Convert predicted (x, y, rotation, scale) boxes to corner-point (xyxy) form.
predictions = transformation_utils.pt_xyrs_2_xyxy(predictions)
loss = alignment_loss(predictions, sol_gt, x['label_sizes'], alpha_alignment, alpha_backprop)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# loss.data[0]: pre-0.4 PyTorch scalar extraction.
sum_loss += loss.data[0]
steps += 1
print "Train Loss", sum_loss/steps
print "Real Epoch", train_dataloader.epoch
print "Time:", time.time() - start_time
# Wall-clock budget report; the comparison that enforces the budget is
# not visible in this chunk.
print "Train Time:",(time.time() - init_training_time), "Allowed Time:", allowed_training_time
sol.eval()
sum_loss = 0.0
steps = 0.0
start_time = time.time()
for step_i, x in enumerate(test_dataloader):
img = Variable(x['img'].type(dtype), requires_grad=False)
sol_gt = None
if x['sol_gt'] is not None:
sol_gt = Variable(x['sol_gt'].type(dtype), requires_grad=False)
predictions = sol(img)
predictions = transformation_utils.pt_xyrs_2_xyxy(predictions)
loss = alignment_loss(predictions, sol_gt, x['label_sizes'], alpha_alignment, alpha_backprop)
sum_loss += loss.data[0]
steps += 1
if epoch == 0:
print "First Validation Step Complete"
print "Benchmark Validation CER:", sum_loss/steps
lowest_loss = sum_loss/steps
# Re-initialize the model from the 'current' snapshot directory, loading
# only the SOL component, and build a fresh Adam optimizer over it.
sol, lf, hw = init_model(config, sol_dir='current', only_load='sol')
optimizer = torch.optim.Adam(sol.parameters(), lr=train_config['sol']['learning_rate'])
# Restore optimizer state (moments/step counts) if a snapshot file exists.
optim_path = os.path.join(train_config['snapshot']['current'], "sol_optim.pt")
if os.path.exists(optim_path):
print "Loading Optim Settings"
# safe_load.torch_state: project helper that loads a torch state dict from disk.
optimizer.load_state_dict(safe_load.torch_state(optim_path))
else:
# NOTE(review): indentation was lost in extraction -- it is unclear
# whether this reset is the `else` body or top-level setup for the
# training loop below; confirm against the original file.
steps = 0.0
# --- Training pass (mirrors the earlier training loop in this file). ---
for step_i, x in enumerate(train_dataloader):
img = Variable(x['img'].type(dtype), requires_grad=False)
sol_gt = None
if x['sol_gt'] is not None:
# This is needed because if sol_gt is None it means there are
# no GT positions in the image. The alignment loss handles
# it correctly as None.
sol_gt = Variable(x['sol_gt'].type(dtype), requires_grad=False)
predictions = sol(img)
# Convert predicted (x, y, rotation, scale) boxes to corner-point (xyxy) form.
predictions = transformation_utils.pt_xyrs_2_xyxy(predictions)
loss = alignment_loss(predictions, sol_gt, x['label_sizes'], alpha_alignment, alpha_backprop)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# NOTE(review): only `steps` is visibly reset before this loop (above);
# no `sum_loss = 0.0` reset appears in this chunk -- verify against the
# full file that the running average is not carried over.
sum_loss += loss.data[0]
steps += 1
print "Train Loss", sum_loss/steps
#print "Real Epoch", train_dataloader.epoch
# --- Start of another validation pass; the loop body continues past the
# end of this chunk and is not visible here. ---
sol.eval()
sum_loss = 0.0
steps = 0.0
for step_i, x in enumerate(test_dataloader):