# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def _test(self):
    """Evaluate the model on the test loader and log loss/accuracy.

    Iterates the test set under ``torch.no_grad()``, accumulating the
    summed NLL loss and the number of correct predictions, then stores
    the dataset-level averages in the logger.
    """
    self.model.eval()
    total_loss = 0
    n_correct = 0
    with torch.no_grad():
        for data, target in logger.iterator("Test", self.test_loader):
            data, target = data.to(self.device), target.to(self.device)
            output = self.model(data)
            # Sum (not mean) per batch so we can normalize by dataset size.
            total_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)
            n_correct += pred.eq(target.view_as(pred)).sum().item()
    # Add test loss and accuracy to logger
    n_samples = len(self.test_loader.dataset)
    logger.store(test_loss=total_loss / n_samples)
    logger.store(accuracy=n_correct / n_samples)
def _test(self):
    """Run one evaluation pass over the test set, logging mean metrics."""
    self.model.eval()
    loss_sum = 0
    hits = 0
    with torch.no_grad():
        for data, target in logger.iterator("Test", self.test_loader):
            data = data.to(self.device)
            target = target.to(self.device)
            output = self.model(data)
            # reduction='sum' keeps a raw total for dataset-wide averaging.
            loss_sum += F.nll_loss(output, target, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)
            hits += pred.eq(target.view_as(pred)).sum().item()
    # Add test loss and accuracy to logger
    logger.store(test_loss=loss_sum / len(self.test_loader.dataset))
    logger.store(accuracy=hits / len(self.test_loader.dataset))
def _test(self):
    """Measure test-set loss and accuracy and push them to the logger."""
    self.model.eval()
    cumulative_loss = 0
    correct_count = 0
    # Gradients are not needed for evaluation.
    with torch.no_grad():
        for data, target in logger.iterator("Test", self.test_loader):
            data, target = data.to(self.device), target.to(self.device)
            output = self.model(data)
            cumulative_loss += F.nll_loss(output, target, reduction='sum').item()
            # Predicted class = index of the max log-probability.
            pred = output.argmax(dim=1, keepdim=True)
            correct_count += pred.eq(target.view_as(pred)).sum().item()
    # Add test loss and accuracy to logger
    dataset_size = len(self.test_loader.dataset)
    logger.store(test_loss=cumulative_loss / dataset_size)
    logger.store(accuracy=correct_count / dataset_size)
def _test(self):
    """Evaluate on the test loader; store averaged loss and accuracy."""
    self.model.eval()
    running_loss = 0
    running_correct = 0
    with torch.no_grad():
        for data, target in logger.iterator("Test", self.test_loader):
            # Move the batch to the configured device before the forward pass.
            data, target = data.to(self.device), target.to(self.device)
            output = self.model(data)
            running_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)
            running_correct += pred.eq(target.view_as(pred)).sum().item()
    # Add test loss and accuracy to logger
    logger.store(test_loss=running_loss / len(self.test_loader.dataset))
    logger.store(accuracy=running_correct / len(self.test_loader.dataset))
def test(session: tf.Session, loss_value, accuracy_value, batches):
    """Run the test graph until the dataset iterator is exhausted.

    Accumulates per-batch loss and accuracy tensors evaluated by
    ``session`` and stores the per-batch means in the logger.
    """
    with logger.section("Test", total_steps=batches):
        test_loss = 0
        correct = 0
        batch_idx = -1
        while True:
            batch_idx += 1
            try:
                batch_loss, batch_acc = session.run([loss_value, accuracy_value])
            except tf.errors.OutOfRangeError:
                # Dataset iterator is exhausted — end the evaluation loop.
                break
            # Only reached when session.run succeeded for this batch.
            test_loss += batch_loss
            correct += batch_acc
            logger.progress(batch_idx + 1)
        logger.store(test_loss=test_loss / batches)
        logger.store(accuracy=correct / batches)
# Evaluate the model on the test set inside a monitored logger section.
with logger.section("Test", total_steps=len(test_loader)):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(test_loader):
            data, target = data.to(device), target.to(device)
            output = model(data)
            # Summed loss so the final value can be averaged over the dataset.
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
            # Report progress to the section after each batch.
            logger.progress(batch_idx + 1)
    # Add test loss and accuracy to logger
    logger.store(test_loss=test_loss / len(test_loader.dataset))
    logger.store(accuracy=correct / len(test_loader.dataset))
# A sample monitored section inside iterator
with logger.section("sample"):
    time.sleep(0.5)

# A silent section is used only to organize code.
# It produces no output
with logger.section("logging", is_silent=True):
    # Store values
    logger.store(reward=global_step / 3.0, fps=12)
    # Store more values
    for i in range(global_step, global_step + 10):
        logger.store('loss', i)
        logger.store(advantage_reward=(i, i * 2))

# Another silent section
with logger.section("process_samples", is_silent=True):
    time.sleep(0.5)

# A third section with an inner loop
with logger.section("train", total_steps=100):
    # Let it run for multiple iterations.
    # We'll track the progress of that too
    for i in range(100):
        time.sleep(0.01)
        # Progress is tracked manually unlike in the top level iterator.
        # The progress updates do not have to be sequential.
        logger.progress(i + 1)

# Log stored values.
# Evaluate the TF graph until the dataset iterator raises OutOfRangeError.
with logger.section("Test", total_steps=batches):
    test_loss = 0
    correct = 0
    batch_idx = -1
    while True:
        batch_idx += 1
        try:
            l, a = session.run([loss_value, accuracy_value])
            test_loss += l
            correct += a
        except tf.errors.OutOfRangeError:
            # Iterator exhausted — leave the loop without counting this step.
            break
        logger.progress(batch_idx + 1)
    # Store per-batch means of the accumulated metrics.
    logger.store(test_loss=test_loss / batches)
    logger.store(accuracy=correct / batches)
def _train(self):
    """Run one training epoch over ``self.train_loader``.

    Performs the standard forward/backward/step cycle per batch,
    queues the batch loss in the logger, and writes the indicators
    every ``self.log_interval`` batches.
    """
    self.model.train()
    for batch_idx, (data, target) in logger.enumerator("Train", self.train_loader):
        data = data.to(self.device)
        target = target.to(self.device)
        self.optimizer.zero_grad()
        output = self.model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        self.optimizer.step()
        # Add training loss to the logger.
        # The logger will queue the values and output the mean
        logger.store(train_loss=loss.item())
        logger.add_global_step()
        # Print output to the console
        if batch_idx % self.log_interval == 0:
            # Output the indicators
            logger.write()