# Creation of the trainer
with logger.section("Create trainer"):
    optimizer = tf.train.AdamOptimizer(learning_rate=args.lr)

    train_iterator = train_dataset.make_initializable_iterator()
    data, target = train_iterator.get_next()
    train_loss = loss(model, data, target)
    train_op = optimizer.minimize(train_loss)

    test_iterator = test_dataset.make_initializable_iterator()
    data, target = test_iterator.get_next()
    test_loss = loss(model, data, target)
    test_accuracy = accuracy(model, data, target)

logger.add_indicator("train_loss", queue_limit=10, is_print=True)
logger.add_indicator("test_loss", is_histogram=False, is_print=True)
logger.add_indicator("accuracy", is_histogram=False, is_print=True)
#
batches = len(x_train) // args.batch_size

with tf.Session() as session:
    EXPERIMENT.start_train(session)

    # Loop through the monitored iterator
    for epoch in logger.loop(range(0, args.epochs)):
        # Delayed keyboard interrupt handling lets you use a keyboard
        # interrupt to end the loop cleanly: the interrupt is captured
        # and the loop finishes at the end of the current iteration,
        # i.e. it won't stop in the middle of an epoch.
        try:
            with logger.delayed_keyboard_interrupt():
                # Training and testing steps go here
                # (omitted in this snippet)
                ...
        except KeyboardInterrupt:
            # The interrupt surfaces here, between iterations
            break
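# A minimal sketch of what the omitted training and testing body could
# look like, reusing the iterators and ops defined above (hypothetical,
# not from the original example):
def run_epoch(session):
    # One pass over the training set
    session.run(train_iterator.initializer)
    for _ in range(batches):
        _, batch_loss = session.run([train_op, train_loss])
        logger.store(train_loss=batch_loss)

    # Evaluate on the whole test set
    session.run(test_iterator.initializer)
    while True:
        try:
            batch_loss, batch_accuracy = session.run([test_loss, test_accuracy])
            logger.store(test_loss=batch_loss, accuracy=batch_accuracy)
        except tf.errors.OutOfRangeError:
            break

    # Flush the stored indicator values
    logger.write()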
def __call__(self):
    # Training and testing
    logger_util.add_model_indicators(self.model)

    logger.add_indicator("train_loss", IndicatorType.queue,
                         IndicatorOptions(queue_size=20, is_print=True))
    logger.add_indicator("test_loss", IndicatorType.histogram,
                         IndicatorOptions(is_print=True))
    logger.add_indicator("accuracy", IndicatorType.histogram,
                         IndicatorOptions(is_print=True))

    for _ in self.loop:
        self._train()
        self._test()
        self.__log_model_params()
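# `__log_model_params` is private to the class above and not shown in
# the snippet; a minimal sketch, assuming it feeds the per-parameter
# indicators that `add_model_indicators` registers (hypothetical):
def __log_model_params(self):
    for name, param in self.model.named_parameters():
        if not param.requires_grad:
            continue
        # Store parameter and gradient values for histogram indicators
        logger.store(**{name: param.data.cpu().numpy()})
        if param.grad is not None:
            logger.store(**{f"{name}_grad": param.grad.cpu().numpy()})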
def startup(self):
    logger_util.add_model_indicators(self.model)

    logger.add_indicator("train_loss", IndicatorType.queue,
                         IndicatorOptions(queue_size=20, is_print=True))
    logger.add_indicator("test_loss", IndicatorType.histogram,
                         IndicatorOptions(is_print=True))
    logger.add_indicator("accuracy", IndicatorType.histogram,
                         IndicatorOptions(is_print=True))
import time

def loop():
    logger.info(a=2, b=1)

    logger.add_indicator('loss_ma', IndicatorType.queue, IndicatorOptions(queue_size=10))
    for i in range(10):
        logger.add_global_step(1)
        logger.store(loss=100 / (i + 1), loss_ma=100 / (i + 1))
        logger.write()
        if (i + 1) % 2 == 0:
            logger.new_line()
        time.sleep(2)
    logger.info(one=1,
                two=2,
                string="string")
# ### Set logger indicators
# Reward is queued; this is useful when you want to track the moving
# average of something.
logger.add_indicator("reward", queue_limit=10)

# By default everything is a set of values and will create a TensorBoard histogram.
# We specify that `fps` is a scalar.
# If you store multiple values for this, it will output the mean.
logger.add_indicator("fps", is_histogram=False, is_print=False)

# This will produce a histogram
logger.add_indicator("loss", is_print=False)

# A heat map
logger.add_indicator("advantage_reward", is_histogram=False, is_print=False, is_pair=True)

# Create a TensorFlow session
with tf.Session() as session:
    # Start the experiment from scratch, without loading from a
    # saved checkpoint (`is_init=True`).
    # This will clear all the old checkpoints and summaries for this
    # experiment.
    # If you start with `is_init=False`,
    # the experiment will load from the last saved checkpoint.
    EXPERIMENT.start_train(session, is_init=True)

    # This is the main training loop of this project.
    for global_step in logger.loop(range(50)):
        # You can set the global step explicitly with
        # `logger.set_global_step(global_step)`
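        # A minimal sketch of a loop body feeding the indicators
        # registered above; the stored values are hypothetical
        # placeholders, not from the original example.
        logger.store(reward=global_step / 50)         # queued -> moving average
        logger.store(fps=60.0)                        # scalar -> mean of stored values
        logger.store(loss=1.0 / (global_step + 1))    # set of values -> histogram
        logger.write()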
# Data loading
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=args.batch_size, shuffle=True, **kwargs)

test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])),
    batch_size=args.test_batch_size, shuffle=True, **kwargs)
# Model creation
with logger.section("Create model"):
    model = Net().to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)

# Specify indicators
logger.add_indicator("train_loss", queue_limit=10, is_print=True)
logger.add_indicator("test_loss", is_histogram=False, is_print=True)
logger.add_indicator("accuracy", is_histogram=False, is_print=True)
for name, param in model.named_parameters():
    if param.requires_grad:
        logger.add_indicator(name, is_histogram=True, is_print=False)
        logger.add_indicator(f"{name}_grad", is_histogram=True, is_print=False)

# Start the experiment
EXPERIMENT.start_train()

# Loop through the monitored iterator
for epoch in logger.loop(range(0, args.epochs)):
    # Delayed keyboard interrupt handling lets you use a keyboard
    # interrupt to end the loop cleanly: the interrupt is captured
    # and the loop finishes at the end of the current iteration,
    # i.e. it won't stop in the middle of an epoch.
    try:
        with logger.delayed_keyboard_interrupt():
            # Training and testing steps go here
            # (omitted in this snippet)
            ...
    except KeyboardInterrupt:
        # The interrupt surfaces here, between epochs
        break
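# A minimal sketch of the omitted training step, assuming `Net` returns
# log-probabilities as in the standard MNIST example (hypothetical, not
# from the original source):
import torch.nn.functional as F

def train_epoch():
    model.train()
    for data, target in train_loader:
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        loss = F.nll_loss(model(data), target)
        loss.backward()
        optimizer.step()
        # Feed the queued `train_loss` indicator registered above
        logger.store(train_loss=loss.item())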