# Secure your code as it's written. Use Snyk Code to scan source code in minutes — no build needed — and fix issues immediately.
# NOTE(review): truncated excerpt of an nnabla training-setup script for a
# "teacher" network (knowledge-distillation style). It relies on names defined
# outside this excerpt (`nn`, `F`, `S`, `args`, `image`, `c`, `h`, `w`,
# `teacher`, `maps`, `model_prediction`, `data_iterator`) — confirm against
# the full file before editing.
# Label variable: one integer class index per sample.
label = nn.Variable([args.batch_size, 1])
# Create `teacher` model_prediction graph.
pred = model_prediction(image, net=teacher, maps=maps, test=False)
# Keep the prediction buffer from being cleared during graph execution so it
# can be read back (e.g. for the training-error monitor) after backward.
pred.persistent = True
# Create loss function.
loss = F.mean(F.softmax_cross_entropy(pred, label))
# TEST
# Create input variables.
vimage = nn.Variable([args.batch_size, c, h, w])
vlabel = nn.Variable([args.batch_size, 1])
# Create teacher prediction graph. test=True presumably disables
# training-only behavior (dropout / BN updates) — verify in model_prediction.
vpred = model_prediction(vimage, net=teacher, maps=maps, test=True)
# Create Solver.
solver = S.Adam(args.learning_rate)
solver.set_parameters(nn.get_parameters())
# Create monitor.
from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
monitor = Monitor(args.monitor_path)
monitor_loss = MonitorSeries("Training loss", monitor, interval=10)
monitor_err = MonitorSeries("Training error", monitor, interval=10)
monitor_time = MonitorTimeElapsed("Training time", monitor, interval=100)
monitor_verr = MonitorSeries("Test error", monitor, interval=1)
# Initialize DataIterator
data = data_iterator(args.batch_size, True)
vdata = data_iterator(args.batch_size, False)
# Track the best validation error seen so far (1.0 == 100% error rate).
best_ve = 1.0
ve = 1.0
# Training loop.
# NOTE(review): this fragment starts mid-expression — the next line is the
# tail of a truncated `F.sigmoid_cross_entropy(...)` call whose opening lines
# are not part of this excerpt; it is not valid Python on its own.
pred_fake, F.constant(1, pred_fake.shape)))
# Detach the generated image from the generator graph so that discriminator
# gradients do not propagate back into the generator parameters.
fake_dis = fake.get_unlinked_variable(need_grad=True)
fake_dis.need_grad = True # TODO: Workaround until v1.0.2
pred_fake_dis = discriminator(fake_dis)
# Discriminator loss on generated images: target label 0 (fake).
loss_dis = F.mean(F.sigmoid_cross_entropy(
pred_fake_dis, F.constant(0, pred_fake_dis.shape)))
# Real path
# Discriminator loss on real 28x28 grayscale images: target label 1 (real).
x = nn.Variable([args.batch_size, 1, 28, 28])
pred_real = discriminator(x)
loss_dis += F.mean(F.sigmoid_cross_entropy(pred_real,
F.constant(1, pred_real.shape)))
# Create Solver.
solver_gen = S.Adam(args.learning_rate, beta1=0.5)
solver_dis = S.Adam(args.learning_rate, beta1=0.5)
# NOTE(review): the indentation of the `with` bodies has been lost in this
# excerpt; in the original each set_parameters call is inside its scope so
# each solver updates only its own sub-network's parameters.
with nn.parameter_scope("gen"):
solver_gen.set_parameters(nn.get_parameters())
with nn.parameter_scope("dis"):
solver_dis.set_parameters(nn.get_parameters())
# Create monitor.
import nnabla.monitor as M
monitor = M.Monitor(args.monitor_path)
monitor_loss_gen = M.MonitorSeries("Generator loss", monitor, interval=10)
monitor_loss_dis = M.MonitorSeries(
"Discriminator loss", monitor, interval=10)
monitor_time = M.MonitorTimeElapsed("Time", monitor, interval=100)
# normalize_method maps generator outputs from [-1, 1] to [0, 1] for dumping.
monitor_fake = M.MonitorImageTile(
"Fake images", monitor, normalize_method=lambda x: (x + 1) / 2.)
data = data_iterator_mnist(args.batch_size, True)
# --- GAN loss / solver / monitor setup (MNIST DCGAN-style snippet) ---
# Depends on names defined outside this excerpt: `fake`, `discriminator`,
# `args`, `nn`, `F`, `S` — confirm against the full file.
# Generator loss: the discriminator should classify generated images as
# real (target label 1).
pred_fake = discriminator(fake)
loss_gen = F.mean(F.sigmoid_cross_entropy(
    pred_fake, F.constant(1, pred_fake.shape)))
# Detach the generated image from the generator graph so discriminator
# gradients do not propagate back into the generator parameters.
fake_dis = fake.unlinked()
pred_fake_dis = discriminator(fake_dis)
# Discriminator loss on generated images: target label 0 (fake).
loss_dis = F.mean(F.sigmoid_cross_entropy(
    pred_fake_dis, F.constant(0, pred_fake_dis.shape)))
# Real path
# Discriminator loss on real 28x28 grayscale images: target label 1 (real).
x = nn.Variable([args.batch_size, 1, 28, 28])
pred_real = discriminator(x)
loss_dis += F.mean(F.sigmoid_cross_entropy(pred_real,
                                           F.constant(1, pred_real.shape)))
# Create Solver.
solver_gen = S.Adam(args.learning_rate, beta1=0.5)
solver_dis = S.Adam(args.learning_rate, beta1=0.5)
# Register each sub-network's parameters with its own solver so the two
# optimizers update disjoint parameter sets.
with nn.parameter_scope("gen"):
    solver_gen.set_parameters(nn.get_parameters())
with nn.parameter_scope("dis"):
    solver_dis.set_parameters(nn.get_parameters())
# Create monitor.
import nnabla.monitor as M
monitor = M.Monitor(args.monitor_path)
monitor_loss_gen = M.MonitorSeries("Generator loss", monitor, interval=10)
monitor_loss_dis = M.MonitorSeries(
    "Discriminator loss", monitor, interval=10)
monitor_time = M.MonitorTimeElapsed("Time", monitor, interval=100)
# BUG FIX: the original normalizer was `lambda x: x + 1 / 2.`, which by
# operator precedence computes x + 0.5 instead of mapping [-1, 1] -> [0, 1].
# Parenthesized to match the identical snippet earlier in this file.
monitor_fake = M.MonitorImageTile(
    "Fake images", monitor, normalize_method=lambda x: (x + 1) / 2.)
# NOTE(review): excerpt of an nnabla MNIST training setup for a "reference"
# network. Depends on names defined outside this excerpt (`nn`, `F`, `S`,
# `args`, `image`, `reference`, `mnist_cnn_prediction`,
# `data_iterator_mnist`) — confirm against the full file.
# Label variable: one integer class index per sample.
label = nn.Variable([args.batch_size, 1])
# Create `reference` prediction graph.
pred = mnist_cnn_prediction(image, scope=reference, test=False)
# Keep the prediction buffer so it can be read back after backward.
pred.persistent = True
# Create loss function.
loss = F.mean(F.softmax_cross_entropy(pred, label))
# TEST
# Create input variables.
vimage = nn.Variable([args.batch_size, 1, 28, 28])
vlabel = nn.Variable([args.batch_size, 1])
# Create reference prediction graph.
vpred = mnist_cnn_prediction(vimage, scope=reference, test=True)
# Create Solver.
solver = S.Adam(args.learning_rate)
solver.set_parameters(nn.get_parameters())
# Create monitor.
from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
monitor = Monitor(args.monitor_path)
monitor_loss = MonitorSeries("Training loss", monitor, interval=10)
monitor_err = MonitorSeries("Training error", monitor, interval=10)
monitor_time = MonitorTimeElapsed("Training time", monitor, interval=100)
monitor_verr = MonitorSeries("Test error", monitor, interval=10)
# Initialize DataIterator for MNIST.
data = data_iterator_mnist(args.batch_size, True)
vdata = data_iterator_mnist(args.batch_size, False)
# Track the best validation error seen so far (1.0 == 100% error rate).
best_ve = 1.0
ve = 1.0
# Training loop.
# NOTE(review): excerpt of a standard nnabla MNIST classification training
# setup. Depends on names defined outside this excerpt (`nn`, `F`, `S`,
# `args`, `image`, `mnist_cnn_prediction`, `data_iterator_mnist`).
# Label variable: one integer class index per sample.
label = nn.Variable([args.batch_size, 1])
# Create prediction graph.
pred = mnist_cnn_prediction(image, test=False, aug=args.augment_train)
# Keep the prediction buffer so it can be read back after backward.
pred.persistent = True
# Create loss function.
loss = F.mean(F.softmax_cross_entropy(pred, label))
# TEST
# Create input variables.
vimage = nn.Variable([args.batch_size, 1, 28, 28])
vlabel = nn.Variable([args.batch_size, 1])
# Create prediction graph.
vpred = mnist_cnn_prediction(vimage, test=True, aug=args.augment_test)
# Create Solver.
solver = S.Adam(args.learning_rate)
solver.set_parameters(nn.get_parameters())
# Create monitor.
from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
monitor = Monitor(args.monitor_path)
monitor_loss = MonitorSeries("Training loss", monitor, interval=10)
monitor_err = MonitorSeries("Training error", monitor, interval=10)
monitor_time = MonitorTimeElapsed("Training time", monitor, interval=100)
monitor_verr = MonitorSeries("Test error", monitor, interval=10)
# Initialize DataIterator for MNIST.
from numpy.random import RandomState
# Fixed seed (1223) makes the training-data shuffling order reproducible.
data = data_iterator_mnist(args.batch_size, True, rng=RandomState(1223))
vdata = data_iterator_mnist(args.batch_size, False)
# Training loop.
# NOTE(review): the loop body is truncated in this excerpt — only the `for`
# header survives; do not treat the following lines as its body.
for i in range(args.max_iter):
# --- pix2pix/facade data, solver, and training kickoff ---
# Depends on names defined outside this excerpt: `logger`, `facade`, `nm`,
# `S`, `unet`, `train`, `args` — confirm against the full file.
# Data Loading
# Typo fixed in the log message: "Initialing" -> "Initializing".
logger.info("Initializing DataSource.")
train_iterator = facade.facade_data_iterator(
    args.traindir,
    args.batchsize,
    shuffle=True,
    with_memory_cache=False)
# Validation iterator: deterministic (no shuffle, no random crop).
val_iterator = facade.facade_data_iterator(
    args.valdir,
    args.batchsize,
    random_crop=False,
    shuffle=False,
    with_memory_cache=False)
monitor = nm.Monitor(args.logdir)
solver_gen = S.Adam(alpha=args.lrate, beta1=args.beta1)
solver_dis = S.Adam(alpha=args.lrate, beta1=args.beta1)
generator = unet.generator
discriminator = unet.discriminator
# Run training; `train` returns the path of the saved model (or None).
model_path = train(generator, discriminator, args.patch_gan,
                   solver_gen, solver_dis,
                   args.weight_l1, train_iterator, val_iterator,
                   args.epoch, monitor, args.monitor_interval)
# NOTE(review): truncated fragment — the indentation of the nested `if`
# bodies has been lost in this excerpt, and the facade_data_iterator call is
# cut off mid-argument-list at the end. Only generate when training produced
# a saved model.
if args.generate:
if model_path is not None:
# Data Loading
logger.info("Generating from DataSource.")
test_iterator = facade.facade_data_iterator(
args.testdir,
# NOTE(review): excerpt of an nnabla NNP-export setup. Depends on names
# defined outside this excerpt (`ctx`, `args`, `args_added`, `nn`, `F`, `S`,
# `mnist_lenet_prediction`, `mnist_resnet_prediction`); the
# `training_contents` dict is cut off at the end of this fragment.
nn.set_default_context(ctx)
# Select the network architecture; LeNet is the default, ResNet opt-in.
mnist_cnn_prediction = mnist_lenet_prediction
if args.net == 'resnet':
mnist_cnn_prediction = mnist_resnet_prediction
# Create a computation graph to be saved.
x = nn.Variable([args.batch_size, 1, 28, 28])
t = nn.Variable([args.batch_size, 1])
# Separate training (test=False) and validation (test=True) graphs sharing
# the same input variables.
h_t = mnist_cnn_prediction(x, test=False, aug=False)
loss_t = F.mean(F.softmax_cross_entropy(h_t, t))
h_v = mnist_cnn_prediction(x, test=True, aug=False)
loss_v = F.mean(F.softmax_cross_entropy(h_v, t))
# Create Solver.
solver = S.Adam(args.learning_rate)
solver.set_parameters(nn.get_parameters())
# Save NNP file (used in C++ inference later.).
nnp_file = '{}_initialized.nnp'.format(args.net)
training_contents = {
'global_config': {'default_context': ctx},
'training_config':
{'max_epoch': args.max_epoch,
'iter_per_epoch': args_added.iter_per_epoch,
'save_best': True},
'networks': [
{'name': 'training',
'batch_size': args.batch_size,
'outputs': {'loss': loss_t},
'names': {'x': x, 'y': t, 'loss': loss_t}},
{'name': 'validation',
# NOTE(review): excerpt of a training setup with structured-sparsity (SSL)
# regularization. Depends on names defined outside this excerpt (`pred`,
# `label`, `ssl_regularization`, `model_prediction`, `maps`, `c`, `h`, `w`,
# `args`, `nn`, `F`, `S`, `data_iterator`).
# Create loss function.
loss = F.mean(F.softmax_cross_entropy(pred, label))
# SSL Regularization
# Adds filter-wise and channel-wise group-decay penalties over all
# parameters; weights come from args.filter_decay / args.channel_decay.
loss += ssl_regularization(nn.get_parameters(),
args.filter_decay, args.channel_decay)
# TEST
# Create input variables.
vimage = nn.Variable([args.batch_size, c, h, w])
vlabel = nn.Variable([args.batch_size, 1])
# Create prediction graph.
vpred = model_prediction(vimage, maps=maps, test=True)
# Create Solver.
solver = S.Adam(args.learning_rate)
solver.set_parameters(nn.get_parameters())
# Create monitor.
from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
monitor = Monitor(args.monitor_path)
monitor_loss = MonitorSeries("Training loss", monitor, interval=10)
monitor_err = MonitorSeries("Training error", monitor, interval=10)
monitor_time = MonitorTimeElapsed("Training time", monitor, interval=100)
monitor_verr = MonitorSeries("Test error", monitor, interval=1)
# Initialize DataIterator
data = data_iterator(args.batch_size, True)
vdata = data_iterator(args.batch_size, False)
# Track the best validation error seen so far (1.0 == 100% error rate).
best_ve = 1.0
ve = 1.0
# Training loop.
# NOTE(review): excerpt of a StarGAN-style generator-loss / solver / monitor
# setup; both the start and end are truncated and the `with`-body indentation
# has been lost. Depends on names defined outside this excerpt (`generator`,
# `x_fake_unlinked`, `label_org`, `loss`, `x_real`, `g_loss_fake`,
# `g_loss_cls`, `args`, `nn`, `S`, `Monitor`, `MonitorSeries`).
with nn.parameter_scope("gen"):
# Cycle-reconstruction: map the fake image back to the original domain.
x_recon = generator(x_fake_unlinked, label_org)
x_recon.persistent = True
g_loss_rec = loss.recon_loss(x_real, x_recon)
g_loss_rec.persistent = True
# total generator loss.
# Weighted sum: adversarial + lambda_rec * reconstruction
# + lambda_cls * classification.
g_loss = g_loss_fake + args.lambda_rec * \
g_loss_rec + args.lambda_cls * g_loss_cls
# -------------------- Solver Setup ---------------------
d_lr = args.d_lr # initial learning rate for Discriminator
g_lr = args.g_lr # initial learning rate for Generator
solver_dis = S.Adam(alpha=args.d_lr, beta1=args.beta1, beta2=args.beta2)
solver_gen = S.Adam(alpha=args.g_lr, beta1=args.beta1, beta2=args.beta2)
# register parameters to each solver.
with nn.parameter_scope("dis"):
solver_dis.set_parameters(nn.get_parameters())
with nn.parameter_scope("gen"):
solver_gen.set_parameters(nn.get_parameters())
# -------------------- Create Monitors --------------------
monitor = Monitor(args.monitor_path)
monitor_d_cls_loss = MonitorSeries(
'real_classification_loss', monitor, args.log_step)
monitor_g_cls_loss = MonitorSeries(
'fake_classification_loss', monitor, args.log_step)
monitor_loss_dis = MonitorSeries(
'discriminator_loss', monitor, args.log_step)