vimage = nn.Variable([args.batch_size, c, h, w])
vlabel = nn.Variable([args.batch_size, 1])
# Create prediction graph.
vpred = model_prediction(vimage, maps=maps, test=True)
# Set mask
create_and_set_mask(nn.get_parameters(grad_only=False),
                    rrate=args.reduction_rate)
# Create Solver.
solver = S.Adam(args.learning_rate)
solver.set_parameters(nn.get_parameters())
# Create monitor.
from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
monitor = Monitor(args.monitor_path)
monitor_loss = MonitorSeries("Training loss", monitor, interval=10)
monitor_err = MonitorSeries("Training error", monitor, interval=10)
monitor_time = MonitorTimeElapsed("Training time", monitor, interval=100)
monitor_verr = MonitorSeries("Test error", monitor, interval=1)
# Initialize DataIterator
data = data_iterator(args.batch_size, True)
vdata = data_iterator(args.batch_size, False)
best_ve = 1.0
ve = 1.0
# Training loop.
for i in range(args.max_iter):
    if i % args.val_interval == 0:
        # Validation
        ve = 0.0
        for j in range(int(n_valid / args.batch_size)):
            vimage.d, vlabel.d = vdata.next()
            vpred.forward(clear_buffer=True)
            ve += categorical_error(vpred.d, vlabel.d)
        monitor_verr.add(i, ve / int(n_valid / args.batch_size))
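
# Hedged sketch (not from the original source): one plausible shape for the
# `create_and_set_mask` helper used above, assuming magnitude-based pruning
# where `rrate` is the fraction of weights to zero out in each parameter.
import numpy as np

def create_and_set_mask_sketch(params, rrate):
    for name, p in params.items():
        w = np.abs(p.d).flatten()
        # Threshold at the rrate-quantile of the weight magnitudes.
        threshold = np.sort(w)[min(int(len(w) * rrate), len(w) - 1)]
        mask = (np.abs(p.d) >= threshold).astype(p.d.dtype)
        p.d = p.d * mask  # Zero out the smallest-magnitude weights.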
vimage = nn.Variable([args.batch_size, 1, 28, 28])
vlabel = nn.Variable([args.batch_size, 1])
vx = vimage / 255.0
with nn.parameter_scope("capsnet"):
    _, _, _, _, vpred = model.capsule_net(vx, test=True, aug=False)
# Create Solver.
solver = S.Adam(args.learning_rate)
solver.set_parameters(nn.get_parameters())
# Create monitor.
from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
train_iter = int(60000 / args.batch_size)
val_iter = int(10000 / args.batch_size)
logger.info("#Train: {} #Validation: {}".format(train_iter, val_iter))
monitor = Monitor(args.monitor_path)
monitor_loss = MonitorSeries("Training loss", monitor, interval=1)
monitor_mloss = MonitorSeries("Training margin loss", monitor, interval=1)
monitor_rloss = MonitorSeries(
    "Training reconstruction loss", monitor, interval=1)
monitor_err = MonitorSeries("Training error", monitor, interval=1)
monitor_time = MonitorTimeElapsed("Training time", monitor, interval=1)
monitor_verr = MonitorSeries("Test error", monitor, interval=1)
monitor_lr = MonitorSeries("Learning rate", monitor, interval=1)
# Save the initial model as an NNP file.
m_image, m_label, m_noise, m_recon = model_tweak_digitscaps(
    args.batch_size)
contents = save_nnp({'x1': m_image, 'x2': m_label, 'x3': m_noise},
                    {'y': m_recon}, args.batch_size)
save.save(os.path.join(args.monitor_path,
                       'capsnet_epoch0_result.nnp'), contents)
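
# Hedged sketch (assumption): loading the NNP file saved above for inference
# with nnabla's NnpLoader. The stored network name depends on how save_nnp
# registered it, so it is looked up rather than hard-coded.
from nnabla.utils.nnp_graph import NnpLoader

nnp = NnpLoader(os.path.join(args.monitor_path, 'capsnet_epoch0_result.nnp'))
net_name = nnp.get_network_names()[0]
net = nnp.get_network(net_name, batch_size=args.batch_size)
print(net.inputs, net.outputs)  # Dicts mapping names to nnabla Variables.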
loss = F.mean(F.softmax_cross_entropy(pred, label))
# TEST
# Create input variables.
vimage = nn.Variable([args.batch_size, c, h, w])
vlabel = nn.Variable([args.batch_size, 1])
# Create prediction graph.
vpred = model_prediction(vimage, maps=maps, test=True)
# Create Solver.
solver = S.Adam(args.learning_rate)
solver.set_parameters(nn.get_parameters())
# Create monitor.
from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
monitor = Monitor(args.monitor_path)
monitor_loss = MonitorSeries("Training loss", monitor, interval=10)
monitor_err = MonitorSeries("Training error", monitor, interval=10)
monitor_time = MonitorTimeElapsed("Training time", monitor, interval=100)
monitor_verr = MonitorSeries("Test error", monitor, interval=1)
# Initialize DataIterator
data = data_iterator(args.batch_size, True)
vdata = data_iterator(args.batch_size, False)
best_ve = 1.0
ve = 1.0
# Training loop.
for i in range(args.max_iter):
    if i % args.val_interval == 0:
        # Validation
        ve = 0.0
        for j in range(int(n_valid / args.batch_size)):
            vimage.d, vlabel.d = vdata.next()
            vpred.forward(clear_buffer=True)
            ve += categorical_error(vpred.d, vlabel.d)
        monitor_verr.add(i, ve / int(n_valid / args.batch_size))
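
# Hedged sketch (assumption): a typical `categorical_error` helper as used in
# the validation loops above, computing the top-1 error rate with NumPy.
import numpy as np

def categorical_error(pred, label):
    # pred: (batch_size, n_classes) scores; label: (batch_size, 1) int labels.
    pred_label = pred.argmax(1)
    return (pred_label != label.flat).mean()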
loss = F.mean(F.softmax_cross_entropy(pred, label))
# TEST
# Create input variables.
vimage = nn.Variable([args.batch_size, c, h, w])
vlabel = nn.Variable([args.batch_size, 1])
# Create teacher prediction graph.
vpred = model_prediction(vimage, net=teacher, maps=maps, test=True)
# Create Solver.
solver = S.Adam(args.learning_rate)
solver.set_parameters(nn.get_parameters())
# Create monitor.
from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
monitor = Monitor(args.monitor_path)
monitor_loss = MonitorSeries("Training loss", monitor, interval=10)
monitor_err = MonitorSeries("Training error", monitor, interval=10)
monitor_time = MonitorTimeElapsed("Training time", monitor, interval=100)
monitor_verr = MonitorSeries("Test error", monitor, interval=1)
# Initialize DataIterator
data = data_iterator(args.batch_size, True)
vdata = data_iterator(args.batch_size, False)
best_ve = 1.0
ve = 1.0
# Training loop.
for i in range(args.max_iter):
    if i % args.val_interval == 0:
        # Validation
        ve = 0.0
        for j in range(int(n_valid / args.batch_size)):
            vimage.d, vlabel.d = vdata.next()
            vpred.forward(clear_buffer=True)
            ve += categorical_error(vpred.d, vlabel.d)
        monitor_verr.add(i, ve / int(n_valid / args.batch_size))
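
# Hedged sketch (assumption): a minimal soft-target distillation loss built on
# the teacher graph above. `image` (a training input), `net=student`, and the
# temperature `T` are placeholders; the original loss definition is not shown.
student_pred = model_prediction(image, net=student, maps=maps, test=False)
teacher_pred = model_prediction(image, net=teacher, maps=maps, test=True)
teacher_pred.need_grad = False  # Do not backpropagate into the teacher.
T = 5.0
soft_teacher = F.softmax(teacher_pred / T)
log_student = F.log_softmax(student_pred / T)
# Cross-entropy between teacher soft targets and student log-probabilities.
loss_distill = -F.mean(F.sum(soft_teacher * log_student, axis=1))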
r = nn.Variable((args.batchsize_u,) + shape_x, need_grad=True)
eps = nn.Variable((args.batchsize_u,) + shape_x, need_grad=False)
loss_u, yu = vat(xu, r, eps, forward, distance)
# Net for evaluating validation data
xv = nn.Variable((args.batchsize_v,) + shape_x, need_grad=False)
hv = forward(xv, test=True)
tv = nn.Variable((args.batchsize_v, 1), need_grad=False)
# Create solver
solver = S.Adam(args.learning_rate)
solver.set_parameters(nn.get_parameters())
# Monitor training and validation stats.
import nnabla.monitor as M
monitor = M.Monitor(args.model_save_path)
monitor_verr = M.MonitorSeries("Test error", monitor, interval=240)
monitor_time = M.MonitorTimeElapsed("Elapsed time", monitor, interval=240)
# Training Loop.
t0 = time.time()
for i in range(args.max_iter):
    # Validation Test
    if i % args.val_interval == 0:
        n_error = calc_validation_error(
            di_v, xv, tv, hv, args.val_iter)
        monitor_verr.add(i, n_error)

    #################################
    ## Training by Labeled Data #####
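
# Hedged sketch (assumption): one plausible form of the `calc_validation_error`
# helper called above, mirroring the validation loops elsewhere in these
# examples (`categorical_error` computes the top-1 error rate).
def calc_validation_error_sketch(di_v, xv, tv, hv, val_iter):
    ve = 0.0
    for _ in range(val_iter):
        xv.d, tv.d = di_v.next()
        hv.forward(clear_buffer=True)
        ve += categorical_error(hv.d, tv.d)
    return ve / val_iter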
# TEST
# Create input variables.
vimage0 = nn.Variable([args.batch_size, 1, 28, 28])
vimage1 = nn.Variable([args.batch_size, 1, 28, 28])
vlabel = nn.Variable([args.batch_size])
# Create prediction graph.
vpred = mnist_lenet_siamese(vimage0, vimage1, test=True)
vloss = F.mean(contrastive_loss(vpred, vlabel, margin))
# Create Solver.
solver = S.Adam(args.learning_rate)
solver.set_parameters(nn.get_parameters())
# Create monitor.
import nnabla.monitor as M
monitor = M.Monitor(args.monitor_path)
monitor_loss = M.MonitorSeries("Training loss", monitor, interval=10)
monitor_time = M.MonitorTimeElapsed("Training time", monitor, interval=100)
monitor_vloss = M.MonitorSeries("Test loss", monitor, interval=10)
# Initialize DataIterator for MNIST.
rng = np.random.RandomState(313)
data = siamese_data_iterator(args.batch_size, True, rng)
vdata = siamese_data_iterator(args.batch_size, False, rng)
# Training loop.
for i in range(args.max_iter):
    if i % args.val_interval == 0:
        # Validation
        ve = 0.0
        for j in range(args.val_iter):
            vimage0.d, vimage1.d, vlabel.d = vdata.next()
            vloss.forward(clear_buffer=True)
            ve += vloss.d
        monitor_vloss.add(i, ve / args.val_iter)
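
# Hedged sketch (assumption): a contrastive loss of the form commonly paired
# with Siamese networks, where `sd` is the squared distance between the two
# embeddings and `l` is 1 for similar pairs, 0 for dissimilar ones.
def contrastive_loss_sketch(sd, l, margin=1.0, eps=1e-4):
    sim_cost = l * sd  # Pull similar pairs together.
    dissim_cost = (1 - l) * \
        (F.maximum_scalar(margin - (sd + eps) ** 0.5, 0) ** 2)
    return sim_cost + dissim_cost  # Push dissimilar pairs past the margin.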
x = nn.Variable([args.batch_size, 1, 28, 28])
pred_real = discriminator(x)
loss_dis += F.mean(F.sigmoid_cross_entropy(pred_real,
                                           F.constant(1, pred_real.shape)))
# Create Solver.
solver_gen = S.Adam(args.learning_rate, beta1=0.5)
solver_dis = S.Adam(args.learning_rate, beta1=0.5)
with nn.parameter_scope("gen"):
solver_gen.set_parameters(nn.get_parameters())
with nn.parameter_scope("dis"):
solver_dis.set_parameters(nn.get_parameters())
# Create monitor.
import nnabla.monitor as M
monitor = M.Monitor(args.monitor_path)
monitor_loss_gen = M.MonitorSeries("Generator loss", monitor, interval=10)
monitor_loss_dis = M.MonitorSeries(
    "Discriminator loss", monitor, interval=10)
monitor_time = M.MonitorTimeElapsed("Time", monitor, interval=100)
monitor_fake = M.MonitorImageTile(
    "Fake images", monitor, normalize_method=lambda x: (x + 1) / 2.)
data = data_iterator_mnist(args.batch_size, True)
# Training loop.
for i in range(args.max_iter):
    if i % args.model_save_interval == 0:
        with nn.parameter_scope("gen"):
            nn.save_parameters(os.path.join(
                args.model_save_path, "generator_param_%06d.h5" % i))
        with nn.parameter_scope("dis"):
            nn.save_parameters(os.path.join(
                args.model_save_path, "discriminator_param_%06d.h5" % i))
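
# Hedged sketch (assumption): the matching generator-side loss for the GAN
# above, labeling generated samples as real so the generator is rewarded for
# fooling the discriminator. `generator` and the latent shape are assumptions.
z = nn.Variable([args.batch_size, 100, 1, 1])
with nn.parameter_scope("gen"):
    fake = generator(z)
with nn.parameter_scope("dis"):
    pred_fake = discriminator(fake)
loss_gen = F.mean(F.sigmoid_cross_entropy(
    pred_fake, F.constant(1, pred_fake.shape)))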
loss = F.mean(F.softmax_cross_entropy(pred, label))
# TEST
# Create input variables.
vimage = nn.Variable([args.batch_size, 1, 28, 28])
vlabel = nn.Variable([args.batch_size, 1])
# Create prediction graph.
vpred = mnist_cnn_prediction(vimage / 255, test=True)
# Create Solver.
solver = S.Adam(args.learning_rate)
solver.set_parameters(nn.get_parameters())
# Create monitor.
import nnabla.monitor as M
monitor = M.Monitor(args.monitor_path)
monitor_loss = M.MonitorSeries("Training loss", monitor, interval=10)
monitor_err = M.MonitorSeries("Training error", monitor, interval=10)
monitor_time = M.MonitorTimeElapsed("Training time", monitor, interval=100)
monitor_verr = M.MonitorSeries("Test error", monitor, interval=10)
# Training loop.
for i in range(args.max_iter):
    if i % args.val_interval == 0:
        # Validation
        ve = 0.0
        for j in range(args.val_iter):
            vimage.d, vlabel.d = vdata.next()
            vpred.forward(clear_buffer=True)
            ve += categorical_error(vpred.d, vlabel.d)
        monitor_verr.add(i, ve / args.val_iter)
    if i % args.model_save_interval == 0:
        # The file name pattern below is an assumption for this sketch.
        nn.save_parameters(os.path.join(
            args.model_save_path, 'params_%06d.h5' % i))
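
    # Hedged sketch (assumption): the training step that typically follows the
    # validation/save blocks above; `data`, `image`, and `label` belong to the
    # training graph, which is not shown in this snippet.
    image.d, label.d = data.next()
    solver.zero_grad()
    loss.forward(clear_buffer=True)
    loss.backward(clear_buffer=True)
    solver.update()
    monitor_loss.add(i, loss.d.copy())
    monitor_time.add(i)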
image_size = args.image_size
n_classes = args.n_classes
not_sn = args.not_sn
threshold = args.truncation_threshold
# Model
nn.load_parameters(args.model_load_path)
z = nn.Variable([batch_size, latent])
y_fake = nn.Variable([batch_size])
x_fake = generator(z, y_fake, maps=maps, n_classes=n_classes, test=True, sn=not_sn)\
    .apply(persistent=True)
# Generate All
if args.generate_all:
    # Monitor
    monitor = Monitor(args.monitor_path)
    name = "Generated Image Tile All"
    monitor_image = MonitorImageTile(name, monitor, interval=1,
                                     num_images=args.batch_size,
                                     normalize_method=normalize_method)
    # Generate images for all classes
    for class_id in range(args.n_classes):
        # Generate
        z_data = resample(batch_size, latent, threshold)
        y_data = generate_one_class(class_id, batch_size)
        z.d = z_data
        y_fake.d = y_data
        x_fake.forward(clear_buffer=True)
        monitor_image.add(class_id, x_fake.d)
    return
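
# Hedged sketch (assumption): a truncation-trick sampler matching the
# `resample` calls in this example, redrawing latent entries whose magnitude
# exceeds `threshold` (assumes `import numpy as np`).
def resample_sketch(batch_size, latent, threshold):
    z = np.random.randn(batch_size, latent)
    redo = np.abs(z) > threshold
    while redo.any():
        z[redo] = np.random.randn(np.count_nonzero(redo))
        redo = np.abs(z) > threshold
    return z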
# Generate Individually
monitor = Monitor(args.monitor_path)
name = "Generated Image Tile {}".format(
args.class_id) if args.class_id != -1 else "Generated Image Tile"
monitor_image_tile = MonitorImageTile(name, monitor, interval=1,
num_images=args.batch_size,
normalize_method=normalize_method)
name = "Generated Image {}".format(
args.class_id) if args.class_id != -1 else "Generated Image"
monitor_image = MonitorImage(name, monitor, interval=1,
num_images=args.batch_size,
normalize_method=normalize_method)
z_data = resample(batch_size, latent, threshold)
y_data = generate_random_class(n_classes, batch_size) if args.class_id == -1 else \
    generate_one_class(args.class_id, batch_size)
z.d = z_data
y_fake.d = y_data
x_fake.forward(clear_buffer=True)
# Write the sampled batch out through the monitors created above
# (step index 0 is an assumption for this one-shot generation).
monitor_image.add(0, x_fake.d)
monitor_image_tile.add(0, x_fake.d)
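
# Hedged sketch (assumption): minimal class-label helpers matching the
# `generate_one_class` / `generate_random_class` calls above (assumes
# `import numpy as np`).
def generate_one_class_sketch(class_id, batch_size):
    return np.full((batch_size,), class_id)  # One class id for the whole batch.

def generate_random_class_sketch(n_classes, batch_size):
    return np.random.choice(n_classes, size=(batch_size,))  # Random id per sample.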