Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def forward(self, inputs):
    """Rearrange groups of input channels into spatial tiles.

    Splits the ``c`` input channels into ``ksize**2`` groups of
    ``kc = c // ksize**2`` channels each; group ``(j, i)`` is copied into
    the ``(j, i)`` spatial tile of an ``(n, kc, h, w)`` output.

    Args:
        inputs: Tuple whose first element is an array of shape
            ``(n, c, h, w)``.

    Returns:
        Tuple containing one float32 array of shape ``(n, kc, h, w)``.
    """
    xp = cuda.get_array_module(*inputs)
    x = inputs[0]
    n, c, h, w = x.shape
    kh = h // self.ksize           # tile height
    kw = w // self.ksize           # tile width
    kc = c // (self.ksize ** 2)    # channels per tile
    y = xp.zeros((n, kc, h, w), dtype=np.float32)
    # BUG FIX: ``xrange`` is Python-2-only and raises NameError on Python 3;
    # ``range`` iterates identically in both.
    for j in range(self.ksize):
        for i in range(self.ksize):
            y1 = kh * j
            y2 = y1 + kh
            x1 = kw * i
            x2 = x1 + kw
            c1 = kc * (j * self.ksize + i)
            c2 = c1 + kc
            y[:, :, y1:y2, x1:x2] = x[:, c1:c2, y1:y2, x1:x2]
    return y,
def check_non_maximum_suppression(self, bbox, threshold, expect):
    """Run NMS on ``bbox`` and verify the selected indices match ``expect``."""
    selected = non_maximum_suppression(bbox, threshold)
    # The selection must stay on the same array type (CPU/GPU) as the input
    # and carry int32 indices.
    self.assertIsInstance(selected, type(bbox))
    self.assertEqual(selected.dtype, np.int32)
    actual = cuda.to_cpu(selected)
    expected = cuda.to_cpu(expect)
    np.testing.assert_equal(actual, expected)
def check_backward_gpu(self, op, **kwargs):
    """GPU variant of the backward check: move the fixtures to the device first."""
    x_dev = cuda.to_gpu(self.x)
    gy_dev = cuda.to_gpu(self.gy)
    self.check_backward(op, x_dev, gy_dev, **kwargs)
dataset_train, batch_size=batch_size)
# Validation iterator: single pass (repeat=False) in fixed order (shuffle=False).
iter_val = chainer.iterators.SerialIterator(
    dataset_val, batch_size=batch_size, repeat=False, shuffle=False)
# 2. model
vgg_path = data.download_vgg16_chainermodel()
vgg = models.VGG16()
chainer.serializers.load_hdf5(vgg_path, vgg)
n_class = len(dataset_train.label_names)
model = models.FCN32s(n_class=n_class)
model.train = True
# Presumably copies matching pretrained VGG16 layers into the FCN model
# (helper defined elsewhere) — confirm against utils.copy_chainermodel.
utils.copy_chainermodel(vgg, model)
if len(gpus) > 1 or gpus[0] >= 0:
    chainer.cuda.get_device(gpus[0]).use()
if len(gpus) == 1 and gpus[0] >= 0:
    # Single-GPU case only: with multiple GPUs the ParallelUpdater below
    # handles device placement itself.
    model.to_gpu()
# 3. optimizer
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(rate=0.0005))
# 4. trainer
if len(gpus) > 1:
    # Map the first GPU as 'main' and the rest as named extra devices
    # for data-parallel training.
    devices = {'main': gpus[0]}
    for gpu in gpus[1:]:
        devices['gpu{}'.format(gpu)] = gpu
    updater = chainer.training.ParallelUpdater(
        iter_train, optimizer, devices=devices)
else:
updater = chainer.training.StandardUpdater(
with self.init_scope():
    # W has shape (left_size, right_size, out_size) — presumably the
    # weight of a bilinear link coupling the two inputs; confirm with
    # the enclosing class.
    shape = (left_size, right_size, out_size)
    if isinstance(initialW, (numpy.ndarray, cuda.ndarray)):
        # An explicit array initializer must already have the right shape.
        assert initialW.shape == shape
    self.W = variable.Parameter(
        initializers._get_initializer(initialW), shape)
    if not self.nobias:
        # Linear terms: V1 for the left input, V2 for the right input,
        # plus a bias vector b.
        V1_shape = (left_size, out_size)
        V2_shape = (right_size, out_size)
        b_shape = (out_size,)
        if isinstance(initial_bias, tuple):
            # Caller supplied explicit (V1, V2, b) initializers or arrays.
            initialV1, initialV2, initialb = initial_bias
            if isinstance(initialV1, (numpy.ndarray, cuda.ndarray)):
                assert initialV1.shape == V1_shape
            if isinstance(initialV2, (numpy.ndarray, cuda.ndarray)):
                assert initialV2.shape == V2_shape
            if isinstance(initialb, (numpy.ndarray, cuda.ndarray)):
                assert initialb.shape == b_shape
            initialV1 = initializers._get_initializer(initialV1)
            initialV2 = initializers._get_initializer(initialV2)
            initialb = initializers._get_initializer(initialb)
        elif initial_bias is None:
            # Defaults: framework default initializer for V1/V2, 0 for b.
            initialV1 = initializers._get_initializer(None)
            initialV2 = initializers._get_initializer(None)
            initialb = 0
        else:
            raise ValueError('initial_bias must be tuple or None')
        self.V1 = variable.Parameter(initialV1, V1_shape)
        self.V2 = variable.Parameter(initialV2, V2_shape)
        self.b = variable.Parameter(initialb, b_shape)
    - score (float): hypothesis score
    pair of ~chainer.Variable(s)): decoder state of best hypothesis
"""
# encoder: produce the state that seeds the decoder.
es, ey = self.encoder(es, [x])
# beam search
ds = self.decoder.initialize(es, ey, sos)
# Each hypothesis is (token list so far, cumulative score, decoder state).
hyplist = [([], 0., ds)]
best_state = None   # (score, decoder state) of best completed hypothesis
comp_hyplist = []   # completed hypotheses: (token list, final score)
for l in six.moves.range(maxlen):
    new_hyplist = []
    argmin = 0      # index of the worst hypothesis currently in new_hyplist
    for out, lp, st in hyplist:
        logp = self.decoder.predict(st)
        # Candidate scores: per-token decoder log-probs plus accumulated score.
        lp_vec = cuda.to_cpu(logp.data[0]) + lp
        if l > 0:
            # Option: terminate this hypothesis with <eos>; the penalty term
            # scales with the output length.
            new_lp = lp_vec[eos] + penalty * (len(out) + 1)
            new_st = self.decoder.update(st, eos)
            comp_hyplist.append((out, new_lp))
            if best_state is None or best_state[0] < new_lp:
                best_state = (new_lp, new_st)
        # Expand with non-terminal tokens, best-scoring first.
        for o in np.argsort(lp_vec)[::-1]:
            if o == unk or o == eos:  # exclude <unk> and <eos> from expansion
                continue
            new_lp = lp_vec[o]
            if len(new_hyplist) == beam:
                # Beam full: replace the current worst entry if this
                # candidate scores higher, then recompute the worst index.
                if new_hyplist[argmin][1] < new_lp:
                    new_st = self.decoder.update(st, o)
                    new_hyplist[argmin] = (out + [o], new_lp, new_st)
                    argmin = min(enumerate(new_hyplist), key=lambda h: h[1][1])[0]
gs.tight_layout(fig)
plt.savefig(filename)
def sample(dataset, n_samples):
    """Draw ``n_samples`` random images from ``dataset`` and move them to GPU.

    Args:
        dataset: Indexable dataset yielding ``(image, label)`` pairs that
            supports indexing by an array of indices (e.g. chainer MNIST).
        n_samples (int): Number of images to draw (with replacement).

    Returns:
        The sampled images transferred to the GPU selected by ``args.gpu``.
    """
    # BUG FIX: the original indexed the global ``test_mnist`` while sizing
    # the draw by ``dataset`` — use the parameter consistently.
    images, _ = dataset[np.random.choice(len(dataset), n_samples)]
    # NOTE(review): still depends on the module-level ``args.gpu``.
    images = chainer.cuda.to_gpu(images, args.gpu)
    return images
# Setup model, dataset
model = MLP(args.unit, 10)  # 10-way classifier over MNIST digits
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
xp = chainer.cuda.get_array_module(model)
# Load the trained weights after the model is already on the device.
chainer.serializers.load_npz(args.model, model)
_, test_mnist = chainer.datasets.get_mnist()
# Fast Gradient Sign Method (simple): a single step with a large epsilon.
images = sample(test_mnist, N_gen)
adv_images = fgsm(model, images, eps=0.2)
prob = F.softmax(model(adv_images), axis=1).data
visualize(cupy.asnumpy(adv_images), cupy.asnumpy(prob), img_size, 'fgsm.png')
# Fast Gradient Sign Method (iterative): many small steps instead of one.
images = sample(test_mnist, N_gen)
adv_images = fgsm(model, images, eps=0.01, iterations=20)
prob = F.softmax(model(adv_images), axis=1).data
visualize(cupy.asnumpy(adv_images), cupy.asnumpy(prob), img_size,
          'fgsm_iterative.png')
"""Compute a loss.
Returns:
Returns:
chainer.Variable: Scalar loss.
"""
y, taus = self._compute_y_and_taus(exp_batch)
with chainer.no_backprop_mode():
t = self._compute_target_values(exp_batch)
eltwise_loss = compute_eltwise_huber_quantile_loss(y, t, taus)
if errors_out is not None:
del errors_out[:]
delta = F.mean(abs(eltwise_loss), axis=(1, 2))
errors_out.extend(cuda.to_cpu(delta.array))
if self.batch_accumulator == 'sum':
# mean over N_prime, then sum over (batch_size, N)
return F.sum(F.mean(eltwise_loss, axis=2))
else:
# mean over (batch_size, N_prime), then sum over N
return F.sum(F.mean(eltwise_loss, axis=(0, 2)))
# Read the train/validation token streams and report corpus statistics.
train = read_tokens(args.train_label, args.char_list_dict)
val = read_tokens(args.valid_label, args.char_list_dict)
show_token_counts(train, val, unk, args.n_vocab)
# Create the dataset iterators
train_iter, val_iter = get_iterators(train, val, args, eos)
# Prepare an RNNLM model
rnn = RNNLM(args.n_vocab, args.layer, args.unit)
model = ClassifierWithState(rnn)
if args.ngpu > 1:
    logging.warning("currently, multi-gpu is not supported. use single gpu.")
if args.ngpu > 0:
    # Make the specified GPU current
    gpu_id = 0
    chainer.cuda.get_device_from_id(gpu_id).use()
    model.to_gpu()
else:
    gpu_id = -1  # -1 denotes CPU-only execution
write_conf(args)
# Set up an optimizer
if args.opt == 'sgd':
    optimizer = chainer.optimizers.SGD(lr=1.0)
elif args.opt == 'adam':
    optimizer = chainer.optimizers.Adam()
else:
    # BUG FIX: previously an unrecognized value left ``optimizer`` undefined
    # and crashed below with a NameError; fail fast with a clear message.
    raise ValueError('unsupported optimizer: ' + args.opt)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.GradientClipping(args.gradclip))
updater = BPTTUpdater(train_iter, optimizer, gpu_id)