w_init = UniformInitializer(
calc_uniform_lim_glorot(3, nmaps, kernel=(3, 3)),
rng=rng)
h = PF.convolution(image, nmaps, kernel=(3, 3), pad=(1, 1),
w_init=w_init, with_bias=False)
h = PF.batch_normalization(h, batch_stat=not test)
h = F.relu(h)
h = res_unit(h, "conv2", rng, False) # -> 32x32
h = res_unit(h, "conv3", rng, True) # -> 16x16
h = res_unit(h, "conv4", rng, False) # -> 16x16
h = res_unit(h, "conv5", rng, True) # -> 8x8
h = res_unit(h, "conv6", rng, False) # -> 8x8
h = res_unit(h, "conv7", rng, True) # -> 4x4
h = res_unit(h, "conv8", rng, False) # -> 4x4
h = F.average_pooling(h, kernel=(4, 4)) # -> 1x1
w_init = UniformInitializer(
calc_uniform_lim_glorot(int(np.prod(h.shape[1:])), ncls, kernel=(1, 1)), rng=rng)
pred = PF.affine(h, ncls, w_init=w_init)
return pred
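# NOTE: `res_unit` is not defined in this snippet. A minimal sketch of what
# it likely looks like: a bottleneck residual block (1x1 -> 3x3 -> 1x1
# convolutions with BN/ReLU and Glorot-uniform init, plus optional
# max-pooling downsampling). The channel split and the `test` flag handling
# here are assumptions, not the verbatim original.
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
from nnabla.initializer import UniformInitializer, calc_uniform_lim_glorot

def res_unit(x, scope_name, rng, dn=False, test=False):
    C = x.shape[1]

    def conv_bn(h, maps, kernel, pad, name, relu=True):
        with nn.parameter_scope(name):
            w_init = UniformInitializer(
                calc_uniform_lim_glorot(h.shape[1], maps, kernel=kernel),
                rng=rng)
            h = PF.convolution(h, maps, kernel=kernel, pad=pad,
                               w_init=w_init, with_bias=False)
            h = PF.batch_normalization(h, batch_stat=not test)
            return F.relu(h) if relu else h

    with nn.parameter_scope(scope_name):
        h = conv_bn(x, C // 2, (1, 1), (0, 0), "conv1")
        h = conv_bn(h, C // 2, (3, 3), (1, 1), "conv2")
        h = conv_bn(h, C, (1, 1), (0, 0), "conv3", relu=False)
        h = F.relu(h + x)  # residual connection
        if dn:  # downsample by 2 when requested
            h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))
    return h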
angle=0.25,
flip_lr=True)
image.need_grad = False
h = masked_convolution(image, maps, kernel=(3, 3), pad=(1, 1),
with_bias=False)
h = PF.batch_normalization(h, batch_stat=not test)
h = F.relu(h)
h = res_unit(h, "conv2", False) # -> 32x32
h = res_unit(h, "conv3", True) # -> 16x16
h = res_unit(h, "conv4", False) # -> 16x16
h = res_unit(h, "conv5", True) # -> 8x8
h = res_unit(h, "conv6", False) # -> 8x8
h = res_unit(h, "conv7", True) # -> 4x4
h = res_unit(h, "conv8", False) # -> 4x4
h = F.average_pooling(h, kernel=(4, 4)) # -> 1x1
pred = PF.affine(h, ncls)
return pred
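# NOTE: `masked_convolution` is not a built-in nnabla parametric function.
# In pruning examples it is typically a convolution whose weight is gated
# elementwise by a non-trainable 0/1 mask, so pruned connections stay zero
# in both forward and backward. A sketch under that assumption (the
# parameter names "W", "Mw", "b" are illustrative):
import nnabla.functions as F
from nnabla.parameter import get_parameter_or_create
from nnabla.initializer import (ConstantInitializer, UniformInitializer,
                                calc_uniform_lim_glorot)

def masked_convolution(x, maps, kernel, pad=(0, 0), with_bias=True):
    inmaps = x.shape[1]
    w_init = UniformInitializer(
        calc_uniform_lim_glorot(inmaps, maps, kernel=kernel))
    w = get_parameter_or_create(
        "W", (maps, inmaps) + tuple(kernel), w_init, need_grad=True)
    mask = get_parameter_or_create(
        "Mw", (maps, inmaps) + tuple(kernel),
        ConstantInitializer(1), need_grad=False)
    b = None
    if with_bias:
        b = get_parameter_or_create(
            "b", (maps,), ConstantInitializer(0), need_grad=True)
    return F.convolution(x, w * mask, b, pad=pad)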
return F.elu(x + h)
# Conv1 --> 64 x 32 x 32 (pad=(3, 3) grows the 28x28 MNIST input to 32x32)
with nn.parameter_scope("conv1"):
c1 = F.elu(
bn(PF.binary_weight_convolution(image, 64, (3, 3), pad=(3, 3), with_bias=False)))
# Conv2 --> 64 x 16 x 16
c2 = F.max_pooling(res_unit(c1, "conv2"), (2, 2))
# Conv3 --> 64 x 8 x 8
c3 = F.max_pooling(res_unit(c2, "conv3"), (2, 2))
# Conv4 --> 64 x 8 x 8
c4 = res_unit(c3, "conv4")
# Conv5 --> 64 x 4 x 4
c5 = F.max_pooling(res_unit(c4, "conv5"), (2, 2))
# Conv6 --> 64 x 4 x 4
c6 = res_unit(c5, "conv6")
pl = F.average_pooling(c6, (4, 4))
with nn.parameter_scope("classifier"):
y = PF.binary_weight_affine(pl, 10)
return y
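# NOTE: the `bn` helper used above is not shown. In the nnabla binary-network
# examples it is typically a small closure over the enclosing prediction
# function's `test` flag; a minimal sketch of that assumption:
def bn(h):
    # Batch statistics during training, running averages at test time.
    return PF.batch_normalization(h, batch_stat=not test)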
return F.elu(x + h)
# Conv1 --> 64 x 32 x 32 (pad=(3, 3) grows the 28x28 MNIST input to 32x32)
with nn.parameter_scope("conv1"):
c1 = F.elu(
bn(PF.binary_connect_convolution(image, 64, (3, 3), pad=(3, 3), with_bias=False)))
# Conv2 --> 64 x 16 x 16
c2 = F.max_pooling(res_unit(c1, "conv2"), (2, 2))
# Conv3 --> 64 x 8 x 8
c3 = F.max_pooling(res_unit(c2, "conv3"), (2, 2))
# Conv4 --> 64 x 8 x 8
c4 = res_unit(c3, "conv4")
# Conv5 --> 64 x 4 x 4
c5 = F.max_pooling(res_unit(c4, "conv5"), (2, 2))
# Conv6 --> 64 x 4 x 4
c6 = res_unit(c5, "conv6")
pl = F.average_pooling(c6, (4, 4))
with nn.parameter_scope("classifier"):
y = bn(PF.binary_connect_affine(pl, 10))
return y
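# Each binary layer keeps a real-valued weight "W" (the one the solver
# updates) alongside its binarized copy "Wb" used in the forward pass; the
# binary-weight variants also store a per-outmap scale "alpha". One way to
# inspect what was registered after building the graph:
import nnabla as nn
for name, param in nn.get_parameters(grad_only=False).items():
    print(name, param.shape)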
angle=0.25,
flip_lr=True)
image.need_grad = False
h = PF.convolution(image, maps, kernel=(3, 3), pad=(1, 1),
with_bias=False)
h = PF.batch_normalization(h, batch_stat=not test)
h = F.relu(h)
h = res_unit(h, "conv2", False) # -> 32x32
h = res_unit(h, "conv3", True) # -> 16x16
h = res_unit(h, "conv4", False) # -> 16x16
h = res_unit(h, "conv5", True) # -> 8x8
h = res_unit(h, "conv6", False) # -> 8x8
h = res_unit(h, "conv7", True) # -> 4x4
h = res_unit(h, "conv8", False) # -> 4x4
h = F.average_pooling(h, kernel=(4, 4)) # -> 1x1
pred = PF.affine(h, ncls)
return pred
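# Hypothetical wiring of a prediction function like the one above into a
# training graph; the function name, shapes, and hyperparameters below are
# illustrative, not from the original:
import nnabla as nn
import nnabla.functions as F
import nnabla.solvers as S

image = nn.Variable((64, 3, 32, 32))
label = nn.Variable((64, 1))
pred = resnet_prediction(image, test=False)  # name assumed
loss = F.mean(F.softmax_cross_entropy(pred, label))

solver = S.Adam(alpha=1e-3)
solver.set_parameters(nn.get_parameters())

# One update step, given numpy batches x_np, t_np:
# image.d, label.d = x_np, t_np
# loss.forward()
# solver.zero_grad()
# loss.backward()
# solver.update()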
def style_encoder(x, maps=64, name="style-encoder"):
h = x
with nn.parameter_scope("generator"):
with nn.parameter_scope(name):
h = convblock(h, maps * 1, 7, 3, 1, norm="", name="convblock-1")
h = convblock(h, maps * 2, 4, 1, 2, norm="", name="convblock-2")
h = convblock(h, maps * 4, 4, 1, 2, norm="", name="convblock-3")
h = convblock(h, maps * 4, 4, 1, 2, norm="", name="convblock-4")
h = convblock(h, maps * 4, 4, 1, 2, norm="", name="convblock-5")
h = F.average_pooling(h, h.shape[2:])
h = convolution(h, maps * 4, 1, 0, 1)
return h
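# Example usage (assumes the `convblock`/`convolution` helpers defined in
# the same module; the input resolution is illustrative):
import nnabla as nn
x = nn.Variable((1, 3, 256, 256))
z = style_encoder(x, maps=64)
print(z.shape)  # (1, 256, 1, 1): global average pooling then a 1x1 conv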
def mnist_binary_net_lenet_prediction(image, test=False):
"""
Construct LeNet for MNIST (BinaryNet version).
"""
with nn.parameter_scope("conv1"):
c1 = PF.binary_connect_convolution(image, 16, (5, 5))
c1 = PF.batch_normalization(c1, batch_stat=not test)
c1 = F.binary_tanh(F.average_pooling(c1, (2, 2)))
with nn.parameter_scope("conv2"):
c2 = PF.binary_connect_convolution(c1, 16, (5, 5))
c2 = PF.batch_normalization(c2, batch_stat=not test)
c2 = F.binary_tanh(F.average_pooling(c2, (2, 2)))
with nn.parameter_scope("fc3"):
c3 = PF.binary_connect_affine(c2, 50)
c3 = PF.batch_normalization(c3, batch_stat=not test)
c3 = F.binary_tanh(c3)
with nn.parameter_scope("fc4"):
c4 = PF.binary_connect_affine(c3, 10)
c4 = PF.batch_normalization(c4, batch_stat=not test)
return c4
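# Quick smoke test of the BinaryNet LeNet graph (random data standing in
# for a real MNIST batch):
import numpy as np
import nnabla as nn

x = nn.Variable((128, 1, 28, 28))
y = mnist_binary_net_lenet_prediction(x, test=True)
x.d = np.random.rand(*x.shape)
y.forward()
print(y.shape)  # (128, 10)
print(y.d.argmax(axis=1)[:10])  # predicted classes for the first 10 samples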
def mnist_binary_weight_lenet_prediction(image, test=False):
"""
Construct LeNet for MNIST (Binary Weight Network version).
"""
with nn.parameter_scope("conv1"):
c1 = PF.binary_weight_convolution(image, 16, (5, 5))
c1 = F.elu(F.average_pooling(c1, (2, 2)))
with nn.parameter_scope("conv2"):
c2 = PF.binary_weight_convolution(c1, 16, (5, 5))
c2 = F.elu(F.average_pooling(c2, (2, 2)))
with nn.parameter_scope("fc3"):
c3 = F.elu(PF.binary_weight_affine(c2, 50))
with nn.parameter_scope("fc4"):
c4 = PF.binary_weight_affine(c3, 10)
return c4
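# The two LeNets differ mainly in their activations: the BinaryNet version
# binarizes activations with F.binary_tanh (its backward uses a
# straight-through-style estimator), while this binary-weight version keeps
# real-valued ELU. A tiny check of what binary_tanh computes:
import numpy as np
import nnabla as nn
import nnabla.functions as F

x = nn.Variable((1, 4))
x.d = np.array([[-2.0, -0.1, 0.1, 2.0]])
y = F.binary_tanh(x)
y.forward()
print(y.d)  # [[-1. -1.  1.  1.]]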
assert not channel_last, "Only `channel_last = False` is supported now."
# Inputs
x0 = inputs[0].data
dy = inputs[1].data
# Outputs
dx0 = outputs[0].data
# Grads of inputs
g_x0 = inputs[0].grad
g_dy = inputs[1].grad
# Grads of outputs
g_dx0 = outputs[0].grad
# Computation
if prop_down[1]:
g_dy_ = F.average_pooling(g_dx0, kernel, stride, ignore_border, pad,
channel_last, including_pad)
if accum[1]:
g_dy += g_dy_
else:
g_dy.copy_from(g_dy_)
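# Context: this is the double-backward path of average pooling. Because the
# op is linear, the gradient w.r.t. the incoming gradient dy is just average
# pooling applied to g_dx0, as computed above. A small sketch of the
# underlying first-order behavior (each output gradient is spread uniformly
# over its kernel window):
import numpy as np
import nnabla as nn
import nnabla.functions as F

x = nn.Variable((1, 1, 4, 4), need_grad=True)
x.d = np.arange(16).reshape(x.shape)
y = F.average_pooling(x, kernel=(2, 2))  # stride defaults to the kernel
y.forward()
x.grad.zero()
y.backward(np.ones(y.shape, dtype=np.float32))  # dL/dy = 1 everywhere
print(x.g)  # all entries 0.25: each 2x2 window shares one unit of gradient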
def discriminators(x, maps=64, n=3):
    # Multi-scale discriminators: run a discriminator at each of n scales,
    # halving the input resolution with average pooling between scales.
    results = []
    with nn.parameter_scope("discriminators"):
        for i in range(n):
            h = discriminator(x, maps, name="discriminator-{}x".format(2 ** i))
            results.append(h)
            x = F.average_pooling(x, kernel=(3, 3), stride=(2, 2),
                                  pad=(1, 1), including_pad=False)
    return results
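# Hypothetical usage: n discriminator outputs at full, 1/2, and 1/4 input
# resolution (assumes the `discriminator` helper from the same module; the
# input shape is illustrative):
import nnabla as nn
x = nn.Variable((1, 3, 256, 256))
outs = discriminators(x, maps=64, n=3)
for i, d in enumerate(outs):
    print("scale 1/{}:".format(2 ** i), d.shape)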