A bottleneck residual unit written with NNabla's parametric functions. The imports are added here for self-containedness, and test is made an explicit argument (in the original example it is captured from the enclosing training function):

import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF


def res_unit(x, scope_name, dn=False, test=False):
    C = x.shape[1]
    with nn.parameter_scope(scope_name):
        # Conv -> BN -> Relu
        with nn.parameter_scope("conv1"):
            h = PF.convolution(x, C // 2, kernel=(1, 1), pad=(0, 0),
                               with_bias=False)
            h = PF.batch_normalization(h, batch_stat=not test)
            h = F.relu(h)
        # Conv -> BN -> Relu
        with nn.parameter_scope("conv2"):
            h = PF.convolution(h, C // 2, kernel=(3, 3), pad=(1, 1),
                               with_bias=False)
            h = PF.batch_normalization(h, batch_stat=not test)
            h = F.relu(h)
        # Conv -> BN
        with nn.parameter_scope("conv3"):
            h = PF.convolution(h, C, kernel=(1, 1), pad=(0, 0),
                               with_bias=False)
            h = PF.batch_normalization(h, batch_stat=not test)
    # Residual -> Relu
    h = F.relu(h + x)
    # Maxpooling
    if dn:
        h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))
    return h
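A minimal usage sketch, relying on the definition above (the input shape and scope labels are illustrative assumptions):

x = nn.Variable((8, 16, 32, 32))    # NCHW input with 16 channels
h = res_unit(x, "block1")           # shape preserved: (8, 16, 32, 32)
h = res_unit(h, "block2", dn=True)  # 2x2 max pooling halves H and W
print(h.shape)                      # (8, 16, 16, 16)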
A second variant swaps the 3x3 convolution for a low-rank svd_convolution; svd_convolution and compression_ratio come from the surrounding example (a sketch of such a helper follows the snippet):

def res_unit(x, scope_name, dn=False, test=False):
    C = x.shape[1]
    with nn.parameter_scope(scope_name):
        # Conv -> BN -> Relu
        with nn.parameter_scope("conv1"):
            h = PF.convolution(x, C // 2, kernel=(1, 1), pad=(0, 0),
                               with_bias=False)
            h = PF.batch_normalization(h, batch_stat=not test)
            h = F.relu(h)
        # Conv -> BN -> Relu
        with nn.parameter_scope("conv2"):
            h = svd_convolution(h, C // 2, kernel=(3, 3), pad=(1, 1),
                                with_bias=False, cr=compression_ratio)
            h = PF.batch_normalization(h, batch_stat=not test)
            h = F.relu(h)
        # Conv -> BN
        with nn.parameter_scope("conv3"):
            h = PF.convolution(h, C, kernel=(1, 1), pad=(0, 0),
                               with_bias=False)
            h = PF.batch_normalization(h, batch_stat=not test)
    # Residual -> Relu
    h = F.relu(h + x)
    # Maxpooling (the original snippet breaks off here; the ending mirrors
    # the other variants)
    if dn:
        h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))
    return h
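A hypothetical sketch of such a helper, using the imports above (the rank formula and scope names are assumptions; the real helper also initializes the factor pair from an SVD of a pretrained kernel):

def svd_convolution(x, outmaps, kernel, pad=None, with_bias=False, cr=0.75):
    # Approximate one kxk convolution by a low-rank pair: kxk into R maps,
    # then 1x1 from R to outmaps. R is chosen so the pair keeps roughly
    # (1 - cr) of the original parameter count.
    inmaps = x.shape[1]
    R = max(1, int((1.0 - cr) * inmaps * outmaps * kernel[0] * kernel[1]
                   / (inmaps * kernel[0] * kernel[1] + outmaps)))
    with nn.parameter_scope("svd_lower"):
        h = PF.convolution(x, R, kernel=kernel, pad=pad, with_bias=False)
    with nn.parameter_scope("svd_upper"):
        h = PF.convolution(h, outmaps, kernel=(1, 1), pad=(0, 0),
                           with_bias=with_bias)
    return h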
A residual block for a GAN discriminator follows. The original fragment starts mid-function, so the signature and the shortcut assignment s = h below are reconstructed assumptions; convolution is a spectral-normalization-aware helper defined elsewhere in the example (a sketch follows the snippet):

def resblock_d(h, scopename, maps, kernel=(3, 3), pad=(1, 1), stride=(1, 1),
               downsample=True, sn=True, test=False):
    s = h  # keep the input for the shortcut branch
    _, c, _, _ = h.shape
    assert maps // 2 == c or maps == c
    maps1 = c if maps // 2 == c else maps
    maps2 = maps
    with nn.parameter_scope(scopename):
        # LeakyRelu -> Conv
        with nn.parameter_scope("conv1"):
            # h = F.leaky_relu(h, 0.2, False)
            h = F.relu(h, False)
            h = convolution(h, maps1, kernel=kernel, pad=pad, stride=stride,
                            with_bias=True, sn=sn, test=test,
                            init_scale=np.sqrt(2))
        # LeakyRelu -> Conv -> Downsample
        with nn.parameter_scope("conv2"):
            # h = F.leaky_relu(h, 0.2, True)
            h = F.relu(h, True)
            h = convolution(h, maps2, kernel=kernel, pad=pad, stride=stride,
                            with_bias=True, sn=sn, test=test,
                            init_scale=np.sqrt(2))
            if downsample:
                h = F.average_pooling(h, kernel=(2, 2))
        # Shortcut: Conv -> Downsample
        if c != maps2 or downsample:
            with nn.parameter_scope("shortcut"):
                s = convolution(s, maps2, kernel=(1, 1), pad=(0, 0),
                                stride=(1, 1), with_bias=True, sn=sn, test=test)
        if downsample:
            s = F.average_pooling(s, kernel=(2, 2))
    return F.add2(h, s, True)
    # return F.add2(h, s)
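A minimal sketch of that helper, assuming a recent NNabla where parametric functions accept an apply_w hook (init_scale is accepted but not applied in this simplified version):

def convolution(x, maps, kernel=(3, 3), pad=(1, 1), stride=(1, 1),
                with_bias=True, sn=True, test=False, init_scale=1.0):
    # Plain convolution, optionally with spectral normalization on the weight.
    apply_w = (lambda w: PF.spectral_norm(w, dim=0, test=test)) if sn else None
    return PF.convolution(x, maps, kernel=kernel, pad=pad, stride=stride,
                          with_bias=with_bias, apply_w=apply_w)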
The entry point of a validation script; get_args, data_iterator, logger, Monitor, and get_extension_context come from the surrounding example and NNabla's utilities. The snippet ends before the validation loop itself:

def main():
    # Args
    args = get_args()
    # Context
    ctx = get_extension_context(
        args.context, device_id=args.device_id, type_config=args.type_config)
    logger.info(ctx)
    nn.set_default_context(ctx)
    nn.set_auto_forward(True)
    # Monitor
    monitor = Monitor(args.monitor_path)
    # Validation
    logger.info("Start validation")
    num_images = args.valid_samples
    num_batches = num_images // args.batch_size
    # DataIterator
    di = data_iterator(args.img_path, args.batch_size,
                       imsize=(args.imsize, args.imsize),
                       num_samples=args.valid_samples,
                       dataset_name=args.dataset_name)
The next fragment sits inside a multi-device training loop: the rank-0 process logs losses and periodically saves image tiles and parameters. i, ii, n_devices, mpi_local_rank, monitor_losses, and monitor_images are defined earlier in that loop:

if mpi_local_rank == 0:
    # Monitor
    for mon, loss in monitor_losses:
        mon.add(ii, loss.d)
    # Save
    if (i + 1) % (args.model_save_interval // n_devices) == 0:
        for mon, x in monitor_images:
            mon.add(ii, x.d)
        nn.save_parameters(os.path.join(
            args.monitor_path, "param_{:05d}.h5".format(i)))
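The (mon, variable) pairs are typically built once before the loop; a hypothetical setup with nnabla.monitor (the series names and the loss/image variables are placeholders):

from nnabla.monitor import Monitor, MonitorSeries, MonitorImageTile

monitor = Monitor(args.monitor_path)
monitor_losses = [(MonitorSeries("gen_loss", monitor, interval=10), loss_gen),
                  (MonitorSeries("dis_loss", monitor, interval=10), loss_dis)]
monitor_images = [(MonitorImageTile("fake_images", monitor, interval=100), x_fake)]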
The following fragment is from a power-of-two quantized convolution: it keeps a float weight W and a quantized view W_q, linked through F.pow2_quantize so gradients pass via the straight-through estimator. Only the tail of its docstring survives, and the fragment breaks off at the else branch; real_w_q = w is the natural completion (bias quantization and the final convolution call are not shown):

    n_b (int): Bit width used for bias.
    m_b (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for bias. Default is 2.
    ste_fine_grained_b (bool): STE is fine-grained if `True`.

    Returns:
        :class:`~nnabla.Variable`: N-D array.
    """
    if w_init is None:
        w_init = UniformInitializer(
            calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)),
            rng=rng)
    if with_bias and b_init is None:
        b_init = ConstantInitializer()
    # Floating Weight
    w = get_parameter_or_create(
        "W", (outmaps, inp.shape[base_axis] // group) + tuple(kernel),
        w_init, True, not fix_parameters)
    # Quantized Weight
    if quantize_w:
        w_q = get_parameter_or_create(
            "W_q", (outmaps, inp.shape[base_axis] // group) + tuple(kernel),
            w_init, False)
        # Link computation graph
        real_w_q = F.pow2_quantize(w, quantize=quantize_w,
                                   sign=sign_w, with_zero=with_zero_w,
                                   n=n_w, m=m_w,
                                   ste_fine_grained=ste_fine_grained_w,
                                   outputs=[w_q.data])
        real_w_q.persistent = True
    else:
        real_w_q = w
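F.pow2_quantize can also be run standalone to see what the weight linking produces; a small sketch using the imports above (input values chosen arbitrarily):

nn.set_auto_forward(True)
w = nn.Variable.from_numpy_array(np.array([0.3, -1.7, 0.05], dtype=np.float32))
w_q = F.pow2_quantize(w, sign=True, with_zero=True, n=8, m=1)
print(w_q.d)  # each entry snapped to a signed power of two (or zero)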
A separate snippet benchmarks a categorical classification loss through the FunctionBenchmark harness:

def test_categorical_classification_loss(inspecs, loss, nnabla_opts):
    func = getattr(F, loss)
    fb = FunctionBenchmark(
        func, inspecs, [], dict(axis=1),
        nnabla_opts.ext, nnabla_opts.ext_kwargs)
    fb.benchmark()
    fb.write(writer=nnabla_opts.function_benchmark_writer)
The same residual unit again, now built on masked_convolution, a pruning-aware convolution defined elsewhere in the example (a sketch follows the snippet). The channel counts use integer division, since C / 2 would be a float under Python 3:

def res_unit(x, scope_name, dn=False, test=False):
    C = x.shape[1]
    with nn.parameter_scope(scope_name):
        # Conv -> BN -> Relu
        with nn.parameter_scope("conv1"):
            h = masked_convolution(x, C // 2, kernel=(1, 1), pad=(0, 0),
                                   with_bias=False)
            h = PF.batch_normalization(h, batch_stat=not test)
            h = F.relu(h)
        # Conv -> BN -> Relu
        with nn.parameter_scope("conv2"):
            h = masked_convolution(h, C // 2, kernel=(3, 3), pad=(1, 1),
                                   with_bias=False)
            h = PF.batch_normalization(h, batch_stat=not test)
            h = F.relu(h)
        # Conv -> BN
        with nn.parameter_scope("conv3"):
            h = masked_convolution(h, C, kernel=(1, 1), pad=(0, 0),
                                   with_bias=False)
            h = PF.batch_normalization(h, batch_stat=not test)
    # Residual -> Relu (the original snippet breaks off here; the ending
    # mirrors the other variants)
    h = F.relu(h + x)
    # Maxpooling
    if dn:
        h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))
    return h
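A minimal sketch of masked_convolution, assuming NNabla's apply_w hook and a mask parameter named Mw (both the hook usage and the name are assumptions):

from nnabla.parameter import get_parameter_or_create
from nnabla.initializer import ConstantInitializer

def masked_convolution(x, outmaps, kernel, pad=None, with_bias=True):
    def apply_mask(w):
        # Elementwise binary mask over the weights; zeroed entries stay pruned.
        mask = get_parameter_or_create("Mw", w.shape,
                                       ConstantInitializer(1.0), need_grad=False)
        return w * mask
    return PF.convolution(x, outmaps, kernel=kernel, pad=pad,
                          with_bias=with_bias, apply_w=apply_mask)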
A final copy of the unit with plain PF.convolution (channel counts again fixed to integer division, and the trailing return restored):

def res_unit(x, scope_name, dn=False, test=False):
    C = x.shape[1]
    with nn.parameter_scope(scope_name):
        # Conv -> BN -> Relu
        with nn.parameter_scope("conv1"):
            h = PF.convolution(x, C // 2, kernel=(1, 1), pad=(0, 0),
                               with_bias=False)
            h = PF.batch_normalization(h, batch_stat=not test)
            h = F.relu(h)
        # Conv -> BN -> Relu
        with nn.parameter_scope("conv2"):
            h = PF.convolution(h, C // 2, kernel=(3, 3), pad=(1, 1),
                               with_bias=False)
            h = PF.batch_normalization(h, batch_stat=not test)
            h = F.relu(h)
        # Conv -> BN
        with nn.parameter_scope("conv3"):
            h = PF.convolution(h, C, kernel=(1, 1), pad=(0, 0),
                               with_bias=False)
            h = PF.batch_normalization(h, batch_stat=not test)
    # Residual -> Relu
    h = F.relu(h + x)
    # Maxpooling
    if dn:
        h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))
    return h