    def test_backward_gpu(self):
        with testing.assert_warns(DeprecationWarning):
            self.l.to_gpu()
        self.check_backward(cuda.to_gpu(self.x))
@_inject_backend_tests
class TestConvolution1D(unittest.TestCase, BaseTest):
    # (a @testing.parameterize decorator analogous to the one on
    # TestConvolution2D below appears to be truncated from this snippet)

    def setUp(self):
        self.in_channels, self.out_channels = 3, 10
        in_channels = None if self.lazy_init else self.in_channels
        conv_init_args = {'ksize': 3, 'stride': 1, 'pad': 1}
        self.layer = self.link(
            in_channels, self.out_channels, **conv_init_args)
        self.x = numpy.random.normal(
            size=(5, self.in_channels, 4)).astype(numpy.float32)
        self.hook = SpectralNormalization(use_gamma=self.use_gamma)
        self.out_size = self.out_channels  # For compatibility
@testing.parameterize(*testing.product({
    'use_gamma': [True, False],
    'lazy_init': [True, False],
    'link': [L.Convolution2D, L.Deconvolution2D],
}))
@_inject_backend_tests
class TestConvolution2D(unittest.TestCase, BaseTest):

    def setUp(self):
        self.in_channels, self.out_channels = 3, 10
        in_channels = None if self.lazy_init else self.in_channels
        conv_init_args = {'ksize': 3, 'stride': 1, 'pad': 1}
        self.layer = self.link(
            in_channels, self.out_channels, **conv_init_args)
        self.x = numpy.random.normal(
            size=(5, self.in_channels, 4, 4)).astype(numpy.float32)
        self.hook = SpectralNormalization(use_gamma=self.use_gamma)
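# Usage sketch (not part of the tests above): a SpectralNormalization link
# hook is attached through the standard chainer.Link.add_hook API; the
# import path below assumes chainer.link_hooks.SpectralNormalization.
import numpy
import chainer.links as L
from chainer.link_hooks import SpectralNormalization

layer = L.Convolution2D(3, 10, ksize=3, stride=1, pad=1)
layer.add_hook(SpectralNormalization())
x = numpy.random.normal(size=(5, 3, 4, 4)).astype(numpy.float32)
y = layer(x)  # the forward pass runs with the spectrally normalized weight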
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions as F
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(*testing.product({
    'wrap_m': [True, False],
    'wrap_v': [True, False],
    'reduce': ['no', 'sum', 'mean'],
}))
class TestGaussianKLDivergence(unittest.TestCase):

    def setUp(self):
        self.mean = numpy.random.uniform(-1, 1, (3,)).astype(numpy.float32)
        self.ln_var = numpy.random.uniform(-1, 1, (3,)).astype(numpy.float32)

        # Expected loss computed by hand; refer to Appendix B of the original
        # paper, Auto-Encoding Variational Bayes
        # (https://arxiv.org/abs/1312.6114):
        # KL(N(mean, var) || N(0, 1)) = -(1 + ln_var - mean^2 - e^ln_var) / 2
        # (the rest of the check that consumes `loss` is truncated here)
        loss = -(1 + self.ln_var -
                 self.mean * self.mean -
                 numpy.exp(self.ln_var)) * 0.5
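# Usage sketch (not from the original snippet; relies on the imports above):
# with reduce='no', F.gaussian_kl_divergence should reproduce the
# element-wise expected loss computed by hand.
_mean = numpy.zeros((3,), dtype=numpy.float32)
_ln_var = numpy.zeros((3,), dtype=numpy.float32)
_expected = -(1 + _ln_var - _mean * _mean - numpy.exp(_ln_var)) * 0.5
_actual = F.gaussian_kl_divergence(_mean, _ln_var, reduce='no')
testing.assert_allclose(_actual.array, _expected)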
    # (head of this truncated definition reconstructed to mirror f_expected
    # below; the function under test is functions.batch_renormalization)
    def f_tested(x, gamma, beta, running_mean, running_var):
        return functions.batch_renormalization(
            x, gamma, beta, self.rmax, self.dmax,
            eps=self.eps, running_mean=running_mean,
            running_var=running_var,
            update_statistics=self.update_statistics)

    def f_expected(x, gamma, beta, running_mean, running_var):
        return _naive_batch_renormalization(
            x, gamma, beta, self.rmax, self.dmax, self.eps,
            avg_mean=running_mean,
            avg_std=(self.eps + running_var) ** 0.5,
            axis=self.aggr_axes)

    tested = compute(f_tested)
    expected = compute(f_expected)

    # test forward
    testing.assert_allclose(
        tested[0], expected[0], **self.check_forward_options)
    # test backward
    for g, g_expected in zip(tested[1:], expected[1:]):
        testing.assert_allclose(
            g, g_expected, **self.check_backward_options)
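# A minimal numpy sketch of the batch renormalization transform exercised
# above (per Ioffe, 2017, arXiv:1702.03275); this helper name and signature
# are illustrative assumptions, not the test's real helper.
import numpy

def _batch_renorm_sketch(x, gamma, beta, rmax, dmax, eps, mu, sigma):
    mu_b = x.mean(axis=0)                      # batch mean
    sigma_b = numpy.sqrt(x.var(axis=0) + eps)  # batch std
    # correction factors, clipped to [1/rmax, rmax] and [-dmax, dmax]
    r = numpy.clip(sigma_b / sigma, 1.0 / rmax, rmax)
    d = numpy.clip((mu_b - mu) / sigma, -dmax, dmax)
    x_hat = (x - mu_b) / sigma_b * r + d
    return gamma * x_hat + beta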
        # (tail of a truncated input-generation method)
        return x, gamma, beta

    def forward(self, inputs, device):
        x, gamma, beta = inputs
        y = functions.group_normalization(x, self.groups, gamma, beta,
                                          eps=self.eps)
        return y,

    def forward_expected(self, inputs):
        x, gamma, beta = inputs
        y = _simple_group_normalization(x, self.groups, gamma, beta,
                                        eps=self.eps)
        return y,
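# A sketch of what a naive `_simple_group_normalization` reference could look
# like (an assumption; the real helper is defined elsewhere in the test
# module): normalize over each group of channels, then apply the per-channel
# affine transform.
import numpy

def _simple_group_normalization_sketch(x, groups, gamma, beta, eps=1e-5):
    batch_size, channels = x.shape[:2]
    # statistics are computed over each (channels // groups)-sized group
    x_group = x.reshape(batch_size, groups, -1)
    mean = x_group.mean(axis=2, keepdims=True)
    var = x_group.var(axis=2, keepdims=True)
    x_hat = ((x_group - mean) / numpy.sqrt(var + eps)).reshape(x.shape)
    # broadcast gamma/beta over the channel axis
    shape = (1, channels) + (1,) * (x.ndim - 2)
    return x_hat * gamma.reshape(shape) + beta.reshape(shape)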
@testing.parameterize(*(testing.product({
    'shape': [(15, 10)],
    'dtype': [numpy.float32],
    'eps': [1e-5, 1e-1],
})))
@testing.inject_backend_tests(
    None,
    # CPU tests
    [
        {},
        {'use_ideep': 'always'},
    ]
    # GPU tests
    + testing.product({
        'use_cuda': [True],
        'cuda_device': [0, 1],
    }))
# (the decorated test class is truncated from this snippet)
@chainer.testing.attr.gpu
def test_cycle_model_gpu(param):
    check_cycle_model(True, param)
    def predict(self, imgs):
        # Return random detections: one (n_bbox, 4) bbox array plus matching
        # labels and scores per input image.
        bboxes = list()
        labels = list()
        scores = list()
        for _ in imgs:
            n_bbox = np.random.randint(0, 20)
            bboxes.append(np.random.uniform(size=(n_bbox, 4)))
            labels.append(np.random.randint(0, self.n_fg_class, size=n_bbox))
            scores.append(np.random.uniform(size=n_bbox))
        return bboxes, labels, scores
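# Usage sketch (assumption: DummyDetectionLink follows the ChainerCV
# detection link API, mapping a list of CHW images to per-image results of
# equal length), e.g.:
#
#     bboxes, labels, scores = link.predict(imgs)
#     assert len(bboxes) == len(imgs)  # one result set per image
#     assert all(b.shape[1:] == (4,) for b in bboxes)  # (y_min, x_min, y_max, x_max)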
@testing.parameterize(
    {'with_hook': False},
    {'with_hook': True},
)
class TestApplyDetectionLink(unittest.TestCase):

    def setUp(self):
        self.link = DummyDetectionLink()
        self.imgs = list()
        for _ in range(5):
            H, W = np.random.randint(8, 16, size=2)
            self.imgs.append(np.random.randint(0, 256, size=(3, H, W)))

    def test_image_dataset(self):
        dataset = self.imgs
        iterator = SerialIterator(dataset, 2, repeat=False, shuffle=False)
        # (the remainder of this test is truncated in the snippet)
        # (fragment: forward pass with two batch axes; the enclosing test
        # method is truncated in this snippet)
        y = self.link(chainer.Variable(self.x), n_batch_axes=2)
        assert y.shape == (2, 5, 4)
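# Sketch of the n_batch_axes behaviour asserted above (assumes self.link is
# links.Linear(3, 4) and self.x has shape (2, 5, 3)): with n_batch_axes=2
# the first two axes are treated as batch dimensions.
import numpy
import chainer
from chainer import links

_link = links.Linear(3, 4)
_x = numpy.random.uniform(-1, 1, (2, 5, 3)).astype(numpy.float32)
_y = _link(chainer.Variable(_x), n_batch_axes=2)
assert _y.shape == (2, 5, 4)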
class TestInvalidLinear(unittest.TestCase):

    def setUp(self):
        self.link = links.Linear(3, 2)
        # (4, 1, 2) flattens to a feature size of 1 * 2 = 2, which
        # mismatches the link's in_size of 3.
        self.x = numpy.random.uniform(-1, 1, (4, 1, 2)).astype(numpy.float32)

    def test_invalid_size(self):
        with self.assertRaises(type_check.InvalidType):
            self.link(chainer.Variable(self.x))


testing.run_module(__name__, __file__)
import unittest

import numpy as np

from chainer import testing
from chainercv.transforms import resize_point


class TestResizePoint(unittest.TestCase):

    def test_resize_point(self):
        point = np.random.uniform(low=0., high=32., size=(12, 2))
        out = resize_point(point, in_size=(16, 32), out_size=(8, 64))
        # Points are (y, x): y scales by 8 / 16 = 0.5, x by 64 / 32 = 2.
        point[:, 0] *= 0.5
        point[:, 1] *= 2
        np.testing.assert_equal(out, point)


testing.run_module(__name__, __file__)
        raise ValueError(
            'Length of no_grads param and xs should be same.')

    for skip, x, cx in six.moves.zip(no_grads, xs, casted_xs):
        if skip:
            assert x.grad is None
            continue
        gx, = numerical_grad(f, (cx.data,), y_grad, eps=eps)
        testing.assert_allclose(gx, x.grad, atol=atol, rtol=rtol)
        if dtype is None:
            assert gx.dtype == x.grad.dtype
        else:
            assert gx.dtype.kind == 'f' and gx.dtype == dtype

    for p in params:
        gp, = numerical_grad(f, (p.data,), y_grad, eps=eps)
        testing.assert_allclose(gp, p.grad, atol=atol, rtol=rtol)
        # compare dtypes by value rather than object identity
        assert gp.dtype == p.grad.dtype
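# Usage sketch: this numerical-vs-analytical comparison is what the public
# chainer.gradient_check.check_backward entry point drives; a minimal call
# looks like the following (the function and tolerances are illustrative).
import numpy
import chainer.functions
from chainer import gradient_check

_x = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
_gy = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
gradient_check.check_backward(
    chainer.functions.sigmoid, _x, _gy, eps=1e-3, atol=1e-4, rtol=1e-4)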