# Shared imports for the test snippets below. `_dot`, `_relu`, and the
# `thin_stack` module are helpers from the original test files; minimal
# stand-ins are sketched where they are first used.
import numpy
import six

import chainer
from chainer import backend
from chainer import cuda
from chainer import functions
from chainer import functions as F
from chainer import gradient_check
from chainer import testing


def check_sample(self):
    # Draw 1000 batches of 4 x 3 = 12 samples and histogram the indices.
    counts = numpy.zeros(len(self.ps), numpy.float32)
    for _ in range(1000):
        vs = self.sampler.sample((4, 3))
        numpy.add.at(counts, cuda.to_cpu(vs), 1)
    # Rescale the histogram so it is comparable to the (unnormalized)
    # weights in self.ps.
    counts /= (1000 * 12)
    counts *= sum(self.ps)
    testing.assert_allclose(self.ps, counts, atol=0.1, rtol=0.1)
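
# Usage sketch for the sampler exercised above (a minimal example, assuming
# chainer.utils.WalkerAlias as the sampler under test; `ps` below is an
# illustrative distribution, not the original fixture):
def _walker_alias_demo():
    ps = [0.1, 0.2, 0.3, 0.4]
    sampler = chainer.utils.WalkerAlias(ps)
    vs = sampler.sample((4, 3))  # (4, 3) array of indices, P(i) proportional to ps[i]
    return vs
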
def check_forward(self, x_data):
    x = chainer.Variable(x_data)
    # ksize=3, stride=2, pad=1: these parameters are implied by the
    # expected pooling windows computed below.
    y = functions.max_pooling_2d(
        x, 3, stride=2, pad=1, cover_all=self.cover_all)
    y_data = cuda.to_cpu(y.data)

    self.assertEqual(self.gy.shape, y_data.shape)
    for k in six.moves.range(2):
        for c in six.moves.range(3):
            x = self.x[k, c]
            if self.cover_all:
                # cover_all=True pools a third, partially-covered row of
                # windows as well.
                expect = numpy.array([
                    [x[0:2, 0:2].max(), x[0:2, 1:3].max()],
                    [x[1:4, 0:2].max(), x[1:4, 1:3].max()],
                    [x[3:4, 0:2].max(), x[3:4, 1:3].max()]])
            else:
                expect = numpy.array([
                    [x[0:2, 0:2].max(), x[0:2, 1:3].max()],
                    [x[1:4, 0:2].max(), x[1:4, 1:3].max()]])
            testing.assert_allclose(expect, y_data[k, c])
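
# Quick illustration of cover_all on the 4x3 spatial size assumed above
# (standalone sketch):
def _cover_all_demo():
    x = numpy.arange(2 * 3 * 4 * 3, dtype=numpy.float32).reshape(2, 3, 4, 3)
    y_cover = functions.max_pooling_2d(x, 3, stride=2, pad=1, cover_all=True)
    y_plain = functions.max_pooling_2d(x, 3, stride=2, pad=1, cover_all=False)
    # cover_all=True adds one partially-covered output row:
    # (2, 3, 3, 2) vs (2, 3, 2, 2)
    return y_cover.shape, y_plain.shape
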
def check_numerical_grad_one(self, f, df, xs, gys, eps):
    dfxs = df(xs)
    gys = tuple(0 if gy is None else gy for gy in gys)
    # Matrix-vector multiplication of dfxs and gys.
    dx_expect = tuple(map(lambda dfx: _dot(dfx, gys), dfxs))

    def func():
        return f(xs)
    dx_actual = gradient_check.numerical_grad(func, xs, gys, eps)

    self.assertEqual(len(dx_expect), len(dx_actual))
    for e, a in zip(dx_expect, dx_actual):
        testing.assert_allclose(e, a, atol=self.atol, rtol=self.rtol)
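
# `_dot` above is a helper from the original test module; a minimal
# stand-in, followed by a self-contained sketch of how
# gradient_check.numerical_grad is driven:
def _dot(dfx, gys):
    return sum(a * b for a, b in zip(dfx, gys))


def _numerical_grad_demo():
    x = numpy.array([1., 2., 3.], dtype=numpy.float32)
    gy = numpy.ones_like(x)

    def func():
        return (x ** 2,)  # f(x) = x^2, so df/dx = 2x

    gx, = gradient_check.numerical_grad(func, (x,), (gy,))
    testing.assert_allclose(gx, 2 * x, atol=1e-3, rtol=1e-3)
    return gx
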
# Fragment of a bidirectional n-step RNN forward check. The elided outer
# loops run over layers and, for this backward direction, over time steps
# in reverse; `x`, `w`, `b`, `batch`, `xf`, `ys`, `hy`, and `e_hy` come
# from the surrounding test code.
            # -- body of the per-time-step loop (backward direction) --
            h_prev = e_hy[layer_idx, :batch]
            if self.activation == 'tanh':
                e_h = numpy.tanh(x.dot(w[0].T) +
                                 h_prev.dot(w[1].T) + b[0] + b[1])
            elif self.activation == 'relu':
                e_h = _relu(x.dot(w[0].T) +
                            h_prev.dot(w[1].T) + b[0] + b[1])
            e_hy[layer_idx, :batch] = e_h
            xb.append(e_h)

        # -- after the time-step loop: restore time order and splice the
        # forward and backward outputs channel-wise --
        xb.reverse()
        xs_next = [numpy.concatenate([hfi, hbi], axis=1)
                   for (hfi, hbi) in zip(xf, xb)]

    # -- after the layer loop: compare with the outputs under test --
    for k, (ysi, xsi) in enumerate(zip(ys, xs_next)):
        testing.assert_allclose(ysi.data, xsi, rtol=1e-4, atol=1e-4)
    testing.assert_allclose(hy.data, e_hy, rtol=1e-4, atol=1e-4)
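
# `_relu` above is a helper from the original test module; a minimal
# stand-in, plus a standalone sketch of the single-step reference formula
# h_t = tanh(x W_x^T + h_prev W_h^T + b_x + b_h) with made-up sizes:
def _relu(x):
    return numpy.maximum(x, 0)


def _rnn_step_demo():
    rng = numpy.random.RandomState(0)
    n_in, n_units, batch = 3, 4, 2
    x = rng.randn(batch, n_in).astype(numpy.float32)
    h_prev = rng.randn(batch, n_units).astype(numpy.float32)
    w_x = rng.randn(n_units, n_in).astype(numpy.float32)
    w_h = rng.randn(n_units, n_units).astype(numpy.float32)
    b = numpy.zeros(n_units, numpy.float32)
    h = numpy.tanh(x.dot(w_x.T) + h_prev.dot(w_h.T) + b + b)
    return h  # shape (batch, n_units)
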
def test_consistency_with_cudnn_cpu(self):
    with chainer.using_config('use_cudnn', 'never'):
        x_cpu, grid_cpu, y_cpu = self._apply_backward(
            self.x, self.grid, self.grads)
    with chainer.using_config('use_cudnn', 'always'):
        x_cudnn, grid_cudnn, y_cudnn = self._apply_backward(
            cuda.to_gpu(self.x), cuda.to_gpu(self.grid),
            cuda.to_gpu(self.grads))

    testing.assert_allclose(y_cpu.data, y_cudnn.data)
    testing.assert_allclose(x_cpu.grad, x_cudnn.grad)
    testing.assert_allclose(grid_cpu.grad, grid_cudnn.grad)
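
# The same A/B pattern works for any cuDNN-backed function via the
# thread-local use_cudnn config (sketch; assumes a GPU array `x_gpu`
# is available):
def _cudnn_ab_demo(x_gpu):
    with chainer.using_config('use_cudnn', 'never'):
        y_plain = functions.relu(x_gpu)
    with chainer.using_config('use_cudnn', 'always'):
        y_cudnn = functions.relu(x_gpu)
    testing.assert_allclose(y_plain.data, y_cudnn.data)
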
def check_forward(self, x_data):
    x = chainer.Variable(x_data)
    y = functions.local_response_normalization(x)
    self.assertEqual(y.data.dtype, self.dtype)
    y_data = cuda.to_cpu(y.data)

    # Naive implementation of LRN with the default parameters
    # (n=5, k=2, alpha=1e-4, beta=0.75); the channel window spans
    # [c - 2, c + 2], so the exclusive range bound is c + 3, and 7 is
    # the number of channels in the fixture.
    y_expect = numpy.zeros_like(self.x)
    for n, c, h, w in numpy.ndindex(self.x.shape):
        s = 0
        for i in six.moves.range(max(0, c - 2), min(7, c + 3)):
            s += self.x[n, i, h, w] ** 2
        denom = (2 + 1e-4 * s) ** .75
        y_expect[n, c, h, w] = self.x[n, c, h, w] / denom
    testing.assert_allclose(
        y_expect, y_data, **self.check_forward_options)
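
# Standalone sketch of the LRN formula checked above, on a single pixel
# (Chainer's default parameters n=5, k=2, alpha=1e-4, beta=0.75 assumed):
def _lrn_demo():
    x = numpy.random.RandomState(0).randn(1, 7, 1, 1).astype(numpy.float32)
    y = functions.local_response_normalization(x).data
    c = 3  # middle channel: the window covers channels 1..5
    s = (x[0, 1:6, 0, 0] ** 2).sum()
    expect = x[0, c, 0, 0] / (2 + 1e-4 * s) ** 0.75
    testing.assert_allclose(y[0, c, 0, 0], expect, rtol=1e-5)
    return y
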
def check_forward(self, theta, output_shape):
    grid = functions.spatial_transformer_grid(theta, output_shape).data

    theta = cuda.to_cpu(theta)
    B = theta.shape[0]
    H, W = output_shape

    # Each location (i, j) of a normalized [-1, 1] output grid is mapped
    # through the affine matrix theta[b]. (Using the local CPU copy of
    # theta here, for consistency with the to_cpu call above.)
    expected = []
    for b in range(B):
        for i in numpy.linspace(-1., 1., H):
            for j in numpy.linspace(-1., 1., W):
                coord = numpy.array([j, i, 1])
                expected.append(theta[b].dot(coord))
    expected = numpy.array(
        expected).reshape(B, H, W, 2).transpose(0, 3, 1, 2)
    testing.assert_allclose(grid, expected)
    self.assertEqual(grid.dtype, theta.dtype)
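
# Sketch: with an identity affine matrix, the generated grid is just the
# normalized target coordinates themselves.
def _identity_grid_demo():
    theta = numpy.array([[[1., 0., 0.],
                          [0., 1., 0.]]], dtype=numpy.float32)
    grid = functions.spatial_transformer_grid(theta, (2, 2)).data
    # grid has shape (B, 2, H, W); channel 0 holds x, channel 1 holds y,
    # each running over [-1, 1].
    expect = numpy.array([[[[-1., 1.], [-1., 1.]],
                           [[-1., -1.], [1., 1.]]]], dtype=numpy.float32)
    testing.assert_allclose(grid, expect)
    return grid
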
def check_backward(self, x_data):
    x = chainer.Variable(x_data)
    y = functions.transpose(x, self.axes)
    # transpose's backward applies the inverse permutation, so feeding
    # y.data back as the gradient must reproduce x.data exactly.
    y.grad = y.data
    y.backward()
    testing.assert_allclose(x.data, x.grad, atol=0, rtol=0)
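
# Concrete instance of the round-trip property relied on above:
def _transpose_roundtrip_demo():
    x = chainer.Variable(
        numpy.arange(6, dtype=numpy.float32).reshape(2, 3))
    y = functions.transpose(x, (1, 0))
    y.grad = y.data  # feed the transposed data back as the gradient
    y.backward()
    # the inverse permutation reproduces x.data bit-for-bit
    testing.assert_allclose(x.data, x.grad, atol=0, rtol=0)
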
def check_forward(self, s_data, i_data):
    xp = backend.get_array_module(s_data)
    s_old = s_data.copy()
    s = chainer.Variable(s_data)
    i = chainer.Variable(i_data)
    x, t = thin_stack.thin_stack_get(s, i)

    expect = s_old[xp.arange(len(i_data)), i_data]
    testing.assert_allclose(x.array, expect)

    # Thin stack reuses the same ndarray.
    self.assertIs(s_data, t.array)
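
# Plain-NumPy sketch of the semantics checked above: thin_stack_get reads
# s[k, i[k]] for each row k and also returns the stack itself so later
# operations can keep reusing the same buffer. (The `thin_stack` module's
# import path depends on the Chainer version.)
def _thin_stack_get_demo():
    s = numpy.arange(6, dtype=numpy.float32).reshape(3, 2)
    i = numpy.array([1, 0, 1], dtype=numpy.int32)
    picked = s[numpy.arange(len(i)), i]  # -> [1., 2., 5.]
    return picked
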
def check_gaussian_nll(self, x, mean, ln_var):
    if self.wrap_x:
        x = chainer.Variable(x)
    if self.wrap_m:
        mean = chainer.Variable(mean)
    if self.wrap_v:
        ln_var = chainer.Variable(ln_var)
    actual = cuda.to_cpu(F.gaussian_nll(x, mean, ln_var, self.reduce).data)
    testing.assert_allclose(self.expect, actual)
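
# Sketch of the quantity under test: the Gaussian negative log-likelihood
# -log N(x; mean, exp(ln_var)), accumulated over elements when
# reduce='sum' (a minimal check against a hand-computed value):
def _gaussian_nll_demo():
    x = numpy.array([0.5], dtype=numpy.float32)
    mean = numpy.array([0.0], dtype=numpy.float32)
    ln_var = numpy.array([0.0], dtype=numpy.float32)  # variance = 1
    actual = F.gaussian_nll(x, mean, ln_var, reduce='sum').data
    # 0.5 * (log(2*pi) + ln_var + (x - mean)^2 / var)
    expect = 0.5 * (numpy.log(2 * numpy.pi) + 0.0 + 0.25)
    testing.assert_allclose(actual, expect, rtol=1e-5)
    return actual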