def test_forward_gpu(gwm, data):
    embed_atom_data, new_embed_atom_data, supernode = data[:3]
    embed_atom_data = cuda.to_gpu(embed_atom_data)
    new_embed_atom_data = cuda.to_gpu(new_embed_atom_data)
    supernode = cuda.to_gpu(supernode)
    gwm.to_gpu()
    check_forward(gwm, embed_atom_data, new_embed_atom_data, supernode)

def to_gpu(self):
    self.x = cuda.to_gpu(self.x)
    self.t = cuda.to_gpu(self.t)
    self.y = cuda.to_gpu(self.y)

def test_forward_gpu(self):
    self.check_forward(cuda.to_gpu(self.theta), self.output_shape)

def test_allreduce_gpu(self):
    self.setup(True)
    for dtype in self.dtypes:
        x = np.arange(18) + self.communicator.rank
        x = x.astype(dtype)
        x = chainer.cuda.to_gpu(x, device=self.device)
        self.check_allreduce(x, dtype, 18)

        x = np.array(1).astype(dtype)
        y = self.communicator.allreduce(x)
        a = x * self.communicator.size
        chainer.testing.assert_allclose(a, y)
    self.teardown()

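The allreduce test above passes an explicit device to chainer.cuda.to_gpu. A minimal stand-alone sketch of that round trip, assuming a single GPU with id 0 (the device id and array contents are illustrative, not taken from the test):

import numpy as np
from chainer import cuda

x = np.arange(18, dtype=np.float32)
if cuda.available:
    x_gpu = cuda.to_gpu(x, device=0)       # copy onto GPU 0 explicitly
    assert x_gpu.device.id == 0            # the CuPy array remembers its device
    x_back = cuda.to_cpu(x_gpu)            # copy back to host memory
    np.testing.assert_allclose(x, x_back)
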
def test_forward_gpu(self):
    self.chain.to_gpu()
    self.check_forward(cuda.to_gpu(self.x))

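The test_forward_gpu snippets above share one pattern: move the link with to_gpu(), move the inputs with cuda.to_gpu(), then run the forward pass. A self-contained sketch of that pattern, assuming a placeholder Linear link and shapes (neither is taken from the tests):

import numpy as np
from chainer import cuda
import chainer.links as L

model = L.Linear(4, 2)                      # stand-in for the chain under test
x = np.random.rand(8, 4).astype(np.float32)

if cuda.available:
    model.to_gpu()                          # parameters move to the current GPU
    x = cuda.to_gpu(x)                      # inputs must live on the same device

y = model(x)                                # forward pass (on GPU if both were moved)
y_cpu = cuda.to_cpu(y.data)                 # bring the result back for assertions
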
def mini_batch(hcpevec):
    features1 = np.empty((len(hcpevec), FEATURES1_NUM, 9, 9), dtype=np.float32)
    features2 = np.empty((len(hcpevec), FEATURES2_NUM, 9, 9), dtype=np.float32)
    move = np.empty((len(hcpevec)), dtype=np.int32)
    result = np.empty((len(hcpevec)), dtype=np.int32)
    value = np.empty((len(hcpevec)), dtype=np.float32)

    cppshogi.hcpe_decode_with_value(hcpevec, features1, features2, move, result, value)

    return (Variable(cuda.to_gpu(features1)),
            Variable(cuda.to_gpu(features2)),
            Variable(cuda.to_gpu(result.reshape((len(hcpevec), 1)))),
            Variable(cuda.to_gpu(value.reshape((len(value), 1))))
            )

unigram_ids = unigram_ids[:possibole_t_length]
bigram_ids = bigram_ids[:possibole_t_length]
t_length = len(unigram_ids)

# target label sequences (unigram and bigram), padded up to t_length
t_batch[batch_idx, :t_length] = unigram_ids
bigram_batch[batch_idx, :t_length] = bigram_ids
t_length_batch.append(t_length)

x_batch = (x_batch - x_mean) / x_std

# move all batch arrays to the GPU if requested
if gpu:
    x_batch = cuda.to_gpu(x_batch.astype(np.float32))
    t_batch = cuda.to_gpu(t_batch.astype(np.int32))
    bigram_batch = cuda.to_gpu(bigram_batch.astype(np.int32))
    x_length_batch = cuda.to_gpu(np.asarray(x_length_batch).astype(np.int32))
    t_length_batch = cuda.to_gpu(np.asarray(t_length_batch).astype(np.int32))

return x_batch, x_length_batch, t_batch, t_length_batch, bigram_batch

def load_parameter(self, fileName):
    param_list = pic.load(open(fileName, "rb"))
    if self.xp != np:
        param_list = [cuda.to_gpu(param) for param in param_list]
    (self.lambda_NFT, self.G_NFM, self.Q_FMM, self.U_F, self.V_T,
     self.Z_speech_DT, self.W_noise_NnFK, self.H_noise_NnKT) = param_list

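load_parameter's counterpart is not shown here; the following is a plausible sketch of it, assuming the same attribute names and pickle format, and relying on cuda.to_cpu being a no-op for NumPy arrays (save_parameter itself is hypothetical):

import pickle as pic
from chainer import cuda

def save_parameter(self, fileName):
    # gather the same attributes load_parameter unpacks, pull them back to
    # host memory, and pickle the list in the same order
    param_list = [self.lambda_NFT, self.G_NFM, self.Q_FMM, self.U_F,
                  self.V_T, self.Z_speech_DT, self.W_noise_NnFK, self.H_noise_NnKT]
    param_list = [cuda.to_cpu(param) for param in param_list]
    with open(fileName, "wb") as f:
        pic.dump(param_list, f)
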
def _sequential_var(self, xs):
    if self._cpu:
        xs = [Variable(cuda.to_cpu(x), volatile='auto') for x in xs]
    else:
        xs = [Variable(cuda.to_gpu(x), volatile='auto') for x in xs]
    return xs

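_sequential_var uses the Chainer v1 volatile flag, which was removed in Chainer v2 in favor of chainer.no_backprop_mode(). A sketch of an equivalent helper under that assumption (the function name and the use_gpu flag are illustrative, not part of the snippet above):

import numpy as np
import chainer
from chainer import cuda, Variable

def sequential_var(xs, use_gpu=False):
    # Chainer v2+ variant: Variable no longer accepts volatile; wrap the
    # forward computation in chainer.no_backprop_mode() at the call site
    # when gradients are not needed.
    if use_gpu:
        return [Variable(cuda.to_gpu(x)) for x in xs]
    return [Variable(cuda.to_cpu(x)) for x in xs]

xs = [np.random.rand(3, 5).astype(np.float32) for _ in range(4)]
with chainer.no_backprop_mode():
    vs = sequential_var(xs)   # CPU Variables; no computational graph is recorded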