# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def _make_positive_indices(self, impl, args, kw):
    """Return the indices (as from ``numpy.nonzero``) where *impl* is >= 0.

    Keyword arguments whose value is an unsigned dtype are replaced with
    ``numpy.intp`` so the implementation can produce signed output.

    Fix: work on a copy of ``kw`` instead of mutating the caller's dict —
    the original rewrote the caller's keyword mapping in place.
    """
    kw = dict(kw)
    for key, value in kw.items():
        if value in _unsigned_dtypes:
            kw[key] = numpy.intp
    # Run the implementation, move the result to host, and keep only the
    # positions holding non-negative values.
    mask = cupy.asnumpy(impl(self, *args, **kw)) >= 0
    return numpy.nonzero(mask)
def array_str(arr, max_line_width=None, precision=None, suppress_small=None):
    """Returns the string representation of the content of an array.

    Args:
        arr (array_like): Input array. It should be able to feed to
            :func:`cupy.asnumpy`.
        max_line_width (int): The maximum number of line lengths.
        precision (int): Floating point precision. It uses the current printing
            precision of NumPy.
        suppress_small (bool): If ``True``, very small number are printed as
            zeros.

    .. seealso:: :func:`numpy.array_str`
    """
    # Transfer to host first; NumPy does the actual formatting.
    host_arr = cupy.asnumpy(arr)
    return numpy.array_str(
        host_arr, max_line_width, precision, suppress_small)
def to_numpy(tensor):
    """Convert a CuPy array to NumPy; anything else is passed through as-is."""
    if not isinstance(tensor, cp.ndarray):
        return tensor
    return cp.asnumpy(tensor)
# NOTE(review): script fragment — relies on names bound earlier in the file
# (moving_average, batch_end/batch_start, step_end/step_start, epoch, total,
# start, args, w_model, wr, cp, pkl). Indentation restored from syntax.
# Append the windowed loss and timing figures to the running loss log.
with open(os.path.join(args.output, "losses/losses.txt"), 'a') as loss_file:
    loss_file.write('loss: %.6e \t batch time: %.6e \t compute time: %.6e \n'
                    % (moving_average / (1E6 * 2 * args.window_size), batch_end - batch_start, step_end - step_start))
# Reset the accumulator for the next logging window.
moving_average = 0
end = time.time()
msg = 'Train Epoch: {} \tLoss: {} \t Time: {}'
msg = msg.format(epoch, total, end - start)
logging.info(msg)
# Copy model parameters back to host memory for checkpointing.
embedds = dict()
embedds['means'] = cp.asnumpy(w_model.means)
# Expand to full covariances and add lbda * I regularization before saving.
embedds['vars'] = cp.asnumpy(wr.to_full(w_model.vars) + args.lbda * cp.eye(args.dim).reshape(1, args.dim, args.dim).repeat(w_model.vars.shape[0], axis=0))
# Zero out entry 0 — presumably a padding/unknown slot; confirm against the model.
embedds['vars'][0] *= 0
embedds['c_means'] = cp.asnumpy(w_model.c_means)
embedds['c_vars'] = cp.asnumpy(wr.to_full(w_model.c_vars) + args.lbda * cp.eye(args.dim).reshape(1, args.dim, args.dim).repeat(w_model.c_vars.shape[0], axis=0))
embedds['c_vars'][0] *= 0
# Pickle the host-side embeddings for this epoch.
with open(os.path.join(args.output, "embeddings/embeddings" + '_' + str(epoch + 1)), "wb") as output_file:
    pkl.dump(embedds, output_file)
# Record the epoch summary (file opened in binary mode, hence the encode).
with open(os.path.join(args.output, "losses/epoches" + '_' + str(epoch + 1)), "wb") as epoch_file:
    epoch_file.write(str.encode('epoch: %u \t loss: %.6e\n' % (epoch + 1, total)))
# Reset the accumulated AdaGrad statistics between epochs.
if w_model.optim == 'adagrad':
    w_model.means_adagrad *= 0
    w_model.c_means_adagrad *= 0
    w_model.vars_adagrad *= 0
    w_model.c_vars_adagrad *= 0
def check_add_deconv_layers(self, nobias=True):
    """Add a deconvolutional layer for each convolutional layer already
    defined in the network."""
    # Already mirrored once — nothing to do.
    if len(self.deconv_blocks) == len(self.conv_blocks):
        return
    for conv_block in self.conv_blocks:
        deconv_block = []
        for conv in conv_block:
            out_channels, in_channels, kh, kw = conv.W.data.shape
            on_gpu = isinstance(conv.W.data, cupy.ndarray)
            # Deconvolution2D expects a host-side initializer array.
            initialW = cupy.asnumpy(conv.W.data) if on_gpu else conv.W.data
            deconv = L.Deconvolution2D(
                out_channels, in_channels, (kh, kw),
                stride=conv.stride, pad=conv.pad,
                initialW=initialW, nobias=nobias)
            # Keep the mirror on the same device as its source layer.
            if on_gpu:
                deconv.to_gpu()
            self.add_link('de{}'.format(conv.name), deconv)
            deconv_block.append(deconv)
        self.deconv_blocks.append(deconv_block)
def getNumpyVersion(self):
    """Return the corpus as a host-side NumPy array."""
    host_corpus = cupy.asnumpy(self.CUPYcorpus)
    return host_corpus
def ccg(st1, st2, nbins, tbin):
    """Compute the cross-correlogram of two spike trains on the host CPU."""
    # Move both trains to host memory, then delegate to the NumPy backend.
    host_st1, host_st2 = cp.asnumpy(st1), cp.asnumpy(st2)
    return _ccg(host_st1, host_st2, nbins, tbin)
def solve(R, X, H, lam, rank, alpha, gpu):
    """
    Linear function solver, in the form R = XH^T with weighted loss.

    Args:
        R: observation matrix; one column is solved per item.
        X: output buffer; row ``i`` receives the solution for column ``i``.
        H: factor matrix.
        lam: L2 (Tikhonov) regularization weight.
        rank: latent dimensionality (size of the identity regularizer).
        alpha: confidence weighting forwarded to the per-item solver.
        gpu (bool): solve on the GPU via CuPy when True.

    Fixes: ``xrange`` (Python 2 only) replaced with ``range``; the
    regularizer construction is hoisted so both branches share it.
    """
    # lam * I, materialized once as a dense float32 matrix.
    reg = (lam * sparse.identity(rank, dtype=np.float32)).toarray()
    if gpu:
        import cupy as cp
        H = cp.array(H)
        HT = H.T
        matrix_A = HT.dot(H) + cp.array(reg)
        for i in tqdm(range(R.shape[1])):
            vector_r = R[:, i]
            vector_x = per_item_gpu(vector_r, matrix_A, H, HT, alpha)
            # Bring the solution back to host memory before storing it.
            X[i] = np.copy(cp.asnumpy(vector_x))
    else:
        HT = H.T
        matrix_A = HT.dot(H) + reg
        for i in tqdm(range(R.shape[1])):
            vector_r = R[:, i]
            X[i] = per_item_cpu(vector_r, matrix_A, H, HT, alpha)
# NOTE(review): interior of a detection post-processing method — the enclosing
# def (and the origin of raw_cls_bbox / raw_prob) is outside this view.
# Indentation restored from syntax.
bbox = list()
label = list()
score = list()
# skip cls_id = 0 because it is the background class
for l in range(1, self.n_class):
    # NOTE(review): the same raw_cls_bbox is reused for every class
    # (class-agnostic boxes) — confirm this matches the predictor's layout.
    cls_bbox_l = raw_cls_bbox
    prob_l = raw_prob[:, l]
    # Drop candidates below the score threshold before running NMS.
    mask = prob_l > self.score_thresh
    cls_bbox_l = cls_bbox_l[mask]
    prob_l = prob_l[mask]
    # NMS runs on device; bring the surviving indices back to host.
    keep = non_maximum_suppression(
        cp.array(cls_bbox_l), self.nms_thresh, prob_l)
    keep = cp.asnumpy(keep)
    bbox.append(cls_bbox_l[keep])
    # The labels are in [0, self.n_class - 2].
    label.append((l - 1) * np.ones((len(keep),)))
    score.append(prob_l[keep])
# Flatten the per-class lists into the final (boxes, labels, scores) arrays.
bbox = np.concatenate(bbox, axis=0).astype(np.float32)
label = np.concatenate(label, axis=0).astype(np.int32)
score = np.concatenate(score, axis=0).astype(np.float32)
return bbox, label, score