def roi_align(feature, rois, pooled_h, pooled_w,
              spatial_scale, sampling_ratio=2):
    # Cache one RoIAlign module per device context and pooling configuration.
    ctx = MakeContext(inputs=[feature])
    key = 'torch.ops.roi_align/{}:{}/pool_h:{}/pool_w:{}/' \
          'spatial_scale:{}/sampling_ratio:{}'.format(
              ctx[0], ctx[1], pooled_h, pooled_w,
              spatial_scale, sampling_ratio)
    module = get_module(
        RoIAlign, key, ctx, pooled_h=pooled_h, pooled_w=pooled_w,
        spatial_scale=spatial_scale, sampling_ratio=sampling_ratio)
    return module.forward(feature, rois)
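
# Usage sketch (hedged): `feature` is a hypothetical NCHW feature map and
# `rois` a hypothetical (num_rois, 5) tensor of (batch_idx, x1, y1, x2, y2)
# boxes; neither is defined in this module.
#
#   pooled = roi_align(feature, rois, pooled_h=7, pooled_w=7,
#                      spatial_scale=1.0 / 16)
#   # pooled: (num_rois, C, 7, 7)
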
def _log(input, out=None):
    ctx = MakeContext(inputs=[input])
    key = 'torch.ops.log/{}:{}'.format(ctx[0], ctx[1])
    module = get_module(Log, key, ctx)
    return module.forward(input, out)


def _exp(input, out=None):
    ctx = MakeContext(inputs=[input])
    key = 'torch.ops.exp/{}:{}'.format(ctx[0], ctx[1])
    module = get_module(Exp, key, ctx)
    return module.forward(input, out)
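
# Usage sketch (hedged): each wrapper caches one module per device context
# and runs it; `x` is a hypothetical tensor, not defined in this module.
#
#   y = _exp(x)         # elementwise e ** x
#   z = _log(y)         # elementwise natural log, so z ~= x
#   _log(y, out=z)      # write into an existing output tensor instead
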
def _indexing(input, starts, sizes):
    n_starts, n_sizes = len(starts), len(sizes)
    ctx = MakeContext(inputs=[input])
    key = 'torch.ops.indexing/{}:{}/n_starts:{}/n_sizes:{}'.format(
        ctx[0], ctx[1], n_starts, n_sizes)
    module = get_module(Indexing, key, ctx, n_starts=n_starts, n_sizes=n_sizes)
    return module.forward(input, starts, sizes)
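
# Usage sketch (hedged): extracts the sub-tensor that begins at `starts` and
# spans `sizes` entries per dimension; `x` is a hypothetical NCHW tensor.
#
#   patch = _indexing(x, starts=[0, 0, 8, 8], sizes=[1, 3, 16, 16])
#   # patch: a (1, 3, 16, 16) crop of x
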
def randn(*sizes, **kwargs):
    """Return a float tensor with values drawn from N(0, 1).

    Parameters
    ----------
    sizes : int...
        The sizes indicating the shape of the output tensor.
    out : dragon.vm.torch.Tensor
        The optional output tensor.

    Returns
    -------
    vm.torch.FloatTensor
        The output tensor.

    """
    arguments = {'mean': 0.0, 'std': 1.0, 'dims': sizes}
    out = kwargs.get('out', None)
    if out is None:
        out = LeafTensor(
            sizes, requires_grad=kwargs.get('requires_grad', False))
    inputs, outputs = [], [out]
    ctx = MakeContext(inputs, outputs)
    meta = ('ONCE', 'RandomNormal', ctx)
    return RunOperator(inputs, outputs, meta, **arguments)
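
# Usage sketch (hedged):
#
#   x = randn(2, 3)                       # new leaf tensor, values ~ N(0, 1)
#   y = randn(2, 3, requires_grad=True)   # tracked by autograd
#   randn(2, 3, out=x)                    # fill an existing tensor instead
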
def _reduce(input, operation, dim=None, keepdim=False, out=None):
    ctx = MakeContext(inputs=[input])
    if dim is None:
        keepdim = False
    key = 'torch.ops.{}/{}:{}/dim:{}/keepdim:{}'.format(
        operation.lower(), ctx[0], ctx[1], dim, int(keepdim))
    module = get_module(
        Reduce, key, ctx, operation=operation, dim=dim, keepdim=keepdim)
    return module.forward(input, out)
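
# Usage sketch (hedged): `operation` names the registered reduction; 'SUM'
# and 'MEAN' are assumed spellings, and `x` is a hypothetical tensor.
#
#   total = _reduce(x, 'SUM')                           # reduce over all dims
#   row_mean = _reduce(x, 'MEAN', dim=1, keepdim=True)
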
def _resize_2d(input, op_type, dsize, fx, fy):
    # Require either an explicit output size or positive scale factors.
    if dsize is None:
        if fx < 0 or fy < 0:
            raise ValueError('Set both fx and fy if dsize is None.')
    elif len(dsize) != 2:
        raise ValueError('The dsize should be a sequence with 2 elements.')
    ctx = MakeContext(inputs=[input])
    key = 'torch.ops.{}/{}:{}/dsize:{}/fx:{}/fy:{}'.format(
        op_type.lower(), ctx[0], ctx[1], '2' if dsize else 'none', fx, fy)
    module = get_module(
        Resize2d, key, ctx, op_type=op_type, dsize=dsize, fx=fx, fy=fy)
    return module.forward(input, dsize)
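
# Usage sketch (hedged): resize by an explicit output size or by scale
# factors; 'NNResize' is an assumed op_type spelling.
#
#   y = _resize_2d(x, 'NNResize', dsize=[64, 64], fx=-1.0, fy=-1.0)
#   y = _resize_2d(x, 'NNResize', dsize=None, fx=2.0, fy=2.0)
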
def _arg_reduce(input, operation, dim=None, keepdim=False, top_k=1, out=None):
    ctx = MakeContext(inputs=[input])
    if dim is None:
        keepdim = False
    key = 'torch.ops.{}/{}:{}/dim:{}/keepdim:{}/top_k:{}'.format(
        operation.lower(), ctx[0], ctx[1], dim, int(keepdim), top_k)
    module = get_module(
        ArgReduce, key, ctx, operation=operation,
        axis=dim, keepdim=keepdim, top_k=top_k)
    return module.forward(input, out)
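
# Usage sketch (hedged): 'ARGMAX' is an assumed operation spelling; with
# top_k > 1 the module selects the k best entries along `dim`.
#
#   idx = _arg_reduce(x, 'ARGMAX', dim=1)
#   top3 = _arg_reduce(x, 'ARGMAX', dim=1, top_k=3)
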
def _repeat(input, times):
    ctx = MakeContext(inputs=[input])
    n_times = len(times)
    key = 'torch.ops.repeat/{}:{}/n_times:{}'.format(ctx[0], ctx[1], n_times)
    module = get_module(Repeat, key, ctx, n_times=n_times)
    return module.forward(input, times)
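
# Usage sketch (hedged): tiles the input along each dimension, one factor per
# entry of `times`, in the spirit of torch.Tensor.repeat.
#
#   y = _repeat(x, times=[2, 1, 3])   # x: (N, C, W) -> y: (2N, C, 3W)
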
def _update(param, grad, op_type, slot,
            lr_mult=1.0, decay_mult=1.0):
    ctx = MakeContext(inputs=[param])
    key = 'torch.ops.{}/{}:{}/{}/{}'.format(
        op_type.lower(), ctx[0], ctx[1], slot, param.name)
    module = get_module(
        Update, key, ctx, op_type=op_type,
        lr_mult=lr_mult, decay_mult=decay_mult, slot=slot)
    return module.forward(param, grad)
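
# Usage sketch (hedged): applies one optimizer step to `param` using `grad`;
# `slot` namespaces the optimizer buffers (e.g. momentum), and 'SGDUpdate' is
# an assumed op_type spelling.
#
#   _update(param, grad, op_type='SGDUpdate', slot='sgd',
#           lr_mult=1.0, decay_mult=1.0)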