Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def get_dataset(dataset, data_shape):
    """Return ``(val_dataset, val_metric)`` for the named benchmark.

    Parameters
    ----------
    dataset : str
        Either 'voc' or 'coco', matched case-insensitively.
    data_shape : int
        Square evaluation resolution forwarded to the COCO metric.

    Raises
    ------
    NotImplementedError
        If *dataset* is neither 'voc' nor 'coco'.
    """
    key = dataset.lower()
    if key == 'voc':
        val_dataset = gdata.VOCDetection(splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
        return val_dataset, val_metric
    if key == 'coco':
        val_dataset = gdata.COCODetection(splits='instances_val2017',
                                          skip_empty=False)
        # NOTE(review): `args` is read from module scope here, not passed in —
        # confirm the surrounding script defines it before this call.
        val_metric = COCODetectionMetric(
            val_dataset, args.save_prefix + '_eval', cleanup=True,
            data_shape=(data_shape, data_shape))
        return val_dataset, val_metric
    raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
def get_dataset(dataset, data_shape):
    """Build the evaluation dataset together with its accuracy metric.

    Parameters
    ----------
    dataset : str
        Benchmark name, 'voc' or 'coco' (case-insensitive).
    data_shape : int
        Square evaluation resolution passed to the COCO metric.

    Returns
    -------
    tuple
        The validation split and the metric used to score it.

    Raises
    ------
    NotImplementedError
        For any unrecognised *dataset* name.
    """
    which = dataset.lower()
    if which == 'voc':
        val_dataset = gdata.VOCDetection(splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
    elif which == 'coco':
        val_dataset = gdata.COCODetection(splits='instances_val2017',
                                          skip_empty=False)
        # NOTE(review): `args` and `get_post_transform` come from module
        # scope — verify they exist in the enclosing script.
        val_metric = COCODetectionMetric(
            val_dataset, args.save_prefix + '_eval', cleanup=True,
            data_shape=(data_shape, data_shape),
            post_affine=get_post_transform)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    return val_dataset, val_metric
def get_dataset(dataset, args):
    """Load the train/val splits plus the validation metric for *dataset*.

    Parameters
    ----------
    dataset : str
        'voc' or 'coco', case-insensitive.
    args : argparse.Namespace
        Must provide ``save_prefix`` (COCO metric output prefix) and
        ``mixup`` (wrap the training set in MixupDetection when truthy).

    Returns
    -------
    tuple
        ``(train_dataset, val_dataset, val_metric)``.

    Raises
    ------
    NotImplementedError
        If the dataset name is not recognised.
    """
    chosen = dataset.lower()
    if chosen == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
    elif chosen == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017',
                                            use_crowd=False)
        val_dataset = gdata.COCODetection(splits='instances_val2017',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=True)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    if args.mixup:
        # Deferred import: mixup is only needed when the flag is set.
        from gluoncv.data.mixup import detection
        train_dataset = detection.MixupDetection(train_dataset)
    return train_dataset, val_dataset, val_metric
def get_dataset(dataset, args):
    """Return ``(val_dataset, val_metric)`` for offline evaluation.

    Parameters
    ----------
    dataset : str
        'voc' or 'coco' (case-insensitive).
    args : argparse.Namespace
        Needs ``save_prefix``; for COCO also ``save_json`` — when truthy,
        the metric's intermediate files are kept (``cleanup`` disabled).

    Raises
    ------
    NotImplementedError
        For unknown dataset names.
    """
    lowered = dataset.lower()
    if lowered == 'voc':
        val_dataset = gdata.VOCDetection(splits=[(2007, 'test')])
        # NOTE: this variant scores VOC at the stricter IoU threshold 0.75,
        # unlike the usual 0.5 used elsewhere in this file.
        val_metric = VOC07MApMetric(iou_thresh=0.75,
                                    class_names=val_dataset.classes)
        return val_dataset, val_metric
    if lowered == 'coco':
        val_dataset = gdata.COCODetection(splits='instances_val2017',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=not args.save_json)
        return val_dataset, val_metric
    raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
def get_dataset(dataset, args):
    """Assemble train and validation datasets plus the validation metric.

    Parameters
    ----------
    dataset : str
        'voc' or 'coco', case-insensitive.
    args : argparse.Namespace
        Must provide ``save_prefix`` for the COCO metric's output files.

    Returns
    -------
    tuple
        ``(train_dataset, val_dataset, val_metric)``.

    Raises
    ------
    NotImplementedError
        If the dataset name is not supported.
    """
    if dataset.lower() == 'voc':
        trainval = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        test_split = gdata.VOCDetection(splits=[(2007, 'test')])
        metric = VOC07MApMetric(iou_thresh=0.5,
                                class_names=test_split.classes)
        return trainval, test_split, metric
    if dataset.lower() == 'coco':
        train_split = gdata.COCODetection(splits='instances_train2017')
        val_split = gdata.COCODetection(splits='instances_val2017',
                                        skip_empty=False)
        metric = COCODetectionMetric(val_split, args.save_prefix + '_eval',
                                     cleanup=True)
        return train_split, val_split, metric
    raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
Filter top proposals before NMS in testing of RPN.
rpn_test_post_nms : int, default is 300
Return top proposal results after NMS in testing of RPN.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Examples
--------
>>> model = mask_rcnn_fpn_bn_mobilenet1_0_coco(pretrained=True)
>>> print(model)
"""
from ..mobilenet import mobilenet1_0
from ...data import COCODetection
classes = COCODetection.CLASSES
pretrained_base = False if pretrained else pretrained_base
rcnn_max_dets = rpn_test_post_nms if rcnn_max_dets > rpn_test_post_nms else rcnn_max_dets
gluon_norm_kwargs = {'num_devices': num_devices} if num_devices >= 1 else {}
sym_norm_kwargs = {'ndev': num_devices} if num_devices >= 1 else {}
base_network = mobilenet1_0(pretrained=pretrained_base, norm_layer=SyncBatchNorm,
norm_kwargs=gluon_norm_kwargs, **kwargs)
features = FPNFeatureExpander(
network=base_network,
outputs=['relu6_fwd', 'relu10_fwd', 'relu22_fwd', 'relu26_fwd'],
num_filters=[256, 256, 256, 256], use_1x1=True,
use_upsample=True, use_elewadd=True, use_p6=True, no_bias=False, pretrained=pretrained_base,
norm_layer=mx.sym.contrib.SyncBatchNorm, norm_kwargs=sym_norm_kwargs)
top_features = None
box_features = nn.HybridSequential()
box_features.add(nn.AvgPool2D(pool_size=(3, 3), strides=2, padding=1)) # reduce to 7x7
box_features.add(nn.Conv2D(256, 3, padding=1),
def get_dataset(dataset, args):
    """Fetch training/validation splits and the validation metric.

    Parameters
    ----------
    dataset : str
        'voc' or 'coco' (case-insensitive).
    args : argparse.Namespace
        Must provide ``save_prefix`` for the COCO metric output prefix.

    Returns
    -------
    tuple
        ``(train_dataset, val_dataset, val_metric)``.

    Raises
    ------
    NotImplementedError
        When the dataset name is not one of the supported benchmarks.
    """
    selected = dataset.lower()
    if selected == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
    elif selected == 'coco':
        # Crowd-labelled instances are excluded from training.
        train_dataset = gdata.COCODetection(splits='instances_train2017',
                                            use_crowd=False)
        val_dataset = gdata.COCODetection(splits='instances_val2017',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=True)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    return train_dataset, val_dataset, val_metric
def fcos_resnet50_v1_coco(pretrained=False, pretrained_base=True, **kwargs):
    """FCOS detector with a ResNet-50 v1 backbone configured for COCO.

    Parameters
    ----------
    pretrained : bool, default False
        Load weights for the full detector. When True, separate backbone
        weights are skipped (the detector weights supersede them).
    pretrained_base : bool, default True
        Load pretrained weights into the backbone; ignored when
        *pretrained* is True.
    **kwargs
        Forwarded to the backbone constructor.

    Returns
    -------
    The network produced by ``get_fcos``.
    """
    from ..resnet import resnet50_v1
    from ...data import COCODetection

    classes = COCODetection.CLASSES
    # A fully pretrained detector makes separate backbone weights redundant.
    if pretrained:
        pretrained_base = False
    backbone = resnet50_v1(pretrained=pretrained_base, **kwargs)
    pyramid = RetinaFeatureExpander(
        network=backbone,
        pretrained=pretrained_base,
        outputs=['stage2_activation3',
                 'stage3_activation5',
                 'stage4_activation2'])
    return get_fcos(name="resnet50_v1", dataset="coco", pretrained=pretrained,
                    features=pyramid, classes=classes, base_stride=128,
                    short=800, max_size=1333, norm_layer=None,
                    norm_kwargs=None,
                    valid_range=[(512, np.inf), (256, 512), (128, 256),
                                 (64, 128), (0, 64)],
                    nms_thresh=0.6, nms_topk=1000, save_topk=100)
def get_dataset(dataset, args):
    """Prepare train/val datasets and metric, then apply script options.

    Parameters
    ----------
    dataset : str
        'voc' or 'coco', case-insensitive.
    args : argparse.Namespace
        Uses ``save_prefix`` and ``data_shape`` for the COCO metric,
        ``num_samples`` (set to the training-set length when negative),
        and ``mixup`` (wrap training data in MixupDetection when truthy).

    Returns
    -------
    tuple
        ``(train_dataset, val_dataset, val_metric)``.

    Raises
    ------
    NotImplementedError
        If the dataset name is not supported.
    """
    label = dataset.lower()
    if label == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
    elif label == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017',
                                            use_crowd=False)
        val_dataset = gdata.COCODetection(splits='instances_val2017',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(
            val_dataset, args.save_prefix + '_eval', cleanup=True,
            data_shape=(args.data_shape, args.data_shape))
    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    # A negative sample count means "use the full training set".
    if args.num_samples < 0:
        args.num_samples = len(train_dataset)
    if args.mixup:
        # Deferred import: only pay for mixup when it is enabled.
        from gluoncv.data import MixupDetection
        train_dataset = MixupDetection(train_dataset)
    return train_dataset, val_dataset, val_metric
from gluoncv.data import COCODetection
from mobilenet import get_mobilenet
pretrained_base = False if pretrained else pretrained_base
base_net = get_mobilenet(
multiplier=1,
pretrained=pretrained_base,
num_sync_bn_devices=num_sync_bn_devices,
**kwargs)
stages = [base_net.features[:33],
base_net.features[33:69],
base_net.features[69:-2]]
anchors = [[10, 13, 16, 30, 33, 23], [30, 61, 62,
45, 59, 119], [116, 90, 156, 198, 373, 326]]
strides = [8, 16, 32]
classes = COCODetection.CLASSES
return get_yolov3(
'mobile', stages, [512, 256, 128], anchors, strides, classes, 'coco',
pretrained=pretrained, num_sync_bn_devices=num_sync_bn_devices, **kwargs)