# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def get_dataset(dataset, args):
    """Return (train_dataset, val_dataset, val_metric) for instance segmentation.

    Only 'coco' is supported. Under horovod (with MPI available) the
    validation set is sharded across workers and the metric is told the
    starting image id of this worker's shard.
    """
    if dataset.lower() == 'coco':
        train_dataset = gdata.COCOInstance(splits='instances_train2017')
        val_dataset = gdata.COCOInstance(splits='instances_val2017', skip_empty=False)
        starting_id = 0
        if args.horovod and MPI:
            # Evenly partition validation images; the first `remainder`
            # workers each take one extra sample.
            total = len(val_dataset)
            per_shard = total // hvd.size()
            remainder = total % hvd.size()
            starting_id = per_shard * hvd.rank() + min(hvd.rank(), remainder)
        val_metric = COCOInstanceMetric(val_dataset, args.save_prefix + '_eval',
                                        use_ext=args.use_ext, starting_id=starting_id)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    if args.horovod and MPI:
        # Keep only this worker's slice of the validation set.
        val_dataset = val_dataset.shard(hvd.size(), hvd.rank())
    return train_dataset, val_dataset, val_metric
epoch, i, batch_size/(time.time()-btic), name1, loss1, name2, loss2))
btic = time.time()
#############################################################################################
# Save finetuned weights to disk
# NOTE(review): `net` here is the finetuned model from the training loop earlier
# in the script (not visible in this chunk) — confirm against the full file.
net.save_parameters('ssd_512_mobilenet1.0_pikachu.params')
#############################################################################################
# Predict with finetuned model
# ----------------------------
# We can test the performance using finetuned weights
# Download a sample test image, rebuild the network architecture fresh,
# then reload the weights just saved above.
test_url = 'https://raw.githubusercontent.com/zackchase/mxnet-the-straight-dope/master/img/pikachu.jpg'
download(test_url, 'pikachu_test.jpg')
# pretrained_base=False: base weights are irrelevant since full finetuned
# parameters are loaded on the next line.
net = gcv.model_zoo.get_model('ssd_512_mobilenet1.0_custom', classes=classes, pretrained_base=False)
net.load_parameters('ssd_512_mobilenet1.0_pikachu.params')
# load_test resizes the image (short side 512) and returns both the network
# input tensor `x` and the display image.
x, image = gcv.data.transforms.presets.ssd.load_test('pikachu_test.jpg', 512)
# Forward pass: class ids, confidence scores, and bounding boxes per image.
cid, score, bbox = net(x)
# Visualize detections for the first (only) image in the batch.
ax = viz.plot_bbox(image, bbox[0], score[0], cid[0], class_names=classes)
plt.show()
def get_dataset(dataset, args):
    """Return (val_dataset, val_metric) for evaluation on 'voc' or 'coco'."""
    name = dataset.lower()
    if name == 'voc':
        val_dataset = gdata.VOCDetection(splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
        return val_dataset, val_metric
    if name == 'coco':
        val_dataset = gdata.COCODetection(splits='instances_val2017',
                                          skip_empty=False)
        # cleanup removes the temporary results file unless the user asked
        # to keep the JSON output.
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=not args.save_json)
        return val_dataset, val_metric
    raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
def get_dataset(dataset, args):
    """Return (train_dataset, val_dataset, val_metric) for 'voc' or 'coco'."""
    kind = dataset.lower()
    if kind == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
    elif kind == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017')
        # skip_empty=False: keep images without annotations so validation
        # covers the full split.
        val_dataset = gdata.COCODetection(splits='instances_val2017',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=True)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    return train_dataset, val_dataset, val_metric
def get_dataset(dataset, args):
    """Return (train_dataset, val_dataset, val_metric); optionally wrap the
    training set for mixup augmentation when args.mixup is set."""
    key = dataset.lower()
    if key == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
    elif key == 'coco':
        # use_crowd=False: exclude crowd-labelled boxes from training targets.
        train_dataset = gdata.COCODetection(splits='instances_train2017',
                                            use_crowd=False)
        val_dataset = gdata.COCODetection(splits='instances_val2017',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=True)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    if args.mixup:
        # Imported lazily so the dependency is only needed when mixup is on.
        from gluoncv.data.mixup import detection
        train_dataset = detection.MixupDetection(train_dataset)
    return train_dataset, val_dataset, val_metric
def get_dataset(dataset, args):
    """Return (train_dataset, val_dataset, val_metric) for 'voc' or 'coco'.

    Parameters
    ----------
    dataset : str
        Dataset name, case-insensitive; 'voc' or 'coco'.
    args : argparse.Namespace
        Must provide save_prefix and data_shape (for the COCO metric).

    Raises
    ------
    NotImplementedError
        For any other dataset name.
    """
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(
            splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(
            iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(
            splits='instances_train2017', use_crowd=False)
        val_dataset = gdata.COCODetection(
            splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(
            val_dataset, args.save_prefix + '_eval', cleanup=True,
            data_shape=(args.data_shape, args.data_shape))
    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    # BUG FIX: the original fell off the end and implicitly returned None;
    # every sibling get_dataset variant in this file returns this triple.
    return train_dataset, val_dataset, val_metric
def get_dali_dataset(dataset_name, devices, args):
    """Build DALI training pipelines plus a plain validation dataset/metric.

    Parameters
    ----------
    dataset_name : str
        Only 'coco' (case-insensitive) is supported.
    devices : list
        Device list; one DALI pipeline per device in non-horovod mode.
    args : argparse.Namespace
        Must provide dataset_root, horovod, save_prefix, data_shape.

    Returns
    -------
    (train_dataset, val_dataset, val_metric)
        Under horovod, val_dataset/val_metric are None on non-zero ranks.
    """
    if dataset_name.lower() != "coco":
        raise NotImplementedError('Dataset: {} not implemented with DALI.'.format(dataset_name))
    # Training paths: expand '~' once and reuse for all COCO locations.
    expanded_file_root = os.path.expanduser(args.dataset_root)
    coco_root = os.path.join(expanded_file_root, 'coco', 'train2017')
    coco_annotations = os.path.join(expanded_file_root, 'coco', 'annotations',
                                    'instances_train2017.json')
    if args.horovod:
        # One pipeline per process; horovod handles sharding across ranks.
        train_dataset = [gdata.COCODetectionDALI(
            num_shards=hvd.size(), shard_id=hvd.rank(), file_root=coco_root,
            annotations_file=coco_annotations, device_id=hvd.local_rank())]
    else:
        # One pipeline per local device, each reading its own shard.
        train_dataset = [gdata.COCODetectionDALI(
            num_shards=len(devices), shard_id=i, file_root=coco_root,
            annotations_file=coco_annotations, device_id=i)
            for i, _ in enumerate(devices)]
    # Validation runs only on rank 0 under horovod.
    if not args.horovod or hvd.rank() == 0:
        # BUG FIX: the original used os.path.join(args.dataset_root + '/coco'),
        # a single-argument join (plain string concat) that also skipped the
        # expanduser applied to the training paths above.
        val_dataset = gdata.COCODetection(
            root=os.path.join(expanded_file_root, 'coco'),
            splits='instances_val2017',
            skip_empty=False)
        val_metric = COCODetectionMetric(
            val_dataset, args.save_prefix + '_eval', cleanup=True,
            data_shape=(args.data_shape, args.data_shape))
    else:
        val_dataset = None
        val_metric = None
    # BUG FIX: the original had no return and implicitly returned None.
    return train_dataset, val_dataset, val_metric
def get_dataset(dataset, args):
    """Return (train_dataset, val_dataset, val_metric) for a custom VOC-like
    dataset or COCO; optionally wrap training data for mixup.

    Also resolves args.num_samples to the training-set length when negative.
    """
    if dataset.lower() == 'voc':
        # BUG FIX: raw strings — '\V' in 'D:\VOCdevkit' is an invalid escape
        # sequence (DeprecationWarning today, SyntaxError in future Pythons).
        train_dataset = VOCLike(root=r'D:\VOCdevkit', splits=[(2028, 'trainval')])
        val_dataset = VOCLike(root=r'D:\VOCdevkit', splits=[(2028, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017', use_crowd=False)
        val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(
            val_dataset, args.save_prefix + '_eval', cleanup=True,
            data_shape=(args.data_shape, args.data_shape))
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    if args.num_samples < 0:
        # Negative sentinel means "use the whole training set".
        args.num_samples = len(train_dataset)
    if args.mixup:
        from gluoncv.data import MixupDetection
        train_dataset = MixupDetection(train_dataset)
    return train_dataset, val_dataset, val_metric
det_score = det_score[i].asnumpy()
det_mask = det_mask[i].asnumpy()
det_info = det_info[i].asnumpy()
# filter by conf threshold
im_height, im_width, im_scale = det_info
valid = np.where(((det_id >= 0) & (det_score >= 0.001)))[0]
det_id = det_id[valid]
det_score = det_score[valid]
det_bbox = det_bbox[valid] / im_scale
det_mask = det_mask[valid]
# fill full mask
im_height, im_width = int(round(im_height / im_scale)), int(
round(im_width / im_scale))
full_masks = []
for bbox, mask in zip(det_bbox, det_mask):
full_masks.append(gdata.transforms.mask.fill(mask, bbox, (im_width, im_height)))
full_masks = np.array(full_masks)
eval_metric.update(det_bbox, det_id, det_score, full_masks)
return eval_metric.get()
def get_dataset(dataset, args):
    """Return (val_dataset, val_metric) for evaluation.

    VOC is scored at the stricter IoU threshold of 0.75 in this variant.
    """
    choice = dataset.lower()
    if choice == 'voc':
        val_dataset = gdata.VOCDetection(splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.75,
                                    class_names=val_dataset.classes)
    elif choice == 'coco':
        val_dataset = gdata.COCODetection(splits='instances_val2017',
                                          skip_empty=False)
        # Keep the results JSON only when explicitly requested.
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=not args.save_json)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    return val_dataset, val_metric