def test_transforms_presets_yolo():
im_fname = gcv.utils.download('https://github.com/dmlc/web-data/blob/master/' +
'gluoncv/detection/biking.jpg?raw=true', path='biking.jpg')
x, orig_img = yolo.load_test(im_fname, short=512)
x1, orig_img1 = yolo.transform_test(mx.image.imread(im_fname), short=512)
np.testing.assert_allclose(x.asnumpy(), x1.asnumpy())
np.testing.assert_allclose(orig_img, orig_img1)
if not osp.isdir(osp.expanduser('~/.mxnet/datasets/voc')):
return
train_dataset = gcv.data.VOCDetection(splits=((2007, 'trainval'), (2012, 'trainval')))
val_dataset = gcv.data.VOCDetection(splits=[(2007, 'test')])
width, height = (512, 512)
net = gcv.model_zoo.get_model('yolo3_darknet53_voc', pretrained=False, pretrained_base=False)
net.initialize()
num_workers = 0
batch_size = 4
batchify_fn = Tuple(*([Stack() for _ in range(6)] + [Pad(axis=0, pad_val=-1) for _ in range(1)]))
train_loader = gluon.data.DataLoader(
train_dataset.transform(yolo.YOLO3DefaultTrainTransform(width, height, net)),
batch_size, True, batchify_fn=batchify_fn, last_batch='rollover', num_workers=num_workers)
val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
val_loader = gluon.data.DataLoader(
val_dataset.transform(yolo.YOLO3DefaultValTransform(width, height)),
batch_size, False, batchify_fn=val_batchify_fn, last_batch='keep', num_workers=num_workers)
train_loader2 = gluon.data.DataLoader(
train_dataset.transform(yolo.YOLO3DefaultTrainTransform(width, height)),
batch_size, True, batchify_fn=val_batchify_fn, last_batch='rollover', num_workers=num_workers)
for loader in [train_loader, val_loader, train_loader2]:
for batch in loader:
pass  # drain each loader once so the transform and batchify pipeline is exercised
def test_segmentation_utils():
ctx = mx.context.current_context()
import os
if not os.path.isdir(os.path.expanduser('~/.mxnet/datasets/voc')):
return
transform_fn = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([.485, .456, .406], [.229, .224, .225])
])
# get the dataset
# TODO FIXME: change it to ADE20K dataset and pretrained model
dataset = ADE20KSegmentation(split='val')
# load pretrained net
net = gluoncv.model_zoo.get_model('fcn_resnet50_ade', pretrained=True, ctx=ctx)
# running counts for pixAcc and mIoU
total_inter, total_union, total_correct, total_label = 0, 0, 0, 0
np_inter, np_union, np_correct, np_label = 0, 0, 0, 0
tbar = tqdm(range(10))
for i in tbar:
img, mask = dataset[i]
# prepare data and make prediction
img = transform_fn(img)
img = img.expand_dims(0).as_in_context(ctx)
mask = mask.expand_dims(0)
pred = net.evaluate(img).as_in_context(mx.cpu(0))
# gcv prediction
correct1, labeled1 = batch_pix_accuracy(pred, mask)
inter1, union1 = batch_intersection_union(pred, mask, dataset.num_class)
total_correct += correct1
total_label += labeled1
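# The excerpt stops while accumulating counts. A minimal sketch of how those
# counts are typically reduced to pixAcc and mIoU (assumes numpy is imported as
# np; the epsilon via np.spacing(1) is an assumption, not taken from this test):
total_inter += inter1
total_union += union1
pixAcc = 1.0 * total_correct / (np.spacing(1) + total_label)
IoU = 1.0 * total_inter / (np.spacing(1) + total_union)
mIoU = IoU.mean()
tbar.set_description('pixAcc: %.4f, mIoU: %.4f' % (pixAcc, mIoU))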
def test_viz_network():
try:
import graphviz
for name in ['mobilenet1.0', 'resnet50_v1b']:
net = gcv.model_zoo.get_model(name, pretrained=True)
for shape in [(1, 3, 224, 224), (1, 3, 448, 448)]:
gcv.utils.viz.plot_network(net, shape=shape)
except ImportError:
pass
def test_ssd_reset_class():
ctx = mx.context.current_context()
x = mx.random.uniform(shape=(1, 3, 512, 544), ctx=ctx) # allow non-square and larger inputs
model_name = 'ssd_300_vgg16_atrous_voc'
net = gcv.model_zoo.get_model(model_name, pretrained=True, ctx=ctx)
net.reset_class(["bus", "car", "bird"], reuse_weights=["bus", "car", "bird"])
net = gcv.model_zoo.get_model(model_name, pretrained=True, ctx=ctx)
net.reset_class(["bus", "car", "bird"], reuse_weights={"bus": "bus"})
net = gcv.model_zoo.get_model(model_name, pretrained=True, ctx=ctx)
net.reset_class(["person", "car", "bird"], reuse_weights={"person": 14})
net = gcv.model_zoo.get_model(model_name, pretrained=True, ctx=ctx)
net.reset_class(["person", "car", "bird"], reuse_weights={0: 14})
net = gcv.model_zoo.get_model(model_name, pretrained=True, ctx=ctx)
net.reset_class(["person", "car", "bird"], reuse_weights={0: "person"})
test_classes = ['bird', 'bicycle', 'bus', 'car', 'cat']
test_classes_dict = dict(zip(test_classes, test_classes))
net = gcv.model_zoo.get_model(model_name, pretrained=True, ctx=ctx)
net.reset_class(test_classes, reuse_weights=test_classes_dict)
net(x)
def get_classif_model(model_name, use_tensorrt, ctx=mx.gpu(0), batch_size=128):
mx.contrib.tensorrt.set_use_fp16(False)
h, w = 32, 32
net = gluoncv.model_zoo.get_model(model_name, pretrained=True)
net.hybridize()
net.forward(mx.nd.zeros((batch_size, 3, h, w)))
net.export(model_name)
_sym, arg_params, aux_params = mx.model.load_checkpoint(model_name, 0)
if use_tensorrt:
sym = _sym.get_backend_symbol('TensorRT')
mx.contrib.tensorrt.init_tensorrt_params(sym, arg_params, aux_params)
else:
sym = _sym
executor = sym.simple_bind(ctx=ctx, data=(batch_size, 3, h, w),
softmax_label=(batch_size,),
grad_req='null', force_rebind=True)
executor.copy_params_from(arg_params, aux_params)
return executor
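# A hedged usage sketch (an assumption, not part of the original helper): bind a
# CIFAR-sized classifier with the TensorRT backend and push one random batch
# through the returned executor. Requires an MXNet build with TensorRT support
# and an available GPU.
executor = get_classif_model('cifar_resnet20_v1', use_tensorrt=True, ctx=mx.gpu(0), batch_size=128)
executor.arg_dict['data'][:] = mx.nd.random.uniform(shape=(128, 3, 32, 32), ctx=mx.gpu(0))
executor.forward(is_train=False)
top1 = executor.outputs[0].argmax(axis=1).asnumpy()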
# training contexts
ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]
ctx = ctx if ctx else [mx.cpu()]
args.batch_size = len(ctx) # 1 batch per device
# network
module_list = []
if args.use_fpn:
module_list.append('fpn')
net_name = '_'.join(('mask_rcnn', *module_list, args.network, args.dataset))
args.save_prefix += net_name
if args.pretrained.lower() in ['true', '1', 'yes', 't']:
net = gcv.model_zoo.get_model(net_name, pretrained=True)
else:
net = gcv.model_zoo.get_model(net_name, pretrained=False)
net.load_parameters(args.pretrained.strip(), cast_dtype=True)
net.collect_params().reset_ctx(ctx)
# validation data
val_dataset, eval_metric = get_dataset(args.dataset, args)
val_data = get_dataloader(
net, val_dataset, args.batch_size, args.num_workers)
# validation
if not args.eval_all:
names, values = validate(net, val_data, ctx, eval_metric, len(val_dataset))
for k, v in zip(names, values):
print(k, v)
else:
saved_models = glob.glob(args.save_prefix + '*.params')
for epoch, saved_model in enumerate(sorted(saved_models)):
# evaluate every saved checkpoint in turn; this mirrors the single-checkpoint
# branch above, since the original loop body is not shown in this excerpt
net.load_parameters(saved_model)
names, values = validate(net, val_data, ctx, eval_metric, len(val_dataset))
for k, v in zip(names, values):
print(saved_model, k, v)
if __name__ == '__main__':
args = parse_args()
# training contexts
ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]
ctx = ctx if ctx else [mx.cpu()]
args.batch_size = len(ctx) # 1 batch per device
# network
net_name = '_'.join(('cascade_rfcn', args.network, args.dataset))
args.save_prefix += net_name
if args.pretrained.lower() in ['true', '1', 'yes', 't']:
net = gcv.model_zoo.get_model(net_name, pretrained=True)
else:
net = gcv.model_zoo.get_model(net_name, pretrained=False)
net.load_parameters(args.pretrained.strip())
net.collect_params().reset_ctx(ctx)
# validation data
val_dataset, eval_metric = get_dataset(args.dataset, args)
val_data = get_dataloader(
net, val_dataset, args.batch_size, args.num_workers)
# validation
if not args.eval_all:
current_map = 0
for i in range(10):
iou_thresh = 0.5 + 0.05*i
eval_metric = VOC07MApMetric(iou_thresh=iou_thresh, class_names=val_dataset.classes)
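# Hedged continuation sketch (assumptions: validate() returns (names, values) as
# in the snippet above, and the last value is the mAP at this threshold):
names, values = validate(net, val_data, ctx, eval_metric, len(val_dataset))
current_map += float(values[-1])
# after the sweep, current_map /= 10. gives a COCO-style mAP averaged over IoU 0.5:0.95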
def convert(mxnet_name, torch_name):
# download and load the pre-trained model
net = gluoncv.model_zoo.get_model(mxnet_name, pretrained=True)
# create corresponding torch model
torch_net = create_model(torch_name)
mxp = [(k, v) for k, v in net.collect_params().items() if 'running' not in k]
torchp = list(torch_net.named_parameters())
torch_params = {}
# convert parameters
# NOTE: we are relying on the fact that the order of parameters
# is usually exactly the same between these models, so no key-name mapping
# is necessary. Asserts will trip if this is not the case.
for (tn, tv), (mn, mv) in zip(torchp, mxp):
m_split = mn.split('_')
t_split = tn.split('.')
print(t_split, m_split)
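# Hedged sketch of the copy step this loop leads up to (an assumption about the
# original converter; it presumes `import torch` at the top of the script):
# check that the shapes agree, then stage the MXNet value under the torch name.
assert tuple(tv.shape) == tuple(mv.shape), (tn, mn, tv.shape, mv.shape)
torch_params[tn] = torch.from_numpy(mv.data().asnumpy())
# after the loop, torch_net.load_state_dict(torch_params) would apply the converted weights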
# context list
ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]
ctx = [mx.cpu()] if not ctx else ctx
# grab some image if not specified
if not args.images.strip():
gcv.utils.download('https://github.com/dmlc/web-data/blob/master/' +
'gluoncv/detection/biking.jpg?raw=true', 'biking.jpg')
image_list = ['biking.jpg']
else:
image_list = [x.strip() for x in args.images.split(',') if x.strip()]
if args.pretrained.lower() in ['true', '1', 'yes', 't']:
net = gcv.model_zoo.get_model(args.network, pretrained=True)
else:
net = gcv.model_zoo.get_model(args.network, pretrained=False)
net.load_parameters(args.pretrained)
net.set_nms(0.3, 200)
net.collect_params().reset_ctx(ctx)
for image in image_list:
x, img = presets.rcnn.load_test(image, short=net.short, max_size=net.max_size)
x = x.as_in_context(ctx[0])
ids, scores, bboxes, masks = [xx[0].asnumpy() for xx in net(x)]
masks, _ = gcv.utils.viz.expand_mask(masks, bboxes, (img.shape[1], img.shape[0]), scores)
img = gcv.utils.viz.plot_mask(img, masks)
fig = plt.figure(figsize=(15, 15))
ax = fig.add_subplot(1, 1, 1)
ax = gcv.utils.viz.plot_bbox(img, bboxes, scores, ids,
class_names=net.classes, ax=ax)
"""Script for export pre-trained models in GluonCV model zoo."""
from __future__ import print_function
import argparse
import gluoncv as gcv
gcv.utils.check_version('0.6.0')
def parse_args():
parser = argparse.ArgumentParser("Export model helper.")
parser.add_argument('--model', '-m', required=True, type=str, help='Name of the model')
parser.add_argument('--no-preprocess', action='store_true', help='Do not include standard preprocess.')
args = parser.parse_args()
return args
args = parse_args()
net = gcv.model_zoo.get_model(args.model, pretrained=True)
gcv.utils.export_block(args.model, net, preprocess=(not args.no_preprocess), layout='HWC')
print('Done...')
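# A hedged follow-up sketch (not part of the original script): export_block
# writes "<model>-symbol.json" and "<model>-0000.params", which can be loaded
# back as a static graph for deployment. The input name 'data' and the dummy
# HWC input shape below are assumptions.
import mxnet as mx
deploy_net = mx.gluon.SymbolBlock.imports(args.model + '-symbol.json', ['data'],
args.model + '-0000.params')
out = deploy_net(mx.nd.zeros((1, 224, 224, 3)))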