# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): this chunk is a flattened concatenation of example-script
# fragments; the original indentation (and the `if` matching the first
# `else:` below) is missing from this view, so the code is annotated in
# place rather than restructured.
image_list = ['street.jpg']
# Fallback path: parse a comma-separated --images argument, dropping blanks.
else:
image_list = [x.strip() for x in args.images.split(',') if x.strip()]
# --pretrained is overloaded: a truthy string loads model-zoo weights,
# anything else is treated as a filesystem path to a parameter file.
if args.pretrained.lower() in ['true', '1', 'yes', 't']:
net = gcv.model_zoo.get_model(args.network, pretrained=True)
else:
net = gcv.model_zoo.get_model(args.network, pretrained=False)
net.load_parameters(args.pretrained)
# Non-maximum suppression: IoU threshold 0.3, keep at most 200 boxes.
net.set_nms(0.3, 200)
ax = None
# Run detection on each image and draw predictions; `ax` is reused so all
# images plot onto the same axes object.
for image in image_list:
x, img = presets.rcnn.load_test(image, short=args.short, max_size=args.max_size)
ids, scores, bboxes = [xx.asnumpy() for xx in net(x)]
ax = gcv.utils.viz.plot_bbox(img, bboxes, scores, ids,
class_names=net.classes, ax=ax)
plt.show()
# --- unrelated training-loop epilogue fragment below this line ---
# Summarize every metric as name=value for the epoch log line.
msg = ','.join(['{}={:.3f}'.format(*metric.get()) for metric in metrics])
logger.info('[Epoch {}] Training cost: {:.3f}, {}'.format(
epoch, (time.time() - tic), msg))
# Validate only every args.val_interval epochs; on other epochs current_map
# stays 0 so checkpointing still runs (presumably the validate branch also
# sets current_map in the full script -- TODO confirm).
if not (epoch + 1) % args.val_interval:
# consider reduce the frequency of validation to save time
result = validate(net, val_data, ctx, eval_metric, args)
else:
current_map = 0.
save_params(net, logger, best_map, current_map, epoch, args.save_interval, args.save_prefix)
# Script entry point: training setup fragment (flattened; the trailing
# `else:` is cut off mid-construct in this view).
if __name__ == '__main__':
args = parse_args()
# fix seed for mxnet, numpy and python builtin random generator.
gutils.random.seed(args.seed)
# training contexts
# One context per requested GPU id; fall back to CPU when --gpus is empty.
ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]
ctx = ctx if ctx else [mx.cpu()]
args.batch_size = len(ctx) # 1 batch per device
# network
# Model-zoo name is assembled as panoptic[_fpn]_<backbone>_<dataset>.
module_list = []
if args.use_fpn:
module_list.append('fpn')
net_name = '_'.join(('panoptic', *module_list, args.network, args.dataset))
args.save_prefix += net_name
net = get_model(net_name, pretrained_base=True)
# Resume from a checkpoint when --resume is a non-empty path.
if args.resume.strip():
net.load_parameters(args.resume.strip())
else:
##################################################################
# Take bounding boxes by slice columns from 0 to 4
# NOTE(review): assumes train_label is an (N, 5+) array whose rows are
# [x_min, y_min, x_max, y_max, class_id, ...] -- TODO confirm upstream.
bounding_boxes = train_label[:, :4]
print('Num of objects:', bounding_boxes.shape[0])
print('Bounding boxes (num_boxes, x_min, y_min, x_max, y_max):\n',
bounding_boxes)
##################################################################
# take class ids by slice the 5th column
# Sliced as 4:5 (not 4) to keep a 2-D (num_boxes, 1) column vector.
class_ids = train_label[:, 4:5]
print('Class IDs (num_boxes, ):\n', class_ids)
##################################################################
# Visualize image, bounding boxes
utils.viz.plot_bbox(train_image.asnumpy(), bounding_boxes, scores=None,
labels=class_ids, class_names=train_dataset.classes)
plt.show()
import argparse, time, logging, os, sys, math
import mxnet as mx
from mxnet import gluon, nd
from mxnet import autograd as ag
from mxnet.gluon import nn
from mxnet.gluon.data.vision import transforms
import gluoncv as gcv
gcv.utils.check_version('0.6.0')
from gluoncv.data import imagenet
from gluoncv.utils import makedirs, LRSequential, LRScheduler
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from efficientnet_model import get_efficientnet, get_efficientnet_lite
# CLI
def parse_args():
parser = argparse.ArgumentParser(description='Train a model for image classification.')
parser.add_argument('--data-dir', type=str, default='~/.mxnet/datasets/imagenet',
help='training and validation pictures to use.')
parser.add_argument('--rec-train', type=str, default='~/.mxnet/datasets/imagenet/rec/train.rec',
help='the training data')
parser.add_argument('--rec-train-idx', type=str, default='~/.mxnet/datasets/imagenet/rec/train.idx',
help='the index of training data')
parser.add_argument('--rec-val', type=str, default='~/.mxnet/datasets/imagenet/rec/val.rec',
from __future__ import division
import os
# disable autotune
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
import argparse
import glob
import logging
logging.basicConfig(level=logging.INFO)
import numpy as np
import mxnet as mx
from tqdm import tqdm
import gluoncv as gcv
gcv.utils.check_version('0.6.0')
from gluoncv import data as gdata
from gluoncv.data import batchify
from gluoncv.data.transforms.presets.rcnn import MaskRCNNDefaultValTransform
from gluoncv.utils.metrics.coco_instance import COCOInstanceMetric
def parse_args():
parser = argparse.ArgumentParser(description='Validate Mask RCNN networks.')
parser.add_argument('--network', type=str, default='resnet50_v1b',
help="Base feature extraction network name")
parser.add_argument('--dataset', type=str, default='coco',
help='Training dataset.')
parser.add_argument('--num-workers', '-j', dest='num_workers', type=int,
default=4, help='Number of data workers')
parser.add_argument('--gpus', type=str, default='0',
help='Training with GPUs, you can specify 1,3 for example.')
to install ``MXNet`` and ``GluonCV`` if you haven't done so yet.
"""
import matplotlib.pyplot as plt
from mxnet import gluon, nd, image
from mxnet.gluon.data.vision import transforms
from gluoncv import utils
from gluoncv.model_zoo import get_model
################################################################
#
# Then, we download and show the example image:
url = 'https://raw.githubusercontent.com/dmlc/web-data/master/gluoncv/classification/plane-draw.jpeg'
im_fname = utils.download(url)
img = image.imread(im_fname)
plt.imshow(img.asnumpy())
plt.show()
################################################################
# In case you don't recognize it, the image is a poorly-drawn airplane :)
#
# Now we define transformations for the image.
transform_fn = transforms.Compose([
transforms.Resize(32),
transforms.CenterCrop(32),
transforms.ToTensor(),
transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
import argparse, time, logging, os
import mxnet as mx
from mxnet import gluon, nd
from mxnet import autograd as ag
from mxnet.gluon import nn
from mxnet.gluon.data.vision import transforms
import gluoncv as gcv
gcv.utils.check_version('0.6.0')
from gluoncv.data import imagenet
from gluoncv.loss import MixSoftmaxCrossEntropyLoss
from gluoncv.model_zoo import get_model
from gluoncv.utils import makedirs, LRScheduler, LRSequential
# CLI
def parse_args():
parser = argparse.ArgumentParser(description='Train a model for image classification.')
parser.add_argument('--data-dir', type=str, default='~/.mxnet/datasets/imagenet',
help='training and validation pictures to use.')
parser.add_argument('--rec-train', type=str, default='~/.mxnet/datasets/imagenet/rec/train.rec',
help='the training data')
parser.add_argument('--rec-train-idx', type=str, default='~/.mxnet/datasets/imagenet/rec/train.idx',
help='the index of training data')
parser.add_argument('--rec-val', type=str, default='~/.mxnet/datasets/imagenet/rec/val.rec',
help='the validation data')
def init_default_train_args(batch_size, net, epochs, iters_per_epoch):
    """Assemble the default training configuration for *net*.

    Parameters
    ----------
    batch_size : int
        Total batch size; the base learning rate is scaled linearly with it.
    net : gluon.Block
        Network whose parameters the SGD trainer will optimize.
    epochs : int
        Number of epochs the cosine LR schedule spans.
    iters_per_epoch : int
        Iterations per epoch, needed by the LR scheduler.

    Returns
    -------
    dict
        Keys ``'trainer'`` (gluon.Trainer), ``'batch_size'`` (int) and
        ``'criterion'`` (softmax cross-entropy loss).
    """
    # Linear LR scaling rule: base LR 0.1 at a reference batch size of 256,
    # decayed with a cosine schedule down to 1e-4.
    scheduler = gcv.utils.LRScheduler(
        'cosine',
        base_lr=0.1 * batch_size / 256,
        target_lr=0.0001,
        nepochs=epochs,
        iters_per_epoch=iters_per_epoch,
    )
    # SGD with momentum and weight decay; the scheduler drives the LR.
    trainer = gluon.Trainer(
        net.collect_params(),
        'sgd',
        {'wd': 1e-4, 'momentum': 0.9, 'lr_scheduler': scheduler},
    )
    return {
        'trainer': trainer,
        'batch_size': batch_size,
        'criterion': gluon.loss.SoftmaxCrossEntropyLoss(),
    }
from __future__ import division
import argparse, time, logging, os, math
from tqdm import tqdm
import numpy as np
import mxnet as mx
from mxnet import gluon, nd
from mxnet import autograd as ag
from mxnet.gluon import nn
from mxnet.gluon.data.vision import transforms
import gluoncv as gcv
gcv.utils.check_version('0.6.0')
from gluoncv.data import mscoco
from gluoncv.model_zoo import get_model
from gluoncv.utils import makedirs, LRScheduler, LRSequential
from gluoncv.data.transforms.presets.alpha_pose import AlphaPoseDefaultTrainTransform
from gluoncv.utils.metrics import HeatmapAccuracy
from validate_tools import get_val_data_loader, validate
# CLI
# Command-line options for the training script (fragment: additional
# add_argument calls presumably continue past this view).
parser = argparse.ArgumentParser(description='Train a model for image classification.')
parser.add_argument('--dataset', type=str, default='coco',
help='training dataset to use, if you use custom dataset, check get_dataset function.')
# --num-joints has no default, so the script always requires it explicitly.
parser.add_argument('--num-joints', type=int, required=True,
help='Number of joints to detect')
parser.add_argument('--batch-size', type=int, default=32,
help='training batch size per device (CPU/GPU).')