def parse_args():
    parser = argparse.ArgumentParser()
    # NOTE: argparse's type=bool converts via bool(str), so any non-empty string
    # (including 'False') parses as True; see the sketch after this snippet.
    parser.add_argument('--use-difficult', dest='use_difficult', type=bool, default=False,
                        help='use difficult ground-truths in evaluation')
    parser.add_argument('--voc07', dest='use_voc07_metric', type=bool, default=True,
                        help='use PASCAL VOC 07 metric')
    parser.add_argument('--deploy', dest='deploy_net', help='Load network from model',
                        action='store_true', default=False)
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    args = parse_args()
    # choose ctx
    if args.cpu:
        ctx = mx.cpu()
    else:
        ctx = [mx.gpu(int(i)) for i in args.gpu_id.split(',')]
    # parse # classes and class_names if applicable
    num_class = args.num_class
    if len(args.class_names) > 0:
        if os.path.isfile(args.class_names):
            # try to open it to read class names
            with open(args.class_names, 'r') as f:
                class_names = [l.strip() for l in f.readlines()]
        else:
            class_names = [c.strip() for c in args.class_names.split(',')]
        assert len(class_names) == num_class
        for name in class_names:
            assert len(name) > 0
    else:
        class_names = None
    network = None if args.deploy_net else args.network
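# Illustration (not from the repo above): why `type=bool` is a common argparse
# pitfall -- bool() is applied to the raw string, so '--flag False' still yields True.
# The flag name below is hypothetical.
import argparse
demo_parser = argparse.ArgumentParser()
demo_parser.add_argument('--flag', type=bool, default=False)
print(demo_parser.parse_args(['--flag', 'False']).flag)  # True, because bool('False') is True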
def test(self):
    ctx = [mx.gpu(0)]
    plt.ion()
    fig = plt.figure()
    ax = []
    for i in range(self.bs):
        ax.append(fig.add_subplot(1, 1, 1 + i))
        # ax.append(fig.add_subplot(4, 4, 1 + i))
    t = time.time()
    background_iter = image.ImageIter(self.bs, (3, self.h, self.w),
                                      path_imgrec='/media/nolan/9fc64877-3935-46df-9ad0-c601733f5888/HP_31/sun2012_val.rec',
                                      path_imgidx='/media/nolan/9fc64877-3935-46df-9ad0-c601733f5888/HP_31/sun2012_val.idx',
                                      shuffle=True, pca_noise=0,
                                      brightness=0.5, saturation=0.5, contrast=0.5, hue=1.0,
                                      rand_crop=True, rand_resize=True, rand_mirror=True, inter_method=10)
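    # A minimal sketch (not from the repo above) of what might follow inside test():
    # pull one batch from the iterator and show the first image on the axes created
    # above (NCHW float -> HWC uint8 for matplotlib).
    batch = background_iter.next()
    img = batch.data[0][0].asnumpy().transpose(1, 2, 0)  # (3, h, w) -> (h, w, 3)
    ax[0].imshow(img.clip(0, 255).astype('uint8'))
    plt.pause(0.001)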
# TODO: check epoch_size for 'dist_sync'
epoch_size = args.num_examples // args.batch_size
model_args['begin_num_update'] = epoch_size * args.load_epoch
# save model
save_model_prefix = args.save_model_prefix
if save_model_prefix is None:
    save_model_prefix = model_prefix
checkpoint = None if save_model_prefix is None else mx.callback.do_checkpoint(save_model_prefix)
# data
(train, val) = data_loader(args, kv)
# train
devs = mx.cpu() if args.gpus is None else [
    mx.gpu(int(i)) for i in args.gpus.split(',')]
epoch_size = args.num_examples // args.batch_size
if args.kv_store == 'dist_sync':
    epoch_size //= kv.num_workers
    model_args['epoch_size'] = epoch_size
if 'lr_factor' in args and args.lr_factor < 1:
    model_args['lr_scheduler'] = mx.lr_scheduler.FactorScheduler(
        step=max(int(epoch_size * args.lr_factor_epoch), 1),
        factor=args.lr_factor)
if 'clip_gradient' in args and args.clip_gradient is not None:
    model_args['clip_gradient'] = args.clip_gradient
# disable kvstore for single device
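# Illustrative sketch (not part of the script above): a FactorScheduler built as
# above multiplies the learning rate by `factor` once every `step` optimizer updates.
# The numbers below are hypothetical.
example_sched = mx.lr_scheduler.FactorScheduler(step=1000, factor=0.9)
example_sched.base_lr = 0.1
for update in (1, 1001, 2001, 3001):
    print(update, example_sched(update))  # lr shrinks by 10% about once per 1000 updates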
dev_batch_size = args.dev_batch_size
task_name = args.task_name
lr = args.lr
epsilon = args.epsilon
accumulate = args.accumulate
log_interval = args.log_interval * accumulate if accumulate else args.log_interval
if accumulate:
    logging.info('Using gradient accumulation. Effective batch size = '
                 'batch_size * accumulate = %d', accumulate * batch_size)
# random seed
np.random.seed(args.seed)
random.seed(args.seed)
mx.random.seed(args.seed)
ctx = mx.cpu() if args.gpu is None else mx.gpu(args.gpu)
task = tasks[task_name]
# data type with mixed precision training
if args.dtype == 'float16':
    amp.init()
# model and loss
only_inference = args.only_inference
model_name = args.bert_model
dataset = args.bert_dataset
pretrained_bert_parameters = args.pretrained_bert_parameters
model_parameters = args.model_parameters
# load symbolic model
deploy = args.deploy
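# A hypothetical, self-contained sketch of the gradient-accumulation idea referenced
# above: gradients are summed ('add') across `accumulate` mini-batches, the trainer
# steps once on the combined batch, and gradients are then cleared. All names below
# are illustrative, not taken from the script above.
import mxnet as mx
from mxnet import autograd, gluon

demo_net = gluon.nn.Dense(1)
demo_net.initialize()
demo_trainer = gluon.Trainer(demo_net.collect_params(), 'adam', {'learning_rate': 1e-3})
demo_loss = gluon.loss.L2Loss()
demo_accumulate, demo_batch_size = 4, 8
for p in demo_net.collect_params().values():
    p.grad_req = 'add'  # accumulate gradients instead of overwriting them
for step in range(8):
    x = mx.nd.random.uniform(shape=(demo_batch_size, 3))
    y = mx.nd.random.uniform(shape=(demo_batch_size, 1))
    with autograd.record():
        loss = demo_loss(demo_net(x), y)
    loss.backward()
    if (step + 1) % demo_accumulate == 0:
        demo_trainer.step(demo_batch_size * demo_accumulate)  # one update per 4 batches
        demo_net.collect_params().zero_grad()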
p = np.random.permutation(mnist_pca.shape[0])
X = mnist_pca[p] / 255.
Y = Y[p]
X_show = X[p]
# This is just to normalize the input and separate train set and test set
X_train = X[:60000]
X_test = X[60000:]
X_show = X_show[60000:]
Y_train = Y[:60000]
Y_test = Y[60000:]
print("Data prepared.")
# Article's suggestion on batch size
batch_size = 200
ctx = mx.gpu() if len(mx.test_utils.list_gpus()) > 0 else mx.cpu()
results = {}
for output in [mlp_svm_l2, mlp_svm_l1, mlp_softmax]:
    print("\nTesting with %s \n" % output.name)
    label = output.name + "_label"
    train_iter = mx.io.NDArrayIter(X_train, Y_train, batch_size=batch_size, label_name=label)
    test_iter = mx.io.NDArrayIter(X_test, Y_test, batch_size=batch_size, label_name=label)
    # Here we instantiate and fit the model for our data.
    # The article actually suggests using 400 epochs,
    # but I reduced it to 10 for convenience.
    mod = mx.mod.Module(
        context=ctx,
        symbol=output,        # the output symbol selected by the loop
        label_names=[label],
    )
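    # What typically follows here (a sketch, not the original source): fit the module
    # on the training iterator and record test accuracy. The optimizer settings and
    # epoch count below are assumptions.
    mod.fit(train_iter,
            eval_data=test_iter,
            eval_metric='acc',
            optimizer_params={'learning_rate': 0.1},
            num_epoch=10)
    results[output.name] = mod.score(test_iter, 'acc')[0][1]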
def create_CNN_codes(CnnCodes, arg_params, aux_params):
    '''
    :param CnnCodes: symbol. orig conv net with some layer (probably just FC one) removed
    :param arg_params: orig conv net weights, biases
    :param aux_params: orig conv net batch norm params
    '''
    # create a module using CnnCodes only
    orig_CNN_mod = mx.mod.Module(symbol=CnnCodes, label_names=None, context=mx.gpu())
    orig_CNN_mod.bind(for_training=False, data_shapes=[('data', (1, 3, 224, 224))])
    # init params, including the new net
    orig_CNN_mod.init_params(initializer=mx.init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2),
                             force_init=True)
    # then set the orig net to orig params
    orig_CNN_mod.set_params(arg_params, aux_params, allow_missing=True, force_init=True)
    # get what the headless conv net outputs, and the correct labels
    train_outputs, train_labels = getHeadlessConvOutputs(orig_CNN_mod, path_imgrec=settings.RECORDIO_TRAIN_FILE)
    test_outputs, test_labels = getHeadlessConvOutputs(orig_CNN_mod, path_imgrec=settings.RECORDIO_TEST_FILE)
    return train_outputs, train_labels, test_outputs, test_labels
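# A hypothetical sketch (not from the repo above) of how the returned CNN codes could
# be used: train a small fully-connected classifier on the fixed features. The helper
# name, shapes, and hyperparameters below are assumptions.
def train_classifier_on_codes(train_outputs, train_labels, num_classes, batch_size=32):
    data = mx.sym.Variable('data')
    fc = mx.sym.FullyConnected(data=data, num_hidden=num_classes)
    out = mx.sym.SoftmaxOutput(data=fc, name='softmax')
    code_iter = mx.io.NDArrayIter(train_outputs, train_labels, batch_size, shuffle=True)
    clf = mx.mod.Module(symbol=out, context=mx.gpu())
    clf.fit(code_iter, num_epoch=10, optimizer_params={'learning_rate': 0.01})
    return clf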
from mxgan import module, generator, encoder, viz
def ferr(label, pred):
    pred = pred.ravel()
    label = label.ravel()
    return np.abs(label - (pred > 0.5)).sum() / label.shape[0]
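# Illustrative only (not from the repo above): `ferr` is the fraction of thresholded
# (>0.5) predictions that disagree with the binary labels, and can be wrapped as a
# custom metric for module training via mx.metric.CustomMetric(ferr).
print(ferr(np.array([1., 0., 1.]), np.array([0.9, 0.2, 0.4])))  # one of three wrong -> ~0.333
ferr_metric = mx.metric.CustomMetric(ferr, name='ferr')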
ngf = 64
lr = 0.0003
beta1 = 0.5
batch_size = 100
rand_shape = (batch_size, 100)
num_epoch = 100
data_shape = (batch_size, 3, 32, 32)
context = mx.gpu()
logging.basicConfig(level=logging.DEBUG, format='%(asctime)-15s %(message)s')
sym_gen = generator.dcgan32x32(oshape=data_shape, ngf=ngf, final_act="tanh")
sym_dec = encoder.dcgan(ngf=ngf // 2)
gmod = module.GANModule(
    sym_gen,
    sym_dec,
    context=context,
    data_shape=data_shape,
    code_shape=rand_shape)
gmod.modG.init_params(mx.init.Normal(0.05))
gmod.modD.init_params(mx.init.Xavier(factor_type="in", magnitude=2.34))
gmod.init_optimizer(
    optimizer="adam",
    optimizer_params={"learning_rate": lr, "beta1": beta1})
def get_train_context(num_cpus, num_gpus):
    if num_gpus > 0:
        return mx.gpu()
    return mx.cpu()
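# Illustrative usage (hypothetical argument values, not from the repo above):
train_ctx = get_train_context(num_cpus=4, num_gpus=len(mx.test_utils.list_gpus()))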
def train(args):
    np.random.seed(args.seed)
    if args.cuda:
        ctx = mx.gpu(0)
    else:
        ctx = mx.cpu(0)
    # dataloader
    transform = utils.Compose([utils.Scale(args.image_size),
                               utils.CenterCrop(args.image_size),
                               utils.ToTensor(ctx),
                               ])
    train_dataset = data.ImageFolder(args.dataset, transform)
    train_loader = gluon.data.DataLoader(train_dataset, batch_size=args.batch_size,
                                         last_batch='discard')
    style_loader = utils.StyleLoader(args.style_folder, args.style_size, ctx=ctx)
    print('len(style_loader):', style_loader.size())
    # models
    vgg = net.Vgg16()
    utils.init_vgg_params(vgg, 'models', ctx=ctx)
    style_model = net.Net(ngf=args.ngf)
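    # A hypothetical sketch (not the original code) of what typically follows:
    # initialize the style network on the chosen context and create a trainer before
    # looping over content/style batches. The initializer, optimizer settings, and
    # use of args.lr here are assumptions.
    style_model.initialize(mx.init.Xavier(), ctx=ctx)
    trainer = gluon.Trainer(style_model.collect_params(), 'adam',
                            {'learning_rate': args.lr})
    mse_loss = gluon.loss.L2Loss()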