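# Snippet 1: evaluating a face-landmarks model on a validation set.
# NDG, Rescale, ToTensor, models_landmarks, load_model_state and evaluate are
# helpers assumed to come from the surrounding project; DataLoader and cudnn
# are the standard torch.utils.data / torch.backends.cudnn imports.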
dataset = NDG(args.val, args.v_land)
if dataset.have_landmarks:
    log.info('Use alignment for the validation data')
    dataset.transform = t.Compose([Rescale((48, 48)), ToTensor(switch_rb=True)])
else:
    exit()

val_loader = DataLoader(dataset, batch_size=args.val_batch_size, num_workers=4, shuffle=False, pin_memory=True)
model = models_landmarks['landnet']()
assert args.snapshot is not None
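# Optionally wrap the model with a compression algorithm (sparsity/quantization)
# before restoring weights, so the snapshot layout matches the wrapped model.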
if args.compr_config:
    config = Config.from_json(args.compr_config)
    compression_algo = create_compression_algorithm(model, config)
    model = compression_algo.model
log.info('Testing snapshot ' + args.snapshot + ' ...')
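# Restore weights in eval state and wrap for single-device inference.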
model = load_model_state(model, args.snapshot, args.device, eval_state=True)
model.eval()
cudnn.benchmark = True
model = torch.nn.DataParallel(model, device_ids=[args.device])
log.info('Face landmarks model:')
log.info(model)
avg_err, per_point_avg_err, failures_rate = evaluate(val_loader, model)
log.info('Avg RMSE error: {}'.format(avg_err))
log.info('Per landmark RMSE error: {}'.format(per_point_avg_err))
log.info('Failure rate: {}'.format(failures_rate))
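# Report the achieved sparsity when a sparsity algorithm is active.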
if args.compr_config and "sparsity_level" in compression_algo.statistics():
    log.info("Sparsity level: {0:.2f}".format(
        compression_algo.statistics()['sparsity_rate_for_sparsified_modules']))
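
# Snippet 2: training setup -- TensorBoard logging, snapshot folder creation,
# backbone construction with a classification head, optional compression, and
# snapshot resuming. SummaryWriter is assumed to come from tensorboardX or
# torch.utils.tensorboard.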
log_path = './logs/{:%Y_%m_%d_%H_%M}_{}'.format(datetime.datetime.now(), args.snap_prefix)
writer = SummaryWriter(log_path)
if not osp.exists(args.snap_folder):
    os.mkdir(args.snap_folder)
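# Build the backbone with a classification head (feature=False) sized to the dataset.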
model = models_backbones[args.model](embedding_size=args.embed_size,
                                     num_classes=dataset.get_num_classes(), feature=False)
set_dropout_fn = model.set_dropout_ratio
compression_algo = None
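# When resuming, wrap with the compression algorithm before loading so the
# restored state matches the (possibly compressed) model.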
if args.snap_to_resume is not None:
    if args.compr_config:
        config = Config.from_json(args.compr_config)
        compression_algo = create_compression_algorithm(model, config)
        model = compression_algo.model

    log.info('Resuming snapshot ' + args.snap_to_resume + ' ...')
    model = load_model_state(model, args.snap_to_resume, args.devices[0], eval_state=False)
    model = torch.nn.DataParallel(model, device_ids=args.devices)
else:
    model = torch.nn.DataParallel(model, device_ids=args.devices, output_device=args.devices[0])

model.cuda()
model.train()
cudnn.benchmark = True
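# Optional ONNX export; a compression algorithm provides its own export path.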
if args.to_onnx is not None:
    if args.compr_config:
        compression_algo.export_model(args.to_onnx)
    else:
        model = model.eval().cpu()
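
# Snippet 3: evaluation entry point -- runs either a PyTorch snapshot
# ('pt' engine) or an OpenVINO Inference Engine model.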
parser.add_argument('-c', '--compr_config', help='Path to a file with compression parameters', required=False)
args = parser.parse_args()
if args.engine == 'pt':
    assert args.snap is not None, 'To evaluate a PyTorch snapshot, please specify the --snap option.'
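    # Operator patching must run before the model is built so that
    # compressible operations can be traced (NNCF-style workflow, assumed).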
    if args.compr_config:
        patch_torch_operators()
    with torch.cuda.device(args.devices[0]):
        data, embeddings_fun = load_test_dataset(args)
        model = models_backbones[args.model](embedding_size=args.embed_size, feature=True)
        if args.compr_config:
            config = Config.from_json(args.compr_config)
            compression_algo = create_compression_algorithm(model, config)
            model = compression_algo.model
        model = load_model_state(model, args.snap, args.devices[0])
        evaluate(args, data, model, embeddings_fun, args.val_batch_size, args.dump_embeddings,
                 args.roc_fname, args.snap, True, args.show_failed)
        if args.compr_config and "sparsity_level" in compression_algo.statistics():
            log.info("Sparsity level: {0:.2f}".format(
                compression_algo.statistics()['sparsity_rate_for_sparsified_modules']))
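# Inference Engine path: run the converted model on CPU via OpenVINO.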
else:
    from utils.ie_tools import load_ie_model
    assert args.fr_model is not None, 'To evaluate an IE model, please specify the --fr_model option.'
    fr_model = load_ie_model(args.fr_model, 'CPU', args.plugin_dir)
    lm_model = None
    if args.lm_model:
        # Assumption: the landmarks model is loaded the same way as the recognition model.
        lm_model = load_ie_model(args.lm_model, 'CPU', args.plugin_dir)
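
# Snippet 4: building an SSD detection model wrapped with a compression
# algorithm. build_ssd, load_state and prepare_model_for_execution are
# helpers assumed from the surrounding project.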
def create_model(config):
    ssd_net = build_ssd(config.model, config.ssd_params, config.input_sample_size[-1],
                        config.num_classes, config)
    ssd_net.to(config.device)
    compression_algo = create_compression_algorithm(ssd_net, config)
    ssd_net = compression_algo.model

    weights = config.get('weights')
    if weights:
        sd = torch.load(weights, map_location='cpu')
        load_state(ssd_net, sd)

    ssd_net.train()
    model, _ = prepare_model_for_execution(ssd_net, config)
    return compression_algo, model
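
# Example use (hypothetical `config` object):
#   compression_algo, model = create_model(config)
#   train the returned model, then call compression_algo.export_model(path) for ONNX.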