if tmpdir is None:
    MAX_LEN = 512
    # 32 is whitespace
    dir_tensor = torch.full((MAX_LEN, ),
                            32,
                            dtype=torch.uint8,
                            device='cuda')
    if rank == 0:
        tmpdir = tempfile.mkdtemp()
        tmpdir = torch.tensor(
            bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
        dir_tensor[:len(tmpdir)] = tmpdir
    dist.broadcast(dir_tensor, 0)
    tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
    mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
dist.barrier()
# collect all parts
if rank != 0:
    return None
else:
    # load results of all parts from tmp dir
    part_list = []
    for i in range(world_size):
        part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
        part_list.append(mmcv.load(part_file))
    # sort the results
    ordered_results = []
    for res in zip(*part_list):
        ordered_results.extend(list(res))
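
The whitespace-padded uint8 tensor above is simply a fixed-length byte buffer used to broadcast the temporary directory name to every rank; a minimal CPU-only sketch of that round trip (illustrative, no process group involved):

import tempfile
import torch

MAX_LEN = 512
name = tempfile.mkdtemp()
# pad with ASCII 32 (space) so every rank allocates the same fixed-size buffer
buf = torch.full((MAX_LEN, ), 32, dtype=torch.uint8)
encoded = torch.tensor(bytearray(name.encode()), dtype=torch.uint8)
buf[:len(encoded)] = encoded                       # what rank 0 does before dist.broadcast
decoded = buf.numpy().tobytes().decode().rstrip()  # what every rank does after the broadcast
assert decoded == name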
opt['val_partition'] = 'REDS4'
opt['num_frame'] = 5
opt['gt_size'] = 256
opt['interval_list'] = [1]
opt['random_reverse'] = True
opt['use_flip'] = True
opt['use_rot'] = True
opt['use_shuffle'] = True
opt['num_worker'] = 1
opt['batch_size'] = 16
opt['scale'] = 4
opt['dataset_enlarge_ratio'] = 1
mmcv.mkdir_or_exist('tmp')
dataset = create_dataset(opt)
data_loader = create_dataloader(dataset, opt, opt, None)
nrow = int(math.sqrt(opt['batch_size']))
padding = 2 if opt['phase'] == 'train' else 0
print('start...')
for i, data in enumerate(data_loader):
    if i > 5:
        break
    print(i)
    lq = data['lq']
    gt = data['gt']
    key = data['key']
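    # Hypothetical continuation (not in the snippet): this is presumably where
    # nrow and padding get used, dumping each temporal frame of the (b, t, c, h, w)
    # lq batch plus the gt batch as image grids for visual inspection.
    # Assumes `import torchvision` at the top of the script.
    for j in range(opt['num_frame']):
        torchvision.utils.save_image(
            lq[:, j, :, :, :], 'tmp/lq_{:03d}_frame{}.png'.format(i, j),
            nrow=nrow, padding=padding, normalize=False)
    torchvision.utils.save_image(
        gt, 'tmp/gt_{:03d}.png'.format(i), nrow=nrow, padding=padding, normalize=False)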
opt['dataroot_gt'] = 'datasets/DIV2K/DIV2K_train_HR_sub.lmdb'
opt['dataroot_lq'] = 'datasets/DIV2K/DIV2K_train_LR_bicubic_X4_sub.lmdb'
opt['io_backend'] = dict(type='lmdb')
opt['gt_size'] = 128
opt['use_flip'] = True
opt['use_rot'] = True
opt['use_shuffle'] = True
opt['num_worker'] = 1
opt['batch_size'] = 16
opt['scale'] = 4
opt['dataset_enlarge_ratio'] = 1
mmcv.mkdir_or_exist('tmp')
dataset = create_dataset(opt)
data_loader = create_dataloader(dataset, opt, opt, None)
nrow = int(math.sqrt(opt['batch_size']))
padding = 2 if opt['phase'] == 'train' else 0
print('start...')
for i, data in enumerate(data_loader):
    if i > 5:
        break
    print(i)
    lq = data['lq']
    gt = data['gt']
    lq_path = data['lq_path']
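    # Rough shape check (an assumption about this paired dataset's layout, not
    # verified here): lq and gt are (b, c, h, w) and differ only by the scale
    # factor, so with gt_size=128 and scale=4 the lq patches should be 32x32.
    assert gt.shape[-1] == opt['gt_size']
    assert lq.shape[-1] == opt['gt_size'] // opt['scale']
    print(lq.shape, gt.shape, lq_path)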
opt['io_backend'] = dict(type='lmdb')
opt['num_frame'] = 7
opt['gt_size'] = 256
opt['random_reverse'] = True
opt['use_flip'] = True
opt['use_rot'] = True
opt['use_shuffle'] = True
opt['num_worker'] = 1
opt['batch_size'] = 16
opt['scale'] = 4
opt['dataset_enlarge_ratio'] = 1
mmcv.mkdir_or_exist('tmp')
dataset = create_dataset(opt)
data_loader = create_dataloader(dataset, opt, opt, None)
nrow = int(math.sqrt(opt['batch_size']))
padding = 2 if opt['phase'] == 'train' else 0
print('start...')
for i, data in enumerate(data_loader):
    if i > 5:
        break
    print(i)
    lq = data['lq']
    gt = data['gt']
    key = data['key']
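    # Expected layouts (an assumption about this video dataset, not verified
    # here): lq stacks the 7 low-res frames as (b, t, c, h, w), while gt is the
    # single centre frame at (b, c, gt_size, gt_size).
    print(lq.shape, gt.shape, key)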
cfg.work_dir = args.work_dir
if args.resume_from is not None:
    cfg.resume_from = args.resume_from
if args.validate is not None:
    cfg.validate = args.validate
if args.gpus is not None:
    cfg.gpus = args.gpus

# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
    distributed = False
else:
    distributed = True
    init_dist(args.launcher, **cfg.dist_params)

mkdir_or_exist(cfg.work_dir)
# init the logger before other steps and set up the training logger
logger = get_root_logger(cfg.work_dir, cfg.log_level)
logger.info("Using {} GPUs".format(cfg.gpus))
logger.info('Distributed training: {}'.format(distributed))

# log environment info
logger.info("Collecting env info (might take some time)")
logger.info("\n" + collect_env_info())
logger.info(args)
logger.info("Running with config:\n{}".format(cfg.text))

# set random seeds
if args.seed is not None:
    logger.info('Set random seed to {}'.format(args.seed))
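
For context, init_dist with the 'pytorch' launcher essentially selects a GPU from the RANK environment variable and initialises the default process group; the helper below is a simplified, illustrative sketch rather than the exact mmcv implementation.

import os
import torch
import torch.distributed as dist
import torch.multiprocessing as mp

def init_dist_pytorch_sketch(backend='nccl', **kwargs):
    # Illustrative only: real launchers also cover 'mpi' and 'slurm'.
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    rank = int(os.environ['RANK'])
    torch.cuda.set_device(rank % torch.cuda.device_count())
    dist.init_process_group(backend=backend, **kwargs)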
cfg.resume_from = args.resume_from
cfg.gpus = args.gpus

if args.autoscale_lr:
    # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
    cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8

# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
    distributed = False
else:
    distributed = True
    init_dist(args.launcher, **cfg.dist_params)

# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, '{}.log'.format(timestamp))
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

# log some basic info
logger.info('Distributed training: {}'.format(distributed))
logger.info('MMDetection Version: {}'.format(__version__))
logger.info('Config:\n{}'.format(cfg.text))

# set random seeds
if args.seed is not None:
    logger.info('Set random seed to {}, deterministic: {}'.format(
        args.seed, args.deterministic))
    set_random_seed(args.seed, deterministic=args.deterministic)
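
The set_random_seed helper called above is conventionally a thin wrapper over the usual RNG seeds; the sketch below shows what such a helper typically does (assumed behaviour, not necessarily this project's exact code).

import random
import numpy as np
import torch

def set_random_seed_sketch(seed, deterministic=False):
    # Seed Python, NumPy and PyTorch (CPU and every visible GPU).
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if deterministic:
        # cuDNN reproducibility at the cost of some speed.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False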
cfg.resume_from = args.resume_from
cfg.gpus = args.gpus

if args.autoscale_lr:
    # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
    cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8

# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
    distributed = False
else:
    distributed = True
    init_dist(args.launcher, **cfg.dist_params)

# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, '{}.log'.format(timestamp))
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([('{}: {}'.format(k, v))
                      for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
            dash_line)
meta['env_info'] = env_info
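# Hypothetical continuation (not shown in the snippet): the meta dict typically
# also records the seed so it gets stored alongside checkpoints and logs.
if args.seed is not None:
    set_random_seed(args.seed, deterministic=args.deterministic)
    meta['seed'] = args.seed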
def save_result(result, out_dir, image_name):
    result_tool = ShowResultTool()
    result = result_tool(result, color_map='gray', bins=100)
    if 'GrayDisparity' in result.keys():
        grayEstDisp = result['GrayDisparity']
        gray_save_path = osp.join(out_dir, 'disp_0')
        mkdir_or_exist(gray_save_path)
        skimage.io.imsave(osp.join(gray_save_path, image_name), (grayEstDisp * 256).astype('uint16'))
    if 'ColorDisparity' in result.keys():
        colorEstDisp = result['ColorDisparity']
        color_save_path = osp.join(out_dir, 'color_disp')
        mkdir_or_exist(color_save_path)
        plt.imsave(osp.join(color_save_path, image_name), colorEstDisp, cmap=plt.cm.hot)
    if 'GroupColor' in result.keys():
        group_save_path = os.path.join(out_dir, 'group_disp')
        mkdir_or_exist(group_save_path)
        plt.imsave(osp.join(group_save_path, image_name), result['GroupColor'], cmap=plt.cm.hot)
    if 'ColorConfidence' in result.keys():
        conf_save_path = os.path.join(out_dir, 'confidence')
        mkdir_or_exist(conf_save_path)
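        # Hypothetical completion (the snippet is cut off here): mirror the other
        # branches and write the confidence map with the same colormap.
        plt.imsave(osp.join(conf_save_path, image_name), result['ColorConfidence'], cmap=plt.cm.hot)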
def prepare_visualize(result, epoch, work_dir, image_name):
    result_tool = ShowResultTool()
    result = result_tool(result, color_map='gray', bins=100)
    mkdir_or_exist(os.path.join(work_dir, image_name))
    save_path = os.path.join(work_dir, image_name, '{}.png'.format(epoch))
    plt.imsave(save_path, result['GroupColor'], cmap=plt.cm.hot)

    log_result = {}
    for pred_item in result.keys():
        log_name = image_name + '/' + pred_item
        if pred_item == 'Disparity':
            log_result['image/' + log_name] = result[pred_item]
        if pred_item == 'GroundTruth':
            log_result['image/' + log_name] = result[pred_item]
        if pred_item == 'Confidence':
            log_result['image/' + log_name] = result[pred_item]
            # save confidence map
            conf_save_path = os.path.join(work_dir, image_name, 'conf_{}.png'.format(epoch))
            plt.imsave(conf_save_path, log_result['image/' + log_name][0].transpose((1, 2, 0)))
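
The [0].transpose((1, 2, 0)) above takes the first item of what is presumably a batched CHW array and turns it into the HWC layout plt.imsave expects; a standalone illustration with made-up values:

import numpy as np
chw_batch = np.random.rand(1, 3, 64, 64)   # one CHW confidence map (illustrative)
hwc = chw_batch[0].transpose((1, 2, 0))    # (3, 64, 64) -> (64, 64, 3)
assert hwc.shape == (64, 64, 3)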