if torch.cuda.is_available() and not opt.cuda:
    print("WARNING: You have a CUDA device, so you should probably run with --cuda")

train_dataset = dataset.lmdbDataset(root=opt.trainroot)
assert train_dataset
if not opt.random_sample:
    sampler = dataset.randomSequentialSampler(train_dataset, opt.batchSize)
else:
    sampler = None
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=opt.batchSize,
    shuffle=(sampler is None), sampler=sampler,  # shuffle and a custom sampler are mutually exclusive
    num_workers=int(opt.workers),
    collate_fn=dataset.alignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio=opt.keep_ratio))
test_dataset = dataset.lmdbDataset(
    root=opt.valroot, transform=dataset.resizeNormalize((100, 32)))
nclass = len(opt.alphabet) + 1  # +1 for the CTC blank symbol
nc = 1                          # single-channel (grayscale) input

converter = utils.strLabelConverter(opt.alphabet)
criterion = CTCLoss()
# custom weights initialization called on crnn
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
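# A minimal, self-contained sketch of how a weights_init function like the one
# above is typically used: nn.Module.apply(fn) calls fn on every submodule
# recursively, so Conv and BatchNorm layers get the DCGAN-style init while
# other layers are left untouched. The small Sequential model here is purely
# illustrative; in these snippets the target would be the CRNN network.
import torch.nn as nn

demo = nn.Sequential(
    nn.Conv2d(1, 64, 3, padding=1),
    nn.BatchNorm2d(64),
    nn.ReLU(inplace=True))
demo.apply(weights_init)  # e.g. crnn.apply(weights_init) in the full script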
if torch.cuda.is_available() and not opt.cuda:
    print("WARNING: You have a CUDA device, so you should probably run with --cuda")

train_dataset = dataset.lmdbDataset(root=opt.trainroot)
assert train_dataset
if not opt.random_sample:
    sampler = dataset.randomSequentialSampler(train_dataset, opt.batchSize)
else:
    sampler = None
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=opt.batchSize,
    shuffle=(sampler is None), sampler=sampler,  # shuffle and a custom sampler are mutually exclusive
    num_workers=int(opt.workers),
    collate_fn=dataset.alignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio=opt.keep_ratio))
test_dataset = dataset.lmdbDataset(
    root=opt.valroot, transform=dataset.resizeNormalize((256, 32)))
ngpu = int(opt.ngpu)
nh = int(opt.nh)  # size of the LSTM hidden state
alphabet = opt.alphabet
nclass = len(alphabet) + 1  # +1 for the CTC blank symbol
nc = 1

converter = utils.strLabelConverter(alphabet)
criterion = CTCLoss()
# custom weights initialization called on crnn
# (truncated in this snippet; completed from the full version shown later on this page)
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
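# Hedged sketch of the loss computation these snippets build toward, written
# with torch.nn.CTCLoss rather than the external warp-ctc CTCLoss binding used
# above; nclass = len(alphabet) + 1 reserves one extra class (index 0 here)
# for the CTC blank. All shapes below are illustrative.
import torch
import torch.nn as nn

T, N, C = 26, 4, 37  # time steps, batch size, len(alphabet) + 1
log_probs = torch.randn(T, N, C).log_softmax(2)          # model output
targets = torch.randint(1, C, (N, 5), dtype=torch.long)  # label indices, 0 = blank
input_lengths = torch.full((N,), T, dtype=torch.long)
target_lengths = torch.full((N,), 5, dtype=torch.long)

ctc = nn.CTCLoss(blank=0)
loss = ctc(log_probs, targets, input_lengths, target_lengths)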
if torch.cuda.is_available() and not opt.cuda:
    print("WARNING: You have a CUDA device, so you should probably run with --cuda")

train_dataset = dataset.lmdbDataset(root=opt.trainroot)
assert train_dataset
if False:  # opt.random_sample is disabled here; DataLoader shuffling is used instead
    sampler = dataset.randomSequentialSampler(train_dataset, opt.batchSize)
else:
    sampler = None
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=opt.batchSize,
    shuffle=True, sampler=sampler,  # sampler is always None here, so shuffle=True is safe
    num_workers=int(opt.workers),
    collate_fn=dataset.alignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio=opt.keep_ratio))
test_dataset = dataset.lmdbDataset(
    root=opt.valroot, transform=dataset.resizeNormalize((100, 32)))

nclass = len(opt.alphabet) + 1  # +1 for the CTC blank symbol
nc = 1

converter = utils.strLabelConverter(opt.alphabet)
# custom weights initialization called on crnn
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
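# Hedged sketch of what utils.strLabelConverter does, based on how it is used
# in these snippets: map strings to 1-based integer label sequences (index 0
# stays reserved for the CTC blank) plus per-sample lengths. The real class
# also implements decoding; this cut-down version is only for illustration.
import torch

class LabelConverterSketch(object):
    def __init__(self, alphabet):
        self.dict = {char: i + 1 for i, char in enumerate(alphabet)}  # 0 = blank

    def encode(self, texts):
        lengths = [len(s) for s in texts]
        flat = [self.dict[c] for c in ''.join(texts)]
        return torch.IntTensor(flat), torch.IntTensor(lengths)

conv = LabelConverterSketch('0123456789abcdefghijklmnopqrstuvwxyz')
targets, lengths = conv.encode(['ab', '123'])  # tensor([11, 12, 2, 3, 4]), tensor([2, 3])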
if opt.experiment is None:
    opt.experiment = 'expr'
os.makedirs(opt.experiment, exist_ok=True)  # avoids shelling out via os.system('mkdir ...')

opt.manualSeed = random.randint(1, 10000)  # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
cudnn.benchmark = True
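# A hedged side note on the seeding above: cudnn.benchmark = True lets cuDNN
# auto-tune convolution algorithms, which speeds training but can make runs
# non-reproducible even with fixed seeds. A stricter (slower) setup would be:
import torch.backends.cudnn as cudnn

cudnn.benchmark = False
cudnn.deterministic = True  # force deterministic cuDNN kernels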
def data_loader():
    # train
    train_dataset = dataset.lmdbDataset(root=args.trainroot)
    assert train_dataset
    if not params.random_sample:
        sampler = dataset.randomSequentialSampler(train_dataset, params.batchSize)
    else:
        sampler = None
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=params.batchSize,
        shuffle=(sampler is None), sampler=sampler,  # shuffle and a custom sampler are mutually exclusive
        num_workers=int(params.workers),
        collate_fn=dataset.alignCollate(imgH=params.imgH, imgW=params.imgW, keep_ratio=params.keep_ratio))

    # val
    val_dataset = dataset.lmdbDataset(root=args.valroot, transform=dataset.resizeNormalize((params.imgW, params.imgH)))
    assert val_dataset
    val_loader = torch.utils.data.DataLoader(val_dataset, shuffle=True, batch_size=params.batchSize, num_workers=int(params.workers))

    return train_loader, val_loader
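# Self-contained demonstration of why the loaders above use
# shuffle=(sampler is None): DataLoader raises a ValueError when shuffle=True
# is combined with a custom sampler, so the guard keeps both branches valid.
import torch
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset

ds = TensorDataset(torch.arange(8))
smp = SequentialSampler(ds)
loader = DataLoader(ds, batch_size=2, shuffle=(smp is None), sampler=smp)
for (batch,) in loader:
    print(batch)  # tensor([0, 1]), tensor([2, 3]), ...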
if i > n - 1:
    i = 0
    break
yield [
    X, Y,
    np.ones(batchs) * int(Length),
    np.ones(batchs) * n_len
], np.ones(batchs)
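# Hedged sketch of the Keras-side wiring this generator fragment implies: the
# four-element input list [X, Y, input_length, label_length] feeds a Lambda
# layer over K.ctc_batch_cost, and the trailing np.ones(batchs) is a dummy
# target because the CTC "loss" is itself the model output. Names here are
# assumptions reconstructed from the yield statement above.
from tensorflow.keras import backend as K

def ctc_lambda_func(args):
    y_pred, labels, input_length, label_length = args
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)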
train_dataset = dataset.lmdbDataset(root=trainroot, target_transform=one_hot)  # define the dataset before the sampler that wraps it
if random_sample:
    sampler = dataset.randomSequentialSampler(train_dataset, batchSize)
else:
    sampler = None
train_loader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=batchSize,
    shuffle=(sampler is None),  # shuffle and a custom sampler are mutually exclusive
    sampler=sampler,
    num_workers=int(workers),
    collate_fn=dataset.alignCollate(
        imgH=imgH, imgW=imgW, keep_ratio=keep_ratio))
test_dataset = dataset.lmdbDataset(
    root=valroot,
    transform=dataset.resizeNormalize((imgW, imgH)),
    target_transform=one_hot)
test_loader = torch.utils.data.DataLoader(
    test_dataset, batch_size=batchSize, num_workers=int(workers))  # completed from the val-loader pattern above
# body of the label-encoding helper (the def line is cut off in this snippet;
# a self-contained reconstruction follows below)
label = np.zeros(length)
for i, char in enumerate(text):
    index = characters.find(char)
    if index == -1:
        index = characters.find(u' ')  # map unknown characters to space
    if i < length:
        label[i] = index
return label
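# A self-contained, hedged reconstruction of the helper above; the function
# name text_to_label and the example alphabet are assumptions, but the body
# matches the fragment: unknown characters fall back to the space index and
# labels are zero-padded to a fixed length.
import numpy as np

characters = u'0123456789abcdefghijklmnopqrstuvwxyz '

def text_to_label(text, length):
    label = np.zeros(length)
    for i, char in enumerate(text):
        index = characters.find(char)
        if index == -1:
            index = characters.find(u' ')
        if i < length:
            label[i] = index
    return label

print(text_to_label('ab', 8))  # [10. 11.  0.  0.  0.  0.  0.  0.]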
# load the data
train_dataset = dataset.lmdbDataset(root=trainroot, target_transform=one_hot)  # defined before the sampler that wraps it
# print(len(train_dataset))
if random_sample:
    sampler = dataset.randomSequentialSampler(train_dataset, batchSize)
else:
    sampler = None
test_dataset = dataset.lmdbDataset(
    root=valroot,
    transform=dataset.resizeNormalize((imgW, imgH)),
    target_transform=one_hot)
# build the training data loader
train_loader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=batchSize,
    shuffle=(sampler is None),  # shuffle and a custom sampler are mutually exclusive
    sampler=sampler,
    num_workers=int(workers),
    collate_fn=dataset.alignCollate(
        imgH=imgH, imgW=imgW, keep_ratio=keep_ratio))
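# Hedged sketch of what a dataset.resizeNormalize((imgW, imgH)) transform
# typically does in these CRNN pipelines: resize a PIL image to a fixed
# width x height, convert it to a tensor, and shift values from [0, 1] to
# [-1, 1]. The real implementation in dataset.py may differ in detail.
from PIL import Image
import torchvision.transforms as transforms

class ResizeNormalizeSketch(object):
    def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = size                      # (width, height)
        self.interpolation = interpolation
        self.to_tensor = transforms.ToTensor()

    def __call__(self, img):
        img = img.resize(self.size, self.interpolation)
        img = self.to_tensor(img)             # [0, 1], shape (C, H, W)
        img.sub_(0.5).div_(0.5)               # -> [-1, 1]
        return img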
if __name__ == '__main__':
    args = init_args()
    manualSeed = random.randint(1, 10000)  # fix seed
    random.seed(manualSeed)
    np.random.seed(manualSeed)
    torch.manual_seed(manualSeed)
    cudnn.benchmark = True

    # store model path
    if not os.path.exists('./expr'):
        os.mkdir('./expr')

    # read train set (paths come from the argparse args, hyperparameters from the params module)
    train_dataset = dataset.lmdbDataset(root=args.trainroot)
    assert train_dataset
    if not params.random_sample:
        sampler = dataset.randomSequentialSampler(train_dataset, params.batchSize)
    else:
        sampler = None
    # images will be resized to imgH x imgW (e.g. 32 x 160)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=params.batchSize,
        shuffle=(sampler is None), sampler=sampler,  # shuffle and sampler are mutually exclusive
        num_workers=int(params.workers),
        collate_fn=dataset.alignCollate(imgH=params.imgH, imgW=params.imgW, keep_ratio=params.keep_ratio))
    # read test set
    # images will be resized to imgH x imgW (e.g. 32 x 160)
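    # Hedged completion of the test-set step the two comments above describe,
    # following the val-set pattern from data_loader() earlier on this page;
    # treat it as a sketch rather than the original file's exact code.
    test_dataset = dataset.lmdbDataset(
        root=args.valroot, transform=dataset.resizeNormalize((params.imgW, params.imgH)))
    test_loader = torch.utils.data.DataLoader(
        test_dataset, batch_size=params.batchSize, num_workers=int(params.workers))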