src_vocab['stoi'] = load_vocab(opt.src_vocab)
trg_vocab['stoi'] = load_vocab(opt.trg_vocab)
src_vocab['itos'] = invert_vocab(src_vocab['stoi'])
trg_vocab['itos'] = invert_vocab(trg_vocab['stoi'])
# special tokens (string forms assumed to match the vocabulary files)
UNK = '<unk>'
SOS = '<sos>'
EOS = '<eos>'
PAD = '<pad>'
opt.enc_pad = src_vocab['stoi'][PAD]
opt.dec_sos = trg_vocab['stoi'][SOS]
opt.dec_eos = trg_vocab['stoi'][EOS]
opt.dec_pad = trg_vocab['stoi'][PAD]
opt.enc_ntok = len(src_vocab['stoi'])
opt.dec_ntok = len(trg_vocab['stoi'])
# load dataset for testing
test_dataset = dataset(opt.test_src, opt.test_trg)
test_iter = torch.utils.data.DataLoader(test_dataset, 1, shuffle=False, collate_fn=lambda x: zip(*x))
# create the model
model = getattr(model, opt.model)(opt).to(device)
state_dict = torch.load(os.path.join(opt.checkpoint, opt.name))
model.load_state_dict(state_dict)
model.eval()
def bleu_script(f):
    # score the hypothesis file f against the reference files with the external eval script
    ref_stem = opt.test_trg[0][:-1] + '*'
    cmd = '{eval_script} {refs} {hyp}'.format(eval_script=opt.eval_script, refs=ref_stem, hyp=f)
    p = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    if p.returncode > 0:
        # the scoring script failed; propagate its stderr
        raise RuntimeError(err.decode())
    return out.decode()
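# Usage sketch: write one hypothesis per line to a file, then score it with bleu_script.
# The output path is only illustrative, and the decode call below is hypothetical --
# the snippet above does not show how the model produces token ids.
with torch.no_grad(), open('test_hyps.txt', 'w') as fout:
    for src, trg in test_iter:
        hyp_ids = model.greedy_decode(src)   # hypothetical decode method
        words = [trg_vocab['itos'][i] for i in hyp_ids if i not in (opt.dec_eos, opt.dec_pad)]
        fout.write(' '.join(words) + '\n')
print(bleu_script('test_hyps.txt'))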
def build_dataset(data_directory, img_width):
    # load images and labels from data_directory, then one-hot encode the labels
    X, y, tags = dataset.dataset(data_directory, int(img_width))
    nb_classes = len(tags)
    sample_count = len(y)
    train_size = sample_count
    print("train size : {}".format(train_size))
    feature = X
    label = np_utils.to_categorical(y, nb_classes)
    return feature, label, nb_classes
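# Usage sketch: the directory path and the 128-pixel width below are placeholders;
# build_dataset returns one-hot labels ready for a categorical-crossentropy classifier.
features, labels, nb_classes = build_dataset('data/train', 128)
print(features.shape, labels.shape, nb_classes)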
def data_loader(image_root, data_list, shuffle=True, batch_size=64, workers=20, is_cuda=True, is_visualization=False):
    # pin memory and use worker processes only when loading onto a CUDA device
    kwargs = {'num_workers': workers, 'pin_memory': True} if is_cuda else {}
    # crop the person region, rescale to 64x128, and convert to a tensor
    transform = transforms.Compose([
        trans.person_crop(ratio=(1, 0.75), crop_type=1),
        trans.scale(size=(64, 128)),
        transforms.ToTensor()
    ])
    preid = dataset.listDataset(
        image_root,
        data_list,
        shuffle,
        transform=transform,
        is_visualization=is_visualization)
    data_loader = torch.utils.data.DataLoader(preid,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              **kwargs)
    return data_loader
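# Usage sketch: the paths are placeholders, and the (image, label) batch structure is
# an assumption about what listDataset yields.
loader = data_loader('data/images', 'data/train_list.txt', batch_size=32, workers=4)
images, labels = next(iter(loader))   # assumed (image, label) pairs
print(images.size())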
src_vocab['stoi'] = load_vocab(opt.src_vocab)
trg_vocab['stoi'] = load_vocab(opt.trg_vocab)
src_vocab['itos'] = invert_vocab(src_vocab['stoi'])
trg_vocab['itos'] = invert_vocab(trg_vocab['stoi'])
# special tokens (string forms assumed to match the vocabulary files)
UNK = '<unk>'
SOS = '<sos>'
EOS = '<eos>'
PAD = '<pad>'
opt.enc_pad = src_vocab['stoi'][PAD]
opt.dec_sos = trg_vocab['stoi'][SOS]
opt.dec_eos = trg_vocab['stoi'][EOS]
opt.dec_pad = trg_vocab['stoi'][PAD]
opt.enc_ntok = len(src_vocab['stoi'])
opt.dec_ntok = len(trg_vocab['stoi'])
# load dataset for training and validation
train_dataset = dataset(opt.train_src, opt.train_trg, opt.src_max_len, opt.trg_max_len)
valid_dataset = dataset(opt.valid_src, opt.valid_trg)
train_iter = torch.utils.data.DataLoader(train_dataset, opt.batch_size, shuffle=True, num_workers=4, collate_fn=lambda x: zip(*x))
valid_iter = torch.utils.data.DataLoader(valid_dataset, 1, shuffle=False, collate_fn=lambda x: zip(*x))
# create the model
model = getattr(model, opt.model)(opt).to(device)
# initialize the parameters
for p in model.parameters():
    p.data.uniform_(-0.1, 0.1)
# resume from a checkpoint if a name is given
if opt.name:
    state_dict = torch.load(os.path.join(opt.checkpoint, opt.name))
    model.load_state_dict(state_dict)
param_list = list(model.parameters())
param_group = param_list
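# Sketch of the next step (not shown above): wrap param_group in an optimizer and run
# one update.  The Adam choice, the opt.lr attribute, and the assumption that the
# forward pass returns the training loss are all hypothetical.
optimizer = torch.optim.Adam(param_group, lr=getattr(opt, 'lr', 1e-3))
for src, trg in train_iter:
    optimizer.zero_grad()
    loss = model(src, trg)   # hypothetical: assumes the model returns the loss directly
    loss.backward()
    optimizer.step()
    break                    # single illustrative step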