parser.add_argument('--gpu', '-g', dest='device',
                    type=int, nargs='?', const=0,
                    help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()

device = chainer.get_device(args.device)
xp = device.xp
device.use()

model, vocab = memnn.load_model(args.MODEL)
model.to_device(device)
network = model.predictor
max_memory = network.max_memory
id_to_vocab = {i: v for v, i in vocab.items()}

test_data = babi.read_data(vocab, args.DATA)
print('Test data: %s: %d' % (args.DATA, len(test_data)))

sentence_len = max(max(len(s.sentence) for s in story)
                   for story in test_data)
correct = total = 0
for story in test_data:
    # One fixed-size memory matrix per story; each row holds a padded sentence.
    mem = xp.zeros((max_memory, sentence_len), dtype=numpy.int32)
    i = 0
    for sent in story:
        if isinstance(sent, babi.Sentence):
            if i == max_memory:
                # Memory is full: shift rows up to drop the oldest sentence.
                mem[0:i - 1, :] = mem[1:i, :]
                i -= 1
            mem[i, 0:len(sent.sentence)] = xp.asarray(sent.sentence)
            i += 1
        elif isinstance(sent, babi.Query):
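The excerpt breaks off at the query branch. The sketch below shows one way it can complete the accuracy count. It assumes babi.Query carries the gold label in sent.answer and that network returns one score per vocabulary word when given a memory and a question; the closing print line is hypothetical rather than taken from the snippet.

        elif isinstance(sent, babi.Query):
            query = xp.asarray(sent.sentence, dtype=numpy.int32)
            # The network expects a mini-batch, hence the added leading axis.
            score = network(mem[None], query[None])[0]
            answer = int(xp.argmax(score.array))
            total += 1
            if answer == sent.answer:
                correct += 1

print('Accuracy: %.2f%%' % (100.0 * correct / total))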
parser.add_argument('--hop', '-H', type=int, default=3,
                    help='Number of hops')
parser.add_argument('--max-memory', type=int, default=50,
                    help='Maximum number of memory slots')
parser.add_argument('--sentence-repr',
                    choices=['bow', 'pe'], default='bow',
                    help='Sentence representation. '
                    'Select from BoW ("bow") or position encoding ("pe")')
args = parser.parse_args()

# Word IDs are assigned lazily: each unseen word gets the next integer ID.
vocab = collections.defaultdict(lambda: len(vocab))
vocab[''] = 0

# Read all 20 bAbI tasks with a shared vocab so word IDs cover every task.
for data_id in six.moves.range(1, 21):
    train_data = babi.read_data(
        vocab,
        glob.glob('%s/qa%d_*train.txt' % (args.data, data_id))[0])
    test_data = babi.read_data(
        vocab,
        glob.glob('%s/qa%d_*test.txt' % (args.data, data_id))[0])

print('Training data: %d' % len(train_data))
train_data = convert_data(train_data, args.max_memory)
test_data = convert_data(test_data, args.max_memory)

if args.sentence_repr == 'bow':
    encoder = BoWEncoder()
elif args.sentence_repr == 'pe':
    encoder = PositionEncoder()
else:
    print('Unknown --sentence-repr option: "%s"' % args.sentence_repr)
    sys.exit(1)

memnn = MemNN(args.unit, len(vocab), encoder, args.max_memory, args.hop)
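BoWEncoder and PositionEncoder differ only in how a sentence's word embeddings are pooled: a plain sum versus a position-weighted sum. The sketch below computes the position-encoding weights as defined in the end-to-end memory networks paper (Sukhbaatar et al., 2015), which is the scheme the "pe" option refers to; the sentence length and embedding size are toy values, not taken from the snippet.

import numpy

def position_encoding(J, d):
    # l[j, k] = (1 - j/J) - (k/d) * (1 - 2*j/J), 1-indexed as in the paper.
    j = numpy.arange(1, J + 1)[:, None]  # word positions, shape (J, 1)
    k = numpy.arange(1, d + 1)[None, :]  # embedding dimensions, shape (1, d)
    return (1 - j / J) - (k / d) * (1 - 2 * j / J)

J, d = 5, 8                           # toy sentence length and embedding size
embeddings = numpy.random.rand(J, d)  # one embedding vector per word

bow = embeddings.sum(axis=0)                             # BoW: unweighted sum
pe = (position_encoding(J, d) * embeddings).sum(axis=0)  # position-weighted sum

Unlike the plain sum, the position-weighted sum distinguishes "John went to the kitchen" from "the kitchen went to John", which matters for the ordering-sensitive bAbI tasks.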
def train(train_data_path, test_data_path, args):
    device = chainer.get_device(args.device)
    device.use()

    # Unseen words get the next free integer ID; '' is the padding entry.
    vocab = collections.defaultdict(lambda: len(vocab))
    vocab[''] = 0

    train_data = babi.read_data(vocab, train_data_path)
    test_data = babi.read_data(vocab, test_data_path)
    print('Training data: %s: %d' % (train_data_path, len(train_data)))
    print('Test data: %s: %d' % (test_data_path, len(test_data)))

    train_data = memnn.convert_data(train_data, args.max_memory)
    test_data = memnn.convert_data(test_data, args.max_memory)

    encoder = memnn.make_encoder(args.sentence_repr)
    network = memnn.MemNN(
        args.unit, len(vocab), encoder, args.max_memory, args.hop)
    # Classifier pulls the label from the 'answer' key of each example.
    model = chainer.links.Classifier(network, label_key='answer')
    opt = chainer.optimizers.Adam()
    model.to_device(device)
    opt.setup(model)
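The train function stops right after optimizer setup; the training loop itself is outside this excerpt. Assuming the example follows Chainer's standard trainer pattern, the function body could continue roughly as below. The batch size, epoch count, and output directory are illustrative values, not taken from the snippet.

    # Mini-batch iterators over the converted datasets.
    train_iter = chainer.iterators.SerialIterator(train_data, batch_size=100)
    test_iter = chainer.iterators.SerialIterator(
        test_data, batch_size=100, repeat=False, shuffle=False)

    updater = chainer.training.updaters.StandardUpdater(
        train_iter, opt, device=device)
    trainer = chainer.training.Trainer(updater, (20, 'epoch'), out='result')

    # Evaluate on the test split every epoch and print progress.
    trainer.extend(chainer.training.extensions.Evaluator(
        test_iter, model, device=device))
    trainer.extend(chainer.training.extensions.LogReport())
    trainer.extend(chainer.training.extensions.PrintReport(
        ['epoch', 'main/loss', 'main/accuracy',
         'validation/main/loss', 'validation/main/accuracy']))

    trainer.run()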