# --- Snippet 1: two-layer GRU ChainBiMapper with summed span scores ---
# Tail of a model definition; the enclosing constructor is truncated in this excerpt.
        ChainBiMapper(
            first_layer=BiRecurrentMapper(GruCellSpec(80)),
            second_layer=BiRecurrentMapper(GruCellSpec(80))
        ),
        aggregate="sum"
    )
)

# Record this script's own source as the run notes, so the exact
# configuration is saved alongside the trained model.
with open(__file__, "r") as f:
    notes = f.read()

corpus = SquadCorpus()
# Batches of 45, clustered by context length (the boolean flags differ for train vs. eval)
train_batching = ClusteredBatcher(45, ContextLenBucketedKey(3), True, False)
eval_batching = ClusteredBatcher(45, ContextLenKey(), False, False)
data = DocumentQaTrainingData(corpus, None, train_batching, eval_batching)
eval = [LossEvaluator(), BoundedSquadSpanEvaluator(bound=[17])]  # note: shadows the builtin eval
trainer.start_training(data, model, train_params, eval, trainer.ModelDir(out), notes, False)
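# A plausible import block for a snippet like the one above. Sketch only:
# the module paths are assumptions based on the docqa package layout and
# may differ across revisions of the document-qa repository.
from docqa import trainer
from docqa.dataset import ClusteredBatcher
from docqa.data_processing.qa_training_data import ContextLenKey, ContextLenBucketedKey
from docqa.evaluator import LossEvaluator
from docqa.squad.squad_data import SquadCorpus, DocumentQaTrainingData
from docqa.nn.recurrent_layers import BiRecurrentMapper, GruCellSpec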
# --- Snippet 2: training on preprocessed data with tagged answer spans ---
# Tail of a model definition; the enclosing constructor is truncated in this excerpt.
        aggregate="sum"
    )
)

# Save the script source as run notes.
with open(__file__, "r") as f:
    notes = f.read()

train_batching = ClusteredBatcher(45, ContextLenBucketedKey(3), True, False)
eval_batching = ClusteredBatcher(45, ContextLenKey(), False, False)
# Preprocess up front: tag the answer text and build paragraph/question
# datasets with the batchers above.
data = PreprocessedData(SquadCorpus(),
                        TagTextAnswers(),
                        ParagraphAndQuestionDatasetBuilder(train_batching, eval_batching),
                        # sample=20, sample_dev=20,
                        eval_on_verified=False)
data.preprocess()
eval = [LossEvaluator(), BoundedSquadSpanEvaluator(bound=[17])]
trainer.start_training(data, model, train_params, eval, model_dir.ModelDir(out), notes, False)
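# For a quick smoke test, the sample arguments commented out above can be
# enabled so preprocessing and training run on small subsets. A sketch,
# reusing only names that already appear in the snippet:
data_smoke = PreprocessedData(SquadCorpus(),
                              TagTextAnswers(),
                              ParagraphAndQuestionDatasetBuilder(train_batching, eval_batching),
                              sample=20, sample_dev=20,  # small train/dev samples
                              eval_on_verified=False)
data_smoke.preprocess()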
# --- Snippet 3: shared recurrent layers with a bounded span predictor ---
# Tail of a model definition; the enclosing constructor is truncated in this excerpt.
            first_layer=recurrent_layer,
            second_layer=recurrent_layer
        ),
        span_predictor=BoundedSpanPredictor(20)
    )
)

# Save the script source as run notes.
with open(__file__, "r") as f:
    notes = f.read()

corpus = SquadCorpus()
train_batching = ClusteredBatcher(45, ContextLenBucketedKey(3), True, False)
eval_batching = ClusteredBatcher(45, ContextLenKey(), False, False)
data = DocumentQaTrainingData(corpus, None, train_batching, eval_batching)
eval = [LossEvaluator(), BoundedSquadSpanEvaluator(bound=[17])]
trainer.start_training(data, model, train_params, eval, model_dir.ModelDir(out), notes)
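# Unlike the other excerpts, the call above omits the trailing False. If that
# positional slot is start_training's dry-run flag (an assumption; check the
# docqa revision in use), passing it by keyword removes the ambiguity:
# trainer.start_training(data, model, train_params, eval,
#                        model_dir.ModelDir(out), notes, dry_run=False)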
# --- Snippet 4: projected-product attention with an independent-bounds joint loss ---
# Tail of a model definition; the enclosing constructor is truncated in this excerpt.
        WithProjectedProduct(include_tiled=True),
        ChainBiMapper(
            first_layer=recurrent_layer,
            second_layer=recurrent_layer
        ),
        IndependentBoundsJointLoss()
    )
)

# Save the script source as run notes.
with open(__file__, "r") as f:
    notes = f.read()

train_batching = ClusteredBatcher(45, ContextLenBucketedKey(3), True, False)
eval_batching = ClusteredBatcher(45, ContextLenKey(), False, False)
data = DocumentQaTrainingData(SquadCorpus(), None, train_batching, eval_batching)
eval = [LossEvaluator(), SpanProbability(), BoundedSquadSpanEvaluator(bound=[17])]
trainer.start_training(data, model, train_params, eval, model_dir.ModelDir(out), notes, False)
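# None of the excerpts define train_params. A hedged sketch of what the
# docqa training scripts typically build; class and argument names are
# taken from memory of those scripts, so treat each one as an assumption:
from docqa.trainer import SerializableOptimizer, TrainParams

train_params = TrainParams(
    SerializableOptimizer("Adadelta", dict(learning_rate=1.0)),
    num_epochs=25,                            # passes over the training data
    log_period=30,                            # logging cadence
    eval_period=1200,                         # evaluation cadence
    save_period=1200,                         # checkpoint cadence
    eval_samples=dict(dev=None, train=8000),  # eval on all of dev, a sample of train
)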