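# Imports assumed for the test snippets below (hedged): the emmental import paths
# follow the Emmental package layout and may need adjusting for your version. The
# Fonduer end-to-end fragment further down also relies on names defined earlier in
# that test (logger, ATTRIBUTE, TRUE, TP, FP, FN, F_train, emb_layer, create_task,
# the train/test dataloaders and test_cands) that are not shown here.
import logging

import numpy as np
import pytest
import torch.nn as nn

import emmental
import fonduer
from emmental import Meta
from emmental.learner import EmmentalLearner
from emmental.logging import LoggingManager
from emmental.model import EmmentalModel
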
def test_adamax_optimizer(caplog):
    """Unit test of Adamax optimizer."""
    caplog.set_level(logging.INFO)
    optimizer = "adamax"
    dirpath = "temp_test_optimizer"
    model = nn.Linear(1, 1)
    emmental_learner = EmmentalLearner()
    Meta.reset()
    emmental.init(dirpath)
    # Test default Adamax setting
    config = {"learner_config": {"optimizer_config": {"optimizer": optimizer}}}
    emmental.Meta.update_config(config)
    emmental_learner._set_optimizer(model)
    assert emmental_learner.optimizer.defaults == {
        "lr": 0.001,
        "betas": (0.9, 0.999),
        "eps": 1e-08,
        "weight_decay": 0,
    }
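    # Note (hedged): betas, eps and weight_decay above match torch.optim.Adamax's own
    # defaults, but the lr of 0.001 differs from torch's 0.002 default, so it presumably
    # comes from Emmental's learner_config defaults rather than from the optimizer class.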
    # Test new Adamax setting
    config = {
        "learner_config": {

def test_logging_manager_tensorboard(caplog):
    """Unit test of logging_manager (tensorboard)."""
    caplog.set_level(logging.INFO)
    emmental.init()
    Meta.update_config(
        config={
            "logging_config": {
                "counter_unit": "epoch",
                "evaluation_freq": 1,
                "checkpointing": False,
                "checkpointer_config": {"checkpoint_freq": 2},
                "writer_config": {"writer": "tensorboard"},
            }
        }
    )
    logging_manager = LoggingManager(n_batches_per_epoch=2)
    logging_manager.update(5)

def test_logging_manager_wrong_counter_unit(caplog):
    """Unit test of logging_manager (wrong counter_unit)."""
    caplog.set_level(logging.INFO)
    emmental.init()
    Meta.update_config(
        config={
            "logging_config": {
                "counter_unit": "epochs",
                "evaluation_freq": 1,
                "checkpointing": False,
                "checkpointer_config": {"checkpoint_freq": 2},
            }
        }
    )
    # "epochs" is not a recognized counter_unit (the valid unit is e.g. "epoch"),
    # so constructing the LoggingManager is expected to raise.
    with pytest.raises(ValueError):
        logging_manager = LoggingManager(n_batches_per_epoch=2)
        logging_manager.update(5)

def test_adamw_optimizer(caplog):
    """Unit test of AdamW optimizer."""
    caplog.set_level(logging.INFO)
    optimizer = "adamw"
    dirpath = "temp_test_optimizer"
    model = nn.Linear(1, 1)
    emmental_learner = EmmentalLearner()
    Meta.reset()
    emmental.init(dirpath)
    # Test default AdamW setting
    config = {"learner_config": {"optimizer_config": {"optimizer": optimizer}}}
    emmental.Meta.update_config(config)
    emmental_learner._set_optimizer(model)
    assert emmental_learner.optimizer.defaults == {
        "lr": 0.001,
        "betas": (0.9, 0.999),
        "eps": 1e-08,
        "amsgrad": False,
        "weight_decay": 0,
    }
    # Test new AdamW setting
    config = {

def test_e2e(caplog):
    """Run an end-to-end test."""
    caplog.set_level(logging.INFO)
    dirpath = "temp_test_e2e"
    use_exact_log_path = False
    Meta.reset()
    emmental.init(dirpath, use_exact_log_path=use_exact_log_path)
    config = {
        "meta_config": {"seed": 0},
        "learner_config": {
            "n_epochs": 3,
            "optimizer_config": {"lr": 0.01, "grad_clip": 100},
        },
        "logging_config": {
            "counter_unit": "epoch",
            "evaluation_freq": 1,
            "writer_config": {"writer": "tensorboard", "verbose": True},
            "checkpointing": True,
            "checkpointer_config": {
                "checkpoint_path": None,
                "checkpoint_freq": 1,
                "checkpoint_metric": {"model/all/train/loss": "min"},
def test_sparse_adam_optimizer(caplog):
    """Unit test of SparseAdam optimizer."""
    caplog.set_level(logging.INFO)
    optimizer = "sparse_adam"
    dirpath = "temp_test_optimizer"
    model = nn.Linear(1, 1)
    emmental_learner = EmmentalLearner()
    Meta.reset()
    emmental.init(dirpath)
    # Test default SparseAdam setting
    config = {"learner_config": {"optimizer_config": {"optimizer": optimizer}}}
    emmental.Meta.update_config(config)
    emmental_learner._set_optimizer(model)
    assert emmental_learner.optimizer.defaults == {
        "lr": 0.001,
        "betas": (0.9, 0.999),
        "eps": 1e-08,
    }
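    # Note: unlike the dense Adam variants, torch.optim.SparseAdam takes no
    # weight_decay (or amsgrad) argument, which is why those keys are absent
    # from the asserted defaults here.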
    # Test new SparseAdam setting
    config = {
        "learner_config": {
            "optimizer_config": {

def test_logging_manager_no_writer(caplog):
    """Unit test of logging_manager (no writer)."""
    caplog.set_level(logging.INFO)
    emmental.init()
    Meta.update_config(
        config={
            "logging_config": {
                "counter_unit": "epoch",
                "evaluation_freq": 1,
                "checkpointing": False,
                "checkpointer_config": {"checkpoint_freq": 2},
                "writer_config": {"writer": None},
            }
        }
    )
    logging_manager = LoggingManager(n_batches_per_epoch=2)
    logging_manager.update(5)

def test_r_prop_optimizer(caplog):
    """Unit test of Rprop optimizer."""
    caplog.set_level(logging.INFO)
    optimizer = "r_prop"
    dirpath = "temp_test_optimizer"
    model = nn.Linear(1, 1)
    emmental_learner = EmmentalLearner()
    Meta.reset()
    emmental.init(dirpath)
    # Test default Rprop setting
    config = {"learner_config": {"optimizer_config": {"optimizer": optimizer}}}
    emmental.Meta.update_config(config)
    emmental_learner._set_optimizer(model)
    assert emmental_learner.optimizer.defaults == {
        "lr": 0.001,
        "etas": (0.5, 1.2),
        "step_sizes": (1e-06, 50),
    }
    # Test new Rprop setting
    config = {
        "learner_config": {
            "optimizer_config": {

tp_len = len(TP)
fp_len = len(FP)
fn_len = len(FN)
prec = tp_len / (tp_len + fp_len) if tp_len + fp_len > 0 else float("nan")
rec = tp_len / (tp_len + fn_len) if tp_len + fn_len > 0 else float("nan")
f1 = 2 * (prec * rec) / (prec + rec) if prec + rec > 0 else float("nan")
logger.info(f"prec: {prec}")
logger.info(f"rec: {rec}")
logger.info(f"f1: {f1}")
assert f1 > 0.7
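# Sanity check of the metric arithmetic above with hypothetical counts (not taken from
# this run): 8 TPs, 2 FPs and 3 FNs give prec = 8 / 10 = 0.8, rec = 8 / 11 ≈ 0.727,
# and f1 = 2 * (0.8 * 0.727) / (0.8 + 0.727) ≈ 0.762, which clears the 0.7 threshold.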
# Testing LSTM
emmental.Meta.reset()
emmental.init(fonduer.Meta.log_path)
emmental.Meta.update_config(config=config)
tasks = create_task(ATTRIBUTE, 2, F_train[0].shape[1], 2, emb_layer, model="LSTM")
model = EmmentalModel(name=f"{ATTRIBUTE}_task")
for task in tasks:
    model.add_task(task)
emmental_learner = EmmentalLearner()
emmental_learner.learn(model, [train_dataloader])
test_preds = model.predict(test_dataloader, return_preds=True)
positive = np.where(np.array(test_preds["probs"][ATTRIBUTE])[:, TRUE] > 0.7)
true_pred = [test_cands[0][_] for _ in positive[0]]

def test_exponential_scheduler(caplog):
    """Unit test of exponential scheduler."""
    caplog.set_level(logging.INFO)
    lr_scheduler = "exponential"
    dirpath = "temp_test_scheduler"
    model = nn.Linear(1, 1)
    emmental_learner = EmmentalLearner()
    Meta.reset()
    emmental.init(dirpath)
    # Test step per batch
    config = {
        "learner_config": {
            "n_epochs": 4,
            "optimizer_config": {"optimizer": "sgd", "lr": 10},
            "lr_scheduler_config": {
                "lr_scheduler": lr_scheduler,
                "exponential_config": {"gamma": 0.1},
            },
        }
    }
    emmental.Meta.update_config(config)
    emmental_learner.n_batches_per_epoch = 1
    emmental_learner._set_optimizer(model)
    emmental_learner._set_lr_scheduler(model)
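
# A minimal standalone sketch (plain PyTorch, hypothetical values, not part of the test
# above) of the decay behaviour this exponential scheduler config implies: ExponentialLR
# multiplies the learning rate by gamma on every scheduler step, so starting from lr = 10
# with gamma = 0.1 the rate drops roughly tenfold per step.
import torch
import torch.nn as nn

_opt = torch.optim.SGD(nn.Linear(1, 1).parameters(), lr=10)
_sched = torch.optim.lr_scheduler.ExponentialLR(_opt, gamma=0.1)
for _ in range(3):
    _opt.step()    # update parameters first to avoid the scheduler-order warning
    _sched.step()  # then decay the learning rate
    print(_opt.param_groups[0]["lr"])  # ~1.0, then ~0.1, then ~0.01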