"""Unit tests for Emmental optimizer and learning rate scheduler configuration."""
import logging
import shutil

import torch.nn as nn

import emmental
from emmental import Meta
from emmental.learner import EmmentalLearner

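
# Some assertions below compare via ``isequal`` rather than ``==``. A minimal stand-in
# is sketched here (assumption: the original suite provides its own helper); it treats
# the expected dict as a subset of ``optimizer.defaults``, so extra default keys added
# by newer torch releases do not break the comparison.
def isequal(defaults, expected):
    """Return True if every expected key/value pair appears in ``defaults``."""
    return all(key in defaults and defaults[key] == value for key, value in expected.items())
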
def test_adadelta_optimizer(caplog):
"""Unit test of Adadelta optimizer."""
caplog.set_level(logging.INFO)
optimizer = "adadelta"
dirpath = "temp_test_optimizer"
model = nn.Linear(1, 1)
emmental_learner = EmmentalLearner()
Meta.reset()
emmental.init(dirpath)
# Test default Adadelta setting
config = {"learner_config": {"optimizer_config": {"optimizer": optimizer}}}
emmental.Meta.update_config(config)
emmental_learner._set_optimizer(model)
assert isequal(
emmental_learner.optimizer.defaults,
{"lr": 0.001, "rho": 0.9, "eps": 1e-06, "weight_decay": 0},
)
    # Test new Adadelta setting (override values are illustrative; the per-optimizer
    # "adadelta_config" sub-dict is assumed)
    config = {
        "learner_config": {
            "optimizer_config": {
                "optimizer": optimizer,
                "lr": 0.02,
                "adadelta_config": {"rho": 0.6, "eps": 1e-05},
            }
        }
    }
    emmental.Meta.update_config(config)
    emmental_learner._set_optimizer(model)

    assert isequal(
        emmental_learner.optimizer.defaults,
        {"lr": 0.02, "rho": 0.6, "eps": 1e-05, "weight_decay": 0},
    )

    shutil.rmtree(dirpath)

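
# Every test in this module repeats the same setup/teardown (Meta.reset(),
# emmental.init(), shutil.rmtree()). A pytest fixture could factor that out; the sketch
# below is a possible refactor, not part of the original suite (the fixture name and the
# use of the built-in tmp_path fixture are assumptions).
import pytest  # noqa: E402  (kept next to the sketch that needs it)


@pytest.fixture
def emmental_env(tmp_path):
    """Initialize Emmental in a temporary directory and clean it up afterwards."""
    log_dir = str(tmp_path / "emmental_logs")
    Meta.reset()
    emmental.init(log_dir)
    yield log_dir
    shutil.rmtree(log_dir, ignore_errors=True)
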
def test_rms_prop_optimizer(caplog):
"""Unit test of RMSprop optimizer."""
caplog.set_level(logging.INFO)
optimizer = "rms_prop"
dirpath = "temp_test_optimizer"
model = nn.Linear(1, 1)
emmental_learner = EmmentalLearner()
Meta.reset()
emmental.init(dirpath)
# Test default RMSprop setting
config = {"learner_config": {"optimizer_config": {"optimizer": optimizer}}}
emmental.Meta.update_config(config)
emmental_learner._set_optimizer(model)
    assert emmental_learner.optimizer.defaults == {
        "lr": 0.001,
        "alpha": 0.99,
        "eps": 1e-08,
        "momentum": 0,
        "centered": False,
        "weight_decay": 0,
    }

    shutil.rmtree(dirpath)

def test_asgd_optimizer(caplog):
"""Unit test of ASGD optimizer."""
caplog.set_level(logging.INFO)
optimizer = "asgd"
dirpath = "temp_test_optimizer"
model = nn.Linear(1, 1)
emmental_learner = EmmentalLearner()
Meta.reset()
emmental.init(dirpath)
# Test default ASGD setting
config = {"learner_config": {"optimizer_config": {"optimizer": optimizer}}}
emmental.Meta.update_config(config)
emmental_learner._set_optimizer(model)
    assert isequal(
        emmental_learner.optimizer.defaults,
        {
            "lr": 0.001,
            "lambd": 0.0001,
            "alpha": 0.75,
            "t0": 1_000_000.0,
            "weight_decay": 0,
        },
    )

    shutil.rmtree(dirpath)

def test_plateau_scheduler(caplog):
"""Unit test of plateau scheduler."""
caplog.set_level(logging.INFO)
lr_scheduler = "plateau"
dirpath = "temp_test_scheduler"
model = nn.Linear(1, 1)
emmental_learner = EmmentalLearner()
Meta.reset()
emmental.init(dirpath)
    config = {
        "learner_config": {
            "n_epochs": 4,
            "optimizer_config": {"optimizer": "sgd", "lr": 10},
            "lr_scheduler_config": {
                "lr_scheduler": lr_scheduler,
                "plateau_config": {
                    "metric": "model/train/all/loss",
                    "mode": "min",
                    "factor": 0.1,
                    "patience": 1,
                    "threshold": 0.0001,
                },
            },
        }
    }
    emmental.Meta.update_config(config)

    emmental_learner.n_batches_per_epoch = 1
    emmental_learner._set_optimizer(model)
    emmental_learner._set_lr_scheduler(model)

    # Plateau-style scheduling presumably cuts the lr by `factor` once `metric` stops
    # improving for `patience` evaluations; before any such step the configured lr
    # is untouched.
    assert emmental_learner.optimizer.param_groups[0]["lr"] == 10

    shutil.rmtree(dirpath)

def test_adam_optimizer(caplog):
"""Unit test of Adam optimizer."""
caplog.set_level(logging.INFO)
optimizer = "adam"
dirpath = "temp_test_optimizer"
model = nn.Linear(1, 1)
emmental_learner = EmmentalLearner()
Meta.reset()
emmental.init(dirpath)
# Test default Adam setting
config = {"learner_config": {"optimizer_config": {"optimizer": optimizer}}}
emmental.Meta.update_config(config)
emmental_learner._set_optimizer(model)
assert emmental_learner.optimizer.defaults == {
"lr": 0.001,
"betas": (0.9, 0.999),
"eps": 1e-08,
"amsgrad": False,
"weight_decay": 0,
}
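
# The test above only exercises the default Adam settings. The same update_config path
# can override individual hyperparameters, as the scheduler tests below do with
# {"optimizer": "sgd", "lr": 10}. The extra test below is an illustrative sketch, not
# part of the original suite.
def test_adam_optimizer_custom_lr(caplog):
    """Sketch: override the Adam learning rate via optimizer_config."""
    caplog.set_level(logging.INFO)

    dirpath = "temp_test_optimizer"
    model = nn.Linear(1, 1)
    emmental_learner = EmmentalLearner()

    Meta.reset()
    emmental.init(dirpath)

    config = {"learner_config": {"optimizer_config": {"optimizer": "adam", "lr": 0.01}}}
    emmental.Meta.update_config(config)
    emmental_learner._set_optimizer(model)

    assert emmental_learner.optimizer.defaults["lr"] == 0.01

    shutil.rmtree(dirpath)
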
def test_sgd_optimizer(caplog):
"""Unit test of SGD optimizer."""
caplog.set_level(logging.INFO)
optimizer = "sgd"
dirpath = "temp_test_optimizer"
model = nn.Linear(1, 1)
emmental_learner = EmmentalLearner()
Meta.reset()
emmental.init(dirpath)
# Test default SGD setting
config = {"learner_config": {"optimizer_config": {"optimizer": optimizer}}}
emmental.Meta.update_config(config)
emmental_learner._set_optimizer(model)
assert emmental_learner.optimizer.defaults == {
"lr": 0.001,
"momentum": 0,
"dampening": 0,
"nesterov": False,
"weight_decay": 0.0,
}
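
# Checking ``defaults`` never runs the optimizer. The sketch below performs a single
# forward/backward/step with the optimizer built by _set_optimizer, using plain torch
# calls only (the input tensor and learning rate are arbitrary illustrative values).
def test_sgd_optimizer_single_step(caplog):
    """Sketch: one optimization step with the configured SGD optimizer."""
    import torch

    caplog.set_level(logging.INFO)

    dirpath = "temp_test_optimizer"
    model = nn.Linear(1, 1)
    emmental_learner = EmmentalLearner()

    Meta.reset()
    emmental.init(dirpath)

    emmental.Meta.update_config(
        {"learner_config": {"optimizer_config": {"optimizer": "sgd", "lr": 0.1}}}
    )
    emmental_learner._set_optimizer(model)

    weight_before = model.weight.detach().clone()
    loss = model(torch.ones(1, 1)).sum()
    loss.backward()
    emmental_learner.optimizer.step()

    # With a non-zero gradient and lr=0.1 the weight must have moved.
    assert not torch.equal(weight_before, model.weight.detach())

    shutil.rmtree(dirpath)
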
def test_exponential_scheduler(caplog):
"""Unit test of exponential scheduler."""
caplog.set_level(logging.INFO)
lr_scheduler = "exponential"
dirpath = "temp_test_scheduler"
model = nn.Linear(1, 1)
emmental_learner = EmmentalLearner()
Meta.reset()
emmental.init(dirpath)
# Test step per batch
config = {
"learner_config": {
"n_epochs": 4,
"optimizer_config": {"optimizer": "sgd", "lr": 10},
"lr_scheduler_config": {
"lr_scheduler": lr_scheduler,
"exponential_config": {"gamma": 0.1},
},
}
}
    emmental.Meta.update_config(config)
    emmental_learner.n_batches_per_epoch = 1
    emmental_learner._set_optimizer(model)
    emmental_learner._set_lr_scheduler(model)

    shutil.rmtree(dirpath)

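
# With gamma=0.1 an exponential schedule multiplies the learning rate by gamma at every
# step, i.e. lr_t = lr_0 * gamma**t, so lr goes 10 -> 1 -> 0.1 -> 0.01 over the four
# configured epochs. The helper below shows that progression with plain
# torch.optim.lr_scheduler.ExponentialLR; it illustrates the underlying torch scheduler
# and is not asserted to be Emmental's exact implementation of "exponential".
def _exponential_lr_sketch(n_steps=4):
    import torch

    opt = torch.optim.SGD(nn.Linear(1, 1).parameters(), lr=10)
    sched = torch.optim.lr_scheduler.ExponentialLR(opt, gamma=0.1)
    lrs = []
    for _ in range(n_steps):
        lrs.append(opt.param_groups[0]["lr"])
        opt.step()
        sched.step()
    return lrs  # approximately [10, 1.0, 0.1, 0.01]
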
def test_bert_adam_optimizer(caplog):
"""Unit test of BertAdam optimizer."""
caplog.set_level(logging.INFO)
optimizer = "bert_adam"
dirpath = "temp_test_optimizer"
model = nn.Linear(1, 1)
emmental_learner = EmmentalLearner()
Meta.reset()
emmental.init(dirpath)
# Test default BertAdam setting
config = {"learner_config": {"optimizer_config": {"optimizer": optimizer}}}
emmental.Meta.update_config(config)
emmental_learner._set_optimizer(model)
assert emmental_learner.optimizer.defaults == {
"lr": 0.001,
"betas": (0.9, 0.999),
"eps": 1e-08,
"weight_decay": 0.0,
}
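
    # Note: these expected BertAdam defaults match the torch Adam defaults checked in
    # test_adam_optimizer (lr, betas, eps, weight_decay) except for the missing
    # "amsgrad" entry, so the same hyperparameter keys apply to both optimizers.
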
def test_linear_scheduler(caplog):
"""Unit test of linear scheduler."""
caplog.set_level(logging.INFO)
lr_scheduler = "linear"
dirpath = "temp_test_scheduler"
model = nn.Linear(1, 1)
emmental_learner = EmmentalLearner()
Meta.reset()
emmental.init(dirpath)
# Test per batch
config = {
"learner_config": {
"n_epochs": 4,
"optimizer_config": {"optimizer": "sgd", "lr": 10},
"lr_scheduler_config": {"lr_scheduler": lr_scheduler},
}
}
emmental.Meta.update_config(config)
emmental_learner.n_batches_per_epoch = 1
emmental_learner._set_optimizer(model)
emmental_learner._set_lr_scheduler(model)
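
# A linear schedule conventionally decays the learning rate linearly to zero over the
# total number of steps, here n_epochs * n_batches_per_epoch = 4 * 1 = 4. The helper
# below expresses that with plain torch.optim.lr_scheduler.LambdaLR; it illustrates the
# idea only and is not asserted to be Emmental's exact implementation of "linear".
def _linear_lr_sketch(total_steps=4):
    import torch

    opt = torch.optim.SGD(nn.Linear(1, 1).parameters(), lr=10)
    sched = torch.optim.lr_scheduler.LambdaLR(
        opt, lr_lambda=lambda step: max(0.0, 1.0 - step / total_steps)
    )
    lrs = []
    for _ in range(total_steps):
        lrs.append(opt.param_groups[0]["lr"])
        opt.step()
        sched.step()
    return lrs  # [10.0, 7.5, 5.0, 2.5]
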
def test_cyclic_scheduler(caplog):
"""Unit test of cyclic scheduler."""
caplog.set_level(logging.INFO)
lr_scheduler = "cyclic"
dirpath = "temp_test_scheduler"
model = nn.Linear(1, 1)
emmental_learner = EmmentalLearner()
Meta.reset()
emmental.init(dirpath)
    config = {
        "learner_config": {
            "n_epochs": 4,
            "optimizer_config": {"optimizer": "sgd", "lr": 10},
            "lr_scheduler_config": {
                "lr_scheduler": lr_scheduler,
                "cyclic_config": {
                    "base_lr": 10,
                    "base_momentum": 0.8,
                    "cycle_momentum": True,
                    "gamma": 1.0,
                    "last_epoch": -1,
                    # max_lr is an assumed value; torch's CyclicLR requires one.
                    "max_lr": 10,
                },
            },
        }
    }
    emmental.Meta.update_config(config)

    emmental_learner.n_batches_per_epoch = 1
    emmental_learner._set_optimizer(model)
    emmental_learner._set_lr_scheduler(model)

    shutil.rmtree(dirpath)