"""Emmental AdamW optimizer unit tests."""
import logging
import shutil

from torch import nn as nn

import emmental
from emmental import Meta
from emmental.learner import EmmentalLearner

logger = logging.getLogger(__name__)


def test_adamw_optimizer(caplog):
    """Unit test of AdamW optimizer."""
    caplog.set_level(logging.INFO)

    optimizer = "adamw"
    dirpath = "temp_test_optimizer"

    model = nn.Linear(1, 1)
    emmental_learner = EmmentalLearner()

    Meta.reset()
    emmental.init(dirpath)

    # Test default AdamW setting
    config = {"learner_config": {"optimizer_config": {"optimizer": optimizer}}}
    emmental.Meta.update_config(config)
    emmental_learner._set_optimizer(model)

    assert emmental_learner.optimizer.defaults == {
        "lr": 0.001,
        "betas": (0.9, 0.999),
        "eps": 1e-08,
        "amsgrad": False,
        "weight_decay": 0,
    }

    # Test new AdamW setting
    config = {
        "learner_config": {
            "optimizer_config": {
                "optimizer": optimizer,
                "lr": 0.02,
                "l2": 0.05,
                f"{optimizer}_config": {
                    "betas": (0.9, 0.99),
                    "eps": 1e-05,
                    "amsgrad": True,
                },
            }
        }
    }
    emmental.Meta.update_config(config)
    emmental_learner._set_optimizer(model)

    assert emmental_learner.optimizer.defaults == {
        "lr": 0.02,
        "betas": (0.9, 0.99),
        "eps": 1e-05,
        "amsgrad": True,
        "weight_decay": 0.05,
    }

    shutil.rmtree(dirpath)