"""SAM and ImbSAM: sharpness-aware minimization wrappers for PyTorch optimizers."""
import torch
from collections import defaultdict

class SAM:
    """Sharpness-Aware Minimization (SAM) wrapper around a base optimizer.

    Usage per iteration: backward() on the loss, call first_step() to perturb
    the weights, backward() on the loss again at the perturbed weights, then
    call second_step() to restore the weights and apply the optimizer update.
    """

    def __init__(self, optimizer, model, rho=0.05):
        self.optimizer = optimizer
        self.model = model
        self.rho = rho  # radius of the perturbation neighborhood
        self.state = defaultdict(dict)  # per-parameter buffers, keyed by tensor

    @torch.no_grad()
    def first_step(self):
        # Global L2 norm of the gradient across all parameters.
        grads = []
        for n, p in self.model.named_parameters():
            if p.grad is None:
                continue
            grads.append(torch.norm(p.grad, p=2))
        grad_norm = torch.norm(torch.stack(grads), p=2) + 1.e-16  # avoid division by zero

        # Perturb each parameter by eps = rho * grad / ||grad||, caching eps
        # so second_step() can undo the perturbation.
        for n, p in self.model.named_parameters():
            if p.grad is None:
                continue
            eps = self.state[p].get("eps")
            if eps is None:
                eps = torch.clone(p).detach()
                self.state[p]["eps"] = eps
            eps[...] = p.grad[...]
            eps.mul_(self.rho / grad_norm)
            p.add_(eps)
        self.optimizer.zero_grad()

    @torch.no_grad()
    def second_step(self):
        # Undo the perturbation, then step with the gradient computed at the
        # perturbed point (the sharpness-aware gradient).
        for n, p in self.model.named_parameters():
            if p.grad is None:
                continue
            p.sub_(self.state[p]["eps"])
        self.optimizer.step()
        self.optimizer.zero_grad()
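
# A minimal usage sketch for SAM. Assumption: `model`, `criterion`, and the
# batch tensors are supplied by the caller; none of these names are defined in
# this file. SAM takes two forward/backward passes per iteration: one to
# compute the perturbation, one to compute the sharpness-aware gradient.
def sam_train_step(sam, model, criterion, inputs, targets):
    # Pass 1: gradient at the current weights -> perturb weights by eps.
    loss = criterion(model(inputs), targets)
    loss.backward()
    sam.first_step()
    # Pass 2: gradient at the perturbed weights -> restore weights and update.
    criterion(model(inputs), targets).backward()
    sam.second_step()
    return loss.item()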

class ImbSAM:
    """Imbalanced-SAM: applies the SAM perturbation only to the rare-class loss.

    Usage per iteration: backward() on the frequent-class loss, call
    first_step() to cache that gradient; backward() on the rare-class loss,
    call second_step() to perturb the weights; backward() on the rare-class
    loss again at the perturbed weights, then call third_step() to restore the
    weights and update with the combined gradient.
    """

    def __init__(self, optimizer, model, rho=0.05):
        self.optimizer = optimizer
        self.model = model
        self.rho = rho  # radius of the perturbation neighborhood
        self.state = defaultdict(dict)  # per-parameter buffers, keyed by tensor

    @torch.no_grad()
    def first_step(self):
        # Cache the gradient of the frequent-class loss; it will be applied
        # without any sharpness-aware perturbation.
        for n, p in self.model.named_parameters():
            if p.grad is None:
                continue
            grad_normal = self.state[p].get("grad_normal")
            if grad_normal is None:
                grad_normal = torch.clone(p).detach()
                self.state[p]["grad_normal"] = grad_normal
            grad_normal[...] = p.grad[...]
        self.optimizer.zero_grad()

    @torch.no_grad()
    def second_step(self):
        # Global L2 norm of the rare-class gradient across all parameters.
        grads = []
        for n, p in self.model.named_parameters():
            if p.grad is None:
                continue
            grads.append(torch.norm(p.grad, p=2))
        grad_norm = torch.norm(torch.stack(grads), p=2) + 1.e-16  # avoid division by zero

        # Perturb each parameter by eps = rho * grad / ||grad||, caching eps
        # so third_step() can undo the perturbation.
        for n, p in self.model.named_parameters():
            if p.grad is None:
                continue
            eps = self.state[p].get("eps")
            if eps is None:
                eps = torch.clone(p).detach()
                self.state[p]["eps"] = eps
            eps[...] = p.grad[...]
            eps.mul_(self.rho / grad_norm)
            p.add_(eps)
        self.optimizer.zero_grad()

    @torch.no_grad()
    def third_step(self):
        # Undo the perturbation, add back the cached frequent-class gradient,
        # and step with the combined gradient.
        for n, p in self.model.named_parameters():
            if p.grad is None:
                continue
            p.sub_(self.state[p]["eps"])
            p.grad.add_(self.state[p]["grad_normal"])
        self.optimizer.step()
        self.optimizer.zero_grad()
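
# A minimal usage sketch for ImbSAM. Assumption: the caller splits each batch
# into frequent-class (head) and rare-class (tail) examples; the names below
# are illustrative and not defined in this file. ImbSAM takes three passes:
# the head gradient is cached unperturbed, the tail gradient defines the
# perturbation, and the tail gradient at the perturbed weights is combined
# with the cached head gradient for the update.
def imbsam_train_step(imbsam, model, criterion, head_x, head_y, tail_x, tail_y):
    # Pass 1: gradient of the head loss, cached as grad_normal.
    criterion(model(head_x), head_y).backward()
    imbsam.first_step()
    # Pass 2: gradient of the tail loss -> perturb weights by eps.
    criterion(model(tail_x), tail_y).backward()
    imbsam.second_step()
    # Pass 3: tail loss at the perturbed weights; restore weights, add the
    # cached head gradient, and take the optimizer step.
    criterion(model(tail_x), tail_y).backward()
    imbsam.third_step()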