-
Notifications
You must be signed in to change notification settings - Fork 74
/
lock.go
122 lines (107 loc) · 2.87 KB
/
lock.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
package phase
import (
"context"
"fmt"
gos "os"
"sync"
"time"
"github.com/k0sproject/k0sctl/analytics"
"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
"github.com/k0sproject/k0sctl/pkg/retry"
"github.com/k0sproject/rig/exec"
log "github.com/sirupsen/logrus"
)
// Lock acquires an exclusive k0sctl lock on hosts
type Lock struct {
	GenericPhase
	// cfs holds the context cancel functions registered by startTicker,
	// one per host; invoking them stops the periodic lock refresh.
	cfs []func()
	// instanceID identifies this k0sctl run (machine id + pid) and is
	// written into each host's lock file to detect ownership.
	instanceID string
	// m guards access to cfs.
	m sync.Mutex
	// wg tracks the per-host ticker goroutines so Cancel can wait for
	// them to remove their lock files before returning.
	wg sync.WaitGroup
}
// Prepare stores the cluster configuration and derives a unique
// identifier for this k0sctl invocation from the machine id and the
// current process id.
func (p *Lock) Prepare(c *v1beta1.Cluster) error {
	p.Config = c
	// Best effort: on error the machine id is empty, but the pid still
	// yields a usable per-run identifier.
	machineID, _ := analytics.MachineID()
	p.instanceID = fmt.Sprintf("%s-%d", machineID, gos.Getpid())
	return nil
}
// Title returns the human-readable title for the phase.
func (p *Lock) Title() string {
	return "Acquire exclusive host lock"
}
// Cancel releases the lock: it signals every per-host ticker goroutine
// to stop and then blocks until all of them have removed their lock
// files.
func (p *Lock) Cancel() {
	p.m.Lock()
	defer p.m.Unlock()
	for i := range p.cfs {
		p.cfs[i]()
	}
	// Wait for the ticker goroutines to delete the remote lock files.
	p.wg.Wait()
}
// CleanUp calls Cancel to release the lock when the phase is torn down.
func (p *Lock) CleanUp() {
	p.Cancel()
}
// Run acquires the lock file on every host in parallel and, once all
// locks are held, starts a per-host ticker that keeps each lock file's
// timestamp fresh.
func (p *Lock) Run() error {
	err := p.parallelDo(p.Config.Spec.Hosts, p.startLock)
	if err != nil {
		return err
	}
	return p.Config.Spec.Hosts.ParallelEach(p.startTicker)
}
// startTicker launches a goroutine that refreshes the host's lock file
// timestamp every 10 seconds so other k0sctl instances see the lock as
// live. The goroutine runs until the cancel function registered in
// p.cfs is invoked, at which point it deletes the lock file and marks
// p.wg done. Always returns nil.
func (p *Lock) startTicker(h *cluster.Host) error {
	p.wg.Add(1)
	lfp := h.Configurer.K0sctlLockFilePath(h)
	ticker := time.NewTicker(10 * time.Second)
	ctx, cancel := context.WithCancel(context.Background())
	p.m.Lock()
	p.cfs = append(p.cfs, cancel)
	p.m.Unlock()
	go func() {
		defer p.wg.Done()
		// Fix: the original never called Stop, leaking the ticker's
		// resources after the goroutine returned.
		defer ticker.Stop()
		log.Debugf("%s: started periodic update of lock file %s timestamp", h, lfp)
		for {
			select {
			case <-ticker.C:
				if err := h.Configurer.Touch(h, lfp, time.Now(), exec.Sudo(h)); err != nil {
					log.Warnf("%s: failed to touch lock file: %s", h, err)
				}
			case <-ctx.Done():
				log.Debugf("%s: stopped lock cycle, removing file", h)
				if err := h.Configurer.DeleteFile(h, lfp); err != nil {
					log.Warnf("%s: failed to remove host lock file: %s", h, err)
				}
				return
			}
		}
	}()
	return nil
}
// startLock attempts to acquire the k0sctl lock on the host, retrying
// up to ten times before giving up.
func (p *Lock) startLock(h *cluster.Host) error {
	attempt := func(_ context.Context) error {
		return p.tryLock(h)
	}
	return retry.Times(context.TODO(), 10, attempt)
}
// tryLock performs a single lock acquisition attempt on the host. It
// writes this instance's id into the lock file; if the write fails, it
// inspects the existing file: a lock containing our own instanceID is
// treated as already held (success), a lock modified within the last
// 30 seconds belongs to a live competing instance (error), and an
// older lock is considered expired and is deleted so a subsequent
// retry can acquire it.
func (p *Lock) tryLock(h *cluster.Host) error {
	lfp := h.Configurer.K0sctlLockFilePath(h)
	if err := h.Configurer.UpsertFile(h, lfp, p.instanceID); err != nil {
		// NOTE(review): presumably UpsertFile fails when the file already
		// exists — confirm against the configurer implementation.
		stat, err := h.Configurer.Stat(h, lfp, exec.Sudo(h))
		if err != nil {
			return fmt.Errorf("lock file disappeared: %w", err)
		}
		content, err := h.Configurer.ReadFile(h, lfp)
		if err != nil {
			return fmt.Errorf("failed to read lock file: %w", err)
		}
		if content != p.instanceID {
			// The ticker touches the file every 10s, so a modification
			// within 30s means the owning instance is still alive.
			if time.Since(stat.ModTime()) < 30*time.Second {
				return fmt.Errorf("another instance of k0sctl is currently operating on the host")
			}
			// Expired lock: delete it (best effort; a failure here just
			// makes the next retry hit the same path) and return an error
			// so the retry loop attempts acquisition again.
			_ = h.Configurer.DeleteFile(h, lfp)
			return fmt.Errorf("removed existing expired lock file")
		}
	}
	return nil
}