remove.go
package calcium

import (
	"bytes"
	"context"
	"sync"

	"github.com/projecteru2/core/log"
	"github.com/projecteru2/core/resource/plugins"
	resourcetypes "github.com/projecteru2/core/resource/types"
	"github.com/projecteru2/core/types"
	"github.com/projecteru2/core/utils"
)
// RemoveWorkload removes workloads.
// It returns a channel that streams removal responses.
func (c *Calcium) RemoveWorkload(ctx context.Context, IDs []string, force bool) (chan *types.RemoveWorkloadMessage, error) {
	logger := log.WithFunc("calcium.RemoveWorkload").WithField("IDs", IDs).WithField("force", force)

	nodeWorkloadGroup, err := c.groupWorkloadsByNode(ctx, IDs)
	if err != nil {
		logger.Error(ctx, err, "failed to group workloads by node")
		return nil, err
	}

	ch := make(chan *types.RemoveWorkloadMessage)
	_ = c.pool.Invoke(func() {
		defer close(ch)
		wg := sync.WaitGroup{}
		defer wg.Wait()
		for nodename, workloadIDs := range nodeWorkloadGroup {
			wg.Add(1)
			// pass the loop variables into the closure so each goroutine gets its own copy
			_ = c.pool.Invoke(func(nodename string, workloadIDs []string) func() {
				return func() {
					defer wg.Done()
					if err := c.withNodePodLocked(ctx, nodename, func(ctx context.Context, node *types.Node) error {
						for _, workloadID := range workloadIDs {
							ret := &types.RemoveWorkloadMessage{WorkloadID: workloadID, Success: true, Hook: []*bytes.Buffer{}}
							if err := c.withWorkloadLocked(ctx, workloadID, false, func(ctx context.Context, workload *types.Workload) error {
								return utils.Txn(
									ctx,
									// if
									func(ctx context.Context) error {
										_, _, err := c.rmgr.SetNodeResourceUsage(ctx, node.Name, nil, nil, []resourcetypes.Resources{workload.Resources}, true, plugins.Decr)
										return err
									},
									// then
									func(ctx context.Context) (err error) {
										if err = c.doRemoveWorkload(ctx, workload, force); err == nil {
											logger.Infof(ctx, "Workload %s removed", workload.ID)
										}
										return err
									},
									// rollback
									func(ctx context.Context, failedByCond bool) error {
										if failedByCond {
											return nil
										}
										_, _, err := c.rmgr.SetNodeResourceUsage(ctx, node.Name, nil, nil, []resourcetypes.Resources{workload.Resources}, true, plugins.Incr)
										return err
									},
									c.config.GlobalTimeout,
								)
							}); err != nil {
								logger.WithField("id", workloadID).Error(ctx, err, "failed to lock workload")
								ret.Hook = append(ret.Hook, bytes.NewBufferString(err.Error()))
								ret.Success = false
							}
							ch <- ret
						}
						_ = c.pool.Invoke(func() { c.RemapResourceAndLog(ctx, logger, node) })
						return nil
					}); err != nil {
						logger.WithField("node", nodename).Error(ctx, err, "failed to lock node")
						ch <- &types.RemoveWorkloadMessage{Success: false}
					}
				}
			}(nodename, workloadIDs))
		}
	})

	return ch, nil
}
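// A minimal caller-side sketch for draining the channel (not part of this
// file; ctx, the constructed *Calcium instance c, and the workload ID are
// placeholders for illustration):
//
//	ch, err := c.RemoveWorkload(ctx, []string{"workload-id"}, false)
//	if err != nil {
//		return err
//	}
//	for msg := range ch {
//		if !msg.Success {
//			// inspect msg.Hook for the error output collected above
//		}
//	}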
// RemoveWorkloadSync removes workloads and blocks until all of them have been processed.
func (c *Calcium) RemoveWorkloadSync(ctx context.Context, IDs []string) error {
	return c.doRemoveWorkloadSync(ctx, IDs)
}
// semantics: the instance is removed when err == nil and kept when err != nil
func (c *Calcium) doRemoveWorkload(ctx context.Context, workload *types.Workload, force bool) error {
	return utils.Txn(
		ctx,
		// if: delete the workload record from the store first
		func(ctx context.Context) error {
			return c.store.RemoveWorkload(ctx, workload)
		},
		// then: tear down the running instance
		func(ctx context.Context) error {
			return workload.Remove(ctx, force)
		},
		// rollback: restore the store record unless the store deletion itself failed
		func(ctx context.Context, failedByCond bool) error {
			if failedByCond {
				return nil
			}
			return c.store.AddWorkload(ctx, workload, nil)
		},
		c.config.GlobalTimeout,
	)
}
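// utils.Txn, as used above, composes three phases: an "if" condition, a "then"
// action that runs only when the condition succeeds, and a rollback that
// receives failedByCond to tell a condition failure (nothing to undo) apart
// from a failure in "then". A schematic sketch of the pattern; reserve, apply,
// and unreserve are hypothetical names for illustration:
//
//	err := utils.Txn(ctx,
//		func(ctx context.Context) error { return reserve(ctx) }, // if
//		func(ctx context.Context) error { return apply(ctx) },   // then
//		func(ctx context.Context, failedByCond bool) error { // rollback
//			if failedByCond {
//				return nil // the reservation itself failed; nothing to undo
//			}
//			return unreserve(ctx)
//		},
//		timeout,
//	)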
// doRemoveWorkloadSync removes workloads synchronously; handy in situations where the caller needs to wait.
func (c *Calcium) doRemoveWorkloadSync(ctx context.Context, IDs []string) error {
	ch, err := c.RemoveWorkload(ctx, IDs, true)
	if err != nil {
		return err
	}

	for m := range ch {
		// TODO deal with failed
		log.WithFunc("calcium.doRemoveWorkloadSync").Debugf(ctx, "Removed %s", m.WorkloadID)
	}
	return nil
}
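// A minimal synchronous-removal sketch (ctx and the constructed *Calcium
// instance c are assumed; the ID is a placeholder): the call forces removal
// and returns only after every removal message has been drained:
//
//	if err := c.RemoveWorkloadSync(ctx, []string{"workload-id"}); err != nil {
//		return err
//	}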
func (c *Calcium) groupWorkloadsByNode(ctx context.Context, IDs []string) (map[string][]string, error) {
	workloads, err := c.store.GetWorkloads(ctx, IDs)
	if err != nil {
		return nil, err
	}

	nodeWorkloadGroup := map[string][]string{}
	for _, workload := range workloads {
		nodeWorkloadGroup[workload.Nodename] = append(nodeWorkloadGroup[workload.Nodename], workload.ID)
	}
	return nodeWorkloadGroup, nil
}