state.go
package goworker

import (
	"fmt"
	"time"

	"github.com/lufred/goworker/common/logger"
	"github.com/lufred/goworker/constant"
	"go.uber.org/zap"
)
// WorkerState identifies the lifecycle stage of a Worker.
type WorkerState int

const (
	// Initialized means the worker has finished initializing.
	Initialized WorkerState = iota
	// Pending means the worker has been created but has not started handling yet.
	Pending
	// Running means the worker is bound to a process and is handling work.
	Running
	// Shutdown means the worker has stopped.
	Shutdown
)
// Stater is implemented by every concrete worker state: Handle performs the
// work of the current state, and NextStage moves the worker to its successor.
type Stater interface {
	Handle()
	NextStage()
}
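// The concrete states below form a linear chain; each Handle defers NextStage
// so the transition always fires, even if Handle returns early:
//
//	Initialized -> Pending -> Running -> Shutdown -> nil (final)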
// WorkerStateInitialized starts signal handling, registers the worker with
// service discovery in cluster mode, and then waits for the run signal.
type WorkerStateInitialized struct {
	worker *Worker
}
func (s *WorkerStateInitialized) Handle() {
	s.worker.cfg.Logger.Debug("worker: state initialized handle...")
	defer s.NextStage()
	s.worker.signalListenAsync()
	switch s.worker.opts.Model {
	case Cluster:
		logger.Info("worker: cluster model")
		sdc := s.worker.cfg.GetWorkerDiscoveries()
		if sdc == nil {
			panic("worker: serviceDiscoveryConfig cannot be empty in cluster mode")
		}
		logger.Info(fmt.Sprintf("worker: [%s] protocol: %s", s.worker.opts.ServiceName, sdc.Protocol))
		d, err := GetDiscovery(sdc)
		if err != nil {
			panic(err)
		}
		if s.worker.opts.ShardingTotalCount <= 0 {
			s.worker.opts.ShardingTotalCount = 1
		}
		s.worker.r = NewResolver(s.worker, d, s.worker.opts.ShardingTotalCount)
		// Block until registration with the cluster succeeds, retrying on failure.
		ok := false
		for !ok {
			err = s.worker.r.WorkerRegister()
			if err != nil {
				logger.Warn("worker: cluster register unsuccessful", zap.Any("warn", err))
				time.Sleep(constant.ClusterWorkerRegisterRetryInterval)
			} else {
				ok = true
			}
		}
		// Build the resolver in the background, retrying until it succeeds or
		// the worker context is cancelled.
		go func() {
			for {
				select {
				case <-s.worker.ctx.Done():
					return
				default:
					err := s.worker.r.Build()
					if err != nil {
						logger.Error("worker: cluster build error", zap.Any("error", err))
						// break exits only the select; the sleep below runs,
						// then the loop retries Build.
						break
					}
					return
				}
				time.Sleep(constant.ClusterWorkerBuildRetryInterval)
			}
		}()
	case Local:
		logger.Info("worker: local model")
		s.worker.workerInstance.UpdateHealthStatus(Green)
	}
	s.worker.waitForRunAsync()
}
func (s *WorkerStateInitialized) NextStage() {
	s.worker.ChangeState(&WorkerStatePending{
		worker: s.worker,
	})
}
// WorkerStatePending is a pass-through state between initialization and running.
type WorkerStatePending struct {
	worker *Worker
}
func (s *WorkerStatePending) Handle() {
	s.worker.cfg.Logger.Debug("worker: state pending handle...")
	defer s.NextStage()
}
func (s *WorkerStatePending) NextStage() {
	s.worker.ChangeState(&WorkerStateRunning{
		worker: s.worker,
	})
}
// WorkerStateRunning releases the start signal and blocks until processing finishes.
type WorkerStateRunning struct {
	worker *Worker
}
func (s *WorkerStateRunning) Handle() {
	s.worker.cfg.Logger.Debug("worker: state running handle...")
	defer s.NextStage()
	// Closing startChan unblocks everything waiting for the worker to start;
	// Handle then blocks here until the process signals completion.
	close(s.worker.startChan)
	<-s.worker.processDoneChan
}
func (s *WorkerStateRunning) NextStage() {
	s.worker.ChangeState(&WorkerStateShutdown{
		worker: s.worker,
	})
}
// WorkerStateShutdown is the terminal state.
type WorkerStateShutdown struct {
	worker *Worker
}
func (s *WorkerStateShutdown) Handle() {
	s.worker.cfg.Logger.Debug("worker: state shutdown handle...")
	// The final state still needs to close doneChan, so advance via NextStage.
	defer s.NextStage()
}
func (s *WorkerStateShutdown) NextStage() {
	defer s.worker.cfg.Logger.Debug("worker: final state...")
	s.worker.ChangeState(nil)
}
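Taken together, the four states form a fixed pipeline driven by Worker.ChangeState, whose definition lives elsewhere in the package. Below is a minimal, self-contained sketch of how such a driver loop can work; machine, Run, and the toy pending/running states are hypothetical stand-ins invented for illustration, not part of goworker:

package main

import "fmt"

// Stater mirrors the interface in state.go.
type Stater interface {
	Handle()
	NextStage()
}

// machine is a hypothetical stand-in for Worker; it only demonstrates the loop.
type machine struct {
	state Stater
}

// ChangeState installs the next state; nil means the chain is finished.
func (m *machine) ChangeState(s Stater) { m.state = s }

// Run keeps calling Handle; each Handle defers NextStage, which swaps in the
// successor via ChangeState, so the loop stops once a state installs nil.
func (m *machine) Run(initial Stater) {
	m.state = initial
	for m.state != nil {
		m.state.Handle()
	}
}

// pending and running are toy states standing in for the WorkerState* types.
type pending struct{ m *machine }

func (s *pending) Handle() {
	defer s.NextStage()
	fmt.Println("toy state: pending")
}

func (s *pending) NextStage() { s.m.ChangeState(&running{m: s.m}) }

type running struct{ m *machine }

func (s *running) Handle() {
	defer s.NextStage()
	fmt.Println("toy state: running")
}

func (s *running) NextStage() { s.m.ChangeState(nil) }

func main() {
	m := &machine{}
	m.Run(&pending{m: m})
}

Deferring NextStage inside Handle guarantees the transition fires even if Handle returns early, which is why every state in state.go follows that pattern.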