-
Notifications
You must be signed in to change notification settings - Fork 0
/
engine.go
127 lines (117 loc) · 2.88 KB
/
engine.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
package jewel_crawler
import (
"context"
"encoding/json"
"github.com/SunMaybo/jewel-crawler/crawler"
"github.com/SunMaybo/jewel-crawler/limit"
logs "github.com/SunMaybo/jewel-crawler/logs"
"github.com/SunMaybo/jewel-crawler/sync"
"github.com/SunMaybo/jewel-crawler/task"
"github.com/SunMaybo/jewel-crawler/temp"
"github.com/go-redis/redis/v8"
"strings"
sync2 "sync"
"time"
)
// CrawlerEngine pops serialized tasks from one or more Redis list queues
// and feeds each through the crawler pipeline, requeueing failed tasks
// until their retry budget is exhausted.
type CrawlerEngine struct {
	redis         *redis.Client          // shared Redis connection used for queue ops and temp storage
	limit         *limit.ConcurrentLimit // concurrency limiter built in New; not referenced elsewhere in this file — TODO confirm intended use
	Pipeline      *crawler.PipeLine      // pipeline every dequeued task is passed through via Invoke
	queue         string                 // queue name the pipeline is constructed with (see New)
	Concurrent    int                    // number of consumer goroutines spawned per polling round in Start
	consumerQueue string                 // comma-separated list of queue names Start consumes from
	// CallBack, when non-nil, is invoked with the task's final outcome:
	// either a nil error on success or the last error after retries are spent.
	CallBack func(task task.Task, err error)
}
// SetLogLevel configures the package's global logger for the given level
// string. logs.GetLog is called purely for its side effect; its return
// value is discarded — presumably it (re)initializes the shared logger
// logs.S at that level, TODO confirm against the logs package.
func SetLogLevel(level string) {
	logs.GetLog(level)
}
// Config carries the settings required to construct a CrawlerEngine.
type Config struct {
	Redis         *redis.Options // options handed to redis.NewClient
	Queue         string         // queue name used when building the pipeline
	ConsumerQueue string         // comma-separated queue names to consume from
	Concurrent    int            // consumer goroutine count; values <= 0 default to 3
}
// New builds a CrawlerEngine from cfg. It dials Redis immediately and
// panics if the server cannot be reached, so a returned engine always
// holds a live connection. A non-positive cfg.Concurrent falls back to
// a default of 3.
//
// Unlike before, cfg is treated as read-only: the concurrency default is
// applied to a local copy instead of mutating the caller's Config.
func New(cfg *Config) *CrawlerEngine {
	rdb := redis.NewClient(cfg.Redis)
	// Fail fast: an unreachable Redis makes the engine useless.
	if _, err := rdb.Ping(context.Background()).Result(); err != nil {
		panic(err)
	}
	concurrent := cfg.Concurrent
	if concurrent <= 0 {
		concurrent = 3 // sensible default when unset or invalid
	}
	return &CrawlerEngine{
		redis:         rdb,
		Concurrent:    concurrent,
		queue:         cfg.Queue,
		consumerQueue: cfg.ConsumerQueue,
		limit:         limit.NewConcurrentLimit(concurrent),
		Pipeline:      crawler.New(cfg.Queue, temp.NewTempStorage(rdb)),
	}
}
// Start runs the consume loop until ctx is cancelled. Each round it
// spawns p.Concurrent worker goroutines; every worker pops one task from
// each consumer queue, decodes it, and runs it through the pipeline.
// A task whose pipeline invocation fails is pushed back onto its queue
// until it has been attempted maxExecuteCount times; the final outcome
// (success or exhausted retries) is reported to CallBack when set.
// A maxExecuteCount <= 0 is treated as 1.
func (p *CrawlerEngine) Start(ctx context.Context, maxExecuteCount int) {
	if maxExecuteCount <= 0 {
		maxExecuteCount = 1
	}
	// strings.Split never yields an empty slice, so the old
	// len(queues) >= 1 guard was always true; split once up front
	// instead of on every round.
	queues := strings.Split(p.consumerQueue, ",")
	for {
		// Honor cancellation: previously the loop spun forever after
		// ctx was done, with every Redis call failing.
		select {
		case <-ctx.Done():
			return
		default:
		}
		wait := sync2.WaitGroup{}
		wait.Add(p.Concurrent)
		for i := 0; i < p.Concurrent; i++ {
			go func() {
				defer wait.Done()
				for _, queue := range queues {
					result, err := p.redis.LPop(ctx, queue).Result()
					if err == redis.Nil {
						// Queue is empty: short back-off before the next queue.
						time.Sleep(5 * time.Millisecond)
						continue
					}
					if err != nil {
						// Transient Redis failure: log and back off hard.
						logs.S.Error(err)
						time.Sleep(15 * time.Second)
						continue
					}
					t := task.Task{}
					if err := json.Unmarshal([]byte(result), &t); err != nil {
						// A malformed payload used to panic and take down the
						// whole engine; log it and drop the task instead.
						logs.S.Error(err)
						continue
					}
					t.Redis = p.redis
					err = p.Pipeline.Invoke(ctx, t)
					if err != nil && t.Retry <= maxExecuteCount {
						// Retry budget remains: requeue with an incremented count.
						t.Retry += 1
						if pushErr := p.Push(ctx, queue, t); pushErr != nil {
							logs.S.Warn(pushErr)
						}
						continue
					}
					// Final outcome: success (err == nil) or retries exhausted.
					if p.CallBack != nil {
						p.CallBack(t, err)
					}
				}
			}()
		}
		wait.Wait()
	}
}
// Push serializes task to JSON and appends it to the tail of queue via
// RPUSH, returning any serialization or Redis error. The json.Marshal
// error was previously discarded with a blank identifier, which could
// silently enqueue nothing; it is now surfaced to the caller.
func (p *CrawlerEngine) Push(ctx context.Context, queue string, task task.Task) error {
	logs.S.Infow("下发任务", "global_id", task.GlobalId, "url", task.CrawlerUrl)
	taskStr, err := json.Marshal(task)
	if err != nil {
		return err
	}
	return p.redis.RPush(ctx, queue, taskStr).Err()
}
// NewMutex returns a new distributed mutex backed by the engine's Redis
// client — presumably a Redis-based lock from the project's sync package;
// TODO confirm semantics against sync.New.
func (p *CrawlerEngine) NewMutex() *sync.Mutex {
	return sync.New(p.redis)
}
// Close releases the engine's Redis connection. The engine must not be
// used after Close returns.
func (p *CrawlerEngine) Close() error {
	return p.redis.Close()
}