cluster_pipeline.go
package redis

import (
	"gopkg.in/redis.v3/internal/hashtag"
	"gopkg.in/redis.v3/internal/pool"
)

// ClusterPipeline is not thread-safe.
type ClusterPipeline struct {
	commandable

	cluster *ClusterClient
	cmds    []Cmder
	closed  bool
}

// Pipeline creates a new pipeline which is able to execute commands
// against multiple shards. It's NOT safe for concurrent use by
// multiple goroutines.
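//
// A minimal usage sketch (assuming a *ClusterClient named client; the Set
// and Get commands come from the embedded commandable, and error handling
// is elided):
//
//	pipe := client.Pipeline()
//	pipe.Set("key", "value", 0)
//	get := pipe.Get("key")
//	cmds, err := pipe.Exec()
//	// cmds holds every queued command with its reply; get.Val() and
//	// get.Err() expose the result of the GET once Exec returns.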
func (c *ClusterClient) Pipeline() *ClusterPipeline {
	pipe := &ClusterPipeline{
		cluster: c,
		cmds:    make([]Cmder, 0, 10),
	}
	pipe.commandable.process = pipe.process
	return pipe
}

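// process queues cmd for execution by Exec; no network traffic happens
// until Exec is called.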
func (pipe *ClusterPipeline) process(cmd Cmder) {
	pipe.cmds = append(pipe.cmds, cmd)
}

// Discard resets the pipeline and discards queued commands.
func (pipe *ClusterPipeline) Discard() error {
	if pipe.closed {
		return pool.ErrClosed
	}
	pipe.cmds = pipe.cmds[:0]
	return nil
}

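// Exec sends the queued commands, grouping them by the master that owns
// each command's hash slot and retrying commands that fail with a
// network error or a MOVED/ASK redirect, up to the configured maximum
// number of redirects. It returns all executed commands; inspect each
// command's Err() for per-command failures.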
func (pipe *ClusterPipeline) Exec() (cmds []Cmder, retErr error) {
	if pipe.closed {
		return nil, pool.ErrClosed
	}
	if len(pipe.cmds) == 0 {
		return []Cmder{}, nil
	}

	cmds = pipe.cmds
	pipe.cmds = make([]Cmder, 0, 10)

	cmdsMap := make(map[string][]Cmder)
	for _, cmd := range cmds {
		slot := hashtag.Slot(cmd.clusterKey())
		addr := pipe.cluster.slotMasterAddr(slot)
		cmdsMap[addr] = append(cmdsMap[addr], cmd)
	}

	for attempt := 0; attempt <= pipe.cluster.opt.getMaxRedirects(); attempt++ {
		failedCmds := make(map[string][]Cmder)

		for addr, cmds := range cmdsMap {
			client, err := pipe.cluster.getClient(addr)
			if err != nil {
				setCmdsErr(cmds, err)
				retErr = err
				continue
			}

			cn, err := client.conn()
			if err != nil {
				setCmdsErr(cmds, err)
				retErr = err
				continue
			}

			failedCmds, err = pipe.execClusterCmds(cn, cmds, failedCmds)
			if err != nil {
				retErr = err
			}
			client.putConn(cn, err, false)
		}

		cmdsMap = failedCmds
	}

	return cmds, retErr
}

// Close closes the pipeline, releasing any open resources.
func (pipe *ClusterPipeline) Close() error {
	pipe.Discard()
	pipe.closed = true
	return nil
}

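// execClusterCmds writes cmds to a single connection and reads the
// replies. Commands that hit a network error or a MOVED/ASK redirect are
// reset and collected into failedCmds, keyed by the address to retry
// against (an empty key for network errors), so that Exec can retry
// them; the first other reply error encountered is returned.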
func (pipe *ClusterPipeline) execClusterCmds(
	cn *pool.Conn, cmds []Cmder, failedCmds map[string][]Cmder,
) (map[string][]Cmder, error) {
	if err := writeCmd(cn, cmds...); err != nil {
		setCmdsErr(cmds, err)
		return failedCmds, err
	}

	var firstCmdErr error
	for i, cmd := range cmds {
		err := cmd.readReply(cn)
		if err == nil {
			continue
		}
		if isNetworkError(err) {
			cmd.reset()
			failedCmds[""] = append(failedCmds[""], cmds[i:]...)
			break
		} else if moved, ask, addr := isMovedError(err); moved {
			pipe.cluster.lazyReloadSlots()
			cmd.reset()
			failedCmds[addr] = append(failedCmds[addr], cmd)
		} else if ask {
			cmd.reset()
			failedCmds[addr] = append(failedCmds[addr], NewCmd("ASKING"), cmd)
		} else if firstCmdErr == nil {
			firstCmdErr = err
		}
	}

	return failedCmds, firstCmdErr
}