forked from vitessio/vitess
/
slave_connection.go
347 lines (302 loc) · 11.3 KB
/
slave_connection.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
// Copyright 2014, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mysqlctl
import (
"fmt"
"sync"
log "github.com/golang/glog"
"golang.org/x/net/context"
"github.com/youtube/vitess/go/mysqlconn"
"github.com/youtube/vitess/go/mysqlconn/replication"
"github.com/youtube/vitess/go/pools"
"github.com/youtube/vitess/go/sqldb"
"github.com/youtube/vitess/go/vt/dbconfigs"
)
var (
	// ErrBinlogUnavailable is returned by this library when we
	// cannot find a suitable binlog to satisfy the request.
	// Use errors.New rather than fmt.Errorf: there are no format verbs,
	// and a fixed sentinel error is the idiomatic form.
	ErrBinlogUnavailable = errors.New("cannot find relevant binlogs on this server")
)
// SlaveConnection represents a connection to mysqld that pretends to be a slave
// connecting for replication. Each such connection must identify itself to
// mysqld with a server ID that is unique both among other SlaveConnections and
// among actual slaves in the topology.
type SlaveConnection struct {
	*mysqlconn.Conn // raw MySQL connection carrying the binlog stream

	mysqld  *Mysqld            // owning mysqld wrapper; used for flavor() and reconnects
	slaveID uint32             // fake server ID leased from slaveIDPool; recycled in Close
	cancel  context.CancelFunc // stops the streaming goroutine; set by the StartBinlogDump* methods
	wg      sync.WaitGroup     // tracks the streaming goroutine so Close can wait for it to exit
}
// NewSlaveConnection creates a new slave connection to the mysqld instance.
// It uses a pools.IDPool to ensure that the server IDs used to connect are
// unique within this process. This is done with the assumptions that:
//
// 1) No other processes are making fake slave connections to our mysqld.
// 2) No real slave servers will have IDs in the range 1-N where N is the peak
// number of concurrent fake slave connections we will ever make.
func (mysqld *Mysqld) NewSlaveConnection() (*SlaveConnection, error) {
	c, err := mysqld.connectForReplication()
	if err != nil {
		return nil, err
	}

	// Lease a process-unique fake server ID for this connection.
	id := slaveIDPool.Get()
	log.Infof("new slave connection: slaveID=%d", id)

	return &SlaveConnection{
		Conn:    c,
		mysqld:  mysqld,
		slaveID: id,
	}, nil
}
// connectForReplication create a MySQL connection ready to use for replication.
func (mysqld *Mysqld) connectForReplication() (*mysqlconn.Conn, error) {
params, err := dbconfigs.WithCredentials(&mysqld.dbcfgs.Dba)
if err != nil {
return nil, err
}
ctx := context.Background()
conn, err := mysqlconn.Connect(ctx, ¶ms)
if err != nil {
return nil, err
}
// Tell the server that we understand the format of events
// that will be used if binlog_checksum is enabled on the server.
if _, err := conn.ExecuteFetch("SET @master_binlog_checksum=@@global.binlog_checksum", 0, false); err != nil {
return nil, fmt.Errorf("failed to set @master_binlog_checksum=@@global.binlog_checksum: %v", err)
}
return conn, nil
}
// slaveIDPool is the IDPool for server IDs used to connect as a slave.
// IDs are leased in NewSlaveConnection and returned to the pool in Close.
var slaveIDPool = pools.NewIDPool()
// StartBinlogDumpFromCurrent requests a replication binlog dump from
// the current position. It returns the position the dump started at,
// plus the event channel from StartBinlogDumpFromPosition.
func (sc *SlaveConnection) StartBinlogDumpFromCurrent(ctx context.Context) (replication.Position, <-chan replication.BinlogEvent, error) {
	ctx, sc.cancel = context.WithCancel(ctx)

	flavor, err := sc.mysqld.flavor()
	if err != nil {
		return replication.Position{}, nil, fmt.Errorf("StartBinlogDump needs flavor: %v", err)
	}

	pos, err := flavor.MasterPosition(sc.mysqld)
	if err != nil {
		return replication.Position{}, nil, fmt.Errorf("failed to get master position: %v", err)
	}

	eventChan, err := sc.StartBinlogDumpFromPosition(ctx, pos)
	return pos, eventChan, err
}
// StartBinlogDumpFromPosition requests a replication binlog dump from
// the master mysqld at the given Position and then sends binlog
// events to the provided channel.
// The stream will continue in the background, waiting for new events if
// necessary, until the connection is closed, either by the master or
// by canceling the context.
//
// Note the context is valid and used until eventChan is closed.
func (sc *SlaveConnection) StartBinlogDumpFromPosition(ctx context.Context, startPos replication.Position) (<-chan replication.BinlogEvent, error) {
	// Wrap the context so that Close() (via sc.cancel) can stop the
	// streaming goroutine started below.
	ctx, sc.cancel = context.WithCancel(ctx)

	flavor, err := sc.mysqld.flavor()
	if err != nil {
		return nil, fmt.Errorf("StartBinlogDump needs flavor: %v", err)
	}

	log.Infof("sending binlog dump command: startPos=%v, slaveID=%v", startPos, sc.slaveID)
	if err = flavor.SendBinlogDumpCommand(sc, startPos); err != nil {
		log.Errorf("couldn't send binlog dump command: %v", err)
		return nil, err
	}

	// Read the first packet to see if it's an error response to our dump command.
	buf, err := sc.Conn.ReadPacket()
	if err != nil {
		log.Errorf("couldn't start binlog dump: %v", err)
		return nil, err
	}

	// FIXME(alainjobart) I think we can use a buffered channel for better performance.
	eventChan := make(chan replication.BinlogEvent)

	// Start reading events in the background. Note buf and err are
	// captured by the closure: buf already holds the first packet.
	sc.wg.Add(1)
	go func() {
		defer func() {
			close(eventChan)
			sc.wg.Done()
		}()
		for {
			// 254 (0xfe) in the first byte marks an EOF packet.
			if buf[0] == 254 {
				// The master is telling us to stop.
				log.Infof("received EOF packet in binlog dump: %#v", buf)
				return
			}
			select {
			// Skip the first byte because it's only used for signaling EOF.
			case eventChan <- flavor.MakeBinlogEvent(buf[1:]):
			case <-ctx.Done():
				return
			}
			buf, err = sc.Conn.ReadPacket()
			if err != nil {
				if sqlErr, ok := err.(*sqldb.SQLError); ok && sqlErr.Number() == mysqlconn.CRServerLost {
					// CRServerLost = Lost connection to MySQL server during query.
					// This is not necessarily an error. It could just be that we closed
					// the connection from outside.
					log.Infof("connection closed during binlog stream (possibly intentional): %v", err)
					return
				}
				log.Errorf("read error while streaming binlog events: %v", err)
				return
			}
		}
	}()
	return eventChan, nil
}
// StartBinlogDumpFromBinlogBeforeTimestamp requests a replication
// binlog dump from the master mysqld starting with a file that has
// timestamps smaller than the provided timestamp, and then sends
// binlog events to the provided channel.
//
// The startup phase will list all the binary logs, and find the one
// that has events starting strictly before the provided timestamp. It
// will then start from there, and stream all events. It is the
// responsibility of the calling site to filter the events more.
//
// MySQL 5.6+ note: we need to do it that way because of the way the
// GTIDSet works. In the previous two streaming functions, we pass in
// the full GTIDSet (that has the list of all transactions seen in
// the replication stream). In this case, we don't know it, all we
// have is the binlog file names. We depend on parsing the first
// PREVIOUS_GTIDS_EVENT event in the logs to get it. So we need the
// caller to parse that event, and it can't be skipped because its
// timestamp is lower. Then, for each subsequent event, the caller
// also needs to add the event GTID to its GTIDSet. Otherwise it won't
// be correct ever. So the caller really needs to build up its GTIDSet
// along the entire file, not just for events whose timestamp is in a
// given range.
//
// The stream will continue in the background, waiting for new events if
// necessary, until the connection is closed, either by the master or
// by canceling the context.
//
// Note the context is valid and used until eventChan is closed.
func (sc *SlaveConnection) StartBinlogDumpFromBinlogBeforeTimestamp(ctx context.Context, timestamp int64) (<-chan replication.BinlogEvent, error) {
	// Wrap the context so that Close() (via sc.cancel) can stop the
	// streaming goroutine started below.
	ctx, sc.cancel = context.WithCancel(ctx)

	flavor, err := sc.mysqld.flavor()
	if err != nil {
		return nil, fmt.Errorf("StartBinlogDump needs flavor: %v", err)
	}

	// List the binlogs.
	binlogs, err := sc.Conn.ExecuteFetch("SHOW BINARY LOGS", 1000, false)
	if err != nil {
		return nil, fmt.Errorf("failed to SHOW BINARY LOGS: %v", err)
	}

	// Start with the most recent binlog file until we find the right event.
	// On loop exit, 'event' holds the first timestamped event of the chosen
	// file and will be the first event sent on the channel.
	var binlogIndex int
	var event replication.BinlogEvent
	for binlogIndex = len(binlogs.Rows) - 1; binlogIndex >= 0; binlogIndex-- {
		// Exit the loop early if context is canceled.
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		default:
		}

		// Start dumping the logs. The position is '4' to skip the
		// Binlog File Header. See this page for more info:
		// https://dev.mysql.com/doc/internals/en/binlog-file.html
		binlog := binlogs.Rows[binlogIndex][0].String()
		if err := sc.Conn.WriteComBinlogDump(sc.slaveID, binlog, 4, 0); err != nil {
			return nil, fmt.Errorf("failed to send the ComBinlogDump command: %v", err)
		}

		// Get the first event to get its timestamp. We skip
		// events that don't have timestamps (although it seems
		// most do anyway).
		for {
			buf, err := sc.Conn.ReadPacket()
			if err != nil {
				return nil, fmt.Errorf("couldn't start binlog dump of binlog %v: %v", binlog, err)
			}

			// Why would the master tell us to stop here?
			// (254 = 0xfe, an EOF packet.)
			if buf[0] == 254 {
				return nil, fmt.Errorf("received EOF packet for first packet of binlog %v", binlog)
			}

			// Parse the full event (skipping the leading EOF-signal byte).
			event = flavor.MakeBinlogEvent(buf[1:])
			if !event.IsValid() {
				return nil, fmt.Errorf("first event from binlog %v is not valid", binlog)
			}
			if event.Timestamp() > 0 {
				// We found the first event with a
				// valid timestamp.
				break
			}
		}
		if int64(event.Timestamp()) < timestamp {
			// The first event in this binlog has a smaller
			// timestamp than what we need, we found a good
			// starting point.
			break
		}

		// The timestamp is higher, we need to try the older files.
		// Close and re-open our connection, since a new ComBinlogDump
		// cannot be sent on a connection that is already dumping.
		sc.Conn.Close()
		conn, err := sc.mysqld.connectForReplication()
		if err != nil {
			return nil, err
		}
		sc.Conn = conn
	}
	if binlogIndex == -1 {
		// We haven't found a suitable binlog.
		log.Errorf("couldn't find an old enough binlog to match timestamp >= %v (looked at %v files)", timestamp, len(binlogs.Rows))
		return nil, ErrBinlogUnavailable
	}

	// Now just loop sending and reading events.
	// FIXME(alainjobart) I think we can use a buffered channel for better performance.
	eventChan := make(chan replication.BinlogEvent)

	// Start reading events. Note 'event' is captured by the closure and
	// already holds the first event to send.
	sc.wg.Add(1)
	go func() {
		defer func() {
			close(eventChan)
			sc.wg.Done()
		}()
		for {
			select {
			case eventChan <- event:
			case <-ctx.Done():
				return
			}
			buf, err := sc.Conn.ReadPacket()
			if err != nil {
				if sqlErr, ok := err.(*sqldb.SQLError); ok && sqlErr.Number() == mysqlconn.CRServerLost {
					// CRServerLost = Lost connection to MySQL server during query.
					// This is not necessarily an error. It could just be that we closed
					// the connection from outside.
					log.Infof("connection closed during binlog stream (possibly intentional): %v", err)
					return
				}
				log.Errorf("read error while streaming binlog events: %v", err)
				return
			}
			if buf[0] == 254 {
				// The master is telling us to stop.
				log.Infof("received EOF packet in binlog dump: %#v", buf)
				return
			}
			// Skip the first byte because it's only used
			// for signaling EOF.
			event = flavor.MakeBinlogEvent(buf[1:])
		}
	}()
	return eventChan, nil
}
// Close closes the slave connection, which also signals an ongoing dump
// started with StartBinlogDump() to stop and close its BinlogEvent channel.
// The ID for the slave connection is recycled back into the pool.
// Close is a no-op if the connection is already closed.
func (sc *SlaveConnection) Close() {
	if sc.Conn == nil {
		return
	}
	log.Infof("closing slave socket to unblock reads")
	sc.Conn.Close()

	log.Infof("waiting for slave dump thread to end")
	// BUG FIX: sc.cancel is only set by the StartBinlogDump* methods.
	// Closing a connection that never started a dump used to call a nil
	// function and panic; guard against that.
	if sc.cancel != nil {
		sc.cancel()
	}
	sc.wg.Wait()

	log.Infof("closing slave MySQL client, recycling slaveID %v", sc.slaveID)
	sc.Conn = nil
	slaveIDPool.Put(sc.slaveID)
}