-
Notifications
You must be signed in to change notification settings - Fork 269
/
optimist.go
87 lines (80 loc) · 3.22 KB
/
optimist.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package syncer
import (
"context"
"github.com/pingcap/tidb/pkg/util/filter"
"github.com/pingcap/tiflow/dm/pkg/utils"
"go.uber.org/zap"
)
// initOptimisticShardDDL initializes the shard DDL support in the optimistic mode.
// It fetches all upstream tables permitted by the block-allow list, routes each of
// them to its downstream table, and seeds the optimist with the resulting
// downstream-to-upstream table mapping. Returns any error from fetching the
// upstream tables or from initializing the optimist.
func (s *Syncer) initOptimisticShardDDL(ctx context.Context) error {
	// fetch tables from source and filter them
	sourceTables, err := s.fromDB.FetchAllDoTables(ctx, s.baList)
	if err != nil {
		return err
	}

	// convert according to router rules.
	// downstream-schema -> downstream-table -> upstream-schema -> upstream-table.
	// TODO: refine to downstream-ID -> upstream-ID
	mapper := make(map[string]map[string]map[string]map[string]struct{})
	// NOTE: renamed loop variable UpTables -> upTables; locals use lowerCamelCase in Go.
	for upSchema, upTables := range sourceTables {
		for _, upTable := range upTables {
			up := &filter.Table{Schema: upSchema, Name: upTable}
			down := s.route(up)
			downSchema, downTable := down.Schema, down.Name
			// lazily create each nesting level of the mapping before inserting.
			if _, ok := mapper[downSchema]; !ok {
				mapper[downSchema] = make(map[string]map[string]map[string]struct{})
			}
			if _, ok := mapper[downSchema][downTable]; !ok {
				mapper[downSchema][downTable] = make(map[string]map[string]struct{})
			}
			if _, ok := mapper[downSchema][downTable][upSchema]; !ok {
				mapper[downSchema][downTable][upSchema] = make(map[string]struct{})
			}
			mapper[downSchema][downTable][upSchema][upTable] = struct{}{}
		}
	}

	return s.optimist.Init(mapper)
}
// resolveOptimisticDDL attempts to resolve a pending optimistic shard DDL lock
// for the given source/target table pair. When the pair is in the conflict
// stage and its group can be resolved, it marks the redirect operation done and
// pushes a ShardingReSync onto the event context's resync channel, returning
// true. In every other case (nil tables, not in conflict stage, empty group) it
// returns false.
func (s *Syncer) resolveOptimisticDDL(ec *eventContext, sourceTable, targetTable *filter.Table) bool {
	// guard: both tables must be present for a resolution attempt.
	if sourceTable == nil || targetTable == nil {
		s.osgk.tctx.L().Warn("invalid resolveOptimistic deploy without sourceTable/targetTable in optimistic shard mode",
			zap.Bool("emptySourceTable", sourceTable == nil),
			zap.Bool("emptyTargetTable", targetTable == nil))
		return false
	}
	if !s.osgk.inConflictStage(sourceTable, targetTable) {
		return false
	}
	// in the following two situations we should resolve this ddl lock at now
	// 1. after this worker's ddl, the ddl lock is resolved
	// 2. other worker has resolved this ddl lock, receives resolve command from master
	// TODO: maybe we don't need to resolve ddl lock in situation 1, because when situation 1 happens we
	// should always receive a resolve operation like situation 2.
	group, redirectLocation := s.osgk.resolveGroup(targetTable)
	if len(group) == 0 {
		return false
	}
	s.optimist.DoneRedirectOperation(utils.GenTableID(targetTable))
	resync := &ShardingReSync{
		currLocation:   redirectLocation,
		latestLocation: ec.endLocation,
		targetTable:    targetTable,
		allResolved:    true,
	}
	s.osgk.tctx.L().Info("sending resync operation in optimistic shard mode",
		zap.Stringer("shardingResync", resync))
	*ec.shardingReSyncCh <- resync
	s.osgk.addShardingReSync(resync)
	return true
}