forked from dgraph-io/dgraph
-
Notifications
You must be signed in to change notification settings - Fork 0
/
worker.go
122 lines (108 loc) · 3.56 KB
/
worker.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
/*
* Copyright (C) 2017 Dgraph Labs, Inc. and Contributors
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// Package worker contains code for intern.worker communication to perform
// queries and mutations.
package worker
import (
"fmt"
"log"
"math"
"net"
"sync"
"time"
"golang.org/x/net/context"
"github.com/dgraph-io/badger"
"github.com/dgraph-io/dgraph/conn"
"github.com/dgraph-io/dgraph/posting"
"github.com/dgraph-io/dgraph/protos/intern"
"github.com/dgraph-io/dgraph/x"
"google.golang.org/grpc"
)
var (
	// pstore is the Badger instance backing this worker's data.
	pstore *badger.ManagedDB
	// workerServer serves internal worker-to-worker gRPC traffic.
	workerServer *grpc.Server
	// raftServer handles Raft RPCs between group members; registered in RunServer.
	raftServer conn.RaftServer
	// pendingProposals bounds the number of in-flight proposals (sized in Init
	// from Config.NumPendingProposals).
	pendingProposals chan struct{}
	// In case of flaky network connectivity we would try to keep upto maxPendingEntries in wal
	// so that the nodes which have lagged behind leader can just replay entries instead of
	// fetching snapshot if network disconnectivity is greater than the interval at which snapshots
	// are taken
	// NOTE(review): no maxPendingEntries variable is declared in this block —
	// the comment above looks stale; confirm where that constant now lives.
)
// workerPort returns the TCP port used for internal worker communication:
// the configured port offset applied to the internal base port.
func workerPort() int {
	offset := x.Config.PortOffset
	return offset + x.PortInternal
}
// Init wires up package-level state: it records the Badger handle, sizes
// the pending-proposal throttle, and constructs the gRPC server used for
// worker-to-worker communication.
func Init(ps *badger.ManagedDB) {
	pstore = ps
	// Must run after group config is loaded, since the proposal limit
	// comes from it.
	pendingProposals = make(chan struct{}, Config.NumPendingProposals)
	opts := []grpc.ServerOption{
		grpc.MaxRecvMsgSize(x.GrpcMaxSize),
		grpc.MaxSendMsgSize(x.GrpcMaxSize),
		grpc.MaxConcurrentStreams(math.MaxInt32),
	}
	workerServer = grpc.NewServer(opts...)
}
// grpcWorker struct implements the gRPC server interface.
type grpcWorker struct {
	sync.Mutex // guards reqids
	// reqids records request ids already seen, for duplicate suppression
	// via addIfNotPresent.
	reqids map[uint64]bool
}
// addIfNotPresent returns false if it finds the reqid already present.
// Otherwise, adds the reqid in the list, and returns true.
func (w *grpcWorker) addIfNotPresent(reqid uint64) bool {
	w.Lock()
	defer w.Unlock()
	switch {
	case w.reqids == nil:
		// Lazily allocate the set on first use.
		w.reqids = map[uint64]bool{}
	case w.reqids[reqid]:
		// Values are only ever set to true, so a direct read suffices.
		return false
	}
	w.reqids[reqid] = true
	return true
}
// RunServer initializes a tcp server on port which listens to requests from
// other workers for intern.communication.
//
// When bindall is true the listener accepts connections on all interfaces;
// otherwise it binds only to localhost. The call blocks until the server
// stops serving (e.g. via GracefulStop in BlockingStop).
func RunServer(bindall bool) {
	laddr := "localhost"
	if bindall {
		laddr = "0.0.0.0"
	}
	// Note: the redundant `var err error` and the unreachable `return`
	// after log.Fatalf (which exits the process) have been removed.
	ln, err := net.Listen("tcp", fmt.Sprintf("%s:%d", laddr, workerPort()))
	if err != nil {
		log.Fatalf("While running server: %v", err)
	}
	x.Printf("Worker listening at address: %v", ln.Addr())
	intern.RegisterWorkerServer(workerServer, &grpcWorker{})
	intern.RegisterRaftServer(workerServer, &raftServer)
	// Surface serve failures instead of dropping them silently. Serve
	// returns grpc.ErrServerStopped after a clean GracefulStop, which is
	// not an error condition.
	if err := workerServer.Serve(ln); err != nil && err != grpc.ErrServerStopped {
		log.Fatalf("While serving gRPC requests: %v", err)
	}
}
// StoreStats returns stats for data store. Badger currently exposes no
// stats, so a fixed placeholder message is returned.
func StoreStats() string {
	const noStats = "Currently no stats for badger"
	return noStats
}
// BlockingStop stops all the nodes, server between other workers and syncs all marks.
func BlockingStop() {
	// Sleep for 5 seconds to ensure that commit/abort is proposed.
	time.Sleep(5 * time.Second)
	// Bound the remaining shutdown work (applyAllMarks below) to 5 minutes.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	groups().Node.Stop() // blocking stop raft node.
	workerServer.GracefulStop() // blocking stop server
	// Flush/acknowledge all applied entries before halting LRU eviction.
	groups().Node.applyAllMarks(ctx)
	posting.StopLRUEviction()
	// NOTE(review): snapshot(0) presumably forces an immediate snapshot
	// regardless of pending-entry count — confirm against the node's
	// snapshot implementation.
	groups().Node.snapshot(0)
}