gc-bf.go
// Copyright (C) 2022 Storj Labs, Inc.
// See LICENSE for copying information.

package satellite

import (
	"context"
	"errors"
	"net"
	"runtime/pprof"

	"github.com/spacemonkeygo/monkit/v3"
	"github.com/zeebo/errs"
	"go.uber.org/zap"
	"golang.org/x/sync/errgroup"

	"storj.io/common/peertls/extensions"
	"storj.io/private/debug"
	"storj.io/private/version"
	"storj.io/storj/private/lifecycle"
	"storj.io/storj/satellite/gc/bloomfilter"
	"storj.io/storj/satellite/metabase"
	"storj.io/storj/satellite/metabase/rangedloop"
	"storj.io/storj/satellite/overlay"
)

// GarbageCollectionBF is the satellite garbage collection process which collects bloom filters.
//
// architecture: Peer
type GarbageCollectionBF struct {
	Log *zap.Logger
	DB  DB

	Servers  *lifecycle.Group
	Services *lifecycle.Group

	Debug struct {
		Listener net.Listener
		Server   *debug.Server
	}

	Overlay struct {
		DB overlay.DB
	}

	GarbageCollection struct {
		Config bloomfilter.Config
	}

	RangedLoop struct {
		Service *rangedloop.Service
	}
}

// NewGarbageCollectionBF creates a new satellite garbage collection peer which collects storage nodes bloom filters.
func NewGarbageCollectionBF(log *zap.Logger, db DB, metabaseDB *metabase.DB, revocationDB extensions.RevocationDB,
	versionInfo version.Info, config *Config, atomicLogLevel *zap.AtomicLevel) (*GarbageCollectionBF, error) {
	peer := &GarbageCollectionBF{
		Log: log,
		DB:  db,

		Servers:  lifecycle.NewGroup(log.Named("servers")),
		Services: lifecycle.NewGroup(log.Named("services")),
	}

	{ // setup debug
		var err error
		if config.Debug.Address != "" {
			peer.Debug.Listener, err = net.Listen("tcp", config.Debug.Address)
			if err != nil {
				// failure to bind the debug listener is logged but is not fatal;
				// the peer continues to start without debug endpoints.
				withoutStack := errors.New(err.Error())
				peer.Log.Debug("failed to start debug endpoints", zap.Error(withoutStack))
			}
		}
		debugConfig := config.Debug
		debugConfig.ControlTitle = "GC-BloomFilter"
		peer.Debug.Server = debug.NewServerWithAtomicLevel(log.Named("debug"), peer.Debug.Listener, monkit.Default, debugConfig, atomicLogLevel)
		peer.Servers.Add(lifecycle.Item{
			Name:  "debug",
			Run:   peer.Debug.Server.Run,
			Close: peer.Debug.Server.Close,
		})
	}

	{ // setup overlay
		peer.Overlay.DB = peer.DB.OverlayCache()
	}

	{ // setup garbage collection bloom filters
		log := peer.Log.Named("garbage-collection-bf")

		peer.GarbageCollection.Config = config.GarbageCollectionBF

		var observer rangedloop.Observer
		if config.GarbageCollectionBF.UseSyncObserver {
			observer = bloomfilter.NewSyncObserver(log.Named("gc-bf"),
				config.GarbageCollectionBF,
				peer.Overlay.DB,
			)
		} else {
			observer = bloomfilter.NewObserver(log.Named("gc-bf"),
				config.GarbageCollectionBF,
				peer.Overlay.DB,
			)
		}

		provider := rangedloop.NewMetabaseRangeSplitter(metabaseDB, config.RangedLoop.AsOfSystemInterval, config.RangedLoop.BatchSize)
		peer.RangedLoop.Service = rangedloop.NewService(log.Named("rangedloop"), config.RangedLoop, provider, []rangedloop.Observer{
			rangedloop.NewLiveCountObserver(metabaseDB, config.RangedLoop.SuspiciousProcessedRatio, config.RangedLoop.AsOfSystemInterval),
			observer,
		})

		// in RunOnce mode the ranged loop is driven directly from Run;
		// otherwise it is registered here as a long-running service.
		if !config.GarbageCollectionBF.RunOnce {
			peer.Services.Add(lifecycle.Item{
				Name:  "garbage-collection-bf",
				Run:   peer.RangedLoop.Service.Run,
				Close: peer.RangedLoop.Service.Close,
			})
			peer.Debug.Server.Panel.Add(
				debug.Cycle("Garbage Collection Bloom Filters", peer.RangedLoop.Service.Loop))
		}
	}

	return peer, nil
}

// Run runs satellite garbage collection until it's either closed or it errors.
func (peer *GarbageCollectionBF) Run(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	group, ctx := errgroup.WithContext(ctx)

	pprof.Do(ctx, pprof.Labels("subsystem", "gc-bloomfilter"), func(ctx context.Context) {
		peer.Servers.Run(ctx, group)
		peer.Services.Run(ctx, group)

		if peer.GarbageCollection.Config.RunOnce {
			group.Go(func() error {
				// in RunOnce mode, execute a single ranged-loop pass and then
				// cancel the context so the peer shuts down cleanly.
				_, err = peer.RangedLoop.Service.RunOnce(ctx)
				cancel()
				return err
			})
		}

		pprof.Do(ctx, pprof.Labels("name", "subsystem-wait"), func(ctx context.Context) {
			err = group.Wait()
		})
	})
	return err
}

// Close closes all the resources.
func (peer *GarbageCollectionBF) Close() error {
	return errs.Combine(
		peer.Servers.Close(),
		peer.Services.Close(),
	)
}