-
Notifications
You must be signed in to change notification settings - Fork 303
/
manager.go
93 lines (83 loc) · 3.04 KB
/
manager.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
package processor
import (
"context"
"sync"
backendconfig "github.com/rudderlabs/rudder-server/config/backend-config"
"github.com/rudderlabs/rudder-server/jobsdb"
"github.com/rudderlabs/rudder-server/processor/transformer"
"github.com/rudderlabs/rudder-server/services/fileuploader"
"github.com/rudderlabs/rudder-server/services/multitenant"
"github.com/rudderlabs/rudder-server/services/rsources"
"github.com/rudderlabs/rudder-server/services/transientsource"
"github.com/rudderlabs/rudder-server/utils/types"
)
// LifecycleManager owns the start/stop lifecycle of a processor HandleT,
// bundling the databases, services, and cancellation plumbing it needs.
// Exported fields may be overridden by callers before Start is invoked.
type LifecycleManager struct {
	HandleT       *HandleT           // the processor instance being managed
	mainCtx       context.Context    // parent context supplied to New; NOTE(review): currently not used by Start — confirm intent
	currentCancel context.CancelFunc // cancels the context of the most recent Start
	waitGroup     interface{ Wait() } // lets Stop block until the Start goroutine exits
	gatewayDB     *jobsdb.HandleT
	routerDB      *jobsdb.HandleT
	batchRouterDB *jobsdb.HandleT
	errDB         *jobsdb.HandleT
	clearDB       *bool
	MultitenantStats multitenant.MultiTenantI // need not initialize again
	ReportingI       types.ReportingI         // need not initialize again
	BackendConfig    backendconfig.BackendConfig
	Transformer      transformer.Transformer // optional override; used by Start when non-nil
	transientSources transientsource.Service
	fileuploader     fileuploader.Provider
	rsourcesService  rsources.JobService
}
// Start starts a processor, this is not a blocking call.
// If the processor is not completely started and the data started coming then also it will not be problematic as we
// are assuming that the DBs will be up.
//
// An optional Transformer override is installed before Setup runs. The
// processor loop itself runs in a background goroutine; Stop cancels it and
// waits for it to finish. Always returns nil.
// NOTE(review): the run context derives from context.Background(), not
// proc.mainCtx — presumably so Start/Stop cycles are independent; confirm.
func (proc *LifecycleManager) Start() error {
	if proc.Transformer != nil {
		proc.HandleT.transformer = proc.Transformer
	}

	proc.HandleT.Setup(
		proc.BackendConfig, proc.gatewayDB, proc.routerDB, proc.batchRouterDB, proc.errDB,
		proc.clearDB, proc.ReportingI, proc.MultitenantStats, proc.transientSources, proc.fileuploader, proc.rsourcesService,
	)

	// Fresh context per Start so that Stop only cancels this run.
	runCtx, cancel := context.WithCancel(context.Background())
	proc.currentCancel = cancel

	wg := &sync.WaitGroup{}
	proc.waitGroup = wg

	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := proc.HandleT.Start(runCtx); err != nil {
			proc.HandleT.logger.Errorf("Error starting processor: %v", err)
		}
	}()

	return nil
}
// Stop stops the processor, this is a blocking call.
// Order matters: cancel the run context first so the Start goroutine can
// observe cancellation, shut the processor down, then wait for the goroutine
// started by Start to drain before returning.
func (proc *LifecycleManager) Stop() {
	proc.currentCancel()
	proc.HandleT.Shutdown()
	proc.waitGroup.Wait()
}
// New creates a new Processor instance.
//
// It wires the supplied databases and services into a LifecycleManager and
// seeds the underlying HandleT with a default transformer (replaceable via
// the Transformer field before Start). BackendConfig defaults to
// backendconfig.DefaultBackendConfig and can likewise be overridden.
func New(ctx context.Context, clearDb *bool, gwDb, rtDb, brtDb, errDb *jobsdb.HandleT,
	tenantDB multitenant.MultiTenantI, reporting types.ReportingI, transientSources transientsource.Service, fileuploader fileuploader.Provider,
	rsourcesService rsources.JobService,
) *LifecycleManager {
	handle := &HandleT{transformer: transformer.NewTransformer()}
	return &LifecycleManager{
		HandleT:          handle,
		mainCtx:          ctx,
		clearDB:          clearDb,
		gatewayDB:        gwDb,
		routerDB:         rtDb,
		batchRouterDB:    brtDb,
		errDB:            errDb,
		MultitenantStats: tenantDB,
		ReportingI:       reporting,
		BackendConfig:    backendconfig.DefaultBackendConfig,
		transientSources: transientSources,
		fileuploader:     fileuploader,
		rsourcesService:  rsourcesService,
	}
}