// Forked from hagen1778/tsbs (main.go).
// bulk_load_mongo loads a Mongo daemon with data from stdin.
//
// Any existing collections in the database will be removed.
package main
import (
"flag"
"time"
"github.com/hagen1778/tsbs/load"
)
// Collection layout and ingestion tuning constants.
const (
	// collectionName is the Mongo collection the benchmark writes into.
	collectionName = "point_data"
	// aggDocID, aggDateFmt, and aggKeyID look like field names / bucketing
	// format for the hourly-aggregated document layout (documentPer=false)
	// — confirm against the aggregation benchmark code.
	aggDocID   = "doc_id"
	aggDateFmt = "20060102_15" // see Go docs for how we arrive at this time format
	aggKeyID   = "key_id"
	// aggInsertBatchSize is the number of documents per insert batch.
	aggInsertBatchSize = 500 // found via trial-and-error
	// timestampField names the per-event timestamp field; the _ns suffix
	// suggests nanosecond precision — verify against the writer.
	timestampField = "timestamp_ns"
)
// Program option vars, populated from command-line flags in init:
var (
	// daemonURL is the Mongo server address (flag -url).
	daemonURL string
	// documentPer selects one-document-per-event mode instead of
	// hourly aggregation (flag -document-per-event).
	documentPer bool
	// writeTimeout bounds each write operation (flag -write-timeout).
	writeTimeout time.Duration
)
// Global vars
var (
	// loader drives the benchmark run; obtained from the shared load
	// package in init (which presumably registers its own flags).
	loader *load.BenchmarkRunner
)
// Parse args:
func init() {
loader = load.GetBenchmarkRunner()
flag.StringVar(&daemonURL, "url", "localhost:27017", "Mongo URL.")
flag.DurationVar(&writeTimeout, "write-timeout", 10*time.Second, "Write timeout.")
flag.BoolVar(&documentPer, "document-per-event", false, "Whether to use one document per event or aggregate by hour")
flag.Parse()
}
// main picks the benchmark implementation and queue layout based on the
// -document-per-event flag, then hands control to the runner.
func main() {
	var (
		bench  load.Benchmark
		queues uint
	)
	switch {
	case documentPer:
		// One document per event: a single shared work queue suffices.
		bench = newNaiveBenchmark(loader)
		queues = load.SingleQueue
	default:
		// Hourly aggregation: give each worker its own queue.
		bench = newAggBenchmark(loader)
		queues = load.WorkerPerQueue
	}
	loader.RunBenchmark(bench, queues)
}