forked from rainycape/memcache
-
Notifications
You must be signed in to change notification settings - Fork 0
/
bench_test.go
131 lines (116 loc) · 2.77 KB
/
bench_test.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
package memcache
import (
"runtime"
"strings"
"sync"
"testing"
"time"
)
// benchmarkNewSet measures Set throughput for a single item against a
// freshly started docker-backed memcached server.
func benchmarkNewSet(b *testing.B, item *Item) {
	client := newDockerServer(b)
	// A negative timeout disables the client-side deadline.
	client.SetTimeout(time.Duration(-1))
	// Report MB/s based on the key+value payload per operation.
	b.SetBytes(int64(len(item.Key) + len(item.Value)))
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		err := client.Set(item)
		if err != nil {
			b.Fatal(err)
		}
	}
	b.StopTimer()
}
// benchmarkNewSetGet measures the round-trip cost of a Set immediately
// followed by a Get of the same key.
func benchmarkNewSetGet(b *testing.B, item *Item) {
	client := newDockerServer(b)
	// A negative timeout disables the client-side deadline.
	client.SetTimeout(time.Duration(-1))
	k := item.Key
	// Report MB/s based on the key+value payload per operation.
	b.SetBytes(int64(len(item.Key) + len(item.Value)))
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		if err := client.Set(item); err != nil {
			b.Fatal(err)
		}
		_, err := client.Get(k)
		if err != nil {
			b.Fatal(err)
		}
	}
	b.StopTimer()
}
// largeItem returns an item with a near-maximum-length key (240 bytes;
// memcached caps keys at 250) and a 1 KiB value.
func largeItem() *Item {
	return &Item{
		Key:   strings.Repeat("f", 240),
		Value: make([]byte, 1024),
	}
}
// smallItem returns a minimal item with a tiny key and value.
func smallItem() *Item {
	it := &Item{Key: "foo", Value: []byte("bar")}
	return it
}
func BenchmarkNewSet(b *testing.B) {
benchmarkNewSet(b, smallItem())
}
func BenchmarkNewSetLarge(b *testing.B) {
benchmarkNewSet(b, largeItem())
}
func BenchmarkNewSetGet(b *testing.B) {
benchmarkNewSetGet(b, smallItem())
}
func BenchmarkNewSetGetLarge(b *testing.B) {
benchmarkNewSetGet(b, largeItem())
}
// benchmarkNewConcurrentSetGet measures Set+Get round trips performed by
// `count` goroutines in parallel, each doing `opcount` operations per
// benchmark iteration. GOMAXPROCS is temporarily raised to `count` so the
// goroutines can actually run concurrently.
func benchmarkNewConcurrentSetGet(b *testing.B, item *Item, count int, opcount int) {
	mp := runtime.GOMAXPROCS(0)
	defer runtime.GOMAXPROCS(mp)
	runtime.GOMAXPROCS(count)
	c := newDockerServer(b)
	// A negative timeout disables the client-side deadline.
	c.SetTimeout(time.Duration(-1))
	// Items are not thread safe, so give each goroutine its own copy.
	items := make([]*Item, count)
	for ii := range items {
		items[ii] = &Item{Key: item.Key, Value: item.Value}
	}
	// Each iteration moves count*opcount key+value payloads.
	b.SetBytes(int64((len(item.Key) + len(item.Value)) * count * opcount))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		var wg sync.WaitGroup
		wg.Add(count)
		for j := 0; j < count; j++ {
			// Shadow per-goroutine values to avoid loop-variable capture
			// (required before Go 1.22).
			it := items[j]
			key := it.Key
			go func() {
				defer wg.Done()
				for k := 0; k < opcount; k++ {
					// b.Fatal must not be called from a goroutine other than
					// the benchmark's own (go vet's testinggoroutine check):
					// FailNow would Goexit the wrong goroutine. Record the
					// failure with b.Error and stop this worker instead.
					if err := c.Set(it); err != nil {
						b.Error(err)
						return
					}
					if _, err := c.Get(key); err != nil {
						b.Error(err)
						return
					}
				}
			}()
		}
		wg.Wait()
	}
	b.StopTimer()
}
// BenchmarkNewGetCacheMiss measures Get latency for a key that is known
// not to exist, so every lookup is a cache miss.
func BenchmarkNewGetCacheMiss(b *testing.B) {
	const missingKey = "not"
	client := newDockerServer(b)
	// A negative timeout disables the client-side deadline.
	client.SetTimeout(time.Duration(-1))
	// Best-effort delete to guarantee the key is absent; the error is
	// intentionally ignored (the key may simply not exist yet).
	client.Delete(missingKey)
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		_, err := client.Get(missingKey)
		if err != ErrCacheMiss {
			b.Fatal(err)
		}
	}
	b.StopTimer()
}
func BenchmarkNewConcurrentSetGetSmall10_100(b *testing.B) {
benchmarkNewConcurrentSetGet(b, smallItem(), 10, 100)
}
func BenchmarkNewConcurrentSetGetLarge10_100(b *testing.B) {
benchmarkNewConcurrentSetGet(b, largeItem(), 10, 100)
}
func BenchmarkNewConcurrentSetGetSmall20_100(b *testing.B) {
benchmarkNewConcurrentSetGet(b, smallItem(), 20, 100)
}
func BenchmarkNewConcurrentSetGetLarge20_100(b *testing.B) {
benchmarkNewConcurrentSetGet(b, largeItem(), 20, 100)
}