// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync_test

import (
	"fmt"
	"reflect"
	"sync"
	"sync/atomic"
	"testing"
)

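// bench describes a single benchmark case: an optional setup function run
// once per map implementation, and a perG function executed by each parallel
// goroutine, starting from its own key offset i.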
type bench struct {
	setup func(*testing.B, mapInterface)
	perG  func(b *testing.B, pb *testing.PB, i int, m mapInterface)
}

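// benchMap runs the benchmark case once for each map implementation under
// test. A fresh instance of each implementation is created via reflection so
// state cannot leak between cases, and each parallel goroutine is handed a
// distinct starting offset (id*b.N) so the key ranges it touches do not
// overlap with those of other goroutines.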
func benchMap(b *testing.B, bench bench) {
	for _, m := range [...]mapInterface{&DeepCopyMap{}, &RWMutexMap{}, &sync.Map{}} {
		b.Run(fmt.Sprintf("%T", m), func(b *testing.B) {
			m = reflect.New(reflect.TypeOf(m).Elem()).Interface().(mapInterface)
			if bench.setup != nil {
				bench.setup(b, m)
			}

			b.ResetTimer()

			var i int64
			b.RunParallel(func(pb *testing.PB) {
				id := int(atomic.AddInt64(&i, 1) - 1)
				bench.perG(b, pb, id*b.N, m)
			})
		})
	}
}

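// BenchmarkLoadMostlyHits measures Load throughput when nearly all lookups
// (1023 out of every 1024) hit a key that is already present.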
func BenchmarkLoadMostlyHits(b *testing.B) {
	const hits, misses = 1023, 1

	benchMap(b, bench{
		setup: func(_ *testing.B, m mapInterface) {
			for i := 0; i < hits; i++ {
				m.LoadOrStore(i, i)
			}
			// Prime the map to get it into a steady state.
			for i := 0; i < hits*2; i++ {
				m.Load(i % hits)
			}
		},

		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			for ; pb.Next(); i++ {
				m.Load(i % (hits + misses))
			}
		},
	})
}

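// BenchmarkLoadMostlyMisses measures Load throughput when nearly all lookups
// (1023 out of every 1024) miss: only a single key is ever stored.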
func BenchmarkLoadMostlyMisses(b *testing.B) {
	const hits, misses = 1, 1023

	benchMap(b, bench{
		setup: func(_ *testing.B, m mapInterface) {
			for i := 0; i < hits; i++ {
				m.LoadOrStore(i, i)
			}
			// Prime the map to get it into a steady state.
			for i := 0; i < hits*2; i++ {
				m.Load(i % hits)
			}
		},

		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			for ; pb.Next(); i++ {
				m.Load(i % (hits + misses))
			}
		},
	})
}

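// BenchmarkLoadOrStoreBalanced measures LoadOrStore with an even mix of hits
// on pre-stored keys and stores of new, unique keys.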
func BenchmarkLoadOrStoreBalanced(b *testing.B) {
	const hits, misses = 128, 128

	benchMap(b, bench{
		setup: func(b *testing.B, m mapInterface) {
			if _, ok := m.(*DeepCopyMap); ok {
				b.Skip("DeepCopyMap has quadratic running time.")
			}
			for i := 0; i < hits; i++ {
				m.LoadOrStore(i, i)
			}
			// Prime the map to get it into a steady state.
			for i := 0; i < hits*2; i++ {
				m.Load(i % hits)
			}
		},

		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			for ; pb.Next(); i++ {
				j := i % (hits + misses)
				if j < hits {
					if _, ok := m.LoadOrStore(j, i); !ok {
						b.Fatalf("unexpected miss for %v", j)
					}
				} else {
					if v, loaded := m.LoadOrStore(i, i); loaded {
						b.Fatalf("failed to store %v: existing value %v", i, v)
					}
				}
			}
		},
	})
}

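// BenchmarkLoadOrStoreUnique measures LoadOrStore when every call inserts a
// key that has not been seen before, so the map grows on every iteration.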
func BenchmarkLoadOrStoreUnique(b *testing.B) {
	benchMap(b, bench{
		setup: func(b *testing.B, m mapInterface) {
			if _, ok := m.(*DeepCopyMap); ok {
				b.Skip("DeepCopyMap has quadratic running time.")
			}
		},

		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			for ; pb.Next(); i++ {
				m.LoadOrStore(i, i)
			}
		},
	})
}

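// BenchmarkLoadOrStoreCollision measures LoadOrStore when every goroutine
// contends on the same key, so every call after the first is a pure hit.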
func BenchmarkLoadOrStoreCollision(b *testing.B) {
	benchMap(b, bench{
		setup: func(_ *testing.B, m mapInterface) {
			m.LoadOrStore(0, 0)
		},

		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			for ; pb.Next(); i++ {
				m.LoadOrStore(0, 0)
			}
		},
	})
}

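// BenchmarkRange measures the cost of iterating over a map of 1<<10 entries
// with Range.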
func BenchmarkRange(b *testing.B) {
	const mapSize = 1 << 10

	benchMap(b, bench{
		setup: func(_ *testing.B, m mapInterface) {
			for i := 0; i < mapSize; i++ {
				m.Store(i, i)
			}
		},

		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			for ; pb.Next(); i++ {
				m.Range(func(_, _ interface{}) bool { return true })
			}
		},
	})
}

// BenchmarkAdversarialAlloc tests performance when we store a new value
// immediately whenever the map is promoted to clean and otherwise load a
// unique, missing key.
//
// This forces the Load calls to always acquire the map's mutex.
func BenchmarkAdversarialAlloc(b *testing.B) {
	benchMap(b, bench{
		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			var stores, loadsSinceStore int64
			for ; pb.Next(); i++ {
				m.Load(i)
				if loadsSinceStore++; loadsSinceStore > stores {
					m.LoadOrStore(i, stores)
					loadsSinceStore = 0
					stores++
				}
			}
		},
	})
}

// BenchmarkAdversarialDelete tests performance when we periodically delete
// one key and add a different one in a large map.
//
// This forces the Load calls to always acquire the map's mutex and periodically
// makes a full copy of the map despite changing only one entry.
func BenchmarkAdversarialDelete(b *testing.B) {
	const mapSize = 1 << 10

	benchMap(b, bench{
		setup: func(_ *testing.B, m mapInterface) {
			for i := 0; i < mapSize; i++ {
				m.Store(i, i)
			}
		},

		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			for ; pb.Next(); i++ {
				m.Load(i)
				if i%mapSize == 0 {
					m.Range(func(k, _ interface{}) bool {
						m.Delete(k)
						return false
					})
					m.Store(i, i)
				}
			}
		},
	})
}