report cache misses in cached token authenticator benchmark #85152

Merged · 1 commit · Nov 13, 2019
@@ -26,6 +26,7 @@ import (
 	mathrand "math/rand"
 	"reflect"
 	"sync"
+	"sync/atomic"
 	"testing"
 	"time"
@@ -194,6 +195,11 @@ func newSingleBenchmark(tokens, threads int) *singleBenchmark {
 // singleBenchmark collects all the state needed to run a benchmark. The
 // question this benchmark answers is, "what's the average latency added by the
 // cache for N concurrent tokens?"
+//
+// Given the size of the key range constructed by this test, the default go
+// benchtime of 1 second is often inadequate to test caching and expiration
+// behavior. A benchtime of 10 to 30 seconds is adequate to stress these
+// code paths.
 type singleBenchmark struct {
 	threadCount int
 	// These token.* variables are set by makeTokens()
@@ -204,11 +210,6 @@ type singleBenchmark struct {
 	tokenToAuds map[string]authenticator.Audiences
 	// a list makes it easy to select a random one
 	tokens []string
-
-	// Simulate slowness, qps limit, external service limitation, etc
-	chokepoint chan struct{}
-
-	b *testing.B
 }

 func (s *singleBenchmark) makeTokens() {
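
To actually stress the caching and expiration paths the added comment describes, the benchmark needs a longer -benchtime than the 1-second default. For example (the benchmark name below is assumed for illustration, not taken from this page; -run='^$' skips the regular tests):

go test -run='^$' -bench=CachedTokenAuthenticator -benchtime=30s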
@@ -228,12 +229,12 @@ func (s *singleBenchmark) makeTokens() {
 		for i := 0; i < mathrand.Intn(4); i++ {
 			auds = append(auds, string(uuid.NewUUID()))
 		}
-		choice := mathrand.Intn(1000)
+		choice := mathrand.Float64()
 		switch {
-		case choice < 900:
+		case choice < 0.9:
 			r.ok = true
 			r.err = nil
-		case choice < 990:
+		case choice < 0.99:
 			r.ok = false
 			r.err = nil
 		default:
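
(For reference: mathrand.Float64() returns a uniform value in [0,1), so the new thresholds preserve the old Intn(1000) distribution — about 90% of tokens authenticate successfully, 9% are cleanly rejected, and the remaining 1% fall through to the error case.)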
@@ -249,9 +250,6 @@ func (s *singleBenchmark) makeTokens() {
 }

 func (s *singleBenchmark) lookup(ctx context.Context, token string) (*authenticator.Response, bool, error) {
-	s.chokepoint <- struct{}{}
-	defer func() { <-s.chokepoint }()
-	time.Sleep(1 * time.Millisecond)
 	r, ok := s.tokenToResponse[token]
 	if !ok {
 		panic("test setup problem")
@@ -272,29 +270,41 @@ func (s *singleBenchmark) run(b *testing.B) {
 }

 func (s *singleBenchmark) bench(b *testing.B) {
-	s.b = b
+	// Simulate slowness, qps limit, external service limitation, etc
+	const maxInFlight = 40
+	chokepoint := make(chan struct{}, maxInFlight)
+	// lookup count
+	var lookups uint64
+
 	a := newWithClock(
-		authenticator.TokenFunc(s.lookup),
+		authenticator.TokenFunc(func(ctx context.Context, token string) (*authenticator.Response, bool, error) {
+			atomic.AddUint64(&lookups, 1)

@enj (Member) commented on Nov 13, 2019:
While I do not think it matters, this is technically defer func() { atomic.AddUint64(&lookups, 1) }(), right, since we want to count a lookup after it is complete?

@enj (Member) commented on Nov 13, 2019:
Actually no, that is wrong; you just want to count that you attempted to do a lookup at all (i.e. a cache miss).

Author (Member) replied:
Yep. I'm trying to gauge "how many expensive things do we do per op" so that we can compare the goodness (TM) of our cache strategy.


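The chokepoint send and deferred receive resuming below are the standard buffered-channel semaphore pattern for capping in-flight calls. A minimal standalone sketch of the same pattern (names and values here are illustrative, not from this PR):

package main

import (
	"sync"
	"time"
)

func main() {
	// A buffered channel acts as a counting semaphore: a send blocks
	// once the channel holds maxInFlight tokens, capping concurrency.
	const maxInFlight = 40
	sem := make(chan struct{}, maxInFlight)

	slowCall := func() {
		sem <- struct{}{}        // acquire a slot (blocks when full)
		defer func() { <-sem }() // release the slot on return
		time.Sleep(1 * time.Millisecond) // stand-in for the expensive lookup
	}

	var wg sync.WaitGroup
	for i := 0; i < 200; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			slowCall()
		}()
	}
	wg.Wait() // at most maxInFlight slowCalls run at any instant
}
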
+
+			chokepoint <- struct{}{}
+			defer func() { <-chokepoint }()
+
+			time.Sleep(1 * time.Millisecond)
+
+			return s.lookup(ctx, token)
+		}),
 		true,
 		4*time.Second,
 		500*time.Millisecond,
 		utilclock.RealClock{},
 	)
-	const maxInFlight = 40
-	s.chokepoint = make(chan struct{}, maxInFlight)
-
-	s.b.ResetTimer()
-
+	b.ResetTimer()
 	b.SetParallelism(s.threadCount)
 	b.RunParallel(func(pb *testing.PB) {
 		r := mathrand.New(mathrand.NewSource(mathrand.Int63()))
 		for pb.Next() {
-			// some problems appear with random
-			// access, some appear with many
-			// requests for a single entry, so we
-			// do both.
-			s.doAuthForTokenN(r.Intn(len(s.tokens)), a)
+			// some problems appear with random access, some appear with many
+			// requests for a single entry, so we do both.

Member commented:
Uh, did you mean to drop the s.doAuthForTokenN(r.Intn(len(s.tokens)), a)?

Author (Member) replied:
I reordered it to before the comment on line 302, because it seemed like it was explaining the second doAuthForTokenN, but reading it again, it looks like it was in the right spot.

+			s.doAuthForTokenN(r.Intn(s.tokenCount), a)
 			s.doAuthForTokenN(0, a)
 		}
 	})
+	b.StopTimer()
+
+	b.ReportMetric(float64(lookups)/float64(b.N), "lookups/op")
 }
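
For reference, b.ReportMetric (added in Go 1.13) is what surfaces the lookups/op number alongside ns/op in the benchmark output. A self-contained sketch of the same count-and-report technique (a hypothetical benchmark, not code from this PR):

package cache_test

import (
	"sync/atomic"
	"testing"
)

// BenchmarkMissCounting is a toy benchmark: it counts how often a
// lookup misses its cache and reports that as a custom metric.
func BenchmarkMissCounting(b *testing.B) {
	var misses uint64
	cache := map[int]bool{}
	lookup := func(k int) bool {
		if v, ok := cache[k]; ok {
			return v // cache hit: no expensive work
		}
		// Count the attempt itself, i.e. the cache miss, as the PR does.
		// (atomic mirrors the PR's parallel benchmark; it is not strictly
		// needed in this serial loop.)
		atomic.AddUint64(&misses, 1)
		cache[k] = true // pretend we did the expensive lookup
		return true
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		lookup(i % 100)
	}
	b.StopTimer()
	// Printed next to ns/op in the benchmark output, e.g. "0.001 misses/op".
	b.ReportMetric(float64(misses)/float64(b.N), "misses/op")
}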