Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

percentiles chart #66

Merged
merged 18 commits into from
Oct 18, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 27 additions & 5 deletions attacker/attacker.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import (
"math"
"net"
"net/http"
"sync"
"time"

vegeta "github.com/tsenart/vegeta/v12/lib"
Expand Down Expand Up @@ -50,6 +51,12 @@ type Options struct {
// Result contains the results of a single HTTP request.
type Result struct {
Latency time.Duration

P50 time.Duration
P90 time.Duration
P95 time.Duration
P99 time.Duration

// Indicates whether this is the last result in the entire attack.
End bool
}
Expand Down Expand Up @@ -104,23 +111,38 @@ func Attack(ctx context.Context, target string, resCh chan *Result, metricsCh ch

child, cancelChild := context.WithCancel(ctx)
defer cancelChild()
go sendMetrics(child, metrics, metricsCh)

// used to protect metrics
mu := &sync.Mutex{}
go sendMetrics(child, metrics, metricsCh, mu)

for res := range opts.Attacker.Attack(targeter, rate, opts.Duration, "main") {
select {
case <-ctx.Done():
opts.Attacker.Stop()
return
default:
resCh <- &Result{Latency: res.Latency}
mu.Lock()
metrics.Add(res)
p50 := metrics.Latencies.Quantile(0.50)
p90 := metrics.Latencies.Quantile(0.90)
p95 := metrics.Latencies.Quantile(0.95)
p99 := metrics.Latencies.Quantile(0.99)
mu.Unlock()
resCh <- &Result{
Latency: res.Latency,
P50: p50,
P90: p90,
P95: p95,
P99: p99,
}
}
}
metrics.Close()
metricsCh <- newMetrics(metrics)
metricsCh <- newMetrics(metrics, mu)
}

func sendMetrics(ctx context.Context, metrics *vegeta.Metrics, ch chan<- *Metrics) {
func sendMetrics(ctx context.Context, metrics *vegeta.Metrics, ch chan<- *Metrics, mu *sync.Mutex) {
// TODO: Make the interval changeable.
ticker := time.NewTicker(250 * time.Millisecond)
defer ticker.Stop()
Expand All @@ -130,7 +152,7 @@ func sendMetrics(ctx context.Context, metrics *vegeta.Metrics, ch chan<- *Metric
case <-ctx.Done():
return
case <-ticker.C:
ch <- newMetrics(metrics)
ch <- newMetrics(metrics, mu)
}
}
}
13 changes: 8 additions & 5 deletions attacker/metrics.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package attacker

import (
"sync"
"time"

vegeta "github.com/tsenart/vegeta/v12/lib"
Expand Down Expand Up @@ -68,15 +69,17 @@ type ByteMetrics struct {
Mean float64 `json:"mean"`
}

func newMetrics(m *vegeta.Metrics) *Metrics {
func newMetrics(m *vegeta.Metrics, mu *sync.Mutex) *Metrics {
mu.Lock()
defer mu.Unlock()
return &Metrics{
Latencies: LatencyMetrics{
Total: m.Latencies.Total,
Mean: m.Latencies.Mean,
P50: m.Latencies.P50,
P90: m.Latencies.P90,
P95: m.Latencies.P95,
P99: m.Latencies.P99,
P50: m.Latencies.Quantile(0.50),
P90: m.Latencies.Quantile(0.90),
P95: m.Latencies.Quantile(0.95),
P99: m.Latencies.Quantile(0.99),
Comment on lines +79 to +82
Copy link
Owner

@nakabonne nakabonne Oct 18, 2020

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm running into an issue with data races when running it at a high rate. There seems to be a data race between calculating the quantiles for the chart and calculating them for metrics reporting.
screenshot-1

It's preferable to re-use a single quantile calculation, but if it gets too complicated, you may want to revert to this for now.

Suggested change
P50: m.Latencies.Quantile(0.50),
P90: m.Latencies.Quantile(0.90),
P95: m.Latencies.Quantile(0.95),
P99: m.Latencies.Quantile(0.99),
P50: m.Latencies.P50,
P90: m.Latencies.P90,
P95: m.Latencies.P95,
P99: m.Latencies.P99,

Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm wondering if it's a little too heavy to use the sync.Mutex every time, but I haven't been able to confirm it yet. I'll check it in a few hours.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Interesting data race. I'll have a look at it.

From what I gathered, commenting out sendMetrics actually fixes that data race, but that is not a real solution. I think a mutex will actually be OK performance-wise, but it'll be needed all around the code:

  • newMetrics
  • adding metrics/calculating quantiles
  • redrawChart, as it accesses Latencies to write metrics

Also, I just hit another data race, which happens with commented sendMetrics + starting a new attack:

WARNING: DATA RACE
Read at 0x00c000190718 by goroutine 20:
  github.com/nakabonne/ali/gui.attack()
      /home/brenol/goworkspace/src/github.com/brenol/ali/gui/keybinds.go:50 +0x5b
  github.com/nakabonne/ali/gui.keybinds.func3()
      /home/brenol/goworkspace/src/github.com/brenol/ali/gui/keybinds.go:40 +0x1d7
  github.com/mum4k/termdash.(*termdash).subscribers.func4()
      /home/brenol/goworkspace/pkg/mod/github.com/mum4k/termdash@v0.12.2/termdash.go:236 +0x7b
  github.com/mum4k/termdash/private/event.(*subscriber).callback()
      /home/brenol/goworkspace/pkg/mod/github.com/mum4k/termdash@v0.12.2/private/event/event.go:95 +0x5a
  github.com/mum4k/termdash/private/event.(*subscriber).run()
      /home/brenol/goworkspace/pkg/mod/github.com/mum4k/termdash@v0.12.2/private/event/event.go:110 +0xd2

Previous write at 0x00c000190718 by goroutine 52:
  github.com/nakabonne/ali/gui.(*drawer).redrawChart()
      /home/brenol/goworkspace/src/github.com/brenol/ali/gui/drawer.go:86 +0x13e8

Copy link
Contributor Author

@brenol brenol Oct 18, 2020

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

OK, changed to a Mutex and fixed both data races. (the one you found and the one I found after)

I did not see a performance hit, but I did not use a lot of workers though.

  • edit

I tried first with a RWMutex but it did not work. Not sure why though.

Max: m.Latencies.Max,
Min: m.Latencies.Min,
},
Expand Down
1 change: 1 addition & 0 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -11,5 +11,6 @@ require (
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.6.1
github.com/tsenart/vegeta/v12 v12.8.3
go.uber.org/atomic v1.7.0
go.uber.org/goleak v1.1.10
)
5 changes: 5 additions & 0 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@ github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b/go.mod h1:ac9efd0D
github.com/c2h5oh/datasize v0.0.0-20171227191756-4eba002a5eae/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-gk v0.0.0-20140819190930-201884a44051 h1:ByJUvQYyTtNNCVfYNM48q6uYUT4fAlN0wNmd3th4BSo=
github.com/dgryski/go-gk v0.0.0-20140819190930-201884a44051/go.mod h1:qm+vckxRlDt0aOla0RYJJVeqHZlWfOm2UIxHaqPB46E=
github.com/dgryski/go-lttb v0.0.0-20180810165845-318fcdf10a77/go.mod h1:Va5MyIzkU0rAM92tn3hb3Anb7oz7KcnixF49+2wOMe4=
Expand Down Expand Up @@ -67,12 +69,15 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25 h1:7z3LSn867ex6VSaahyKadf4WtSsJIgne6A1WLOAGM8A=
github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/tsenart/go-tsz v0.0.0-20180814232043-cdeb9e1e981e/go.mod h1:SWZznP1z5Ki7hDT2ioqiFKEse8K9tU2OUvaRI0NeGQo=
github.com/tsenart/vegeta/v12 v12.8.3 h1:UEsDkSrEJojMKW/xr7KUv4H/bYykX+V48KCsPZPqEfk=
github.com/tsenart/vegeta/v12 v12.8.3/go.mod h1:ZiJtwLn/9M4fTPdMY7bdbIeyNeFVE8/AHbWFqCsUuho=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
Expand Down
41 changes: 37 additions & 4 deletions gui/drawer.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,26 +8,38 @@ import (
"github.com/mum4k/termdash/cell"
"github.com/mum4k/termdash/widgets/linechart"
"github.com/mum4k/termdash/widgets/text"
"go.uber.org/atomic"

"github.com/nakabonne/ali/attacker"
)

type drawer struct {
widgets *widgets
gridOpts *gridOpts
chartCh chan *attacker.Result
gaugeCh chan bool
metricsCh chan *attacker.Metrics

// aims to avoid performing multiple `redrawChart` calls at once.
chartDrawing bool
chartDrawing *atomic.Bool
}

// redrawChart appends entities as soon as a result arrives.
// Given maxSize, then it can be pre-allocated.
// TODO: In the future, multiple charts including bytes-in/out etc will be re-drawn.
func (d *drawer) redrawChart(ctx context.Context, maxSize int) {
values := make([]float64, 0, maxSize)
d.chartDrawing = true

valuesP50 := make([]float64, 0, maxSize)
valuesP90 := make([]float64, 0, maxSize)
valuesP95 := make([]float64, 0, maxSize)
valuesP99 := make([]float64, 0, maxSize)

appendValue := func(to []float64, val time.Duration) []float64 {
return append(to, float64(val)/float64(time.Millisecond))
}

d.chartDrawing.Store(true)
L:
for {
select {
Expand All @@ -42,16 +54,37 @@ L:
break L
}
d.gaugeCh <- false
values = append(values, float64(res.Latency/time.Millisecond))

values = appendValue(values, res.Latency)
d.widgets.latencyChart.Series("latency", values,
linechart.SeriesCellOpts(cell.FgColor(cell.ColorNumber(87))),
linechart.SeriesXLabels(map[int]string{
0: "req",
}),
)

valuesP50 = appendValue(valuesP50, res.P50)
d.widgets.percentilesChart.Series("p50", valuesP50,
linechart.SeriesCellOpts(d.widgets.p50Legend.cellOpts...),
)

valuesP90 = appendValue(valuesP90, res.P90)
d.widgets.percentilesChart.Series("p90", valuesP90,
linechart.SeriesCellOpts(d.widgets.p90Legend.cellOpts...),
)

valuesP95 = appendValue(valuesP95, res.P95)
d.widgets.percentilesChart.Series("p95", valuesP95,
linechart.SeriesCellOpts(d.widgets.p95Legend.cellOpts...),
)

valuesP99 = appendValue(valuesP99, res.P99)
d.widgets.percentilesChart.Series("p99", valuesP99,
linechart.SeriesCellOpts(d.widgets.p99Legend.cellOpts...),
)
}
}
d.chartDrawing = false
d.chartDrawing.Store(false)
}

func (d *drawer) redrawGauge(ctx context.Context, maxSize int) {
Expand Down
56 changes: 48 additions & 8 deletions gui/drawer_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ import (
"time"

"github.com/golang/mock/gomock"
"go.uber.org/atomic"

"github.com/nakabonne/ali/attacker"
)
Expand All @@ -18,31 +19,54 @@ func TestRedrawChart(t *testing.T) {
defer cancel()

tests := []struct {
name string
results []*attacker.Result
latencyChart LineChart
name string
results []*attacker.Result
latencyChart LineChart
percentilesChart LineChart
}{
{
name: "one result received",
results: []*attacker.Result{
{
Latency: 1000000,
P50: 500000,
P90: 900000,
P95: 950000,
P99: 990000,
},
},
latencyChart: func() LineChart {
l := NewMockLineChart(ctrl)
l.EXPECT().Series("latency", []float64{1.0}, gomock.Any())
return l
}(),
percentilesChart: func() LineChart {
l := NewMockLineChart(ctrl)
gomock.InOrder(
l.EXPECT().Series("p50", []float64{0.5}, gomock.Any()),
l.EXPECT().Series("p90", []float64{0.9}, gomock.Any()),
l.EXPECT().Series("p95", []float64{0.95}, gomock.Any()),
l.EXPECT().Series("p99", []float64{0.99}, gomock.Any()),
)
return l
}(),
},
{
name: "two results received",
results: []*attacker.Result{
{
Latency: 1000000,
P50: 500000,
P90: 900000,
P95: 950000,
P99: 990000,
},
{
Latency: 2000000,
P50: 1000000,
P90: 1800000,
P95: 1900000,
P99: 1980000,
},
},
latencyChart: func() LineChart {
Expand All @@ -51,15 +75,30 @@ func TestRedrawChart(t *testing.T) {
l.EXPECT().Series("latency", []float64{1.0, 2.0}, gomock.Any())
return l
}(),
percentilesChart: func() LineChart {
l := NewMockLineChart(ctrl)

gomock.InOrder(l.EXPECT().Series("p50", []float64{0.5}, gomock.Any()),
l.EXPECT().Series("p90", []float64{0.9}, gomock.Any()),
l.EXPECT().Series("p95", []float64{0.95}, gomock.Any()),
l.EXPECT().Series("p99", []float64{0.99}, gomock.Any()),
l.EXPECT().Series("p50", []float64{0.5, 1.0}, gomock.Any()),
l.EXPECT().Series("p90", []float64{0.9, 1.8}, gomock.Any()),
l.EXPECT().Series("p95", []float64{0.95, 1.9}, gomock.Any()),
l.EXPECT().Series("p99", []float64{0.99, 1.98}, gomock.Any()),
)
return l
}(),
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
d := &drawer{
widgets: &widgets{latencyChart: tt.latencyChart},
chartCh: make(chan *attacker.Result),
gaugeCh: make(chan bool, 100),
widgets: &widgets{latencyChart: tt.latencyChart, percentilesChart: tt.percentilesChart},
chartCh: make(chan *attacker.Result),
gaugeCh: make(chan bool, 100),
chartDrawing: atomic.NewBool(false),
}
go d.redrawChart(ctx, len(tt.results))
for _, res := range tt.results {
Expand Down Expand Up @@ -109,8 +148,9 @@ func TestRedrawGauge(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
d := &drawer{
widgets: &widgets{progressGauge: tt.gauge},
gaugeCh: make(chan bool),
widgets: &widgets{progressGauge: tt.gauge},
gaugeCh: make(chan bool),
chartDrawing: atomic.NewBool(false),
}
go d.redrawGauge(ctx, tt.size)
for i := 0; i < tt.size; i++ {
Expand Down
Loading