charts: fix reorg deadlock
This fixes a reorg deadlock in the charts cache handler.

It also fixes the block time average calculation in the DayZoom case in durationBTWChart.
buck54321 authored and chappjc committed May 9, 2019
1 parent 37d5258 commit 16d3d83
Showing 2 changed files with 122 additions and 64 deletions.
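
For context, here is a minimal Go sketch of the deadlock class this commit removes (illustrative only, not dcrdata code; the cache/reorg types and handler names are invented). A defer mtx.Unlock() inside a long-lived for/select handler only runs when the goroutine exits, so after the first reorg the charts mutex was never released and every later Lock blocked. The fix, mirrored in fixedHandler below, is to unlock explicitly before signaling that the reorg has been processed.

package main

import (
    "fmt"
    "sync"
)

// reorg stands in for txhelpers.ReorgData: the caller waits on wg until the
// handler has processed the reorg.
type reorg struct {
    height int
    wg     *sync.WaitGroup
}

type cache struct {
    mtx  sync.Mutex
    data []int
}

// brokenHandler reproduces the bug: the deferred unlock only fires when the
// goroutine exits, so the mutex is still held when wg.Done releases the
// caller, and it stays held for every later lock attempt.
func brokenHandler(c *cache, reorgs <-chan reorg, quit <-chan struct{}) {
    for {
        select {
        case r := <-reorgs:
            c.mtx.Lock()
            defer c.mtx.Unlock() // BUG: never runs while the loop is alive.
            c.data = c.data[:r.height]
            r.wg.Done()
        case <-quit:
            return
        }
    }
}

// fixedHandler mirrors the commit: unlock explicitly before signaling Done.
func fixedHandler(c *cache, reorgs <-chan reorg, quit <-chan struct{}) {
    for {
        select {
        case r := <-reorgs:
            c.mtx.Lock()
            c.data = c.data[:r.height]
            c.mtx.Unlock()
            r.wg.Done()
        case <-quit:
            return
        }
    }
}

func main() {
    c := &cache{data: make([]int, 10)}
    reorgs := make(chan reorg)
    quit := make(chan struct{})
    go fixedHandler(c, reorgs, quit) // swap in brokenHandler and main hangs below

    r := reorg{height: 5, wg: new(sync.WaitGroup)}
    r.wg.Add(1)
    reorgs <- r
    r.wg.Wait() // reorg processed

    c.mtx.Lock() // deadlocks with brokenHandler: the handler still holds the mutex
    fmt.Println("cache length after reorg:", len(c.data))
    c.mtx.Unlock()
    close(quit)
}
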
71 changes: 52 additions & 19 deletions db/cache/charts.go
@@ -60,7 +60,7 @@ func ParseZoom(zoom string) ZoomLevel {
}

const (
aDay = 86400
aDay = 86400 // seconds
// HashrateAvgLength is the number of blocks used the rolling average for
// the network hashrate calculation.
HashrateAvgLength = 120
@@ -473,7 +473,6 @@ func (charts *ChartData) ReorgHandler(wg *sync.WaitGroup, c chan *txhelpers.Reor
}
commonAncestorHeight := uint64(data.NewChainHeight) - uint64(len(data.NewChain))
charts.mtx.Lock()
defer charts.mtx.Unlock()
newHeight := int(commonAncestorHeight) + 1
log.Debug("ChartData.ReorgHandler snipping blocks height to %d", newHeight)
charts.Blocks.Snip(newHeight)
@@ -487,6 +486,7 @@ func (charts *ChartData) ReorgHandler(wg *sync.WaitGroup, c chan *txhelpers.Reor
windowsLen--
log.Debug("ChartData.ReorgHandler snipping windows to height to %d", windowsLen)
charts.Windows.Snip(windowsLen)
charts.mtx.Unlock()
data.WG.Done()

case <-charts.ctx.Done():
@@ -629,7 +629,7 @@ func (charts *ChartData) StateID() uint64 {
return charts.stateID()
}

// StateID returns a unique (enough) ID associted with the state of the Blocks
// stateID returns a unique (enough) ID associted with the state of the Blocks
// data.
func (charts *ChartData) stateID() uint64 {
timeLen := len(charts.Blocks.Time)
@@ -870,22 +870,55 @@ func accumulate(data ChartUints) ChartUints {
return d
}

// Translate the uints to a slice of the differences between each. The provided
// data is assumed to be monotonically increasing. The first element is always
// 0 to keep the data length unchanged.
func btw(data ChartUints) ChartUints {
d := make(ChartUints, 0, len(data))
dataLen := len(data)
if dataLen == 0 {
return d
}
d = append(d, 0)
last := data[0]
for _, v := range data[1:] {
d = append(d, v-last)
// Translate the times slice to a slice of differences. The original dataset
// minus the first element is returned for convenience.
func blockTimes(blocks ChartUints) (ChartUints, ChartUints) {
times := make(ChartUints, 0, len(blocks))
dataLen := len(blocks)
if dataLen < 2 {
// Fewer than two data points is invalid for btw. Return empty data sets so
// that the JSON encoding will have the correct type.
return times, times
}
last := blocks[0]
for _, v := range blocks[1:] {
dif := v - last
if int64(dif) < 0 {
dif = 0
}
times = append(times, dif)
last = v
}
return d
return blocks[1:], times
}

// Take the average block times on the intervals defined by the ticks argument.
func avgBlockTimes(ticks, blocks ChartUints) (ChartUints, ChartUints) {
if len(ticks) < 2 {
// Return empty arrays so that JSON-encoding will have the correct type.
return ChartUints{}, ChartUints{}
}
times := make(ChartUints, 0, len(ticks)-1)
avgs := make(ChartUints, 0, len(ticks)-1)
workingOn := ticks[0]
nextIdx := 1
next := ticks[nextIdx]
lastIdx := 0
for i, t := range blocks {
if t > next {
_, pts := blockTimes(blocks[lastIdx:i])
avgs = append(avgs, pts.Avg(0, len(pts)))
times = append(times, workingOn)
nextIdx++
if nextIdx > len(ticks)-1 {
break
}
workingOn = next
lastIdx = i
next = ticks[nextIdx]
}
}
return times, avgs
}

func blockSizeChart(charts *ChartData, zoom ZoomLevel) ([]byte, error) {
@@ -931,9 +964,9 @@ func coinSupplyChart(charts *ChartData, zoom ZoomLevel) ([]byte, error) {
func durationBTWChart(charts *ChartData, zoom ZoomLevel) ([]byte, error) {
switch zoom {
case BlockZoom:
return charts.encode(charts.Blocks.Time, btw(charts.Blocks.Time))
return charts.encode(blockTimes(charts.Blocks.Time))
case DayZoom:
return charts.encode(charts.Days.Time, btw(charts.Days.Time))
return charts.encode(avgBlockTimes(charts.Days.Time, charts.Blocks.Time))
}
return nil, InvalidZoomErr
}
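The DayZoom change replaces the old per-day btw() differences with a genuine average of block-to-block gaps inside each day. As a standalone illustration of that calculation (plain uint64 slices stand in for the package's ChartUints type, the helper names blockTimeDiffs and avgByTicks are mine, and the bucketing only approximates avgBlockTimes above):

package main

import "fmt"

// blockTimeDiffs returns, for each block after the first, the seconds elapsed
// since the previous block (clamped at zero), paired with the timestamps they
// belong to.
func blockTimeDiffs(stamps []uint64) ([]uint64, []uint64) {
    if len(stamps) < 2 {
        return []uint64{}, []uint64{}
    }
    diffs := make([]uint64, 0, len(stamps)-1)
    last := stamps[0]
    for _, v := range stamps[1:] {
        d := v - last
        if int64(d) < 0 {
            d = 0 // guard against out-of-order timestamps
        }
        diffs = append(diffs, d)
        last = v
    }
    return stamps[1:], diffs
}

// avgByTicks averages the block-to-block gaps whose blocks fall inside each
// [ticks[i], ticks[i+1]) interval, returning interval start times and averages.
func avgByTicks(ticks, stamps []uint64) ([]uint64, []uint64) {
    times, avgs := []uint64{}, []uint64{}
    if len(ticks) < 2 {
        return times, avgs
    }
    for i := 0; i+1 < len(ticks); i++ {
        start, end := ticks[i], ticks[i+1]
        var sum, count, last uint64
        haveLast := false
        for _, t := range stamps { // stamps assumed sorted ascending
            if t < start {
                continue
            }
            if t >= end {
                break
            }
            if haveLast {
                sum += t - last
                count++
            }
            last, haveLast = t, true
        }
        if count > 0 {
            times = append(times, start)
            avgs = append(avgs, sum/count)
        }
    }
    return times, avgs
}

func main() {
    // Three blocks in day one, three in day two (timestamps in seconds).
    stamps := []uint64{100, 400, 700, 86500, 86800, 87400}
    ticks := []uint64{0, 86400, 172800} // day boundaries

    _, diffs := blockTimeDiffs(stamps)
    fmt.Println("block-to-block seconds:", diffs) // [300 300 85800 300 600]

    days, avgs := avgByTicks(ticks, stamps)
    fmt.Println("day starts:", days)             // [0 86400]
    fmt.Println("avg block time per day:", avgs) // [300 450]
}
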
115 changes: 70 additions & 45 deletions db/cache/charts_test.go
@@ -252,57 +252,82 @@ func TestChartReorg(t *testing.T) {
charts := &ChartData{
ctx: ctx,
}
charts.Windows = &windowSet{
cacheID: 0,
Time: newUints(),
PowDiff: newFloats(),
TicketPrice: newUints(),
}
charts.Days = &zoomSet{
cacheID: 0,
Height: newUints(),
Time: newUints(),
PoolSize: newUints(),
PoolValue: newFloats(),
BlockSize: newUints(),
TxCount: newUints(),
NewAtoms: newUints(),
Chainwork: newUints(),
Fees: newUints(),
}
charts.Blocks = &zoomSet{
cacheID: 0,
Time: newUints(),
PoolSize: newUints(),
PoolValue: newFloats(),
BlockSize: newUints(),
TxCount: newUints(),
NewAtoms: newUints(),
Chainwork: newUints(),
Fees: newUints(),
resetCharts := func() {
charts.Windows = &windowSet{
cacheID: 0,
Time: newUints(),
PowDiff: newFloats(),
TicketPrice: newUints(),
}
charts.Days = &zoomSet{
cacheID: 0,
Height: newUints(),
Time: newUints(),
PoolSize: newUints(),
PoolValue: newFloats(),
BlockSize: newUints(),
TxCount: newUints(),
NewAtoms: newUints(),
Chainwork: newUints(),
Fees: newUints(),
}
charts.Blocks = &zoomSet{
cacheID: 0,
Time: newUints(),
PoolSize: newUints(),
PoolValue: newFloats(),
BlockSize: newUints(),
TxCount: newUints(),
NewAtoms: newUints(),
Chainwork: newUints(),
Fees: newUints(),
}
}
// this test reorg will replace the entire chain.
reorgData := &txhelpers.ReorgData{
NewChainHeight: 2,
NewChain: make([]chainhash.Hash, 3),
WG: new(sync.WaitGroup),

reorgData := func(newHeight, chainLen int) *txhelpers.ReorgData {
d := &txhelpers.ReorgData{
NewChainHeight: int32(newHeight),
NewChain: make([]chainhash.Hash, chainLen),
WG: new(sync.WaitGroup),
}
d.WG.Add(1)
return d
}
reorgData.WG.Add(1)
wg := new(sync.WaitGroup)
c := make(chan *txhelpers.ReorgData)
wg.Add(1)
go charts.ReorgHandler(wg, c)
c <- reorgData
reorgData.WG.Wait()
if charts.Blocks.Time.Length() != 0 {
t.Errorf("unexpected blocks length %d", charts.Blocks.Time.Length())
}
// Reorg snips 2 days
if charts.Days.Time.Length() != 1 {
t.Errorf("unexpected days length %d", charts.Days.Time.Length())
}
// Reorg snips last window
if charts.Windows.Time.Length() != 2 {
t.Errorf("unexpected windows length %d", charts.Windows.Time.Length())
testReorg := func(newHeight, chainLen, newBlockLen, newDayLen, newWindowLen int) {
d := reorgData(newHeight, chainLen)
c <- d
done := make(chan struct{})
go func() {
d.WG.Wait()
close(done)
}()
select {
case <-time.NewTimer(time.Second).C:
t.Fatalf("timed out waiting for reorg test to complete")
case <-done:
}
if charts.Blocks.Time.Length() != newBlockLen {
t.Errorf("unexpected blocks length %d", charts.Blocks.Time.Length())
}
// Reorg snips 2 days
if charts.Days.Time.Length() != newDayLen {
t.Errorf("unexpected days length %d", charts.Days.Time.Length())
}
// Reorg snips last window
if charts.Windows.Time.Length() != newWindowLen {
t.Errorf("unexpected windows length %d", charts.Windows.Time.Length())
}
}
// Test replacing the entire chain.
resetCharts()
testReorg(2, 3, 0, 1, 2)

// All but one block.
resetCharts()
testReorg(2, 2, 1, 1, 2)
}
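
The rewritten test also guards against the deadlock regressing: it waits on the reorg's WaitGroup in a goroutine and races that against a one-second timer, so a stuck handler fails the test instead of hanging it. A generic sketch of that wait-with-timeout pattern, with a hypothetical waitTimeout helper:

package main

import (
    "errors"
    "fmt"
    "sync"
    "time"
)

// waitTimeout returns an error if wg.Wait does not return within d.
func waitTimeout(wg *sync.WaitGroup, d time.Duration) error {
    done := make(chan struct{})
    go func() {
        wg.Wait()
        close(done)
    }()
    select {
    case <-done:
        return nil
    case <-time.After(d):
        return errors.New("timed out waiting for WaitGroup")
    }
}

func main() {
    wg := new(sync.WaitGroup)
    wg.Add(1)
    go func() {
        time.Sleep(100 * time.Millisecond) // simulated reorg handling
        wg.Done()
    }()
    if err := waitTimeout(wg, time.Second); err != nil {
        fmt.Println("FAIL:", err)
        return
    }
    fmt.Println("reorg handled before the deadline")
}
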
