Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Re-enable previous fetching algorithm testing #6125

Merged
merged 1 commit into from
Jun 4, 2020
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
44 changes: 32 additions & 12 deletions beacon-chain/sync/initial-sync/blocks_fetcher_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ import (
logTest "github.com/sirupsen/logrus/hooks/test"
)

func TestBlocksFetcherInitStartStop(t *testing.T) {
func TestBlocksFetcher_InitStartStop(t *testing.T) {
mc, p2p, _ := initializeTestServices(t, []uint64{}, []*peerData{})

ctx, cancel := context.WithCancel(context.Background())
Expand All @@ -41,7 +41,8 @@ func TestBlocksFetcherInitStartStop(t *testing.T) {
&blocksFetcherConfig{
headFetcher: mc,
p2p: p2p,
})
},
)

t.Run("check for leaked goroutines", func(t *testing.T) {
err := fetcher.start()
Expand Down Expand Up @@ -94,7 +95,7 @@ func TestBlocksFetcherInitStartStop(t *testing.T) {
})
}

func TestBlocksFetcherRoundRobin(t *testing.T) {
func TestBlocksFetcher_RoundRobin(t *testing.T) {
blockBatchLimit := uint64(flags.Get().BlockBatchLimit)
requestsGenerator := func(start, end uint64, batchSize uint64) []*fetchRequestParams {
var requests []*fetchRequestParams
Expand Down Expand Up @@ -376,7 +377,7 @@ func TestBlocksFetcherRoundRobin(t *testing.T) {
}
}

func TestBlocksFetcherScheduleRequest(t *testing.T) {
func TestBlocksFetcher_scheduleRequest(t *testing.T) {
blockBatchLimit := uint64(flags.Get().BlockBatchLimit)
t.Run("context cancellation", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
Expand All @@ -390,8 +391,24 @@ func TestBlocksFetcherScheduleRequest(t *testing.T) {
}
})
}
// TestBlocksFetcher_handleRequest exercises the shared _handleRequest scenario
// under both feature configurations: once with the default flags, and once with
// EnableInitSyncWeightedRoundRobin explicitly disabled, so the previous
// (non-weighted-round-robin) fetching algorithm stays covered.
// NOTE(review): see TODO(6024) — the flag override is temporary until WRR is released.
func TestBlocksFetcher_handleRequest(t *testing.T) {
	// Handle using default configuration.
	t.Run("default config", func(t *testing.T) {
		_handleRequest(t)
	})

	// Now handle using previous implementation, w/o WRR.
	t.Run("previous config", func(t *testing.T) {
		// Override the feature flag for this subtest only; the returned
		// resetCfg restores the prior configuration via defer on exit.
		resetCfg := featureconfig.InitWithReset(&featureconfig.Flags{
			EnableInitSyncWeightedRoundRobin: false,
		})
		defer resetCfg()
		_handleRequest(t)
	})
}

func TestBlocksFetcherHandleRequest(t *testing.T) {
// TODO(6024): Move to TestBlocksFetcher_handleRequest when EnableInitSyncWeightedRoundRobin is released.
func _handleRequest(t *testing.T) {
blockBatchLimit := uint64(flags.Get().BlockBatchLimit)
chainConfig := struct {
expectedBlockSlots []uint64
Expand Down Expand Up @@ -474,7 +491,7 @@ func TestBlocksFetcherHandleRequest(t *testing.T) {
})
}

func TestBlocksFetcherRequestBeaconBlocksByRangeRequest(t *testing.T) {
func TestBlocksFetcher_requestBeaconBlocksByRange(t *testing.T) {
blockBatchLimit := uint64(flags.Get().BlockBatchLimit)
chainConfig := struct {
expectedBlockSlots []uint64
Expand Down Expand Up @@ -548,7 +565,7 @@ func TestBlocksFetcherRequestBeaconBlocksByRangeRequest(t *testing.T) {
}
}

func TestBlocksFetcherSelectFailOverPeer(t *testing.T) {
func TestBlocksFetcher_selectFailOverPeer(t *testing.T) {
type args struct {
excludedPID peer.ID
peers []peer.ID
Expand Down Expand Up @@ -628,7 +645,7 @@ func TestBlocksFetcherSelectFailOverPeer(t *testing.T) {
}
}

func TestBlocksFetcherNonSkippedSlotAfter(t *testing.T) {
func TestBlocksFetcher_nonSkippedSlotAfter(t *testing.T) {
peersGen := func(size int) []*peerData {
blocks := append(makeSequence(1, 64), makeSequence(500, 640)...)
blocks = append(blocks, makeSequence(51200, 51264)...)
Expand Down Expand Up @@ -713,7 +730,10 @@ func TestBlocksFetcherNonSkippedSlotAfter(t *testing.T) {
})
}

func TestBlocksFetcherFilterPeers(t *testing.T) {
func TestBlocksFetcher_filterPeers(t *testing.T) {
if !featureconfig.Get().EnableInitSyncWeightedRoundRobin {
t.Skip("Test is run only when EnableInitSyncWeightedRoundRobin = true")
}
type weightedPeer struct {
peer.ID
usedCapacity int64
Expand Down Expand Up @@ -783,7 +803,7 @@ func TestBlocksFetcherFilterPeers(t *testing.T) {
fetcher.rateLimiter.Add(pid.ID.String(), pid.usedCapacity)
}
got := fetcher.filterPeers(pids, tt.args.peersPercentage)
// Re-arrange deterministically peers with the same remaining capacity.
// Deterministically re-arrange peers with the same remaining capacity.
// They are deliberately shuffled - so that on the same capacity any of
// such peers can be selected. That's why they are sorted here.
sort.SliceStable(got, func(i, j int) bool {
Expand All @@ -801,7 +821,7 @@ func TestBlocksFetcherFilterPeers(t *testing.T) {
}
}

func TestBlocksFetcherRequestBlocksRateLimitingLocks(t *testing.T) {
func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
p1 := p2pt.NewTestP2P(t)
p2 := p2pt.NewTestP2P(t)
p3 := p2pt.NewTestP2P(t)
Expand Down Expand Up @@ -873,7 +893,7 @@ func TestBlocksFetcherRequestBlocksRateLimitingLocks(t *testing.T) {
}
}

func TestBlocksFetcherRemoveStalePeerLocks(t *testing.T) {
func TestBlocksFetcher_removeStalePeerLocks(t *testing.T) {
type peerData struct {
peerID peer.ID
accessed time.Time
Expand Down