Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix: evaluate after filter #2363

Merged
merged 2 commits into from
May 22, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
4 changes: 2 additions & 2 deletions api/manager/docs.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 2 additions & 2 deletions api/manager/swagger.json
Original file line number Diff line number Diff line change
Expand Up @@ -4283,12 +4283,12 @@
"d7y_io_dragonfly_v2_manager_types.SchedulerClusterConfig": {
"type": "object",
"properties": {
"filter_parent_limit": {
"candidate_parent_limit": {
"type": "integer",
"maximum": 20,
"minimum": 1
},
"filter_parent_range_limit": {
"filter_parent_limit": {
"type": "integer",
"maximum": 1000,
"minimum": 10
Expand Down
4 changes: 2 additions & 2 deletions api/manager/swagger.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -658,11 +658,11 @@ definitions:
type: object
d7y_io_dragonfly_v2_manager_types.SchedulerClusterConfig:
properties:
filter_parent_limit:
candidate_parent_limit:
maximum: 20
minimum: 1
type: integer
filter_parent_range_limit:
filter_parent_limit:
maximum: 1000
minimum: 10
type: integer
Expand Down
4 changes: 2 additions & 2 deletions manager/database/database.go
Original file line number Diff line number Diff line change
Expand Up @@ -113,8 +113,8 @@ func seed(cfg *config.Config, db *gorm.DB) error {
},
Name: DefaultSchedulerClusterName,
Config: map[string]any{
"filter_parent_limit": schedulerconfig.DefaultSchedulerFilterParentLimit,
"filter_parent_range_limit": schedulerconfig.DefaultSchedulerFilterParentRangeLimit,
"candidate_parent_limit": schedulerconfig.DefaultSchedulerCandidateParentLimit,
"filter_parent_limit": schedulerconfig.DefaultSchedulerFilterParentLimit,
},
ClientConfig: map[string]any{
"load_limit": schedulerconfig.DefaultPeerConcurrentUploadLimit,
Expand Down
4 changes: 2 additions & 2 deletions manager/types/scheduler_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,8 +52,8 @@ type GetSchedulerClustersQuery struct {
}

type SchedulerClusterConfig struct {
FilterParentLimit uint32 `yaml:"filterParentLimit" mapstructure:"filterParentLimit" json:"filter_parent_limit" binding:"omitempty,gte=1,lte=20"`
FilterParentRangeLimit uint32 `yaml:"filterParentRangeLimit" mapstructure:"filterParentRangeLimit" json:"filter_parent_range_limit" binding:"omitempty,gte=10,lte=1000"`
CandidateParentLimit uint32 `yaml:"candidateParentLimit" mapstructure:"candidateParentLimit" json:"candidate_parent_limit" binding:"omitempty,gte=1,lte=20"`
FilterParentLimit uint32 `yaml:"filterParentLimit" mapstructure:"filterParentLimit" json:"filter_parent_limit" binding:"omitempty,gte=10,lte=1000"`
}

type SchedulerClusterClientConfig struct {
Expand Down
8 changes: 4 additions & 4 deletions scheduler/config/constants.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,11 +33,11 @@ const (
// DefaultPeerConcurrentPieceCount is the default number of pieces to download concurrently.
DefaultPeerConcurrentPieceCount = 4

// DefaultSchedulerFilterParentLimit is default limit the number for filter traversals.
DefaultSchedulerFilterParentLimit = 4
// DefaultSchedulerCandidateParentLimit is the default limit on the number of candidate parents.
DefaultSchedulerCandidateParentLimit = 4

// DefaultSchedulerFilterParentRangeLimit is default limit the range for filter traversals.
DefaultSchedulerFilterParentRangeLimit = 40
// DefaultSchedulerFilterParentLimit is the default limit on the number of parents to filter.
DefaultSchedulerFilterParentLimit = 40
)

const (
Expand Down
24 changes: 13 additions & 11 deletions scheduler/scheduling/scheduling.go
Original file line number Diff line number Diff line change
Expand Up @@ -400,6 +400,18 @@ func (s *scheduling) FindCandidateParents(ctx context.Context, peer *resource.Pe
},
)

// Truncate the candidate parents to at most candidateParentLimit entries.
candidateParentLimit := config.DefaultSchedulerCandidateParentLimit
if config, err := s.dynconfig.GetSchedulerClusterConfig(); err == nil {
if config.CandidateParentLimit > 0 {
candidateParentLimit = int(config.CandidateParentLimit)
}
}

if len(candidateParents) > candidateParentLimit {
candidateParents = candidateParents[:candidateParentLimit]
}

var parentIDs []string
for _, candidateParent := range candidateParents {
parentIDs = append(parentIDs, candidateParent.ID)
Expand Down Expand Up @@ -449,27 +461,17 @@ func (s *scheduling) FindSuccessParent(ctx context.Context, peer *resource.Peer,
// filterCandidateParents filters the candidate parents that can be scheduled.
func (s *scheduling) filterCandidateParents(peer *resource.Peer, blocklist set.SafeSet[string]) []*resource.Peer {
filterParentLimit := config.DefaultSchedulerFilterParentLimit
filterParentRangeLimit := config.DefaultSchedulerFilterParentRangeLimit
if config, err := s.dynconfig.GetSchedulerClusterConfig(); err == nil {
if config.FilterParentLimit > 0 {
filterParentLimit = int(config.FilterParentLimit)
}

if config.FilterParentRangeLimit > 0 {
filterParentRangeLimit = int(config.FilterParentRangeLimit)
}
}

var (
candidateParents []*resource.Peer
candidateParentIDs []string
)
for _, candidateParent := range peer.Task.LoadRandomPeers(uint(filterParentRangeLimit)) {
// Parent length limit after filtering.
if len(candidateParents) >= filterParentLimit {
break
}

for _, candidateParent := range peer.Task.LoadRandomPeers(uint(filterParentLimit)) {
// Candidate parent is in blocklist.
if blocklist.Contains(candidateParent.ID) {
peer.Log.Debugf("parent %s is not selected because it is in blocklist", candidateParent.ID)
Expand Down
22 changes: 11 additions & 11 deletions scheduler/scheduling/scheduling_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -405,7 +405,7 @@ func TestScheduling_ScheduleCandidateParents(t *testing.T) {
seedPeer.FSM.SetState(resource.PeerStateRunning)
peer.StoreAnnouncePeerStream(stream)
gomock.InOrder(
md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{}, errors.New("foo")).Times(1),
md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{}, errors.New("foo")).Times(2),
md.GetSchedulerClusterClientConfig().Return(types.SchedulerClusterClientConfig{
ConcurrentPieceCount: 2,
}, nil).Times(1),
Expand Down Expand Up @@ -679,7 +679,7 @@ func TestScheduling_ScheduleParentAndCandidateParents(t *testing.T) {
seedPeer.FSM.SetState(resource.PeerStateRunning)
peer.StoreReportPieceResultStream(stream)
gomock.InOrder(
md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{}, errors.New("foo")).Times(1),
md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{}, errors.New("foo")).Times(2),
md.GetSchedulerClusterClientConfig().Return(types.SchedulerClusterClientConfig{
ConcurrentPieceCount: 2,
}, nil).Times(1),
Expand Down Expand Up @@ -834,7 +834,7 @@ func TestScheduling_FindCandidateParents(t *testing.T) {
mockPeers[1].FinishedPieces.Set(1)
mockPeers[1].FinishedPieces.Set(2)

md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{}, errors.New("foo")).Times(1)
md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{}, errors.New("foo")).Times(2)
},
expect: func(t *testing.T, peer *resource.Peer, mockPeers []*resource.Peer, parents []*resource.Peer, ok bool) {
assert := assert.New(t)
Expand All @@ -859,7 +859,7 @@ func TestScheduling_FindCandidateParents(t *testing.T) {
mockPeers[1].FinishedPieces.Set(1)
mockPeers[1].FinishedPieces.Set(2)

md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{}, errors.New("foo")).Times(1)
md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{}, errors.New("foo")).Times(2)
},
expect: func(t *testing.T, peer *resource.Peer, mockPeers []*resource.Peer, parents []*resource.Peer, ok bool) {
assert := assert.New(t)
Expand All @@ -881,7 +881,7 @@ func TestScheduling_FindCandidateParents(t *testing.T) {
mockPeers[1].FinishedPieces.Set(1)
mockPeers[1].FinishedPieces.Set(2)

md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{}, errors.New("foo")).Times(1)
md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{}, errors.New("foo")).Times(2)
},
expect: func(t *testing.T, peer *resource.Peer, mockPeers []*resource.Peer, parents []*resource.Peer, ok bool) {
assert := assert.New(t)
Expand Down Expand Up @@ -912,7 +912,7 @@ func TestScheduling_FindCandidateParents(t *testing.T) {
mockPeers[1].FinishedPieces.Set(1)
mockPeers[1].FinishedPieces.Set(2)

md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{}, errors.New("foo")).Times(1)
md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{}, errors.New("foo")).Times(2)
},
expect: func(t *testing.T, peer *resource.Peer, mockPeers []*resource.Peer, parents []*resource.Peer, ok bool) {
assert := assert.New(t)
Expand All @@ -931,7 +931,7 @@ func TestScheduling_FindCandidateParents(t *testing.T) {
peer.Task.StorePeer(peer)
peer.Task.StorePeer(mockPeers[0])
peer.Task.StorePeer(mockPeers[1])
md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{}, errors.New("foo")).Times(1)
md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{}, errors.New("foo")).Times(2)
},
expect: func(t *testing.T, peer *resource.Peer, mockPeers []*resource.Peer, parents []*resource.Peer, ok bool) {
assert := assert.New(t)
Expand All @@ -940,7 +940,7 @@ func TestScheduling_FindCandidateParents(t *testing.T) {
},
},
{
name: "find parent and fetch filterParentLimit from manager dynconfig",
name: "find parent and fetch candidateParentLimit from manager dynconfig",
mock: func(peer *resource.Peer, mockPeers []*resource.Peer, blocklist set.SafeSet[string], md *configmocks.MockDynconfigInterfaceMockRecorder) {
peer.FSM.SetState(resource.PeerStateRunning)
mockPeers[0].FSM.SetState(resource.PeerStateRunning)
Expand All @@ -958,8 +958,8 @@ func TestScheduling_FindCandidateParents(t *testing.T) {
mockPeers[1].FinishedPieces.Set(2)

md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{
FilterParentLimit: 3,
}, nil).Times(1)
CandidateParentLimit: 3,
}, nil).Times(2)
},
expect: func(t *testing.T, peer *resource.Peer, mockPeers []*resource.Peer, parents []*resource.Peer, ok bool) {
assert := assert.New(t)
Expand Down Expand Up @@ -1192,7 +1192,7 @@ func TestScheduling_FindSuccessParent(t *testing.T) {
},
},
{
name: "find parent and fetch filterParentLimit from manager dynconfig",
name: "find parent and fetch candidateParentLimit from manager dynconfig",
mock: func(peer *resource.Peer, mockPeers []*resource.Peer, blocklist set.SafeSet[string], md *configmocks.MockDynconfigInterfaceMockRecorder) {
peer.FSM.SetState(resource.PeerStateRunning)
peer.Task.StorePeer(peer)
Expand Down