#46: Covering least-latency (LL) routing with tests
roma-glushko committed Jan 14, 2024
1 parent 7a6b4ab commit da5df0e
Showing 3 changed files with 92 additions and 7 deletions.
5 changes: 4 additions & 1 deletion pkg/providers/testing.go
@@ -62,7 +62,10 @@ type LangModelMock struct {
 
 func NewLangModelMock(ID string, healthy bool, avgLatency float64) *LangModelMock {
 	movingAverage := latency.NewMovingAverage(0.06, 3)
-	movingAverage.Set(avgLatency)
+
+	if avgLatency > 0.0 {
+		movingAverage.Set(avgLatency)
+	}
 
 	return &LangModelMock{
 		modelID: ID,
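
With this guard in place, passing an average latency of 0.0 leaves the mock's moving average unset, which the new tests use to model a "cold" model, while any positive value pre-warms it. A minimal sketch of how a test might build both kinds of mocks (the helper name is hypothetical; NewLangModelMock and providers.Model are taken from this commit):

	// newMockPool is a hypothetical test helper; it assumes the surrounding
	// _test.go file imports "glide/pkg/providers".
	func newMockPool() []providers.Model {
		return []providers.Model{
			providers.NewLangModelMock("cold", true, 0.0),   // zero latency: moving average stays unset (cold)
			providers.NewLangModelMock("warm", true, 120.0), // positive latency: moving average pre-set to 120.0 (warm)
		}
	}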
20 changes: 14 additions & 6 deletions pkg/routers/routing/least_latency.go
@@ -14,11 +14,21 @@ const (
 
 // ModelSchedule defines latency update schedule for models
 type ModelSchedule struct {
-	mu       *sync.RWMutex
+	mu       sync.RWMutex
 	model    providers.Model
 	expireAt time.Time
 }
 
+func NewSchedule(model providers.Model) *ModelSchedule {
+	schedule := &ModelSchedule{
+		model: model,
+	}
+
+	schedule.Update()
+
+	return schedule
+}
+
 func (s *ModelSchedule) ExpireAt() time.Time {
 	s.mu.RLock()
 	defer s.mu.RUnlock()
@@ -55,9 +65,7 @@ func NewLeastLatencyRouting(models []providers.Model) *LeastLatencyRouting {
 	schedules := make([]*ModelSchedule, 0, len(models))
 
 	for _, model := range models {
-		schedules = append(schedules, &ModelSchedule{
-			model: model,
-		})
+		schedules = append(schedules, NewSchedule(model))
 	}
 
 	return &LeastLatencyRouting{
@@ -86,7 +94,7 @@ func (r *LeastLatencyRouting) Next() (providers.Model, error) { //nolint:cyclop
 
 	if len(coldSchedules) > 0 {
 		// warm up models
-		idx := r.warmupIdx.Add(1)
+		idx := r.warmupIdx.Add(1) - 1
 
 		schedule := coldSchedules[idx%uint32(len(coldSchedules))]
 		schedule.Update()
@@ -118,7 +126,7 @@ func (r *LeastLatencyRouting) Next() (providers.Model, error) { //nolint:cyclop
 		}
 
 		if !schedule.Expired() && !nextSchedule.Expired() &&
-			schedule.model.Latency().Value() < nextSchedule.model.Latency().Value() {
+			nextSchedule.model.Latency().Value() > schedule.model.Latency().Value() {
 			nextSchedule = schedule
 		}
 	}
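
The warmup branch is where the off-by-one fix matters: an atomic Add(1) returns the counter value after the increment, so without subtracting 1 the first warmup pick would land on index 1 rather than index 0, which is what the new warmup test expects. A standalone sketch of the arithmetic, assuming warmupIdx is an atomic uint32 counter starting at zero (the counter and pool size below are illustrative stand-ins for warmupIdx and coldSchedules):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	func main() {
		var warmupIdx atomic.Uint32 // stand-in for r.warmupIdx; zero-valued like the router's counter
		coldModels := uint32(3)     // stand-in for len(coldSchedules)

		for i := 0; i < 4; i++ {
			idx := warmupIdx.Add(1) - 1   // Add returns the incremented value, so subtract 1 to start at 0
			fmt.Println(idx % coldModels) // prints 0, 1, 2, 0 (round-robin over the cold pool)
		}
	}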
74 changes: 74 additions & 0 deletions pkg/routers/routing/least_latency_test.go
@@ -0,0 +1,74 @@
package routing

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/require"
	"glide/pkg/providers"
)

func TestLeastLatencyRouting_Warmup(t *testing.T) {
	type Model struct {
		modelID string
		healthy bool
		latency float64
	}

	type TestCase struct {
		models           []Model
		expectedModelIDs []string
	}

	tests := map[string]TestCase{
		"all cold models":             {[]Model{{"first", true, 0.0}, {"second", true, 0.0}, {"third", true, 0.0}}, []string{"first", "second", "third"}},
		"all cold models & unhealthy": {[]Model{{"first", true, 0.0}, {"second", false, 0.0}, {"third", true, 0.0}}, []string{"first", "third", "first"}},
		"some models are warmed":      {[]Model{{"first", true, 100.0}, {"second", true, 0.0}, {"third", true, 120.0}}, []string{"second", "second", "second"}},
		"cold unhealthy model":        {[]Model{{"first", true, 120.0}, {"second", false, 0.0}, {"third", true, 100.0}}, []string{"third", "third", "third"}},
	}

	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			models := make([]providers.Model, 0, len(tc.models))

			for _, model := range tc.models {
				models = append(models, providers.NewLangModelMock(model.modelID, model.healthy, model.latency))
			}

			routing := NewLeastLatencyRouting(models)
			iterator := routing.Iterator()

			// loop a few times over the whole pool to check that we wrap back to the beginning of the list
			for _, modelID := range tc.expectedModelIDs {
				model, err := iterator.Next()

				require.NoError(t, err)
				require.Equal(t, modelID, model.ID())
			}
		})
	}
}

func TestLeastLatencyRouting_NoHealthyModels(t *testing.T) {
	tests := map[string][]float64{
		"all cold models unhealthy":    {0.0, 0.0, 0.0},
		"all warm models unhealthy":    {100.0, 120.0, 150.0},
		"cold & warm models unhealthy": {0.0, 120.0, 150.0},
	}

	for name, latencies := range tests {
		t.Run(name, func(t *testing.T) {
			models := make([]providers.Model, 0, len(latencies))

			for idx, latency := range latencies {
				models = append(models, providers.NewLangModelMock(strconv.Itoa(idx), false, latency))
			}

			routing := NewLeastLatencyRouting(models)
			iterator := routing.Iterator()

			_, err := iterator.Next()
			require.Error(t, err)
		})
	}
}
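
Assuming the module layout implied by the file paths in this commit, the new tests can be run on their own with the standard Go tooling:

	go test ./pkg/routers/routing/ -run TestLeastLatencyRouting -v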
