enable duplicate packet
Yohan Totting committed May 1, 2024
1 parent 3569b37 commit 0cdedd5
Showing 2 changed files with 10 additions and 2 deletions.
client.go (1 addition, 1 deletion)
@@ -259,7 +259,7 @@ func NewClient(s *SFU, id string, name string, peerConnectionConfig webrtc.Confi
	// TODO: we need to use packet loss based bandwidth adjuster when the bandwidth is below 100_000
	return gcc.NewSendSideBWE(
		gcc.SendSideBWEInitialBitrate(int(s.bitrateConfigs.InitialBandwidth)),
-		gcc.SendSideBWEPacer(pacer.NewLeakyBucketPacer(int(s.bitrateConfigs.InitialBandwidth))),
+		gcc.SendSideBWEPacer(pacer.NewLeakyBucketPacer(int(s.bitrateConfigs.InitialBandwidth), true)),
		// gcc.SendSideBWEPacer(gcc.NewNoOpPacer()),
	)
})
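After this commit, every caller of NewLeakyBucketPacer has to pass the new allowDuplicate flag explicitly; the client.go call site above opts in with true. A minimal sketch of the same wiring, assuming pion's interceptor gcc package and this repository's pkg/pacer package (the module path below is illustrative, not taken from the diff):

package sfu

import (
	"github.com/pion/interceptor/pkg/gcc"

	"github.com/inlivedev/sfu/pkg/pacer" // assumed module path
)

// newBWE builds a send-side bandwidth estimator whose pacer keeps duplicate
// packets (allowDuplicate = true) instead of rejecting them with ErrDuplicate.
func newBWE(initialBitrate int) (*gcc.SendSideBWE, error) {
	return gcc.NewSendSideBWE(
		gcc.SendSideBWEInitialBitrate(initialBitrate),
		gcc.SendSideBWEPacer(pacer.NewLeakyBucketPacer(initialBitrate, true)),
	)
}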
pkg/pacer/leakybucket.go (9 additions, 1 deletion)
@@ -48,6 +48,7 @@ func (q *queue) Remove(e *list.Element) *item {

// LeakyBucketPacer implements a leaky bucket pacing algorithm
type LeakyBucketPacer struct {
+	allowDuplicate    bool
	f                 float64
	targetBitrate     int
	targetBitrateLock sync.Mutex
@@ -63,8 +64,9 @@ type LeakyBucketPacer struct {
}

// NewLeakyBucketPacer initializes a new LeakyBucketPacer
-func NewLeakyBucketPacer(initialBitrate int) *LeakyBucketPacer {
+func NewLeakyBucketPacer(initialBitrate int, allowDuplicate bool) *LeakyBucketPacer {
	p := &LeakyBucketPacer{
+		allowDuplicate: allowDuplicate,
		f:              1.5,
		targetBitrate:  initialBitrate,
		pacingInterval: 5 * time.Millisecond,
@@ -165,6 +167,12 @@ Loop:
		currentCache, _ := e.Value.(*item)

		if currentCache.packet.Header().SequenceNumber == pkt.Header().SequenceNumber {
+			if p.allowDuplicate {
+				queue.InsertAfter(newItem, e)
+
+				break Loop
+			}
+
			glog.Warning("packet cache: packet sequence ", pkt.Header().SequenceNumber, " already exists in the cache, will not adding the packet")

			return 0, ErrDuplicate
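The added branch changes the queue policy for repeated sequence numbers: instead of logging a warning and returning ErrDuplicate, the pacer now inserts the duplicate right after the already-queued packet and keeps going, so a packet that arrives twice (for example a retransmission) is still paced out. A standalone sketch of this insert-after-or-reject pattern on container/list, the structure the pacer's queue wraps; seqItem, insertPacket, and errDuplicate are illustrative names, not the repo's API:

package main

import (
	"container/list"
	"errors"
	"fmt"
)

// errDuplicate mirrors the pacer's ErrDuplicate for this sketch.
var errDuplicate = errors.New("packet already in queue")

// seqItem stands in for the pacer's queued packet item.
type seqItem struct {
	seq uint16
}

// insertPacket scans the queue for an entry with the same sequence number.
// With allowDuplicate set it inserts the new packet right after the match;
// otherwise it rejects the packet, mirroring the pre-commit behaviour.
func insertPacket(q *list.List, seq uint16, allowDuplicate bool) error {
	for e := q.Front(); e != nil; e = e.Next() {
		if e.Value.(*seqItem).seq == seq {
			if allowDuplicate {
				q.InsertAfter(&seqItem{seq: seq}, e)
				return nil
			}
			return errDuplicate
		}
	}
	q.PushBack(&seqItem{seq: seq})
	return nil
}

func main() {
	q := list.New()
	_ = insertPacket(q, 100, true)
	_ = insertPacket(q, 101, true)
	// A second packet with sequence 100 is now kept instead of being dropped.
	err := insertPacket(q, 100, true)
	fmt.Println("duplicate kept:", err == nil, "queue length:", q.Len())
}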
