Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add a package for slices utilities #25069

Merged
merged 4 commits into from
Apr 27, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
7 changes: 4 additions & 3 deletions daemon/cmd/fqdn.go
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ import (
"github.com/cilium/cilium/pkg/proxy"
"github.com/cilium/cilium/pkg/proxy/accesslog"
"github.com/cilium/cilium/pkg/proxy/logger"
"github.com/cilium/cilium/pkg/slices"
"github.com/cilium/cilium/pkg/u8proto"
)

Expand Down Expand Up @@ -240,7 +241,7 @@ func (d *Daemon) bootstrapFQDN(possibleEndpoints map[uint16]*endpoint.Endpoint,
//
lookupTime := time.Now()
for _, zombie := range alive {
namesToClean = fqdn.KeepUniqueNames(append(namesToClean, zombie.Names...))
namesToClean = slices.Unique(append(namesToClean, zombie.Names...))
for _, name := range zombie.Names {
activeConnections.Update(lookupTime, name, []netip.Addr{zombie.IP}, activeConnectionsTTL)
}
Expand All @@ -250,11 +251,11 @@ func (d *Daemon) bootstrapFQDN(possibleEndpoints map[uint16]*endpoint.Endpoint,
// Entries here have been evicted from the DNS cache (via .GC due to
// TTL expiration or overlimit) and are no longer active connections.
for _, zombie := range dead {
namesToClean = fqdn.KeepUniqueNames(append(namesToClean, zombie.Names...))
namesToClean = slices.Unique(append(namesToClean, zombie.Names...))
}
}

namesToClean = fqdn.KeepUniqueNames(namesToClean)
namesToClean = slices.Unique(namesToClean)
if len(namesToClean) == 0 {
return nil
}
Expand Down
3 changes: 2 additions & 1 deletion operator/pkg/model/translation/envoy_listener.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ import (
"github.com/cilium/cilium/operator/pkg/model"
"github.com/cilium/cilium/pkg/envoy"
ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
"github.com/cilium/cilium/pkg/slices"
)

const (
Expand Down Expand Up @@ -131,7 +132,7 @@ func NewListener(name string, ciliumSecretNamespace string, tls map[model.TLSSec

filterChains = append(filterChains, &envoy_config_listener.FilterChain{
FilterChainMatch: &envoy_config_listener.FilterChainMatch{
ServerNames: sortAndUnique(hostNames),
ServerNames: slices.SortedUnique(hostNames),
TransportProtocol: tlsTransportProtocol,
},
Filters: []*envoy_config_listener.Filter{
Expand Down
32 changes: 4 additions & 28 deletions operator/pkg/model/translation/translator.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ import (

"github.com/cilium/cilium/operator/pkg/model"
ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
"github.com/cilium/cilium/pkg/slices"
)

const (
Expand Down Expand Up @@ -181,13 +182,13 @@ func (i *defaultTranslator) getRouteConfiguration(m *model.Model) []ciliumv2.XDS
redirectedHost := map[string]struct{}{}
// Add HTTPs redirect virtual host for secure host
if port == insecureHost && i.enforceHTTPs {
for _, h := range unique(portHostName[secureHost]) {
for _, h := range slices.Unique(portHostName[secureHost]) {
vhs, _ := NewVirtualHostWithDefaults([]string{h}, true, i.hostNameSuffixMatch, hostNameRoutes[h])
virtualhosts = append(virtualhosts, vhs)
redirectedHost[h] = struct{}{}
}
}
for _, h := range unique(hostNames) {
for _, h := range slices.Unique(hostNames) {
if port == insecureHost {
if _, ok := redirectedHost[h]; ok {
continue
Expand Down Expand Up @@ -242,7 +243,7 @@ func getNamespaceNamePortsMap(m *model.Model) map[string]map[string][]string {
for _, be := range r.Backends {
namePortMap, exist := namespaceNamePortMap[be.Namespace]
if exist {
namePortMap[be.Name] = sortAndUnique(append(namePortMap[be.Name], be.Port.GetPort()))
namePortMap[be.Name] = slices.SortedUnique(append(namePortMap[be.Name], be.Port.GetPort()))
} else {
namePortMap = map[string][]string{
be.Name: {be.Port.GetPort()},
Expand All @@ -254,28 +255,3 @@ func getNamespaceNamePortsMap(m *model.Model) map[string]map[string][]string {
}
return namespaceNamePortMap
}

// sortAndUnique returns the unique elements of arr in lexicographic
// order. The input slice itself is not sorted in place; deduplication
// happens first via unique, which allocates the result slice.
func sortAndUnique(arr []string) []string {
	deduped := unique(arr)
	sort.Strings(deduped)
	return deduped
}

// unique returns a unique slice of strings. The order of the elements is
// preserved (first occurrence wins).
//
// Improvement over the previous version: a single check-and-insert pass
// replaces the former fill-then-rescan-with-delete approach, halving the
// number of map operations while producing the identical result.
func unique(arr []string) []string {
	seen := make(map[string]struct{}, len(arr))
	res := make([]string, 0, len(arr))
	for _, s := range arr {
		if _, dup := seen[s]; dup {
			continue
		}
		seen[s] = struct{}{}
		res = append(res, s)
	}
	return res
}
11 changes: 6 additions & 5 deletions pkg/fqdn/cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ import (
ippkg "github.com/cilium/cilium/pkg/ip"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/slices"
)

// cacheEntry objects hold data passed in via DNSCache.Update, nominally
Expand Down Expand Up @@ -254,7 +255,7 @@ func (c *DNSCache) cleanupExpiredEntries(expires time.Time) (affectedNames []str
}

removed = make(map[netip.Addr][]*cacheEntry)
for _, name := range KeepUniqueNames(toCleanNames) {
for _, name := range slices.Unique(toCleanNames) {
if entries, exists := c.forward[name]; exists {
affectedNames = append(affectedNames, name)
for ip, entry := range c.removeExpired(entries, c.lastCleanup, time.Time{}) {
Expand Down Expand Up @@ -337,7 +338,7 @@ func (c *DNSCache) GC(now time.Time, zombies *DNSZombieMappings) (affectedNames
}
}

return KeepUniqueNames(append(expiredNames, overLimitNames...))
return slices.Unique(append(expiredNames, overLimitNames...))
}

// UpdateFromCache is a utility function that allows updating a DNSCache
Expand Down Expand Up @@ -591,7 +592,7 @@ func (c *DNSCache) ForceExpire(expireLookupsBefore time.Time, nameMatch *regexp.
}
}

return KeepUniqueNames(namesAffected)
return slices.Unique(namesAffected)
}

func (c *DNSCache) forceExpireByNames(expireLookupsBefore time.Time, names []string) (namesAffected []string) {
Expand Down Expand Up @@ -780,13 +781,13 @@ func (zombies *DNSZombieMappings) Upsert(expiryTime time.Time, ipStr string, qna
zombie, updatedExisting := zombies.deletes[addr]
if !updatedExisting {
zombie = &DNSZombieMapping{
Names: KeepUniqueNames(qname),
Names: slices.Unique(qname),
IP: netip.MustParseAddr(ipStr),
DeletePendingAt: expiryTime,
}
zombies.deletes[addr] = zombie
} else {
zombie.Names = KeepUniqueNames(append(zombie.Names, qname...))
zombie.Names = slices.Unique(append(zombie.Names, qname...))
// Keep the latest expiry time
if expiryTime.After(zombie.DeletePendingAt) {
zombie.DeletePendingAt = expiryTime
Expand Down
40 changes: 0 additions & 40 deletions pkg/fqdn/helpers.go
Original file line number Diff line number Diff line change
Expand Up @@ -99,43 +99,3 @@ func (n *NameManager) MapSelectorsToIPsLocked(fqdnSelectors map[api.FQDNSelector
// prepareMatchName normalizes matchName via dns.FQDN before it is used
// for lookups/comparisons elsewhere in this package.
// NOTE(review): presumably dns.FQDN ensures a fully-qualified name with a
// trailing dot — confirm against the dns package's FQDN documentation.
func prepareMatchName(matchName string) string {
	return dns.FQDN(matchName)
}

// KeepUniqueNames removes duplicate names from the given slice while
// maintaining order (first occurrence wins). The returned slice re-uses
// the memory of the input slice, which is modified in place.
func KeepUniqueNames(names []string) []string {
	// For short inputs a naive O(n^2) scan over the already-kept prefix
	// avoids all map allocations. The crossover point of 48 names was
	// derived experimentally: below it the quadratic scan is up to 5x
	// faster than a map; above it the map-based pass wins.
	if len(names) < 48 {
		kept := names[:0]
		for _, name := range names {
			isDup := false
			for _, prev := range kept {
				if prev == name {
					isDup = true
					break
				}
			}
			if !isDup {
				kept = append(kept, name)
			}
		}
		return kept
	}

	// Map-based single pass for larger inputs.
	seen := make(map[string]struct{}, len(names))
	kept := names[:0]
	for _, name := range names {
		if _, dup := seen[name]; dup {
			continue
		}
		seen[name] = struct{}{}
		kept = append(kept, name)
	}
	return kept
}
54 changes: 0 additions & 54 deletions pkg/fqdn/helpers_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,15 +4,12 @@
package fqdn

import (
"fmt"
"math/rand"
"net/netip"
"time"

"github.com/sirupsen/logrus"
. "gopkg.in/check.v1"

"github.com/cilium/cilium/pkg/checker"
"github.com/cilium/cilium/pkg/ip"
"github.com/cilium/cilium/pkg/policy/api"
)
Expand All @@ -31,57 +28,6 @@ var (
}
)

// TestKeepUniqueNames checks KeepUniqueNames against fixed tables and a
// deterministic pseudo-random 48-name input containing ~10% duplicates.
func (ds *DNSCacheTestSuite) TestKeepUniqueNames(c *C) {
	rng := rand.New(rand.NewSource(99))

	generated := make([]string, 48)
	expectedUniq := []string{}
	for i := range generated {
		p := rng.Float64()
		// Duplicate an earlier name with 10% probability.
		if i > 0 && p < 0.1 {
			generated[i] = generated[int(float64(i-1)*rng.Float64())]
		} else {
			generated[i] = fmt.Sprintf("a%d.domain.com", i)
			expectedUniq = append(expectedUniq, generated[i])
		}
	}

	for _, tt := range []struct {
		input []string
		want  []string
	}{
		{input: []string{"a"}, want: []string{"a"}},
		{input: []string{"a", "a"}, want: []string{"a"}},
		{input: []string{"a", "b"}, want: []string{"a", "b"}},
		{input: []string{"a", "b", "b"}, want: []string{"a", "b"}},
		{input: []string{"a", "b", "c"}, want: []string{"a", "b", "c"}},
		{input: []string{"a", "b", "a", "c"}, want: []string{"a", "b", "c"}},
		{input: []string{""}, want: []string{""}},
		{input: []string{}, want: []string{}},
		{input: generated, want: expectedUniq},
	} {
		c.Assert(KeepUniqueNames(tt.input), checker.DeepEquals, tt.want)
	}
}

// Note: each "op" works on size things
func (ds *DNSCacheTestSuite) BenchmarkKeepUniqueNames(c *C) {
	// Build the fixture outside the timed region.
	c.StopTimer()
	names := make([]string, 48)
	for i := range names {
		names[i] = fmt.Sprintf("a%d.domain.com", i)
	}
	c.StartTimer()

	for n := 0; n < c.N; n++ {
		KeepUniqueNames(names)
	}
}

func (ds *DNSCacheTestSuite) TestMapIPsToSelectors(c *C) {
var (
ciliumIP1 = netip.MustParseAddr("1.2.3.4")
Expand Down
47 changes: 19 additions & 28 deletions pkg/ip/ip.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ import (
"net/netip"
"sort"

"golang.org/x/exp/slices"
"github.com/cilium/cilium/pkg/slices"
)

const (
Expand Down Expand Up @@ -743,41 +743,32 @@ func PartitionCIDR(targetCIDR net.IPNet, excludeCIDR net.IPNet) ([]*net.IPNet, [
// lexicographically sorted via a byte-wise comparison of the IP slices (i.e.
// IPv4 addresses show up before IPv6).
// The slice is manipulated in-place destructively.
//
// 1- Sort the slice by comparing the IPs as bytes
// 2- For every unseen unique IP in the sorted slice, move it to the end of
// the return slice.
// Note that the slice is always large enough and, because it is sorted, we
// will not overwrite a valid element with another. To overwrite an element i
// with j, i must have come before j AND we decided it was a duplicate of the
// element at i-1.
func KeepUniqueIPs(ips []net.IP) []net.IP {
sort.Slice(ips, func(i, j int) bool {
return bytes.Compare(ips[i], ips[j]) == -1
})

returnIPs := ips[:0] // len==0 but cap==cap(ips)
for readIdx, ip := range ips {
if len(returnIPs) == 0 || !returnIPs[len(returnIPs)-1].Equal(ips[readIdx]) {
returnIPs = append(returnIPs, ip)
}
}

return returnIPs
return slices.SortedUniqueFunc(
ips,
func(i, j int) bool {
return bytes.Compare(ips[i], ips[j]) == -1
},
func(a, b net.IP) bool {
return a.Equal(b)
},
)
}

// KeepUniqueAddrs transforms the provided multiset of IP addresses into a
// single set, lexicographically sorted via comparison of the addresses using
// netip.Addr.Compare (i.e. IPv4 addresses show up before IPv6).
// The slice is manipulated in-place destructively; it does not create a new slice.
func KeepUniqueAddrs(addrs []netip.Addr) []netip.Addr {
if len(addrs) == 0 {
return addrs
}
sort.Slice(addrs, func(i, j int) bool {
return addrs[i].Compare(addrs[j]) < 0
})
return slices.Compact(addrs)
return slices.SortedUniqueFunc(
addrs,
func(i, j int) bool {
return addrs[i].Compare(addrs[j]) < 0
},
func(a, b netip.Addr) bool {
return a == b
},
)
}

var privateIPBlocks []*net.IPNet
Expand Down
22 changes: 0 additions & 22 deletions pkg/ip/ip_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@ package ip

import (
"math/big"
"math/rand"
"net"
"net/netip"
"sort"
Expand Down Expand Up @@ -751,27 +750,6 @@ func (s *IPTestSuite) TestKeepUniqueIPs(c *C) {
}
}

// Note: each "op" works on size things
func (s *IPTestSuite) BenchmarkKeepUniqueIPs(c *C) {
	size := 1000
	ipsOrig := make([]net.IP, 0, size)
	for i := 0; i < size; i++ {
		ipsOrig = append(ipsOrig, net.IPv4(byte(i>>24), byte(i>>16), byte(i>>8), byte(i>>0)))
	}
	// BUGFIX: the destination of copy must have length (not merely
	// capacity) equal to the source; the previous make([]net.IP, 0, n)
	// gave ips length 0, so copy() copied nothing and the benchmark
	// shuffled and deduplicated an empty slice, measuring no real work.
	ips := make([]net.IP, len(ipsOrig))
	copy(ips, ipsOrig)

	for i := 0; i < c.N; i++ {
		// Re-shuffle outside the timed region so each op sees an
		// unsorted input of the same size.
		c.StopTimer()
		rand.Shuffle(len(ips), func(i, j int) {
			ips[i], ips[j] = ips[j], ips[i]
		})
		c.StartTimer()

		KeepUniqueIPs(ips)
	}
}
Comment on lines -755 to -773
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm actually curious to see what the difference in benchmark results were between the old KeepUniqueIPs and the new SortedUniqueFunc.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Here they are (I used the new benchmark to compare the old and the new function to see the differences while scaling the slice size):

name                  old time/op    new time/op    delta
KeepUniqueIPs/96-8      17.2µs ±34%    15.6µs ±29%    ~     (p=0.421 n=5+5)
KeepUniqueIPs/128-8     24.5µs ± 5%    23.8µs ± 3%    ~     (p=0.151 n=5+5)
KeepUniqueIPs/160-8     31.2µs ± 8%    29.8µs ± 2%  -4.47%  (p=0.032 n=5+5)
KeepUniqueIPs/192-8     37.2µs ± 4%    36.2µs ± 2%    ~     (p=0.056 n=5+5)
KeepUniqueIPs/256-8     50.6µs ± 1%    48.6µs ± 0%  -4.04%  (p=0.008 n=5+5)
KeepUniqueIPs/512-8      108µs ± 3%     105µs ± 1%  -3.27%  (p=0.008 n=5+5)
KeepUniqueIPs/1024-8     235µs ± 1%     225µs ± 0%  -4.26%  (p=0.008 n=5+5)

name                  old alloc/op   new alloc/op   delta
KeepUniqueIPs/96-8       96.0B ± 0%     96.0B ± 0%    ~     (all equal)
KeepUniqueIPs/128-8      96.0B ± 0%     96.0B ± 0%    ~     (all equal)
KeepUniqueIPs/160-8      96.0B ± 0%     96.0B ± 0%    ~     (all equal)
KeepUniqueIPs/192-8      96.0B ± 0%     96.0B ± 0%    ~     (all equal)
KeepUniqueIPs/256-8      96.0B ± 0%     96.0B ± 0%    ~     (all equal)
KeepUniqueIPs/512-8      96.0B ± 0%     96.0B ± 0%    ~     (all equal)
KeepUniqueIPs/1024-8     96.0B ± 0%     96.0B ± 0%    ~     (all equal)

name                  old allocs/op  new allocs/op  delta
KeepUniqueIPs/96-8        3.00 ± 0%      3.00 ± 0%    ~     (all equal)
KeepUniqueIPs/128-8       3.00 ± 0%      3.00 ± 0%    ~     (all equal)
KeepUniqueIPs/160-8       3.00 ± 0%      3.00 ± 0%    ~     (all equal)
KeepUniqueIPs/192-8       3.00 ± 0%      3.00 ± 0%    ~     (all equal)
KeepUniqueIPs/256-8       3.00 ± 0%      3.00 ± 0%    ~     (all equal)
KeepUniqueIPs/512-8       3.00 ± 0%      3.00 ± 0%    ~     (all equal)
KeepUniqueIPs/1024-8      3.00 ± 0%      3.00 ± 0%    ~     (all equal)

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Awesome, thanks. It would be great to have them in the commit msg IMO


func TestKeepUniqueAddrs(t *testing.T) {
for _, tc := range []struct {
name string
Expand Down