
refactoring node type
node type refactoring results in 5-10% less memory consumption
gaissmai committed Apr 20, 2024
1 parent 84fa5bd commit fa1d6fb
Showing 9 changed files with 291 additions and 357 deletions.
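The gist of the refactor: the per-node prefix and child containers are dissolved, and their bitsets and popcount-compressed slices become direct fields of the node. The before/after sketch below is inferred solely from the identifiers visible in this diff; the type and field names (and the concrete bitset type) are assumptions, not bart's actual declarations.

package sketch

// Stand-in for whatever bitset type bart uses; kept abstract on purpose.
type bitset256 [4]uint64

// Before (guessed from the removed identifiers n.prefixes.values,
// n.children.childs, n.children.Select): each node carried two small
// container structs, each with its own bitset and compressed slice.
type prefixesWrapper[V any] struct {
	bits   bitset256
	values []V // popcount-compressed payloads
}

type childrenWrapper[V any] struct {
	bits   bitset256
	childs []*nodeBefore[V]
}

type nodeBefore[V any] struct {
	prefixes prefixesWrapper[V]
	children childrenWrapper[V]
}

// After (matching the new n.prefixes, n.children and n.childrenBitset
// accesses): the bitsets and compressed slices are plain fields of the
// node. Dropping the wrapper level per node is consistent with the 5-10%
// memory reduction reported in the commit message.
type nodeAfter[V any] struct {
	prefixesBitset bitset256
	childrenBitset bitset256
	prefixes       []V
	children       []*nodeAfter[V]
}
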
dumper.go (24 changes: 12 additions & 12 deletions)
@@ -67,8 +67,8 @@ func (t *Table[V]) dump(w io.Writer) error {
 func (n *node[V]) dumpRec(w io.Writer, path []byte, is4 bool) {
 	n.dump(w, path, is4)
 
-	for i, child := range n.children.childs {
-		octet := n.children.Select(uint(i))
+	for i, child := range n.children {
+		octet := n.childrenBitset.Select(uint(i))
 		child.dumpRec(w, append(path, byte(octet)), is4)
 	}
 }
@@ -89,13 +89,13 @@ func (n *node[V]) dump(w io.Writer, path []byte, is4 bool) {
 	must(fmt.Fprintf(w, "\n%s[%s] depth: %d path: [%v] / %d\n",
 		indent, n.hasType(), depth, ancestors(path, is4), bits))
 
-	if len(n.prefixes.values) != 0 {
-		indices := n.prefixes.allIndexes()
+	if len(n.prefixes) != 0 {
+		indices := n.allStrideIndexes()
 		// print the baseIndices for this node.
-		must(fmt.Fprintf(w, "%sindexs(#%d): %v\n", indent, len(n.prefixes.values), indices))
+		must(fmt.Fprintf(w, "%sindexs(#%d): %v\n", indent, len(n.prefixes), indices))
 
 		// print the prefixes for this node
-		must(fmt.Fprintf(w, "%sprefxs(#%d): ", indent, len(n.prefixes.values)))
+		must(fmt.Fprintf(w, "%sprefxs(#%d): ", indent, len(n.prefixes)))
 
 		for _, idx := range indices {
 			octet, bits := baseIndexToPrefix(idx)
@@ -104,12 +104,12 @@ func (n *node[V]) dump(w io.Writer, path []byte, is4 bool) {
 		must(fmt.Fprintln(w))
 	}
 
-	if len(n.children.childs) != 0 {
+	if len(n.children) != 0 {
 		// print the childs for this node
-		must(fmt.Fprintf(w, "%schilds(#%d): ", indent, len(n.children.childs)))
+		must(fmt.Fprintf(w, "%schilds(#%d): ", indent, len(n.children)))
 
-		for i := range n.children.childs {
-			octet := n.children.Select(uint(i))
+		for i := range n.children {
+			octet := n.childrenBitset.Select(uint(i))
 			must(fmt.Fprintf(w, "%s ", octetFmt(octet, is4)))
 			must(fmt.Fprintln(w))
 		}
@@ -164,8 +164,8 @@ func (nt nodeType) String() string {
 
 // hasType returns the nodeType.
 func (n *node[V]) hasType() nodeType {
-	lenPefixes := len(n.prefixes.values)
-	lenChilds := len(n.children.childs)
+	lenPefixes := len(n.prefixes)
+	lenChilds := len(n.children)
 
 	if lenPefixes == 0 && lenChilds != 0 {
 		return intermediateNode
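Throughout the new code, entry i of the popcount-compressed children slice is paired with its octet via childrenBitset.Select(uint(i)). A minimal, self-contained sketch of that rank/select relationship follows; it is not bart's actual bitset implementation, just the underlying idea on a 256-bit bitmap.

package sketch

import "math/bits"

// A 256-bit bitmap marks which octets have an entry; the compressed slice
// stores only those entries, densely, in octet order.

// rank counts the set bits strictly below pos, i.e. the slice slot that
// belongs to octet pos (if its bit is set).
func rank(bm [4]uint64, pos uint) int {
	slot := 0
	for w := uint(0); w < pos/64; w++ {
		slot += bits.OnesCount64(bm[w])
	}
	slot += bits.OnesCount64(bm[pos/64] & (1<<(pos%64) - 1))
	return slot
}

// selectNth returns the position of the i-th (0-based) set bit, i.e. the
// octet that belongs to slot i of the compressed slice; this is the idea
// behind the childrenBitset.Select(uint(i)) calls in the diff.
func selectNth(bm [4]uint64, i uint) uint {
	for w := uint(0); w < 4; w++ {
		n := uint(bits.OnesCount64(bm[w]))
		if i < n {
			word := bm[w]
			for ; i > 0; i-- {
				word &= word - 1 // clear the lowest set bit
			}
			return w*64 + uint(bits.TrailingZeros64(word))
		}
		i -= n
	}
	return 256 // fewer than i+1 bits set
}

With this layout, children[i] always belongs to octet selectNth(bitmap, i), and rank(bitmap, octet) recovers the slot of an existing child; together they keep the sparse node both compact and cheap to index.
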
fulltable_test.go (64 changes: 0 additions & 64 deletions)
@@ -11,7 +11,6 @@ import (
 	"math/rand"
 	"net/netip"
 	"os"
-	"runtime"
 	"strconv"
 	"strings"
 	"testing"
@@ -43,69 +42,6 @@ func init() {
 	randRoute6 = routes6[rand.Intn(len(routes6))]
 }
 
-func TestFullNew(t *testing.T) {
-	t.Parallel()
-	var startMem, endMem runtime.MemStats
-	runtime.ReadMemStats(&startMem)
-	nRoutes := make([]route, len(routes))
-	copy(nRoutes, routes)
-	runtime.ReadMemStats(&endMem)
-	rawBytes := endMem.TotalAlloc - startMem.TotalAlloc
-
-	rt := bart.Table[any]{}
-	runtime.ReadMemStats(&startMem)
-	for _, route := range nRoutes {
-		rt.Insert(route.CIDR, nil)
-	}
-	runtime.ReadMemStats(&endMem)
-	bartBytes := endMem.TotalAlloc - startMem.TotalAlloc
-
-	t.Logf("BART: n: %d routes, raw: %d KBytes, bart: %6d KBytes, mult: %.2f (bart/raw)",
-		len(nRoutes), rawBytes/(2<<10), bartBytes/(2<<10), float32(bartBytes)/float32(rawBytes))
-}
-
-func TestFullNewV4(t *testing.T) {
-	t.Parallel()
-	var startMem, endMem runtime.MemStats
-	runtime.ReadMemStats(&startMem)
-	nRoutes := make([]route, len(routes4))
-	copy(nRoutes, routes4)
-	runtime.ReadMemStats(&endMem)
-	rawBytes := endMem.TotalAlloc - startMem.TotalAlloc
-
-	rt := bart.Table[any]{}
-	runtime.ReadMemStats(&startMem)
-	for _, route := range nRoutes {
-		rt.Insert(route.CIDR, nil)
-	}
-	runtime.ReadMemStats(&endMem)
-	bartBytes := endMem.TotalAlloc - startMem.TotalAlloc
-
-	t.Logf("BART: n: %d routes, raw: %d KBytes, bart: %6d KBytes, mult: %.2f (bart/raw)",
-		len(nRoutes), rawBytes/(2<<10), bartBytes/(2<<10), float32(bartBytes)/float32(rawBytes))
-}
-
-func TestFullNewV6(t *testing.T) {
-	t.Parallel()
-	var startMem, endMem runtime.MemStats
-	runtime.ReadMemStats(&startMem)
-	nRoutes := make([]route, len(routes6))
-	copy(nRoutes, routes4)
-	runtime.ReadMemStats(&endMem)
-	rawBytes := endMem.TotalAlloc - startMem.TotalAlloc
-
-	rt := bart.Table[any]{}
-	runtime.ReadMemStats(&startMem)
-	for _, route := range nRoutes {
-		rt.Insert(route.CIDR, nil)
-	}
-	runtime.ReadMemStats(&endMem)
-	bartBytes := endMem.TotalAlloc - startMem.TotalAlloc
-
-	t.Logf("BART: n: %d routes, raw: %d KBytes, bart: %6d KBytes, mult: %.2f (bart/raw)",
-		len(nRoutes), rawBytes/(2<<10), bartBytes/(2<<10), float32(bartBytes)/float32(rawBytes))
-}
-
 var (
 	intSink int
 	okSink bool
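The three removed tests compared runtime.MemStats TotalAlloc deltas for the raw route slice against a fully populated bart.Table. A hypothetical replacement with the same intent, sketched against the routes fixture and route.CIDR field that fulltable_test.go already defines (the benchmark name and approach are assumptions, not part of this commit):

package bart_test

import (
	"testing"

	"github.com/gaissmai/bart"
)

// BenchmarkFullTableInsert builds the full table once per iteration;
// b.ReportAllocs makes `go test -bench .` print bytes and allocations
// per build, which tracks memory use without hand-rolled MemStats math.
func BenchmarkFullTableInsert(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		rt := bart.Table[any]{}
		for _, route := range routes {
			rt.Insert(route.CIDR, nil)
		}
	}
}
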
metrics.go (14 changes: 7 additions & 7 deletions)
@@ -58,22 +58,22 @@ func (t *Table[V]) readTableStats() map[string]any {
 	t.walk(func(n *node[V], depth int, is4 bool) {
 		switch is4 {
 		case true:
-			stats.size4 += len(n.prefixes.values)
-			stats.childs4[len(n.children.childs)]++
+			stats.size4 += len(n.prefixes)
+			stats.childs4[len(n.children)]++
 			stats.depth4[depth]++
 			stats.types4[n.hasType().String()]++
 
-			for _, idx := range n.prefixes.allIndexes() {
+			for _, idx := range n.allStrideIndexes() {
 				_, pfxLen := baseIndexToPrefix(idx)
 				stats.prefixlen4[strideLen*depth+pfxLen]++
 			}
 		case false:
-			stats.size6 += len(n.prefixes.values)
-			stats.childs6[len(n.children.childs)]++
+			stats.size6 += len(n.prefixes)
+			stats.childs6[len(n.children)]++
 			stats.depth6[depth]++
 			stats.types6[n.hasType().String()]++
 
-			for _, idx := range n.prefixes.allIndexes() {
+			for _, idx := range n.allStrideIndexes() {
 				_, pfxLen := baseIndexToPrefix(idx)
 				stats.prefixlen6[strideLen*depth+pfxLen]++
 			}
@@ -121,7 +121,7 @@ func (t *Table[V]) walk(cb metricWalkFunc[V]) {
 func (n *node[V]) metricWalkRec(cb metricWalkFunc[V], depth int, is4 bool) {
 	cb(n, depth, is4)
 
-	for _, child := range n.children.childs {
+	for _, child := range n.children {
 		child.metricWalkRec(cb, depth+1, is4)
 	}
 }
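Both metrics.go hunks keep the pattern of converting a stride-local base index back into (octet, prefix length) with baseIndexToPrefix and widening it to the full prefix length as strideLen*depth+pfxLen. For reference, a sketch of the usual ART-style mapping between an (octet, pfxLen) pair and its slot in the complete binary tree of a stride; bart's real helpers may differ in naming and edge handling:

package sketch

import "math/bits"

const strideLen = 8 // one octet per trie level

// prefixToBaseIndex maps a prefix within one stride (pfxLen 0..8) to its
// slot in the complete binary tree, e.g. (0, 0) -> 1 and (128, 1) -> 3.
func prefixToBaseIndex(octet byte, pfxLen int) uint {
	return uint(octet>>(strideLen-pfxLen)) + (1 << pfxLen)
}

// baseIndexToPrefix is the inverse for idx >= 1; together with the node
// depth it yields the full prefix length, as in strideLen*depth+pfxLen.
func baseIndexToPrefix(idx uint) (octet byte, pfxLen int) {
	pfxLen = bits.Len(idx) - 1
	octet = byte((idx ^ (1 << pfxLen)) << (strideLen - pfxLen))
	return octet, pfxLen
}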