Skip to content

Commit

Permalink
autopilot: refactor betweenness centrality metric construction
Browse files Browse the repository at this point in the history
  • Loading branch information
bhandras committed Jul 17, 2020
1 parent a7ae67d commit 2172de6
Show file tree
Hide file tree
Showing 4 changed files with 41 additions and 40 deletions.
19 changes: 13 additions & 6 deletions autopilot/betweenness_centrality.go
Expand Up @@ -71,15 +71,22 @@ type BetweennessCentrality struct {
}

// NewBetweennessCentralityMetric creates a new BetweennessCentrality instance.
// Users can specify the number of workers to use for calculating centrality.
func NewBetweennessCentralityMetric(workers int) (*BetweennessCentrality, error) {
func NewBetweennessCentralityMetric() *BetweennessCentrality {
return &BetweennessCentrality{
workers: 1,
}
}

// SetWorkers can be used to specify the number of workers to use for
// calculating centrality. It returns an error if workers is not positive,
// leaving the previously configured worker count untouched.
func (bc *BetweennessCentrality) SetWorkers(workers int) error {
	// There should be at least one worker.
	if workers < 1 {
		return fmt.Errorf("workers must be positive")
	}

	bc.workers = workers

	return nil
}

// Name returns the name of the metric.
Expand Down
27 changes: 10 additions & 17 deletions autopilot/betweenness_centrality_test.go
Expand Up @@ -7,34 +7,30 @@ import (
"github.com/stretchr/testify/require"
)

func TestBetweennessCentralityMetricConstruction(t *testing.T) {
func TestBetweennessCentralitySetWorkers(t *testing.T) {
failing := []int{-1, 0}
ok := []int{1, 10}

m := NewBetweennessCentralityMetric()
for _, workers := range failing {
m, err := NewBetweennessCentralityMetric(workers)
require.Error(
t, err, "construction must fail with <= 0 workers",
t, m.SetWorkers(workers),
"construction must fail with <= 0 workers",
)
require.Nil(t, m)
}

for _, workers := range ok {
m, err := NewBetweennessCentralityMetric(workers)
require.NoError(
t, err, "construction must succeed with >= 1 workers",
t, m.SetWorkers(workers),
"construction must succeed with >= 1 workers",
)
require.NotNil(t, m)
}
}

// Tests that empty graph results in empty centrality result.
func TestBetweennessCentralityEmptyGraph(t *testing.T) {
centralityMetric, err := NewBetweennessCentralityMetric(1)
require.NoError(
t, err,
"construction must succeed with positive number of workers",
)
centralityMetric := NewBetweennessCentralityMetric()

for _, chanGraph := range chanGraphs {
graph, cleanup, err := chanGraph.genFunc()
Expand Down Expand Up @@ -93,13 +89,10 @@ func TestBetweennessCentralityWithNonEmptyGraph(t *testing.T) {
)

success := t.Run(testName, func(t1 *testing.T) {
metric, err := NewBetweennessCentralityMetric(
numWorkers,
)
metric := NewBetweennessCentralityMetric()

require.NoError(
t, err,
"construction must succeed with "+
"positive number of workers",
t, metric.SetWorkers(numWorkers),
)

graphNodes := buildTestGraph(
Expand Down
27 changes: 14 additions & 13 deletions autopilot/top_centrality.go
Expand Up @@ -17,15 +17,25 @@ import (
// existing edge and recalculate the betweenness centrality of each node. This
// technique is usually referred to as "greedy" algorithm and gives better
// results than TopK but is considerably slower too.
type TopCentrality struct{}
type TopCentrality struct {
centralityMetric *BetweennessCentrality
}

// A compile time assertion to ensure TopCentrality meets the
// AttachmentHeuristic interface.
var _ AttachmentHeuristic = (*TopCentrality)(nil)

// NewTopCentrality constructs and returns a new TopCentrality heuristic. The
// embedded centrality metric is configured to use one worker per available
// CPU; if that configuration fails, the metric falls back to its default
// single worker.
func NewTopCentrality() *TopCentrality {
	centralityMetric := NewBetweennessCentralityMetric()

	// runtime.NumCPU() is always >= 1, so this should not fail in
	// practice; log and continue with the default worker count if it does.
	if err := centralityMetric.SetWorkers(runtime.NumCPU()); err != nil {
		log.Warnf("invalid number of CPUs available")
	}

	return &TopCentrality{
		centralityMetric: centralityMetric,
	}
}

// Name returns the name of the heuristic.
Expand All @@ -43,22 +53,13 @@ func (g *TopCentrality) NodeScores(graph ChannelGraph, chans []Channel,
chanSize btcutil.Amount, nodes map[NodeID]struct{}) (
map[NodeID]*NodeScore, error) {

// As we don't currently support incremental graph updates, we
// don't need to cache anything.
bc, err := NewBetweennessCentralityMetric(
runtime.NumCPU(),
)
if err != nil {
return nil, err
}

// Calculate betweenness centrality for the whole graph.
if err := bc.Refresh(graph); err != nil {
if err := g.centralityMetric.Refresh(graph); err != nil {
return nil, err
}

normalize := true
centrality := bc.GetMetric(normalize)
centrality := g.centralityMetric.GetMetric(normalize)

// Create a map of the existing peers for faster filtering.
existingPeers := make(map[NodeID]struct{})
Expand Down
8 changes: 4 additions & 4 deletions rpcserver.go
Expand Up @@ -4948,12 +4948,12 @@ func (r *rpcServer) GetNodeMetrics(ctx context.Context,
// Calculate betweenness centrality if requested. Note that depending on the
// graph size, this may take up to a few minutes.
channelGraph := autopilot.ChannelGraphFromDatabase(graph)
centralityMetric, err := autopilot.NewBetweennessCentralityMetric(
runtime.NumCPU(),
)
if err != nil {
centralityMetric := autopilot.NewBetweennessCentralityMetric()

if err := centralityMetric.SetWorkers(runtime.NumCPU()); err != nil {
return nil, err
}

if err := centralityMetric.Refresh(channelGraph); err != nil {
return nil, err
}
Expand Down

0 comments on commit 2172de6

Please sign in to comment.