ipn/ipnlocal: move handling of expired nodes to LocalBackend
To be able to synthesize a new NetMap when a node expires, have
LocalBackend, on receiving a new NetMap, start a timer that fires
slightly after the next node expires. Additionally, move the logic that
updates expired nodes into LocalBackend so it runs on every netmap
(whether received from controlclient or self-triggered).

Updates #6932

Signed-off-by: Andrew Dunham <andrew@du.nham.ca>
Change-Id: I833390e16ad188983eac29eb34cc7574f555f2f3
andrew-d committed Jan 14, 2023
1 parent 1e67947 commit 431b06a
Showing 6 changed files with 348 additions and 199 deletions.
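Only the controlclient side of the change is shown below; the ipn/ipnlocal half referenced in the commit message is among the files not expanded here. As a rough sketch of the timer logic the message describes — all names here (nextPeerExpiry, scheduleExpiryTimer, the 30-second grace period) are illustrative, not the commit's actual identifiers:

```go
// Illustrative sketch only — not the commit's code.
package sketch

import (
	"time"

	"tailscale.com/types/netmap"
)

// nextPeerExpiry returns the soonest key expiry among self and peers
// that is still in the future, or the zero time if nothing expires.
func nextPeerExpiry(nm *netmap.NetworkMap, now time.Time) time.Time {
	var next time.Time
	consider := func(t time.Time) {
		if t.IsZero() || !t.After(now) {
			return // never expires, or already expired
		}
		if next.IsZero() || t.Before(next) {
			next = t
		}
	}
	if nm.SelfNode != nil {
		consider(nm.SelfNode.KeyExpiry)
	}
	for _, p := range nm.Peers {
		consider(p.KeyExpiry)
	}
	return next
}

// scheduleExpiryTimer arms a timer that fires slightly after the next
// expiry, so a synthesized netmap can be delivered once the node is
// actually expired. Returns nil if no node ever expires.
func scheduleExpiryTimer(nm *netmap.NetworkMap, resync func()) *time.Timer {
	now := time.Now()
	next := nextPeerExpiry(nm, now)
	if next.IsZero() {
		return nil
	}
	const grace = 30 * time.Second // fire "slightly after", not at, the deadline
	return time.AfterFunc(next.Sub(now)+grace, resync)
}
```

Firing slightly late rather than exactly at the deadline guarantees the node already reads as expired when the synthesized netmap is built, avoiding a re-arm loop at the boundary.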
6 changes: 6 additions & 0 deletions control/controlclient/direct.go
@@ -77,6 +77,7 @@ type Direct struct {
 	popBrowser func(url string) // or nil
 	c2nHandler http.Handler // or nil
 	onClientVersion func(*tailcfg.ClientVersion) // or nil
+	onControlTime func(time.Time) // or nil
 
 	dialPlan ControlDialPlanner // can be nil

@@ -116,6 +117,7 @@ type Options struct {
 	LinkMonitor *monitor.Mon // optional link monitor
 	PopBrowserURL func(url string) // optional func to open browser
 	OnClientVersion func(*tailcfg.ClientVersion) // optional func to inform GUI of client version status
+	OnControlTime func(time.Time) // optional func to notify callers of new time from control
 	Dialer *tsdial.Dialer // non-nil
 	C2NHandler http.Handler // or nil

@@ -244,6 +246,7 @@ func NewDirect(opts Options) (*Direct, error) {
 		pinger: opts.Pinger,
 		popBrowser: opts.PopBrowserURL,
 		onClientVersion: opts.OnClientVersion,
+		onControlTime: opts.OnControlTime,
 		c2nHandler: opts.C2NHandler,
 		dialer: opts.Dialer,
 		dialPlan: opts.DialPlan,
@@ -1016,6 +1019,9 @@ func (c *Direct) sendMapRequest(ctx context.Context, maxPolls int, readOnly bool
 	}
 	if resp.ControlTime != nil && !resp.ControlTime.IsZero() {
 		c.logf.JSON(1, "controltime", resp.ControlTime.UTC())
+		if c.onControlTime != nil {
+			c.onControlTime(*resp.ControlTime)
+		}
 	}
 	if resp.KeepAlive {
 		vlogf("netmap: got keep-alive")
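The new OnControlTime hook is how control's clock now reaches the caller. A minimal sketch of consuming it to track clock skew, mirroring the minClockDelta logic being deleted from map.go below — the Options field is from this diff, but everything else (the package, the clockDelta variable, where the threshold lives) is an assumption about the LocalBackend side:

```go
// Illustrative wiring — not the commit's actual LocalBackend code.
package sketch

import (
	"sync/atomic"
	"time"

	"tailscale.com/control/controlclient"
)

// clockDelta holds the last observed control-vs-local clock skew in
// nanoseconds, so it can be read from other goroutines.
var clockDelta atomic.Int64

func newOptions() controlclient.Options {
	return controlclient.Options{
		// ... other required fields elided ...
		OnControlTime: func(controlTime time.Time) {
			delta := time.Until(controlTime) // controlTime minus local now
			if delta.Abs() < time.Minute {
				delta = 0 // small skew is indistinguishable from network latency
			}
			clockDelta.Store(int64(delta))
		},
	}
}
```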
96 changes: 5 additions & 91 deletions control/controlclient/map.go
@@ -9,7 +9,6 @@ import (
 	"log"
 	"net/netip"
 	"sort"
-	"time"
 
 	"tailscale.com/envknob"
 	"tailscale.com/tailcfg"
@@ -53,12 +52,6 @@ type mapSession struct {
 	lastPopBrowserURL string
 	stickyDebug tailcfg.Debug // accumulated opt.Bool values
 	lastTKAInfo *tailcfg.TKAInfo
-	previouslyExpired map[tailcfg.StableNodeID]bool // to avoid log spam
-
-	// clockDelta stores the delta between the current time and the time
-	// received from control such that:
-	//	time.Now().Add(clockDelta) == MapResponse.ControlTime
-	clockDelta time.Duration
 
 	// netMapBuilding is non-nil during a netmapForResponse call,
 	// containing the value to be returned, once fully populated.

 func newMapSession(privateNodeKey key.NodePrivate) *mapSession {
 	ms := &mapSession{
-		privateNodeKey:    privateNodeKey,
-		logf:              logger.Discard,
-		vlogf:             logger.Discard,
-		lastDNSConfig:     new(tailcfg.DNSConfig),
-		lastUserProfile:   map[tailcfg.UserID]tailcfg.UserProfile{},
-		previouslyExpired: map[tailcfg.StableNodeID]bool{},
+		privateNodeKey:  privateNodeKey,
+		logf:            logger.Discard,
+		vlogf:           logger.Discard,
+		lastDNSConfig:   new(tailcfg.DNSConfig),
+		lastUserProfile: map[tailcfg.UserID]tailcfg.UserProfile{},
 	}
 	return ms
 }
@@ -93,7 +85,6 @@ func (ms *mapSession) addUserProfile(userID tailcfg.UserID) {
 // information from prior MapResponse values.
 func (ms *mapSession) netmapForResponse(resp *tailcfg.MapResponse) *netmap.NetworkMap {
 	undeltaPeers(resp, ms.previousPeers)
-	ms.flagExpiredPeers(resp)
 
 	ms.previousPeers = cloneNodes(resp.Peers) // defensive/lazy clone, since this escapes to who knows where
 	for _, up := range resp.UserProfiles {
@@ -352,83 +343,6 @@ func undeltaPeers(mapRes *tailcfg.MapResponse, prev []*tailcfg.Node) {
 	mapRes.PeersRemoved = nil
 }

-// For extra defense-in-depth, when we're testing expired nodes we check
-// ControlTime against this 'epoch' (set to the approximate time that this code
-// was written) such that if control (or Headscale, etc.) sends a ControlTime
-// that's sufficiently far in the past, we can safely ignore it.
-var flagExpiredPeersEpoch = time.Unix(1673373066, 0)
-
-// If the offset between the current time and the time received from control is
-// larger than this, we store an offset in our mapSession to adjust future
-// clock timings.
-const minClockDelta = 1 * time.Minute
-
-// flagExpiredPeers updates mapRes.Peers, mutating all peers that have expired,
-// taking into account any clock skew detected by using the ControlTime field
-// in the MapResponse. We don't actually remove expired peers from the Peers
-// array; instead, we clear some fields of the Node object, and set
-// Node.Expired so other parts of the codebase can provide more clear error
-// messages when attempting to e.g. ping an expired node.
-//
-// This is additionally a defense-in-depth against something going wrong with
-// control such that we start seeing expired peers with a valid Endpoints or
-// DERP field.
-func (ms *mapSession) flagExpiredPeers(mapRes *tailcfg.MapResponse) {
-	localNow := clockNow()
-
-	// If we have a ControlTime field, update our delta.
-	if mapRes.ControlTime != nil && !mapRes.ControlTime.IsZero() {
-		delta := mapRes.ControlTime.Sub(localNow)
-		if delta.Abs() > minClockDelta {
-			ms.logf("[v1] netmap: flagExpiredPeers: setting clock delta to %v", delta)
-			ms.clockDelta = delta
-		} else {
-			ms.clockDelta = 0
-		}
-	}
-
-	// Adjust our current time by any saved delta to adjust for clock skew.
-	controlNow := localNow.Add(ms.clockDelta)
-	if controlNow.Before(flagExpiredPeersEpoch) {
-		ms.logf("netmap: flagExpiredPeers: [unexpected] delta-adjusted current time is before hardcoded epoch; skipping")
-		return
-	}
-
-	for _, peer := range mapRes.Peers {
-		// Nodes that don't expire have KeyExpiry set to the zero time;
-		// skip those and peers that are already marked as expired
-		// (e.g. from control).
-		if peer.KeyExpiry.IsZero() || peer.KeyExpiry.After(controlNow) {
-			delete(ms.previouslyExpired, peer.StableID)
-			continue
-		} else if peer.Expired {
-			continue
-		}
-
-		if !ms.previouslyExpired[peer.StableID] {
-			ms.logf("[v1] netmap: flagExpiredPeers: clearing expired peer %v", peer.StableID)
-			ms.previouslyExpired[peer.StableID] = true
-		}
-
-		// Actually mark the node as expired
-		peer.Expired = true
-
-		// Control clears the Endpoints and DERP fields of expired
-		// nodes; do so here as well. The Expired bool is the correct
-		// thing to set, but this replicates the previous behaviour.
-		//
-		// NOTE: this is insufficient to actually break connectivity,
-		// since we discover endpoints via DERP, and due to DERP return
-		// path optimization.
-		peer.Endpoints = nil
-		peer.DERP = ""
-
-		// Defense-in-depth: break the node's public key as well, in
-		// case something tries to communicate.
-		peer.Key = key.NodePublicWithBadOldPrefix(peer.Key)
-	}
-}

 // ptrCopy returns a pointer to a newly allocated shallow copy of *v.
 func ptrCopy[T any](v *T) *T {
 	if v == nil {
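With flagExpiredPeers gone from mapSession, an equivalent pass presumably runs inside LocalBackend on every netmap, whether it came from control or was synthesized by the expiry timer. A standalone sketch of that pass, operating on a *netmap.NetworkMap rather than a *tailcfg.MapResponse — the name, signature, and placement are assumptions; only the field mutations mirror the deleted code above:

```go
// Illustrative sketch only — not the commit's actual code.
package sketch

import (
	"time"

	"tailscale.com/types/key"
	"tailscale.com/types/netmap"
)

// flagExpiredPeers marks peers whose keys have expired as of
// controlNow (the local clock adjusted by any stored clock delta).
// Peers stay in nm.Peers; we only set Expired and strip fields, as
// the removed mapSession code did.
func flagExpiredPeers(nm *netmap.NetworkMap, controlNow time.Time) {
	for _, peer := range nm.Peers {
		// Zero KeyExpiry means the node never expires.
		if peer.KeyExpiry.IsZero() || peer.KeyExpiry.After(controlNow) {
			continue
		}
		if peer.Expired {
			continue // already flagged, e.g. by control
		}
		peer.Expired = true

		// Clear dial information and break the public key, as
		// defense-in-depth against continued communication.
		peer.Endpoints = nil
		peer.DERP = ""
		peer.Key = key.NodePublicWithBadOldPrefix(peer.Key)
	}
}
```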
106 changes: 0 additions & 106 deletions control/controlclient/map_test.go
@@ -308,112 +308,6 @@ func TestUndeltaPeers(t *testing.T) {
 	}
 }

-func TestFlagExpiredPeers(t *testing.T) {
-	n := func(id tailcfg.NodeID, name string, expiry time.Time, mod ...func(*tailcfg.Node)) *tailcfg.Node {
-		n := &tailcfg.Node{ID: id, Name: name, KeyExpiry: expiry}
-		for _, f := range mod {
-			f(n)
-		}
-		return n
-	}
-
-	now := time.Unix(1673373129, 0)
-
-	oldClockNow := clockNow
-	clockNow = func() time.Time { return now }
-	t.Cleanup(func() { clockNow = oldClockNow })
-
-	timeInPast := now.Add(-1 * time.Hour)
-	timeInFuture := now.Add(1 * time.Hour)
-
-	timeBeforeEpoch := flagExpiredPeersEpoch.Add(-1 * time.Second)
-	if now.Before(timeBeforeEpoch) {
-		panic("current time in test cannot be before epoch")
-	}
-
-	var expiredKey key.NodePublic
-	if err := expiredKey.UnmarshalText([]byte("nodekey:6da774d5d7740000000000000000000000000000000000000000000000000000")); err != nil {
-		panic(err)
-	}
-
-	tests := []struct {
-		name   string
-		mapRes *tailcfg.MapResponse
-		want   []*tailcfg.Node
-	}{
-		{
-			name: "no_expiry",
-			mapRes: &tailcfg.MapResponse{
-				ControlTime: &now,
-				Peers: []*tailcfg.Node{
-					n(1, "foo", timeInFuture),
-					n(2, "bar", timeInFuture),
-				},
-			},
-			want: []*tailcfg.Node{
-				n(1, "foo", timeInFuture),
-				n(2, "bar", timeInFuture),
-			},
-		},
-		{
-			name: "expiry",
-			mapRes: &tailcfg.MapResponse{
-				ControlTime: &now,
-				Peers: []*tailcfg.Node{
-					n(1, "foo", timeInFuture),
-					n(2, "bar", timeInPast),
-				},
-			},
-			want: []*tailcfg.Node{
-				n(1, "foo", timeInFuture),
-				n(2, "bar", timeInPast, func(n *tailcfg.Node) {
-					n.Expired = true
-					n.Key = expiredKey
-				}),
-			},
-		},
-		{
-			name: "bad_ControlTime",
-			mapRes: &tailcfg.MapResponse{
-				// ControlTime here is intentionally before our hardcoded epoch
-				ControlTime: &timeBeforeEpoch,
-
-				Peers: []*tailcfg.Node{
-					n(1, "foo", timeInFuture),
-					n(2, "bar", timeBeforeEpoch.Add(-1*time.Hour)), // before ControlTime
-				},
-			},
-			want: []*tailcfg.Node{
-				n(1, "foo", timeInFuture),
-				n(2, "bar", timeBeforeEpoch.Add(-1*time.Hour)), // should have expired, but ControlTime is before epoch
-			},
-		},
-		{
-			name: "tagged_node",
-			mapRes: &tailcfg.MapResponse{
-				ControlTime: &now,
-				Peers: []*tailcfg.Node{
-					n(1, "foo", timeInFuture),
-					n(2, "bar", time.Time{}), // tagged node; zero expiry
-				},
-			},
-			want: []*tailcfg.Node{
-				n(1, "foo", timeInFuture),
-				n(2, "bar", time.Time{}), // not expired
-			},
-		},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			ms := newTestMapSession(t)
-			ms.flagExpiredPeers(tt.mapRes)
-			if !reflect.DeepEqual(tt.mapRes.Peers, tt.want) {
-				t.Errorf("wrong results\n got: %s\nwant: %s", formatNodes(tt.mapRes.Peers), formatNodes(tt.want))
-			}
-		})
-	}
-}

 func formatNodes(nodes []*tailcfg.Node) string {
 	var sb strings.Builder
 	for i, n := range nodes {
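The deleted table-driven test presumably moves alongside the logic. A compressed sketch against the hypothetical flagExpiredPeers above, keeping the same interesting cases (future expiry, past expiry, and a tagged node's zero expiry):

```go
// Illustrative sketch only — not the commit's actual test.
package sketch

import (
	"testing"
	"time"

	"tailscale.com/tailcfg"
	"tailscale.com/types/netmap"
)

func TestFlagExpiredPeers(t *testing.T) {
	now := time.Unix(1673373129, 0)
	nm := &netmap.NetworkMap{
		Peers: []*tailcfg.Node{
			{ID: 1, KeyExpiry: now.Add(time.Hour)},  // still valid
			{ID: 2, KeyExpiry: now.Add(-time.Hour)}, // expired an hour ago
			{ID: 3},                                 // zero expiry: never expires
		},
	}
	flagExpiredPeers(nm, now)
	for i, want := range []bool{false, true, false} {
		if got := nm.Peers[i].Expired; got != want {
			t.Errorf("peer %d: Expired = %v, want %v", nm.Peers[i].ID, got, want)
		}
	}
}
```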
