
Commit ec72cba

chore: add support for blockEndpoints to configMaps
1 parent b573a10

File tree

2 files changed, +105 -4 lines changed


tailnet/configmaps.go (+18 -4)
@@ -207,7 +207,11 @@ func (c *configMaps) netMapLocked() *netmap.NetworkMap {
 func (c *configMaps) peerConfigLocked() []*tailcfg.Node {
 	out := make([]*tailcfg.Node, 0, len(c.peers))
 	for _, p := range c.peers {
-		out = append(out, p.node.Clone())
+		n := p.node.Clone()
+		if c.blockEndpoints {
+			n.Endpoints = nil
+		}
+		out = append(out, n)
 	}
 	return out
 }
@@ -228,6 +232,19 @@ func (c *configMaps) setAddresses(ips []netip.Prefix) {
 	c.Broadcast()
 }
 
+// setBlockEndpoints sets whether we should block configuring endpoints we learn
+// from peers. It triggers a configuration of the engine if the value changes.
+// nolint: revive
+func (c *configMaps) setBlockEndpoints(blockEndpoints bool) {
+	c.L.Lock()
+	defer c.L.Unlock()
+	if c.blockEndpoints != blockEndpoints {
+		c.netmapDirty = true
+	}
+	c.blockEndpoints = blockEndpoints
+	c.Broadcast()
+}
+
 // derpMapLocked returns the current DERPMap. c.L must be held
 func (c *configMaps) derpMapLocked() *tailcfg.DERPMap {
 	m := DERPMapFromProto(c.derpMap)
@@ -342,9 +359,6 @@ func (c *configMaps) updatePeerLocked(update *proto.CoordinateResponse_PeerUpdate
 		// to avoid random hangs while we set up the connection again after
 		// inactivity.
 		node.KeepAlive = ok && peerStatus.Active
-		if c.blockEndpoints {
-			node.Endpoints = nil
-		}
 	}
 	switch {
 	case !ok && update.Kind == proto.CoordinateResponse_PeerUpdate_NODE:
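
The new setter follows the same lock-and-broadcast pattern as setAddresses above: take c.L, mark the netmap dirty only if the value actually changed, and Broadcast so the goroutine that applies configuration to the engine wakes up. Below is a minimal, self-contained sketch of that dirty-flag pattern; the trimmed-down dirtyConfig struct and configLoop goroutine are illustrative assumptions, not the real configMaps.

	package main

	import (
		"fmt"
		"sync"
	)

	// dirtyConfig is an illustrative stand-in for configMaps: setters flip a
	// dirty bit under the lock and Broadcast; a single loop goroutine applies
	// the configuration whenever the bit is set.
	type dirtyConfig struct {
		*sync.Cond
		blockEndpoints bool
		netmapDirty    bool
		closed         bool
	}

	func (c *dirtyConfig) setBlockEndpoints(block bool) {
		c.L.Lock()
		defer c.L.Unlock()
		if c.blockEndpoints != block {
			c.netmapDirty = true // reconfigure only on a real change
		}
		c.blockEndpoints = block
		c.Broadcast()
	}

	// configLoop stands in for the engine-configuration goroutine. It drains
	// any pending dirty state before honoring close.
	func (c *dirtyConfig) configLoop() {
		c.L.Lock()
		defer c.L.Unlock()
		for {
			switch {
			case c.netmapDirty:
				c.netmapDirty = false
				fmt.Println("reconfiguring engine; blockEndpoints =", c.blockEndpoints)
			case c.closed:
				return
			default:
				c.Wait()
			}
		}
	}

	func main() {
		c := &dirtyConfig{Cond: sync.NewCond(&sync.Mutex{})}
		done := make(chan struct{})
		go func() { defer close(done); c.configLoop() }()

		c.setBlockEndpoints(true) // value changed: triggers one reconfiguration
		c.setBlockEndpoints(true) // same value: the loop stays idle

		c.L.Lock()
		c.closed = true
		c.Broadcast()
		c.L.Unlock()
		<-done
	}

Note also that the endpoint filtering moved out of updatePeerLocked and into peerConfigLocked, so flipping the flag rewrites the endpoints of every peer already in the map on the next reconfiguration, rather than only affecting peer updates that arrive afterwards.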

tailnet/configmaps_internal_test.go (+87 -0)
@@ -484,6 +484,93 @@ func TestConfigMaps_updatePeers_lost_and_found(t *testing.T) {
 	_ = testutil.RequireRecvCtx(ctx, t, done)
 }
 
+func TestConfigMaps_setBlockEndpoints_different(t *testing.T) {
+	t.Parallel()
+	ctx := testutil.Context(t, testutil.WaitShort)
+	logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
+	fEng := newFakeEngineConfigurable()
+	nodePrivateKey := key.NewNode()
+	nodeID := tailcfg.NodeID(5)
+	discoKey := key.NewDisco()
+	uut := newConfigMaps(logger, fEng, nodeID, nodePrivateKey, discoKey.Public(), nil)
+	defer uut.close()
+
+	p1ID := uuid.MustParse("10000000-0000-0000-0000-000000000000")
+	p1Node := newTestNode(1)
+	p1n, err := NodeToProto(p1Node)
+	require.NoError(t, err)
+	p1tcn, err := uut.protoNodeToTailcfg(p1n)
+	require.NoError(t, err)
+	p1tcn.KeepAlive = true
+
+	// Given: peer already exists
+	uut.L.Lock()
+	uut.peers[p1ID] = &peerLifecycle{
+		peerID:        p1ID,
+		node:          p1tcn,
+		lastHandshake: time.Date(2024, 1, 7, 12, 0, 10, 0, time.UTC),
+	}
+	uut.L.Unlock()
+
+	uut.setBlockEndpoints(true)
+
+	nm := testutil.RequireRecvCtx(ctx, t, fEng.setNetworkMap)
+	r := testutil.RequireRecvCtx(ctx, t, fEng.reconfig)
+	require.Len(t, nm.Peers, 1)
+	require.Len(t, nm.Peers[0].Endpoints, 0)
+	require.Len(t, r.wg.Peers, 1)
+
+	done := make(chan struct{})
+	go func() {
+		defer close(done)
+		uut.close()
+	}()
+	_ = testutil.RequireRecvCtx(ctx, t, done)
+}
+
+func TestConfigMaps_setBlockEndpoints_same(t *testing.T) {
+	t.Parallel()
+	ctx := testutil.Context(t, testutil.WaitShort)
+	logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
+	fEng := newFakeEngineConfigurable()
+	nodePrivateKey := key.NewNode()
+	nodeID := tailcfg.NodeID(5)
+	discoKey := key.NewDisco()
+	uut := newConfigMaps(logger, fEng, nodeID, nodePrivateKey, discoKey.Public(), nil)
+	defer uut.close()
+
+	p1ID := uuid.MustParse("10000000-0000-0000-0000-000000000000")
+	p1Node := newTestNode(1)
+	p1n, err := NodeToProto(p1Node)
+	require.NoError(t, err)
+	p1tcn, err := uut.protoNodeToTailcfg(p1n)
+	require.NoError(t, err)
+	p1tcn.KeepAlive = true
+
+	// Given: peer already exists && blockEndpoints set to true
+	uut.L.Lock()
+	uut.peers[p1ID] = &peerLifecycle{
+		peerID:        p1ID,
+		node:          p1tcn,
+		lastHandshake: time.Date(2024, 1, 7, 12, 0, 10, 0, time.UTC),
+	}
+	uut.blockEndpoints = true
+	uut.L.Unlock()
+
+	// Then: we don't configure
+	requireNeverConfigures(ctx, t, uut)
+
+	// When: we set blockEndpoints to true again
+	uut.setBlockEndpoints(true)
+
+	done := make(chan struct{})
+	go func() {
+		defer close(done)
+		uut.close()
+	}()
+	_ = testutil.RequireRecvCtx(ctx, t, done)
+}
+
 func expectStatusWithHandshake(
 	ctx context.Context, t testing.TB, fEng *fakeEngineConfigurable, k key.NodePublic, lastHandshake time.Time,
 ) <-chan struct{} {
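
Both tests drive configMaps against a fake engine and assert on what the engine receives, or that it receives nothing. A rough sketch of that channel-based fake pattern follows; fakeEngine, recvWithin, and neverWithin are illustrative stand-ins for newFakeEngineConfigurable, testutil.RequireRecvCtx, and requireNeverConfigures, whose real implementations are not part of this diff.

	package main

	import (
		"errors"
		"fmt"
		"time"
	)

	// fakeEngine records configuration calls on channels so a test can
	// either require a call or require silence.
	type fakeEngine struct {
		setNetworkMap chan string // the real fake would carry *netmap.NetworkMap
	}

	// recvWithin requires a value to arrive in time, in the spirit of
	// testutil.RequireRecvCtx.
	func recvWithin(ch <-chan string, d time.Duration) (string, error) {
		select {
		case v := <-ch:
			return v, nil
		case <-time.After(d):
			return "", errors.New("timed out waiting for engine call")
		}
	}

	// neverWithin requires that no call happens for a short window, in the
	// spirit of requireNeverConfigures.
	func neverWithin(ch <-chan string, d time.Duration) error {
		select {
		case v := <-ch:
			return fmt.Errorf("unexpected engine call: %q", v)
		case <-time.After(d):
			return nil
		}
	}

	func main() {
		fEng := &fakeEngine{setNetworkMap: make(chan string, 1)}

		// A real change should reconfigure: the test requires a receive.
		fEng.setNetworkMap <- "netmap with peer endpoints cleared"
		nm, err := recvWithin(fEng.setNetworkMap, time.Second)
		fmt.Println(nm, err)

		// A no-op change should not: the test requires silence.
		fmt.Println(neverWithin(fEng.setNetworkMap, 50*time.Millisecond))
	}

The buffered channel lets the code under test hand off a configuration without blocking, while the timeout arms keep a broken test from hanging forever.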
