|
| 1 | +package tailnet |
| 2 | + |
| 3 | +import ( |
| 4 | + "context" |
| 5 | + "errors" |
| 6 | + "net/netip" |
| 7 | + "sync" |
| 8 | + |
| 9 | + "github.com/google/uuid" |
| 10 | + "go4.org/netipx" |
| 11 | + "tailscale.com/net/dns" |
| 12 | + "tailscale.com/tailcfg" |
| 13 | + "tailscale.com/types/ipproto" |
| 14 | + "tailscale.com/types/key" |
| 15 | + "tailscale.com/types/netmap" |
| 16 | + "tailscale.com/wgengine" |
| 17 | + "tailscale.com/wgengine/filter" |
| 18 | + "tailscale.com/wgengine/router" |
| 19 | + "tailscale.com/wgengine/wgcfg" |
| 20 | + "tailscale.com/wgengine/wgcfg/nmcfg" |
| 21 | + |
| 22 | + "cdr.dev/slog" |
| 23 | + "github.com/coder/coder/v2/tailnet/proto" |
| 24 | +) |
| 25 | + |
// engineConfigurable is the subset of wgengine.Engine that we use for configuration.
//
// This allows us to test configuration code without faking the whole interface.
type engineConfigurable interface {
	// SetNetworkMap passes the netmap (self node, peers, DERP map) to the engine.
	SetNetworkMap(*netmap.NetworkMap)
	// Reconfig applies wireguard, router, DNS, and debug configuration.
	Reconfig(*wgcfg.Config, *router.Config, *dns.Config, *tailcfg.Debug) error
	// SetDERPMap sets the map of DERP relay servers.
	SetDERPMap(*tailcfg.DERPMap)
	// SetFilter installs the packet filter.
	SetFilter(*filter.Filter)
}
| 35 | + |
// phase tracks the lifecycle of the configLoop goroutine; transitions are
// broadcast on the configMaps Cond so that waiters (e.g. close) can observe
// them.
type phase int

const (
	// idle: configLoop is blocked waiting for a dirty flag or closing.
	idle phase = iota
	// configuring: configLoop is applying configuration to the engine.
	configuring
	// closed: configLoop has exited; set exactly once, on return.
	closed
)
| 43 | + |
// configMaps tracks the dirty state of the engine's configuration (network
// map, DERP map, packet filter) and drives a background goroutine
// (configLoop) that applies pending changes. All mutable fields are guarded
// by the embedded Cond's locker (c.L).
type configMaps struct {
	sync.Cond
	// dirty flags: set under c.L when the corresponding configuration has
	// changed and needs to be pushed to the engine.
	netmapDirty  bool
	derpMapDirty bool
	filterDirty  bool
	// closing is set by close() to ask configLoop to exit.
	closing bool
	// phase reflects the current state of configLoop.
	phase phase

	engine engineConfigurable
	// static holds configuration fixed at construction time (self node
	// identity, keys, permissive packet filter).
	static netmap.NetworkMap
	peers  map[uuid.UUID]*peerLifecycle
	// addresses are the IP prefixes assigned to this node.
	addresses []netip.Prefix
	derpMap   *proto.DERPMap
	logger    slog.Logger
}
| 59 | + |
| 60 | +func newConfigMaps(logger slog.Logger, engine engineConfigurable, nodeID tailcfg.NodeID, nodeKey key.NodePrivate, discoKey key.DiscoPublic, addresses []netip.Prefix) *configMaps { |
| 61 | + pubKey := nodeKey.Public() |
| 62 | + c := &configMaps{ |
| 63 | + Cond: *(sync.NewCond(&sync.Mutex{})), |
| 64 | + logger: logger, |
| 65 | + engine: engine, |
| 66 | + static: netmap.NetworkMap{ |
| 67 | + SelfNode: &tailcfg.Node{ |
| 68 | + ID: nodeID, |
| 69 | + Key: pubKey, |
| 70 | + DiscoKey: discoKey, |
| 71 | + }, |
| 72 | + NodeKey: pubKey, |
| 73 | + PrivateKey: nodeKey, |
| 74 | + PacketFilter: []filter.Match{{ |
| 75 | + // Allow any protocol! |
| 76 | + IPProto: []ipproto.Proto{ipproto.TCP, ipproto.UDP, ipproto.ICMPv4, ipproto.ICMPv6, ipproto.SCTP}, |
| 77 | + // Allow traffic sourced from anywhere. |
| 78 | + Srcs: []netip.Prefix{ |
| 79 | + netip.PrefixFrom(netip.AddrFrom4([4]byte{}), 0), |
| 80 | + netip.PrefixFrom(netip.AddrFrom16([16]byte{}), 0), |
| 81 | + }, |
| 82 | + // Allow traffic to route anywhere. |
| 83 | + Dsts: []filter.NetPortRange{ |
| 84 | + { |
| 85 | + Net: netip.PrefixFrom(netip.AddrFrom4([4]byte{}), 0), |
| 86 | + Ports: filter.PortRange{ |
| 87 | + First: 0, |
| 88 | + Last: 65535, |
| 89 | + }, |
| 90 | + }, |
| 91 | + { |
| 92 | + Net: netip.PrefixFrom(netip.AddrFrom16([16]byte{}), 0), |
| 93 | + Ports: filter.PortRange{ |
| 94 | + First: 0, |
| 95 | + Last: 65535, |
| 96 | + }, |
| 97 | + }, |
| 98 | + }, |
| 99 | + Caps: []filter.CapMatch{}, |
| 100 | + }}, |
| 101 | + }, |
| 102 | + peers: make(map[uuid.UUID]*peerLifecycle), |
| 103 | + addresses: addresses, |
| 104 | + } |
| 105 | + go c.configLoop() |
| 106 | + return c |
| 107 | +} |
| 108 | + |
// configLoop waits for the config to be dirty, then reconfigures the engine.
// It is internal to configMaps
func (c *configMaps) configLoop() {
	c.L.Lock()
	defer c.L.Unlock()
	// On exit (closing, or a panic unwinding), advertise that we are done so
	// close() stops waiting.
	defer func() {
		c.phase = closed
		c.Broadcast()
	}()
	for {
		// Sleep until there is work to do or we are asked to shut down.
		for !(c.closing || c.netmapDirty || c.filterDirty || c.derpMapDirty) {
			c.phase = idle
			c.Wait()
		}
		if c.closing {
			return
		}
		// queue up the reconfiguration actions we will take while we have
		// the configMaps locked. We will execute them while unlocked to avoid
		// blocking during reconfig.
		actions := make([]func(), 0, 3)
		if c.derpMapDirty {
			derpMap := c.derpMapLocked()
			actions = append(actions, func() {
				c.engine.SetDERPMap(derpMap)
			})
		}
		if c.netmapDirty {
			nm := c.netMapLocked()
			actions = append(actions, func() {
				c.engine.SetNetworkMap(nm)
				c.reconfig(nm)
			})
		}
		if c.filterDirty {
			f := c.filterLocked()
			actions = append(actions, func() {
				c.engine.SetFilter(f)
			})
		}

		// Clear the dirty flags before unlocking: any change made while we
		// are reconfiguring re-dirties the flags and wakes us for another
		// pass.
		c.netmapDirty = false
		c.filterDirty = false
		c.derpMapDirty = false
		c.phase = configuring
		c.Broadcast()

		// Run the queued actions without holding the lock.
		c.L.Unlock()
		for _, a := range actions {
			a()
		}
		c.L.Lock()
	}
}
| 163 | + |
| 164 | +// close closes the configMaps and stops it configuring the engine |
| 165 | +func (c *configMaps) close() { |
| 166 | + c.L.Lock() |
| 167 | + defer c.L.Unlock() |
| 168 | + c.closing = true |
| 169 | + c.Broadcast() |
| 170 | + for c.phase != closed { |
| 171 | + c.Wait() |
| 172 | + } |
| 173 | +} |
| 174 | + |
// netMapLocked returns the current NetworkMap as determined by the config we
// have. c.L must be held.
func (c *configMaps) netMapLocked() *netmap.NetworkMap {
	nm := new(netmap.NetworkMap)
	// Shallow copy of the static config; pointer fields (notably SelfNode)
	// remain shared with c.static.
	*nm = c.static

	// Fresh copy of our addresses so the caller cannot race later mutations
	// of c.addresses.
	nm.Addresses = make([]netip.Prefix, len(c.addresses))
	copy(nm.Addresses, c.addresses)

	nm.DERPMap = DERPMapFromProto(c.derpMap)
	nm.Peers = c.peerConfigLocked()
	// NOTE(review): SelfNode is the same pointer held by c.static, so these
	// writes also update the static map's self node — presumably intentional
	// so it always reflects the latest addresses; confirm.
	nm.SelfNode.Addresses = nm.Addresses
	nm.SelfNode.AllowedIPs = nm.Addresses
	return nm
}
| 190 | + |
| 191 | +// peerConfigLocked returns the set of peer nodes we have. c.L must be held. |
| 192 | +func (c *configMaps) peerConfigLocked() []*tailcfg.Node { |
| 193 | + out := make([]*tailcfg.Node, 0, len(c.peers)) |
| 194 | + for _, p := range c.peers { |
| 195 | + out = append(out, p.node.Clone()) |
| 196 | + } |
| 197 | + return out |
| 198 | +} |
| 199 | + |
| 200 | +// setAddresses sets the addresses belonging to this node to the given slice. It |
| 201 | +// triggers configuration of the engine if the addresses have changed. |
| 202 | +// c.L MUST NOT be held. |
| 203 | +func (c *configMaps) setAddresses(ips []netip.Prefix) { |
| 204 | + c.L.Lock() |
| 205 | + defer c.L.Unlock() |
| 206 | + if d := prefixesDifferent(c.addresses, ips); !d { |
| 207 | + return |
| 208 | + } |
| 209 | + c.addresses = make([]netip.Prefix, len(ips)) |
| 210 | + copy(c.addresses, ips) |
| 211 | + c.netmapDirty = true |
| 212 | + c.filterDirty = true |
| 213 | + c.Broadcast() |
| 214 | +} |
| 215 | + |
| 216 | +// derMapLocked returns the current DERPMap. c.L must be held |
| 217 | +func (c *configMaps) derpMapLocked() *tailcfg.DERPMap { |
| 218 | + m := DERPMapFromProto(c.derpMap) |
| 219 | + return m |
| 220 | +} |
| 221 | + |
| 222 | +// reconfig computes the correct wireguard config and calls the engine.Reconfig |
| 223 | +// with the config we have. It is not intended for this to be called outside of |
| 224 | +// the updateLoop() |
| 225 | +func (c *configMaps) reconfig(nm *netmap.NetworkMap) { |
| 226 | + cfg, err := nmcfg.WGCfg(nm, Logger(c.logger.Named("net.wgconfig")), netmap.AllowSingleHosts, "") |
| 227 | + if err != nil { |
| 228 | + // WGCfg never returns an error at the time this code was written. If it starts, returning |
| 229 | + // errors if/when we upgrade tailscale, we'll need to deal. |
| 230 | + c.logger.Critical(context.Background(), "update wireguard config failed", slog.Error(err)) |
| 231 | + return |
| 232 | + } |
| 233 | + |
| 234 | + rc := &router.Config{LocalAddrs: nm.Addresses} |
| 235 | + err = c.engine.Reconfig(cfg, rc, &dns.Config{}, &tailcfg.Debug{}) |
| 236 | + if err != nil { |
| 237 | + if errors.Is(err, wgengine.ErrNoChanges) { |
| 238 | + return |
| 239 | + } |
| 240 | + c.logger.Error(context.Background(), "failed to reconfigure wireguard engine", slog.Error(err)) |
| 241 | + } |
| 242 | +} |
| 243 | + |
| 244 | +// filterLocked returns the current filter, based on our local addresses. c.L |
| 245 | +// must be held. |
| 246 | +func (c *configMaps) filterLocked() *filter.Filter { |
| 247 | + localIPSet := netipx.IPSetBuilder{} |
| 248 | + for _, addr := range c.addresses { |
| 249 | + localIPSet.AddPrefix(addr) |
| 250 | + } |
| 251 | + localIPs, _ := localIPSet.IPSet() |
| 252 | + logIPSet := netipx.IPSetBuilder{} |
| 253 | + logIPs, _ := logIPSet.IPSet() |
| 254 | + return filter.New( |
| 255 | + c.static.PacketFilter, |
| 256 | + localIPs, |
| 257 | + logIPs, |
| 258 | + nil, |
| 259 | + Logger(c.logger.Named("net.packet-filter")), |
| 260 | + ) |
| 261 | +} |
| 262 | + |
// peerLifecycle holds the per-peer state we track: currently just the
// peer's node, with liveness tracking to follow.
type peerLifecycle struct {
	node *tailcfg.Node
	// TODO: implement timers to track lost peers
	// lastHandshake time.Time
	// timer time.Timer
}
| 269 | + |
| 270 | +// prefixesDifferent returns true if the two slices contain different prefixes |
| 271 | +// where order doesn't matter. |
| 272 | +func prefixesDifferent(a, b []netip.Prefix) bool { |
| 273 | + if len(a) != len(b) { |
| 274 | + return true |
| 275 | + } |
| 276 | + as := make(map[string]bool) |
| 277 | + for _, p := range a { |
| 278 | + as[p.String()] = true |
| 279 | + } |
| 280 | + for _, p := range b { |
| 281 | + if !as[p.String()] { |
| 282 | + return true |
| 283 | + } |
| 284 | + } |
| 285 | + return false |
| 286 | +} |
0 commit comments