From af5a992e0cab038ae81d05f40357277137e022c3 Mon Sep 17 00:00:00 2001
From: Kyle Carberry
Date: Tue, 7 Mar 2023 15:21:01 +0000
Subject: [PATCH] chore: fix coordinator flake by moving pubsub below register

After making the in-memory pubsub conform to the expectations of
PostgreSQL, this flake started appearing. It is fixed by publishing the
agent hello only after the agent socket has been registered, so the
socket already exists by the time a response message is received.
---
 enterprise/tailnet/coordinator.go | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/enterprise/tailnet/coordinator.go b/enterprise/tailnet/coordinator.go
index 99234d2a00146..e4ff982723983 100644
--- a/enterprise/tailnet/coordinator.go
+++ b/enterprise/tailnet/coordinator.go
@@ -197,12 +197,6 @@ func (c *haCoordinator) handleNextClientMessage(id, agent uuid.UUID, decoder *js
 func (c *haCoordinator) ServeAgent(conn net.Conn, id uuid.UUID, name string) error {
 	c.agentNameCache.Add(id, name)
 
-	// Tell clients on other instances to send a callmemaybe to us.
-	err := c.publishAgentHello(id)
-	if err != nil {
-		return xerrors.Errorf("publish agent hello: %w", err)
-	}
-
 	// Publish all nodes on this instance that want to connect to this agent.
 	nodes := c.nodesSubscribedToAgent(id)
 	if len(nodes) > 0 {
@@ -241,6 +235,12 @@ func (c *haCoordinator) ServeAgent(conn net.Conn, id uuid.UUID, name string) err
 	}
 	c.mutex.Unlock()
 
+	// Tell clients on other instances to send a callmemaybe to us.
+	err := c.publishAgentHello(id)
+	if err != nil {
+		return xerrors.Errorf("publish agent hello: %w", err)
+	}
+
 	defer func() {
 		c.mutex.Lock()
 		defer c.mutex.Unlock()
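
The reasoning behind the reorder can be shown with a minimal, self-contained Go sketch. It is not the real haCoordinator code: the coordinator, registerAgent, and handleHello names below are hypothetical, and pubsub delivery is modeled as a direct call purely to expose the ordering hazard. It demonstrates that if the hello is published before the agent socket is placed in the shared map, a subscriber's reply can arrive while the lookup still misses, which is the flake this patch removes.

// Hypothetical, simplified illustration of the ordering issue fixed by this
// patch. Names do not mirror the real coder codebase; they only show why the
// agent socket must be registered before the hello is published.
package main

import (
	"fmt"
	"sync"
)

type coordinator struct {
	mu     sync.Mutex
	agents map[string]chan string // registered agent "sockets"
}

// handleHello is what a pubsub subscriber would run when it sees an agent
// hello. It expects the agent to already be registered; otherwise the reply
// has nowhere to go and is dropped.
func (c *coordinator) handleHello(agentID string) {
	c.mu.Lock()
	ch, ok := c.agents[agentID]
	c.mu.Unlock()
	if !ok {
		fmt.Println("flake: hello handled before agent was registered")
		return
	}
	ch <- "callmemaybe"
}

// registerAgent records the agent's socket in the shared map.
func (c *coordinator) registerAgent(agentID string) chan string {
	ch := make(chan string, 1)
	c.mu.Lock()
	c.agents[agentID] = ch
	c.mu.Unlock()
	return ch
}

func main() {
	// Correct order (what the patch establishes): register, then publish.
	c := &coordinator{agents: map[string]chan string{}}
	ch := c.registerAgent("agent-1")
	c.handleHello("agent-1") // hello delivered after registration
	fmt.Println("agent received:", <-ch)

	// Broken order (what the patch removes): publish before registering.
	c2 := &coordinator{agents: map[string]chan string{}}
	c2.handleHello("agent-2") // delivered before registerAgent runs: dropped
	_ = c2.registerAgent("agent-2")
}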