From b655768798be7decd0fb04e197109c2f6edfa601 Mon Sep 17 00:00:00 2001
From: Dean Sheather
Date: Thu, 7 Mar 2024 07:42:50 -0800
Subject: [PATCH 1/5] fix: do not set max deadline for workspaces on template
 update (#12446)

* fix: do not set max deadline for workspaces on template update

When templates are updated and schedule data is changed, we update all
running workspaces to have up-to-date scheduling information that adheres
to the new policy.

When updating the max_deadline for existing running workspaces, if the
max_deadline was before now()+2h we would set the max_deadline to
now()+2h. Builds that don't/shouldn't have a max_deadline have it set to
0, which is always before now()+2h, and thus would always have the
max_deadline updated.

* test: add unit test to exercise template schedule bug

---------

Co-authored-by: Steven Masley
---
 enterprise/coderd/schedule/template.go      |  13 +-
 enterprise/coderd/schedule/template_test.go | 128 +++++++++++++++-----
 2 files changed, 109 insertions(+), 32 deletions(-)

diff --git a/enterprise/coderd/schedule/template.go b/enterprise/coderd/schedule/template.go
index 69f868dfaa599..0de989dd9ce0e 100644
--- a/enterprise/coderd/schedule/template.go
+++ b/enterprise/coderd/schedule/template.go
@@ -274,15 +274,24 @@ func (s *EnterpriseTemplateScheduleStore) updateWorkspaceBuild(ctx context.Conte
 	}
 
 	// If max deadline is before now()+2h, then set it to that.
+	// This is intended to give the workspace ample warning of an upcoming
+	// auto-stop. If we omitted this "grace" period, the workspace could be
+	// stopped immediately. The two-hour window is an arbitrary choice.
 	now := s.now()
-	if autostop.MaxDeadline.Before(now.Add(2 * time.Hour)) {
+	if !autostop.MaxDeadline.IsZero() && autostop.MaxDeadline.Before(now.Add(2*time.Hour)) {
 		autostop.MaxDeadline = now.Add(time.Hour * 2)
 	}
 
 	// If the current deadline on the build is after the new max_deadline, then
 	// set it to the max_deadline.
 	autostop.Deadline = build.Deadline
-	if autostop.Deadline.After(autostop.MaxDeadline) {
+	if !autostop.MaxDeadline.IsZero() && autostop.Deadline.After(autostop.MaxDeadline) {
+		autostop.Deadline = autostop.MaxDeadline
+	}
+
+	// If there's a max_deadline but the deadline is 0, then set the deadline to
+	// the max_deadline.
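+	// Otherwise a set max_deadline would be ignored entirely (see the
+	// "WorkspaceDeadlineNowSet" test case).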
+ if !autostop.MaxDeadline.IsZero() && autostop.Deadline.IsZero() { autostop.Deadline = autostop.MaxDeadline } diff --git a/enterprise/coderd/schedule/template_test.go b/enterprise/coderd/schedule/template_test.go index ac158a795b3a3..4df05c026496f 100644 --- a/enterprise/coderd/schedule/template_test.go +++ b/enterprise/coderd/schedule/template_test.go @@ -16,6 +16,7 @@ import ( "github.com/coder/coder/v2/coderd/database/dbgen" "github.com/coder/coder/v2/coderd/database/dbtestutil" agplschedule "github.com/coder/coder/v2/coderd/schedule" + "github.com/coder/coder/v2/coderd/util/ptr" "github.com/coder/coder/v2/cryptorand" "github.com/coder/coder/v2/enterprise/coderd/schedule" "github.com/coder/coder/v2/testutil" @@ -27,30 +28,35 @@ func TestTemplateUpdateBuildDeadlines(t *testing.T) { db, _ := dbtestutil.NewDB(t) var ( - org = dbgen.Organization(t, db, database.Organization{}) - user = dbgen.User(t, db, database.User{}) + org = dbgen.Organization(t, db, database.Organization{}) + quietUser = dbgen.User(t, db, database.User{ + Username: "quiet", + }) + noQuietUser = dbgen.User(t, db, database.User{ + Username: "no-quiet", + }) file = dbgen.File(t, db, database.File{ - CreatedBy: user.ID, + CreatedBy: quietUser.ID, }) templateJob = dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{ OrganizationID: org.ID, FileID: file.ID, - InitiatorID: user.ID, + InitiatorID: quietUser.ID, Tags: database.StringMap{ "foo": "bar", }, }) templateVersion = dbgen.TemplateVersion(t, db, database.TemplateVersion{ OrganizationID: org.ID, - CreatedBy: user.ID, + CreatedBy: quietUser.ID, JobID: templateJob.ID, }) ) const userQuietHoursSchedule = "CRON_TZ=UTC 0 0 * * *" // midnight UTC ctx := testutil.Context(t, testutil.WaitLong) - user, err := db.UpdateUserQuietHoursSchedule(ctx, database.UpdateUserQuietHoursScheduleParams{ - ID: user.ID, + quietUser, err := db.UpdateUserQuietHoursSchedule(ctx, database.UpdateUserQuietHoursScheduleParams{ + ID: quietUser.ID, QuietHoursSchedule: userQuietHoursSchedule, }) require.NoError(t, err) @@ -62,12 +68,15 @@ func TestTemplateUpdateBuildDeadlines(t *testing.T) { // Workspace old max_deadline too soon cases := []struct { - name string - now time.Time - deadline time.Time - maxDeadline time.Time - newDeadline time.Time // 0 for no change + name string + now time.Time + deadline time.Time + maxDeadline time.Time + // Set to nil for no change. + newDeadline *time.Time newMaxDeadline time.Time + noQuietHours bool + autostopReq *agplschedule.TemplateAutostopRequirement }{ { name: "SkippedWorkspaceMaxDeadlineTooSoon", @@ -75,7 +84,7 @@ func TestTemplateUpdateBuildDeadlines(t *testing.T) { deadline: buildTime, maxDeadline: buildTime.Add(1 * time.Hour), // Unchanged since the max deadline is too soon. - newDeadline: time.Time{}, + newDeadline: nil, newMaxDeadline: buildTime.Add(1 * time.Hour), }, { @@ -85,7 +94,7 @@ func TestTemplateUpdateBuildDeadlines(t *testing.T) { deadline: buildTime, // Far into the future... maxDeadline: nextQuietHours.Add(24 * time.Hour), - newDeadline: time.Time{}, + newDeadline: nil, // We will use now() + 2 hours if the newly calculated max deadline // from the workspace build time is before now. newMaxDeadline: nextQuietHours.Add(8 * time.Hour), @@ -97,7 +106,7 @@ func TestTemplateUpdateBuildDeadlines(t *testing.T) { deadline: buildTime, // Far into the future... 
 			maxDeadline: nextQuietHours.Add(24 * time.Hour),
-			newDeadline:    time.Time{},
+			newDeadline:    nil,
 			// We will use now() + 2 hours if the newly calculated max deadline
 			// from the workspace build time is within the next 2 hours.
 			newMaxDeadline: nextQuietHours.Add(1 * time.Hour),
 		},
 		{
@@ -109,7 +118,7 @@ func TestTemplateUpdateBuildDeadlines(t *testing.T) {
 			deadline: buildTime,
 			// Far into the future...
 			maxDeadline: nextQuietHours.Add(24 * time.Hour),
-			newDeadline:    time.Time{},
+			newDeadline:    nil,
 			newMaxDeadline: nextQuietHours,
 		},
 		{
@@ -120,7 +129,56 @@ func TestTemplateUpdateBuildDeadlines(t *testing.T) {
 			deadline:    nextQuietHours.Add(24 * time.Hour),
 			maxDeadline: nextQuietHours.Add(24 * time.Hour),
 			// The deadline should match since it is after the new max deadline.
-			newDeadline: nextQuietHours,
+			newDeadline:    ptr.Ref(nextQuietHours),
 			newMaxDeadline: nextQuietHours,
 		},
+		{
+			// There was a bug where, if a user had no quiet hours set and the
+			// autostop requirement was not enabled, the max deadline would be
+			// set to `time.Time{}`. This zero value was "in the past", so the
+			// workspace deadline would be set to "now" + 2 hours. This was a
+			// mistake because a zero max deadline means there is no max
+			// deadline.
+			name:        "MaxDeadlineShouldBeUnset",
+			now:         buildTime,
+			deadline:    buildTime.Add(time.Hour * 8),
+			maxDeadline: time.Time{}, // No max set
+			// Should be unchanged
+			newDeadline:    ptr.Ref(buildTime.Add(time.Hour * 8)),
+			newMaxDeadline: time.Time{},
+			noQuietHours:   true,
+			autostopReq: &agplschedule.TemplateAutostopRequirement{
+				DaysOfWeek: 0,
+				Weeks:      0,
+			},
+		},
+		{
+			// A bug existed where MaxDeadline could be set, but deadline was
+			// `time.Time{}`. This was a logical inconsistency because the
+			// "max" deadline was ignored.
+			name:        "NoDeadline",
+			now:         buildTime,
+			deadline:    time.Time{},
+			maxDeadline: time.Time{}, // No max set
+			// Should be unchanged
+			newDeadline:    ptr.Ref(time.Time{}),
+			newMaxDeadline: time.Time{},
+			noQuietHours:   true,
+			autostopReq: &agplschedule.TemplateAutostopRequirement{
+				DaysOfWeek: 0,
+				Weeks:      0,
+			},
+		},
+
+		{
+			// Similar to the 'NoDeadline' test, but this has a MaxDeadline
+			// set, so the deadline of the workspace should now be set.
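+			// The default "every day" autostop requirement applies here, so
+			// both deadlines should land on the next quiet hours window.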
+ name: "WorkspaceDeadlineNowSet", + now: nextQuietHours.Add(-6 * time.Hour), + // Start with unset times + deadline: time.Time{}, + maxDeadline: time.Time{}, + newDeadline: ptr.Ref(nextQuietHours), newMaxDeadline: nextQuietHours, }, } @@ -131,6 +189,11 @@ func TestTemplateUpdateBuildDeadlines(t *testing.T) { t.Run(c.name, func(t *testing.T) { t.Parallel() + user := quietUser + if c.noQuietHours { + user = noQuietUser + } + t.Log("buildTime", buildTime) t.Log("nextQuietHours", nextQuietHours) t.Log("now", c.now) @@ -217,17 +280,22 @@ func TestTemplateUpdateBuildDeadlines(t *testing.T) { templateScheduleStore.TimeNowFn = func() time.Time { return c.now } + + autostopReq := agplschedule.TemplateAutostopRequirement{ + // Every day + DaysOfWeek: 0b01111111, + Weeks: 0, + } + if c.autostopReq != nil { + autostopReq = *c.autostopReq + } _, err = templateScheduleStore.Set(ctx, db, template, agplschedule.TemplateScheduleOptions{ - UserAutostartEnabled: false, - UserAutostopEnabled: false, - DefaultTTL: 0, - MaxTTL: 0, - UseMaxTTL: false, - AutostopRequirement: agplschedule.TemplateAutostopRequirement{ - // Every day - DaysOfWeek: 0b01111111, - Weeks: 0, - }, + UserAutostartEnabled: false, + UserAutostopEnabled: false, + DefaultTTL: 0, + MaxTTL: 0, + UseMaxTTL: false, + AutostopRequirement: autostopReq, FailureTTL: 0, TimeTilDormant: 0, TimeTilDormantAutoDelete: 0, @@ -238,10 +306,10 @@ func TestTemplateUpdateBuildDeadlines(t *testing.T) { newBuild, err := db.GetWorkspaceBuildByID(ctx, wsBuild.ID) require.NoError(t, err) - if c.newDeadline.IsZero() { - c.newDeadline = wsBuild.Deadline + if c.newDeadline == nil { + c.newDeadline = &wsBuild.Deadline } - require.WithinDuration(t, c.newDeadline, newBuild.Deadline, time.Second) + require.WithinDuration(t, *c.newDeadline, newBuild.Deadline, time.Second) require.WithinDuration(t, c.newMaxDeadline, newBuild.MaxDeadline, time.Second) // Check that the new build has the same state as before. From 5359fb31f5c8f5efd0a96a236c634080b5bd738f Mon Sep 17 00:00:00 2001 From: Dean Sheather Date: Mon, 4 Mar 2024 23:40:15 -0800 Subject: [PATCH 2/5] chore: add test for workspace proxy derp meshing (#12220) - Reworks the proxy registration loop into a struct (so I can add a `RegisterNow` method) - Changes the proxy registration loop interval to 15s (previously 30s) - Adds test which tests bidirectional DERP meshing on all possible paths between 6 workspace proxy replicas Related to https://github.com/coder/customers/issues/438 --- enterprise/coderd/coderdenttest/proxytest.go | 49 ++-- enterprise/coderd/workspaceproxy_test.go | 4 +- enterprise/wsproxy/wsproxy.go | 26 ++- enterprise/wsproxy/wsproxy_test.go | 197 +++++++++++++++- enterprise/wsproxy/wsproxysdk/wsproxysdk.go | 229 +++++++++++++------ 5 files changed, 396 insertions(+), 109 deletions(-) diff --git a/enterprise/coderd/coderdenttest/proxytest.go b/enterprise/coderd/coderdenttest/proxytest.go index 9b43cbe6c316d..831c4be86f640 100644 --- a/enterprise/coderd/coderdenttest/proxytest.go +++ b/enterprise/coderd/coderdenttest/proxytest.go @@ -38,15 +38,29 @@ type ProxyOptions struct { // ProxyURL is optional ProxyURL *url.URL + // Token is optional. If specified, a new workspace proxy region will not be + // created, and the proxy will become a replica of the existing proxy + // region. + Token string + // FlushStats is optional FlushStats chan chan<- struct{} } -// NewWorkspaceProxy will configure a wsproxy.Server with the given options. -// The new wsproxy will register itself with the given coderd.API instance. 
-// The first user owner client is required to create the wsproxy on the coderd -// api server. -func NewWorkspaceProxy(t *testing.T, coderdAPI *coderd.API, owner *codersdk.Client, options *ProxyOptions) *wsproxy.Server { +type WorkspaceProxy struct { + *wsproxy.Server + + ServerURL *url.URL +} + +// NewWorkspaceProxyReplica will configure a wsproxy.Server with the given +// options. The new wsproxy replica will register itself with the given +// coderd.API instance. +// +// If a token is not provided, a new workspace proxy region is created using the +// owner client. If a token is provided, the proxy will become a replica of the +// existing proxy region. +func NewWorkspaceProxyReplica(t *testing.T, coderdAPI *coderd.API, owner *codersdk.Client, options *ProxyOptions) WorkspaceProxy { ctx, cancelFunc := context.WithCancel(context.Background()) t.Cleanup(cancelFunc) @@ -107,11 +121,15 @@ func NewWorkspaceProxy(t *testing.T, coderdAPI *coderd.API, owner *codersdk.Clie options.Name = namesgenerator.GetRandomName(1) } - proxyRes, err := owner.CreateWorkspaceProxy(ctx, codersdk.CreateWorkspaceProxyRequest{ - Name: options.Name, - Icon: "/emojis/flag.png", - }) - require.NoError(t, err, "failed to create workspace proxy") + token := options.Token + if token == "" { + proxyRes, err := owner.CreateWorkspaceProxy(ctx, codersdk.CreateWorkspaceProxyRequest{ + Name: options.Name, + Icon: "/emojis/flag.png", + }) + require.NoError(t, err, "failed to create workspace proxy") + token = proxyRes.ProxyToken + } // Inherit collector options from coderd, but keep the wsproxy reporter. statsCollectorOptions := coderdAPI.Options.WorkspaceAppsStatsCollectorOptions @@ -121,7 +139,7 @@ func NewWorkspaceProxy(t *testing.T, coderdAPI *coderd.API, owner *codersdk.Clie } wssrv, err := wsproxy.New(ctx, &wsproxy.Options{ - Logger: slogtest.Make(t, nil).Leveled(slog.LevelDebug), + Logger: slogtest.Make(t, nil).Leveled(slog.LevelDebug).With(slog.F("server_url", serverURL.String())), Experiments: options.Experiments, DashboardURL: coderdAPI.AccessURL, AccessURL: accessURL, @@ -131,14 +149,14 @@ func NewWorkspaceProxy(t *testing.T, coderdAPI *coderd.API, owner *codersdk.Clie Tracing: coderdAPI.TracerProvider, APIRateLimit: coderdAPI.APIRateLimit, SecureAuthCookie: coderdAPI.SecureAuthCookie, - ProxySessionToken: proxyRes.ProxyToken, + ProxySessionToken: token, DisablePathApps: options.DisablePathApps, // We need a new registry to not conflict with the coderd internal // proxy metrics. 
PrometheusRegistry: prometheus.NewRegistry(), DERPEnabled: !options.DerpDisabled, DERPOnly: options.DerpOnly, - DERPServerRelayAddress: accessURL.String(), + DERPServerRelayAddress: serverURL.String(), StatsCollectorOptions: statsCollectorOptions, }) require.NoError(t, err) @@ -151,5 +169,8 @@ func NewWorkspaceProxy(t *testing.T, coderdAPI *coderd.API, owner *codersdk.Clie handler = wssrv.Handler mutex.Unlock() - return wssrv + return WorkspaceProxy{ + Server: wssrv, + ServerURL: serverURL, + } } diff --git a/enterprise/coderd/workspaceproxy_test.go b/enterprise/coderd/workspaceproxy_test.go index 17e17240dcace..b7d4e8cf2f8f9 100644 --- a/enterprise/coderd/workspaceproxy_test.go +++ b/enterprise/coderd/workspaceproxy_test.go @@ -99,7 +99,7 @@ func TestRegions(t *testing.T) { require.NoError(t, err) const proxyName = "hello" - _ = coderdenttest.NewWorkspaceProxy(t, api, client, &coderdenttest.ProxyOptions{ + _ = coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{ Name: proxyName, AppHostname: appHostname + ".proxy", }) @@ -734,7 +734,7 @@ func TestReconnectingPTYSignedToken(t *testing.T) { proxyURL, err := url.Parse(fmt.Sprintf("https://%s.com", namesgenerator.GetRandomName(1))) require.NoError(t, err) - _ = coderdenttest.NewWorkspaceProxy(t, api, client, &coderdenttest.ProxyOptions{ + _ = coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{ Name: namesgenerator.GetRandomName(1), ProxyURL: proxyURL, AppHostname: "*.sub.example.com", diff --git a/enterprise/wsproxy/wsproxy.go b/enterprise/wsproxy/wsproxy.go index 68693f4633871..17fae2b791695 100644 --- a/enterprise/wsproxy/wsproxy.go +++ b/enterprise/wsproxy/wsproxy.go @@ -128,7 +128,7 @@ type Server struct { ctx context.Context cancel context.CancelFunc derpCloseFunc func() - registerDone <-chan struct{} + registerLoop *wsproxysdk.RegisterWorkspaceProxyLoop } // New creates a new workspace proxy server. This requires a primary coderd @@ -210,7 +210,7 @@ func New(ctx context.Context, opts *Options) (*Server, error) { // goroutine to periodically re-register. 
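 	// The initial registration response also carries the DERP mesh key and
 	// the list of sibling replicas used to seed the mesh.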
replicaID := uuid.New() osHostname := cliutil.Hostname() - regResp, registerDone, err := client.RegisterWorkspaceProxyLoop(ctx, wsproxysdk.RegisterWorkspaceProxyLoopOpts{ + registerLoop, regResp, err := client.RegisterWorkspaceProxyLoop(ctx, wsproxysdk.RegisterWorkspaceProxyLoopOpts{ Logger: opts.Logger, Request: wsproxysdk.RegisterWorkspaceProxyRequest{ AccessURL: opts.AccessURL.String(), @@ -230,12 +230,13 @@ func New(ctx context.Context, opts *Options) (*Server, error) { if err != nil { return nil, xerrors.Errorf("register proxy: %w", err) } - s.registerDone = registerDone - err = s.handleRegister(ctx, regResp) + s.registerLoop = registerLoop + + derpServer.SetMeshKey(regResp.DERPMeshKey) + err = s.handleRegister(regResp) if err != nil { return nil, xerrors.Errorf("handle register: %w", err) } - derpServer.SetMeshKey(regResp.DERPMeshKey) secKey, err := workspaceapps.KeyFromString(regResp.AppSecurityKey) if err != nil { @@ -409,16 +410,16 @@ func New(ctx context.Context, opts *Options) (*Server, error) { return s, nil } +func (s *Server) RegisterNow() error { + _, err := s.registerLoop.RegisterNow() + return err +} + func (s *Server) Close() error { s.cancel() var err error - registerDoneWaitTicker := time.NewTicker(11 * time.Second) // the attempt timeout is 10s - select { - case <-registerDoneWaitTicker.C: - err = multierror.Append(err, xerrors.New("timed out waiting for registerDone")) - case <-s.registerDone: - } + s.registerLoop.Close() s.derpCloseFunc() appServerErr := s.AppServer.Close() if appServerErr != nil { @@ -437,11 +438,12 @@ func (*Server) mutateRegister(_ *wsproxysdk.RegisterWorkspaceProxyRequest) { // package in the primary and update req.ReplicaError accordingly. } -func (s *Server) handleRegister(_ context.Context, res wsproxysdk.RegisterWorkspaceProxyResponse) error { +func (s *Server) handleRegister(res wsproxysdk.RegisterWorkspaceProxyResponse) error { addresses := make([]string, len(res.SiblingReplicas)) for i, replica := range res.SiblingReplicas { addresses[i] = replica.RelayAddress } + s.Logger.Debug(s.ctx, "setting DERP mesh sibling addresses", slog.F("addresses", addresses)) s.derpMesh.SetAddresses(addresses, false) s.latestDERPMap.Store(res.DERPMap) diff --git a/enterprise/wsproxy/wsproxy_test.go b/enterprise/wsproxy/wsproxy_test.go index 0d440165dfb16..e8fed4f35c594 100644 --- a/enterprise/wsproxy/wsproxy_test.go +++ b/enterprise/wsproxy/wsproxy_test.go @@ -1,14 +1,18 @@ package wsproxy_test import ( + "context" "fmt" "net" + "net/url" "testing" + "time" "github.com/davecgh/go-spew/spew" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "tailscale.com/derp" "tailscale.com/derp/derphttp" "tailscale.com/tailcfg" "tailscale.com/types/key" @@ -22,6 +26,7 @@ import ( "github.com/coder/coder/v2/coderd/httpmw" "github.com/coder/coder/v2/coderd/workspaceapps/apptest" "github.com/coder/coder/v2/codersdk" + "github.com/coder/coder/v2/cryptorand" "github.com/coder/coder/v2/enterprise/coderd/coderdenttest" "github.com/coder/coder/v2/enterprise/coderd/license" "github.com/coder/coder/v2/provisioner/echo" @@ -62,7 +67,7 @@ func TestDERPOnly(t *testing.T) { }) // Create an external proxy. - _ = coderdenttest.NewWorkspaceProxy(t, api, client, &coderdenttest.ProxyOptions{ + _ = coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{ Name: "best-proxy", DerpOnly: true, }) @@ -109,15 +114,15 @@ func TestDERP(t *testing.T) { }) // Create two running external proxies. 
- proxyAPI1 := coderdenttest.NewWorkspaceProxy(t, api, client, &coderdenttest.ProxyOptions{ + proxyAPI1 := coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{ Name: "best-proxy", }) - proxyAPI2 := coderdenttest.NewWorkspaceProxy(t, api, client, &coderdenttest.ProxyOptions{ + proxyAPI2 := coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{ Name: "worst-proxy", }) // Create a running external proxy with DERP disabled. - proxyAPI3 := coderdenttest.NewWorkspaceProxy(t, api, client, &coderdenttest.ProxyOptions{ + proxyAPI3 := coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{ Name: "no-derp-proxy", DerpDisabled: true, }) @@ -340,7 +345,7 @@ func TestDERPEndToEnd(t *testing.T) { _ = closer.Close() }) - coderdenttest.NewWorkspaceProxy(t, api, client, &coderdenttest.ProxyOptions{ + coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{ Name: "best-proxy", }) @@ -430,6 +435,105 @@ resourceLoop: require.False(t, p2p) } +// TestDERPMesh spawns 6 workspace proxy replicas and tries to connect to a +// single DERP peer via every single one. +func TestDERPMesh(t *testing.T) { + t.Parallel() + + deploymentValues := coderdtest.DeploymentValues(t) + deploymentValues.Experiments = []string{ + "*", + } + + client, closer, api, _ := coderdenttest.NewWithAPI(t, &coderdenttest.Options{ + Options: &coderdtest.Options{ + DeploymentValues: deploymentValues, + AppHostname: "*.primary.test.coder.com", + IncludeProvisionerDaemon: true, + RealIPConfig: &httpmw.RealIPConfig{ + TrustedOrigins: []*net.IPNet{{ + IP: net.ParseIP("127.0.0.1"), + Mask: net.CIDRMask(8, 32), + }}, + TrustedHeaders: []string{ + "CF-Connecting-IP", + }, + }, + }, + LicenseOptions: &coderdenttest.LicenseOptions{ + Features: license.Features{ + codersdk.FeatureWorkspaceProxy: 1, + }, + }, + }) + t.Cleanup(func() { + _ = closer.Close() + }) + + proxyURL, err := url.Parse("https://proxy.test.coder.com") + require.NoError(t, err) + + // Create 6 proxy replicas. + const count = 6 + var ( + sessionToken = "" + proxies = [count]coderdenttest.WorkspaceProxy{} + derpURLs = [count]string{} + ) + for i := range proxies { + proxies[i] = coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{ + Name: "best-proxy", + Token: sessionToken, + ProxyURL: proxyURL, + }) + if i == 0 { + sessionToken = proxies[i].Options.ProxySessionToken + } + + derpURL := *proxies[i].ServerURL + derpURL.Path = "/derp" + derpURLs[i] = derpURL.String() + } + + // Force all proxies to re-register immediately. This ensures the DERP mesh + // is up-to-date. In production this will happen automatically after about + // 15 seconds. + for i, proxy := range proxies { + err := proxy.RegisterNow() + require.NoErrorf(t, err, "failed to force proxy %d to re-register", i) + } + + // Generate cases. We have a case for: + // - Each proxy to itself. + // - Each proxy to each other proxy (one way, no duplicates). 
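+	// With count = 6 this yields 6 self-pairs plus 15 distinct cross-pairs,
+	// i.e. 21 = (6*7)/2 cases, matching the triangle-number assertion below.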
+ cases := [][2]string{} + for i, derpURL := range derpURLs { + cases = append(cases, [2]string{derpURL, derpURL}) + for j := i + 1; j < len(derpURLs); j++ { + cases = append(cases, [2]string{derpURL, derpURLs[j]}) + } + } + require.Len(t, cases, (count*(count+1))/2) // triangle number + + for i, c := range cases { + i, c := i, c + t.Run(fmt.Sprintf("Proxy%d", i), func(t *testing.T) { + t.Parallel() + + t.Logf("derp1=%s, derp2=%s", c[0], c[1]) + ctx := testutil.Context(t, testutil.WaitLong) + client1, client1Recv := createDERPClient(t, ctx, "client1", c[0]) + client2, client2Recv := createDERPClient(t, ctx, "client2", c[1]) + + // Send a packet from client 1 to client 2. + testDERPSend(t, ctx, client2.SelfPublicKey(), client2Recv, client1) + + // Send a packet from client 2 to client 1. + testDERPSend(t, ctx, client1.SelfPublicKey(), client1Recv, client2) + }) + } +} + func TestWorkspaceProxyWorkspaceApps(t *testing.T) { t.Parallel() @@ -482,7 +586,7 @@ func TestWorkspaceProxyWorkspaceApps(t *testing.T) { if opts.DisableSubdomainApps { opts.AppHost = "" } - proxyAPI := coderdenttest.NewWorkspaceProxy(t, api, client, &coderdenttest.ProxyOptions{ + proxyAPI := coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{ Name: "best-proxy", AppHostname: opts.AppHost, DisablePathApps: opts.DisablePathApps, @@ -498,3 +602,84 @@ func TestWorkspaceProxyWorkspaceApps(t *testing.T) { } }) } + +// createDERPClient creates a DERP client and spawns a goroutine that reads from +// the client and sends the received packets to a channel. +// +//nolint:revive +func createDERPClient(t *testing.T, ctx context.Context, name string, derpURL string) (*derphttp.Client, <-chan derp.ReceivedPacket) { + t.Helper() + + client, err := derphttp.NewClient(key.NewNode(), derpURL, func(format string, args ...any) { + t.Logf(name+": "+format, args...) + }) + require.NoError(t, err, "create client") + t.Cleanup(func() { + _ = client.Close() + }) + err = client.Connect(ctx) + require.NoError(t, err, "connect to DERP server") + + ch := make(chan derp.ReceivedPacket, 1) + go func() { + defer close(ch) + for { + msg, err := client.Recv() + if err != nil { + t.Logf("Recv error: %v", err) + return + } + switch msg := msg.(type) { + case derp.ReceivedPacket: + ch <- msg + return + default: + // We don't care about other messages. + } + } + }() + + return client, ch +} + +// testDERPSend sends a message from src to dstKey and waits for it to be +// received on dstCh. +// +// If the packet doesn't arrive within 500ms, it will try to send it again until +// testutil.WaitLong is reached. +// +//nolint:revive +func testDERPSend(t *testing.T, ctx context.Context, dstKey key.NodePublic, dstCh <-chan derp.ReceivedPacket, src *derphttp.Client) { + t.Helper() + + // The prefix helps identify where the packet starts if you get garbled data + // in logs. + const msgStrPrefix = "test_packet_" + msgStr, err := cryptorand.String(64 - len(msgStrPrefix)) + require.NoError(t, err, "generate random msg string") + msg := []byte(msgStrPrefix + msgStr) + + err = src.Send(dstKey, msg) + require.NoError(t, err, "send message via DERP") + + ticker := time.NewTicker(time.Millisecond * 500) + defer ticker.Stop() + for { + select { + case pkt := <-dstCh: + require.Equal(t, src.SelfPublicKey(), pkt.Source, "packet came from wrong source") + require.Equal(t, msg, pkt.Data, "packet data is wrong") + return + case <-ctx.Done(): + t.Fatal("timed out waiting for packet") + return + case <-ticker.C: + } + + // Send another packet. 
Since we're sending packets immediately + // after opening the clients, they might not be meshed together + // properly yet. + err = src.Send(dstKey, msg) + require.NoError(t, err, "send message via DERP") + } +} diff --git a/enterprise/wsproxy/wsproxysdk/wsproxysdk.go b/enterprise/wsproxy/wsproxysdk/wsproxysdk.go index 1163f7c435001..37636102bb413 100644 --- a/enterprise/wsproxy/wsproxysdk/wsproxysdk.go +++ b/enterprise/wsproxy/wsproxysdk/wsproxysdk.go @@ -277,135 +277,214 @@ type RegisterWorkspaceProxyLoopOpts struct { // called in a blocking manner, so it should avoid blocking for too long. If // the callback returns an error, the loop will stop immediately and the // error will be returned to the FailureFn. - CallbackFn func(ctx context.Context, res RegisterWorkspaceProxyResponse) error + CallbackFn func(res RegisterWorkspaceProxyResponse) error // FailureFn is called with the last error returned from the server if the // context is canceled, registration fails for more than MaxFailureCount, // or if any permanent values in the response change. FailureFn func(err error) } -// RegisterWorkspaceProxyLoop will register the workspace proxy and then start a -// goroutine to keep registering periodically in the background. -// -// The first response is returned immediately, and subsequent responses will be -// notified to the given CallbackFn. When the context is canceled the loop will -// stop immediately and the context error will be returned to the FailureFn. -// -// The returned channel will be closed when the loop stops and can be used to -// ensure the loop is dead before continuing. When a fatal error is encountered, -// the proxy will be deregistered (with the same ReplicaID and AttemptTimeout) -// before calling the FailureFn. -func (c *Client) RegisterWorkspaceProxyLoop(ctx context.Context, opts RegisterWorkspaceProxyLoopOpts) (RegisterWorkspaceProxyResponse, <-chan struct{}, error) { - if opts.Interval == 0 { - opts.Interval = 30 * time.Second - } - if opts.MaxFailureCount == 0 { - opts.MaxFailureCount = 10 - } - if opts.AttemptTimeout == 0 { - opts.AttemptTimeout = 10 * time.Second - } - if opts.MutateFn == nil { - opts.MutateFn = func(_ *RegisterWorkspaceProxyRequest) {} - } - if opts.CallbackFn == nil { - opts.CallbackFn = func(_ context.Context, _ RegisterWorkspaceProxyResponse) error { - return nil - } +type RegisterWorkspaceProxyLoop struct { + opts RegisterWorkspaceProxyLoopOpts + c *Client + + // runLoopNow takes a response channel to send the response to and triggers + // the loop to run immediately if it's waiting. + runLoopNow chan chan RegisterWorkspaceProxyResponse + closedCtx context.Context + close context.CancelFunc + done chan struct{} +} + +func (l *RegisterWorkspaceProxyLoop) register(ctx context.Context) (RegisterWorkspaceProxyResponse, error) { + registerCtx, registerCancel := context.WithTimeout(ctx, l.opts.AttemptTimeout) + res, err := l.c.RegisterWorkspaceProxy(registerCtx, l.opts.Request) + registerCancel() + if err != nil { + return RegisterWorkspaceProxyResponse{}, xerrors.Errorf("register workspace proxy: %w", err) } - failureFn := func(err error) { - // We have to use background context here because the original context - // may be canceled. 
- deregisterCtx, cancel := context.WithTimeout(context.Background(), opts.AttemptTimeout) - defer cancel() - deregisterErr := c.DeregisterWorkspaceProxy(deregisterCtx, DeregisterWorkspaceProxyRequest{ - ReplicaID: opts.Request.ReplicaID, - }) - if deregisterErr != nil { - opts.Logger.Error(ctx, - "failed to deregister workspace proxy with Coder primary (it will be automatically deregistered shortly)", - slog.Error(deregisterErr), - ) - } + return res, nil +} - if opts.FailureFn != nil { - opts.FailureFn(err) - } +// Start starts the proxy registration loop. The provided context is only used +// for the initial registration. Use Close() to stop. +func (l *RegisterWorkspaceProxyLoop) Start(ctx context.Context) (RegisterWorkspaceProxyResponse, error) { + if l.opts.Interval == 0 { + l.opts.Interval = 15 * time.Second + } + if l.opts.MaxFailureCount == 0 { + l.opts.MaxFailureCount = 10 + } + if l.opts.AttemptTimeout == 0 { + l.opts.AttemptTimeout = 10 * time.Second } - originalRes, err := c.RegisterWorkspaceProxy(ctx, opts.Request) + var err error + originalRes, err := l.register(ctx) if err != nil { - return RegisterWorkspaceProxyResponse{}, nil, xerrors.Errorf("register workspace proxy: %w", err) + return RegisterWorkspaceProxyResponse{}, xerrors.Errorf("initial registration: %w", err) } - done := make(chan struct{}) go func() { - defer close(done) + defer close(l.done) var ( failedAttempts = 0 - ticker = time.NewTicker(opts.Interval) + ticker = time.NewTicker(l.opts.Interval) ) for { + var respCh chan RegisterWorkspaceProxyResponse select { - case <-ctx.Done(): - failureFn(ctx.Err()) + case <-l.closedCtx.Done(): + l.failureFn(xerrors.Errorf("proxy registration loop closed")) return + case respCh = <-l.runLoopNow: case <-ticker.C: } - opts.Logger.Debug(ctx, + l.opts.Logger.Debug(context.Background(), "re-registering workspace proxy with Coder primary", - slog.F("req", opts.Request), - slog.F("timeout", opts.AttemptTimeout), + slog.F("req", l.opts.Request), + slog.F("timeout", l.opts.AttemptTimeout), slog.F("failed_attempts", failedAttempts), ) - opts.MutateFn(&opts.Request) - registerCtx, cancel := context.WithTimeout(ctx, opts.AttemptTimeout) - res, err := c.RegisterWorkspaceProxy(registerCtx, opts.Request) - cancel() + + l.mutateFn(&l.opts.Request) + resp, err := l.register(l.closedCtx) if err != nil { failedAttempts++ - opts.Logger.Warn(ctx, + l.opts.Logger.Warn(context.Background(), "failed to re-register workspace proxy with Coder primary", - slog.F("req", opts.Request), - slog.F("timeout", opts.AttemptTimeout), + slog.F("req", l.opts.Request), + slog.F("timeout", l.opts.AttemptTimeout), slog.F("failed_attempts", failedAttempts), slog.Error(err), ) - if failedAttempts > opts.MaxFailureCount { - failureFn(xerrors.Errorf("exceeded re-registration failure count of %d: last error: %w", opts.MaxFailureCount, err)) + if failedAttempts > l.opts.MaxFailureCount { + l.failureFn(xerrors.Errorf("exceeded re-registration failure count of %d: last error: %w", l.opts.MaxFailureCount, err)) return } continue } failedAttempts = 0 - if res.AppSecurityKey != originalRes.AppSecurityKey { - failureFn(xerrors.New("app security key has changed, proxy must be restarted")) + // Check for consistency. 
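+			// These values cannot change while the proxy is running, so any
+			// difference from the initial response is fatal.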
+			if originalRes.AppSecurityKey != resp.AppSecurityKey {
+				l.failureFn(xerrors.New("app security key has changed, proxy must be restarted"))
 				return
 			}
-			if res.DERPMeshKey != originalRes.DERPMeshKey {
-				failureFn(xerrors.New("DERP mesh key has changed, proxy must be restarted"))
+			if originalRes.DERPMeshKey != resp.DERPMeshKey {
+				l.failureFn(xerrors.New("DERP mesh key has changed, proxy must be restarted"))
 				return
 			}
-			if res.DERPRegionID != originalRes.DERPRegionID {
-				failureFn(xerrors.New("DERP region ID has changed, proxy must be restarted"))
+			if originalRes.DERPRegionID != resp.DERPRegionID {
+				l.failureFn(xerrors.New("DERP region ID has changed, proxy must be restarted"))
+				return
 			}
 
-			err = opts.CallbackFn(ctx, res)
+			err = l.callbackFn(resp)
 			if err != nil {
-				failureFn(xerrors.Errorf("callback fn returned error: %w", err))
+				l.failureFn(xerrors.Errorf("callback function returned an error: %w", err))
 				return
 			}
 
-			ticker.Reset(opts.Interval)
+			// If we were triggered by RegisterNow(), send the response back.
+			if respCh != nil {
+				respCh <- resp
+				close(respCh)
+			}
+
+			ticker.Reset(l.opts.Interval)
 		}
 	}()
 
-	return originalRes, done, nil
+	return originalRes, nil
+}
+
+// RegisterNow asks the registration loop to register immediately and waits
+// for the loop to send back the response (or for the loop to close).
+func (l *RegisterWorkspaceProxyLoop) RegisterNow() (RegisterWorkspaceProxyResponse, error) {
+	// The channel is closed by the loop after sending the response.
+	respCh := make(chan RegisterWorkspaceProxyResponse, 1)
+	select {
+	case <-l.done:
+		return RegisterWorkspaceProxyResponse{}, xerrors.New("proxy registration loop closed")
+	case l.runLoopNow <- respCh:
+	}
+	select {
+	case <-l.done:
+		return RegisterWorkspaceProxyResponse{}, xerrors.New("proxy registration loop closed")
+	case resp := <-respCh:
+		return resp, nil
+	}
+}
+
+func (l *RegisterWorkspaceProxyLoop) Close() {
+	l.close()
+	<-l.done
+}
+
+func (l *RegisterWorkspaceProxyLoop) mutateFn(req *RegisterWorkspaceProxyRequest) {
+	if l.opts.MutateFn != nil {
+		l.opts.MutateFn(req)
+	}
+}
+
+func (l *RegisterWorkspaceProxyLoop) callbackFn(res RegisterWorkspaceProxyResponse) error {
+	if l.opts.CallbackFn != nil {
+		return l.opts.CallbackFn(res)
+	}
+	return nil
+}
+
+func (l *RegisterWorkspaceProxyLoop) failureFn(err error) {
+	// We have to use background context here because the original context may
+	// be canceled.
+	deregisterCtx, cancel := context.WithTimeout(context.Background(), l.opts.AttemptTimeout)
+	defer cancel()
+	deregisterErr := l.c.DeregisterWorkspaceProxy(deregisterCtx, DeregisterWorkspaceProxyRequest{
+		ReplicaID: l.opts.Request.ReplicaID,
+	})
+	if deregisterErr != nil {
+		l.opts.Logger.Error(context.Background(),
+			"failed to deregister workspace proxy with Coder primary (it will be automatically deregistered shortly)",
+			slog.Error(deregisterErr),
+		)
+	}
+
+	if l.opts.FailureFn != nil {
+		l.opts.FailureFn(err)
+	}
+}
+
+// RegisterWorkspaceProxyLoop will register the workspace proxy and then start a
+// goroutine to keep registering periodically in the background.
+//
+// The first response is returned immediately, and subsequent responses will be
+// notified to the given CallbackFn. When the loop is Close()d it will stop
+// immediately and an error will be returned to the FailureFn.
+//
+// When a fatal error is encountered (or the proxy is closed), the proxy will be
+// deregistered (with the same ReplicaID and AttemptTimeout) before calling the
+// FailureFn.
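+//
+// A minimal usage sketch (illustrative only, mirroring how the wsproxy
+// server consumes this API):
+//
+//	loop, res, err := client.RegisterWorkspaceProxyLoop(ctx, opts)
+//	if err != nil {
+//		return xerrors.Errorf("register proxy: %w", err)
+//	}
+//	defer loop.Close()
+//	derpServer.SetMeshKey(res.DERPMeshKey)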
+func (c *Client) RegisterWorkspaceProxyLoop(ctx context.Context, opts RegisterWorkspaceProxyLoopOpts) (*RegisterWorkspaceProxyLoop, RegisterWorkspaceProxyResponse, error) { + closedCtx, closeFn := context.WithCancel(context.Background()) + loop := &RegisterWorkspaceProxyLoop{ + opts: opts, + c: c, + runLoopNow: make(chan chan RegisterWorkspaceProxyResponse), + closedCtx: closedCtx, + close: closeFn, + done: make(chan struct{}), + } + + regResp, err := loop.Start(ctx) + if err != nil { + return nil, RegisterWorkspaceProxyResponse{}, xerrors.Errorf("start loop: %w", err) + } + return loop, regResp, nil } type CoordinateMessageType int From d448c496527d01b8f716d4c70676607d63657506 Mon Sep 17 00:00:00 2001 From: Dean Sheather Date: Thu, 7 Mar 2024 22:31:40 -0800 Subject: [PATCH 3/5] feat: add derp mesh health checking in workspace proxies (#12222) --- enterprise/cli/proxyserver.go | 9 +- enterprise/coderd/coderd.go | 26 +- enterprise/coderd/coderdenttest/proxytest.go | 4 + enterprise/replicasync/replicasync.go | 107 +++++--- enterprise/replicasync/replicasync_test.go | 2 +- enterprise/wsproxy/wsproxy.go | 137 +++++++--- enterprise/wsproxy/wsproxy_test.go | 262 ++++++++++++++++++- 7 files changed, 445 insertions(+), 102 deletions(-) diff --git a/enterprise/cli/proxyserver.go b/enterprise/cli/proxyserver.go index 9ac59735b120d..37e90ce56cbe7 100644 --- a/enterprise/cli/proxyserver.go +++ b/enterprise/cli/proxyserver.go @@ -244,7 +244,7 @@ func (r *RootCmd) proxyServer() *clibase.Cmd { closers.Add(closeFunc) } - proxy, err := wsproxy.New(ctx, &wsproxy.Options{ + options := &wsproxy.Options{ Logger: logger, Experiments: coderd.ReadExperiments(logger, cfg.Experiments.Value()), HTTPClient: httpClient, @@ -263,7 +263,12 @@ func (r *RootCmd) proxyServer() *clibase.Cmd { DERPEnabled: cfg.DERP.Server.Enable.Value(), DERPOnly: derpOnly.Value(), DERPServerRelayAddress: cfg.DERP.Server.RelayURL.String(), - }) + } + if httpServers.TLSConfig != nil { + options.TLSCertificates = httpServers.TLSConfig.Certificates + } + + proxy, err := wsproxy.New(ctx, options) if err != nil { return xerrors.Errorf("create workspace proxy: %w", err) } diff --git a/enterprise/coderd/coderd.go b/enterprise/coderd/coderd.go index 25bf7b971f977..be35f0ca948d9 100644 --- a/enterprise/coderd/coderd.go +++ b/enterprise/coderd/coderd.go @@ -3,8 +3,6 @@ package coderd import ( "context" "crypto/ed25519" - "crypto/tls" - "crypto/x509" "fmt" "math" "net/http" @@ -365,27 +363,9 @@ func New(ctx context.Context, options *Options) (_ *API, err error) { }) } - meshRootCA := x509.NewCertPool() - for _, certificate := range options.TLSCertificates { - for _, certificatePart := range certificate.Certificate { - certificate, err := x509.ParseCertificate(certificatePart) - if err != nil { - return nil, xerrors.Errorf("parse certificate %s: %w", certificate.Subject.CommonName, err) - } - meshRootCA.AddCert(certificate) - } - } - // This TLS configuration spoofs access from the access URL hostname - // assuming that the certificates provided will cover that hostname. - // - // Replica sync and DERP meshing require accessing replicas via their - // internal IP addresses, and if TLS is configured we use the same - // certificates. 
-	meshTLSConfig := &tls.Config{
-		MinVersion:   tls.VersionTLS12,
-		Certificates: options.TLSCertificates,
-		RootCAs:      meshRootCA,
-		ServerName:   options.AccessURL.Hostname(),
+	meshTLSConfig, err := replicasync.CreateDERPMeshTLSConfig(options.AccessURL.Hostname(), options.TLSCertificates)
+	if err != nil {
+		return nil, xerrors.Errorf("create DERP mesh TLS config: %w", err)
 	}
 	api.replicaManager, err = replicasync.New(ctx, options.Logger, options.Database, options.Pubsub, &replicasync.Options{
 		ID: api.AGPL.ID,
diff --git a/enterprise/coderd/coderdenttest/proxytest.go b/enterprise/coderd/coderdenttest/proxytest.go
index 831c4be86f640..730d65c0469f2 100644
--- a/enterprise/coderd/coderdenttest/proxytest.go
+++ b/enterprise/coderd/coderdenttest/proxytest.go
@@ -43,6 +43,9 @@ type ProxyOptions struct {
 	// region.
 	Token string
 
+	// ReplicaPingCallback is optional.
+	ReplicaPingCallback func(replicas []codersdk.Replica, err string)
+
 	// FlushStats is optional
 	FlushStats chan chan<- struct{}
 }
@@ -157,6 +160,7 @@ func NewWorkspaceProxyReplica(t *testing.T, coderdAPI *coderd.API, owner *coders
 		DERPEnabled:            !options.DerpDisabled,
 		DERPOnly:               options.DerpOnly,
 		DERPServerRelayAddress: serverURL.String(),
+		ReplicaErrCallback:     options.ReplicaPingCallback,
 		StatsCollectorOptions:  statsCollectorOptions,
 	})
 	require.NoError(t, err)
diff --git a/enterprise/replicasync/replicasync.go b/enterprise/replicasync/replicasync.go
index b7e46b99f6124..aa8adb3a04ad5 100644
--- a/enterprise/replicasync/replicasync.go
+++ b/enterprise/replicasync/replicasync.go
@@ -3,6 +3,7 @@ package replicasync
 import (
 	"context"
 	"crypto/tls"
+	"crypto/x509"
 	"database/sql"
 	"errors"
 	"fmt"
@@ -265,45 +266,35 @@ func (m *Manager) syncReplicas(ctx context.Context) error {
 		},
 	}
 	defer client.CloseIdleConnections()
-	var wg sync.WaitGroup
-	var mu sync.Mutex
-	failed := make([]string, 0)
-	for _, peer := range m.Regional() {
-		wg.Add(1)
+
+	peers := m.Regional()
+	errs := make(chan error, len(peers))
+	for _, peer := range peers {
 		go func(peer database.Replica) {
-			defer wg.Done()
-			ra, err := url.Parse(peer.RelayAddress)
+			err := PingPeerReplica(ctx, client, peer.RelayAddress)
 			if err != nil {
-				m.logger.Warn(ctx, "could not parse relay address",
-					slog.F("relay_address", peer.RelayAddress), slog.Error(err))
+				errs <- xerrors.Errorf("ping sibling replica %s (%s): %w", peer.Hostname, peer.RelayAddress, err)
+				m.logger.Warn(ctx, "failed to ping sibling replica, this could happen if the replica has shut down",
+					slog.F("replica_hostname", peer.Hostname),
+					slog.F("replica_relay_address", peer.RelayAddress),
+					slog.Error(err),
+				)
 				return
 			}
-			target, err := ra.Parse("/derp/latency-check")
-			if err != nil {
-				m.logger.Warn(ctx, "could not resolve /derp/latency-check endpoint",
-					slog.F("relay_address", peer.RelayAddress), slog.Error(err))
-				return
-			}
-			req, err := http.NewRequestWithContext(ctx, http.MethodGet, target.String(), nil)
-			if err != nil {
-				m.logger.Warn(ctx, "create http request for relay probe",
-					slog.F("relay_address", peer.RelayAddress), slog.Error(err))
-				return
-			}
-			res, err := client.Do(req)
-			if err != nil {
-				mu.Lock()
-				failed = append(failed, fmt.Sprintf("relay %s (%s): %s", peer.Hostname, peer.RelayAddress, err))
-				mu.Unlock()
-				return
-			}
-			_ = res.Body.Close()
+			errs <- nil
 		}(peer)
 	}
-	wg.Wait()
+
+	replicaErrs := make([]string, 0, len(peers))
+	for i := 0; i < len(peers); i++ {
+		err := <-errs
+		if err != nil {
+			replicaErrs = append(replicaErrs, err.Error())
+		}
+	}
 	replicaError := ""
-	if len(failed) > 0 {
-		replicaError = fmt.Sprintf("Failed to dial peers: %s", strings.Join(failed, ", "))
+	if len(replicaErrs) > 0 {
+		replicaError = fmt.Sprintf("Failed to dial peers: %s", strings.Join(replicaErrs, ", "))
 	}
 
 	databaseLatency, err := m.db.Ping(ctx)
@@ -363,6 +354,32 @@ func (m *Manager) syncReplicas(ctx context.Context) error {
 	return nil
 }
 
+// PingPeerReplica pings a peer replica over its internal relay address to
+// ensure it's reachable and alive for health purposes.
+func PingPeerReplica(ctx context.Context, client http.Client, relayAddress string) error {
+	ra, err := url.Parse(relayAddress)
+	if err != nil {
+		return xerrors.Errorf("parse relay address %q: %w", relayAddress, err)
+	}
+	target, err := ra.Parse("/derp/latency-check")
+	if err != nil {
+		return xerrors.Errorf("parse latency-check URL: %w", err)
+	}
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, target.String(), nil)
+	if err != nil {
+		return xerrors.Errorf("create request: %w", err)
+	}
+	res, err := client.Do(req)
+	if err != nil {
+		return xerrors.Errorf("do probe: %w", err)
+	}
+	_ = res.Body.Close()
+	if res.StatusCode != http.StatusOK {
+		return xerrors.Errorf("unexpected status code: %d", res.StatusCode)
+	}
+	return nil
+}
+
 // Self represents the current replica.
 func (m *Manager) Self() database.Replica {
 	m.mutex.Lock()
@@ -466,3 +483,29 @@ func (m *Manager) Close() error {
 	}
 	return nil
 }
+
+// CreateDERPMeshTLSConfig creates a TLS configuration for connecting to peers
+// in the DERP mesh over private networking. It overrides the ServerName to be
+// the expected public hostname of the peer, and trusts all of the TLS server
+// certificates used by this replica (as we expect all replicas to use the same
+// TLS certificates).
+func CreateDERPMeshTLSConfig(hostname string, tlsCertificates []tls.Certificate) (*tls.Config, error) {
+	meshRootCA := x509.NewCertPool()
+	for _, certificate := range tlsCertificates {
+		for _, certificatePart := range certificate.Certificate {
+			parsedCert, err := x509.ParseCertificate(certificatePart)
+			if err != nil {
+				// parsedCert is nil on error, so it must not be referenced in
+				// the error message.
+				return nil, xerrors.Errorf("parse certificate: %w", err)
+			}
+			meshRootCA.AddCert(parsedCert)
+		}
+	}
+
+	// This TLS configuration trusts the built-in TLS certificates and forces
+	// the server name to be the public hostname.
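+	// Forcing ServerName lets certificate verification succeed even though
+	// peers are dialed via their internal relay addresses rather than the
+	// public hostname.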
+ return &tls.Config{ + MinVersion: tls.VersionTLS12, + RootCAs: meshRootCA, + ServerName: hostname, + }, nil +} diff --git a/enterprise/replicasync/replicasync_test.go b/enterprise/replicasync/replicasync_test.go index 600eb839e28bf..f04ba9ccecc31 100644 --- a/enterprise/replicasync/replicasync_test.go +++ b/enterprise/replicasync/replicasync_test.go @@ -286,7 +286,7 @@ func (d *derpyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { d.Add(1) return } - w.WriteHeader(http.StatusUpgradeRequired) + w.WriteHeader(http.StatusOK) } func (d *derpyHandler) requireOnlyDERPPaths(t *testing.T) { diff --git a/enterprise/wsproxy/wsproxy.go b/enterprise/wsproxy/wsproxy.go index 17fae2b791695..55567014fb4e0 100644 --- a/enterprise/wsproxy/wsproxy.go +++ b/enterprise/wsproxy/wsproxy.go @@ -3,14 +3,15 @@ package wsproxy import ( "context" "crypto/tls" - "crypto/x509" "errors" "fmt" "net/http" "net/url" "reflect" "regexp" + "slices" "strings" + "sync" "sync/atomic" "time" @@ -19,6 +20,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/prometheus/client_golang/prometheus" "go.opentelemetry.io/otel/trace" + "golang.org/x/sync/singleflight" "golang.org/x/xerrors" "tailscale.com/derp" "tailscale.com/derp/derphttp" @@ -35,6 +37,7 @@ import ( "github.com/coder/coder/v2/coderd/workspaceapps" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/enterprise/derpmesh" + "github.com/coder/coder/v2/enterprise/replicasync" "github.com/coder/coder/v2/enterprise/wsproxy/wsproxysdk" "github.com/coder/coder/v2/site" "github.com/coder/coder/v2/tailnet" @@ -76,6 +79,10 @@ type Options struct { // provide access to workspace apps/terminal. DERPOnly bool + // ReplicaErrCallback is called when the proxy replica successfully or + // unsuccessfully pings its peers in the mesh. + ReplicaErrCallback func(replicas []codersdk.Replica, err string) + ProxySessionToken string // AllowAllCors will set all CORs headers to '*'. // By default, CORs is set to accept external requests @@ -121,8 +128,12 @@ type Server struct { SDKClient *wsproxysdk.Client // DERP - derpMesh *derpmesh.Mesh - latestDERPMap atomic.Pointer[tailcfg.DERPMap] + derpMesh *derpmesh.Mesh + derpMeshTLSConfig *tls.Config + replicaPingSingleflight singleflight.Group + replicaErrMut sync.Mutex + replicaErr string + latestDERPMap atomic.Pointer[tailcfg.DERPMap] // Used for graceful shutdown. Required for the dialer. ctx context.Context @@ -166,29 +177,10 @@ func New(ctx context.Context, opts *Options) (*Server, error) { return nil, xerrors.Errorf("%q is a workspace proxy, not a primary coderd instance", opts.DashboardURL) } - meshRootCA := x509.NewCertPool() - for _, certificate := range opts.TLSCertificates { - for _, certificatePart := range certificate.Certificate { - certificate, err := x509.ParseCertificate(certificatePart) - if err != nil { - return nil, xerrors.Errorf("parse certificate %s: %w", certificate.Subject.CommonName, err) - } - meshRootCA.AddCert(certificate) - } - } - // This TLS configuration spoofs access from the access URL hostname - // assuming that the certificates provided will cover that hostname. - // - // Replica sync and DERP meshing require accessing replicas via their - // internal IP addresses, and if TLS is configured we use the same - // certificates. 
- meshTLSConfig := &tls.Config{ - MinVersion: tls.VersionTLS12, - Certificates: opts.TLSCertificates, - RootCAs: meshRootCA, - ServerName: opts.AccessURL.Hostname(), + meshTLSConfig, err := replicasync.CreateDERPMeshTLSConfig(opts.AccessURL.Hostname(), opts.TLSCertificates) + if err != nil { + return nil, xerrors.Errorf("create DERP mesh tls config: %w", err) } - derpServer := derp.NewServer(key.NewNode(), tailnet.Logger(opts.Logger.Named("net.derp"))) ctx, cancel := context.WithCancel(context.Background()) @@ -202,14 +194,13 @@ func New(ctx context.Context, opts *Options) (*Server, error) { PrometheusRegistry: opts.PrometheusRegistry, SDKClient: client, derpMesh: derpmesh.New(opts.Logger.Named("net.derpmesh"), derpServer, meshTLSConfig), + derpMeshTLSConfig: meshTLSConfig, ctx: ctx, cancel: cancel, } // Register the workspace proxy with the primary coderd instance and start a // goroutine to periodically re-register. - replicaID := uuid.New() - osHostname := cliutil.Hostname() registerLoop, regResp, err := client.RegisterWorkspaceProxyLoop(ctx, wsproxysdk.RegisterWorkspaceProxyLoopOpts{ Logger: opts.Logger, Request: wsproxysdk.RegisterWorkspaceProxyRequest{ @@ -217,8 +208,8 @@ func New(ctx context.Context, opts *Options) (*Server, error) { WildcardHostname: opts.AppHostname, DerpEnabled: opts.DERPEnabled, DerpOnly: opts.DERPOnly, - ReplicaID: replicaID, - ReplicaHostname: osHostname, + ReplicaID: uuid.New(), + ReplicaHostname: cliutil.Hostname(), ReplicaError: "", ReplicaRelayAddress: opts.DERPServerRelayAddress, Version: buildinfo.Version(), @@ -433,9 +424,10 @@ func (s *Server) Close() error { return err } -func (*Server) mutateRegister(_ *wsproxysdk.RegisterWorkspaceProxyRequest) { - // TODO: we should probably ping replicas similarly to the replicasync - // package in the primary and update req.ReplicaError accordingly. +func (s *Server) mutateRegister(req *wsproxysdk.RegisterWorkspaceProxyRequest) { + s.replicaErrMut.Lock() + defer s.replicaErrMut.Unlock() + req.ReplicaError = s.replicaErr } func (s *Server) handleRegister(res wsproxysdk.RegisterWorkspaceProxyResponse) error { @@ -448,9 +440,82 @@ func (s *Server) handleRegister(res wsproxysdk.RegisterWorkspaceProxyResponse) e s.latestDERPMap.Store(res.DERPMap) + go s.pingSiblingReplicas(res.SiblingReplicas) return nil } +func (s *Server) pingSiblingReplicas(replicas []codersdk.Replica) { + if len(replicas) == 0 { + return + } + + // Avoid pinging multiple times at once if the list hasn't changed. + relayURLs := make([]string, len(replicas)) + for i, r := range replicas { + relayURLs[i] = r.RelayAddress + } + slices.Sort(relayURLs) + singleflightStr := strings.Join(relayURLs, " ") // URLs can't contain spaces. 
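+	// The singleflight key collapses concurrent pings of an identical
+	// replica set into a single probe run.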
+
+	//nolint:dogsled
+	_, _, _ = s.replicaPingSingleflight.Do(singleflightStr, func() (any, error) {
+		const (
+			perReplicaTimeout = 3 * time.Second
+			fullTimeout       = 10 * time.Second
+		)
+		ctx, cancel := context.WithTimeout(s.ctx, fullTimeout)
+		defer cancel()
+
+		client := http.Client{
+			Timeout: perReplicaTimeout,
+			Transport: &http.Transport{
+				TLSClientConfig:   s.derpMeshTLSConfig,
+				DisableKeepAlives: true,
+			},
+		}
+		defer client.CloseIdleConnections()
+
+		errs := make(chan error, len(replicas))
+		for _, peer := range replicas {
+			go func(peer codersdk.Replica) {
+				err := replicasync.PingPeerReplica(ctx, client, peer.RelayAddress)
+				if err != nil {
+					errs <- xerrors.Errorf("ping sibling replica %s (%s): %w", peer.Hostname, peer.RelayAddress, err)
+					s.Logger.Warn(ctx, "failed to ping sibling replica, this could happen if the replica has shut down",
+						slog.F("replica_hostname", peer.Hostname),
+						slog.F("replica_relay_address", peer.RelayAddress),
+						slog.Error(err),
+					)
+					return
+				}
+				errs <- nil
+			}(peer)
+		}
+
+		replicaErrs := make([]string, 0, len(replicas))
+		for i := 0; i < len(replicas); i++ {
+			err := <-errs
+			if err != nil {
+				replicaErrs = append(replicaErrs, err.Error())
+			}
+		}
+
+		s.replicaErrMut.Lock()
+		defer s.replicaErrMut.Unlock()
+		s.replicaErr = ""
+		if len(replicaErrs) > 0 {
+			s.replicaErr = fmt.Sprintf("Failed to dial peers: %s", strings.Join(replicaErrs, ", "))
+		}
+		if s.Options.ReplicaErrCallback != nil {
+			s.Options.ReplicaErrCallback(replicas, s.replicaErr)
+		}
+
+		//nolint:nilnil // we don't actually use the return value of the
+		// singleflight here
+		return nil, nil
+	})
+}
+
 func (s *Server) handleRegisterFailure(err error) {
 	if s.ctx.Err() != nil {
 		return
@@ -519,8 +584,14 @@ func (s *Server) healthReport(rw http.ResponseWriter, r *http.Request) {
 			fmt.Sprintf("version mismatch: primary coderd (%s) != workspace proxy (%s)", primaryBuild.Version, buildinfo.Version()))
 	}
 
+	s.replicaErrMut.Lock()
+	if s.replicaErr != "" {
+		report.Errors = append(report.Errors, "High availability networking: it appears you are running more than one replica of the proxy, but the replicas are unable to establish a mesh for networking: "+s.replicaErr)
+	}
+	s.replicaErrMut.Unlock()
+
 	// TODO: We should hit the deployment config endpoint and do some config
-	// checks. We can check the version from the X-CODER-BUILD-VERSION header
+	// checks.
 	httpapi.Write(r.Context(), rw, http.StatusOK, report)
 }
 
diff --git a/enterprise/wsproxy/wsproxy_test.go b/enterprise/wsproxy/wsproxy_test.go
index e8fed4f35c594..00c482b1e5ed9 100644
--- a/enterprise/wsproxy/wsproxy_test.go
+++ b/enterprise/wsproxy/wsproxy_test.go
@@ -2,8 +2,11 @@ package wsproxy_test
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"net"
+	"net/http"
+	"net/http/httptest"
 	"net/url"
 	"testing"
 	"time"
@@ -20,6 +23,7 @@ import (
 	"cdr.dev/slog"
 	"cdr.dev/slog/sloggers/slogtest"
 	"github.com/coder/coder/v2/agent/agenttest"
+	"github.com/coder/coder/v2/buildinfo"
 	"github.com/coder/coder/v2/cli/clibase"
 	"github.com/coder/coder/v2/coderd/coderdtest"
 	"github.com/coder/coder/v2/coderd/healthcheck/derphealth"
@@ -29,6 +33,7 @@ import (
 	"github.com/coder/coder/v2/cryptorand"
 	"github.com/coder/coder/v2/enterprise/coderd/coderdenttest"
 	"github.com/coder/coder/v2/enterprise/coderd/license"
+	"github.com/coder/coder/v2/enterprise/wsproxy/wsproxysdk"
 	"github.com/coder/coder/v2/provisioner/echo"
 	"github.com/coder/coder/v2/testutil"
 )
@@ -128,21 +133,20 @@ func TestDERP(t *testing.T) {
 	})
 
 	// Create a proxy that is never started.
- createProxyCtx := testutil.Context(t, testutil.WaitLong) - _, err := client.CreateWorkspaceProxy(createProxyCtx, codersdk.CreateWorkspaceProxyRequest{ + ctx := testutil.Context(t, testutil.WaitLong) + _, err := client.CreateWorkspaceProxy(ctx, codersdk.CreateWorkspaceProxyRequest{ Name: "never-started-proxy", }) require.NoError(t, err) // Wait for both running proxies to become healthy. require.Eventually(t, func() bool { - healthCtx := testutil.Context(t, testutil.WaitLong) - err := api.ProxyHealth.ForceUpdate(healthCtx) + err := api.ProxyHealth.ForceUpdate(ctx) if !assert.NoError(t, err) { return false } - regions, err := client.Regions(healthCtx) + regions, err := client.Regions(ctx) if !assert.NoError(t, err) { return false } @@ -264,7 +268,8 @@ resourceLoop: t.Run("ConnectDERP", func(t *testing.T) { t.Parallel() - connInfo, err := client.WorkspaceAgentConnectionInfo(testutil.Context(t, testutil.WaitLong), agentID) + ctx := testutil.Context(t, testutil.WaitLong) + connInfo, err := client.WorkspaceAgentConnectionInfo(ctx, agentID) require.NoError(t, err) require.NotNil(t, connInfo.DERPMap) require.Len(t, connInfo.DERPMap.Regions, 3+len(api.DeploymentValues.DERP.Server.STUNAddresses.Value())) @@ -287,7 +292,6 @@ resourceLoop: OmitDefaultRegions: true, } - ctx := testutil.Context(t, testutil.WaitLong) report := derphealth.Report{} report.Run(ctx, &derphealth.ReportOptions{ DERPMap: derpMap, @@ -350,14 +354,14 @@ func TestDERPEndToEnd(t *testing.T) { }) // Wait for the proxy to become healthy. + ctx := testutil.Context(t, testutil.WaitLong) require.Eventually(t, func() bool { - healthCtx := testutil.Context(t, testutil.WaitLong) - err := api.ProxyHealth.ForceUpdate(healthCtx) + err := api.ProxyHealth.ForceUpdate(ctx) if !assert.NoError(t, err) { return false } - regions, err := client.Regions(healthCtx) + regions, err := client.Regions(ctx) if !assert.NoError(t, err) { return false } @@ -413,7 +417,6 @@ resourceLoop: _ = coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID) // Connect to the workspace agent. - ctx := testutil.Context(t, testutil.WaitLong) conn, err := client.DialWorkspaceAgent(ctx, agentID, &codersdk.DialWorkspaceAgentOptions{ Logger: slogtest.Make(t, &slogtest.Options{ IgnoreErrors: true, @@ -534,6 +537,243 @@ func TestDERPMesh(t *testing.T) { } } +// TestWorkspaceProxyDERPMeshProbe ensures that each replica pings every other +// replica in the same region as itself periodically. +func TestWorkspaceProxyDERPMeshProbe(t *testing.T) { + t.Parallel() + createProxyRegion := func(ctx context.Context, t *testing.T, client *codersdk.Client, name string) codersdk.UpdateWorkspaceProxyResponse { + t.Helper() + proxyRes, err := client.CreateWorkspaceProxy(ctx, codersdk.CreateWorkspaceProxyRequest{ + Name: name, + Icon: "/emojis/flag.png", + }) + require.NoError(t, err, "failed to create workspace proxy") + return proxyRes + } + + registerBrokenProxy := func(ctx context.Context, t *testing.T, primaryAccessURL *url.URL, accessURL, token string) { + t.Helper() + // Create a HTTP server that always replies with 500. + srv := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.WriteHeader(http.StatusInternalServerError) + })) + t.Cleanup(srv.Close) + + // Register a proxy. 
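+		// Its relay address points at the always-500 server above, so any
+		// mesh probe against this replica will fail.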
+		wsproxyClient := wsproxysdk.New(primaryAccessURL)
+		wsproxyClient.SetSessionToken(token)
+
+		hostname, err := cryptorand.String(6)
+		require.NoError(t, err)
+		_, err = wsproxyClient.RegisterWorkspaceProxy(ctx, wsproxysdk.RegisterWorkspaceProxyRequest{
+			AccessURL:           accessURL,
+			WildcardHostname:    "",
+			DerpEnabled:         true,
+			DerpOnly:            false,
+			ReplicaID:           uuid.New(),
+			ReplicaHostname:     hostname,
+			ReplicaError:        "",
+			ReplicaRelayAddress: srv.URL,
+			Version:             buildinfo.Version(),
+		})
+		require.NoError(t, err)
+	}
+
+	t.Run("ProbeOK", func(t *testing.T) {
+		t.Parallel()
+
+		deploymentValues := coderdtest.DeploymentValues(t)
+		deploymentValues.Experiments = []string{
+			"*",
+		}
+
+		client, closer, api, _ := coderdenttest.NewWithAPI(t, &coderdenttest.Options{
+			Options: &coderdtest.Options{
+				DeploymentValues:         deploymentValues,
+				AppHostname:              "*.primary.test.coder.com",
+				IncludeProvisionerDaemon: true,
+				RealIPConfig: &httpmw.RealIPConfig{
+					TrustedOrigins: []*net.IPNet{{
+						IP:   net.ParseIP("127.0.0.1"),
+						Mask: net.CIDRMask(8, 32),
+					}},
+					TrustedHeaders: []string{
+						"CF-Connecting-IP",
+					},
+				},
+			},
+			LicenseOptions: &coderdenttest.LicenseOptions{
+				Features: license.Features{
+					codersdk.FeatureWorkspaceProxy: 1,
+				},
+			},
+		})
+		t.Cleanup(func() {
+			_ = closer.Close()
+		})
+
+		// Register (but don't start) a proxy in a different region. Since it
+		// lives in another region, it shouldn't affect this region's mesh.
+		ctx := testutil.Context(t, testutil.WaitLong)
+		fakeProxyRes := createProxyRegion(ctx, t, client, "fake-proxy")
+		registerBrokenProxy(ctx, t, api.AccessURL, "https://fake-proxy.test.coder.com", fakeProxyRes.ProxyToken)
+
+		proxyURL, err := url.Parse("https://proxy1.test.coder.com")
+		require.NoError(t, err)
+
+		// Create 6 proxy replicas.
+		const count = 6
+		var (
+			sessionToken    = ""
+			proxies         = [count]coderdenttest.WorkspaceProxy{}
+			replicaPingDone = [count]bool{}
+		)
+		for i := range proxies {
+			i := i
+			proxies[i] = coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{
+				Name:     "proxy-1",
+				Token:    sessionToken,
+				ProxyURL: proxyURL,
+				ReplicaPingCallback: func(replicas []codersdk.Replica, err string) {
+					if len(replicas) != count-1 {
+						// Still warming up...
+						return
+					}
+					replicaPingDone[i] = true
+					assert.Emptyf(t, err, "replica %d ping callback error", i)
+				},
+			})
+			if i == 0 {
+				sessionToken = proxies[i].Options.ProxySessionToken
+			}
+		}
+
+		// Force all proxies to re-register immediately. This ensures the DERP
+		// mesh is up-to-date. In production this will happen automatically
+		// after about 15 seconds.
+		for i, proxy := range proxies {
+			err := proxy.RegisterNow()
+			require.NoErrorf(t, err, "failed to force proxy %d to re-register", i)
+		}
+
+		// Ensure that all proxies have pinged.
+		require.Eventually(t, func() bool {
+			ok := true
+			for i := range proxies {
+				if !replicaPingDone[i] {
+					t.Logf("replica %d has not pinged yet", i)
+					ok = false
+				}
+			}
+			return ok
+		}, testutil.WaitLong, testutil.IntervalSlow)
+		t.Log("all replicas have pinged")
+
+		// Check they're all healthy according to /healthz-report.
+		for _, proxy := range proxies {
+			// GET /healthz-report
+			u := proxy.ServerURL.ResolveReference(&url.URL{Path: "/healthz-report"})
+			req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
+			require.NoError(t, err)
+			resp, err := http.DefaultClient.Do(req)
+			require.NoError(t, err)
+
+			var respJSON codersdk.ProxyHealthReport
+			err = json.NewDecoder(resp.Body).Decode(&respJSON)
+			resp.Body.Close()
+			require.NoError(t, err)
+
+			require.Empty(t, respJSON.Errors, "proxy is not healthy")
+		}
+	})
+
+	// Register one proxy, then pretend to register 5 others. This should cause
+	// the mesh to fail and return an error.
+	t.Run("ProbeFail", func(t *testing.T) {
+		t.Parallel()
+
+		deploymentValues := coderdtest.DeploymentValues(t)
+		deploymentValues.Experiments = []string{
+			"*",
+		}
+
+		client, closer, api, _ := coderdenttest.NewWithAPI(t, &coderdenttest.Options{
+			Options: &coderdtest.Options{
+				DeploymentValues:         deploymentValues,
+				AppHostname:              "*.primary.test.coder.com",
+				IncludeProvisionerDaemon: true,
+				RealIPConfig: &httpmw.RealIPConfig{
+					TrustedOrigins: []*net.IPNet{{
+						IP:   net.ParseIP("127.0.0.1"),
+						Mask: net.CIDRMask(8, 32),
+					}},
+					TrustedHeaders: []string{
+						"CF-Connecting-IP",
+					},
+				},
+			},
+			LicenseOptions: &coderdenttest.LicenseOptions{
+				Features: license.Features{
+					codersdk.FeatureWorkspaceProxy: 1,
+				},
+			},
+		})
+		t.Cleanup(func() {
+			_ = closer.Close()
+		})
+
+		proxyURL, err := url.Parse("https://proxy2.test.coder.com")
+		require.NoError(t, err)
+
+		// Create 1 real proxy replica.
+		const fakeCount = 5
+		replicaPingErr := make(chan string, 4)
+		proxy := coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{
+			Name:     "proxy-2",
+			ProxyURL: proxyURL,
+			ReplicaPingCallback: func(replicas []codersdk.Replica, err string) {
+				if len(replicas) != fakeCount {
+					// Still warming up...
+					return
+				}
+				replicaPingErr <- err
+			},
+		})
+
+		// Register (but don't start wsproxy.Server) 5 other proxies in the same
+		// region. Since they registered recently they should be included in the
+		// mesh. We create an HTTP server on the relay address that always
+		// responds with 500 so probes fail.
+		ctx := testutil.Context(t, testutil.WaitLong)
+		for i := 0; i < fakeCount; i++ {
+			registerBrokenProxy(ctx, t, api.AccessURL, proxyURL.String(), proxy.Options.ProxySessionToken)
+		}
+
+		// Force the proxy to re-register immediately.
+		err = proxy.RegisterNow()
+		require.NoError(t, err, "failed to force proxy to re-register")
+
+		// Wait for the ping to fail.
+		replicaErr := testutil.RequireRecvCtx(ctx, t, replicaPingErr)
+		require.NotEmpty(t, replicaErr, "replica ping error")
+
+		// GET /healthz-report
+		u := proxy.ServerURL.ResolveReference(&url.URL{Path: "/healthz-report"})
+		req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
+		require.NoError(t, err)
+		resp, err := http.DefaultClient.Do(req)
+		require.NoError(t, err)
+
+		var respJSON codersdk.ProxyHealthReport
+		err = json.NewDecoder(resp.Body).Decode(&respJSON)
+		resp.Body.Close()
+		require.NoError(t, err)
+
+		require.Len(t, respJSON.Errors, 1, "expected exactly one health report error")
+		require.Contains(t, respJSON.Errors[0], "High availability networking")
+	})
+}
+
 func TestWorkspaceProxyWorkspaceApps(t *testing.T) {
 	t.Parallel()

From 129dd4078199a66f60ac29eca4991a1da8864a48 Mon Sep 17 00:00:00 2001
From: Michael Smith
Date: Thu, 15 Feb 2024 16:44:53 -0500
Subject: [PATCH 4/5] fix: improve clipboard support on HTTP connections and older browsers (#12178)

* fix: add future-proofing for clipboard copies on http connections

* docs: clean up comment formatting
---
 site/src/hooks/useClipboard.ts | 26 ++++++++++++++++++++++----
 1 file changed, 22 insertions(+), 4 deletions(-)

diff --git a/site/src/hooks/useClipboard.ts b/site/src/hooks/useClipboard.ts
index ed50a67f44e92..f1cd662361ad9 100644
--- a/site/src/hooks/useClipboard.ts
+++ b/site/src/hooks/useClipboard.ts
@@ -22,7 +22,7 @@ export const useClipboard = (textToCopy: string): UseClipboardResult => {
         setIsCopied(false);
       }, 1000);
     } catch (err) {
-      const isCopied = simulateClipboardWrite();
+      const isCopied = simulateClipboardWrite(textToCopy);
       if (isCopied) {
         setIsCopied(true);
         timeoutIdRef.current = window.setTimeout(() => {
@@ -44,12 +44,16 @@ export const useClipboard = (textToCopy: string): UseClipboardResult => {
 };
 
 /**
+ * Provides a fallback clipboard method for when browsers do not have access
+ * to the clipboard API (because the browser is older, or because the
+ * deployment is served over plain HTTP; the clipboard API is only available
+ * in secure contexts).
+ *
  * It feels silly that you have to make a whole dummy input just to simulate a
  * clipboard, but that's really the recommended approach for older browsers.
  *
  * @see {@link https://web.dev/patterns/clipboard/copy-text?hl=en}
  */
-function simulateClipboardWrite(): boolean {
+function simulateClipboardWrite(textToCopy: string): boolean {
   const previousFocusTarget = document.activeElement;
   const dummyInput = document.createElement("input");
@@ -70,12 +74,26 @@
   style.border = "0";
 
   document.body.appendChild(dummyInput);
+  dummyInput.value = textToCopy;
   dummyInput.focus();
   dummyInput.select();
 
-  const isCopied = document.execCommand("copy");
-  dummyInput.remove();
+  /**
+   * The document.execCommand method is officially deprecated. Browsers are free
+   * to remove the method entirely or choose to turn it into a no-op function
+   * that always returns false. You cannot make any assumptions about how or
+   * when its functionality will be removed.
+   *
+   * @see {@link https://developer.mozilla.org/en-US/docs/Web/API/Clipboard}
+   */
+  let isCopied: boolean;
+  try {
+    isCopied = document.execCommand?.("copy") ?? 
false;
+  } catch {
+    isCopied = false;
+  }
 
+  dummyInput.remove();
   if (previousFocusTarget instanceof HTMLElement) {
     previousFocusTarget.focus();
   }

From 630ed643cb6aef971750d0b36d2b135b717b5d24 Mon Sep 17 00:00:00 2001
From: Ben
Date: Sat, 9 Mar 2024 01:05:41 +0000
Subject: [PATCH 5/5] docs: add v2.8.5 changelog

---
 docs/changelogs/README.md |  6 +++---
 docs/changelogs/v2.8.5.md | 16 ++++++++++++++++
 2 files changed, 19 insertions(+), 3 deletions(-)
 create mode 100644 docs/changelogs/v2.8.5.md

diff --git a/docs/changelogs/README.md b/docs/changelogs/README.md
index 1f14d11b38b26..3240a41bc0d50 100644
--- a/docs/changelogs/README.md
+++ b/docs/changelogs/README.md
@@ -13,8 +13,8 @@ git checkout main; git pull; git fetch --all
 export CODER_IGNORE_MISSING_COMMIT_METADATA=1
 export BRANCH=main
 ./scripts/release/generate_release_notes.sh \
-  --old-version=v2.7.1 \
-  --new-version=v2.7.2 \
+  --old-version=v2.8.0 \
+  --new-version=v2.9.0 \
   --ref=$(git rev-parse --short "${ref:-origin/$BRANCH}") \
-  > ./docs/changelogs/v2.7.2.md
+  > ./docs/changelogs/v2.9.0.md
 ```
diff --git a/docs/changelogs/v2.8.5.md b/docs/changelogs/v2.8.5.md
new file mode 100644
index 0000000000000..85a928d24d80d
--- /dev/null
+++ b/docs/changelogs/v2.8.5.md
@@ -0,0 +1,16 @@
+## Changelog
+
+- Fixed an issue where some template updates caused workspaces to stop after 2 hours, even while in use (#12446)
+- Added DERP mesh health checking to workspace proxies (#12220) (#12222)
+- Improved clipboard support on HTTP connections and older browsers (#12178)
+
+
+Compare: [`v2.8.4...v2.8.5`](https://github.com/coder/coder/compare/v2.8.4...v2.8.5)
+
+## Container image
+
+- `docker pull ghcr.io/coder/coder:v2.8.5`
+
+## Install/upgrade
+
+Refer to our docs to [install](https://coder.com/docs/v2/latest/install) or [upgrade](https://coder.com/docs/v2/latest/admin/upgrade) Coder, or use a release asset below.
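
A note on the replica-probe pattern in the wsproxy patch above: it combines a `singleflight.Group` (so overlapping probe triggers collapse into one in-flight probe), one goroutine per sibling replica with a short per-replica timeout, and a fan-in loop that folds individual ping failures into a single status string for the health report. The sketch below isolates that pattern using only the standard library plus `golang.org/x/sync/singleflight`. It is a minimal illustration, not Coder code: the function name `pingPeers`, the example replica URLs, and the `/healthz` paths are assumptions invented for this sketch.

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"strings"
	"time"

	"golang.org/x/sync/singleflight"
)

// pingPeers probes every peer URL concurrently and returns a combined
// error string; "" means every peer was reachable. The singleflight
// group collapses overlapping calls so only one probe runs at a time.
func pingPeers(ctx context.Context, group *singleflight.Group, peers []string) string {
	result, _, _ := group.Do("ping-peers", func() (interface{}, error) {
		// Overall budget for the whole fan-out, with a shorter
		// per-request timeout on the client itself.
		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
		defer cancel()

		client := &http.Client{Timeout: 3 * time.Second}
		defer client.CloseIdleConnections()

		// Fan out: one goroutine per peer. The channel is buffered so
		// every goroutine can send its result without blocking.
		errs := make(chan error, len(peers))
		for _, peer := range peers {
			go func(peer string) {
				req, err := http.NewRequestWithContext(ctx, http.MethodGet, peer, nil)
				if err != nil {
					errs <- fmt.Errorf("build request for %s: %w", peer, err)
					return
				}
				resp, err := client.Do(req)
				if err != nil {
					errs <- fmt.Errorf("ping %s: %w", peer, err)
					return
				}
				_ = resp.Body.Close()
				if resp.StatusCode != http.StatusOK {
					errs <- fmt.Errorf("ping %s: unexpected status %d", peer, resp.StatusCode)
					return
				}
				errs <- nil
			}(peer)
		}

		// Fan in: collect exactly one result per peer.
		var failures []string
		for range peers {
			if err := <-errs; err != nil {
				failures = append(failures, err.Error())
			}
		}
		if len(failures) == 0 {
			return "", nil
		}
		return "failed to dial peers: " + strings.Join(failures, ", "), nil
	})
	return result.(string)
}

func main() {
	var group singleflight.Group
	status := pingPeers(context.Background(), &group, []string{
		"https://replica-1.example.com/healthz",
		"https://replica-2.example.com/healthz",
	})
	fmt.Printf("replica status: %q\n", status)
}
```

Buffering `errs` to `len(peers)` mirrors the patch's design choice: each goroutine can deliver its result and exit as soon as its ping completes, without ever waiting on the collector.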