diff --git a/.golangci.yaml b/.golangci.yaml index f2ecce63da607..fd8946319ca1d 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -195,6 +195,11 @@ linters-settings: - name: var-naming - name: waitgroup-by-value + # irrelevant as of Go v1.22: https://go.dev/blog/loopvar-preview + govet: + disable: + - loopclosure + issues: # Rules listed here: https://github.com/securego/gosec#available-rules exclude-rules: diff --git a/cli/server.go b/cli/server.go index 79d2b132ad6e3..6a35e8aaa95ea 100644 --- a/cli/server.go +++ b/cli/server.go @@ -55,6 +55,11 @@ import ( "cdr.dev/slog" "cdr.dev/slog/sloggers/sloghuman" + "github.com/coder/pretty" + "github.com/coder/retry" + "github.com/coder/serpent" + "github.com/coder/wgtunnel/tunnelsdk" + "github.com/coder/coder/v2/buildinfo" "github.com/coder/coder/v2/cli/clilog" "github.com/coder/coder/v2/cli/cliui" @@ -64,6 +69,7 @@ import ( "github.com/coder/coder/v2/coderd/autobuild" "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/awsiamrds" + "github.com/coder/coder/v2/coderd/database/dbauthz" "github.com/coder/coder/v2/coderd/database/dbmem" "github.com/coder/coder/v2/coderd/database/dbmetrics" "github.com/coder/coder/v2/coderd/database/dbpurge" @@ -73,6 +79,7 @@ import ( "github.com/coder/coder/v2/coderd/externalauth" "github.com/coder/coder/v2/coderd/gitsshkey" "github.com/coder/coder/v2/coderd/httpmw" + "github.com/coder/coder/v2/coderd/notifications" "github.com/coder/coder/v2/coderd/oauthpki" "github.com/coder/coder/v2/coderd/prometheusmetrics" "github.com/coder/coder/v2/coderd/prometheusmetrics/insights" @@ -97,10 +104,6 @@ import ( "github.com/coder/coder/v2/provisionersdk" sdkproto "github.com/coder/coder/v2/provisionersdk/proto" "github.com/coder/coder/v2/tailnet" - "github.com/coder/pretty" - "github.com/coder/retry" - "github.com/coder/serpent" - "github.com/coder/wgtunnel/tunnelsdk" ) func createOIDCConfig(ctx context.Context, vals *codersdk.DeploymentValues) (*coderd.OIDCConfig, error) { @@ -592,6 +595,7 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd. SSHConfigOptions: configSSHOptions, }, AllowWorkspaceRenames: vals.AllowWorkspaceRenames.Value(), + NotificationsEnqueuer: notifications.NewNoopEnqueuer(), // Changed further down if notifications enabled. } if httpServers.TLSConfig != nil { options.TLSCertificates = httpServers.TLSConfig.Certificates @@ -660,6 +664,10 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd. options.OIDCConfig = oc } + experiments := coderd.ReadExperiments( + options.Logger, options.DeploymentValues.Experiments.Value(), + ) + // We'll read from this channel in the select below that tracks shutdown. If it remains // nil, that case of the select will just never fire, but it's important not to have a // "bare" read on this channel. @@ -969,6 +977,32 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd. options.WorkspaceUsageTracker = tracker defer tracker.Close() + // Manage notifications. + var ( + notificationsManager *notifications.Manager + ) + if experiments.Enabled(codersdk.ExperimentNotifications) { + cfg := options.DeploymentValues.Notifications + + // The enqueuer is responsible for enqueueing notifications to the given store. 
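+ // templateHelpers supplies template functions (such as base_url) used when rendering notification content, so links can point back at this deployment's access URL.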
+ enqueuer, err := notifications.NewStoreEnqueuer(cfg, options.Database, templateHelpers(options), logger.Named("notifications.enqueuer")) + if err != nil { + return xerrors.Errorf("failed to instantiate notification store enqueuer: %w", err) + } + options.NotificationsEnqueuer = enqueuer + + // The notification manager is responsible for: + // - creating notifiers and managing their lifecycles (notifiers are responsible for dequeueing/sending notifications) + // - keeping the store updated with status updates + notificationsManager, err = notifications.NewManager(cfg, options.Database, logger.Named("notifications.manager")) + if err != nil { + return xerrors.Errorf("failed to instantiate notification manager: %w", err) + } + + // nolint:gocritic // TODO: create own role. + notificationsManager.Run(dbauthz.AsSystemRestricted(ctx)) + } + // Wrap the server in middleware that redirects to the access URL if // the request is not to a local IP. var handler http.Handler = coderAPI.RootHandler @@ -1049,10 +1083,10 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd. case <-stopCtx.Done(): exitErr = stopCtx.Err() waitForProvisionerJobs = true - _, _ = io.WriteString(inv.Stdout, cliui.Bold("Stop caught, waiting for provisioner jobs to complete and gracefully exiting. Use ctrl+\\ to force quit")) + _, _ = io.WriteString(inv.Stdout, cliui.Bold("Stop caught, waiting for provisioner jobs to complete and gracefully exiting. Use ctrl+\\ to force quit\n")) case <-interruptCtx.Done(): exitErr = interruptCtx.Err() - _, _ = io.WriteString(inv.Stdout, cliui.Bold("Interrupt caught, gracefully exiting. Use ctrl+\\ to force quit")) + _, _ = io.WriteString(inv.Stdout, cliui.Bold("Interrupt caught, gracefully exiting. Use ctrl+\\ to force quit\n")) case <-tunnelDone: exitErr = xerrors.New("dev tunnel closed unexpectedly") case <-pubsubWatchdogTimeout: @@ -1088,6 +1122,21 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd. // Cancel any remaining in-flight requests. shutdownConns() + if notificationsManager != nil { + // Stop the notification manager, which will cause any buffered updates to the store to be flushed. + // If the Stop() call times out, messages that were sent but not reflected as such in the store will have + // their leases expire after a period of time and will be re-queued for sending. + // See CODER_NOTIFICATIONS_LEASE_PERIOD. + cliui.Info(inv.Stdout, "Shutting down notifications manager..."+"\n") + err = shutdownWithTimeout(notificationsManager.Stop, 5*time.Second) + if err != nil { + cliui.Warnf(inv.Stderr, "Notifications manager shutdown took longer than 5s, "+ + "this may result in duplicate notifications being sent: %s\n", err) + } else { + cliui.Info(inv.Stdout, "Gracefully shut down notifications manager\n") + } + } + // Shut down provisioners before waiting for WebSockets // connections to close. var wg sync.WaitGroup @@ -1227,6 +1276,15 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd. return serverCmd } +// templateHelpers builds a set of functions which can be called in templates. +// We build them here to avoid an import cycle by using coderd.Options in notifications.Manager. +// We can later use this to inject whitelabel fields when app name / logo URL are overridden. 
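+// For example, an action URL defined in a notification template could reference {{ base_url }} to render an absolute link (illustrative usage).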
+func templateHelpers(options *coderd.Options) map[string]any { + return map[string]any{ + "base_url": func() string { return options.AccessURL.String() }, + } +} + // printDeprecatedOptions loops through all command options, and prints // a warning for usage of deprecated options. func PrintDeprecatedOptions() serpent.MiddlewareFunc { diff --git a/cli/testdata/coder_server_--help.golden b/cli/testdata/coder_server_--help.golden index acd2c62ead445..d3bd1b587260a 100644 --- a/cli/testdata/coder_server_--help.golden +++ b/cli/testdata/coder_server_--help.golden @@ -326,6 +326,30 @@ can safely ignore these settings. Minimum supported version of TLS. Accepted values are "tls10", "tls11", "tls12" or "tls13". +NOTIFICATIONS OPTIONS: + --notifications-dispatch-timeout duration, $CODER_NOTIFICATIONS_DISPATCH_TIMEOUT (default: 1m0s) + How long to wait while a notification is being sent before giving up. + + --notifications-max-send-attempts int, $CODER_NOTIFICATIONS_MAX_SEND_ATTEMPTS (default: 5) + The upper limit of attempts to send a notification. + + --notifications-method string, $CODER_NOTIFICATIONS_METHOD (default: smtp) + Which delivery method to use (available options: 'smtp', 'webhook'). + +NOTIFICATIONS / EMAIL OPTIONS: + --notifications-email-from string, $CODER_NOTIFICATIONS_EMAIL_FROM + The sender's address to use. + + --notifications-email-hello string, $CODER_NOTIFICATIONS_EMAIL_HELLO (default: localhost) + The hostname identifying the SMTP server. + + --notifications-email-smarthost host:port, $CODER_NOTIFICATIONS_EMAIL_SMARTHOST (default: localhost:587) + The intermediary SMTP host through which emails are sent. + +NOTIFICATIONS / WEBHOOK OPTIONS: + --notifications-webhook-endpoint url, $CODER_NOTIFICATIONS_WEBHOOK_ENDPOINT + The endpoint to which to send webhooks. + OAUTH2 / GITHUB OPTIONS: --oauth2-github-allow-everyone bool, $CODER_OAUTH2_GITHUB_ALLOW_EVERYONE Allow all logins, setting this option means allowed orgs and teams diff --git a/cli/testdata/server-config.yaml.golden b/cli/testdata/server-config.yaml.golden index 9a34d6be56b20..b00fda26c2a7d 100644 --- a/cli/testdata/server-config.yaml.golden +++ b/cli/testdata/server-config.yaml.golden @@ -493,3 +493,58 @@ userQuietHoursSchedule: # compatibility reasons, this will be removed in a future release. # (default: false, type: bool) allowWorkspaceRenames: false +notifications: + # Which delivery method to use (available options: 'smtp', 'webhook'). + # (default: smtp, type: string) + method: smtp + # How long to wait while a notification is being sent before giving up. + # (default: 1m0s, type: duration) + dispatch-timeout: 1m0s + email: + # The sender's address to use. + # (default: , type: string) + from: "" + # The intermediary SMTP host through which emails are sent. + # (default: localhost:587, type: host:port) + smarthost: localhost:587 + # The hostname identifying the SMTP server. + # (default: localhost, type: string) + hello: localhost + webhook: + # The endpoint to which to send webhooks. + # (default: , type: url) + hello: + # The upper limit of attempts to send a notification. + # (default: 5, type: int) + max-send-attempts: 5 + # The minimum time between retries. + # (default: 5m0s, type: duration) + retry-interval: 5m0s + # The notifications system buffers message updates in memory to ease pressure on + # the database. This option controls how often it synchronizes its state with the + # database. 
The shorter this value the lower the change of state inconsistency in + # a non-graceful shutdown - but it also increases load on the database. It is + # recommended to keep this option at its default value. + # (default: 2s, type: duration) + store-sync-interval: 2s + # The notifications system buffers message updates in memory to ease pressure on + # the database. This option controls how many updates are kept in memory. The + # lower this value the lower the change of state inconsistency in a non-graceful + # shutdown - but it also increases load on the database. It is recommended to keep + # this option at its default value. + # (default: 50, type: int) + store-sync-buffer-size: 50 + # How long a notifier should lease a message. This is effectively how long a + # notification is 'owned' by a notifier, and once this period expires it will be + # available for lease by another notifier. Leasing is important in order for + # multiple running notifiers to not pick the same messages to deliver + # concurrently. This lease period will only expire if a notifier shuts down + # ungracefully; a dispatch of the notification releases the lease. + # (default: 2m0s, type: duration) + lease-period: 2m0s + # How many notifications a notifier should lease per fetch interval. + # (default: 20, type: int) + lease-count: 20 + # How often to query the database for queued notifications. + # (default: 15s, type: duration) + fetch-interval: 15s diff --git a/coderd/apidoc/docs.go b/coderd/apidoc/docs.go index cb59b53023644..538d67b81fc2d 100644 --- a/coderd/apidoc/docs.go +++ b/coderd/apidoc/docs.go @@ -9200,6 +9200,9 @@ const docTemplate = `{ "metrics_cache_refresh_interval": { "type": "integer" }, + "notifications": { + "$ref": "#/definitions/codersdk.NotificationsConfig" + }, "oauth2": { "$ref": "#/definitions/codersdk.OAuth2Config" }, @@ -9377,20 +9380,23 @@ const docTemplate = `{ "auto-fill-parameters", "multi-organization", "custom-roles", + "notifications", "workspace-usage" ], "x-enum-comments": { "ExperimentAutoFillParameters": "This should not be taken out of experiments until we have redesigned the feature.", - "ExperimentCustomRoles": "Allows creating runtime custom roles", + "ExperimentCustomRoles": "Allows creating runtime custom roles.", "ExperimentExample": "This isn't used for anything.", "ExperimentMultiOrganization": "Requires organization context for interactions, default org is assumed.", - "ExperimentWorkspaceUsage": "Enables the new workspace usage tracking" + "ExperimentNotifications": "Sends notifications via SMTP and webhooks following certain events.", + "ExperimentWorkspaceUsage": "Enables the new workspace usage tracking." 
}, "x-enum-varnames": [ "ExperimentExample", "ExperimentAutoFillParameters", "ExperimentMultiOrganization", "ExperimentCustomRoles", + "ExperimentNotifications", "ExperimentWorkspaceUsage" ] }, @@ -9925,6 +9931,97 @@ const docTemplate = `{ } } }, + "codersdk.NotificationsConfig": { + "type": "object", + "properties": { + "dispatch_timeout": { + "description": "How long to wait while a notification is being sent before giving up.", + "type": "integer" + }, + "email": { + "description": "SMTP settings.", + "allOf": [ + { + "$ref": "#/definitions/codersdk.NotificationsEmailConfig" + } + ] + }, + "fetch_interval": { + "description": "How often to query the database for queued notifications.", + "type": "integer" + }, + "lease_count": { + "description": "How many notifications a notifier should lease per fetch interval.", + "type": "integer" + }, + "lease_period": { + "description": "How long a notifier should lease a message. This is effectively how long a notification is 'owned'\nby a notifier, and once this period expires it will be available for lease by another notifier. Leasing\nis important in order for multiple running notifiers to not pick the same messages to deliver concurrently.\nThis lease period will only expire if a notifier shuts down ungracefully; a dispatch of the notification\nreleases the lease.", + "type": "integer" + }, + "max_send_attempts": { + "description": "The upper limit of attempts to send a notification.", + "type": "integer" + }, + "method": { + "description": "Which delivery method to use (available options: 'smtp', 'webhook').", + "type": "string" + }, + "retry_interval": { + "description": "The minimum time between retries.", + "type": "integer" + }, + "sync_buffer_size": { + "description": "The notifications system buffers message updates in memory to ease pressure on the database.\nThis option controls how many updates are kept in memory. The lower this value the\nlower the change of state inconsistency in a non-graceful shutdown - but it also increases load on the\ndatabase. It is recommended to keep this option at its default value.", + "type": "integer" + }, + "sync_interval": { + "description": "The notifications system buffers message updates in memory to ease pressure on the database.\nThis option controls how often it synchronizes its state with the database. The shorter this value the\nlower the change of state inconsistency in a non-graceful shutdown - but it also increases load on the\ndatabase. 
It is recommended to keep this option at its default value.", + "type": "integer" + }, + "webhook": { + "description": "Webhook settings.", + "allOf": [ + { + "$ref": "#/definitions/codersdk.NotificationsWebhookConfig" + } + ] + } + } + }, + "codersdk.NotificationsEmailConfig": { + "type": "object", + "properties": { + "from": { + "description": "The sender's address.", + "type": "string" + }, + "hello": { + "description": "The hostname identifying the SMTP server.", + "type": "string" + }, + "smarthost": { + "description": "The intermediary SMTP host through which emails are sent (host:port).", + "allOf": [ + { + "$ref": "#/definitions/serpent.HostPort" + } + ] + } + } + }, + "codersdk.NotificationsWebhookConfig": { + "type": "object", + "properties": { + "endpoint": { + "description": "The URL to which the payload will be sent with an HTTP POST request.", + "allOf": [ + { + "$ref": "#/definitions/serpent.URL" + } + ] + } + } + }, "codersdk.OAuth2AppEndpoints": { "type": "object", "properties": { diff --git a/coderd/apidoc/swagger.json b/coderd/apidoc/swagger.json index ee6dde53c0258..49dfde7a6b651 100644 --- a/coderd/apidoc/swagger.json +++ b/coderd/apidoc/swagger.json @@ -8220,6 +8220,9 @@ "metrics_cache_refresh_interval": { "type": "integer" }, + "notifications": { + "$ref": "#/definitions/codersdk.NotificationsConfig" + }, "oauth2": { "$ref": "#/definitions/codersdk.OAuth2Config" }, @@ -8393,20 +8396,23 @@ "auto-fill-parameters", "multi-organization", "custom-roles", + "notifications", "workspace-usage" ], "x-enum-comments": { "ExperimentAutoFillParameters": "This should not be taken out of experiments until we have redesigned the feature.", - "ExperimentCustomRoles": "Allows creating runtime custom roles", + "ExperimentCustomRoles": "Allows creating runtime custom roles.", "ExperimentExample": "This isn't used for anything.", "ExperimentMultiOrganization": "Requires organization context for interactions, default org is assumed.", - "ExperimentWorkspaceUsage": "Enables the new workspace usage tracking" + "ExperimentNotifications": "Sends notifications via SMTP and webhooks following certain events.", + "ExperimentWorkspaceUsage": "Enables the new workspace usage tracking." }, "x-enum-varnames": [ "ExperimentExample", "ExperimentAutoFillParameters", "ExperimentMultiOrganization", "ExperimentCustomRoles", + "ExperimentNotifications", "ExperimentWorkspaceUsage" ] }, @@ -8894,6 +8900,97 @@ } } }, + "codersdk.NotificationsConfig": { + "type": "object", + "properties": { + "dispatch_timeout": { + "description": "How long to wait while a notification is being sent before giving up.", + "type": "integer" + }, + "email": { + "description": "SMTP settings.", + "allOf": [ + { + "$ref": "#/definitions/codersdk.NotificationsEmailConfig" + } + ] + }, + "fetch_interval": { + "description": "How often to query the database for queued notifications.", + "type": "integer" + }, + "lease_count": { + "description": "How many notifications a notifier should lease per fetch interval.", + "type": "integer" + }, + "lease_period": { + "description": "How long a notifier should lease a message. This is effectively how long a notification is 'owned'\nby a notifier, and once this period expires it will be available for lease by another notifier. 
Leasing\nis important in order for multiple running notifiers to not pick the same messages to deliver concurrently.\nThis lease period will only expire if a notifier shuts down ungracefully; a dispatch of the notification\nreleases the lease.", + "type": "integer" + }, + "max_send_attempts": { + "description": "The upper limit of attempts to send a notification.", + "type": "integer" + }, + "method": { + "description": "Which delivery method to use (available options: 'smtp', 'webhook').", + "type": "string" + }, + "retry_interval": { + "description": "The minimum time between retries.", + "type": "integer" + }, + "sync_buffer_size": { + "description": "The notifications system buffers message updates in memory to ease pressure on the database.\nThis option controls how many updates are kept in memory. The lower this value the\nlower the change of state inconsistency in a non-graceful shutdown - but it also increases load on the\ndatabase. It is recommended to keep this option at its default value.", + "type": "integer" + }, + "sync_interval": { + "description": "The notifications system buffers message updates in memory to ease pressure on the database.\nThis option controls how often it synchronizes its state with the database. The shorter this value the\nlower the change of state inconsistency in a non-graceful shutdown - but it also increases load on the\ndatabase. It is recommended to keep this option at its default value.", + "type": "integer" + }, + "webhook": { + "description": "Webhook settings.", + "allOf": [ + { + "$ref": "#/definitions/codersdk.NotificationsWebhookConfig" + } + ] + } + } + }, + "codersdk.NotificationsEmailConfig": { + "type": "object", + "properties": { + "from": { + "description": "The sender's address.", + "type": "string" + }, + "hello": { + "description": "The hostname identifying the SMTP server.", + "type": "string" + }, + "smarthost": { + "description": "The intermediary SMTP host through which emails are sent (host:port).", + "allOf": [ + { + "$ref": "#/definitions/serpent.HostPort" + } + ] + } + } + }, + "codersdk.NotificationsWebhookConfig": { + "type": "object", + "properties": { + "endpoint": { + "description": "The URL to which the payload will be sent with an HTTP POST request.", + "allOf": [ + { + "$ref": "#/definitions/serpent.URL" + } + ] + } + } + }, "codersdk.OAuth2AppEndpoints": { "type": "object", "properties": { diff --git a/coderd/coderd.go b/coderd/coderd.go index 5dd9b3f171654..97b8a9337631a 100644 --- a/coderd/coderd.go +++ b/coderd/coderd.go @@ -37,6 +37,9 @@ import ( "tailscale.com/util/singleflight" "cdr.dev/slog" + "github.com/coder/quartz" + "github.com/coder/serpent" + agentproto "github.com/coder/coder/v2/agent/proto" "github.com/coder/coder/v2/buildinfo" _ "github.com/coder/coder/v2/coderd/apidoc" // Used for swagger docs. 
@@ -55,6 +58,7 @@ import ( "github.com/coder/coder/v2/coderd/httpapi" "github.com/coder/coder/v2/coderd/httpmw" "github.com/coder/coder/v2/coderd/metricscache" + "github.com/coder/coder/v2/coderd/notifications" "github.com/coder/coder/v2/coderd/portsharing" "github.com/coder/coder/v2/coderd/prometheusmetrics" "github.com/coder/coder/v2/coderd/provisionerdserver" @@ -75,8 +79,6 @@ import ( "github.com/coder/coder/v2/provisionersdk" "github.com/coder/coder/v2/site" "github.com/coder/coder/v2/tailnet" - "github.com/coder/quartz" - "github.com/coder/serpent" ) // We must only ever instantiate one httpSwagger.Handler because of a data race @@ -232,6 +234,8 @@ type Options struct { DatabaseRolluper *dbrollup.Rolluper // WorkspaceUsageTracker tracks workspace usage by the CLI. WorkspaceUsageTracker *workspacestats.UsageTracker + // NotificationsEnqueuer handles enqueueing notifications for delivery by SMTP, webhook, etc. + NotificationsEnqueuer notifications.Enqueuer } // @title Coder API @@ -420,6 +424,10 @@ func New(options *Options) *API { ) } + if options.NotificationsEnqueuer == nil { + options.NotificationsEnqueuer = notifications.NewNoopEnqueuer() + } + ctx, cancel := context.WithCancel(context.Background()) r := chi.NewRouter() @@ -1491,6 +1499,7 @@ func (api *API) CreateInMemoryTaggedProvisionerDaemon(dialCtx context.Context, n OIDCConfig: api.OIDCConfig, ExternalAuthConfigs: api.ExternalAuthConfigs, }, + api.NotificationsEnqueuer, ) if err != nil { return nil, err diff --git a/coderd/database/db2sdk/db2sdk.go b/coderd/database/db2sdk/db2sdk.go index 6734dac38d8c3..53e0cd53ad3e9 100644 --- a/coderd/database/db2sdk/db2sdk.go +++ b/coderd/database/db2sdk/db2sdk.go @@ -16,8 +16,8 @@ import ( "tailscale.com/tailcfg" "github.com/coder/coder/v2/coderd/database" - "github.com/coder/coder/v2/coderd/parameter" "github.com/coder/coder/v2/coderd/rbac" + "github.com/coder/coder/v2/coderd/render" "github.com/coder/coder/v2/coderd/workspaceapps/appurl" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/provisionersdk/proto" @@ -106,7 +106,7 @@ func TemplateVersionParameter(param database.TemplateVersionParameter) (codersdk return codersdk.TemplateVersionParameter{}, err } - descriptionPlaintext, err := parameter.Plaintext(param.Description) + descriptionPlaintext, err := render.PlaintextFromMarkdown(param.Description) if err != nil { return codersdk.TemplateVersionParameter{}, err } @@ -244,7 +244,7 @@ func TemplateInsightsParameters(parameterRows []database.GetTemplateParameterIns return nil, err } - plaintextDescription, err := parameter.Plaintext(param.Description) + plaintextDescription, err := render.PlaintextFromMarkdown(param.Description) if err != nil { return nil, err } diff --git a/coderd/database/dbauthz/dbauthz.go b/coderd/database/dbauthz/dbauthz.go index 098922527c81f..67dadd5d74e19 100644 --- a/coderd/database/dbauthz/dbauthz.go +++ b/coderd/database/dbauthz/dbauthz.go @@ -17,6 +17,7 @@ import ( "github.com/open-policy-agent/opa/topdown" "cdr.dev/slog" + "github.com/coder/coder/v2/coderd/rbac/policy" "github.com/coder/coder/v2/coderd/rbac/rolestore" @@ -1471,6 +1472,13 @@ func (q *querier) GetLogoURL(ctx context.Context) (string, error) { return q.db.GetLogoURL(ctx) } +func (q *querier) GetNotificationMessagesByStatus(ctx context.Context, arg database.GetNotificationMessagesByStatusParams) ([]database.NotificationMessage, error) { + if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceSystem); err != nil { + return nil, err + } + return 
q.db.GetNotificationMessagesByStatus(ctx, arg) +} + func (q *querier) GetOAuth2ProviderAppByID(ctx context.Context, id uuid.UUID) (database.OAuth2ProviderApp, error) { if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceOauth2App); err != nil { return database.OAuth2ProviderApp{}, err diff --git a/coderd/database/dbauthz/dbauthz_test.go b/coderd/database/dbauthz/dbauthz_test.go index 3b663d3fa9561..d85192877f87a 100644 --- a/coderd/database/dbauthz/dbauthz_test.go +++ b/coderd/database/dbauthz/dbauthz_test.go @@ -13,6 +13,7 @@ import ( "golang.org/x/xerrors" "cdr.dev/slog" + "github.com/coder/coder/v2/coderd/database/db2sdk" "github.com/coder/coder/v2/coderd/rbac/policy" "github.com/coder/coder/v2/codersdk" @@ -2486,13 +2487,21 @@ func (s *MethodTestSuite) TestSystemFunctions() { s.Run("EnqueueNotificationMessage", s.Subtest(func(db database.Store, check *expects) { // TODO: update this test once we have a specific role for notifications check.Args(database.EnqueueNotificationMessageParams{ - Method: database.NotificationMethodWebhook, + Method: database.NotificationMethodWebhook, + Payload: []byte("{}"), }).Asserts(rbac.ResourceSystem, policy.ActionCreate) })) s.Run("FetchNewMessageMetadata", s.Subtest(func(db database.Store, check *expects) { // TODO: update this test once we have a specific role for notifications check.Args(database.FetchNewMessageMetadataParams{}).Asserts(rbac.ResourceSystem, policy.ActionRead) })) + s.Run("GetNotificationMessagesByStatus", s.Subtest(func(db database.Store, check *expects) { + // TODO: update this test once we have a specific role for notifications + check.Args(database.GetNotificationMessagesByStatusParams{ + Status: database.NotificationMessageStatusLeased, + Limit: 10, + }).Asserts(rbac.ResourceSystem, policy.ActionRead) + })) } func (s *MethodTestSuite) TestOAuth2ProviderApps() { diff --git a/coderd/database/dbmem/dbmem.go b/coderd/database/dbmem/dbmem.go index ec7becdfd39c9..3db958cb9a307 100644 --- a/coderd/database/dbmem/dbmem.go +++ b/coderd/database/dbmem/dbmem.go @@ -21,6 +21,8 @@ import ( "golang.org/x/exp/slices" "golang.org/x/xerrors" + "github.com/coder/coder/v2/coderd/notifications/types" + "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/dbtime" "github.com/coder/coder/v2/coderd/rbac" @@ -62,6 +64,7 @@ func New() database.Store { auditLogs: make([]database.AuditLog, 0), files: make([]database.File, 0), gitSSHKey: make([]database.GitSSHKey, 0), + notificationMessages: make([]database.NotificationMessage, 0), parameterSchemas: make([]database.ParameterSchema, 0), provisionerDaemons: make([]database.ProvisionerDaemon, 0), workspaceAgents: make([]database.WorkspaceAgent, 0), @@ -156,6 +159,7 @@ type data struct { groups []database.Group jfrogXRayScans []database.JfrogXrayScan licenses []database.License + notificationMessages []database.NotificationMessage oauth2ProviderApps []database.OAuth2ProviderApp oauth2ProviderAppSecrets []database.OAuth2ProviderAppSecret oauth2ProviderAppCodes []database.OAuth2ProviderAppCode @@ -917,13 +921,45 @@ func (*FakeQuerier) AcquireLock(_ context.Context, _ int64) error { return xerrors.New("AcquireLock must only be called within a transaction") } -func (*FakeQuerier) AcquireNotificationMessages(_ context.Context, arg database.AcquireNotificationMessagesParams) ([]database.AcquireNotificationMessagesRow, error) { +// AcquireNotificationMessages implements the *basic* business logic, but is *not* exhaustive or meant to be 1:1 with +// the real 
AcquireNotificationMessages query. +func (q *FakeQuerier) AcquireNotificationMessages(_ context.Context, arg database.AcquireNotificationMessagesParams) ([]database.AcquireNotificationMessagesRow, error) { err := validateDatabaseType(arg) if err != nil { return nil, err } - // nolint:nilnil // Irrelevant. - return nil, nil + + q.mutex.Lock() + defer q.mutex.Unlock() + + var out []database.AcquireNotificationMessagesRow + for _, nm := range q.notificationMessages { + if len(out) >= int(arg.Count) { + break + } + + acquirableStatuses := []database.NotificationMessageStatus{database.NotificationMessageStatusPending, database.NotificationMessageStatusTemporaryFailure} + if !slices.Contains(acquirableStatuses, nm.Status) { + continue + } + + // Mimic mutation in database query. + nm.UpdatedAt = sql.NullTime{Time: dbtime.Now(), Valid: true} + nm.Status = database.NotificationMessageStatusLeased + nm.StatusReason = sql.NullString{String: fmt.Sprintf("Enqueued by notifier %d", arg.NotifierID), Valid: true} + nm.LeasedUntil = sql.NullTime{Time: dbtime.Now().Add(time.Second * time.Duration(arg.LeaseSeconds)), Valid: true} + + out = append(out, database.AcquireNotificationMessagesRow{ + ID: nm.ID, + Payload: nm.Payload, + Method: nm.Method, + CreatedBy: nm.CreatedBy, + TitleTemplate: "This is a title with {{.Labels.variable}}", + BodyTemplate: "This is a body with {{.Labels.variable}}", + }) + } + + return out, nil } func (q *FakeQuerier) AcquireProvisionerJob(_ context.Context, arg database.AcquireProvisionerJobParams) (database.ProvisionerJob, error) { @@ -1776,12 +1812,37 @@ func (q *FakeQuerier) DeleteWorkspaceAgentPortSharesByTemplate(_ context.Context return nil } -func (*FakeQuerier) EnqueueNotificationMessage(_ context.Context, arg database.EnqueueNotificationMessageParams) (database.NotificationMessage, error) { +func (q *FakeQuerier) EnqueueNotificationMessage(_ context.Context, arg database.EnqueueNotificationMessageParams) (database.NotificationMessage, error) { err := validateDatabaseType(arg) if err != nil { return database.NotificationMessage{}, err } - return database.NotificationMessage{}, nil + + q.mutex.Lock() + defer q.mutex.Unlock() + + var payload types.MessagePayload + err = json.Unmarshal(arg.Payload, &payload) + if err != nil { + return database.NotificationMessage{}, err + } + + nm := database.NotificationMessage{ + ID: arg.ID, + UserID: arg.UserID, + Method: arg.Method, + Payload: arg.Payload, + NotificationTemplateID: arg.NotificationTemplateID, + Targets: arg.Targets, + CreatedBy: arg.CreatedBy, + // Default fields. 
+ CreatedAt: dbtime.Now(), + Status: database.NotificationMessageStatusPending, + } + + q.notificationMessages = append(q.notificationMessages, nm) + + return nm, err } func (q *FakeQuerier) FavoriteWorkspace(_ context.Context, arg uuid.UUID) error { @@ -1808,7 +1869,19 @@ func (*FakeQuerier) FetchNewMessageMetadata(_ context.Context, arg database.Fetc if err != nil { return database.FetchNewMessageMetadataRow{}, err } - return database.FetchNewMessageMetadataRow{}, nil + + actions, err := json.Marshal([]types.TemplateAction{{URL: "http://xyz.com", Label: "XYZ"}}) + if err != nil { + return database.FetchNewMessageMetadataRow{}, err + } + + return database.FetchNewMessageMetadataRow{ + UserEmail: "test@test.com", + UserName: "Testy McTester", + NotificationName: "Some notification", + Actions: actions, + UserID: arg.UserID, + }, nil } func (q *FakeQuerier) GetAPIKeyByID(_ context.Context, id string) (database.APIKey, error) { @@ -2667,6 +2740,26 @@ func (q *FakeQuerier) GetLogoURL(_ context.Context) (string, error) { return q.logoURL, nil } +func (q *FakeQuerier) GetNotificationMessagesByStatus(_ context.Context, arg database.GetNotificationMessagesByStatusParams) ([]database.NotificationMessage, error) { + err := validateDatabaseType(arg) + if err != nil { + return nil, err + } + + var out []database.NotificationMessage + for _, m := range q.notificationMessages { + if len(out) > int(arg.Limit) { + return out, nil + } + + if m.Status == arg.Status { + out = append(out, m) + } + } + + return out, nil +} + func (q *FakeQuerier) GetOAuth2ProviderAppByID(_ context.Context, id uuid.UUID) (database.OAuth2ProviderApp, error) { q.mutex.Lock() defer q.mutex.Unlock() diff --git a/coderd/database/dbmetrics/dbmetrics.go b/coderd/database/dbmetrics/dbmetrics.go index fbaf7d4fc0b4e..0a7ecd4fb5f10 100644 --- a/coderd/database/dbmetrics/dbmetrics.go +++ b/coderd/database/dbmetrics/dbmetrics.go @@ -732,6 +732,13 @@ func (m metricsStore) GetLogoURL(ctx context.Context) (string, error) { return url, err } +func (m metricsStore) GetNotificationMessagesByStatus(ctx context.Context, arg database.GetNotificationMessagesByStatusParams) ([]database.NotificationMessage, error) { + start := time.Now() + r0, r1 := m.s.GetNotificationMessagesByStatus(ctx, arg) + m.queryLatencies.WithLabelValues("GetNotificationMessagesByStatus").Observe(time.Since(start).Seconds()) + return r0, r1 +} + func (m metricsStore) GetOAuth2ProviderAppByID(ctx context.Context, id uuid.UUID) (database.OAuth2ProviderApp, error) { start := time.Now() r0, r1 := m.s.GetOAuth2ProviderAppByID(ctx, id) diff --git a/coderd/database/dbmock/dbmock.go b/coderd/database/dbmock/dbmock.go index 7f00a57587216..982a6472ec16c 100644 --- a/coderd/database/dbmock/dbmock.go +++ b/coderd/database/dbmock/dbmock.go @@ -1452,6 +1452,21 @@ func (mr *MockStoreMockRecorder) GetLogoURL(arg0 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLogoURL", reflect.TypeOf((*MockStore)(nil).GetLogoURL), arg0) } +// GetNotificationMessagesByStatus mocks base method. +func (m *MockStore) GetNotificationMessagesByStatus(arg0 context.Context, arg1 database.GetNotificationMessagesByStatusParams) ([]database.NotificationMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNotificationMessagesByStatus", arg0, arg1) + ret0, _ := ret[0].([]database.NotificationMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNotificationMessagesByStatus indicates an expected call of GetNotificationMessagesByStatus. 
+func (mr *MockStoreMockRecorder) GetNotificationMessagesByStatus(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNotificationMessagesByStatus", reflect.TypeOf((*MockStore)(nil).GetNotificationMessagesByStatus), arg0, arg1) +} + // GetOAuth2ProviderAppByID mocks base method. func (m *MockStore) GetOAuth2ProviderAppByID(arg0 context.Context, arg1 uuid.UUID) (database.OAuth2ProviderApp, error) { m.ctrl.T.Helper() diff --git a/coderd/database/migrations/000221_notifications.up.sql b/coderd/database/migrations/000221_notifications.up.sql index 567ed87d80764..29a6b912d3e20 100644 --- a/coderd/database/migrations/000221_notifications.up.sql +++ b/coderd/database/migrations/000221_notifications.up.sql @@ -52,7 +52,7 @@ CREATE INDEX idx_notification_messages_status ON notification_messages (status); -- TODO: autogenerate constants which reference the UUIDs INSERT INTO notification_templates (id, name, title_template, body_template, "group", actions) VALUES ('f517da0b-cdc9-410f-ab89-a86107c420ed', 'Workspace Deleted', E'Workspace "{{.Labels.name}}" deleted', - E'Hi {{.UserName}}\n\nYour workspace **{{.Labels.name}}** was deleted.\nThe specified reason was "**{{.Labels.reason}}**".', + E'Hi {{.UserName}}\n\nYour workspace **{{.Labels.name}}** was deleted.\nThe specified reason was "**{{.Labels.reason}}{{ if .Labels.initiator }} ({{ .Labels.initiator }}){{end}}**".', 'Workspace Events', '[ { "label": "View workspaces", diff --git a/coderd/database/querier.go b/coderd/database/querier.go index 179a5e06039ff..75ade1dc12e5e 100644 --- a/coderd/database/querier.go +++ b/coderd/database/querier.go @@ -160,6 +160,7 @@ type sqlcQuerier interface { GetLicenseByID(ctx context.Context, id int32) (License, error) GetLicenses(ctx context.Context) ([]License, error) GetLogoURL(ctx context.Context) (string, error) + GetNotificationMessagesByStatus(ctx context.Context, arg GetNotificationMessagesByStatusParams) ([]NotificationMessage, error) GetOAuth2ProviderAppByID(ctx context.Context, id uuid.UUID) (OAuth2ProviderApp, error) GetOAuth2ProviderAppCodeByID(ctx context.Context, id uuid.UUID) (OAuth2ProviderAppCode, error) GetOAuth2ProviderAppCodeByPrefix(ctx context.Context, secretPrefix []byte) (OAuth2ProviderAppCode, error) diff --git a/coderd/database/queries.sql.go b/coderd/database/queries.sql.go index cd48412c2ff40..95f25ee1dbd11 100644 --- a/coderd/database/queries.sql.go +++ b/coderd/database/queries.sql.go @@ -3579,6 +3579,53 @@ func (q *sqlQuerier) FetchNewMessageMetadata(ctx context.Context, arg FetchNewMe return i, err } +const getNotificationMessagesByStatus = `-- name: GetNotificationMessagesByStatus :many +SELECT id, notification_template_id, user_id, method, status, status_reason, created_by, payload, attempt_count, targets, created_at, updated_at, leased_until, next_retry_after FROM notification_messages WHERE status = $1 LIMIT $2::int +` + +type GetNotificationMessagesByStatusParams struct { + Status NotificationMessageStatus `db:"status" json:"status"` + Limit int32 `db:"limit" json:"limit"` +} + +func (q *sqlQuerier) GetNotificationMessagesByStatus(ctx context.Context, arg GetNotificationMessagesByStatusParams) ([]NotificationMessage, error) { + rows, err := q.db.QueryContext(ctx, getNotificationMessagesByStatus, arg.Status, arg.Limit) + if err != nil { + return nil, err + } + defer rows.Close() + var items []NotificationMessage + for rows.Next() { + var i NotificationMessage + if err := rows.Scan( + &i.ID, + 
&i.NotificationTemplateID, + &i.UserID, + &i.Method, + &i.Status, + &i.StatusReason, + &i.CreatedBy, + &i.Payload, + &i.AttemptCount, + pq.Array(&i.Targets), + &i.CreatedAt, + &i.UpdatedAt, + &i.LeasedUntil, + &i.NextRetryAfter, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const deleteOAuth2ProviderAppByID = `-- name: DeleteOAuth2ProviderAppByID :exec DELETE FROM oauth2_provider_apps WHERE id = $1 ` diff --git a/coderd/database/queries/notifications.sql b/coderd/database/queries/notifications.sql index 8cc31e0661927..2949c8f86e27b 100644 --- a/coderd/database/queries/notifications.sql +++ b/coderd/database/queries/notifications.sql @@ -125,3 +125,6 @@ WHERE id IN FROM notification_messages AS nested WHERE nested.updated_at < NOW() - INTERVAL '7 days'); +-- name: GetNotificationMessagesByStatus :many +SELECT * FROM notification_messages WHERE status = @status LIMIT sqlc.arg('limit')::int; + diff --git a/coderd/notifications/dispatch/smtp.go b/coderd/notifications/dispatch/smtp.go new file mode 100644 index 0000000000000..9473a1666974d --- /dev/null +++ b/coderd/notifications/dispatch/smtp.go @@ -0,0 +1,332 @@ +package dispatch + +import ( + "bytes" + "context" + _ "embed" + "fmt" + "mime/multipart" + "mime/quotedprintable" + "net" + "net/mail" + "net/smtp" + "net/textproto" + "os" + "strings" + "time" + + "github.com/google/uuid" + "golang.org/x/xerrors" + + "cdr.dev/slog" + + "github.com/coder/coder/v2/coderd/notifications/render" + "github.com/coder/coder/v2/coderd/notifications/types" + markdown "github.com/coder/coder/v2/coderd/render" + "github.com/coder/coder/v2/codersdk" +) + +var ( + ValidationNoFromAddressErr = xerrors.New("no 'from' address defined") + ValidationNoToAddressErr = xerrors.New("no 'to' address(es) defined") + ValidationNoSmarthostHostErr = xerrors.New("smarthost 'host' is not defined, or is invalid") + ValidationNoSmarthostPortErr = xerrors.New("smarthost 'port' is not defined, or is invalid") + ValidationNoHelloErr = xerrors.New("'hello' not defined") + + //go:embed smtp/html.gotmpl + htmlTemplate string + //go:embed smtp/plaintext.gotmpl + plainTemplate string +) + +// SMTPHandler is responsible for dispatching notification messages via SMTP. +// NOTE: auth and TLS is currently *not* enabled in this initial thin slice. +// TODO: implement auth +// TODO: implement TLS +type SMTPHandler struct { + cfg codersdk.NotificationsEmailConfig + log slog.Logger +} + +func NewSMTPHandler(cfg codersdk.NotificationsEmailConfig, log slog.Logger) *SMTPHandler { + return &SMTPHandler{cfg: cfg, log: log} +} + +func (s *SMTPHandler) Dispatcher(payload types.MessagePayload, titleTmpl, bodyTmpl string) (DeliveryFunc, error) { + // First render the subject & body into their own discrete strings. + subject, err := markdown.PlaintextFromMarkdown(titleTmpl) + if err != nil { + return nil, xerrors.Errorf("render subject: %w", err) + } + + htmlBody := markdown.HTMLFromMarkdown(bodyTmpl) + plainBody, err := markdown.PlaintextFromMarkdown(bodyTmpl) + if err != nil { + return nil, xerrors.Errorf("render plaintext body: %w", err) + } + + // Then, reuse these strings in the HTML & plain body templates. 
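+ // "_subject" and "_body" are reserved label keys consumed by the embedded smtp/html.gotmpl and smtp/plaintext.gotmpl templates.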
+ payload.Labels["_subject"] = subject + payload.Labels["_body"] = htmlBody + htmlBody, err = render.GoTemplate(htmlTemplate, payload, nil) + if err != nil { + return nil, xerrors.Errorf("render full html template: %w", err) + } + payload.Labels["_body"] = plainBody + plainBody, err = render.GoTemplate(plainTemplate, payload, nil) + if err != nil { + return nil, xerrors.Errorf("render full plaintext template: %w", err) + } + + return s.dispatch(subject, htmlBody, plainBody, payload.UserEmail), nil +} + +// dispatch returns a DeliveryFunc capable of delivering a notification via SMTP. +// +// NOTE: this is heavily inspired by Alertmanager's email notifier: +// https://github.com/prometheus/alertmanager/blob/342f6a599ce16c138663f18ed0b880e777c3017d/notify/email/email.go +func (s *SMTPHandler) dispatch(subject, htmlBody, plainBody, to string) DeliveryFunc { + return func(ctx context.Context, msgID uuid.UUID) (bool, error) { + select { + case <-ctx.Done(): + return false, ctx.Err() + default: + } + + var ( + c *smtp.Client + conn net.Conn + err error + ) + + s.log.Debug(ctx, "dispatching via SMTP", slog.F("msg_id", msgID)) + + // Dial the smarthost to establish a connection. + smarthost, smarthostPort, err := s.smarthost() + if err != nil { + return false, xerrors.Errorf("'smarthost' validation: %w", err) + } + if smarthostPort == "465" { + return false, xerrors.New("TLS is not currently supported") + } + + var d net.Dialer + // Outer context has a deadline (see CODER_NOTIFICATIONS_DISPATCH_TIMEOUT). + conn, err = d.DialContext(ctx, "tcp", fmt.Sprintf("%s:%s", smarthost, smarthostPort)) + if err != nil { + return true, xerrors.Errorf("establish connection to server: %w", err) + } + + // Create an SMTP client. + c, err = smtp.NewClient(conn, smarthost) + if err != nil { + if cerr := conn.Close(); cerr != nil { + s.log.Warn(ctx, "failed to close connection", slog.Error(cerr)) + } + return true, xerrors.Errorf("create client: %w", err) + } + + // Cleanup. + defer func() { + if err := c.Quit(); err != nil { + s.log.Warn(ctx, "failed to close SMTP connection", slog.Error(err)) + } + }() + + // Server handshake. + hello, err := s.hello() + if err != nil { + return false, xerrors.Errorf("'hello' validation: %w", err) + } + err = c.Hello(hello) + if err != nil { + return false, xerrors.Errorf("server handshake: %w", err) + } + + // Check for authentication capabilities. + // if ok, mech := c.Extension("AUTH"); ok { + // auth, err := s.auth(mech) + // if err != nil { + // return true, xerrors.Errorf("find auth mechanism: %w", err) + // } + // if auth != nil { + // if err := c.Auth(auth); err != nil { + // return true, xerrors.Errorf("%T auth: %w", auth, err) + // } + // } + //} + + // Sender identification. + from, err := s.validateFromAddr(s.cfg.From.String()) + if err != nil { + return false, xerrors.Errorf("'from' validation: %w", err) + } + err = c.Mail(from) + if err != nil { + // This is retryable because the server may be temporarily down. + return true, xerrors.Errorf("sender identification: %w", err) + } + + // Recipient designation. + to, err := s.validateToAddrs(to) + if err != nil { + return false, xerrors.Errorf("'to' validation: %w", err) + } + for _, addr := range to { + err = c.Rcpt(addr) + if err != nil { + // This is a retryable case because the server may be temporarily down. + // The addresses are already validated, although it is possible that the server might disagree - in which case + // this will lead to some spurious retries, but that's not a big deal. 
+ return true, xerrors.Errorf("recipient designation: %w", err) + } + } + + // Start message transmission. + message, err := c.Data() + if err != nil { + return true, xerrors.Errorf("message transmission: %w", err) + } + defer message.Close() + + // Transmit message headers. + msg := &bytes.Buffer{} + multipartBuffer := &bytes.Buffer{} + multipartWriter := multipart.NewWriter(multipartBuffer) + _, _ = fmt.Fprintf(msg, "From: %s\r\n", from) + _, _ = fmt.Fprintf(msg, "To: %s\r\n", strings.Join(to, ", ")) + _, _ = fmt.Fprintf(msg, "Subject: %s\r\n", subject) + _, _ = fmt.Fprintf(msg, "Message-Id: %s@%s\r\n", msgID, s.hostname()) + _, _ = fmt.Fprintf(msg, "Date: %s\r\n", time.Now().Format(time.RFC1123Z)) + _, _ = fmt.Fprintf(msg, "Content-Type: multipart/alternative; boundary=%s\r\n", multipartWriter.Boundary()) + _, _ = fmt.Fprintf(msg, "MIME-Version: 1.0\r\n\r\n") + _, err = message.Write(msg.Bytes()) + if err != nil { + return false, xerrors.Errorf("write headers: %w", err) + } + + // Transmit message body. + + // Text body + w, err := multipartWriter.CreatePart(textproto.MIMEHeader{ + "Content-Transfer-Encoding": {"quoted-printable"}, + "Content-Type": {"text/plain; charset=UTF-8"}, + }) + if err != nil { + return false, xerrors.Errorf("create part for text body: %w", err) + } + qw := quotedprintable.NewWriter(w) + _, err = qw.Write([]byte(plainBody)) + if err != nil { + return true, xerrors.Errorf("write text part: %w", err) + } + err = qw.Close() + if err != nil { + return true, xerrors.Errorf("close text part: %w", err) + } + + // HTML body + // Preferred body placed last per section 5.1.4 of RFC 2046 + // https://www.ietf.org/rfc/rfc2046.txt + w, err = multipartWriter.CreatePart(textproto.MIMEHeader{ + "Content-Transfer-Encoding": {"quoted-printable"}, + "Content-Type": {"text/html; charset=UTF-8"}, + }) + if err != nil { + return false, xerrors.Errorf("create part for HTML body: %w", err) + } + qw = quotedprintable.NewWriter(w) + _, err = qw.Write([]byte(htmlBody)) + if err != nil { + return true, xerrors.Errorf("write HTML part: %w", err) + } + err = qw.Close() + if err != nil { + return true, xerrors.Errorf("close HTML part: %w", err) + } + + err = multipartWriter.Close() + if err != nil { + return false, xerrors.Errorf("close multipartWriter: %w", err) + } + + _, err = message.Write(multipartBuffer.Bytes()) + if err != nil { + return false, xerrors.Errorf("write body buffer: %w", err) + } + + // Returning false, nil indicates successful send (i.e. non-retryable non-error) + return false, nil + } +} + +// auth returns a value which implements the smtp.Auth based on the available auth mechanism. 
+// func (*SMTPHandler) auth(_ string) (smtp.Auth, error) { +// return nil, nil +//} + +func (*SMTPHandler) validateFromAddr(from string) (string, error) { + addrs, err := mail.ParseAddressList(from) + if err != nil { + return "", xerrors.Errorf("parse 'from' address: %w", err) + } + if len(addrs) != 1 { + return "", ValidationNoFromAddressErr + } + return from, nil +} + +func (s *SMTPHandler) validateToAddrs(to string) ([]string, error) { + addrs, err := mail.ParseAddressList(to) + if err != nil { + return nil, xerrors.Errorf("parse 'to' addresses: %w", err) + } + if len(addrs) == 0 { + s.log.Warn(context.Background(), "no valid 'to' address(es) defined; some may be invalid", slog.F("defined", to)) + return nil, ValidationNoToAddressErr + } + + var out []string + for _, addr := range addrs { + out = append(out, addr.Address) + } + + return out, nil +} + +// smarthost retrieves the host/port defined and validates them. +// Does not allow overriding. +// nolint:revive // documented. +func (s *SMTPHandler) smarthost() (string, string, error) { + host := s.cfg.Smarthost.Host + port := s.cfg.Smarthost.Port + + // We don't validate the contents themselves; this will be done by the underlying SMTP library. + if host == "" { + return "", "", ValidationNoSmarthostHostErr + } + if port == "" { + return "", "", ValidationNoSmarthostPortErr + } + + return host, port, nil +} + +// hello retrieves the hostname identifying the SMTP server. +// Does not allow overriding. +func (s *SMTPHandler) hello() (string, error) { + val := s.cfg.Hello.String() + if val == "" { + return "", ValidationNoHelloErr + } + return val, nil +} + +func (*SMTPHandler) hostname() string { + h, err := os.Hostname() + // If we can't get the hostname, we'll use localhost + if err != nil { + h = "localhost.localdomain" + } + return h +} diff --git a/coderd/notifications/dispatch/smtp/html.gotmpl b/coderd/notifications/dispatch/smtp/html.gotmpl new file mode 100644 index 0000000000000..fc34a701ecc61 --- /dev/null +++ b/coderd/notifications/dispatch/smtp/html.gotmpl @@ -0,0 +1,43 @@ + + + + + + {{ .Labels._subject }} + + +
+  <table role="presentation" width="100%" border="0" cellspacing="0" cellpadding="0">
+    <tr>
+      <td align="center" style="padding: 24px;">
+        <table role="presentation" width="600" border="0" cellspacing="0" cellpadding="0" style="background: #ffffff; border-radius: 8px; font-family: Helvetica, Arial, sans-serif;">
+          <tr>
+            <td style="padding: 24px 32px;">
+              <h1 style="font-size: 20px; margin: 0 0 16px 0;">
+                {{ .Labels._subject }}
+              </h1>
+              <div style="font-size: 14px; line-height: 1.5; color: #374151;">
+                {{ .Labels._body }}
+              </div>
+              {{ range $action := .Actions }}
+              <a href="{{ $action.URL }}" style="display: inline-block; margin: 16px 8px 0 0; padding: 8px 16px; background: #000000; color: #ffffff; text-decoration: none; border-radius: 6px;">{{ $action.Label }}</a>
+              {{ end }}
+            </td>
+          </tr>
+          <tr>
+            <td style="padding: 16px 32px; font-size: 12px; color: #6b7280;">
+              © 2024 Coder. All rights reserved.
+            </td>
+          </tr>
+        </table>
+      </td>
+    </tr>
+  </table>
+ + \ No newline at end of file diff --git a/coderd/notifications/dispatch/smtp/plaintext.gotmpl b/coderd/notifications/dispatch/smtp/plaintext.gotmpl new file mode 100644 index 0000000000000..ecc60611d04bd --- /dev/null +++ b/coderd/notifications/dispatch/smtp/plaintext.gotmpl @@ -0,0 +1,5 @@ +{{ .Labels._body }} + +{{ range $action := .Actions }} +{{ $action.Label }}: {{ $action.URL }} +{{ end }} \ No newline at end of file diff --git a/coderd/notifications/dispatch/spec.go b/coderd/notifications/dispatch/spec.go new file mode 100644 index 0000000000000..037a0ebb4a1bf --- /dev/null +++ b/coderd/notifications/dispatch/spec.go @@ -0,0 +1,13 @@ +package dispatch + +import ( + "context" + + "github.com/google/uuid" +) + +// DeliveryFunc delivers the notification. +// The first return param indicates whether a retry can be attempted (i.e. a temporary error), and the second returns +// any error that may have arisen. +// If (false, nil) is returned, that is considered a successful dispatch. +type DeliveryFunc func(ctx context.Context, msgID uuid.UUID) (retryable bool, err error) diff --git a/coderd/notifications/dispatch/webhook.go b/coderd/notifications/dispatch/webhook.go new file mode 100644 index 0000000000000..c1fb47ea35692 --- /dev/null +++ b/coderd/notifications/dispatch/webhook.go @@ -0,0 +1,105 @@ +package dispatch + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "net/http" + + "github.com/google/uuid" + "golang.org/x/xerrors" + + "cdr.dev/slog" + + "github.com/coder/coder/v2/coderd/notifications/types" + markdown "github.com/coder/coder/v2/coderd/render" + "github.com/coder/coder/v2/codersdk" +) + +// WebhookHandler dispatches notification messages via an HTTP POST webhook. +type WebhookHandler struct { + cfg codersdk.NotificationsWebhookConfig + log slog.Logger + + cl *http.Client +} + +// WebhookPayload describes the JSON payload to be delivered to the configured webhook endpoint. +type WebhookPayload struct { + Version string `json:"_version"` + MsgID uuid.UUID `json:"msg_id"` + Payload types.MessagePayload `json:"payload"` + Title string `json:"title"` + Body string `json:"body"` +} + +func NewWebhookHandler(cfg codersdk.NotificationsWebhookConfig, log slog.Logger) *WebhookHandler { + return &WebhookHandler{cfg: cfg, log: log, cl: &http.Client{}} +} + +func (w *WebhookHandler) Dispatcher(payload types.MessagePayload, titleTmpl, bodyTmpl string) (DeliveryFunc, error) { + if w.cfg.Endpoint.String() == "" { + return nil, xerrors.New("webhook endpoint not defined") + } + + title, err := markdown.PlaintextFromMarkdown(titleTmpl) + if err != nil { + return nil, xerrors.Errorf("render title: %w", err) + } + body, err := markdown.PlaintextFromMarkdown(bodyTmpl) + if err != nil { + return nil, xerrors.Errorf("render body: %w", err) + } + + return w.dispatch(payload, title, body, w.cfg.Endpoint.String()), nil +} + +func (w *WebhookHandler) dispatch(msgPayload types.MessagePayload, title, body, endpoint string) DeliveryFunc { + return func(ctx context.Context, msgID uuid.UUID) (retryable bool, err error) { + // Prepare payload. + payload := WebhookPayload{ + Version: "1.0", + MsgID: msgID, + Title: title, + Body: body, + Payload: msgPayload, + } + m, err := json.Marshal(payload) + if err != nil { + return false, xerrors.Errorf("marshal payload: %v", err) + } + + // Prepare request. + // Outer context has a deadline (see CODER_NOTIFICATIONS_DISPATCH_TIMEOUT). 
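+ // Binding the request to ctx means a slow or unresponsive endpoint is abandoned once that timeout elapses, and the attempt is returned as retryable.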
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(m)) + if err != nil { + return false, xerrors.Errorf("create HTTP request: %v", err) + } + req.Header.Set("Content-Type", "application/json") + + // Send request. + resp, err := w.cl.Do(req) + if err != nil { + return true, xerrors.Errorf("failed to send HTTP request: %v", err) + } + defer resp.Body.Close() + + // Handle response. + if resp.StatusCode/100 > 2 { + // Body could be quite long here, let's grab the first 512B and hope it contains useful debug info. + respBody := make([]byte, 512) + lr := io.LimitReader(resp.Body, int64(len(respBody))) + n, err := lr.Read(respBody) + if err != nil && !errors.Is(err, io.EOF) { + return true, xerrors.Errorf("non-200 response (%d), read body: %w", resp.StatusCode, err) + } + w.log.Warn(ctx, "unsuccessful delivery", slog.F("status_code", resp.StatusCode), + slog.F("response", respBody[:n]), slog.F("msg_id", msgID)) + return true, xerrors.Errorf("non-200 response (%d)", resp.StatusCode) + } + + return false, nil + } +} diff --git a/coderd/notifications/enqueuer.go b/coderd/notifications/enqueuer.go new file mode 100644 index 0000000000000..f7b5c4655f477 --- /dev/null +++ b/coderd/notifications/enqueuer.go @@ -0,0 +1,129 @@ +package notifications + +import ( + "context" + "encoding/json" + "text/template" + + "github.com/google/uuid" + "golang.org/x/xerrors" + + "cdr.dev/slog" + + "github.com/coder/coder/v2/coderd/database" + "github.com/coder/coder/v2/coderd/notifications/render" + "github.com/coder/coder/v2/coderd/notifications/types" + "github.com/coder/coder/v2/codersdk" +) + +type StoreEnqueuer struct { + store Store + log slog.Logger + + // TODO: expand this to allow for each notification to have custom delivery methods, or multiple, or none. + // For example, Larry might want email notifications for "workspace deleted" notifications, but Harry wants + // Slack notifications, and Mary doesn't want any. + method database.NotificationMethod + // helpers holds a map of template funcs which are used when rendering templates. These need to be passed in because + // the template funcs will return values which are inappropriately encapsulated in this struct. + helpers template.FuncMap +} + +// NewStoreEnqueuer creates an Enqueuer implementation which can persist notification messages in the store. +func NewStoreEnqueuer(cfg codersdk.NotificationsConfig, store Store, helpers template.FuncMap, log slog.Logger) (*StoreEnqueuer, error) { + var method database.NotificationMethod + if err := method.Scan(cfg.Method.String()); err != nil { + return nil, xerrors.Errorf("given notification method %q is invalid", cfg.Method) + } + + return &StoreEnqueuer{ + store: store, + log: log, + method: method, + helpers: helpers, + }, nil +} + +// Enqueue queues a notification message for later delivery. +// Messages will be dequeued by a notifier later and dispatched. 
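+//
+// An illustrative call (variable names are hypothetical):
+//
+//	_, err := enqueuer.Enqueue(ctx, user.ID, notifications.TemplateWorkspaceDeleted,
+//		map[string]string{"name": workspace.Name, "reason": "autodeleted due to dormancy"}, "autobuild")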
+func (s *StoreEnqueuer) Enqueue(ctx context.Context, userID, templateID uuid.UUID, labels map[string]string, createdBy string, targets ...uuid.UUID) (*uuid.UUID, error) { + payload, err := s.buildPayload(ctx, userID, templateID, labels) + if err != nil { + s.log.Warn(ctx, "failed to build payload", slog.F("template_id", templateID), slog.F("user_id", userID), slog.Error(err)) + return nil, xerrors.Errorf("enqueue notification (payload build): %w", err) + } + + input, err := json.Marshal(payload) + if err != nil { + return nil, xerrors.Errorf("failed encoding input labels: %w", err) + } + + id := uuid.New() + msg, err := s.store.EnqueueNotificationMessage(ctx, database.EnqueueNotificationMessageParams{ + ID: id, + UserID: userID, + NotificationTemplateID: templateID, + Method: s.method, + Payload: input, + Targets: targets, + CreatedBy: createdBy, + }) + if err != nil { + s.log.Warn(ctx, "failed to enqueue notification", slog.F("template_id", templateID), slog.F("input", input), slog.Error(err)) + return nil, xerrors.Errorf("enqueue notification: %w", err) + } + + s.log.Debug(ctx, "enqueued notification", slog.F("msg_id", msg.ID)) + return &id, nil +} + +// buildPayload creates the payload that the notification will for variable substitution and/or routing. +// The payload contains information about the recipient, the event that triggered the notification, and any subsequent +// actions which can be taken by the recipient. +func (s *StoreEnqueuer) buildPayload(ctx context.Context, userID uuid.UUID, templateID uuid.UUID, labels map[string]string) (*types.MessagePayload, error) { + metadata, err := s.store.FetchNewMessageMetadata(ctx, database.FetchNewMessageMetadataParams{ + UserID: userID, + NotificationTemplateID: templateID, + }) + if err != nil { + return nil, xerrors.Errorf("new message metadata: %w", err) + } + + // Execute any templates in actions. + out, err := render.GoTemplate(string(metadata.Actions), types.MessagePayload{}, s.helpers) + if err != nil { + return nil, xerrors.Errorf("render actions: %w", err) + } + metadata.Actions = []byte(out) + + var actions []types.TemplateAction + if err = json.Unmarshal(metadata.Actions, &actions); err != nil { + return nil, xerrors.Errorf("new message metadata: parse template actions: %w", err) + } + + return &types.MessagePayload{ + Version: "1.0", + + NotificationName: metadata.NotificationName, + + UserID: metadata.UserID.String(), + UserEmail: metadata.UserEmail, + UserName: metadata.UserName, + + Actions: actions, + Labels: labels, + }, nil +} + +// NoopEnqueuer implements the Enqueuer interface but performs a noop. +type NoopEnqueuer struct{} + +// NewNoopEnqueuer builds a NoopEnqueuer which is used to fulfill the contract for enqueuing notifications, if ExperimentNotifications is not set. +func NewNoopEnqueuer() *NoopEnqueuer { + return &NoopEnqueuer{} +} + +func (*NoopEnqueuer) Enqueue(context.Context, uuid.UUID, uuid.UUID, map[string]string, string, ...uuid.UUID) (*uuid.UUID, error) { + // nolint:nilnil // irrelevant. + return nil, nil +} diff --git a/coderd/notifications/events.go b/coderd/notifications/events.go new file mode 100644 index 0000000000000..6cb2870748b61 --- /dev/null +++ b/coderd/notifications/events.go @@ -0,0 +1,9 @@ +package notifications + +import "github.com/google/uuid" + +// These vars are mapped to UUIDs in the notification_templates table. +// TODO: autogenerate these. + +// Workspace-related events. 
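+// TemplateWorkspaceDeleted corresponds to the 'Workspace Deleted' template seeded by migration 000221_notifications.up.sql.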
+var TemplateWorkspaceDeleted = uuid.MustParse("f517da0b-cdc9-410f-ab89-a86107c420ed") diff --git a/coderd/notifications/manager.go b/coderd/notifications/manager.go new file mode 100644 index 0000000000000..36e82d65af31b --- /dev/null +++ b/coderd/notifications/manager.go @@ -0,0 +1,367 @@ +package notifications + +import ( + "context" + "sync" + "time" + + "github.com/google/uuid" + "golang.org/x/sync/errgroup" + "golang.org/x/xerrors" + + "cdr.dev/slog" + + "github.com/coder/coder/v2/coderd/database" + "github.com/coder/coder/v2/coderd/notifications/dispatch" + "github.com/coder/coder/v2/codersdk" +) + +var ErrInvalidDispatchTimeout = xerrors.New("dispatch timeout must be less than lease period") + +// Manager manages all notifications being enqueued and dispatched. +// +// Manager maintains a notifier: this consumes the queue of notification messages in the store. +// +// The notifier dequeues messages from the store _CODER_NOTIFICATIONS_LEASE_COUNT_ at a time and concurrently "dispatches" +// these messages, meaning they are sent by their respective methods (email, webhook, etc). +// +// To reduce load on the store, successful and failed dispatches are accumulated in two separate buffers (success/failure) +// of size CODER_NOTIFICATIONS_STORE_SYNC_BUFFER_SIZE in the Manager, and updates are sent to the store about which messages +// succeeded or failed every CODER_NOTIFICATIONS_STORE_SYNC_INTERVAL. +// These buffers are limited in size, and naturally introduce some backpressure; if there are hundreds of messages to be +// sent but they start failing too quickly, the buffers (receive channels) will fill up and block senders, which will +// slow down the dispatch rate. +// +// NOTE: The above backpressure mechanism only works within the same process, which may not be true forever, such as if +// we split notifiers out into separate targets for greater processing throughput; in this case we will need an +// alternative mechanism for handling backpressure. +type Manager struct { + cfg codersdk.NotificationsConfig + + store Store + log slog.Logger + + notifier *notifier + handlers map[database.NotificationMethod]Handler + + success, failure chan dispatchResult + + runOnce sync.Once + stopOnce sync.Once + stop chan any + done chan any +} + +// NewManager instantiates a Manager, which coordinates notification enqueuing and delivery. +func NewManager(cfg codersdk.NotificationsConfig, store Store, log slog.Logger) (*Manager, error) { + // If dispatch timeout exceeds lease period, it is possible that messages can be delivered in duplicate because the + // lease can expire before the notifier gives up on the dispatch, which results in the message becoming eligible for + // being re-acquired. + if cfg.DispatchTimeout.Value() >= cfg.LeasePeriod.Value() { + return nil, ErrInvalidDispatchTimeout + } + + return &Manager{ + log: log, + cfg: cfg, + store: store, + + // Buffer successful/failed notification dispatches in memory to reduce load on the store. + // + // We keep separate buffers for success/failure right now because the bulk updates are already a bit janky, + // see BulkMarkNotificationMessagesSent/BulkMarkNotificationMessagesFailed.
If we had the ability to batch updates, + // like is offered in https://docs.sqlc.dev/en/stable/reference/query-annotations.html#batchmany, we'd have a cleaner + // approach to this - but for now this will work fine. + success: make(chan dispatchResult, cfg.StoreSyncBufferSize), + failure: make(chan dispatchResult, cfg.StoreSyncBufferSize), + + stop: make(chan any), + done: make(chan any), + + handlers: defaultHandlers(cfg, log), + }, nil +} + +// defaultHandlers builds a set of known handlers; panics if any error occurs as these handlers should be valid at compile time. +func defaultHandlers(cfg codersdk.NotificationsConfig, log slog.Logger) map[database.NotificationMethod]Handler { + return map[database.NotificationMethod]Handler{ + database.NotificationMethodSmtp: dispatch.NewSMTPHandler(cfg.SMTP, log.Named("dispatcher.smtp")), + database.NotificationMethodWebhook: dispatch.NewWebhookHandler(cfg.Webhook, log.Named("dispatcher.webhook")), + } +} + +// WithHandlers allows for tests to inject their own handlers to verify functionality. +func (m *Manager) WithHandlers(reg map[database.NotificationMethod]Handler) { + m.handlers = reg +} + +// Run initiates the control loop in the background, which spawns a given number of notifier goroutines. +// Manager requires system-level permissions to interact with the store. +// Run is only intended to be run once. +func (m *Manager) Run(ctx context.Context) { + m.log.Info(ctx, "started") + + m.runOnce.Do(func() { + // Closes when Stop() is called or context is canceled. + go func() { + err := m.loop(ctx) + if err != nil { + m.log.Error(ctx, "notification manager stopped with error", slog.Error(err)) + } + }() + }) +} + +// loop contains the main business logic of the notification manager. It is responsible for subscribing to notification +// events, creating a notifier, and publishing bulk dispatch result updates to the store. +func (m *Manager) loop(ctx context.Context) error { + defer func() { + close(m.done) + m.log.Info(context.Background(), "notification manager stopped") + }() + + // Caught a terminal signal before notifier was created, exit immediately. + select { + case <-m.stop: + m.log.Warn(ctx, "gracefully stopped") + return xerrors.Errorf("gracefully stopped") + case <-ctx.Done(): + m.log.Error(ctx, "ungracefully stopped", slog.Error(ctx.Err())) + return xerrors.Errorf("notifications: %w", ctx.Err()) + default: + } + + var eg errgroup.Group + + // Create a notifier to run concurrently, which will handle dequeueing and dispatching notifications. + m.notifier = newNotifier(m.cfg, uuid.New(), m.log, m.store, m.handlers) + eg.Go(func() error { + return m.notifier.run(ctx, m.success, m.failure) + }) + + // Periodically flush notification state changes to the store. + eg.Go(func() error { + // Every interval, collect the messages in the channels and bulk update them in the store. + tick := time.NewTicker(m.cfg.StoreSyncInterval.Value()) + defer tick.Stop() + for { + select { + case <-ctx.Done(): + // Nothing we can do in this scenario except bail out; after the message lease expires, the messages will + // be requeued and users will receive duplicates. + // This is an explicit trade-off between keeping the database load light (by bulk-updating records) and + // exactly-once delivery. + // + // The current assumption is that duplicate delivery of these messages is, at worst, slightly annoying. + // If these notifications are triggering external actions (e.g. 
via webhooks) this could be more + // consequential, and we may need a more sophisticated mechanism. + // + // TODO: mention the above tradeoff in documentation. + m.log.Warn(ctx, "exiting ungracefully", slog.Error(ctx.Err())) + + if len(m.success)+len(m.failure) > 0 { + m.log.Warn(ctx, "context canceled with pending updates in buffer, these messages will be sent again after lease expires", + slog.F("success_count", len(m.success)), slog.F("failure_count", len(m.failure))) + } + return ctx.Err() + case <-m.stop: + if len(m.success)+len(m.failure) > 0 { + m.log.Warn(ctx, "flushing buffered updates before stop", + slog.F("success_count", len(m.success)), slog.F("failure_count", len(m.failure))) + m.bulkUpdate(ctx) + m.log.Warn(ctx, "flushing updates done") + } + return nil + case <-tick.C: + m.bulkUpdate(ctx) + } + } + }) + + err := eg.Wait() + if err != nil { + m.log.Error(ctx, "manager loop exited with error", slog.Error(err)) + } + return err +} + +// BufferedUpdatesCount returns the number of buffered updates which are currently waiting to be flushed to the store. +// The returned values are for success & failure, respectively. +func (m *Manager) BufferedUpdatesCount() (success int, failure int) { + return len(m.success), len(m.failure) +} + +// bulkUpdate updates messages in the store based on the buffered successful and failed message dispatch results. +func (m *Manager) bulkUpdate(ctx context.Context) { + select { + case <-ctx.Done(): + return + default: + } + + nSuccess := len(m.success) + nFailure := len(m.failure) + + // Nothing to do. + if nSuccess+nFailure == 0 { + return + } + + var ( + successParams database.BulkMarkNotificationMessagesSentParams + failureParams database.BulkMarkNotificationMessagesFailedParams + ) + + // Read all the existing messages due for update from the channels, but don't range over the channels because they + // block until they are closed. + // + // This is vulnerable to TOCTOU, but it's fine. + // If more items are added to the success or failure channels between measuring their lengths and now, those items + // will be processed on the next bulk update. + + for i := 0; i < nSuccess; i++ { + res := <-m.success + successParams.IDs = append(successParams.IDs, res.msg) + successParams.SentAts = append(successParams.SentAts, res.ts) + } + for i := 0; i < nFailure; i++ { + res := <-m.failure + + status := database.NotificationMessageStatusPermanentFailure + if res.retryable { + status = database.NotificationMessageStatusTemporaryFailure + } + + failureParams.IDs = append(failureParams.IDs, res.msg) + failureParams.FailedAts = append(failureParams.FailedAts, res.ts) + failureParams.Statuses = append(failureParams.Statuses, status) + var reason string + if res.err != nil { + reason = res.err.Error() + } + failureParams.StatusReasons = append(failureParams.StatusReasons, reason) + } + + // Execute bulk updates for success/failure concurrently. + var wg sync.WaitGroup + wg.Add(2) + + go func() { + defer wg.Done() + if len(successParams.IDs) == 0 { + return + } + + logger := m.log.With(slog.F("type", "update_sent")) + + // Give up after waiting for the store for 30s.
+ uctx, cancel := context.WithTimeout(ctx, time.Second*30) + defer cancel() + + n, err := m.store.BulkMarkNotificationMessagesSent(uctx, successParams) + if err != nil { + logger.Error(ctx, "bulk update failed", slog.Error(err)) + return + } + + logger.Debug(ctx, "bulk update completed", slog.F("updated", n)) + }() + + go func() { + defer wg.Done() + if len(failureParams.IDs) == 0 { + return + } + + logger := m.log.With(slog.F("type", "update_failed")) + + // Give up after waiting for the store for 30s. + uctx, cancel := context.WithTimeout(ctx, time.Second*30) + defer cancel() + + failureParams.MaxAttempts = int32(m.cfg.MaxSendAttempts) + failureParams.RetryInterval = int32(m.cfg.RetryInterval.Value().Seconds()) + n, err := m.store.BulkMarkNotificationMessagesFailed(uctx, failureParams) + if err != nil { + logger.Error(ctx, "bulk update failed", slog.Error(err)) + return + } + + logger.Debug(ctx, "bulk update completed", slog.F("updated", n)) + }() + + wg.Wait() +} + +// Stop stops the notifier and waits until it has stopped. +func (m *Manager) Stop(ctx context.Context) error { + var err error + m.stopOnce.Do(func() { + select { + case <-ctx.Done(): + err = ctx.Err() + return + default: + } + + m.log.Info(context.Background(), "graceful stop requested") + + // If the notifier hasn't been started, we don't need to wait for anything. + // This is only really during testing when we want to enqueue messages only but not deliver them. + if m.notifier == nil { + close(m.done) + } else { + m.notifier.stop() + } + + // Signal the stop channel to cause loop to exit. + close(m.stop) + + // Wait for the manager loop to exit or the context to be canceled, whichever comes first. + select { + case <-ctx.Done(): + var errStr string + if ctx.Err() != nil { + errStr = ctx.Err().Error() + } + // For some reason, slog.Error returns {} for a context error. 
+ m.log.Error(context.Background(), "graceful stop failed", slog.F("err", errStr)) + err = ctx.Err() + return + case <-m.done: + m.log.Info(context.Background(), "gracefully stopped") + return + } + }) + + return err +} + +type dispatchResult struct { + notifier uuid.UUID + msg uuid.UUID + ts time.Time + err error + retryable bool +} + +func newSuccessfulDispatch(notifier, msg uuid.UUID) dispatchResult { + return dispatchResult{ + notifier: notifier, + msg: msg, + ts: time.Now(), + } +} + +func newFailedDispatch(notifier, msg uuid.UUID, err error, retryable bool) dispatchResult { + return dispatchResult{ + notifier: notifier, + msg: msg, + ts: time.Now(), + err: err, + retryable: retryable, + } +} diff --git a/coderd/notifications/manager_test.go b/coderd/notifications/manager_test.go new file mode 100644 index 0000000000000..d0d6355f0c68c --- /dev/null +++ b/coderd/notifications/manager_test.go @@ -0,0 +1,234 @@ +package notifications_test + +import ( + "context" + "encoding/json" + "sync/atomic" + "testing" + "time" + + "github.com/coder/serpent" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/xerrors" + + "cdr.dev/slog" + "cdr.dev/slog/sloggers/slogtest" + + "github.com/coder/coder/v2/coderd/database" + "github.com/coder/coder/v2/coderd/database/dbgen" + "github.com/coder/coder/v2/coderd/database/dbmem" + "github.com/coder/coder/v2/coderd/database/dbtestutil" + "github.com/coder/coder/v2/coderd/notifications" + "github.com/coder/coder/v2/coderd/notifications/dispatch" + "github.com/coder/coder/v2/coderd/notifications/types" + "github.com/coder/coder/v2/testutil" +) + +func TestBufferedUpdates(t *testing.T) { + t.Parallel() + + // setup + if !dbtestutil.WillUsePostgres() { + t.Skip("This test requires postgres") + } + + ctx, logger, db := setup(t) + interceptor := &bulkUpdateInterceptor{Store: db} + santa := &santaHandler{} + + cfg := defaultNotificationsConfig(database.NotificationMethodSmtp) + cfg.StoreSyncInterval = serpent.Duration(time.Hour) // Ensure we don't sync the store automatically. + + mgr, err := notifications.NewManager(cfg, interceptor, logger.Named("notifications-manager")) + require.NoError(t, err) + mgr.WithHandlers(map[database.NotificationMethod]notifications.Handler{ + database.NotificationMethodSmtp: santa, + }) + enq, err := notifications.NewStoreEnqueuer(cfg, interceptor, defaultHelpers(), logger.Named("notifications-enqueuer")) + require.NoError(t, err) + + user := dbgen.User(t, db, database.User{}) + + // given + _, err = enq.Enqueue(ctx, user.ID, notifications.TemplateWorkspaceDeleted, map[string]string{"nice": "true"}, "") // Will succeed. + require.NoError(t, err) + _, err = enq.Enqueue(ctx, user.ID, notifications.TemplateWorkspaceDeleted, map[string]string{"nice": "true"}, "") // Will succeed. + require.NoError(t, err) + _, err = enq.Enqueue(ctx, user.ID, notifications.TemplateWorkspaceDeleted, map[string]string{"nice": "false"}, "") // Will fail. + require.NoError(t, err) + + // when + mgr.Run(ctx) + + // then + + const ( + expectedSuccess = 2 + expectedFailure = 1 + ) + + // Wait for messages to be dispatched. + require.Eventually(t, func() bool { + return santa.naughty.Load() == expectedFailure && + santa.nice.Load() == expectedSuccess + }, testutil.WaitMedium, testutil.IntervalFast) + + // Wait for the expected number of buffered updates to be accumulated. 
+ require.Eventually(t, func() bool { + success, failure := mgr.BufferedUpdatesCount() + return success == expectedSuccess && failure == expectedFailure + }, testutil.WaitShort, testutil.IntervalFast) + + // Stop the manager which forces an update of buffered updates. + require.NoError(t, mgr.Stop(ctx)) + + // Wait until both success & failure updates have been sent to the store. + require.EventuallyWithT(t, func(ct *assert.CollectT) { + if err := interceptor.err.Load(); err != nil { + ct.Errorf("bulk update encountered error: %s", err) + // Panic when an unexpected error occurs. + ct.FailNow() + } + + assert.EqualValues(ct, expectedFailure, interceptor.failed.Load()) + assert.EqualValues(ct, expectedSuccess, interceptor.sent.Load()) + }, testutil.WaitMedium, testutil.IntervalFast) +} + +func TestBuildPayload(t *testing.T) { + t.Parallel() + + // given + const label = "Click here!" + const url = "http://xyz.com/" + helpers := map[string]any{ + "my_label": func() string { return label }, + "my_url": func() string { return url }, + } + + db := dbmem.New() + interceptor := newEnqueueInterceptor(db, + // Inject custom message metadata to influence the payload construction. + func() database.FetchNewMessageMetadataRow { + // Inject template actions which use injected help functions. + actions := []types.TemplateAction{ + { + Label: "{{ my_label }}", + URL: "{{ my_url }}", + }, + } + out, err := json.Marshal(actions) + assert.NoError(t, err) + + return database.FetchNewMessageMetadataRow{ + NotificationName: "My Notification", + Actions: out, + UserID: uuid.New(), + UserEmail: "bob@bob.com", + UserName: "bobby", + } + }) + + logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true, IgnoredErrorIs: []error{}}).Leveled(slog.LevelDebug) + enq, err := notifications.NewStoreEnqueuer(defaultNotificationsConfig(database.NotificationMethodSmtp), interceptor, helpers, logger.Named("notifications-enqueuer")) + require.NoError(t, err) + + ctx := testutil.Context(t, testutil.WaitShort) + + // when + _, err = enq.Enqueue(ctx, uuid.New(), notifications.TemplateWorkspaceDeleted, nil, "test") + require.NoError(t, err) + + // then + payload := testutil.RequireRecvCtx(ctx, t, interceptor.payload) + require.Len(t, payload.Actions, 1) + require.Equal(t, label, payload.Actions[0].Label) + require.Equal(t, url, payload.Actions[0].URL) +} + +func TestStopBeforeRun(t *testing.T) { + t.Parallel() + + ctx := context.Background() + logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true, IgnoredErrorIs: []error{}}).Leveled(slog.LevelDebug) + mgr, err := notifications.NewManager(defaultNotificationsConfig(database.NotificationMethodSmtp), dbmem.New(), logger.Named("notifications-manager")) + require.NoError(t, err) + + // Call stop before notifier is started with Run(). 
+ require.Eventually(t, func() bool { + assert.NoError(t, mgr.Stop(ctx)) + return true + }, testutil.WaitShort, testutil.IntervalFast) +} + +type bulkUpdateInterceptor struct { + notifications.Store + + sent atomic.Int32 + failed atomic.Int32 + err atomic.Value +} + +func (b *bulkUpdateInterceptor) BulkMarkNotificationMessagesSent(ctx context.Context, arg database.BulkMarkNotificationMessagesSentParams) (int64, error) { + updated, err := b.Store.BulkMarkNotificationMessagesSent(ctx, arg) + b.sent.Add(int32(updated)) + if err != nil { + b.err.Store(err) + } + return updated, err +} + +func (b *bulkUpdateInterceptor) BulkMarkNotificationMessagesFailed(ctx context.Context, arg database.BulkMarkNotificationMessagesFailedParams) (int64, error) { + updated, err := b.Store.BulkMarkNotificationMessagesFailed(ctx, arg) + b.failed.Add(int32(updated)) + if err != nil { + b.err.Store(err) + } + return updated, err +} + +// santaHandler only dispatches nice messages. +type santaHandler struct { + naughty atomic.Int32 + nice atomic.Int32 +} + +func (s *santaHandler) Dispatcher(payload types.MessagePayload, _, _ string) (dispatch.DeliveryFunc, error) { + return func(ctx context.Context, msgID uuid.UUID) (retryable bool, err error) { + if payload.Labels["nice"] != "true" { + s.naughty.Add(1) + return false, xerrors.New("be nice") + } + + s.nice.Add(1) + return false, nil + }, nil +} + +type enqueueInterceptor struct { + notifications.Store + + payload chan types.MessagePayload + metadataFn func() database.FetchNewMessageMetadataRow +} + +func newEnqueueInterceptor(db notifications.Store, metadataFn func() database.FetchNewMessageMetadataRow) *enqueueInterceptor { + return &enqueueInterceptor{Store: db, payload: make(chan types.MessagePayload, 1), metadataFn: metadataFn} +} + +func (e *enqueueInterceptor) EnqueueNotificationMessage(_ context.Context, arg database.EnqueueNotificationMessageParams) (database.NotificationMessage, error) { + var payload types.MessagePayload + err := json.Unmarshal(arg.Payload, &payload) + if err != nil { + return database.NotificationMessage{}, err + } + + e.payload <- payload + return database.NotificationMessage{}, err +} + +func (e *enqueueInterceptor) FetchNewMessageMetadata(_ context.Context, _ database.FetchNewMessageMetadataParams) (database.FetchNewMessageMetadataRow, error) { + return e.metadataFn(), nil +} diff --git a/coderd/notifications/notifications_test.go b/coderd/notifications/notifications_test.go new file mode 100644 index 0000000000000..6c2cf430fe460 --- /dev/null +++ b/coderd/notifications/notifications_test.go @@ -0,0 +1,616 @@ +package notifications_test + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "sort" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/google/uuid" + smtpmock "github.com/mocktools/go-smtp-mock/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/goleak" + + "cdr.dev/slog" + "cdr.dev/slog/sloggers/slogtest" + "github.com/coder/serpent" + + "github.com/coder/coder/v2/coderd/database" + "github.com/coder/coder/v2/coderd/database/dbgen" + "github.com/coder/coder/v2/coderd/database/dbmem" + "github.com/coder/coder/v2/coderd/database/dbtestutil" + "github.com/coder/coder/v2/coderd/notifications" + "github.com/coder/coder/v2/coderd/notifications/dispatch" + "github.com/coder/coder/v2/coderd/notifications/types" + "github.com/coder/coder/v2/coderd/util/syncmap" + "github.com/coder/coder/v2/codersdk" + 
"github.com/coder/coder/v2/testutil" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} + +// TestBasicNotificationRoundtrip enqueues a message to the store, waits for it to be acquired by a notifier, +// and passes it off to a fake handler. +// TODO: split this test up into table tests or separate tests. +func TestBasicNotificationRoundtrip(t *testing.T) { + t.Parallel() + + // setup + if !dbtestutil.WillUsePostgres() { + t.Skip("This test requires postgres") + } + ctx, logger, db := setup(t) + method := database.NotificationMethodSmtp + + // given + handler := &fakeHandler{} + + cfg := defaultNotificationsConfig(method) + mgr, err := notifications.NewManager(cfg, db, logger.Named("manager")) + require.NoError(t, err) + mgr.WithHandlers(map[database.NotificationMethod]notifications.Handler{method: handler}) + t.Cleanup(func() { + assert.NoError(t, mgr.Stop(ctx)) + }) + enq, err := notifications.NewStoreEnqueuer(cfg, db, defaultHelpers(), logger.Named("enqueuer")) + require.NoError(t, err) + + user := createSampleUser(t, db) + + // when + sid, err := enq.Enqueue(ctx, user.ID, notifications.TemplateWorkspaceDeleted, map[string]string{"type": "success"}, "test") + require.NoError(t, err) + fid, err := enq.Enqueue(ctx, user.ID, notifications.TemplateWorkspaceDeleted, map[string]string{"type": "failure"}, "test") + require.NoError(t, err) + + mgr.Run(ctx) + + // then + require.Eventually(t, func() bool { + handler.mu.RLock() + defer handler.mu.RUnlock() + return handler.succeeded == sid.String() + }, testutil.WaitLong, testutil.IntervalMedium) + require.Eventually(t, func() bool { + handler.mu.RLock() + defer handler.mu.RUnlock() + return handler.failed == fid.String() + }, testutil.WaitLong, testutil.IntervalMedium) +} + +func TestSMTPDispatch(t *testing.T) { + t.Parallel() + + // setup + if !dbtestutil.WillUsePostgres() { + t.Skip("This test requires postgres") + } + ctx, logger, db := setup(t) + + // start mock SMTP server + mockSMTPSrv := smtpmock.New(smtpmock.ConfigurationAttr{ + LogToStdout: false, + LogServerActivity: true, + }) + require.NoError(t, mockSMTPSrv.Start()) + t.Cleanup(func() { + assert.NoError(t, mockSMTPSrv.Stop()) + }) + + // given + const from = "danny@coder.com" + method := database.NotificationMethodSmtp + cfg := defaultNotificationsConfig(method) + cfg.SMTP = codersdk.NotificationsEmailConfig{ + From: from, + Smarthost: serpent.HostPort{Host: "localhost", Port: fmt.Sprintf("%d", mockSMTPSrv.PortNumber())}, + Hello: "localhost", + } + handler := newDispatchInterceptor(dispatch.NewSMTPHandler(cfg.SMTP, logger.Named("smtp"))) + mgr, err := notifications.NewManager(cfg, db, logger.Named("manager")) + require.NoError(t, err) + mgr.WithHandlers(map[database.NotificationMethod]notifications.Handler{method: handler}) + t.Cleanup(func() { + assert.NoError(t, mgr.Stop(ctx)) + }) + enq, err := notifications.NewStoreEnqueuer(cfg, db, defaultHelpers(), logger.Named("enqueuer")) + require.NoError(t, err) + + user := createSampleUser(t, db) + + // when + msgID, err := enq.Enqueue(ctx, user.ID, notifications.TemplateWorkspaceDeleted, map[string]string{}, "test") + require.NoError(t, err) + + mgr.Run(ctx) + + // then + require.Eventually(t, func() bool { + assert.Nil(t, handler.lastErr.Load()) + assert.True(t, handler.retryable.Load() == 0) + return handler.sent.Load() == 1 + }, testutil.WaitLong, testutil.IntervalMedium) + + msgs := mockSMTPSrv.MessagesAndPurge() + require.Len(t, msgs, 1) + require.Contains(t, msgs[0].MsgRequest(), fmt.Sprintf("From: %s", from)) + 
require.Contains(t, msgs[0].MsgRequest(), fmt.Sprintf("To: %s", user.Email)) + require.Contains(t, msgs[0].MsgRequest(), fmt.Sprintf("Message-Id: %s", msgID)) +} + +func TestWebhookDispatch(t *testing.T) { + t.Parallel() + + // setup + if !dbtestutil.WillUsePostgres() { + t.Skip("This test requires postgres") + } + ctx, logger, db := setup(t) + + sent := make(chan dispatch.WebhookPayload, 1) + // Mock server to simulate webhook endpoint. + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var payload dispatch.WebhookPayload + err := json.NewDecoder(r.Body).Decode(&payload) + assert.NoError(t, err) + assert.Equal(t, "application/json", r.Header.Get("Content-Type")) + + w.WriteHeader(http.StatusOK) + _, err = w.Write([]byte("noted.")) + assert.NoError(t, err) + sent <- payload + })) + defer server.Close() + + endpoint, err := url.Parse(server.URL) + require.NoError(t, err) + + // given + cfg := defaultNotificationsConfig(database.NotificationMethodWebhook) + cfg.Webhook = codersdk.NotificationsWebhookConfig{ + Endpoint: *serpent.URLOf(endpoint), + } + mgr, err := notifications.NewManager(cfg, db, logger.Named("manager")) + require.NoError(t, err) + t.Cleanup(func() { + assert.NoError(t, mgr.Stop(ctx)) + }) + enq, err := notifications.NewStoreEnqueuer(cfg, db, defaultHelpers(), logger.Named("enqueuer")) + require.NoError(t, err) + + user := dbgen.User(t, db, database.User{ + Email: "bob@coder.com", + Username: "bob", + Name: "Robert McBobbington", + }) + + // when + input := map[string]string{ + "a": "b", + "c": "d", + } + msgID, err := enq.Enqueue(ctx, user.ID, notifications.TemplateWorkspaceDeleted, input, "test") + require.NoError(t, err) + + mgr.Run(ctx) + + // then + payload := testutil.RequireRecvCtx(testutil.Context(t, testutil.WaitShort), t, sent) + require.EqualValues(t, "1.0", payload.Version) + require.Equal(t, *msgID, payload.MsgID) + require.Equal(t, payload.Payload.Labels, input) + require.Equal(t, payload.Payload.UserEmail, "bob@coder.com") + // UserName is coalesced from `name` and `username`; in this case `name` wins. + require.Equal(t, payload.Payload.UserName, "Robert McBobbington") + require.Equal(t, payload.Payload.NotificationName, "Workspace Deleted") +} + +// TestBackpressure validates that delays in processing the buffered updates will result in slowed dequeue rates. +// As a side-effect, this also tests the graceful shutdown and flushing of the buffers. +func TestBackpressure(t *testing.T) { + t.Parallel() + + // setup + if !dbtestutil.WillUsePostgres() { + t.Skip("This test requires postgres") + } + + ctx, logger, db := setup(t) + + // Mock server to simulate webhook endpoint. + var received atomic.Int32 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var payload dispatch.WebhookPayload + err := json.NewDecoder(r.Body).Decode(&payload) + assert.NoError(t, err) + + w.WriteHeader(http.StatusOK) + _, err = w.Write([]byte("noted.")) + assert.NoError(t, err) + + received.Add(1) + })) + defer server.Close() + + endpoint, err := url.Parse(server.URL) + require.NoError(t, err) + + method := database.NotificationMethodWebhook + cfg := defaultNotificationsConfig(method) + cfg.Webhook = codersdk.NotificationsWebhookConfig{ + Endpoint: *serpent.URLOf(endpoint), + } + + // Tune the queue to fetch often. 
+ const fetchInterval = time.Millisecond * 200 + const batchSize = 10 + cfg.FetchInterval = serpent.Duration(fetchInterval) + cfg.LeaseCount = serpent.Int64(batchSize) + + // Shrink buffers down and increase flush interval to provoke backpressure. + // Flush buffers every 5 fetch intervals. + const syncInterval = time.Second + cfg.StoreSyncInterval = serpent.Duration(syncInterval) + cfg.StoreSyncBufferSize = serpent.Int64(2) + + handler := newDispatchInterceptor(dispatch.NewWebhookHandler(cfg.Webhook, logger.Named("webhook"))) + + // Intercept calls to submit the buffered updates to the store. + storeInterceptor := &bulkUpdateInterceptor{Store: db} + + // given + mgr, err := notifications.NewManager(cfg, storeInterceptor, logger.Named("manager")) + require.NoError(t, err) + mgr.WithHandlers(map[database.NotificationMethod]notifications.Handler{method: handler}) + enq, err := notifications.NewStoreEnqueuer(cfg, db, defaultHelpers(), logger.Named("enqueuer")) + require.NoError(t, err) + + user := createSampleUser(t, db) + + // when + const totalMessages = 30 + for i := 0; i < totalMessages; i++ { + _, err = enq.Enqueue(ctx, user.ID, notifications.TemplateWorkspaceDeleted, map[string]string{"i": fmt.Sprintf("%d", i)}, "test") + require.NoError(t, err) + } + + // Start the notifier. + mgr.Run(ctx) + + // then + + // Wait for 3 fetch intervals, then check progress. + time.Sleep(fetchInterval * 3) + + // We expect the notifier will have dispatched ONLY the initial batch of messages. + // In other words, the notifier should have dispatched 3 batches by now, but because the buffered updates have not + // been processed: there is backpressure. + require.EqualValues(t, batchSize, handler.sent.Load()+handler.err.Load()) + // We expect that the store will have received NO updates. + require.EqualValues(t, 0, storeInterceptor.sent.Load()+storeInterceptor.failed.Load()) + + // However, when we Stop() the manager the backpressure will be relieved and the buffered updates will ALL be flushed, + // since all the goroutines that were blocked (on writing updates to the buffer) will be unblocked and will complete. + require.NoError(t, mgr.Stop(ctx)) + require.EqualValues(t, batchSize, storeInterceptor.sent.Load()+storeInterceptor.failed.Load()) +} + +func TestRetries(t *testing.T) { + t.Parallel() + + // setup + if !dbtestutil.WillUsePostgres() { + t.Skip("This test requires postgres") + } + + const maxAttempts = 3 + ctx, logger, db := setup(t) + + // given + + receivedMap := syncmap.New[uuid.UUID, int]() + // Mock server to simulate webhook endpoint. + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var payload dispatch.WebhookPayload + err := json.NewDecoder(r.Body).Decode(&payload) + assert.NoError(t, err) + + count, _ := receivedMap.LoadOrStore(payload.MsgID, 0) + count++ + receivedMap.Store(payload.MsgID, count) + + // Let the request succeed if this is its last attempt. 
+ if count == maxAttempts { + w.WriteHeader(http.StatusOK) + _, err = w.Write([]byte("noted.")) + assert.NoError(t, err) + return + } + + w.WriteHeader(http.StatusInternalServerError) + _, err = w.Write([]byte("retry again later...")) + assert.NoError(t, err) + })) + defer server.Close() + + endpoint, err := url.Parse(server.URL) + require.NoError(t, err) + + method := database.NotificationMethodWebhook + cfg := defaultNotificationsConfig(method) + cfg.Webhook = codersdk.NotificationsWebhookConfig{ + Endpoint: *serpent.URLOf(endpoint), + } + + cfg.MaxSendAttempts = maxAttempts + + // Tune intervals low to speed up test. + cfg.StoreSyncInterval = serpent.Duration(time.Millisecond * 100) + cfg.RetryInterval = serpent.Duration(time.Second) // query uses second-precision + cfg.FetchInterval = serpent.Duration(time.Millisecond * 100) + + handler := newDispatchInterceptor(dispatch.NewWebhookHandler(cfg.Webhook, logger.Named("webhook"))) + + // Intercept calls to submit the buffered updates to the store. + storeInterceptor := &bulkUpdateInterceptor{Store: db} + + mgr, err := notifications.NewManager(cfg, storeInterceptor, logger.Named("manager")) + require.NoError(t, err) + t.Cleanup(func() { + assert.NoError(t, mgr.Stop(ctx)) + }) + mgr.WithHandlers(map[database.NotificationMethod]notifications.Handler{method: handler}) + enq, err := notifications.NewStoreEnqueuer(cfg, db, defaultHelpers(), logger.Named("enqueuer")) + require.NoError(t, err) + + user := createSampleUser(t, db) + + // when + const msgCount = 5 + for i := 0; i < msgCount; i++ { + _, err = enq.Enqueue(ctx, user.ID, notifications.TemplateWorkspaceDeleted, map[string]string{"i": fmt.Sprintf("%d", i)}, "test") + require.NoError(t, err) + } + + mgr.Run(ctx) + + // then + require.Eventually(t, func() bool { + // We expect all messages to fail all attempts but the final; + return storeInterceptor.failed.Load() == msgCount*(maxAttempts-1) && + // ...and succeed on the final attempt. + storeInterceptor.sent.Load() == msgCount + }, testutil.WaitLong, testutil.IntervalFast) +} + +// TestExpiredLeaseIsRequeued validates that notification messages which are left in "leased" status will be requeued once their lease expires. +// "leased" is the status which messages are set to when they are acquired for processing, and this should not be a terminal +// state unless the Manager shuts down ungracefully; the Manager is responsible for updating these messages' statuses once +// they have been processed. +func TestExpiredLeaseIsRequeued(t *testing.T) { + t.Parallel() + + // setup + if !dbtestutil.WillUsePostgres() { + t.Skip("This test requires postgres") + } + + ctx, logger, db := setup(t) + + // given + + const ( + leasePeriod = time.Second + msgCount = 5 + method = database.NotificationMethodSmtp + ) + + cfg := defaultNotificationsConfig(method) + // Set low lease period to speed up tests. 
+ cfg.LeasePeriod = serpent.Duration(leasePeriod) + cfg.DispatchTimeout = serpent.Duration(leasePeriod - time.Millisecond) + + noopInterceptor := newNoopBulkUpdater(db) + + mgrCtx, cancelManagerCtx := context.WithCancel(context.Background()) + t.Cleanup(cancelManagerCtx) + + mgr, err := notifications.NewManager(cfg, noopInterceptor, logger.Named("manager")) + require.NoError(t, err) + enq, err := notifications.NewStoreEnqueuer(cfg, db, defaultHelpers(), logger.Named("enqueuer")) + require.NoError(t, err) + + user := createSampleUser(t, db) + + // when + var msgs []string + for i := 0; i < msgCount; i++ { + id, err := enq.Enqueue(ctx, user.ID, notifications.TemplateWorkspaceDeleted, map[string]string{"type": "success"}, "test") + require.NoError(t, err) + msgs = append(msgs, id.String()) + } + + mgr.Run(mgrCtx) + + // Wait for the messages to be acquired + <-noopInterceptor.acquiredChan + // Then cancel the context, forcing the notification manager to shutdown ungracefully (simulating a crash); leaving messages in "leased" status. + cancelManagerCtx() + + // Fetch any messages currently in "leased" status, and verify that they're exactly the ones we enqueued. + leased, err := db.GetNotificationMessagesByStatus(ctx, database.GetNotificationMessagesByStatusParams{ + Status: database.NotificationMessageStatusLeased, + Limit: msgCount, + }) + require.NoError(t, err) + + var leasedIDs []string + for _, msg := range leased { + leasedIDs = append(leasedIDs, msg.ID.String()) + } + + sort.Strings(msgs) + sort.Strings(leasedIDs) + require.EqualValues(t, msgs, leasedIDs) + + // Wait out the lease period; all messages should be eligible to be re-acquired. + time.Sleep(leasePeriod + time.Millisecond) + + // Start a new notification manager. + // Intercept calls to submit the buffered updates to the store. + storeInterceptor := &bulkUpdateInterceptor{Store: db} + handler := newDispatchInterceptor(&fakeHandler{}) + mgr, err = notifications.NewManager(cfg, storeInterceptor, logger.Named("manager")) + require.NoError(t, err) + mgr.WithHandlers(map[database.NotificationMethod]notifications.Handler{method: handler}) + + // Use regular context now. + t.Cleanup(func() { + assert.NoError(t, mgr.Stop(ctx)) + }) + mgr.Run(ctx) + + // Wait until all messages are sent & updates flushed to the database. + require.Eventually(t, func() bool { + return handler.sent.Load() == msgCount && + storeInterceptor.sent.Load() == msgCount + }, testutil.WaitLong, testutil.IntervalFast) + + // Validate that no more messages are in "leased" status. + leased, err = db.GetNotificationMessagesByStatus(ctx, database.GetNotificationMessagesByStatusParams{ + Status: database.NotificationMessageStatusLeased, + Limit: msgCount, + }) + require.NoError(t, err) + require.Len(t, leased, 0) +} + +// TestInvalidConfig validates that misconfigurations lead to errors. 
+func TestInvalidConfig(t *testing.T) { + t.Parallel() + + db := dbmem.New() + logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true, IgnoredErrorIs: []error{}}).Leveled(slog.LevelDebug) + + // given + + const ( + leasePeriod = time.Second + method = database.NotificationMethodSmtp + ) + + cfg := defaultNotificationsConfig(method) + cfg.LeasePeriod = serpent.Duration(leasePeriod) + cfg.DispatchTimeout = serpent.Duration(leasePeriod) + + _, err := notifications.NewManager(cfg, db, logger.Named("manager")) + require.ErrorIs(t, err, notifications.ErrInvalidDispatchTimeout) +} + +type fakeHandler struct { + mu sync.RWMutex + + succeeded string + failed string +} + +func (f *fakeHandler) Dispatcher(payload types.MessagePayload, _, _ string) (dispatch.DeliveryFunc, error) { + return func(ctx context.Context, msgID uuid.UUID) (retryable bool, err error) { + f.mu.Lock() + defer f.mu.Unlock() + + if payload.Labels["type"] == "success" { + f.succeeded = msgID.String() + } else { + f.failed = msgID.String() + } + return false, nil + }, nil +} + +type dispatchInterceptor struct { + handler notifications.Handler + + sent atomic.Int32 + retryable atomic.Int32 + unretryable atomic.Int32 + err atomic.Int32 + lastErr atomic.Value +} + +func newDispatchInterceptor(h notifications.Handler) *dispatchInterceptor { + return &dispatchInterceptor{ + handler: h, + } +} + +func (i *dispatchInterceptor) Dispatcher(payload types.MessagePayload, title, body string) (dispatch.DeliveryFunc, error) { + return func(ctx context.Context, msgID uuid.UUID) (retryable bool, err error) { + deliveryFn, err := i.handler.Dispatcher(payload, title, body) + if err != nil { + return false, err + } + + retryable, err = deliveryFn(ctx, msgID) + + if err != nil { + i.err.Add(1) + i.lastErr.Store(err) + } + + switch { + case !retryable && err == nil: + i.sent.Add(1) + case retryable: + i.retryable.Add(1) + case !retryable && err != nil: + i.unretryable.Add(1) + } + return retryable, err + }, nil +} + +// noopBulkUpdater pretends to perform bulk updates, but does not; leading to messages being stuck in "leased" state. 
+type noopBulkUpdater struct { + *acquireSignalingInterceptor +} + +func newNoopBulkUpdater(db notifications.Store) *noopBulkUpdater { + return &noopBulkUpdater{newAcquireSignalingInterceptor(db)} +} + +func (*noopBulkUpdater) BulkMarkNotificationMessagesSent(_ context.Context, arg database.BulkMarkNotificationMessagesSentParams) (int64, error) { + return int64(len(arg.IDs)), nil +} + +func (*noopBulkUpdater) BulkMarkNotificationMessagesFailed(_ context.Context, arg database.BulkMarkNotificationMessagesFailedParams) (int64, error) { + return int64(len(arg.IDs)), nil +} + +type acquireSignalingInterceptor struct { + notifications.Store + acquiredChan chan struct{} +} + +func newAcquireSignalingInterceptor(db notifications.Store) *acquireSignalingInterceptor { + return &acquireSignalingInterceptor{ + Store: db, + acquiredChan: make(chan struct{}, 1), + } +} + +func (n *acquireSignalingInterceptor) AcquireNotificationMessages(ctx context.Context, params database.AcquireNotificationMessagesParams) ([]database.AcquireNotificationMessagesRow, error) { + messages, err := n.Store.AcquireNotificationMessages(ctx, params) + n.acquiredChan <- struct{}{} + return messages, err +} diff --git a/coderd/notifications/notifier.go b/coderd/notifications/notifier.go new file mode 100644 index 0000000000000..b214f8a77a070 --- /dev/null +++ b/coderd/notifications/notifier.go @@ -0,0 +1,247 @@ +package notifications + +import ( + "context" + "encoding/json" + "sync" + "time" + + "github.com/google/uuid" + "golang.org/x/sync/errgroup" + "golang.org/x/xerrors" + + "github.com/coder/coder/v2/coderd/notifications/dispatch" + "github.com/coder/coder/v2/coderd/notifications/render" + "github.com/coder/coder/v2/coderd/notifications/types" + "github.com/coder/coder/v2/codersdk" + + "cdr.dev/slog" + + "github.com/coder/coder/v2/coderd/database" +) + +// notifier is a consumer of the notifications_messages queue. It dequeues messages from that table and processes them +// through a pipeline of fetch -> prepare -> render -> acquire handler -> deliver. +type notifier struct { + id uuid.UUID + cfg codersdk.NotificationsConfig + log slog.Logger + store Store + + tick *time.Ticker + stopOnce sync.Once + quit chan any + done chan any + + handlers map[database.NotificationMethod]Handler +} + +func newNotifier(cfg codersdk.NotificationsConfig, id uuid.UUID, log slog.Logger, db Store, hr map[database.NotificationMethod]Handler) *notifier { + return ¬ifier{ + id: id, + cfg: cfg, + log: log.Named("notifier").With(slog.F("notifier_id", id)), + quit: make(chan any), + done: make(chan any), + tick: time.NewTicker(cfg.FetchInterval.Value()), + store: db, + handlers: hr, + } +} + +// run is the main loop of the notifier. +func (n *notifier) run(ctx context.Context, success chan<- dispatchResult, failure chan<- dispatchResult) error { + n.log.Info(ctx, "started") + + defer func() { + close(n.done) + n.log.Info(context.Background(), "gracefully stopped") + }() + + // TODO: idea from Cian: instead of querying the database on a short interval, we could wait for pubsub notifications. + // if 100 notifications are enqueued, we shouldn't activate this routine for each one; so how to debounce these? 
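// (Illustrative sketch only, not implemented in this change: one common way to debounce such wake-ups is a
// single-slot channel that coalesces a burst of pubsub events into at most one pending signal, which the loop
// below could select on alongside the ticker. The names here are hypothetical.)
//
//	wake := make(chan struct{}, 1)
//
//	// In the pubsub callback:
//	select {
//	case wake <- struct{}{}:
//	default: // a wake-up is already pending; drop the extra signal
//	}
//
//	// In the loop below, add `case <-wake:` alongside `case <-n.tick.C:`.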
+ // PLUS we should also have an interval (but a longer one, maybe 1m) to account for retries (those will not get + // triggered by a code path, but rather by a timeout expiring which makes the message retryable) + for { + select { + case <-ctx.Done(): + return xerrors.Errorf("notifier %q context canceled: %w", n.id, ctx.Err()) + case <-n.quit: + return nil + default: + } + + // Call process() immediately (i.e. don't wait an initial tick). + err := n.process(ctx, success, failure) + if err != nil { + n.log.Error(ctx, "failed to process messages", slog.Error(err)) + } + + // Shortcut to bail out quickly if stop() has been called or the context canceled. + select { + case <-ctx.Done(): + return xerrors.Errorf("notifier %q context canceled: %w", n.id, ctx.Err()) + case <-n.quit: + return nil + case <-n.tick.C: + // sleep until next invocation + } + } +} + +// process is responsible for coordinating the retrieval, processing, and delivery of messages. +// Messages are dispatched concurrently, but they may block when success/failure channels are full. +// +// NOTE: it is _possible_ that these goroutines could block for long enough to exceed CODER_NOTIFICATIONS_DISPATCH_TIMEOUT, +// resulting in a failed attempt for each notification when their contexts are canceled; this is not possible with the +// default configurations but could be brought about by an operator tuning things incorrectly. +func (n *notifier) process(ctx context.Context, success chan<- dispatchResult, failure chan<- dispatchResult) error { + n.log.Debug(ctx, "attempting to dequeue messages") + + msgs, err := n.fetch(ctx) + if err != nil { + return xerrors.Errorf("fetch messages: %w", err) + } + + n.log.Debug(ctx, "dequeued messages", slog.F("count", len(msgs))) + if len(msgs) == 0 { + return nil + } + + var eg errgroup.Group + for _, msg := range msgs { + // A message failing to be prepared correctly should not affect other messages. + deliverFn, err := n.prepare(ctx, msg) + if err != nil { + n.log.Warn(ctx, "dispatcher construction failed", slog.F("msg_id", msg.ID), slog.Error(err)) + failure <- newFailedDispatch(n.id, msg.ID, err, false) + continue + } + + eg.Go(func() error { + // Dispatch must only return an error for exceptional cases, NOT for failed messages. + return n.deliver(ctx, msg, deliverFn, success, failure) + }) + } + + if err = eg.Wait(); err != nil { + n.log.Debug(ctx, "dispatch failed", slog.Error(err)) + return xerrors.Errorf("dispatch failed: %w", err) + } + + n.log.Debug(ctx, "dispatch completed", slog.F("count", len(msgs))) + return nil +} + +// fetch retrieves messages from the queue by "acquiring a lease" whereby this notifier is the exclusive handler of these +// messages until they are dispatched - or until the lease expires (in exceptional cases). +func (n *notifier) fetch(ctx context.Context) ([]database.AcquireNotificationMessagesRow, error) { + msgs, err := n.store.AcquireNotificationMessages(ctx, database.AcquireNotificationMessagesParams{ + Count: int32(n.cfg.LeaseCount), + MaxAttemptCount: int32(n.cfg.MaxSendAttempts), + NotifierID: n.id, + LeaseSeconds: int32(n.cfg.LeasePeriod.Value().Seconds()), + }) + if err != nil { + return nil, xerrors.Errorf("acquire messages: %w", err) + } + + return msgs, nil +} + +// prepare has two roles: +// 1. render the title & body templates +// 2. 
build a dispatcher from the given message, payload, and these templates - to be used for delivering the notification +func (n *notifier) prepare(ctx context.Context, msg database.AcquireNotificationMessagesRow) (dispatch.DeliveryFunc, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + // NOTE: when we change the format of the MessagePayload, we have to bump its version and handle unmarshalling + // differently here based on that version. + var payload types.MessagePayload + err := json.Unmarshal(msg.Payload, &payload) + if err != nil { + return nil, xerrors.Errorf("unmarshal payload: %w", err) + } + + handler, ok := n.handlers[msg.Method] + if !ok { + return nil, xerrors.Errorf("failed to resolve handler %q", msg.Method) + } + + var title, body string + if title, err = render.GoTemplate(msg.TitleTemplate, payload, nil); err != nil { + return nil, xerrors.Errorf("render title: %w", err) + } + if body, err = render.GoTemplate(msg.BodyTemplate, payload, nil); err != nil { + return nil, xerrors.Errorf("render body: %w", err) + } + + return handler.Dispatcher(payload, title, body) +} + +// deliver sends a given notification message via its defined method. +// This method *only* returns an error when a context error occurs; any other error is interpreted as a failure to +// deliver the notification and as such the message will be marked as failed (to later be optionally retried). +func (n *notifier) deliver(ctx context.Context, msg database.AcquireNotificationMessagesRow, deliver dispatch.DeliveryFunc, success, failure chan<- dispatchResult) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + ctx, cancel := context.WithTimeout(ctx, n.cfg.DispatchTimeout.Value()) + defer cancel() + logger := n.log.With(slog.F("msg_id", msg.ID), slog.F("method", msg.Method)) + + retryable, err := deliver(ctx, msg.ID) + if err != nil { + // Don't try to accumulate message responses if the context has been canceled. + // + // This message's lease will expire in the store and will be requeued. + // It's possible this will lead to a message being delivered more than once, and that is why Stop() is preferable + // instead of canceling the context. + // + // In the case of backpressure (i.e. the success/failure channels are full because the database is slow), + // we can't append any more updates to the channels otherwise this, too, will block. + if xerrors.Is(err, context.Canceled) { + return err + } + + select { + case <-ctx.Done(): + logger.Warn(context.Background(), "cannot record dispatch failure result", slog.Error(ctx.Err())) + return ctx.Err() + default: + logger.Warn(ctx, "message dispatch failed", slog.Error(err)) + failure <- newFailedDispatch(n.id, msg.ID, err, retryable) + } + } else { + select { + case <-ctx.Done(): + logger.Warn(context.Background(), "cannot record dispatch success result", slog.Error(ctx.Err())) + return ctx.Err() + default: + logger.Debug(ctx, "message dispatch succeeded") + success <- newSuccessfulDispatch(n.id, msg.ID) + } + } + + return nil +} + +// stop stops the notifier from processing any new notifications. +// This is a graceful stop, so any in-flight notifications will be completed before the notifier stops. +// Once a notifier has stopped, it cannot be restarted. 
+func (n *notifier) stop() { + n.stopOnce.Do(func() { + n.log.Info(context.Background(), "graceful stop requested") + + n.tick.Stop() + close(n.quit) + <-n.done + }) +} diff --git a/coderd/notifications/render/gotmpl.go b/coderd/notifications/render/gotmpl.go new file mode 100644 index 0000000000000..e194c9837d2a9 --- /dev/null +++ b/coderd/notifications/render/gotmpl.go @@ -0,0 +1,26 @@ +package render + +import ( + "strings" + "text/template" + + "golang.org/x/xerrors" + + "github.com/coder/coder/v2/coderd/notifications/types" +) + +// GoTemplate attempts to substitute the given payload into the given template using Go's templating syntax. +// TODO: memoize templates for memory efficiency? +func GoTemplate(in string, payload types.MessagePayload, extraFuncs template.FuncMap) (string, error) { + tmpl, err := template.New("text").Funcs(extraFuncs).Parse(in) + if err != nil { + return "", xerrors.Errorf("template parse: %w", err) + } + + var out strings.Builder + if err = tmpl.Execute(&out, payload); err != nil { + return "", xerrors.Errorf("template execute: %w", err) + } + + return out.String(), nil +} diff --git a/coderd/notifications/render/gotmpl_test.go b/coderd/notifications/render/gotmpl_test.go new file mode 100644 index 0000000000000..32970dd6cd8b6 --- /dev/null +++ b/coderd/notifications/render/gotmpl_test.go @@ -0,0 +1,59 @@ +package render_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/coder/coder/v2/coderd/notifications/render" + + "github.com/coder/coder/v2/coderd/notifications/types" +) + +func TestGoTemplate(t *testing.T) { + t.Parallel() + + const userEmail = "bob@xyz.com" + + tests := []struct { + name string + in string + payload types.MessagePayload + expectedOutput string + expectedErr error + }{ + { + name: "top-level variables are accessible and substituted", + in: "{{ .UserEmail }}", + payload: types.MessagePayload{UserEmail: userEmail}, + expectedOutput: userEmail, + expectedErr: nil, + }, + { + name: "input labels are accessible and substituted", + in: "{{ .Labels.user_email }}", + payload: types.MessagePayload{Labels: map[string]string{ + "user_email": userEmail, + }}, + expectedOutput: userEmail, + expectedErr: nil, + }, + } + + for _, tc := range tests { + tc := tc // unnecessary as of go1.22 but the linter is outdated + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + out, err := render.GoTemplate(tc.in, tc.payload, nil) + if tc.expectedErr == nil { + require.NoError(t, err) + } else { + require.ErrorIs(t, err, tc.expectedErr) + } + + require.Equal(t, tc.expectedOutput, out) + }) + } +} diff --git a/coderd/notifications/spec.go b/coderd/notifications/spec.go new file mode 100644 index 0000000000000..63f6af7101d1b --- /dev/null +++ b/coderd/notifications/spec.go @@ -0,0 +1,35 @@ +package notifications + +import ( + "context" + + "github.com/google/uuid" + + "github.com/coder/coder/v2/coderd/database" + "github.com/coder/coder/v2/coderd/notifications/dispatch" + "github.com/coder/coder/v2/coderd/notifications/types" +) + +// Store defines the API between the notifications system and the storage. +// This abstraction is in place so that we can intercept the direct database interactions, or (later) swap out these calls +// with dRPC calls should we want to split the notifiers out into their own component for high availability/throughput. 
+// TODO: don't use database types here +type Store interface { + AcquireNotificationMessages(ctx context.Context, params database.AcquireNotificationMessagesParams) ([]database.AcquireNotificationMessagesRow, error) + BulkMarkNotificationMessagesSent(ctx context.Context, arg database.BulkMarkNotificationMessagesSentParams) (int64, error) + BulkMarkNotificationMessagesFailed(ctx context.Context, arg database.BulkMarkNotificationMessagesFailedParams) (int64, error) + EnqueueNotificationMessage(ctx context.Context, arg database.EnqueueNotificationMessageParams) (database.NotificationMessage, error) + FetchNewMessageMetadata(ctx context.Context, arg database.FetchNewMessageMetadataParams) (database.FetchNewMessageMetadataRow, error) + GetNotificationMessagesByStatus(ctx context.Context, arg database.GetNotificationMessagesByStatusParams) ([]database.NotificationMessage, error) +} + +// Handler is responsible for preparing and delivering a notification by a given method. +type Handler interface { + // Dispatcher constructs a DeliveryFunc to be used for delivering a notification via the chosen method. + Dispatcher(payload types.MessagePayload, title, body string) (dispatch.DeliveryFunc, error) +} + +// Enqueuer enqueues a new notification message in the store and returns its ID, should it enqueue without failure. +type Enqueuer interface { + Enqueue(ctx context.Context, userID, templateID uuid.UUID, labels map[string]string, createdBy string, targets ...uuid.UUID) (*uuid.UUID, error) +} diff --git a/coderd/notifications/types/cta.go b/coderd/notifications/types/cta.go new file mode 100644 index 0000000000000..d47ead0259251 --- /dev/null +++ b/coderd/notifications/types/cta.go @@ -0,0 +1,6 @@ +package types + +type TemplateAction struct { + Label string `json:"label"` + URL string `json:"url"` +} diff --git a/coderd/notifications/types/payload.go b/coderd/notifications/types/payload.go new file mode 100644 index 0000000000000..a3067f456c18e --- /dev/null +++ b/coderd/notifications/types/payload.go @@ -0,0 +1,19 @@ +package types + +// MessagePayload describes the JSON payload to be stored alongside the notification message, which specifies all of its +// metadata, labels, and routing information. +// +// Any BC-incompatible changes must bump the version, and special handling must be put in place to unmarshal multiple versions. 
+type MessagePayload struct { + Version string `json:"_version"` + + NotificationName string `json:"notification_name"` + CreatedBy string `json:"created_by"` + + UserID string `json:"user_id"` + UserEmail string `json:"user_email"` + UserName string `json:"user_name"` + + Actions []TemplateAction `json:"actions"` + Labels map[string]string `json:"labels"` +} diff --git a/coderd/notifications/utils_test.go b/coderd/notifications/utils_test.go new file mode 100644 index 0000000000000..12db76f5e48aa --- /dev/null +++ b/coderd/notifications/utils_test.go @@ -0,0 +1,71 @@ +package notifications_test + +import ( + "context" + "database/sql" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "cdr.dev/slog" + "cdr.dev/slog/sloggers/slogtest" + "github.com/coder/serpent" + + "github.com/coder/coder/v2/coderd/database" + "github.com/coder/coder/v2/coderd/database/dbauthz" + "github.com/coder/coder/v2/coderd/database/dbgen" + "github.com/coder/coder/v2/coderd/database/dbtestutil" + "github.com/coder/coder/v2/codersdk" + "github.com/coder/coder/v2/testutil" +) + +func setup(t *testing.T) (context.Context, slog.Logger, database.Store) { + t.Helper() + + connectionURL, closeFunc, err := dbtestutil.Open() + require.NoError(t, err) + t.Cleanup(closeFunc) + + ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitSuperLong) + t.Cleanup(cancel) + logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true, IgnoredErrorIs: []error{}}).Leveled(slog.LevelDebug) + + sqlDB, err := sql.Open("postgres", connectionURL) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, sqlDB.Close()) + }) + + // nolint:gocritic // unit tests. + return dbauthz.AsSystemRestricted(ctx), logger, database.New(sqlDB) +} + +func defaultNotificationsConfig(method database.NotificationMethod) codersdk.NotificationsConfig { + return codersdk.NotificationsConfig{ + Method: serpent.String(method), + MaxSendAttempts: 5, + RetryInterval: serpent.Duration(time.Minute * 5), + StoreSyncInterval: serpent.Duration(time.Second * 2), + StoreSyncBufferSize: 50, + LeasePeriod: serpent.Duration(time.Minute * 2), + LeaseCount: 10, + FetchInterval: serpent.Duration(time.Second * 10), + DispatchTimeout: serpent.Duration(time.Minute), + SMTP: codersdk.NotificationsEmailConfig{}, + Webhook: codersdk.NotificationsWebhookConfig{}, + } +} + +func defaultHelpers() map[string]any { + return map[string]any{ + "base_url": func() string { return "http://test.com" }, + } +} + +func createSampleUser(t *testing.T, db database.Store) database.User { + return dbgen.User(t, db, database.User{ + Email: "bob@coder.com", + Username: "bob", + }) +} diff --git a/coderd/provisionerdserver/provisionerdserver.go b/coderd/provisionerdserver/provisionerdserver.go index 413ed999aa6a6..79185862daa2e 100644 --- a/coderd/provisionerdserver/provisionerdserver.go +++ b/coderd/provisionerdserver/provisionerdserver.go @@ -25,6 +25,7 @@ import ( protobuf "google.golang.org/protobuf/proto" "cdr.dev/slog" + "github.com/coder/coder/v2/coderd/apikey" "github.com/coder/coder/v2/coderd/audit" "github.com/coder/coder/v2/coderd/database" @@ -32,6 +33,7 @@ import ( "github.com/coder/coder/v2/coderd/database/dbtime" "github.com/coder/coder/v2/coderd/database/pubsub" "github.com/coder/coder/v2/coderd/externalauth" + "github.com/coder/coder/v2/coderd/notifications" "github.com/coder/coder/v2/coderd/promoauth" "github.com/coder/coder/v2/coderd/schedule" "github.com/coder/coder/v2/coderd/telemetry" @@ -96,6 +98,7 @@ type server struct { 
TemplateScheduleStore *atomic.Pointer[schedule.TemplateScheduleStore] UserQuietHoursScheduleStore *atomic.Pointer[schedule.UserQuietHoursScheduleStore] DeploymentValues *codersdk.DeploymentValues + NotificationEnqueuer notifications.Enqueuer OIDCConfig promoauth.OAuth2Config @@ -150,6 +153,7 @@ func NewServer( userQuietHoursScheduleStore *atomic.Pointer[schedule.UserQuietHoursScheduleStore], deploymentValues *codersdk.DeploymentValues, options Options, + enqueuer notifications.Enqueuer, ) (proto.DRPCProvisionerDaemonServer, error) { // Fail-fast if pointers are nil if lifecycleCtx == nil { @@ -198,6 +202,7 @@ func NewServer( Database: db, Pubsub: ps, Acquirer: acquirer, + NotificationEnqueuer: enqueuer, Telemetry: tel, Tracer: tracer, QuotaCommitter: quotaCommitter, @@ -1411,6 +1416,11 @@ func (s *server) CompleteJob(ctx context.Context, completed *proto.CompletedJob) // audit the outcome of the workspace build if getWorkspaceError == nil { + // If the workspace has been deleted, notify the owner about it. + if workspaceBuild.Transition == database.WorkspaceTransitionDelete { + s.notifyWorkspaceDeleted(ctx, workspace, workspaceBuild) + } + auditor := s.Auditor.Load() auditAction := auditActionFromTransition(workspaceBuild.Transition) @@ -1511,6 +1521,41 @@ func (s *server) CompleteJob(ctx context.Context, completed *proto.CompletedJob) return &proto.Empty{}, nil } +func (s *server) notifyWorkspaceDeleted(ctx context.Context, workspace database.Workspace, build database.WorkspaceBuild) { + var reason string + if build.Reason.Valid() { + switch build.Reason { + case database.BuildReasonInitiator: + if build.InitiatorID == workspace.OwnerID { + // Deletions initiated by self should not notify. + return + } + + reason = "initiated by user" + case database.BuildReasonAutodelete: + reason = "autodeleted due to dormancy" + default: + reason = string(build.Reason) + } + } else { + reason = string(build.Reason) + s.Logger.Warn(ctx, "invalid build reason when sending deletion notification", + slog.F("reason", reason), slog.F("workspace_id", workspace.ID), slog.F("build_id", build.ID)) + } + + if _, err := s.NotificationEnqueuer.Enqueue(ctx, workspace.OwnerID, notifications.TemplateWorkspaceDeleted, + map[string]string{ + "name": workspace.Name, + "initiatedBy": build.InitiatorByUsername, + "reason": reason, + }, "provisionerdserver", + // Associate this notification with all the related entities. 
+ workspace.ID, workspace.OwnerID, workspace.TemplateID, workspace.OrganizationID, + ); err != nil { + s.Logger.Warn(ctx, "failed to notify of workspace deletion", slog.Error(err)) + } +} + func (s *server) startTrace(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { return s.Tracer.Start(ctx, name, append(opts, trace.WithAttributes( semconv.ServiceNameKey.String("coderd.provisionerd"), diff --git a/coderd/provisionerdserver/provisionerdserver_test.go b/coderd/provisionerdserver/provisionerdserver_test.go index 36f2ac5f601ce..7049359be98a7 100644 --- a/coderd/provisionerdserver/provisionerdserver_test.go +++ b/coderd/provisionerdserver/provisionerdserver_test.go @@ -24,6 +24,8 @@ import ( "golang.org/x/oauth2" "cdr.dev/slog/sloggers/slogtest" + "github.com/coder/serpent" + "github.com/coder/coder/v2/buildinfo" "github.com/coder/coder/v2/coderd/audit" "github.com/coder/coder/v2/coderd/database" @@ -32,6 +34,7 @@ import ( "github.com/coder/coder/v2/coderd/database/dbtime" "github.com/coder/coder/v2/coderd/database/pubsub" "github.com/coder/coder/v2/coderd/externalauth" + "github.com/coder/coder/v2/coderd/notifications" "github.com/coder/coder/v2/coderd/provisionerdserver" "github.com/coder/coder/v2/coderd/schedule" "github.com/coder/coder/v2/coderd/schedule/cron" @@ -41,7 +44,6 @@ import ( "github.com/coder/coder/v2/provisionersdk" sdkproto "github.com/coder/coder/v2/provisionersdk/proto" "github.com/coder/coder/v2/testutil" - "github.com/coder/serpent" ) func testTemplateScheduleStore() *atomic.Pointer[schedule.TemplateScheduleStore] { @@ -1564,6 +1566,137 @@ func TestInsertWorkspaceResource(t *testing.T) { }) } +func TestNotifications(t *testing.T) { + t.Parallel() + + t.Run("Workspace deletion", func(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + deletionReason database.BuildReason + shouldNotify bool + shouldSelfInitiate bool + }{ + { + name: "initiated by autodelete", + deletionReason: database.BuildReasonAutodelete, + shouldNotify: true, + }, + { + name: "initiated by self", + deletionReason: database.BuildReasonInitiator, + shouldNotify: false, + shouldSelfInitiate: true, + }, + { + name: "initiated by someone else", + deletionReason: database.BuildReasonInitiator, + shouldNotify: true, + shouldSelfInitiate: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + notifEnq := &fakeNotificationEnqueuer{} + + srv, db, ps, pd := setup(t, false, &overrides{ + notificationEnqueuer: notifEnq, + }) + + user := dbgen.User(t, db, database.User{}) + initiator := user + if !tc.shouldSelfInitiate { + initiator = dbgen.User(t, db, database.User{}) + } + + template := dbgen.Template(t, db, database.Template{ + Name: "template", + Provisioner: database.ProvisionerTypeEcho, + OrganizationID: pd.OrganizationID, + }) + template, err := db.GetTemplateByID(ctx, template.ID) + require.NoError(t, err) + file := dbgen.File(t, db, database.File{CreatedBy: user.ID}) + workspace := dbgen.Workspace(t, db, database.Workspace{ + TemplateID: template.ID, + OwnerID: user.ID, + OrganizationID: pd.OrganizationID, + }) + version := dbgen.TemplateVersion(t, db, database.TemplateVersion{ + OrganizationID: pd.OrganizationID, + TemplateID: uuid.NullUUID{ + UUID: template.ID, + Valid: true, + }, + JobID: uuid.New(), + }) + build := dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{ + WorkspaceID: workspace.ID, + TemplateVersionID: version.ID, + InitiatorID: initiator.ID, + 
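+				// A delete transition combined with the reason under test is what drives the
+				// workspace-deleted notification path in CompleteJob.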
Transition: database.WorkspaceTransitionDelete, + Reason: tc.deletionReason, + }) + job := dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{ + FileID: file.ID, + Type: database.ProvisionerJobTypeWorkspaceBuild, + Input: must(json.Marshal(provisionerdserver.WorkspaceProvisionJob{ + WorkspaceBuildID: build.ID, + })), + OrganizationID: pd.OrganizationID, + }) + _, err = db.AcquireProvisionerJob(ctx, database.AcquireProvisionerJobParams{ + OrganizationID: pd.OrganizationID, + WorkerID: uuid.NullUUID{ + UUID: pd.ID, + Valid: true, + }, + Types: []database.ProvisionerType{database.ProvisionerTypeEcho}, + }) + require.NoError(t, err) + + _, err = srv.CompleteJob(ctx, &proto.CompletedJob{ + JobId: job.ID.String(), + Type: &proto.CompletedJob_WorkspaceBuild_{ + WorkspaceBuild: &proto.CompletedJob_WorkspaceBuild{ + State: []byte{}, + Resources: []*sdkproto.Resource{{ + Name: "example", + Type: "aws_instance", + }}, + }, + }, + }) + require.NoError(t, err) + + workspace, err = db.GetWorkspaceByID(ctx, workspace.ID) + require.NoError(t, err) + require.True(t, workspace.Deleted) + + if tc.shouldNotify { + // Validate that the notification was sent and contained the expected values. + require.Len(t, notifEnq.sent, 1) + require.Equal(t, notifEnq.sent[0].userID, user.ID) + require.Contains(t, notifEnq.sent[0].targets, template.ID) + require.Contains(t, notifEnq.sent[0].targets, workspace.ID) + require.Contains(t, notifEnq.sent[0].targets, workspace.OrganizationID) + require.Contains(t, notifEnq.sent[0].targets, user.ID) + if tc.deletionReason == database.BuildReasonInitiator { + require.Equal(t, notifEnq.sent[0].labels["initiatedBy"], initiator.Username) + } + } else { + require.Len(t, notifEnq.sent, 0) + } + }) + } + }) +} + type overrides struct { ctx context.Context deploymentValues *codersdk.DeploymentValues @@ -1575,6 +1708,7 @@ type overrides struct { heartbeatFn func(ctx context.Context) error heartbeatInterval time.Duration auditor audit.Auditor + notificationEnqueuer notifications.Enqueuer } func setup(t *testing.T, ignoreLogErrors bool, ov *overrides) (proto.DRPCProvisionerDaemonServer, database.Store, pubsub.Pubsub, database.ProvisionerDaemon) { @@ -1636,6 +1770,12 @@ func setup(t *testing.T, ignoreLogErrors bool, ov *overrides) (proto.DRPCProvisi } auditPtr.Store(&auditor) pollDur = ov.acquireJobLongPollDuration + var notifEnq notifications.Enqueuer + if ov.notificationEnqueuer != nil { + notifEnq = ov.notificationEnqueuer + } else { + notifEnq = notifications.NewNoopEnqueuer() + } daemon, err := db.UpsertProvisionerDaemon(ov.ctx, database.UpsertProvisionerDaemonParams{ Name: "test", @@ -1675,6 +1815,7 @@ func setup(t *testing.T, ignoreLogErrors bool, ov *overrides) (proto.DRPCProvisi HeartbeatInterval: ov.heartbeatInterval, HeartbeatFn: ov.heartbeatFn, }, + notifEnq, ) require.NoError(t, err) return srv, db, ps, daemon @@ -1778,3 +1919,31 @@ func (s *fakeStream) cancel() { s.canceled = true s.c.Broadcast() } + +type fakeNotificationEnqueuer struct { + mu sync.Mutex + sent []*notification +} + +type notification struct { + userID, templateID uuid.UUID + labels map[string]string + createdBy string + targets []uuid.UUID +} + +func (f *fakeNotificationEnqueuer) Enqueue(_ context.Context, userID, templateID uuid.UUID, labels map[string]string, createdBy string, targets ...uuid.UUID) (*uuid.UUID, error) { + f.mu.Lock() + defer f.mu.Unlock() + + f.sent = append(f.sent, ¬ification{ + userID: userID, + templateID: templateID, + labels: labels, + createdBy: createdBy, + targets: targets, + }) + 
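+	// Hand back a generated ID so the fake honors the Enqueuer contract of returning the new message's ID.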
+ id := uuid.New() + return &id, nil +} diff --git a/coderd/parameter/renderer.go b/coderd/render/markdown.go similarity index 89% rename from coderd/parameter/renderer.go rename to coderd/render/markdown.go index 3767f63cd889c..75e6d8d1c1813 100644 --- a/coderd/parameter/renderer.go +++ b/coderd/render/markdown.go @@ -1,4 +1,4 @@ -package parameter +package render import ( "bytes" @@ -79,9 +79,9 @@ var plaintextStyle = ansi.StyleConfig{ DefinitionDescription: ansi.StylePrimitive{}, } -// Plaintext function converts the description with optional Markdown tags +// PlaintextFromMarkdown function converts the description with optional Markdown tags // to the plaintext form. -func Plaintext(markdown string) (string, error) { +func PlaintextFromMarkdown(markdown string) (string, error) { renderer, err := glamour.NewTermRenderer( glamour.WithStandardStyle("ascii"), glamour.WithWordWrap(0), // don't need to add spaces in the end of line @@ -100,12 +100,11 @@ func Plaintext(markdown string) (string, error) { return strings.TrimSpace(output), nil } -func HTML(markdown string) string { - p := parser.NewWithExtensions(parser.CommonExtensions) +func HTMLFromMarkdown(markdown string) string { + p := parser.NewWithExtensions(parser.CommonExtensions | parser.HardLineBreak) // Added HardLineBreak. doc := p.Parse([]byte(markdown)) renderer := html.NewRenderer(html.RendererOptions{ Flags: html.CommonFlags | html.SkipHTML, - }, - ) + }) return string(bytes.TrimSpace(gomarkdown.Render(doc, renderer))) } diff --git a/coderd/parameter/renderer_test.go b/coderd/render/markdown_test.go similarity index 91% rename from coderd/parameter/renderer_test.go rename to coderd/render/markdown_test.go index f0765a7a6eb14..40f3dae137633 100644 --- a/coderd/parameter/renderer_test.go +++ b/coderd/render/markdown_test.go @@ -1,11 +1,11 @@ -package parameter_test +package render_test import ( "testing" - "github.com/coder/coder/v2/coderd/parameter" - "github.com/stretchr/testify/require" + + "github.com/coder/coder/v2/coderd/render" ) func TestPlaintext(t *testing.T) { @@ -32,7 +32,7 @@ __This is bold text.__ expected := "Provide the machine image\nSee the registry (https://container.registry.blah/namespace) for options.\n\nMinion (https://octodex.github.com/images/minion.png)\n\nThis is bold text.\nThis is bold text.\nThis is italic text.\n\nBlockquotes can also be nested.\nStrikethrough.\n\n1. Lorem ipsum dolor sit amet.\n2. Consectetur adipiscing elit.\n3. Integer molestie lorem at massa.\n\nThere are also code tags!" - stripped, err := parameter.Plaintext(mdDescription) + stripped, err := render.PlaintextFromMarkdown(mdDescription) require.NoError(t, err) require.Equal(t, expected, stripped) }) @@ -42,7 +42,7 @@ __This is bold text.__ nothingChanges := "This is a simple description, so nothing changes." 
- stripped, err := parameter.Plaintext(nothingChanges) + stripped, err := render.PlaintextFromMarkdown(nothingChanges) require.NoError(t, err) require.Equal(t, nothingChanges, stripped) }) @@ -84,7 +84,7 @@ func TestHTML(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - rendered := parameter.HTML(tt.input) + rendered := render.HTMLFromMarkdown(tt.input) require.Equal(t, tt.expected, rendered) }) } diff --git a/coderd/templateversions.go b/coderd/templateversions.go index 1c9131ef0d17c..6eb2b61be0f1d 100644 --- a/coderd/templateversions.go +++ b/coderd/templateversions.go @@ -17,7 +17,6 @@ import ( "golang.org/x/xerrors" "cdr.dev/slog" - "github.com/coder/coder/v2/coderd/rbac/policy" "github.com/coder/coder/v2/coderd/audit" "github.com/coder/coder/v2/coderd/database" @@ -26,9 +25,10 @@ import ( "github.com/coder/coder/v2/coderd/externalauth" "github.com/coder/coder/v2/coderd/httpapi" "github.com/coder/coder/v2/coderd/httpmw" - "github.com/coder/coder/v2/coderd/parameter" "github.com/coder/coder/v2/coderd/provisionerdserver" "github.com/coder/coder/v2/coderd/rbac" + "github.com/coder/coder/v2/coderd/rbac/policy" + "github.com/coder/coder/v2/coderd/render" "github.com/coder/coder/v2/coderd/tracing" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/examples" @@ -1643,7 +1643,7 @@ func convertTemplateVersionParameter(param database.TemplateVersionParameter) (c }) } - descriptionPlaintext, err := parameter.Plaintext(param.Description) + descriptionPlaintext, err := render.PlaintextFromMarkdown(param.Description) if err != nil { return codersdk.TemplateVersionParameter{}, err } diff --git a/coderd/userauth.go b/coderd/userauth.go index c7550b89d05f7..303f8a3473bea 100644 --- a/coderd/userauth.go +++ b/coderd/userauth.go @@ -25,6 +25,7 @@ import ( "golang.org/x/xerrors" "cdr.dev/slog" + "github.com/coder/coder/v2/coderd/apikey" "github.com/coder/coder/v2/coderd/audit" "github.com/coder/coder/v2/coderd/database" @@ -32,9 +33,9 @@ import ( "github.com/coder/coder/v2/coderd/database/dbtime" "github.com/coder/coder/v2/coderd/httpapi" "github.com/coder/coder/v2/coderd/httpmw" - "github.com/coder/coder/v2/coderd/parameter" "github.com/coder/coder/v2/coderd/promoauth" "github.com/coder/coder/v2/coderd/rbac" + "github.com/coder/coder/v2/coderd/render" "github.com/coder/coder/v2/coderd/userpassword" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/cryptorand" @@ -1353,7 +1354,7 @@ func (api *API) oauthLogin(r *http.Request, params *oauthLoginParams) ([]*http.C if user.ID == uuid.Nil && !params.AllowSignups { signupsDisabledText := "Please contact your Coder administrator to request access." 
if api.OIDCConfig != nil && api.OIDCConfig.SignupsDisabledText != "" { - signupsDisabledText = parameter.HTML(api.OIDCConfig.SignupsDisabledText) + signupsDisabledText = render.HTMLFromMarkdown(api.OIDCConfig.SignupsDisabledText) } return httpError{ code: http.StatusForbidden, diff --git a/coderd/workspaces_test.go b/coderd/workspaces_test.go index a657b5ce149dd..cef7f875fde46 100644 --- a/coderd/workspaces_test.go +++ b/coderd/workspaces_test.go @@ -20,6 +20,7 @@ import ( "cdr.dev/slog" "cdr.dev/slog/sloggers/slogtest" + "github.com/coder/coder/v2/agent/agenttest" "github.com/coder/coder/v2/coderd/audit" "github.com/coder/coder/v2/coderd/coderdtest" @@ -29,9 +30,9 @@ import ( "github.com/coder/coder/v2/coderd/database/dbgen" "github.com/coder/coder/v2/coderd/database/dbtestutil" "github.com/coder/coder/v2/coderd/database/dbtime" - "github.com/coder/coder/v2/coderd/parameter" "github.com/coder/coder/v2/coderd/rbac" "github.com/coder/coder/v2/coderd/rbac/policy" + "github.com/coder/coder/v2/coderd/render" "github.com/coder/coder/v2/coderd/schedule" "github.com/coder/coder/v2/coderd/schedule/cron" "github.com/coder/coder/v2/coderd/util/ptr" @@ -2948,9 +2949,9 @@ func TestWorkspaceWithRichParameters(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) defer cancel() - firstParameterDescriptionPlaintext, err := parameter.Plaintext(firstParameterDescription) + firstParameterDescriptionPlaintext, err := render.PlaintextFromMarkdown(firstParameterDescription) require.NoError(t, err) - secondParameterDescriptionPlaintext, err := parameter.Plaintext(secondParameterDescription) + secondParameterDescriptionPlaintext, err := render.PlaintextFromMarkdown(secondParameterDescription) require.NoError(t, err) templateRichParameters, err := client.TemplateVersionRichParameters(ctx, version.ID) diff --git a/codersdk/deployment.go b/codersdk/deployment.go index 7b13d083a4435..56aeb894fb4b7 100644 --- a/codersdk/deployment.go +++ b/codersdk/deployment.go @@ -17,10 +17,11 @@ import ( "github.com/coreos/go-oidc/v3/oidc" + "github.com/coder/serpent" + "github.com/coder/coder/v2/buildinfo" "github.com/coder/coder/v2/coderd/agentmetrics" "github.com/coder/coder/v2/coderd/workspaceapps/appurl" - "github.com/coder/serpent" ) // Entitlement represents whether a feature is licensed. @@ -204,6 +205,7 @@ type DeploymentValues struct { Healthcheck HealthcheckConfig `json:"healthcheck,omitempty" typescript:",notnull"` CLIUpgradeMessage serpent.String `json:"cli_upgrade_message,omitempty" typescript:",notnull"` TermsOfServiceURL serpent.String `json:"terms_of_service_url,omitempty" typescript:",notnull"` + Notifications NotificationsConfig `json:"notifications,omitempty" typescript:",notnull"` Config serpent.YAMLConfigPath `json:"config,omitempty" typescript:",notnull"` WriteConfig serpent.Bool `json:"write_config,omitempty" typescript:",notnull"` @@ -455,6 +457,76 @@ type HealthcheckConfig struct { ThresholdDatabase serpent.Duration `json:"threshold_database" typescript:",notnull"` } +type NotificationsConfig struct { + // The upper limit of attempts to send a notification. + MaxSendAttempts serpent.Int64 `json:"max_send_attempts" typescript:",notnull"` + // The minimum time between retries. + RetryInterval serpent.Duration `json:"retry_interval" typescript:",notnull"` + + // The notifications system buffers message updates in memory to ease pressure on the database. + // This option controls how often it synchronizes its state with the database. 
The shorter this value the
+	// lower the chance of state inconsistency in a non-graceful shutdown - but it also increases load on the
+	// database. It is recommended to keep this option at its default value.
+	StoreSyncInterval serpent.Duration `json:"sync_interval" typescript:",notnull"`
+	// The notifications system buffers message updates in memory to ease pressure on the database.
+	// This option controls how many updates are kept in memory. The lower this value the
+	// lower the chance of state inconsistency in a non-graceful shutdown - but it also increases load on the
+	// database. It is recommended to keep this option at its default value.
+	StoreSyncBufferSize serpent.Int64 `json:"sync_buffer_size" typescript:",notnull"`
+
+	// How long a notifier should lease a message. This is effectively how long a notification is 'owned'
+	// by a notifier, and once this period expires it will be available for lease by another notifier. Leasing
+	// is important in order for multiple running notifiers to not pick the same messages to deliver concurrently.
+	// This lease period will only expire if a notifier shuts down ungracefully; a dispatch of the notification
+	// releases the lease.
+	LeasePeriod serpent.Duration `json:"lease_period"`
+	// How many notifications a notifier should lease per fetch interval.
+	LeaseCount serpent.Int64 `json:"lease_count"`
+	// How often to query the database for queued notifications.
+	FetchInterval serpent.Duration `json:"fetch_interval"`
+
+	// Which delivery method to use (available options: 'smtp', 'webhook').
+	Method serpent.String `json:"method"`
+	// How long to wait while a notification is being sent before giving up.
+	DispatchTimeout serpent.Duration `json:"dispatch_timeout"`
+	// SMTP settings.
+	SMTP NotificationsEmailConfig `json:"email" typescript:",notnull"`
+	// Webhook settings.
+	Webhook NotificationsWebhookConfig `json:"webhook" typescript:",notnull"`
+}
+
+type NotificationsEmailConfig struct {
+	// The sender's address.
+	From serpent.String `json:"from" typescript:",notnull"`
+	// The intermediary SMTP host through which emails are sent (host:port).
+	Smarthost serpent.HostPort `json:"smarthost" typescript:",notnull"`
+	// The hostname identifying the SMTP server.
+	Hello serpent.String `json:"hello" typescript:",notnull"`
+
+	// TODO: Auth and Headers
+	//// Authentication details.
+	// Auth struct {
+	//	// Username for CRAM-MD5/LOGIN/PLAIN auth; authentication is disabled if this is left blank.
+	//	Username serpent.String `json:"username" typescript:",notnull"`
+	//	// Password to use for LOGIN/PLAIN auth.
+	//	Password serpent.String `json:"password" typescript:",notnull"`
+	//	// File from which to load the password to use for LOGIN/PLAIN auth.
+	//	PasswordFile serpent.String `json:"password_file" typescript:",notnull"`
+	//	// Secret to use for CRAM-MD5 auth.
+	//	Secret serpent.String `json:"secret" typescript:",notnull"`
+	//	// Identity used for PLAIN auth.
+	//	Identity serpent.String `json:"identity" typescript:",notnull"`
+	// } `json:"auth" typescript:",notnull"`
+	// // Additional headers to use in the SMTP request.
+	// Headers map[string]string `json:"headers" typescript:",notnull"`
+	// TODO: TLS
+}
+
+type NotificationsWebhookConfig struct {
+	// The URL to which the payload will be sent with an HTTP POST request.
+ Endpoint serpent.URL `json:"endpoint" typescript:",notnull"` +} + const ( annotationFormatDuration = "format_duration" annotationEnterpriseKey = "enterprise" @@ -600,6 +672,20 @@ when required by your organization's security policy.`, Name: "Config", Description: `Use a YAML configuration file when your server launch become unwieldy.`, } + deploymentGroupNotifications = serpent.Group{ + Name: "Notifications", + YAML: "notifications", + } + deploymentGroupNotificationsEmail = serpent.Group{ + Name: "Email", + Parent: &deploymentGroupNotifications, + YAML: "email", + } + deploymentGroupNotificationsWebhook = serpent.Group{ + Name: "Webhook", + Parent: &deploymentGroupNotifications, + YAML: "webhook", + } ) httpAddress := serpent.Option{ @@ -2016,6 +2102,156 @@ Write out the current server config as YAML to stdout.`, YAML: "thresholdDatabase", Annotations: serpent.Annotations{}.Mark(annotationFormatDuration, "true"), }, + // Notifications Options + { + Name: "Notifications: Method", + Description: "Which delivery method to use (available options: 'smtp', 'webhook').", + Flag: "notifications-method", + Env: "CODER_NOTIFICATIONS_METHOD", + Value: &c.Notifications.Method, + Default: "smtp", + Group: &deploymentGroupNotifications, + YAML: "method", + }, + { + Name: "Notifications: Dispatch Timeout", + Description: "How long to wait while a notification is being sent before giving up.", + Flag: "notifications-dispatch-timeout", + Env: "CODER_NOTIFICATIONS_DISPATCH_TIMEOUT", + Value: &c.Notifications.DispatchTimeout, + Default: time.Minute.String(), + Group: &deploymentGroupNotifications, + YAML: "dispatch-timeout", + Annotations: serpent.Annotations{}.Mark(annotationFormatDuration, "true"), + }, + { + Name: "Notifications: Email: From Address", + Description: "The sender's address to use.", + Flag: "notifications-email-from", + Env: "CODER_NOTIFICATIONS_EMAIL_FROM", + Value: &c.Notifications.SMTP.From, + Group: &deploymentGroupNotificationsEmail, + YAML: "from", + }, + { + Name: "Notifications: Email: Smarthost", + Description: "The intermediary SMTP host through which emails are sent.", + Flag: "notifications-email-smarthost", + Env: "CODER_NOTIFICATIONS_EMAIL_SMARTHOST", + Default: "localhost:587", // To pass validation. 
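+			// 587 is the standard SMTP submission port; the default exists only so validation passes,
+			// so point it at your own relay in production.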
+			Value:       &c.Notifications.SMTP.Smarthost,
+			Group:       &deploymentGroupNotificationsEmail,
+			YAML:        "smarthost",
+		},
+		{
+			Name:        "Notifications: Email: Hello",
+			Description: "The hostname identifying the SMTP server.",
+			Flag:        "notifications-email-hello",
+			Env:         "CODER_NOTIFICATIONS_EMAIL_HELLO",
+			Default:     "localhost",
+			Value:       &c.Notifications.SMTP.Hello,
+			Group:       &deploymentGroupNotificationsEmail,
+			YAML:        "hello",
+		},
+		{
+			Name:        "Notifications: Webhook: Endpoint",
+			Description: "The endpoint to which to send webhooks.",
+			Flag:        "notifications-webhook-endpoint",
+			Env:         "CODER_NOTIFICATIONS_WEBHOOK_ENDPOINT",
+			Value:       &c.Notifications.Webhook.Endpoint,
+			Group:       &deploymentGroupNotificationsWebhook,
+			YAML:        "endpoint",
+		},
+		{
+			Name:        "Notifications: Max Send Attempts",
+			Description: "The upper limit of attempts to send a notification.",
+			Flag:        "notifications-max-send-attempts",
+			Env:         "CODER_NOTIFICATIONS_MAX_SEND_ATTEMPTS",
+			Value:       &c.Notifications.MaxSendAttempts,
+			Default:     "5",
+			Group:       &deploymentGroupNotifications,
+			YAML:        "max-send-attempts",
+		},
+		{
+			Name:        "Notifications: Retry Interval",
+			Description: "The minimum time between retries.",
+			Flag:        "notifications-retry-interval",
+			Env:         "CODER_NOTIFICATIONS_RETRY_INTERVAL",
+			Value:       &c.Notifications.RetryInterval,
+			Default:     (time.Minute * 5).String(),
+			Group:       &deploymentGroupNotifications,
+			YAML:        "retry-interval",
+			Annotations: serpent.Annotations{}.Mark(annotationFormatDuration, "true"),
+			Hidden:      true, // Hidden because most operators should not need to modify this.
+		},
+		{
+			Name: "Notifications: Store Sync Interval",
+			Description: "The notifications system buffers message updates in memory to ease pressure on the database. " +
+				"This option controls how often it synchronizes its state with the database. The shorter this value the " +
+				"lower the chance of state inconsistency in a non-graceful shutdown - but it also increases load on the " +
+				"database. It is recommended to keep this option at its default value.",
+			Flag:        "notifications-store-sync-interval",
+			Env:         "CODER_NOTIFICATIONS_STORE_SYNC_INTERVAL",
+			Value:       &c.Notifications.StoreSyncInterval,
+			Default:     (time.Second * 2).String(),
+			Group:       &deploymentGroupNotifications,
+			YAML:        "store-sync-interval",
+			Annotations: serpent.Annotations{}.Mark(annotationFormatDuration, "true"),
+			Hidden:      true, // Hidden because most operators should not need to modify this.
+		},
+		{
+			Name: "Notifications: Store Sync Buffer Size",
+			Description: "The notifications system buffers message updates in memory to ease pressure on the database. " +
+				"This option controls how many updates are kept in memory. The lower this value the " +
+				"lower the chance of state inconsistency in a non-graceful shutdown - but it also increases load on the " +
+				"database. It is recommended to keep this option at its default value.",
+			Flag:    "notifications-store-sync-buffer-size",
+			Env:     "CODER_NOTIFICATIONS_STORE_SYNC_BUFFER_SIZE",
+			Value:   &c.Notifications.StoreSyncBufferSize,
+			Default: "50",
+			Group:   &deploymentGroupNotifications,
+			YAML:    "store-sync-buffer-size",
+			Hidden:  true, // Hidden because most operators should not need to modify this.
+		},
+		{
+			Name: "Notifications: Lease Period",
+			Description: "How long a notifier should lease a message. This is effectively how long a notification is 'owned' " +
+				"by a notifier, and once this period expires it will be available for lease by another notifier. 
Leasing " + + "is important in order for multiple running notifiers to not pick the same messages to deliver concurrently. " + + "This lease period will only expire if a notifier shuts down ungracefully; a dispatch of the notification " + + "releases the lease.", + Flag: "notifications-lease-period", + Env: "CODER_NOTIFICATIONS_LEASE_PERIOD", + Value: &c.Notifications.LeasePeriod, + Default: (time.Minute * 2).String(), + Group: &deploymentGroupNotifications, + YAML: "lease-period", + Annotations: serpent.Annotations{}.Mark(annotationFormatDuration, "true"), + Hidden: true, // Hidden because most operators should not need to modify this. + }, + { + Name: "Notifications: Lease Count", + Description: "How many notifications a notifier should lease per fetch interval.", + Flag: "notifications-lease-count", + Env: "CODER_NOTIFICATIONS_LEASE_COUNT", + Value: &c.Notifications.LeaseCount, + Default: "20", + Group: &deploymentGroupNotifications, + YAML: "lease-count", + Hidden: true, // Hidden because most operators should not need to modify this. + }, + { + Name: "Notifications: Fetch Interval", + Description: "How often to query the database for queued notifications.", + Flag: "notifications-fetch-interval", + Env: "CODER_NOTIFICATIONS_FETCH_INTERVAL", + Value: &c.Notifications.FetchInterval, + Default: (time.Second * 15).String(), + Group: &deploymentGroupNotifications, + YAML: "fetch-interval", + Annotations: serpent.Annotations{}.Mark(annotationFormatDuration, "true"), + Hidden: true, // Hidden because most operators should not need to modify this. + }, } return opts @@ -2233,15 +2469,16 @@ const ( ExperimentExample Experiment = "example" // This isn't used for anything. ExperimentAutoFillParameters Experiment = "auto-fill-parameters" // This should not be taken out of experiments until we have redesigned the feature. ExperimentMultiOrganization Experiment = "multi-organization" // Requires organization context for interactions, default org is assumed. - ExperimentCustomRoles Experiment = "custom-roles" // Allows creating runtime custom roles - ExperimentWorkspaceUsage Experiment = "workspace-usage" // Enables the new workspace usage tracking + ExperimentCustomRoles Experiment = "custom-roles" // Allows creating runtime custom roles. + ExperimentNotifications Experiment = "notifications" // Sends notifications via SMTP and webhooks following certain events. + ExperimentWorkspaceUsage Experiment = "workspace-usage" // Enables the new workspace usage tracking. ) // ExperimentsAll should include all experiments that are safe for // users to opt-in to via --experimental='*'. // Experiments that are not ready for consumption by all users should // not be included here and will be essentially hidden. -var ExperimentsAll = Experiments{} +var ExperimentsAll = Experiments{ExperimentNotifications} // Experiments is a list of experiments. // Multiple experiments may be enabled at the same time. 
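Taken together, the retry, fetch, and lease options registered above set the time scales of the delivery pipeline. The snippet below is only an illustration (nothing in this change computes it): it plugs in the defaults from this diff to get a feel for how long a repeatedly failing notification lingers before it is marked failed, and how long a crashed notifier's leased messages stay invisible.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Defaults registered in codersdk/deployment.go above; adjust to match your deployment.
	const (
		maxSendAttempts = 5
		retryInterval   = 5 * time.Minute  // CODER_NOTIFICATIONS_RETRY_INTERVAL
		fetchInterval   = 15 * time.Second // CODER_NOTIFICATIONS_FETCH_INTERVAL
		dispatchTimeout = 1 * time.Minute  // CODER_NOTIFICATIONS_DISPATCH_TIMEOUT
		leasePeriod     = 2 * time.Minute  // CODER_NOTIFICATIONS_LEASE_PERIOD
	)

	// Each attempt can block for up to dispatchTimeout, then waits at least
	// retryInterval before becoming eligible again, and is only picked up on
	// a subsequent fetch tick.
	perAttempt := dispatchTimeout + retryInterval + fetchInterval
	window := time.Duration(maxSendAttempts) * perAttempt
	fmt.Printf("a notification that fails every attempt lingers for at least ~%s before being marked failed\n", window)

	// If a notifier crashes mid-send, the messages it had leased only become
	// visible to other notifiers once the lease expires.
	fmt.Printf("messages held by a crashed notifier are re-queued after %s\n", leasePeriod)
}
```

This is also why most of these options are registered with `Hidden: true`: the defaults are intended to be left alone by most operators.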
diff --git a/docs/api/general.md b/docs/api/general.md index 620e3b238d7b3..8bd968c6b18ed 100644 --- a/docs/api/general.md +++ b/docs/api/general.md @@ -253,6 +253,40 @@ curl -X GET http://coder-server:8080/api/v2/deployment/config \ "stackdriver": "string" }, "metrics_cache_refresh_interval": 0, + "notifications": { + "dispatch_timeout": 0, + "email": { + "from": "string", + "hello": "string", + "smarthost": { + "host": "string", + "port": "string" + } + }, + "fetch_interval": 0, + "lease_count": 0, + "lease_period": 0, + "max_send_attempts": 0, + "method": "string", + "retry_interval": 0, + "sync_buffer_size": 0, + "sync_interval": 0, + "webhook": { + "endpoint": { + "forceQuery": true, + "fragment": "string", + "host": "string", + "omitHost": true, + "opaque": "string", + "path": "string", + "rawFragment": "string", + "rawPath": "string", + "rawQuery": "string", + "scheme": "string", + "user": {} + } + } + }, "oauth2": { "github": { "allow_everyone": true, diff --git a/docs/api/schemas.md b/docs/api/schemas.md index e7611c2b03253..5e2eaf7b74784 100644 --- a/docs/api/schemas.md +++ b/docs/api/schemas.md @@ -1679,6 +1679,40 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o "stackdriver": "string" }, "metrics_cache_refresh_interval": 0, + "notifications": { + "dispatch_timeout": 0, + "email": { + "from": "string", + "hello": "string", + "smarthost": { + "host": "string", + "port": "string" + } + }, + "fetch_interval": 0, + "lease_count": 0, + "lease_period": 0, + "max_send_attempts": 0, + "method": "string", + "retry_interval": 0, + "sync_buffer_size": 0, + "sync_interval": 0, + "webhook": { + "endpoint": { + "forceQuery": true, + "fragment": "string", + "host": "string", + "omitHost": true, + "opaque": "string", + "path": "string", + "rawFragment": "string", + "rawPath": "string", + "rawQuery": "string", + "scheme": "string", + "user": {} + } + } + }, "oauth2": { "github": { "allow_everyone": true, @@ -2052,6 +2086,40 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o "stackdriver": "string" }, "metrics_cache_refresh_interval": 0, + "notifications": { + "dispatch_timeout": 0, + "email": { + "from": "string", + "hello": "string", + "smarthost": { + "host": "string", + "port": "string" + } + }, + "fetch_interval": 0, + "lease_count": 0, + "lease_period": 0, + "max_send_attempts": 0, + "method": "string", + "retry_interval": 0, + "sync_buffer_size": 0, + "sync_interval": 0, + "webhook": { + "endpoint": { + "forceQuery": true, + "fragment": "string", + "host": "string", + "omitHost": true, + "opaque": "string", + "path": "string", + "rawFragment": "string", + "rawPath": "string", + "rawQuery": "string", + "scheme": "string", + "user": {} + } + } + }, "oauth2": { "github": { "allow_everyone": true, @@ -2246,6 +2314,7 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o | `job_hang_detector_interval` | integer | false | | | | `logging` | [codersdk.LoggingConfig](#codersdkloggingconfig) | false | | | | `metrics_cache_refresh_interval` | integer | false | | | +| `notifications` | [codersdk.NotificationsConfig](#codersdknotificationsconfig) | false | | | | `oauth2` | [codersdk.OAuth2Config](#codersdkoauth2config) | false | | | | `oidc` | [codersdk.OIDCConfig](#codersdkoidcconfig) | false | | | | `pg_auth` | string | false | | | @@ -2368,6 +2437,7 @@ CreateWorkspaceRequest provides options for creating a new workspace. 
Only one o | `auto-fill-parameters` | | `multi-organization` | | `custom-roles` | +| `notifications` | | `workspace-usage` | ## codersdk.ExternalAuth @@ -2976,6 +3046,108 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o | `id` | string | true | | | | `username` | string | true | | | +## codersdk.NotificationsConfig + +```json +{ + "dispatch_timeout": 0, + "email": { + "from": "string", + "hello": "string", + "smarthost": { + "host": "string", + "port": "string" + } + }, + "fetch_interval": 0, + "lease_count": 0, + "lease_period": 0, + "max_send_attempts": 0, + "method": "string", + "retry_interval": 0, + "sync_buffer_size": 0, + "sync_interval": 0, + "webhook": { + "endpoint": { + "forceQuery": true, + "fragment": "string", + "host": "string", + "omitHost": true, + "opaque": "string", + "path": "string", + "rawFragment": "string", + "rawPath": "string", + "rawQuery": "string", + "scheme": "string", + "user": {} + } + } +} +``` + +### Properties + +| Name | Type | Required | Restrictions | Description | +| ------------------- | -------------------------------------------------------------------------- | -------- | ------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `dispatch_timeout` | integer | false | | How long to wait while a notification is being sent before giving up. | +| `email` | [codersdk.NotificationsEmailConfig](#codersdknotificationsemailconfig) | false | | Email settings. | +| `fetch_interval` | integer | false | | How often to query the database for queued notifications. | +| `lease_count` | integer | false | | How many notifications a notifier should lease per fetch interval. | +| `lease_period` | integer | false | | How long a notifier should lease a message. This is effectively how long a notification is 'owned' by a notifier, and once this period expires it will be available for lease by another notifier. Leasing is important in order for multiple running notifiers to not pick the same messages to deliver concurrently. This lease period will only expire if a notifier shuts down ungracefully; a dispatch of the notification releases the lease. | +| `max_send_attempts` | integer | false | | The upper limit of attempts to send a notification. | +| `method` | string | false | | Which delivery method to use (available options: 'smtp', 'webhook'). | +| `retry_interval` | integer | false | | The minimum time between retries. | +| `sync_buffer_size` | integer | false | | The notifications system buffers message updates in memory to ease pressure on the database. This option controls how many updates are kept in memory. The lower this value the lower the change of state inconsistency in a non-graceful shutdown - but it also increases load on the database. It is recommended to keep this option at its default value. | +| `sync_interval` | integer | false | | The notifications system buffers message updates in memory to ease pressure on the database. This option controls how often it synchronizes its state with the database. 
The shorter this value the lower the change of state inconsistency in a non-graceful shutdown - but it also increases load on the database. It is recommended to keep this option at its default value. | +| `webhook` | [codersdk.NotificationsWebhookConfig](#codersdknotificationswebhookconfig) | false | | Webhook settings. | + +## codersdk.NotificationsEmailConfig + +```json +{ + "from": "string", + "hello": "string", + "smarthost": { + "host": "string", + "port": "string" + } +} +``` + +### Properties + +| Name | Type | Required | Restrictions | Description | +| ----------- | ------------------------------------ | -------- | ------------ | --------------------------------------------------------------------- | +| `from` | string | false | | The sender's address. | +| `hello` | string | false | | The hostname identifying the SMTP server. | +| `smarthost` | [serpent.HostPort](#serpenthostport) | false | | The intermediary SMTP host through which emails are sent (host:port). | + +## codersdk.NotificationsWebhookConfig + +```json +{ + "endpoint": { + "forceQuery": true, + "fragment": "string", + "host": "string", + "omitHost": true, + "opaque": "string", + "path": "string", + "rawFragment": "string", + "rawPath": "string", + "rawQuery": "string", + "scheme": "string", + "user": {} + } +} +``` + +### Properties + +| Name | Type | Required | Restrictions | Description | +| ---------- | -------------------------- | -------- | ------------ | -------------------------------------------------------------------- | +| `endpoint` | [serpent.URL](#serpenturl) | false | | The URL to which the payload will be sent with an HTTP POST request. | + ## codersdk.OAuth2AppEndpoints ```json diff --git a/docs/cli/server.md b/docs/cli/server.md index ea3672a1cb2d7..b3e8da3213b3d 100644 --- a/docs/cli/server.md +++ b/docs/cli/server.md @@ -1194,3 +1194,78 @@ Refresh interval for healthchecks. | Default | 15ms | The threshold for the database health check. If the median latency of the database exceeds this threshold over 5 attempts, the database is considered unhealthy. The default value is 15ms. + +### --notifications-method + +| | | +| ----------- | ---------------------------------------- | +| Type | string | +| Environment | $CODER_NOTIFICATIONS_METHOD | +| YAML | notifications.method | +| Default | smtp | + +Which delivery method to use (available options: 'smtp', 'webhook'). + +### --notifications-dispatch-timeout + +| | | +| ----------- | -------------------------------------------------- | +| Type | duration | +| Environment | $CODER_NOTIFICATIONS_DISPATCH_TIMEOUT | +| YAML | notifications.dispatch-timeout | +| Default | 1m0s | + +How long to wait while a notification is being sent before giving up. + +### --notifications-email-from + +| | | +| ----------- | -------------------------------------------- | +| Type | string | +| Environment | $CODER_NOTIFICATIONS_EMAIL_FROM | +| YAML | notifications.email.from | + +The sender's address to use. + +### --notifications-email-smarthost + +| | | +| ----------- | ------------------------------------------------- | +| Type | host:port | +| Environment | $CODER_NOTIFICATIONS_EMAIL_SMARTHOST | +| YAML | notifications.email.smarthost | +| Default | localhost:587 | + +The intermediary SMTP host through which emails are sent. 
+
+### --notifications-email-hello
+
+|             |                                               |
+| ----------- | --------------------------------------------- |
+| Type        | string                                        |
+| Environment | $CODER_NOTIFICATIONS_EMAIL_HELLO              |
+| YAML        | notifications.email.hello                     |
+| Default     | localhost                                     |
+
+The hostname identifying the SMTP server.
+
+### --notifications-webhook-endpoint
+
+|             |                                                    |
+| ----------- | -------------------------------------------------- |
+| Type        | url                                                |
+| Environment | $CODER_NOTIFICATIONS_WEBHOOK_ENDPOINT              |
+| YAML        | notifications.webhook.endpoint                     |
+
+The endpoint to which to send webhooks.
+
+### --notifications-max-send-attempts
+
+|             |                                                     |
+| ----------- | --------------------------------------------------- |
+| Type        | int                                                 |
+| Environment | $CODER_NOTIFICATIONS_MAX_SEND_ATTEMPTS              |
+| YAML        | notifications.max-send-attempts                     |
+| Default     | 5                                                   |
+
+The upper limit of attempts to send a notification.
diff --git a/enterprise/cli/testdata/coder_server_--help.golden b/enterprise/cli/testdata/coder_server_--help.golden
index 2c094e84913f0..8bde8a9d3fc94 100644
--- a/enterprise/cli/testdata/coder_server_--help.golden
+++ b/enterprise/cli/testdata/coder_server_--help.golden
@@ -327,6 +327,30 @@ can safely ignore these settings.
       Minimum supported version of TLS. Accepted values are "tls10", "tls11",
       "tls12" or "tls13".
 
+NOTIFICATIONS OPTIONS:
+      --notifications-dispatch-timeout duration, $CODER_NOTIFICATIONS_DISPATCH_TIMEOUT (default: 1m0s)
+          How long to wait while a notification is being sent before giving up.
+
+      --notifications-max-send-attempts int, $CODER_NOTIFICATIONS_MAX_SEND_ATTEMPTS (default: 5)
+          The upper limit of attempts to send a notification.
+
+      --notifications-method string, $CODER_NOTIFICATIONS_METHOD (default: smtp)
+          Which delivery method to use (available options: 'smtp', 'webhook').
+
+NOTIFICATIONS / EMAIL OPTIONS:
+      --notifications-email-from string, $CODER_NOTIFICATIONS_EMAIL_FROM
+          The sender's address to use.
+
+      --notifications-email-hello string, $CODER_NOTIFICATIONS_EMAIL_HELLO (default: localhost)
+          The hostname identifying the SMTP server.
+
+      --notifications-email-smarthost host:port, $CODER_NOTIFICATIONS_EMAIL_SMARTHOST (default: localhost:587)
+          The intermediary SMTP host through which emails are sent.
+
+NOTIFICATIONS / WEBHOOK OPTIONS:
+      --notifications-webhook-endpoint url, $CODER_NOTIFICATIONS_WEBHOOK_ENDPOINT
+          The endpoint to which to send webhooks.
+ OAUTH2 / GITHUB OPTIONS: --oauth2-github-allow-everyone bool, $CODER_OAUTH2_GITHUB_ALLOW_EVERYONE Allow all logins, setting this option means allowed orgs and teams diff --git a/enterprise/coderd/provisionerdaemons.go b/enterprise/coderd/provisionerdaemons.go index 827ecfffe46a6..64b3933b44014 100644 --- a/enterprise/coderd/provisionerdaemons.go +++ b/enterprise/coderd/provisionerdaemons.go @@ -27,6 +27,7 @@ import ( "github.com/coder/coder/v2/coderd/database/dbtime" "github.com/coder/coder/v2/coderd/httpapi" "github.com/coder/coder/v2/coderd/httpmw" + "github.com/coder/coder/v2/coderd/notifications" "github.com/coder/coder/v2/coderd/provisionerdserver" "github.com/coder/coder/v2/coderd/rbac" "github.com/coder/coder/v2/coderd/rbac/policy" @@ -336,6 +337,7 @@ func (api *API) provisionerDaemonServe(rw http.ResponseWriter, r *http.Request) ExternalAuthConfigs: api.ExternalAuthConfigs, OIDCConfig: api.OIDCConfig, }, + notifications.NewNoopEnqueuer(), ) if err != nil { if !xerrors.Is(err, context.Canceled) { diff --git a/flake.nix b/flake.nix index 930294b71a8b4..6e1aa4a5ffe51 100644 --- a/flake.nix +++ b/flake.nix @@ -97,7 +97,7 @@ name = "coder-${osArch}"; # Updated with ./scripts/update-flake.sh`. # This should be updated whenever go.mod changes! - vendorHash = "sha256-xHrnqSq2Ya04d9Y48tbkQTNo9bYnp7LqcUnXXRbMFXE="; + vendorHash = "sha256-HXDei93ALEImIMgX3Ez829jmJJsf46GwaqPDlleQFmk="; proxyVendor = true; src = ./.; nativeBuildInputs = with pkgs; [ getopt openssl zstd ]; diff --git a/go.mod b/go.mod index eb4350d9e7649..69cbfc9ecdec3 100644 --- a/go.mod +++ b/go.mod @@ -199,6 +199,7 @@ require ( github.com/coder/serpent v0.7.0 github.com/gomarkdown/markdown v0.0.0-20231222211730-1d6d20845b47 github.com/google/go-github/v61 v61.0.0 + github.com/mocktools/go-smtp-mock/v2 v2.3.0 ) require ( diff --git a/go.sum b/go.sum index 163270b486af4..dfc4e1794ad64 100644 --- a/go.sum +++ b/go.sum @@ -706,6 +706,8 @@ github.com/moby/moby v26.1.0+incompatible h1:mjepCwMH0KpCgPvrXjqqyCeTCHgzO7p9TwZ github.com/moby/moby v26.1.0+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/mocktools/go-smtp-mock/v2 v2.3.0 h1:jgTDBEoQ8Kpw/fPWxy6qR2pGwtNn5j01T3Wut4xJo5Y= +github.com/mocktools/go-smtp-mock/v2 v2.3.0/go.mod h1:n8aNpDYncZHH/cZHtJKzQyeYT/Dut00RghVM+J1Ed94= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/site/src/api/typesGenerated.ts b/site/src/api/typesGenerated.ts index c10b8d17fac62..ad142b41392d0 100644 --- a/site/src/api/typesGenerated.ts +++ b/site/src/api/typesGenerated.ts @@ -464,6 +464,7 @@ export interface DeploymentValues { readonly healthcheck?: HealthcheckConfig; readonly cli_upgrade_message?: string; readonly terms_of_service_url?: string; + readonly notifications?: NotificationsConfig; readonly config?: string; readonly write_config?: boolean; readonly address?: string; @@ -686,6 +687,33 @@ export interface MinimalUser { readonly avatar_url: string; } +// From codersdk/deployment.go +export interface NotificationsConfig { + readonly max_send_attempts: number; + readonly retry_interval: number; 
+ readonly sync_interval: number; + readonly sync_buffer_size: number; + readonly lease_period: number; + readonly lease_count: number; + readonly fetch_interval: number; + readonly method: string; + readonly dispatch_timeout: number; + readonly email: NotificationsEmailConfig; + readonly webhook: NotificationsWebhookConfig; +} + +// From codersdk/deployment.go +export interface NotificationsEmailConfig { + readonly from: string; + readonly smarthost: string; + readonly hello: string; +} + +// From codersdk/deployment.go +export interface NotificationsWebhookConfig { + readonly endpoint: string; +} + // From codersdk/oauth2.go export interface OAuth2AppEndpoints { readonly authorization: string; @@ -1968,12 +1996,14 @@ export type Experiment = | "custom-roles" | "example" | "multi-organization" + | "notifications" | "workspace-usage"; export const Experiments: Experiment[] = [ "auto-fill-parameters", "custom-roles", "example", "multi-organization", + "notifications", "workspace-usage", ];
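
For context on how the new webhook method might be consumed: the `endpoint` configured above receives an HTTP POST, and the notification metadata follows the `MessagePayload` shape added in `coderd/notifications/types/payload.go`. The exact envelope around that payload is not specified in this diff, so the struct below is an assumption based on those fields, not the wire format; treat it as a sketch of a receiver rather than a reference implementation.

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// webhookPayload mirrors types.MessagePayload from this change; any outer
// envelope is an assumption, not something this diff specifies.
type webhookPayload struct {
	Version          string `json:"_version"`
	NotificationName string `json:"notification_name"`
	CreatedBy        string `json:"created_by"`
	UserID           string `json:"user_id"`
	UserEmail        string `json:"user_email"`
	UserName         string `json:"user_name"`
	Actions          []struct {
		Label string `json:"label"`
		URL   string `json:"url"`
	} `json:"actions"`
	Labels map[string]string `json:"labels"`
}

func main() {
	http.HandleFunc("/notify", func(w http.ResponseWriter, r *http.Request) {
		var p webhookPayload
		if err := json.NewDecoder(r.Body).Decode(&p); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		// Log the notification name, recipient, and labels for inspection.
		log.Printf("notification %q for %s: %v", p.NotificationName, p.UserEmail, p.Labels)
		w.WriteHeader(http.StatusOK)
	})
	// Point CODER_NOTIFICATIONS_WEBHOOK_ENDPOINT at this address to smoke-test the experiment.
	log.Fatal(http.ListenAndServe(":8090", nil))
}
```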