From befb4dc2e88bb6711db1cab799e84125701c8675 Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Wed, 9 Aug 2023 15:08:05 +0000 Subject: [PATCH 01/25] feat(coderd): track `coder_app` usage Updates #8658 --- coderd/apidoc/docs.go | 79 +++++++ coderd/apidoc/swagger.json | 73 +++++++ coderd/coderd.go | 8 +- coderd/database/dbauthz/dbauthz.go | 5 + coderd/database/dbfake/dbfake.go | 9 + coderd/database/dbmetrics/dbmetrics.go | 7 + coderd/database/dbmock/dbmock.go | 14 ++ coderd/database/dump.sql | 48 +++++ .../000148_workspace_app_stats.down.sql | 1 + .../000148_workspace_app_stats.up.sql | 29 +++ coderd/database/models.go | 22 ++ coderd/database/querier.go | 1 + coderd/database/queries.sql.go | 53 +++++ coderd/database/queries/workspaceappstats.sql | 24 +++ coderd/database/unique_constraint.go | 1 + coderd/workspaceapps/proxy.go | 17 +- coderd/workspaceapps/stats.go | 196 ++++++++++++++++++ docs/api/schemas.md | 53 +++++ enterprise/coderd/coderd.go | 1 + enterprise/coderd/workspaceproxy.go | 29 +++ enterprise/wsproxy/appstatsreporter.go | 21 ++ enterprise/wsproxy/wsproxy.go | 8 +- enterprise/wsproxy/wsproxysdk/wsproxysdk.go | 19 ++ 23 files changed, 715 insertions(+), 3 deletions(-) create mode 100644 coderd/database/migrations/000148_workspace_app_stats.down.sql create mode 100644 coderd/database/migrations/000148_workspace_app_stats.up.sql create mode 100644 coderd/database/queries/workspaceappstats.sql create mode 100644 coderd/workspaceapps/stats.go create mode 100644 enterprise/wsproxy/appstatsreporter.go diff --git a/coderd/apidoc/docs.go b/coderd/apidoc/docs.go index 61a6989ba27fd..0b3e86fb9a666 100644 --- a/coderd/apidoc/docs.go +++ b/coderd/apidoc/docs.go @@ -5479,6 +5479,45 @@ const docTemplate = `{ } } }, + "/workspaceproxies/me/app-stats": { + "post": { + "security": [ + { + "CoderSessionToken": [] + } + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Enterprise" + ], + "summary": "Report workspace app stats", + "operationId": "report-workspace-app-stats", + "parameters": [ + { + "description": "Report app stats request", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/wsproxysdk.ReportAppStatsRequest" + } + } + ], + "responses": { + "204": { + "description": "No Content" + } + }, + "x-apidocgen": { + "skip": true + } + } + }, "/workspaceproxies/me/coordinate": { "get": { "security": [ @@ -11679,6 +11718,35 @@ const docTemplate = `{ } } }, + "workspaceapps.StatsReport": { + "type": "object", + "properties": { + "access_method": { + "$ref": "#/definitions/workspaceapps.AccessMethod" + }, + "agent_id": { + "type": "string" + }, + "session_end_time": { + "type": "string" + }, + "session_id": { + "type": "string" + }, + "session_start_time": { + "type": "string" + }, + "slug": { + "type": "string" + }, + "user_id": { + "type": "string" + }, + "workspace_id": { + "type": "string" + } + } + }, "wsproxysdk.AgentIsLegacyResponse": { "type": "object", "properties": { @@ -11769,6 +11837,17 @@ const docTemplate = `{ } } } + }, + "wsproxysdk.ReportAppStatsRequest": { + "type": "object", + "properties": { + "stats": { + "type": "array", + "items": { + "$ref": "#/definitions/workspaceapps.StatsReport" + } + } + } } }, "securityDefinitions": { diff --git a/coderd/apidoc/swagger.json b/coderd/apidoc/swagger.json index 982ed816264c1..92d911ac96d9d 100644 --- a/coderd/apidoc/swagger.json +++ b/coderd/apidoc/swagger.json @@ -4831,6 +4831,39 @@ } } }, + "/workspaceproxies/me/app-stats": { + 
"post": { + "security": [ + { + "CoderSessionToken": [] + } + ], + "consumes": ["application/json"], + "produces": ["application/json"], + "tags": ["Enterprise"], + "summary": "Report workspace app stats", + "operationId": "report-workspace-app-stats", + "parameters": [ + { + "description": "Report app stats request", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/wsproxysdk.ReportAppStatsRequest" + } + } + ], + "responses": { + "204": { + "description": "No Content" + } + }, + "x-apidocgen": { + "skip": true + } + } + }, "/workspaceproxies/me/coordinate": { "get": { "security": [ @@ -10659,6 +10692,35 @@ } } }, + "workspaceapps.StatsReport": { + "type": "object", + "properties": { + "access_method": { + "$ref": "#/definitions/workspaceapps.AccessMethod" + }, + "agent_id": { + "type": "string" + }, + "session_end_time": { + "type": "string" + }, + "session_id": { + "type": "string" + }, + "session_start_time": { + "type": "string" + }, + "slug": { + "type": "string" + }, + "user_id": { + "type": "string" + }, + "workspace_id": { + "type": "string" + } + } + }, "wsproxysdk.AgentIsLegacyResponse": { "type": "object", "properties": { @@ -10749,6 +10811,17 @@ } } } + }, + "wsproxysdk.ReportAppStatsRequest": { + "type": "object", + "properties": { + "stats": { + "type": "array", + "items": { + "$ref": "#/definitions/workspaceapps.StatsReport" + } + } + } } }, "securityDefinitions": { diff --git a/coderd/coderd.go b/coderd/coderd.go index 58b6c902c7dbc..29e350cd60985 100644 --- a/coderd/coderd.go +++ b/coderd/coderd.go @@ -417,8 +417,9 @@ func New(options *Options) *API { } } + workspaceAppsLogger := options.Logger.Named("workspaceapps") api.workspaceAppServer = &workspaceapps.Server{ - Logger: options.Logger.Named("workspaceapps"), + Logger: workspaceAppsLogger, DashboardURL: api.AccessURL, AccessURL: api.AccessURL, @@ -429,6 +430,11 @@ func New(options *Options) *API { SignedTokenProvider: api.WorkspaceAppsProvider, AgentProvider: api.agentProvider, AppSecurityKey: options.AppSecurityKey, + StatsCollector: workspaceapps.NewStatsCollector( + workspaceAppsLogger.Named("stats_collector"), + workspaceapps.NewStatsDBReporter(options.Database), + workspaceapps.DefaultStatsCollectorReportInterval, + ), DisablePathApps: options.DeploymentValues.DisablePathApps.Value(), SecureAuthCookie: options.DeploymentValues.SecureAuthCookie.Value(), diff --git a/coderd/database/dbauthz/dbauthz.go b/coderd/database/dbauthz/dbauthz.go index 488842dcaf351..28ed877a4d3e0 100644 --- a/coderd/database/dbauthz/dbauthz.go +++ b/coderd/database/dbauthz/dbauthz.go @@ -2031,6 +2031,11 @@ func (q *querier) InsertWorkspaceApp(ctx context.Context, arg database.InsertWor return q.db.InsertWorkspaceApp(ctx, arg) } +func (q *querier) InsertWorkspaceAppStats(ctx context.Context, arg database.InsertWorkspaceAppStatsParams) error { + // TODO(mafredri): Add auth. 
+ return q.db.InsertWorkspaceAppStats(ctx, arg) +} + func (q *querier) InsertWorkspaceBuild(ctx context.Context, arg database.InsertWorkspaceBuildParams) error { w, err := q.db.GetWorkspaceByID(ctx, arg.WorkspaceID) if err != nil { diff --git a/coderd/database/dbfake/dbfake.go b/coderd/database/dbfake/dbfake.go index c968a52f8d00d..aac77175cf8be 100644 --- a/coderd/database/dbfake/dbfake.go +++ b/coderd/database/dbfake/dbfake.go @@ -4260,6 +4260,15 @@ func (q *FakeQuerier) InsertWorkspaceApp(_ context.Context, arg database.InsertW return workspaceApp, nil } +func (q *FakeQuerier) InsertWorkspaceAppStats(ctx context.Context, arg database.InsertWorkspaceAppStatsParams) error { + err := validateDatabaseType(arg) + if err != nil { + return err + } + + panic("not implemented") +} + func (q *FakeQuerier) InsertWorkspaceBuild(_ context.Context, arg database.InsertWorkspaceBuildParams) error { if err := validateDatabaseType(arg); err != nil { return err diff --git a/coderd/database/dbmetrics/dbmetrics.go b/coderd/database/dbmetrics/dbmetrics.go index 85ee8c26a0d51..eb648bab215b9 100644 --- a/coderd/database/dbmetrics/dbmetrics.go +++ b/coderd/database/dbmetrics/dbmetrics.go @@ -1250,6 +1250,13 @@ func (m metricsStore) InsertWorkspaceApp(ctx context.Context, arg database.Inser return app, err } +func (m metricsStore) InsertWorkspaceAppStats(ctx context.Context, arg database.InsertWorkspaceAppStatsParams) error { + start := time.Now() + r0 := m.s.InsertWorkspaceAppStats(ctx, arg) + m.queryLatencies.WithLabelValues("InsertWorkspaceAppStats").Observe(time.Since(start).Seconds()) + return r0 +} + func (m metricsStore) InsertWorkspaceBuild(ctx context.Context, arg database.InsertWorkspaceBuildParams) error { start := time.Now() err := m.s.InsertWorkspaceBuild(ctx, arg) diff --git a/coderd/database/dbmock/dbmock.go b/coderd/database/dbmock/dbmock.go index cb7278369884b..e7e83d5faa035 100644 --- a/coderd/database/dbmock/dbmock.go +++ b/coderd/database/dbmock/dbmock.go @@ -2627,6 +2627,20 @@ func (mr *MockStoreMockRecorder) InsertWorkspaceApp(arg0, arg1 interface{}) *gom return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertWorkspaceApp", reflect.TypeOf((*MockStore)(nil).InsertWorkspaceApp), arg0, arg1) } +// InsertWorkspaceAppStats mocks base method. +func (m *MockStore) InsertWorkspaceAppStats(arg0 context.Context, arg1 database.InsertWorkspaceAppStatsParams) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InsertWorkspaceAppStats", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// InsertWorkspaceAppStats indicates an expected call of InsertWorkspaceAppStats. +func (mr *MockStoreMockRecorder) InsertWorkspaceAppStats(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertWorkspaceAppStats", reflect.TypeOf((*MockStore)(nil).InsertWorkspaceAppStats), arg0, arg1) +} + // InsertWorkspaceBuild mocks base method. 
func (m *MockStore) InsertWorkspaceBuild(arg0 context.Context, arg1 database.InsertWorkspaceBuildParams) error { m.ctrl.T.Helper() diff --git a/coderd/database/dump.sql b/coderd/database/dump.sql index f121fccf8cebb..22f325920c37f 100644 --- a/coderd/database/dump.sql +++ b/coderd/database/dump.sql @@ -802,6 +802,38 @@ COMMENT ON COLUMN workspace_agents.started_at IS 'The time the agent entered the COMMENT ON COLUMN workspace_agents.ready_at IS 'The time the agent entered the ready or start_error lifecycle state'; +CREATE TABLE workspace_app_stats ( + id uuid NOT NULL, + user_id uuid NOT NULL, + workspace_id uuid NOT NULL, + agent_id uuid NOT NULL, + access_method text NOT NULL, + slug_or_port text NOT NULL, + session_id uuid NOT NULL, + session_started_at timestamp with time zone NOT NULL, + session_ended_at timestamp with time zone +); + +COMMENT ON TABLE workspace_app_stats IS 'A record of workspace app usage statistics'; + +COMMENT ON COLUMN workspace_app_stats.id IS 'The unique identifier for the workspace app stat record'; + +COMMENT ON COLUMN workspace_app_stats.user_id IS 'The user who used the workspace app'; + +COMMENT ON COLUMN workspace_app_stats.workspace_id IS 'The workspace that the workspace app was used in'; + +COMMENT ON COLUMN workspace_app_stats.agent_id IS 'The workspace agent that was used'; + +COMMENT ON COLUMN workspace_app_stats.access_method IS 'The method used to access the workspace app'; + +COMMENT ON COLUMN workspace_app_stats.slug_or_port IS 'The slug or port used to to identify the app'; + +COMMENT ON COLUMN workspace_app_stats.session_id IS 'The unique identifier for the session'; + +COMMENT ON COLUMN workspace_app_stats.session_started_at IS 'The time the session started'; + +COMMENT ON COLUMN workspace_app_stats.session_ended_at IS 'The time the session ended'; + CREATE TABLE workspace_apps ( id uuid NOT NULL, created_at timestamp with time zone NOT NULL, @@ -1071,6 +1103,9 @@ ALTER TABLE ONLY workspace_agent_logs ALTER TABLE ONLY workspace_agents ADD CONSTRAINT workspace_agents_pkey PRIMARY KEY (id); +ALTER TABLE ONLY workspace_app_stats + ADD CONSTRAINT workspace_app_stats_pkey PRIMARY KEY (id); + ALTER TABLE ONLY workspace_apps ADD CONSTRAINT workspace_apps_agent_id_slug_idx UNIQUE (agent_id, slug); @@ -1157,6 +1192,10 @@ CREATE INDEX workspace_agents_auth_token_idx ON workspace_agents USING btree (au CREATE INDEX workspace_agents_resource_id_idx ON workspace_agents USING btree (resource_id); +CREATE UNIQUE INDEX workspace_app_stats_user_agent_session_idx ON workspace_app_stats USING btree (agent_id, session_id); + +CREATE INDEX workspace_app_stats_workspace_id_idx ON workspace_app_stats USING btree (workspace_id); + CREATE UNIQUE INDEX workspace_proxies_lower_name_idx ON workspace_proxies USING btree (lower(name)) WHERE (deleted = false); CREATE INDEX workspace_resources_job_id_idx ON workspace_resources USING btree (job_id); @@ -1242,6 +1281,15 @@ ALTER TABLE ONLY workspace_agent_logs ALTER TABLE ONLY workspace_agents ADD CONSTRAINT workspace_agents_resource_id_fkey FOREIGN KEY (resource_id) REFERENCES workspace_resources(id) ON DELETE CASCADE; +ALTER TABLE ONLY workspace_app_stats + ADD CONSTRAINT workspace_app_stats_agent_id_fkey FOREIGN KEY (agent_id) REFERENCES workspace_agents(id); + +ALTER TABLE ONLY workspace_app_stats + ADD CONSTRAINT workspace_app_stats_user_id_fkey FOREIGN KEY (user_id) REFERENCES users(id); + +ALTER TABLE ONLY workspace_app_stats + ADD CONSTRAINT workspace_app_stats_workspace_id_fkey FOREIGN KEY (workspace_id) REFERENCES 
workspaces(id); + ALTER TABLE ONLY workspace_apps ADD CONSTRAINT workspace_apps_agent_id_fkey FOREIGN KEY (agent_id) REFERENCES workspace_agents(id) ON DELETE CASCADE; diff --git a/coderd/database/migrations/000148_workspace_app_stats.down.sql b/coderd/database/migrations/000148_workspace_app_stats.down.sql new file mode 100644 index 0000000000000..983a13c180edc --- /dev/null +++ b/coderd/database/migrations/000148_workspace_app_stats.down.sql @@ -0,0 +1 @@ +DROP TABLE workspace_app_stats; diff --git a/coderd/database/migrations/000148_workspace_app_stats.up.sql b/coderd/database/migrations/000148_workspace_app_stats.up.sql new file mode 100644 index 0000000000000..dbd5e920bb560 --- /dev/null +++ b/coderd/database/migrations/000148_workspace_app_stats.up.sql @@ -0,0 +1,29 @@ +CREATE TABLE workspace_app_stats ( + id uuid PRIMARY KEY, + user_id uuid NOT NULL REFERENCES users (id), + workspace_id uuid NOT NULL REFERENCES workspaces (id), + agent_id uuid NOT NULL REFERENCES workspace_agents (id), + access_method text NOT NULL, + slug_or_port text NOT NULL, + session_id uuid NOT NULL, + session_started_at timestamp with time zone NOT NULL, + session_ended_at timestamp with time zone +); + +COMMENT ON TABLE workspace_app_stats IS 'A record of workspace app usage statistics'; + +COMMENT ON COLUMN workspace_app_stats.id IS 'The unique identifier for the workspace app stat record'; +COMMENT ON COLUMN workspace_app_stats.user_id IS 'The user who used the workspace app'; +COMMENT ON COLUMN workspace_app_stats.workspace_id IS 'The workspace that the workspace app was used in'; +COMMENT ON COLUMN workspace_app_stats.agent_id IS 'The workspace agent that was used'; +COMMENT ON COLUMN workspace_app_stats.access_method IS 'The method used to access the workspace app'; +COMMENT ON COLUMN workspace_app_stats.slug_or_port IS 'The slug or port used to to identify the app'; +COMMENT ON COLUMN workspace_app_stats.session_id IS 'The unique identifier for the session'; +COMMENT ON COLUMN workspace_app_stats.session_started_at IS 'The time the session started'; +COMMENT ON COLUMN workspace_app_stats.session_ended_at IS 'The time the session ended'; + +-- Create a unique index to prevent duplicate records (scoped to agent to ensure no collisions). +CREATE UNIQUE INDEX workspace_app_stats_user_agent_session_idx ON workspace_app_stats (agent_id, session_id); + +-- Create index on workspace_id for joining/filtering by templates. 
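+-- (The index covers workspace_id only; rollups by template are expected to
+-- join workspace_app_stats to workspaces and group on workspaces.template_id.)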
+CREATE INDEX workspace_app_stats_workspace_id_idx ON workspace_app_stats (workspace_id); diff --git a/coderd/database/models.go b/coderd/database/models.go index 4e34989b09ae9..b2d052df2215c 100644 --- a/coderd/database/models.go +++ b/coderd/database/models.go @@ -1953,6 +1953,28 @@ type WorkspaceApp struct { External bool `db:"external" json:"external"` } +// A record of workspace app usage statistics +type WorkspaceAppStat struct { + // The unique identifier for the workspace app stat record + ID uuid.UUID `db:"id" json:"id"` + // The user who used the workspace app + UserID uuid.UUID `db:"user_id" json:"user_id"` + // The workspace that the workspace app was used in + WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"` + // The workspace agent that was used + AgentID uuid.UUID `db:"agent_id" json:"agent_id"` + // The method used to access the workspace app + AccessMethod string `db:"access_method" json:"access_method"` + // The slug or port used to to identify the app + SlugOrPort string `db:"slug_or_port" json:"slug_or_port"` + // The unique identifier for the session + SessionID uuid.UUID `db:"session_id" json:"session_id"` + // The time the session started + SessionStartedAt time.Time `db:"session_started_at" json:"session_started_at"` + // The time the session ended + SessionEndedAt sql.NullTime `db:"session_ended_at" json:"session_ended_at"` +} + // Joins in the username + avatar url of the initiated by user. type WorkspaceBuild struct { ID uuid.UUID `db:"id" json:"id"` diff --git a/coderd/database/querier.go b/coderd/database/querier.go index b308589ffc350..4f80b8fe3a30a 100644 --- a/coderd/database/querier.go +++ b/coderd/database/querier.go @@ -227,6 +227,7 @@ type sqlcQuerier interface { InsertWorkspaceAgentStat(ctx context.Context, arg InsertWorkspaceAgentStatParams) (WorkspaceAgentStat, error) InsertWorkspaceAgentStats(ctx context.Context, arg InsertWorkspaceAgentStatsParams) error InsertWorkspaceApp(ctx context.Context, arg InsertWorkspaceAppParams) (WorkspaceApp, error) + InsertWorkspaceAppStats(ctx context.Context, arg InsertWorkspaceAppStatsParams) error InsertWorkspaceBuild(ctx context.Context, arg InsertWorkspaceBuildParams) error InsertWorkspaceBuildParameters(ctx context.Context, arg InsertWorkspaceBuildParametersParams) error InsertWorkspaceProxy(ctx context.Context, arg InsertWorkspaceProxyParams) (WorkspaceProxy, error) diff --git a/coderd/database/queries.sql.go b/coderd/database/queries.sql.go index 8748e4d7ca9ed..1cef7cce6dad7 100644 --- a/coderd/database/queries.sql.go +++ b/coderd/database/queries.sql.go @@ -7766,6 +7766,59 @@ func (q *sqlQuerier) UpdateWorkspaceAppHealthByID(ctx context.Context, arg Updat return err } +const insertWorkspaceAppStats = `-- name: InsertWorkspaceAppStats :exec +INSERT INTO + workspace_app_stats ( + id, + user_id, + workspace_id, + agent_id, + access_method, + slug_or_port, + session_id, + session_started_at, + session_ended_at + ) +VALUES + ($1, $2, $3, $4, $5, $6, $7, $8, $9) +ON CONFLICT + (agent_id, session_id) +DO + UPDATE SET + -- Only session end can be updated. 
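+	-- The conflict target matches the unique index on (agent_id, session_id),
+	-- so re-reporting the same session updates the row instead of erroring.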
+ session_ended_at = $9 + WHERE + workspace_app_stats.agent_id = $4 + AND workspace_app_stats.session_id = $7 +` + +type InsertWorkspaceAppStatsParams struct { + ID uuid.UUID `db:"id" json:"id"` + UserID uuid.UUID `db:"user_id" json:"user_id"` + WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"` + AgentID uuid.UUID `db:"agent_id" json:"agent_id"` + AccessMethod string `db:"access_method" json:"access_method"` + SlugOrPort string `db:"slug_or_port" json:"slug_or_port"` + SessionID uuid.UUID `db:"session_id" json:"session_id"` + SessionStartedAt time.Time `db:"session_started_at" json:"session_started_at"` + SessionEndedAt sql.NullTime `db:"session_ended_at" json:"session_ended_at"` +} + +func (q *sqlQuerier) InsertWorkspaceAppStats(ctx context.Context, arg InsertWorkspaceAppStatsParams) error { + _, err := q.db.ExecContext(ctx, insertWorkspaceAppStats, + arg.ID, + arg.UserID, + arg.WorkspaceID, + arg.AgentID, + arg.AccessMethod, + arg.SlugOrPort, + arg.SessionID, + arg.SessionStartedAt, + arg.SessionEndedAt, + ) + return err +} + const getWorkspaceBuildParameters = `-- name: GetWorkspaceBuildParameters :many SELECT workspace_build_id, name, value diff --git a/coderd/database/queries/workspaceappstats.sql b/coderd/database/queries/workspaceappstats.sql new file mode 100644 index 0000000000000..32244cc2b7fad --- /dev/null +++ b/coderd/database/queries/workspaceappstats.sql @@ -0,0 +1,24 @@ +-- name: InsertWorkspaceAppStats :exec +INSERT INTO + workspace_app_stats ( + id, + user_id, + workspace_id, + agent_id, + access_method, + slug_or_port, + session_id, + session_started_at, + session_ended_at + ) +VALUES + ($1, $2, $3, $4, $5, $6, $7, $8, $9) +ON CONFLICT + (agent_id, session_id) +DO + UPDATE SET + -- Only session end can be updated. + session_ended_at = $9 + WHERE + workspace_app_stats.agent_id = $4 + AND workspace_app_stats.session_id = $7; diff --git a/coderd/database/unique_constraint.go b/coderd/database/unique_constraint.go index c8dbc831e8651..2b7cb1450050f 100644 --- a/coderd/database/unique_constraint.go +++ b/coderd/database/unique_constraint.go @@ -32,6 +32,7 @@ const ( UniqueTemplatesOrganizationIDNameIndex UniqueConstraint = "templates_organization_id_name_idx" // CREATE UNIQUE INDEX templates_organization_id_name_idx ON templates USING btree (organization_id, lower((name)::text)) WHERE (deleted = false); UniqueUsersEmailLowerIndex UniqueConstraint = "users_email_lower_idx" // CREATE UNIQUE INDEX users_email_lower_idx ON users USING btree (lower(email)) WHERE (deleted = false); UniqueUsersUsernameLowerIndex UniqueConstraint = "users_username_lower_idx" // CREATE UNIQUE INDEX users_username_lower_idx ON users USING btree (lower(username)) WHERE (deleted = false); + UniqueWorkspaceAppStatsUserAgentSessionIndex UniqueConstraint = "workspace_app_stats_user_agent_session_idx" // CREATE UNIQUE INDEX workspace_app_stats_user_agent_session_idx ON workspace_app_stats USING btree (agent_id, session_id); UniqueWorkspaceProxiesLowerNameIndex UniqueConstraint = "workspace_proxies_lower_name_idx" // CREATE UNIQUE INDEX workspace_proxies_lower_name_idx ON workspace_proxies USING btree (lower(name)) WHERE (deleted = false); UniqueWorkspacesOwnerIDLowerIndex UniqueConstraint = "workspaces_owner_id_lower_idx" // CREATE UNIQUE INDEX workspaces_owner_id_lower_idx ON workspaces USING btree (owner_id, lower((name)::text)) WHERE (deleted = false); ) diff --git a/coderd/workspaceapps/proxy.go b/coderd/workspaceapps/proxy.go index d75970bcfa8d9..10dc639fa3a28 100644 --- 
a/coderd/workspaceapps/proxy.go +++ b/coderd/workspaceapps/proxy.go @@ -19,6 +19,7 @@ import ( "cdr.dev/slog" "github.com/coder/coder/agent/agentssh" + "github.com/coder/coder/coderd/database" "github.com/coder/coder/coderd/httpapi" "github.com/coder/coder/coderd/httpmw" "github.com/coder/coder/coderd/tracing" @@ -109,7 +110,8 @@ type Server struct { DisablePathApps bool SecureAuthCookie bool - AgentProvider AgentProvider + AgentProvider AgentProvider + StatsCollector *StatsCollector websocketWaitMutex sync.Mutex websocketWaitGroup sync.WaitGroup @@ -586,7 +588,13 @@ func (s *Server) proxyWorkspaceApp(rw http.ResponseWriter, r *http.Request, appT // end span so we don't get long lived trace data tracing.EndHTTPSpan(r, http.StatusOK, trace.SpanFromContext(ctx)) + report := newStatsReportFromSignedToken(appToken) + s.StatsCollector.Collect(report) + proxy.ServeHTTP(rw, r) + + report.SessionEndTime = codersdk.NewNullTime(database.Now(), true) + s.StatsCollector.Collect(report) } // workspaceAgentPTY spawns a PTY and pipes it over a WebSocket. @@ -678,8 +686,15 @@ func (s *Server) workspaceAgentPTY(rw http.ResponseWriter, r *http.Request) { } defer ptNetConn.Close() log.Debug(ctx, "obtained PTY") + + report := newStatsReportFromSignedToken(*appToken) + s.StatsCollector.Collect(report) + agentssh.Bicopy(ctx, wsNetConn, ptNetConn) log.Debug(ctx, "pty Bicopy finished") + + report.SessionEndTime = codersdk.NewNullTime(database.Now(), true) + s.StatsCollector.Collect(report) } // wsNetConn wraps net.Conn created by websocket.NetConn(). Cancel func diff --git a/coderd/workspaceapps/stats.go b/coderd/workspaceapps/stats.go new file mode 100644 index 0000000000000..59f7936cec01b --- /dev/null +++ b/coderd/workspaceapps/stats.go @@ -0,0 +1,196 @@ +package workspaceapps + +import ( + "context" + "sync" + "time" + + "github.com/google/uuid" + "golang.org/x/xerrors" + + "cdr.dev/slog" + + "github.com/coder/coder/coderd/database" + "github.com/coder/coder/codersdk" +) + +const ( + DefaultStatsCollectorReportInterval = 30 * time.Second +) + +// StatsReport is a report of a workspace app session. +type StatsReport struct { + UserID uuid.UUID `json:"user_id"` + WorkspaceID uuid.UUID `json:"workspace_id"` + AgentID uuid.UUID `json:"agent_id"` + AccessMethod AccessMethod `json:"access_method"` + SlugOrPort string `json:"slug"` + SessionID uuid.UUID `json:"session_id"` + SessionStartTime time.Time `json:"session_start_time"` + SessionEndTime codersdk.NullTime `json:"session_end_time"` +} + +func newStatsReportFromSignedToken(token SignedToken) StatsReport { + return StatsReport{ + UserID: token.UserID, + WorkspaceID: token.WorkspaceID, + AgentID: token.AgentID, + AccessMethod: token.AccessMethod, + SlugOrPort: token.AppSlugOrPort, + SessionID: uuid.New(), + SessionStartTime: database.Now(), + } +} + +// StatsReporter reports workspace app StatsReports. +type StatsReporter interface { + Report(context.Context, []StatsReport) error +} + +// StatsDBReporter writes workspace app StatsReports to the database. +type StatsDBReporter struct { + Database database.Store +} + +// NewStatsDBReporter returns a new StatsDBReporter. +func NewStatsDBReporter(db database.Store) *StatsDBReporter { + return &StatsDBReporter{ + Database: db, + } +} + +// Report writes the given StatsReports to the database. 
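+// The whole batch is written in one transaction: if any insert fails, the
+// transaction is rolled back and the (wrapped) error is returned to the caller.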
+func (r *StatsDBReporter) Report(ctx context.Context, stats []StatsReport) error { + err := r.Database.InTx(func(tx database.Store) error { + for _, stat := range stats { + err := tx.InsertWorkspaceAppStats(ctx, database.InsertWorkspaceAppStatsParams{ + ID: uuid.New(), + UserID: stat.UserID, + WorkspaceID: stat.WorkspaceID, + AgentID: stat.AgentID, + AccessMethod: string(stat.AccessMethod), + SlugOrPort: stat.SlugOrPort, + SessionID: stat.SessionID, + SessionStartedAt: stat.SessionStartTime, + SessionEndedAt: stat.SessionEndTime.NullTime, + }) + if err != nil { + return err + } + } + + return nil + }, nil) + if err != nil { + return xerrors.Errorf("insert workspace app stats failed: %w", err) + } + + return nil +} + +// StatsCollector collects workspace app StatsReports and reports them +// in batches, stats compaction is performed for short-lived sessions. +type StatsCollector struct { + logger slog.Logger + reporter StatsReporter + + ctx context.Context + cancel context.CancelFunc + done chan struct{} + reportInterval time.Duration + + mu sync.Mutex // Protects following. + stats []StatsReport +} + +// Collect the given StatsReport for later reporting (non-blocking). +func (s *StatsCollector) Collect(report StatsReport) { + s.mu.Lock() + defer s.mu.Unlock() + s.stats = append(s.stats, report) +} + +func (s *StatsCollector) flush(ctx context.Context) error { + s.mu.Lock() + stats := s.stats + s.stats = nil + s.mu.Unlock() + + if len(stats) == 0 { + return nil + } + + // Compaction of stats, reduce payload by up to 50%. + compacted := make([]StatsReport, 0, len(stats)) + m := make(map[StatsReport]int) + for _, stat := range stats { + if !stat.SessionEndTime.IsZero() { + // Zero the time for map key equality. + cmp := stat + cmp.SessionEndTime = codersdk.NullTime{} + if j, ok := m[cmp]; ok { + compacted[j].SessionEndTime = stat.SessionEndTime + continue + } + } + m[stat] = len(compacted) + compacted = append(compacted, stat) + } + + err := s.reporter.Report(ctx, stats) + return err +} + +func (s *StatsCollector) Close() error { + s.cancel() + <-s.done + return nil +} + +func NewStatsCollector(logger slog.Logger, reporter StatsReporter, reportInterval time.Duration) *StatsCollector { + ctx, cancel := context.WithCancel(context.Background()) + c := &StatsCollector{ + logger: logger, + reporter: reporter, + + ctx: ctx, + cancel: cancel, + done: make(chan struct{}), + reportInterval: reportInterval, + } + + go c.start() + return c +} + +func (s *StatsCollector) start() { + defer func() { + close(s.done) + s.logger.Info(s.ctx, "workspace app stats collector stopped") + }() + s.logger.Info(s.ctx, "workspace app stats collector started") + + ticker := time.NewTicker(s.reportInterval) + defer ticker.Stop() + + done := false + for !done { + select { + case <-s.ctx.Done(): + ticker.Stop() + done = true + case <-ticker.C: + } + + s.logger.Debug(s.ctx, "flushing workspace app stats") + + // Ensure we don't hold up this request for too long. 
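+	// The timeout context is derived from context.Background rather than
+	// s.ctx so that the final flush after Close can still complete; the
+	// report interval plus a small grace period bounds each flush.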
+ ctx, cancel := context.WithTimeout(context.Background(), s.reportInterval+5*time.Second) + err := s.flush(ctx) + cancel() + if err != nil { + s.logger.Error(ctx, "failed to flush workspace app stats", slog.Error(err)) + continue + } + } +} diff --git a/docs/api/schemas.md b/docs/api/schemas.md index 2f09bfb6728ae..3263fb81e8874 100644 --- a/docs/api/schemas.md +++ b/docs/api/schemas.md @@ -7472,6 +7472,34 @@ _None_ | `username_or_id` | string | false | | For the following fields, if the AccessMethod is AccessMethodTerminal, then only AgentNameOrID may be set and it must be a UUID. The other fields must be left blank. | | `workspace_name_or_id` | string | false | | | +## workspaceapps.StatsReport + +```json +{ + "access_method": "path", + "agent_id": "string", + "session_end_time": "string", + "session_id": "string", + "session_start_time": "string", + "slug": "string", + "user_id": "string", + "workspace_id": "string" +} +``` + +### Properties + +| Name | Type | Required | Restrictions | Description | +| -------------------- | -------------------------------------------------------- | -------- | ------------ | ----------- | +| `access_method` | [workspaceapps.AccessMethod](#workspaceappsaccessmethod) | false | | | +| `agent_id` | string | false | | | +| `session_end_time` | string | false | | | +| `session_id` | string | false | | | +| `session_start_time` | string | false | | | +| `slug` | string | false | | | +| `user_id` | string | false | | | +| `workspace_id` | string | false | | | + ## wsproxysdk.AgentIsLegacyResponse ```json @@ -7576,3 +7604,28 @@ _None_ | `derp_mesh_key` | string | false | | | | `derp_region_id` | integer | false | | | | `sibling_replicas` | array of [codersdk.Replica](#codersdkreplica) | false | | Sibling replicas is a list of all other replicas of the proxy that have not timed out. 
| + +## wsproxysdk.ReportAppStatsRequest + +```json +{ + "stats": [ + { + "access_method": "path", + "agent_id": "string", + "session_end_time": "string", + "session_id": "string", + "session_start_time": "string", + "slug": "string", + "user_id": "string", + "workspace_id": "string" + } + ] +} +``` + +### Properties + +| Name | Type | Required | Restrictions | Description | +| ------- | --------------------------------------------------------------- | -------- | ------------ | ----------- | +| `stats` | array of [workspaceapps.StatsReport](#workspaceappsstatsreport) | false | | | diff --git a/enterprise/coderd/coderd.go b/enterprise/coderd/coderd.go index 71d975e3ef1d6..3e884f216e3fc 100644 --- a/enterprise/coderd/coderd.go +++ b/enterprise/coderd/coderd.go @@ -167,6 +167,7 @@ func New(ctx context.Context, options *Options) (_ *API, err error) { ) r.Get("/coordinate", api.workspaceProxyCoordinate) r.Post("/issue-signed-app-token", api.workspaceProxyIssueSignedAppToken) + r.Post("/app-stats", api.workspaceProxyReportAppStats) r.Post("/register", api.workspaceProxyRegister) r.Post("/deregister", api.workspaceProxyDeregister) }) diff --git a/enterprise/coderd/workspaceproxy.go b/enterprise/coderd/workspaceproxy.go index 591f2fa33374f..59a2752c8db60 100644 --- a/enterprise/coderd/workspaceproxy.go +++ b/enterprise/coderd/workspaceproxy.go @@ -492,6 +492,35 @@ func (api *API) workspaceProxyIssueSignedAppToken(rw http.ResponseWriter, r *htt }) } +// @Summary Report workspace app stats +// @ID report-workspace-app-stats +// @Security CoderSessionToken +// @Accept json +// @Produce json +// @Tags Enterprise +// @Param request body wsproxysdk.ReportAppStatsRequest true "Report app stats request" +// @Success 204 +// @Router /workspaceproxies/me/app-stats [post] +// @x-apidocgen {"skip": true} +func (api *API) workspaceProxyReportAppStats(rw http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + // TODO(mafredri): Is auth needed? + + var stats []workspaceapps.StatsReport + if !httpapi.Read(ctx, rw, r, &stats) { + return + } + + if err := workspaceapps.NewStatsDBReporter(api.Database).Report(ctx, stats); err != nil { + api.Logger.Error(ctx, "report app stats failed", slog.Error(err)) + httpapi.InternalServerError(rw, err) + return + } + + httpapi.Write(ctx, rw, http.StatusNoContent, nil) +} + // workspaceProxyRegister is used to register a new workspace proxy. When a proxy // comes online, it will announce itself to this endpoint. 
This updates its values // in the database and returns a signed token that can be used to authenticate diff --git a/enterprise/wsproxy/appstatsreporter.go b/enterprise/wsproxy/appstatsreporter.go new file mode 100644 index 0000000000000..ba3cb92df93cc --- /dev/null +++ b/enterprise/wsproxy/appstatsreporter.go @@ -0,0 +1,21 @@ +package wsproxy + +import ( + "context" + + "github.com/coder/coder/coderd/workspaceapps" + "github.com/coder/coder/enterprise/wsproxy/wsproxysdk" +) + +var _ workspaceapps.StatsReporter = (*appStatsReporter)(nil) + +type appStatsReporter struct { + Client *wsproxysdk.Client +} + +func (r *appStatsReporter) Report(ctx context.Context, stats []workspaceapps.StatsReport) error { + err := r.Client.ReportAppStats(ctx, wsproxysdk.ReportAppStatsRequest{ + Stats: stats, + }) + return err +} diff --git a/enterprise/wsproxy/wsproxy.go b/enterprise/wsproxy/wsproxy.go index 843b79ee394bf..0b6df6da821f2 100644 --- a/enterprise/wsproxy/wsproxy.go +++ b/enterprise/wsproxy/wsproxy.go @@ -278,9 +278,15 @@ func New(ctx context.Context, opts *Options) (*Server, error) { }, AppSecurityKey: secKey, - AgentProvider: agentProvider, DisablePathApps: opts.DisablePathApps, SecureAuthCookie: opts.SecureAuthCookie, + + AgentProvider: agentProvider, + StatsCollector: workspaceapps.NewStatsCollector( + opts.Logger.Named("stats_collector"), + &appStatsReporter{Client: client}, + workspaceapps.DefaultStatsCollectorReportInterval, + ), } derpHandler := derphttp.Handler(derpServer) diff --git a/enterprise/wsproxy/wsproxysdk/wsproxysdk.go b/enterprise/wsproxy/wsproxysdk/wsproxysdk.go index 6d1a4b1227b5d..d9b60d311eb0b 100644 --- a/enterprise/wsproxy/wsproxysdk/wsproxysdk.go +++ b/enterprise/wsproxy/wsproxysdk/wsproxysdk.go @@ -152,6 +152,25 @@ func (c *Client) IssueSignedAppTokenHTML(ctx context.Context, rw http.ResponseWr return res, true } +type ReportAppStatsRequest struct { + Stats []workspaceapps.StatsReport `json:"stats"` +} + +// ReportAppStats reports the given app stats to the primary coder server. +func (c *Client) ReportAppStats(ctx context.Context, req ReportAppStatsRequest) error { + resp, err := c.Request(ctx, http.MethodPost, "/api/v2/workspaceproxies/me/app-stats", req) + if err != nil { + return xerrors.Errorf("make request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusNoContent { + return codersdk.ReadBodyAsError(resp) + } + + return nil +} + type RegisterWorkspaceProxyRequest struct { // AccessURL that hits the workspace proxy api. 
AccessURL string `json:"access_url"` From 0bb672ac5295d54d25c50af468305c585032fed9 Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Thu, 10 Aug 2023 10:21:00 +0000 Subject: [PATCH 02/25] amend pr comments --- coderd/coderd.go | 2 +- coderd/database/queries.sql.go | 54 +++++++++++-------- coderd/database/queries/workspaceappstats.sql | 18 +++++-- coderd/database/sqlc.yaml | 7 +++ coderd/workspaceapps/stats.go | 47 ++++++++++------ enterprise/coderd/workspaceproxy.go | 2 +- 6 files changed, 85 insertions(+), 45 deletions(-) diff --git a/coderd/coderd.go b/coderd/coderd.go index 29e350cd60985..bed3608528b1a 100644 --- a/coderd/coderd.go +++ b/coderd/coderd.go @@ -432,7 +432,7 @@ func New(options *Options) *API { AppSecurityKey: options.AppSecurityKey, StatsCollector: workspaceapps.NewStatsCollector( workspaceAppsLogger.Named("stats_collector"), - workspaceapps.NewStatsDBReporter(options.Database), + workspaceapps.NewStatsDBReporter(options.Database, workspaceapps.DefaultStatsDBReporterBatchSize), workspaceapps.DefaultStatsCollectorReportInterval, ), diff --git a/coderd/database/queries.sql.go b/coderd/database/queries.sql.go index 1cef7cce6dad7..4a3a45a26084a 100644 --- a/coderd/database/queries.sql.go +++ b/coderd/database/queries.sql.go @@ -7779,42 +7779,50 @@ INSERT INTO session_started_at, session_ended_at ) -VALUES - ($1, $2, $3, $4, $5, $6, $7, $8, $9) +SELECT + unnest($1::uuid[]) AS id, + unnest($2::uuid[]) AS user_id, + unnest($3::uuid[]) AS workspace_id, + unnest($4::uuid[]) AS agent_id, + unnest($5::text[]) AS access_method, + unnest($6::text[]) AS slug_or_port, + unnest($7::uuid[]) AS session_id, + unnest($8::timestamptz[]) AS session_started_at, + unnest($9::nulltimestamptz[]) AS session_ended_at ON CONFLICT (agent_id, session_id) DO UPDATE SET -- Only session end can be updated. 
- session_ended_at = $9 + session_ended_at = EXCLUDED.session_ended_at WHERE - workspace_app_stats.agent_id = $4 - AND workspace_app_stats.session_id = $7 + workspace_app_stats.agent_id = EXCLUDED.agent_id + AND workspace_app_stats.session_id = EXCLUDED.session_id ` type InsertWorkspaceAppStatsParams struct { - ID uuid.UUID `db:"id" json:"id"` - UserID uuid.UUID `db:"user_id" json:"user_id"` - WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"` - AgentID uuid.UUID `db:"agent_id" json:"agent_id"` - AccessMethod string `db:"access_method" json:"access_method"` - SlugOrPort string `db:"slug_or_port" json:"slug_or_port"` - SessionID uuid.UUID `db:"session_id" json:"session_id"` - SessionStartedAt time.Time `db:"session_started_at" json:"session_started_at"` - SessionEndedAt sql.NullTime `db:"session_ended_at" json:"session_ended_at"` + ID []uuid.UUID `db:"id" json:"id"` + UserID []uuid.UUID `db:"user_id" json:"user_id"` + WorkspaceID []uuid.UUID `db:"workspace_id" json:"workspace_id"` + AgentID []uuid.UUID `db:"agent_id" json:"agent_id"` + AccessMethod []string `db:"access_method" json:"access_method"` + SlugOrPort []string `db:"slug_or_port" json:"slug_or_port"` + SessionID []uuid.UUID `db:"session_id" json:"session_id"` + SessionStartedAt []time.Time `db:"session_started_at" json:"session_started_at"` + SessionEndedAt []sql.NullTime `db:"session_ended_at" json:"session_ended_at"` } func (q *sqlQuerier) InsertWorkspaceAppStats(ctx context.Context, arg InsertWorkspaceAppStatsParams) error { _, err := q.db.ExecContext(ctx, insertWorkspaceAppStats, - arg.ID, - arg.UserID, - arg.WorkspaceID, - arg.AgentID, - arg.AccessMethod, - arg.SlugOrPort, - arg.SessionID, - arg.SessionStartedAt, - arg.SessionEndedAt, + pq.Array(arg.ID), + pq.Array(arg.UserID), + pq.Array(arg.WorkspaceID), + pq.Array(arg.AgentID), + pq.Array(arg.AccessMethod), + pq.Array(arg.SlugOrPort), + pq.Array(arg.SessionID), + pq.Array(arg.SessionStartedAt), + pq.Array(arg.SessionEndedAt), ) return err } diff --git a/coderd/database/queries/workspaceappstats.sql b/coderd/database/queries/workspaceappstats.sql index 32244cc2b7fad..bf375f4d70f76 100644 --- a/coderd/database/queries/workspaceappstats.sql +++ b/coderd/database/queries/workspaceappstats.sql @@ -11,14 +11,22 @@ INSERT INTO session_started_at, session_ended_at ) -VALUES - ($1, $2, $3, $4, $5, $6, $7, $8, $9) +SELECT + unnest(@id::uuid[]) AS id, + unnest(@user_id::uuid[]) AS user_id, + unnest(@workspace_id::uuid[]) AS workspace_id, + unnest(@agent_id::uuid[]) AS agent_id, + unnest(@access_method::text[]) AS access_method, + unnest(@slug_or_port::text[]) AS slug_or_port, + unnest(@session_id::uuid[]) AS session_id, + unnest(@session_started_at::timestamptz[]) AS session_started_at, + unnest(@session_ended_at::nulltimestamptz[]) AS session_ended_at ON CONFLICT (agent_id, session_id) DO UPDATE SET -- Only session end can be updated. - session_ended_at = $9 + session_ended_at = EXCLUDED.session_ended_at WHERE - workspace_app_stats.agent_id = $4 - AND workspace_app_stats.session_id = $7; + workspace_app_stats.agent_id = EXCLUDED.agent_id + AND workspace_app_stats.session_id = EXCLUDED.session_id; diff --git a/coderd/database/sqlc.yaml b/coderd/database/sqlc.yaml index 526f62d3a5ca5..f5c6ac6996473 100644 --- a/coderd/database/sqlc.yaml +++ b/coderd/database/sqlc.yaml @@ -27,6 +27,13 @@ overrides: - column: "template_with_users.group_acl" go_type: type: "TemplateACL" + # Type alias to allow batch inserts of timestamptz[] arrays containing + # nullable values. 
+ - db_type: "nulltimestamptz" + engine: "postgresql" + go_type: + import: "database/sql" + type: "NullTime" rename: template: TemplateTable template_with_user: Template diff --git a/coderd/workspaceapps/stats.go b/coderd/workspaceapps/stats.go index 59f7936cec01b..a5f9fc06c472b 100644 --- a/coderd/workspaceapps/stats.go +++ b/coderd/workspaceapps/stats.go @@ -16,6 +16,7 @@ import ( const ( DefaultStatsCollectorReportInterval = 30 * time.Second + DefaultStatsDBReporterBatchSize = 1024 ) // StatsReport is a report of a workspace app session. @@ -47,33 +48,49 @@ type StatsReporter interface { Report(context.Context, []StatsReport) error } +var _ StatsReporter = (*StatsDBReporter)(nil) + // StatsDBReporter writes workspace app StatsReports to the database. type StatsDBReporter struct { - Database database.Store + db database.Store + batchSize int } // NewStatsDBReporter returns a new StatsDBReporter. -func NewStatsDBReporter(db database.Store) *StatsDBReporter { +func NewStatsDBReporter(db database.Store, batchSize int) *StatsDBReporter { return &StatsDBReporter{ - Database: db, + db: db, + batchSize: batchSize, } } // Report writes the given StatsReports to the database. func (r *StatsDBReporter) Report(ctx context.Context, stats []StatsReport) error { - err := r.Database.InTx(func(tx database.Store) error { + err := r.db.InTx(func(tx database.Store) error { + var batch database.InsertWorkspaceAppStatsParams for _, stat := range stats { - err := tx.InsertWorkspaceAppStats(ctx, database.InsertWorkspaceAppStatsParams{ - ID: uuid.New(), - UserID: stat.UserID, - WorkspaceID: stat.WorkspaceID, - AgentID: stat.AgentID, - AccessMethod: string(stat.AccessMethod), - SlugOrPort: stat.SlugOrPort, - SessionID: stat.SessionID, - SessionStartedAt: stat.SessionStartTime, - SessionEndedAt: stat.SessionEndTime.NullTime, - }) + batch.ID = append(batch.ID, uuid.New()) + batch.UserID = append(batch.UserID, stat.UserID) + batch.WorkspaceID = append(batch.WorkspaceID, stat.WorkspaceID) + batch.AgentID = append(batch.AgentID, stat.AgentID) + batch.AccessMethod = append(batch.AccessMethod, string(stat.AccessMethod)) + batch.SlugOrPort = append(batch.SlugOrPort, stat.SlugOrPort) + batch.SessionID = append(batch.SessionID, stat.SessionID) + batch.SessionStartedAt = append(batch.SessionStartedAt, stat.StartTime) + batch.SessionEndedAt = append(batch.SessionEndedAt, stat.EndTime.NullTime) + + if len(batch.ID) >= r.batchSize { + err := tx.InsertWorkspaceAppStats(ctx, batch) + if err != nil { + return err + } + + // Reset batch. 
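+				// Assigning a fresh zero value clears all of the per-column
+				// slices at once so the next batch starts empty.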
+ batch = database.InsertWorkspaceAppStatsParams{} + } + } + if len(batch.ID) > 0 { + err := tx.InsertWorkspaceAppStats(ctx, batch) if err != nil { return err } diff --git a/enterprise/coderd/workspaceproxy.go b/enterprise/coderd/workspaceproxy.go index 59a2752c8db60..a6cac4e8bb723 100644 --- a/enterprise/coderd/workspaceproxy.go +++ b/enterprise/coderd/workspaceproxy.go @@ -512,7 +512,7 @@ func (api *API) workspaceProxyReportAppStats(rw http.ResponseWriter, r *http.Req return } - if err := workspaceapps.NewStatsDBReporter(api.Database).Report(ctx, stats); err != nil { + if err := workspaceapps.NewStatsDBReporter(api.Database, workspaceapps.DefaultStatsDBReporterBatchSize).Report(ctx, stats); err != nil { api.Logger.Error(ctx, "report app stats failed", slog.Error(err)) httpapi.InternalServerError(rw, err) return From 1171a9db5ff79044707d606a5f8fae5c9bb5bd1a Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Thu, 10 Aug 2023 21:56:12 +0000 Subject: [PATCH 03/25] refactor stats with rollups, add tests --- coderd/apidoc/docs.go | 10 +- coderd/apidoc/swagger.json | 10 +- coderd/coderd.go | 9 +- coderd/database/dbfake/dbfake.go | 35 +- coderd/database/dump.sql | 25 +- .../000148_workspace_app_stats.up.sql | 17 +- coderd/database/models.go | 8 +- coderd/database/queries.sql.go | 51 +-- coderd/database/queries/workspaceappstats.sql | 17 +- coderd/database/unique_constraint.go | 2 +- coderd/workspaceapps/proxy.go | 4 +- coderd/workspaceapps/stats.go | 297 +++++++++++---- coderd/workspaceapps/stats_test.go | 353 ++++++++++++++++++ docs/api/schemas.md | 35 +- enterprise/wsproxy/wsproxy.go | 12 +- 15 files changed, 720 insertions(+), 165 deletions(-) create mode 100644 coderd/workspaceapps/stats_test.go diff --git a/coderd/apidoc/docs.go b/coderd/apidoc/docs.go index 0b3e86fb9a666..a67a6b366a11c 100644 --- a/coderd/apidoc/docs.go +++ b/coderd/apidoc/docs.go @@ -11727,16 +11727,20 @@ const docTemplate = `{ "agent_id": { "type": "string" }, - "session_end_time": { + "requests": { + "type": "integer" + }, + "session_ended_at": { + "description": "Updated periodically while app is in use active and when the last connection is closed.", "type": "string" }, "session_id": { "type": "string" }, - "session_start_time": { + "session_started_at": { "type": "string" }, - "slug": { + "slug_or_port": { "type": "string" }, "user_id": { diff --git a/coderd/apidoc/swagger.json b/coderd/apidoc/swagger.json index 92d911ac96d9d..5e514048990a3 100644 --- a/coderd/apidoc/swagger.json +++ b/coderd/apidoc/swagger.json @@ -10701,16 +10701,20 @@ "agent_id": { "type": "string" }, - "session_end_time": { + "requests": { + "type": "integer" + }, + "session_ended_at": { + "description": "Updated periodically while app is in use active and when the last connection is closed.", "type": "string" }, "session_id": { "type": "string" }, - "session_start_time": { + "session_started_at": { "type": "string" }, - "slug": { + "slug_or_port": { "type": "string" }, "user_id": { diff --git a/coderd/coderd.go b/coderd/coderd.go index bed3608528b1a..ccf65368eea47 100644 --- a/coderd/coderd.go +++ b/coderd/coderd.go @@ -430,11 +430,10 @@ func New(options *Options) *API { SignedTokenProvider: api.WorkspaceAppsProvider, AgentProvider: api.agentProvider, AppSecurityKey: options.AppSecurityKey, - StatsCollector: workspaceapps.NewStatsCollector( - workspaceAppsLogger.Named("stats_collector"), - workspaceapps.NewStatsDBReporter(options.Database, workspaceapps.DefaultStatsDBReporterBatchSize), - workspaceapps.DefaultStatsCollectorReportInterval, 
- ), + StatsCollector: workspaceapps.NewStatsCollector(workspaceapps.StatsCollectorOptions{ + Logger: workspaceAppsLogger.Named("stats_collector"), + Reporter: workspaceapps.NewStatsDBReporter(options.Database, workspaceapps.DefaultStatsDBReporterBatchSize), + }), DisablePathApps: options.DeploymentValues.DisablePathApps.Value(), SecureAuthCookie: options.DeploymentValues.SecureAuthCookie.Value(), diff --git a/coderd/database/dbfake/dbfake.go b/coderd/database/dbfake/dbfake.go index aac77175cf8be..acfda3583cb66 100644 --- a/coderd/database/dbfake/dbfake.go +++ b/coderd/database/dbfake/dbfake.go @@ -135,6 +135,7 @@ type data struct { workspaceAgentMetadata []database.WorkspaceAgentMetadatum workspaceAgentLogs []database.WorkspaceAgentLog workspaceApps []database.WorkspaceApp + workspaceAppStats []database.WorkspaceAppStat workspaceBuilds []database.WorkspaceBuildTable workspaceBuildParameters []database.WorkspaceBuildParameter workspaceResourceMetadata []database.WorkspaceResourceMetadatum @@ -4266,7 +4267,39 @@ func (q *FakeQuerier) InsertWorkspaceAppStats(ctx context.Context, arg database. return err } - panic("not implemented") + q.mutex.Lock() + defer q.mutex.Unlock() + + nextID := int64(1) + if len(q.workspaceAppStats) > 0 { + nextID = q.workspaceAppStats[len(q.workspaceAppStats)-1].ID + 1 + } +InsertWorkspaceAppStatsLoop: + for i := 0; i < len(arg.UserID); i++ { + stat := database.WorkspaceAppStat{ + ID: nextID, + UserID: arg.UserID[i], + WorkspaceID: arg.WorkspaceID[i], + AgentID: arg.AgentID[i], + AccessMethod: arg.AccessMethod[i], + SlugOrPort: arg.SlugOrPort[i], + SessionID: arg.SessionID[i], + SessionStartedAt: arg.SessionStartedAt[i], + SessionEndedAt: arg.SessionEndedAt[i], + Requests: arg.Requests[i], + } + for j, s := range q.workspaceAppStats { + // Check unique constraint for upsert. 
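+			// Mirrors the table's UNIQUE(user_id, agent_id, session_id)
+			// constraint: a matching row is updated in place instead of
+			// appending a duplicate.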
+ if s.UserID == stat.UserID && s.AgentID == stat.AgentID && s.SessionID == stat.SessionID { + q.workspaceAppStats[j].SessionEndedAt = stat.SessionEndedAt + q.workspaceAppStats[j].Requests = stat.Requests + continue InsertWorkspaceAppStatsLoop + } + } + q.workspaceAppStats = append(q.workspaceAppStats, stat) + } + + return nil } func (q *FakeQuerier) InsertWorkspaceBuild(_ context.Context, arg database.InsertWorkspaceBuildParams) error { diff --git a/coderd/database/dump.sql b/coderd/database/dump.sql index 22f325920c37f..fa171c0d18cd5 100644 --- a/coderd/database/dump.sql +++ b/coderd/database/dump.sql @@ -803,7 +803,7 @@ COMMENT ON COLUMN workspace_agents.started_at IS 'The time the agent entered the COMMENT ON COLUMN workspace_agents.ready_at IS 'The time the agent entered the ready or start_error lifecycle state'; CREATE TABLE workspace_app_stats ( - id uuid NOT NULL, + id bigint NOT NULL, user_id uuid NOT NULL, workspace_id uuid NOT NULL, agent_id uuid NOT NULL, @@ -811,12 +811,13 @@ CREATE TABLE workspace_app_stats ( slug_or_port text NOT NULL, session_id uuid NOT NULL, session_started_at timestamp with time zone NOT NULL, - session_ended_at timestamp with time zone + session_ended_at timestamp with time zone NOT NULL, + requests integer NOT NULL ); COMMENT ON TABLE workspace_app_stats IS 'A record of workspace app usage statistics'; -COMMENT ON COLUMN workspace_app_stats.id IS 'The unique identifier for the workspace app stat record'; +COMMENT ON COLUMN workspace_app_stats.id IS 'The ID of the record'; COMMENT ON COLUMN workspace_app_stats.user_id IS 'The user who used the workspace app'; @@ -834,6 +835,17 @@ COMMENT ON COLUMN workspace_app_stats.session_started_at IS 'The time the sessio COMMENT ON COLUMN workspace_app_stats.session_ended_at IS 'The time the session ended'; +COMMENT ON COLUMN workspace_app_stats.requests IS 'The number of requests made during the session, a number larger than 1 indicates that multiple sessions were rolled up into one'; + +CREATE SEQUENCE workspace_app_stats_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + +ALTER SEQUENCE workspace_app_stats_id_seq OWNED BY workspace_app_stats.id; + CREATE TABLE workspace_apps ( id uuid NOT NULL, created_at timestamp with time zone NOT NULL, @@ -991,6 +1003,8 @@ ALTER TABLE ONLY provisioner_job_logs ALTER COLUMN id SET DEFAULT nextval('provi ALTER TABLE ONLY workspace_agent_logs ALTER COLUMN id SET DEFAULT nextval('workspace_agent_startup_logs_id_seq'::regclass); +ALTER TABLE ONLY workspace_app_stats ALTER COLUMN id SET DEFAULT nextval('workspace_app_stats_id_seq'::regclass); + ALTER TABLE ONLY workspace_proxies ALTER COLUMN region_id SET DEFAULT nextval('workspace_proxies_region_id_seq'::regclass); ALTER TABLE ONLY workspace_resource_metadata ALTER COLUMN id SET DEFAULT nextval('workspace_resource_metadata_id_seq'::regclass); @@ -1106,6 +1120,9 @@ ALTER TABLE ONLY workspace_agents ALTER TABLE ONLY workspace_app_stats ADD CONSTRAINT workspace_app_stats_pkey PRIMARY KEY (id); +ALTER TABLE ONLY workspace_app_stats + ADD CONSTRAINT workspace_app_stats_user_id_agent_id_session_id_key UNIQUE (user_id, agent_id, session_id); + ALTER TABLE ONLY workspace_apps ADD CONSTRAINT workspace_apps_agent_id_slug_idx UNIQUE (agent_id, slug); @@ -1192,8 +1209,6 @@ CREATE INDEX workspace_agents_auth_token_idx ON workspace_agents USING btree (au CREATE INDEX workspace_agents_resource_id_idx ON workspace_agents USING btree (resource_id); -CREATE UNIQUE INDEX workspace_app_stats_user_agent_session_idx ON 
workspace_app_stats USING btree (agent_id, session_id); - CREATE INDEX workspace_app_stats_workspace_id_idx ON workspace_app_stats USING btree (workspace_id); CREATE UNIQUE INDEX workspace_proxies_lower_name_idx ON workspace_proxies USING btree (lower(name)) WHERE (deleted = false); diff --git a/coderd/database/migrations/000148_workspace_app_stats.up.sql b/coderd/database/migrations/000148_workspace_app_stats.up.sql index dbd5e920bb560..70d7bc6fb70a0 100644 --- a/coderd/database/migrations/000148_workspace_app_stats.up.sql +++ b/coderd/database/migrations/000148_workspace_app_stats.up.sql @@ -1,18 +1,23 @@ CREATE TABLE workspace_app_stats ( - id uuid PRIMARY KEY, + id BIGSERIAL PRIMARY KEY, user_id uuid NOT NULL REFERENCES users (id), workspace_id uuid NOT NULL REFERENCES workspaces (id), agent_id uuid NOT NULL REFERENCES workspace_agents (id), access_method text NOT NULL, slug_or_port text NOT NULL, session_id uuid NOT NULL, - session_started_at timestamp with time zone NOT NULL, - session_ended_at timestamp with time zone + session_started_at timestamptz NOT NULL, + session_ended_at timestamptz NOT NULL, + requests integer NOT NULL, + + -- Set a unique constraint to allow upserting the session_ended_at + -- and requests fields without risk of collisions. + UNIQUE(user_id, agent_id, session_id) ); COMMENT ON TABLE workspace_app_stats IS 'A record of workspace app usage statistics'; -COMMENT ON COLUMN workspace_app_stats.id IS 'The unique identifier for the workspace app stat record'; +COMMENT ON COLUMN workspace_app_stats.id IS 'The ID of the record'; COMMENT ON COLUMN workspace_app_stats.user_id IS 'The user who used the workspace app'; COMMENT ON COLUMN workspace_app_stats.workspace_id IS 'The workspace that the workspace app was used in'; COMMENT ON COLUMN workspace_app_stats.agent_id IS 'The workspace agent that was used'; @@ -21,9 +26,7 @@ COMMENT ON COLUMN workspace_app_stats.slug_or_port IS 'The slug or port used to COMMENT ON COLUMN workspace_app_stats.session_id IS 'The unique identifier for the session'; COMMENT ON COLUMN workspace_app_stats.session_started_at IS 'The time the session started'; COMMENT ON COLUMN workspace_app_stats.session_ended_at IS 'The time the session ended'; - --- Create a unique index to prevent duplicate records (scoped to agent to ensure no collisions). -CREATE UNIQUE INDEX workspace_app_stats_user_agent_session_idx ON workspace_app_stats (agent_id, session_id); +COMMENT ON COLUMN workspace_app_stats.requests IS 'The number of requests made during the session, a number larger than 1 indicates that multiple sessions were rolled up into one'; -- Create index on workspace_id for joining/filtering by templates. 
CREATE INDEX workspace_app_stats_workspace_id_idx ON workspace_app_stats (workspace_id); diff --git a/coderd/database/models.go b/coderd/database/models.go index b2d052df2215c..350a6f1dece79 100644 --- a/coderd/database/models.go +++ b/coderd/database/models.go @@ -1955,8 +1955,8 @@ type WorkspaceApp struct { // A record of workspace app usage statistics type WorkspaceAppStat struct { - // The unique identifier for the workspace app stat record - ID uuid.UUID `db:"id" json:"id"` + // The ID of the record + ID int64 `db:"id" json:"id"` // The user who used the workspace app UserID uuid.UUID `db:"user_id" json:"user_id"` // The workspace that the workspace app was used in @@ -1972,7 +1972,9 @@ type WorkspaceAppStat struct { // The time the session started SessionStartedAt time.Time `db:"session_started_at" json:"session_started_at"` // The time the session ended - SessionEndedAt sql.NullTime `db:"session_ended_at" json:"session_ended_at"` + SessionEndedAt time.Time `db:"session_ended_at" json:"session_ended_at"` + // The number of requests made during the session, a number larger than 1 indicates that multiple sessions were rolled up into one + Requests int32 `db:"requests" json:"requests"` } // Joins in the username + avatar url of the initiated by user. diff --git a/coderd/database/queries.sql.go b/coderd/database/queries.sql.go index 4a3a45a26084a..936e3f4f06bd6 100644 --- a/coderd/database/queries.sql.go +++ b/coderd/database/queries.sql.go @@ -7769,7 +7769,6 @@ func (q *sqlQuerier) UpdateWorkspaceAppHealthByID(ctx context.Context, arg Updat const insertWorkspaceAppStats = `-- name: InsertWorkspaceAppStats :exec INSERT INTO workspace_app_stats ( - id, user_id, workspace_id, agent_id, @@ -7777,44 +7776,45 @@ INSERT INTO slug_or_port, session_id, session_started_at, - session_ended_at + session_ended_at, + requests ) SELECT - unnest($1::uuid[]) AS id, - unnest($2::uuid[]) AS user_id, - unnest($3::uuid[]) AS workspace_id, - unnest($4::uuid[]) AS agent_id, - unnest($5::text[]) AS access_method, - unnest($6::text[]) AS slug_or_port, - unnest($7::uuid[]) AS session_id, - unnest($8::timestamptz[]) AS session_started_at, - unnest($9::nulltimestamptz[]) AS session_ended_at + unnest($1::uuid[]) AS user_id, + unnest($2::uuid[]) AS workspace_id, + unnest($3::uuid[]) AS agent_id, + unnest($4::text[]) AS access_method, + unnest($5::text[]) AS slug_or_port, + unnest($6::uuid[]) AS session_id, + unnest($7::timestamptz[]) AS session_started_at, + unnest($8::timestamptz[]) AS session_ended_at, + unnest($9::int[]) AS requests ON CONFLICT - (agent_id, session_id) + (user_id, agent_id, session_id) DO UPDATE SET - -- Only session end can be updated. 
- session_ended_at = EXCLUDED.session_ended_at + session_ended_at = EXCLUDED.session_ended_at, + requests = EXCLUDED.requests WHERE - workspace_app_stats.agent_id = EXCLUDED.agent_id + workspace_app_stats.user_id = EXCLUDED.user_id + AND workspace_app_stats.agent_id = EXCLUDED.agent_id AND workspace_app_stats.session_id = EXCLUDED.session_id ` type InsertWorkspaceAppStatsParams struct { - ID []uuid.UUID `db:"id" json:"id"` - UserID []uuid.UUID `db:"user_id" json:"user_id"` - WorkspaceID []uuid.UUID `db:"workspace_id" json:"workspace_id"` - AgentID []uuid.UUID `db:"agent_id" json:"agent_id"` - AccessMethod []string `db:"access_method" json:"access_method"` - SlugOrPort []string `db:"slug_or_port" json:"slug_or_port"` - SessionID []uuid.UUID `db:"session_id" json:"session_id"` - SessionStartedAt []time.Time `db:"session_started_at" json:"session_started_at"` - SessionEndedAt []sql.NullTime `db:"session_ended_at" json:"session_ended_at"` + UserID []uuid.UUID `db:"user_id" json:"user_id"` + WorkspaceID []uuid.UUID `db:"workspace_id" json:"workspace_id"` + AgentID []uuid.UUID `db:"agent_id" json:"agent_id"` + AccessMethod []string `db:"access_method" json:"access_method"` + SlugOrPort []string `db:"slug_or_port" json:"slug_or_port"` + SessionID []uuid.UUID `db:"session_id" json:"session_id"` + SessionStartedAt []time.Time `db:"session_started_at" json:"session_started_at"` + SessionEndedAt []time.Time `db:"session_ended_at" json:"session_ended_at"` + Requests []int32 `db:"requests" json:"requests"` } func (q *sqlQuerier) InsertWorkspaceAppStats(ctx context.Context, arg InsertWorkspaceAppStatsParams) error { _, err := q.db.ExecContext(ctx, insertWorkspaceAppStats, - pq.Array(arg.ID), pq.Array(arg.UserID), pq.Array(arg.WorkspaceID), pq.Array(arg.AgentID), @@ -7823,6 +7823,7 @@ func (q *sqlQuerier) InsertWorkspaceAppStats(ctx context.Context, arg InsertWork pq.Array(arg.SessionID), pq.Array(arg.SessionStartedAt), pq.Array(arg.SessionEndedAt), + pq.Array(arg.Requests), ) return err } diff --git a/coderd/database/queries/workspaceappstats.sql b/coderd/database/queries/workspaceappstats.sql index bf375f4d70f76..6436f6a699f38 100644 --- a/coderd/database/queries/workspaceappstats.sql +++ b/coderd/database/queries/workspaceappstats.sql @@ -1,7 +1,6 @@ -- name: InsertWorkspaceAppStats :exec INSERT INTO workspace_app_stats ( - id, user_id, workspace_id, agent_id, @@ -9,10 +8,10 @@ INSERT INTO slug_or_port, session_id, session_started_at, - session_ended_at + session_ended_at, + requests ) SELECT - unnest(@id::uuid[]) AS id, unnest(@user_id::uuid[]) AS user_id, unnest(@workspace_id::uuid[]) AS workspace_id, unnest(@agent_id::uuid[]) AS agent_id, @@ -20,13 +19,15 @@ SELECT unnest(@slug_or_port::text[]) AS slug_or_port, unnest(@session_id::uuid[]) AS session_id, unnest(@session_started_at::timestamptz[]) AS session_started_at, - unnest(@session_ended_at::nulltimestamptz[]) AS session_ended_at + unnest(@session_ended_at::timestamptz[]) AS session_ended_at, + unnest(@requests::int[]) AS requests ON CONFLICT - (agent_id, session_id) + (user_id, agent_id, session_id) DO UPDATE SET - -- Only session end can be updated. 
- session_ended_at = EXCLUDED.session_ended_at + session_ended_at = EXCLUDED.session_ended_at, + requests = EXCLUDED.requests WHERE - workspace_app_stats.agent_id = EXCLUDED.agent_id + workspace_app_stats.user_id = EXCLUDED.user_id + AND workspace_app_stats.agent_id = EXCLUDED.agent_id AND workspace_app_stats.session_id = EXCLUDED.session_id; diff --git a/coderd/database/unique_constraint.go b/coderd/database/unique_constraint.go index 2b7cb1450050f..294b4b12d51af 100644 --- a/coderd/database/unique_constraint.go +++ b/coderd/database/unique_constraint.go @@ -18,6 +18,7 @@ const ( UniqueTemplateVersionParametersTemplateVersionIDNameKey UniqueConstraint = "template_version_parameters_template_version_id_name_key" // ALTER TABLE ONLY template_version_parameters ADD CONSTRAINT template_version_parameters_template_version_id_name_key UNIQUE (template_version_id, name); UniqueTemplateVersionVariablesTemplateVersionIDNameKey UniqueConstraint = "template_version_variables_template_version_id_name_key" // ALTER TABLE ONLY template_version_variables ADD CONSTRAINT template_version_variables_template_version_id_name_key UNIQUE (template_version_id, name); UniqueTemplateVersionsTemplateIDNameKey UniqueConstraint = "template_versions_template_id_name_key" // ALTER TABLE ONLY template_versions ADD CONSTRAINT template_versions_template_id_name_key UNIQUE (template_id, name); + UniqueWorkspaceAppStatsUserIDAgentIDSessionIDKey UniqueConstraint = "workspace_app_stats_user_id_agent_id_session_id_key" // ALTER TABLE ONLY workspace_app_stats ADD CONSTRAINT workspace_app_stats_user_id_agent_id_session_id_key UNIQUE (user_id, agent_id, session_id); UniqueWorkspaceAppsAgentIDSlugIndex UniqueConstraint = "workspace_apps_agent_id_slug_idx" // ALTER TABLE ONLY workspace_apps ADD CONSTRAINT workspace_apps_agent_id_slug_idx UNIQUE (agent_id, slug); UniqueWorkspaceBuildParametersWorkspaceBuildIDNameKey UniqueConstraint = "workspace_build_parameters_workspace_build_id_name_key" // ALTER TABLE ONLY workspace_build_parameters ADD CONSTRAINT workspace_build_parameters_workspace_build_id_name_key UNIQUE (workspace_build_id, name); UniqueWorkspaceBuildsJobIDKey UniqueConstraint = "workspace_builds_job_id_key" // ALTER TABLE ONLY workspace_builds ADD CONSTRAINT workspace_builds_job_id_key UNIQUE (job_id); @@ -32,7 +33,6 @@ const ( UniqueTemplatesOrganizationIDNameIndex UniqueConstraint = "templates_organization_id_name_idx" // CREATE UNIQUE INDEX templates_organization_id_name_idx ON templates USING btree (organization_id, lower((name)::text)) WHERE (deleted = false); UniqueUsersEmailLowerIndex UniqueConstraint = "users_email_lower_idx" // CREATE UNIQUE INDEX users_email_lower_idx ON users USING btree (lower(email)) WHERE (deleted = false); UniqueUsersUsernameLowerIndex UniqueConstraint = "users_username_lower_idx" // CREATE UNIQUE INDEX users_username_lower_idx ON users USING btree (lower(username)) WHERE (deleted = false); - UniqueWorkspaceAppStatsUserAgentSessionIndex UniqueConstraint = "workspace_app_stats_user_agent_session_idx" // CREATE UNIQUE INDEX workspace_app_stats_user_agent_session_idx ON workspace_app_stats USING btree (agent_id, session_id); UniqueWorkspaceProxiesLowerNameIndex UniqueConstraint = "workspace_proxies_lower_name_idx" // CREATE UNIQUE INDEX workspace_proxies_lower_name_idx ON workspace_proxies USING btree (lower(name)) WHERE (deleted = false); UniqueWorkspacesOwnerIDLowerIndex UniqueConstraint = "workspaces_owner_id_lower_idx" // CREATE UNIQUE INDEX workspaces_owner_id_lower_idx ON workspaces 
USING btree (owner_id, lower((name)::text)) WHERE (deleted = false); ) diff --git a/coderd/workspaceapps/proxy.go b/coderd/workspaceapps/proxy.go index 10dc639fa3a28..4f25f7edc5c6b 100644 --- a/coderd/workspaceapps/proxy.go +++ b/coderd/workspaceapps/proxy.go @@ -593,7 +593,7 @@ func (s *Server) proxyWorkspaceApp(rw http.ResponseWriter, r *http.Request, appT proxy.ServeHTTP(rw, r) - report.SessionEndTime = codersdk.NewNullTime(database.Now(), true) + report.SessionEndedAt = database.Now() s.StatsCollector.Collect(report) } @@ -693,7 +693,7 @@ func (s *Server) workspaceAgentPTY(rw http.ResponseWriter, r *http.Request) { agentssh.Bicopy(ctx, wsNetConn, ptNetConn) log.Debug(ctx, "pty Bicopy finished") - report.SessionEndTime = codersdk.NewNullTime(database.Now(), true) + report.SessionEndedAt = database.Now() s.StatsCollector.Collect(report) } diff --git a/coderd/workspaceapps/stats.go b/coderd/workspaceapps/stats.go index a5f9fc06c472b..30fe324f9bece 100644 --- a/coderd/workspaceapps/stats.go +++ b/coderd/workspaceapps/stats.go @@ -11,24 +11,27 @@ import ( "cdr.dev/slog" "github.com/coder/coder/coderd/database" - "github.com/coder/coder/codersdk" ) const ( DefaultStatsCollectorReportInterval = 30 * time.Second + DefaultStatsCollectorRollupWindow = 1 * time.Minute DefaultStatsDBReporterBatchSize = 1024 ) // StatsReport is a report of a workspace app session. type StatsReport struct { - UserID uuid.UUID `json:"user_id"` - WorkspaceID uuid.UUID `json:"workspace_id"` - AgentID uuid.UUID `json:"agent_id"` - AccessMethod AccessMethod `json:"access_method"` - SlugOrPort string `json:"slug"` - SessionID uuid.UUID `json:"session_id"` - SessionStartTime time.Time `json:"session_start_time"` - SessionEndTime codersdk.NullTime `json:"session_end_time"` + UserID uuid.UUID `json:"user_id"` + WorkspaceID uuid.UUID `json:"workspace_id"` + AgentID uuid.UUID `json:"agent_id"` + AccessMethod AccessMethod `json:"access_method"` + SlugOrPort string `json:"slug_or_port"` + SessionID uuid.UUID `json:"session_id"` + SessionStartedAt time.Time `json:"session_started_at"` + SessionEndedAt time.Time `json:"session_ended_at"` // Updated periodically while app is in use active and when the last connection is closed. + Requests int `json:"requests"` + + rolledUp bool // Indicates if this report has been rolled up. 
} func newStatsReportFromSignedToken(token SignedToken) StatsReport { @@ -39,7 +42,8 @@ func newStatsReportFromSignedToken(token SignedToken) StatsReport { AccessMethod: token.AccessMethod, SlugOrPort: token.AppSlugOrPort, SessionID: uuid.New(), - SessionStartTime: database.Now(), + SessionStartedAt: database.Now(), + Requests: 1, } } @@ -69,17 +73,17 @@ func (r *StatsDBReporter) Report(ctx context.Context, stats []StatsReport) error err := r.db.InTx(func(tx database.Store) error { var batch database.InsertWorkspaceAppStatsParams for _, stat := range stats { - batch.ID = append(batch.ID, uuid.New()) batch.UserID = append(batch.UserID, stat.UserID) batch.WorkspaceID = append(batch.WorkspaceID, stat.WorkspaceID) batch.AgentID = append(batch.AgentID, stat.AgentID) batch.AccessMethod = append(batch.AccessMethod, string(stat.AccessMethod)) batch.SlugOrPort = append(batch.SlugOrPort, stat.SlugOrPort) batch.SessionID = append(batch.SessionID, stat.SessionID) - batch.SessionStartedAt = append(batch.SessionStartedAt, stat.StartTime) - batch.SessionEndedAt = append(batch.SessionEndedAt, stat.EndTime.NullTime) + batch.SessionStartedAt = append(batch.SessionStartedAt, stat.SessionStartedAt) + batch.SessionEndedAt = append(batch.SessionEndedAt, stat.SessionEndedAt) + batch.Requests = append(batch.Requests, int32(stat.Requests)) - if len(batch.ID) >= r.batchSize { + if len(batch.UserID) >= r.batchSize { err := tx.InsertWorkspaceAppStats(ctx, batch) if err != nil { return err @@ -89,7 +93,7 @@ func (r *StatsDBReporter) Report(ctx context.Context, stats []StatsReport) error batch = database.InsertWorkspaceAppStatsParams{} } } - if len(batch.ID) > 0 { + if len(batch.UserID) > 0 { err := tx.InsertWorkspaceAppStats(ctx, batch) if err != nil { return err @@ -105,109 +109,242 @@ func (r *StatsDBReporter) Report(ctx context.Context, stats []StatsReport) error return nil } +// This should match the database unique constraint. +type statsGroupKey struct { + StartTimeTrunc time.Time + UserID uuid.UUID + WorkspaceID uuid.UUID + AgentID uuid.UUID + AccessMethod AccessMethod + SlugOrPort string +} + +func (s StatsReport) groupKey(windowSize time.Duration) statsGroupKey { + return statsGroupKey{ + StartTimeTrunc: s.SessionStartedAt.Truncate(windowSize), + UserID: s.UserID, + WorkspaceID: s.WorkspaceID, + AgentID: s.AgentID, + AccessMethod: s.AccessMethod, + SlugOrPort: s.SlugOrPort, + } +} + // StatsCollector collects workspace app StatsReports and reports them // in batches, stats compaction is performed for short-lived sessions. type StatsCollector struct { - logger slog.Logger - reporter StatsReporter + opts StatsCollectorOptions - ctx context.Context - cancel context.CancelFunc - done chan struct{} - reportInterval time.Duration + ctx context.Context + cancel context.CancelFunc + done chan struct{} - mu sync.Mutex // Protects following. - stats []StatsReport + mu sync.Mutex // Protects following. + statsBySessionID map[uuid.UUID]*StatsReport // Track unique sessions. + groupedStats map[statsGroupKey][]*StatsReport // Rolled up stats for sessions in close proximity. } -// Collect the given StatsReport for later reporting (non-blocking). -func (s *StatsCollector) Collect(report StatsReport) { - s.mu.Lock() - defer s.mu.Unlock() - s.stats = append(s.stats, report) +type StatsCollectorOptions struct { + Logger slog.Logger + Reporter StatsReporter + // ReportInterval is the interval at which stats are reported, both partial + // and fully formed stats. 
+ ReportInterval time.Duration + // RollupWindow is the window size for rolling up stats, session shorter + // than this will be rolled up and longer than this will be tracked + // individually. + RollupWindow time.Duration + + // Options for tests. + Flush <-chan chan<- struct{} + Now func() time.Time } -func (s *StatsCollector) flush(ctx context.Context) error { - s.mu.Lock() - stats := s.stats - s.stats = nil - s.mu.Unlock() +func NewStatsCollector(opts StatsCollectorOptions) *StatsCollector { + if opts.ReportInterval == 0 { + opts.ReportInterval = DefaultStatsCollectorReportInterval + } + if opts.RollupWindow == 0 { + opts.RollupWindow = DefaultStatsCollectorRollupWindow + } + if opts.Now == nil { + opts.Now = time.Now + } + + ctx, cancel := context.WithCancel(context.Background()) + sc := &StatsCollector{ + ctx: ctx, + cancel: cancel, + done: make(chan struct{}), + opts: opts, + + statsBySessionID: make(map[uuid.UUID]*StatsReport), + groupedStats: make(map[statsGroupKey][]*StatsReport), + } - if len(stats) == 0 { - return nil + go sc.start() + return sc +} + +// Collect the given StatsReport for later reporting (non-blocking). +func (sc *StatsCollector) Collect(report StatsReport) { + sc.mu.Lock() + defer sc.mu.Unlock() + + r := &report + if _, ok := sc.statsBySessionID[report.SessionID]; !ok { + groupKey := r.groupKey(sc.opts.RollupWindow) + sc.groupedStats[groupKey] = append(sc.groupedStats[groupKey], r) } - // Compaction of stats, reduce payload by up to 50%. - compacted := make([]StatsReport, 0, len(stats)) - m := make(map[StatsReport]int) - for _, stat := range stats { - if !stat.SessionEndTime.IsZero() { - // Zero the time for map key equality. - cmp := stat - cmp.SessionEndTime = codersdk.NullTime{} - if j, ok := m[cmp]; ok { - compacted[j].SessionEndTime = stat.SessionEndTime + if r.SessionEndedAt.IsZero() { + sc.statsBySessionID[report.SessionID] = r + } else { + if stat, ok := sc.statsBySessionID[report.SessionID]; ok { + // Update in-place. + *stat = *r + } + delete(sc.statsBySessionID, report.SessionID) + } +} + +// rollup performs stats rollup for sessions that fall within the +// configured rollup window. For sessions longer than the window, +// we report them individually. +func (sc *StatsCollector) rollup() []StatsReport { + sc.mu.Lock() + defer sc.mu.Unlock() + + var report []StatsReport + + for g, group := range sc.groupedStats { + if len(group) == 0 { + // Safety check, this should not happen. + sc.opts.Logger.Error(sc.ctx, "empty stats group", "group", g) + delete(sc.groupedStats, g) + continue + } + + var rolledUp *StatsReport + if group[0].rolledUp { + rolledUp = group[0] + group = group[1:] + } else { + rolledUp = &StatsReport{ + UserID: g.UserID, + WorkspaceID: g.WorkspaceID, + AgentID: g.AgentID, + AccessMethod: g.AccessMethod, + SlugOrPort: g.SlugOrPort, + SessionStartedAt: g.StartTimeTrunc, + SessionEndedAt: g.StartTimeTrunc.Add(sc.opts.RollupWindow), + Requests: 0, + rolledUp: true, + } + } + + newGroup := []*StatsReport{rolledUp} // Must be first in slice for future iterations (see group[0] above). + for _, stat := range group { + if !stat.SessionEndedAt.IsZero() && stat.SessionEndedAt.Sub(stat.SessionStartedAt) <= sc.opts.RollupWindow { + // This is a short-lived session, roll it up. + if rolledUp.SessionID == uuid.Nil { + rolledUp.SessionID = stat.SessionID // Borrow the first session ID, useful in tests. 
+ } + rolledUp.Requests += stat.Requests + continue + } + if stat.SessionEndedAt.IsZero() && sc.opts.Now().Sub(stat.SessionStartedAt) <= sc.opts.RollupWindow { + // This is an incomplete session, wait and see if it'll be rolled up or not. + newGroup = append(newGroup, stat) continue } + + // This is a long-lived session, report it individually. + // Make a copy of stat for reporting. + r := *stat + if r.SessionEndedAt.IsZero() { + // Report an end time for incomplete sessions, it will + // be updated later. This ensures that data in the DB + // will have an end time even if the service is stopped. + r.SessionEndedAt = sc.opts.Now().UTC() // Use UTC like database.Now(). + } + report = append(report, r) // Report it (ended or incomplete). + if stat.SessionEndedAt.IsZero() { + newGroup = append(newGroup, stat) // Keep it for future updates. + } + } + if rolledUp.Requests > 0 { + report = append(report, *rolledUp) + } + + // Future rollups should only consider the compacted group. + sc.groupedStats[g] = newGroup + + // Keep the group around until the next rollup window has passed + // in case data was collected late. + if len(newGroup) == 1 && rolledUp.SessionEndedAt.Add(sc.opts.RollupWindow).Before(sc.opts.Now()) { + delete(sc.groupedStats, g) } - m[stat] = len(compacted) - compacted = append(compacted, stat) } - err := s.reporter.Report(ctx, stats) - return err + return report } -func (s *StatsCollector) Close() error { - s.cancel() - <-s.done - return nil -} +func (sc *StatsCollector) flush(ctx context.Context) (err error) { + sc.opts.Logger.Debug(sc.ctx, "flushing workspace app stats") + defer func() { + if err != nil { + sc.opts.Logger.Error(sc.ctx, "failed to flush workspace app stats", "error", err) + } else { + sc.opts.Logger.Debug(sc.ctx, "flushed workspace app stats") + } + }() -func NewStatsCollector(logger slog.Logger, reporter StatsReporter, reportInterval time.Duration) *StatsCollector { - ctx, cancel := context.WithCancel(context.Background()) - c := &StatsCollector{ - logger: logger, - reporter: reporter, - - ctx: ctx, - cancel: cancel, - done: make(chan struct{}), - reportInterval: reportInterval, + stats := sc.rollup() + if len(stats) == 0 { + return nil } - go c.start() - return c + return sc.opts.Reporter.Report(ctx, stats) } -func (s *StatsCollector) start() { +func (sc *StatsCollector) Close() error { + sc.cancel() + <-sc.done + return nil +} + +func (sc *StatsCollector) start() { defer func() { - close(s.done) - s.logger.Info(s.ctx, "workspace app stats collector stopped") + close(sc.done) + sc.opts.Logger.Info(sc.ctx, "workspace app stats collector stopped") }() - s.logger.Info(s.ctx, "workspace app stats collector started") + sc.opts.Logger.Info(sc.ctx, "workspace app stats collector started") - ticker := time.NewTicker(s.reportInterval) + ticker := time.NewTicker(sc.opts.ReportInterval) defer ticker.Stop() + var flushDone chan<- struct{} done := false for !done { select { - case <-s.ctx.Done(): + case <-sc.ctx.Done(): ticker.Stop() done = true case <-ticker.C: + case flushDone = <-sc.opts.Flush: } - s.logger.Debug(s.ctx, "flushing workspace app stats") - - // Ensure we don't hold up this request for too long. - ctx, cancel := context.WithTimeout(context.Background(), s.reportInterval+5*time.Second) - err := s.flush(ctx) + // Ensure we don't hold up this request for too long. Add a few + // seconds to prevent very short intervals from causing a timeout. 
+ ctx, cancel := context.WithTimeout(context.Background(), sc.opts.ReportInterval+5*time.Second) + _ = sc.flush(ctx) cancel() - if err != nil { - s.logger.Error(ctx, "failed to flush workspace app stats", slog.Error(err)) - continue + + // For tests. + if flushDone != nil { + flushDone <- struct{}{} + flushDone = nil } } } diff --git a/coderd/workspaceapps/stats_test.go b/coderd/workspaceapps/stats_test.go new file mode 100644 index 0000000000000..4a7932d32e9ec --- /dev/null +++ b/coderd/workspaceapps/stats_test.go @@ -0,0 +1,353 @@ +package workspaceapps_test + +import ( + "context" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" + + "github.com/coder/coder/coderd/database" + "github.com/coder/coder/coderd/workspaceapps" + "github.com/coder/coder/testutil" +) + +type fakeReporter struct { + mu sync.Mutex + s []workspaceapps.StatsReport +} + +func (r *fakeReporter) stats() []workspaceapps.StatsReport { + r.mu.Lock() + defer r.mu.Unlock() + return r.s +} + +func (r *fakeReporter) Report(_ context.Context, stats []workspaceapps.StatsReport) error { + r.mu.Lock() + r.s = append(r.s, stats...) + r.mu.Unlock() + return nil +} + +func TestStatsCollector(t *testing.T) { + t.Parallel() + + rollupUUID := uuid.New() + rollupUUID2 := uuid.New() + someUUID := uuid.New() + + rollupWindow := time.Minute + start := database.Now().Truncate(time.Minute).UTC() + end := start.Add(10 * time.Second) + + tests := []struct { + name string + flushIncrement time.Duration + flushCount int + stats []workspaceapps.StatsReport + want []workspaceapps.StatsReport + }{ + { + name: "Single stat rolled up and reported once", + flushIncrement: 2*rollupWindow + time.Second, + flushCount: 10, // Only reported once. + stats: []workspaceapps.StatsReport{ + { + SessionID: rollupUUID, + SessionStartedAt: start, + SessionEndedAt: end, + Requests: 1, + }, + }, + want: []workspaceapps.StatsReport{ + { + SessionID: rollupUUID, + SessionStartedAt: start, + SessionEndedAt: start.Add(rollupWindow), + Requests: 1, + }, + }, + }, + { + name: "Two unique stat rolled up", + flushIncrement: 2*rollupWindow + time.Second, + flushCount: 10, // Only reported once. 
+ stats: []workspaceapps.StatsReport{ + { + AccessMethod: workspaceapps.AccessMethodPath, + SlugOrPort: "code-server", + SessionID: rollupUUID, + SessionStartedAt: start, + SessionEndedAt: end, + Requests: 1, + }, + { + AccessMethod: workspaceapps.AccessMethodTerminal, + SessionID: rollupUUID2, + SessionStartedAt: start, + SessionEndedAt: end, + Requests: 1, + }, + }, + want: []workspaceapps.StatsReport{ + { + AccessMethod: workspaceapps.AccessMethodPath, + SlugOrPort: "code-server", + SessionID: rollupUUID, + SessionStartedAt: start, + SessionEndedAt: start.Add(rollupWindow), + Requests: 1, + }, + { + AccessMethod: workspaceapps.AccessMethodTerminal, + SessionID: rollupUUID2, + SessionStartedAt: start, + SessionEndedAt: start.Add(rollupWindow), + Requests: 1, + }, + }, + }, + { + name: "Multiple stats rolled up", + flushIncrement: 2*rollupWindow + time.Second, + flushCount: 2, + stats: []workspaceapps.StatsReport{ + { + SessionID: rollupUUID, + SessionStartedAt: start, + SessionEndedAt: end, + Requests: 1, + }, + { + SessionID: uuid.New(), + SessionStartedAt: start, + SessionEndedAt: end, + Requests: 1, + }, + }, + want: []workspaceapps.StatsReport{ + { + SessionID: rollupUUID, + SessionStartedAt: start, + SessionEndedAt: start.Add(rollupWindow), + Requests: 2, + }, + }, + }, + { + name: "Long sessions not rolled up but reported multiple times", + flushIncrement: rollupWindow + time.Second, + flushCount: 4, + stats: []workspaceapps.StatsReport{ + { + SessionID: rollupUUID, + SessionStartedAt: start, + Requests: 1, + }, + }, + want: []workspaceapps.StatsReport{ + { + SessionID: rollupUUID, + SessionStartedAt: start, + SessionEndedAt: start.Add(rollupWindow + time.Second), + Requests: 1, + }, + { + SessionID: rollupUUID, + SessionStartedAt: start, + SessionEndedAt: start.Add(2 * (rollupWindow + time.Second)), + Requests: 1, + }, + { + SessionID: rollupUUID, + SessionStartedAt: start, + SessionEndedAt: start.Add(3 * (rollupWindow + time.Second)), + Requests: 1, + }, + { + SessionID: rollupUUID, + SessionStartedAt: start, + SessionEndedAt: start.Add(4 * (rollupWindow + time.Second)), + Requests: 1, + }, + }, + }, + { + name: "Incomplete stats not reported until it exceeds rollup window", + flushIncrement: rollupWindow / 4, + flushCount: 6, + stats: []workspaceapps.StatsReport{ + { + SessionID: someUUID, + SessionStartedAt: start, + Requests: 1, + }, + }, + want: []workspaceapps.StatsReport{ + { + SessionID: someUUID, + SessionStartedAt: start, + SessionEndedAt: start.Add(rollupWindow / 4 * 5), + Requests: 1, + }, + { + SessionID: someUUID, + SessionStartedAt: start, + SessionEndedAt: start.Add(rollupWindow / 4 * 6), + Requests: 1, + }, + }, + }, + { + name: "Same stat reported without and with end time and rolled up", + flushIncrement: rollupWindow + time.Second, + flushCount: 1, + stats: []workspaceapps.StatsReport{ + { + SessionID: someUUID, + SessionStartedAt: start, + Requests: 1, + }, + { + SessionID: someUUID, + SessionStartedAt: start, + SessionEndedAt: start.Add(10 * time.Second), + Requests: 1, + }, + }, + want: []workspaceapps.StatsReport{ + { + SessionID: someUUID, + SessionStartedAt: start, + SessionEndedAt: start.Add(rollupWindow), + Requests: 1, + }, + }, + }, + { + name: "Same non-rolled up stat reported without and with end time", + flushIncrement: rollupWindow * 2, + flushCount: 1, + stats: []workspaceapps.StatsReport{ + { + SessionID: someUUID, + SessionStartedAt: start, + Requests: 1, + }, + { + SessionID: someUUID, + SessionStartedAt: start, + SessionEndedAt: 
start.Add(rollupWindow * 2), + Requests: 1, + }, + }, + want: []workspaceapps.StatsReport{ + { + SessionID: someUUID, + SessionStartedAt: start, + SessionEndedAt: start.Add(rollupWindow * 2), + Requests: 1, + }, + }, + }, + } + + // Run tests. + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + flush := make(chan chan<- struct{}, 1) + var now atomic.Pointer[time.Time] + now.Store(&start) + + reporter := &fakeReporter{} + collector := workspaceapps.NewStatsCollector(workspaceapps.StatsCollectorOptions{ + Reporter: reporter, + ReportInterval: time.Hour, + RollupWindow: rollupWindow, + + Flush: flush, + Now: func() time.Time { return *now.Load() }, + }) + + // Collect reports. + for _, report := range tt.stats { + collector.Collect(report) + } + + // Advance time. + flushTime := start.Add(tt.flushIncrement) + for i := 0; i < tt.flushCount; i++ { + now.Store(&flushTime) + flushDone := make(chan struct{}, 1) + flush <- flushDone + <-flushDone + flushTime = flushTime.Add(tt.flushIncrement) + } + + var gotStats []workspaceapps.StatsReport + require.Eventually(t, func() bool { + gotStats = reporter.stats() + t.Logf("len(reporter.stats()) = %d, len(tt.want) = %d", len(gotStats), len(tt.want)) + return len(gotStats) == len(tt.want) + }, testutil.WaitMedium, testutil.IntervalFast) + + // Order is not guaranteed. + sortBySessionID := func(a, b workspaceapps.StatsReport) bool { + if a.SessionID == b.SessionID { + if !a.SessionEndedAt.IsZero() && !b.SessionEndedAt.IsZero() { + return a.SessionEndedAt.Before(b.SessionEndedAt) + } + if a.SessionEndedAt.IsZero() { + return true + } + return false + } + return a.SessionID.String() < b.SessionID.String() + } + slices.SortFunc(tt.want, sortBySessionID) + slices.SortFunc(gotStats, sortBySessionID) + + // Verify reported stats. + for i, got := range gotStats { + want := tt.want[i] + + assert.Equal(t, want.SessionID, got.SessionID, "session ID; i = %d", i) + assert.Equal(t, want.SessionStartedAt, got.SessionStartedAt, "session started at; i = %d", i) + assert.Equal(t, want.SessionEndedAt, got.SessionEndedAt, "session ended at; i = %d", i) + assert.Equal(t, want.Requests, got.Requests, "requests; i = %d", i) + } + }) + } +} + +func TestStatsCollector_Close(t *testing.T) { + t.Parallel() + + reporter := &fakeReporter{} + collector := workspaceapps.NewStatsCollector(workspaceapps.StatsCollectorOptions{ + Reporter: reporter, + ReportInterval: time.Hour, + RollupWindow: time.Minute, + }) + + collector.Collect(workspaceapps.StatsReport{ + SessionID: uuid.New(), + SessionStartedAt: database.Now(), + SessionEndedAt: database.Now(), + Requests: 1, + }) + + collector.Close() + + // Verify that stats are reported after close. 
+ assert.NotEmpty(t, reporter.stats()) +} diff --git a/docs/api/schemas.md b/docs/api/schemas.md index 3263fb81e8874..3645d670601be 100644 --- a/docs/api/schemas.md +++ b/docs/api/schemas.md @@ -7478,10 +7478,11 @@ _None_ { "access_method": "path", "agent_id": "string", - "session_end_time": "string", + "requests": 0, + "session_ended_at": "string", "session_id": "string", - "session_start_time": "string", - "slug": "string", + "session_started_at": "string", + "slug_or_port": "string", "user_id": "string", "workspace_id": "string" } @@ -7489,16 +7490,17 @@ _None_ ### Properties -| Name | Type | Required | Restrictions | Description | -| -------------------- | -------------------------------------------------------- | -------- | ------------ | ----------- | -| `access_method` | [workspaceapps.AccessMethod](#workspaceappsaccessmethod) | false | | | -| `agent_id` | string | false | | | -| `session_end_time` | string | false | | | -| `session_id` | string | false | | | -| `session_start_time` | string | false | | | -| `slug` | string | false | | | -| `user_id` | string | false | | | -| `workspace_id` | string | false | | | +| Name | Type | Required | Restrictions | Description | +| -------------------- | -------------------------------------------------------- | -------- | ------------ | --------------------------------------------------------------------------------------- | +| `access_method` | [workspaceapps.AccessMethod](#workspaceappsaccessmethod) | false | | | +| `agent_id` | string | false | | | +| `requests` | integer | false | | | +| `session_ended_at` | string | false | | Updated periodically while app is in use active and when the last connection is closed. | +| `session_id` | string | false | | | +| `session_started_at` | string | false | | | +| `slug_or_port` | string | false | | | +| `user_id` | string | false | | | +| `workspace_id` | string | false | | | ## wsproxysdk.AgentIsLegacyResponse @@ -7613,10 +7615,11 @@ _None_ { "access_method": "path", "agent_id": "string", - "session_end_time": "string", + "requests": 0, + "session_ended_at": "string", "session_id": "string", - "session_start_time": "string", - "slug": "string", + "session_started_at": "string", + "slug_or_port": "string", "user_id": "string", "workspace_id": "string" } diff --git a/enterprise/wsproxy/wsproxy.go b/enterprise/wsproxy/wsproxy.go index 0b6df6da821f2..b675be7c52027 100644 --- a/enterprise/wsproxy/wsproxy.go +++ b/enterprise/wsproxy/wsproxy.go @@ -261,8 +261,9 @@ func New(ctx context.Context, opts *Options) (*Server, error) { } } + workspaceAppsLogger := opts.Logger.Named("workspaceapps") s.AppServer = &workspaceapps.Server{ - Logger: opts.Logger.Named("workspaceapps"), + Logger: workspaceAppsLogger, DashboardURL: opts.DashboardURL, AccessURL: opts.AccessURL, Hostname: opts.AppHostname, @@ -282,11 +283,10 @@ func New(ctx context.Context, opts *Options) (*Server, error) { SecureAuthCookie: opts.SecureAuthCookie, AgentProvider: agentProvider, - StatsCollector: workspaceapps.NewStatsCollector( - opts.Logger.Named("stats_collector"), - &appStatsReporter{Client: client}, - workspaceapps.DefaultStatsCollectorReportInterval, - ), + StatsCollector: workspaceapps.NewStatsCollector(workspaceapps.StatsCollectorOptions{ + Logger: workspaceAppsLogger.Named("stats_collector"), + Reporter: &appStatsReporter{Client: client}, + }), } derpHandler := derphttp.Handler(derpServer) From 858264a3af6561d429ef423543c637b28fae5935 Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Mon, 14 Aug 2023 10:48:58 +0000 Subject: 
[PATCH 04/25] add close --- coderd/coderd.go | 1 + coderd/workspaceapps/proxy.go | 18 ++++++++++++++---- coderd/workspaceapps/stats.go | 10 +++++----- 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/coderd/coderd.go b/coderd/coderd.go index ccf65368eea47..233a9e0e820e8 100644 --- a/coderd/coderd.go +++ b/coderd/coderd.go @@ -1026,6 +1026,7 @@ func (api *API) Close() error { if api.updateChecker != nil { api.updateChecker.Close() } + _ = api.workspaceAppServer.Close() coordinator := api.TailnetCoordinator.Load() if coordinator != nil { _ = (*coordinator).Close() diff --git a/coderd/workspaceapps/proxy.go b/coderd/workspaceapps/proxy.go index 4f25f7edc5c6b..f02c08c16fcd2 100644 --- a/coderd/workspaceapps/proxy.go +++ b/coderd/workspaceapps/proxy.go @@ -124,6 +124,10 @@ func (s *Server) Close() error { s.websocketWaitGroup.Wait() s.websocketWaitMutex.Unlock() + if s.StatsCollector != nil { + _ = s.StatsCollector.Close() + } + // The caller must close the SignedTokenProvider and the AgentProvider (if // necessary). @@ -589,12 +593,12 @@ func (s *Server) proxyWorkspaceApp(rw http.ResponseWriter, r *http.Request, appT tracing.EndHTTPSpan(r, http.StatusOK, trace.SpanFromContext(ctx)) report := newStatsReportFromSignedToken(appToken) - s.StatsCollector.Collect(report) + s.collectStats(report) proxy.ServeHTTP(rw, r) report.SessionEndedAt = database.Now() - s.StatsCollector.Collect(report) + s.collectStats(report) } // workspaceAgentPTY spawns a PTY and pipes it over a WebSocket. @@ -688,13 +692,19 @@ func (s *Server) workspaceAgentPTY(rw http.ResponseWriter, r *http.Request) { log.Debug(ctx, "obtained PTY") report := newStatsReportFromSignedToken(*appToken) - s.StatsCollector.Collect(report) + s.collectStats(report) agentssh.Bicopy(ctx, wsNetConn, ptNetConn) log.Debug(ctx, "pty Bicopy finished") report.SessionEndedAt = database.Now() - s.StatsCollector.Collect(report) + s.collectStats(report) +} + +func (s *Server) collectStats(stats StatsReport) { + if s.StatsCollector != nil { + s.StatsCollector.Collect(stats) + } } // wsNetConn wraps net.Conn created by websocket.NetConn(). Cancel func diff --git a/coderd/workspaceapps/stats.go b/coderd/workspaceapps/stats.go index 30fe324f9bece..a21739e55eebf 100644 --- a/coderd/workspaceapps/stats.go +++ b/coderd/workspaceapps/stats.go @@ -324,7 +324,7 @@ func (sc *StatsCollector) start() { ticker := time.NewTicker(sc.opts.ReportInterval) defer ticker.Stop() - var flushDone chan<- struct{} + var reportFlushDone chan<- struct{} done := false for !done { select { @@ -332,7 +332,7 @@ func (sc *StatsCollector) start() { ticker.Stop() done = true case <-ticker.C: - case flushDone = <-sc.opts.Flush: + case reportFlushDone = <-sc.opts.Flush: } // Ensure we don't hold up this request for too long. Add a few @@ -342,9 +342,9 @@ func (sc *StatsCollector) start() { cancel() // For tests. 
- if flushDone != nil { - flushDone <- struct{}{} - flushDone = nil + if reportFlushDone != nil { + reportFlushDone <- struct{}{} + reportFlushDone = nil } } } From e8969d32da0b2b81bc67f67b2c97c45ac2c8031c Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Mon, 14 Aug 2023 10:49:23 +0000 Subject: [PATCH 05/25] fix wsproxy endpoint auth todo --- enterprise/coderd/workspaceproxy.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/enterprise/coderd/workspaceproxy.go b/enterprise/coderd/workspaceproxy.go index a6cac4e8bb723..f78d8b7c0ce4e 100644 --- a/enterprise/coderd/workspaceproxy.go +++ b/enterprise/coderd/workspaceproxy.go @@ -504,15 +504,16 @@ func (api *API) workspaceProxyIssueSignedAppToken(rw http.ResponseWriter, r *htt // @x-apidocgen {"skip": true} func (api *API) workspaceProxyReportAppStats(rw http.ResponseWriter, r *http.Request) { ctx := r.Context() - - // TODO(mafredri): Is auth needed? + _ = httpmw.WorkspaceProxy(r) // Ensure the proxy is authenticated. var stats []workspaceapps.StatsReport if !httpapi.Read(ctx, rw, r, &stats) { return } - if err := workspaceapps.NewStatsDBReporter(api.Database, workspaceapps.DefaultStatsDBReporterBatchSize).Report(ctx, stats); err != nil { + // Reporter has no state, so we can use an ephemeral instance here. + reporter := workspaceapps.NewStatsDBReporter(api.Database, workspaceapps.DefaultStatsDBReporterBatchSize) + if err := reporter.Report(ctx, stats); err != nil { api.Logger.Error(ctx, "report app stats failed", slog.Error(err)) httpapi.InternalServerError(rw, err) return From 7dc4ff226baffcd2f5bbfc89a98013df7db88a18 Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Mon, 14 Aug 2023 10:51:44 +0000 Subject: [PATCH 06/25] add backlog for re-reporting in case of failure --- coderd/database/queries.sql.go | 4 + coderd/database/queries/workspaceappstats.sql | 6 +- coderd/workspaceapps/stats.go | 29 ++++++- coderd/workspaceapps/stats_test.go | 82 ++++++++++++++++++- 4 files changed, 114 insertions(+), 7 deletions(-) diff --git a/coderd/database/queries.sql.go b/coderd/database/queries.sql.go index 936e3f4f06bd6..5e4fb9a3228ad 100644 --- a/coderd/database/queries.sql.go +++ b/coderd/database/queries.sql.go @@ -7799,6 +7799,10 @@ DO workspace_app_stats.user_id = EXCLUDED.user_id AND workspace_app_stats.agent_id = EXCLUDED.agent_id AND workspace_app_stats.session_id = EXCLUDED.session_id + -- Since stats are updated in place as time progresses, we only + -- want to update this row if it's fresh. + AND workspace_app_stats.session_ended_at <= EXCLUDED.session_ended_at + AND workspace_app_stats.requests <= EXCLUDED.requests ` type InsertWorkspaceAppStatsParams struct { diff --git a/coderd/database/queries/workspaceappstats.sql b/coderd/database/queries/workspaceappstats.sql index 6436f6a699f38..98da75e6972c7 100644 --- a/coderd/database/queries/workspaceappstats.sql +++ b/coderd/database/queries/workspaceappstats.sql @@ -30,4 +30,8 @@ DO WHERE workspace_app_stats.user_id = EXCLUDED.user_id AND workspace_app_stats.agent_id = EXCLUDED.agent_id - AND workspace_app_stats.session_id = EXCLUDED.session_id; + AND workspace_app_stats.session_id = EXCLUDED.session_id + -- Since stats are updated in place as time progresses, we only + -- want to update this row if it's fresh. 
+ AND workspace_app_stats.session_ended_at <= EXCLUDED.session_ended_at + AND workspace_app_stats.requests <= EXCLUDED.requests; diff --git a/coderd/workspaceapps/stats.go b/coderd/workspaceapps/stats.go index a21739e55eebf..e4632bc078942 100644 --- a/coderd/workspaceapps/stats.go +++ b/coderd/workspaceapps/stats.go @@ -142,6 +142,7 @@ type StatsCollector struct { mu sync.Mutex // Protects following. statsBySessionID map[uuid.UUID]*StatsReport // Track unique sessions. groupedStats map[statsGroupKey][]*StatsReport // Rolled up stats for sessions in close proximity. + backlog []StatsReport // Stats that have not been reported yet (due to error). } type StatsCollectorOptions struct { @@ -242,7 +243,7 @@ func (sc *StatsCollector) rollup() []StatsReport { rolledUp: true, } } - + rollupChanged := false newGroup := []*StatsReport{rolledUp} // Must be first in slice for future iterations (see group[0] above). for _, stat := range group { if !stat.SessionEndedAt.IsZero() && stat.SessionEndedAt.Sub(stat.SessionStartedAt) <= sc.opts.RollupWindow { @@ -251,6 +252,7 @@ func (sc *StatsCollector) rollup() []StatsReport { rolledUp.SessionID = stat.SessionID // Borrow the first session ID, useful in tests. } rolledUp.Requests += stat.Requests + rollupChanged = true continue } if stat.SessionEndedAt.IsZero() && sc.opts.Now().Sub(stat.SessionStartedAt) <= sc.opts.RollupWindow { @@ -273,7 +275,7 @@ func (sc *StatsCollector) rollup() []StatsReport { newGroup = append(newGroup, stat) // Keep it for future updates. } } - if rolledUp.Requests > 0 { + if rollupChanged { report = append(report, *rolledUp) } @@ -300,12 +302,33 @@ func (sc *StatsCollector) flush(ctx context.Context) (err error) { } }() + // We keep the backlog as a simple slice so that we don't need to + // attempt to merge it with the stats we're about to report. This + // is because the rollup is a one-way operation and the backlog may + // contain stats that are still in the statsBySessionID map and will + // be reported again in the future. It is possible to merge the + // backlog and the stats we're about to report, but it's not worth + // the complexity. 
+ if len(sc.backlog) > 0 { + err = sc.opts.Reporter.Report(ctx, sc.backlog) + if err != nil { + return xerrors.Errorf("report workspace app stats from backlog failed: %w", err) + } + sc.backlog = nil + } + stats := sc.rollup() if len(stats) == 0 { return nil } - return sc.opts.Reporter.Report(ctx, stats) + err = sc.opts.Reporter.Report(ctx, stats) + if err != nil { + sc.backlog = stats + return xerrors.Errorf("report workspace app stats failed: %w", err) + } + + return nil } func (sc *StatsCollector) Close() error { diff --git a/coderd/workspaceapps/stats_test.go b/coderd/workspaceapps/stats_test.go index 4a7932d32e9ec..2f806a8652a68 100644 --- a/coderd/workspaceapps/stats_test.go +++ b/coderd/workspaceapps/stats_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/exp/slices" + "golang.org/x/xerrors" "github.com/coder/coder/coderd/database" "github.com/coder/coder/coderd/workspaceapps" @@ -18,8 +19,10 @@ import ( ) type fakeReporter struct { - mu sync.Mutex - s []workspaceapps.StatsReport + mu sync.Mutex + s []workspaceapps.StatsReport + err error + errN int } func (r *fakeReporter) stats() []workspaceapps.StatsReport { @@ -28,8 +31,25 @@ func (r *fakeReporter) stats() []workspaceapps.StatsReport { return r.s } +func (r *fakeReporter) errors() int { + r.mu.Lock() + defer r.mu.Unlock() + return r.errN +} + +func (r *fakeReporter) setError(err error) { + r.mu.Lock() + defer r.mu.Unlock() + r.err = err +} + func (r *fakeReporter) Report(_ context.Context, stats []workspaceapps.StatsReport) error { r.mu.Lock() + if r.err != nil { + r.errN++ + r.mu.Unlock() + return r.err + } r.s = append(r.s, stats...) r.mu.Unlock() return nil @@ -296,7 +316,6 @@ func TestStatsCollector(t *testing.T) { var gotStats []workspaceapps.StatsReport require.Eventually(t, func() bool { gotStats = reporter.stats() - t.Logf("len(reporter.stats()) = %d, len(tt.want) = %d", len(gotStats), len(tt.want)) return len(gotStats) == len(tt.want) }, testutil.WaitMedium, testutil.IntervalFast) @@ -329,6 +348,63 @@ func TestStatsCollector(t *testing.T) { } } +func TestStatsCollector_backlog(t *testing.T) { + t.Parallel() + + rollupWindow := time.Minute + flush := make(chan chan<- struct{}, 1) + + start := database.Now().Truncate(time.Minute).UTC() + var now atomic.Pointer[time.Time] + now.Store(&start) + + reporter := &fakeReporter{} + collector := workspaceapps.NewStatsCollector(workspaceapps.StatsCollectorOptions{ + Reporter: reporter, + ReportInterval: time.Hour, + RollupWindow: rollupWindow, + + Flush: flush, + Now: func() time.Time { return *now.Load() }, + }) + + reporter.setError(xerrors.New("some error")) + + // The first collected stat is "rolled up" and moved into the + // backlog during the first flush. On the second flush nothing is + // rolled up due to being unable to report the backlog. + for i := 0; i < 2; i++ { + collector.Collect(workspaceapps.StatsReport{ + SessionID: uuid.New(), + SessionStartedAt: start, + SessionEndedAt: start.Add(10 * time.Second), + Requests: 1, + }) + start = start.Add(time.Minute) + now.Store(&start) + + flushDone := make(chan struct{}, 1) + flush <- flushDone + <-flushDone + } + + // Flush was performed 2 times, 2 reports should have failed. + wantErrors := 2 + assert.Equal(t, wantErrors, reporter.errors()) + assert.Empty(t, reporter.stats()) + + reporter.setError(nil) + + // Flush again, this time the backlog should be reported in addition + // to the second collected stat being rolled up and reported. 
+ flushDone := make(chan struct{}, 1) + flush <- flushDone + <-flushDone + + assert.Equal(t, wantErrors, reporter.errors()) + assert.Len(t, reporter.stats(), 2) +} + func TestStatsCollector_Close(t *testing.T) { t.Parallel() From 0561f8df2b9d4d3a7b7acb7295cb8e738a1ac4fa Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Mon, 14 Aug 2023 11:57:07 +0000 Subject: [PATCH 07/25] update dbauthz --- coderd/database/dbauthz/dbauthz.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/coderd/database/dbauthz/dbauthz.go b/coderd/database/dbauthz/dbauthz.go index 28ed877a4d3e0..b2cdfa82583c0 100644 --- a/coderd/database/dbauthz/dbauthz.go +++ b/coderd/database/dbauthz/dbauthz.go @@ -2032,7 +2032,9 @@ func (q *querier) InsertWorkspaceApp(ctx context.Context, arg database.InsertWor } func (q *querier) InsertWorkspaceAppStats(ctx context.Context, arg database.InsertWorkspaceAppStatsParams) error { - // TODO(mafredri): Add auth. + if err := q.authorizeContext(ctx, rbac.ActionCreate, rbac.ResourceSystem); err != nil { + return err + } return q.db.InsertWorkspaceAppStats(ctx, arg) } From 59d69acdaf3914cfc7f0b1f3dfb21ae42f6836d8 Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Mon, 14 Aug 2023 12:51:33 +0000 Subject: [PATCH 08/25] use dbauthz.AsSystemRestricted for collector flush --- coderd/workspaceapps/stats.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/coderd/workspaceapps/stats.go b/coderd/workspaceapps/stats.go index e4632bc078942..e9a0e5ac48f38 100644 --- a/coderd/workspaceapps/stats.go +++ b/coderd/workspaceapps/stats.go @@ -11,6 +11,7 @@ import ( "cdr.dev/slog" "github.com/coder/coder/coderd/database" + "github.com/coder/coder/coderd/database/dbauthz" ) const ( @@ -293,12 +294,12 @@ func (sc *StatsCollector) rollup() []StatsReport { } func (sc *StatsCollector) flush(ctx context.Context) (err error) { - sc.opts.Logger.Debug(sc.ctx, "flushing workspace app stats") + sc.opts.Logger.Debug(ctx, "flushing workspace app stats") defer func() { if err != nil { - sc.opts.Logger.Error(sc.ctx, "failed to flush workspace app stats", "error", err) + sc.opts.Logger.Error(ctx, "failed to flush workspace app stats", "error", err) } else { - sc.opts.Logger.Debug(sc.ctx, "flushed workspace app stats") + sc.opts.Logger.Debug(ctx, "flushed workspace app stats") } }() @@ -361,7 +362,8 @@ func (sc *StatsCollector) start() { // Ensure we don't hold up this request for too long. Add a few // seconds to prevent very short intervals from causing a timeout. ctx, cancel := context.WithTimeout(context.Background(), sc.opts.ReportInterval+5*time.Second) - _ = sc.flush(ctx) + //nolint:gocritic // Inserting app stats is a system function. + _ = sc.flush(dbauthz.AsSystemRestricted(ctx)) cancel() // For tests. 
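The collector introduced in the preceding patches is self-contained, so its intended usage can be sketched outside of coderd and wsproxy. The example below is illustrative only and not part of the patch series: it wires up workspaceapps.NewStatsCollector with a hypothetical stdoutReporter in place of the real reporters (NewStatsDBReporter in coderd, appStatsReporter in wsproxy), and mirrors how proxyWorkspaceApp and workspaceAgentPTY collect a report once at session start and again with SessionEndedAt set when the session closes. As in the package tests, the Logger is omitted and ReportInterval/RollupWindow are left at zero so the package defaults apply.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/google/uuid"

	"github.com/coder/coder/coderd/database"
	"github.com/coder/coder/coderd/workspaceapps"
)

// stdoutReporter is a hypothetical StatsReporter used for illustration; it
// prints reported sessions instead of writing them to the database or
// forwarding them to coderd.
type stdoutReporter struct{}

func (stdoutReporter) Report(_ context.Context, stats []workspaceapps.StatsReport) error {
	for _, stat := range stats {
		fmt.Printf("app session %s (%s/%s): %d request(s), %s -> %s\n",
			stat.SessionID, stat.AccessMethod, stat.SlugOrPort,
			stat.Requests, stat.SessionStartedAt, stat.SessionEndedAt)
	}
	return nil
}

func main() {
	// Zero ReportInterval and RollupWindow fall back to
	// DefaultStatsCollectorReportInterval and DefaultStatsCollectorRollupWindow.
	collector := workspaceapps.NewStatsCollector(workspaceapps.StatsCollectorOptions{
		Reporter: stdoutReporter{},
	})

	// Collect once when the app session starts (no end time yet).
	report := workspaceapps.StatsReport{
		UserID:           uuid.New(),
		WorkspaceID:      uuid.New(),
		AgentID:          uuid.New(),
		AccessMethod:     workspaceapps.AccessMethodPath,
		SlugOrPort:       "code-server",
		SessionID:        uuid.New(),
		SessionStartedAt: database.Now(),
		Requests:         1,
	}
	collector.Collect(report)

	// ... the app request is served here ...
	time.Sleep(100 * time.Millisecond)

	// Collect the same report again with an end time; the collector updates
	// the tracked session in place rather than recording a new one.
	report.SessionEndedAt = database.Now()
	collector.Collect(report)

	// Close performs a final flush, so the short session above is rolled up
	// and handed to the reporter before the program exits.
	_ = collector.Close()
}

Because this session is shorter than the rollup window, it is reported with its start time truncated to the window and its request count summed with any other short sessions in the same group, which is the compaction behavior exercised by TestStatsCollector above.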
From 97ef37b2446da708c97344e51edf6890a4122c19 Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Mon, 14 Aug 2023 12:52:00 +0000 Subject: [PATCH 09/25] add stats collection test to apptest --- coderd/coderd.go | 14 +++++--- coderd/coderdtest/coderdtest.go | 3 ++ coderd/workspaceapps/apptest/apptest.go | 48 +++++++++++++++++++++++++ coderd/workspaceapps/apptest/setup.go | 3 ++ coderd/workspaceapps_test.go | 1 + enterprise/wsproxy/wsproxy_test.go | 2 ++ 6 files changed, 66 insertions(+), 5 deletions(-) diff --git a/coderd/coderd.go b/coderd/coderd.go index 233a9e0e820e8..78a23ce444c24 100644 --- a/coderd/coderd.go +++ b/coderd/coderd.go @@ -162,6 +162,8 @@ type Options struct { UpdateAgentMetrics func(ctx context.Context, username, workspaceName, agentName string, metrics []agentsdk.AgentMetric) StatsBatcher *batchstats.Batcher + + WorkspaceAppsStatsCollector *workspaceapps.StatsCollector } // @title Coder API @@ -416,8 +418,13 @@ func New(options *Options) *API { Cache: wsconncache.New(api._dialWorkspaceAgentTailnet, 0), } } - workspaceAppsLogger := options.Logger.Named("workspaceapps") + if options.WorkspaceAppsStatsCollector == nil { + options.WorkspaceAppsStatsCollector = workspaceapps.NewStatsCollector(workspaceapps.StatsCollectorOptions{ + Logger: workspaceAppsLogger.Named("stats_collector"), + Reporter: workspaceapps.NewStatsDBReporter(options.Database, workspaceapps.DefaultStatsDBReporterBatchSize), + }) + } api.workspaceAppServer = &workspaceapps.Server{ Logger: workspaceAppsLogger, @@ -430,10 +437,7 @@ func New(options *Options) *API { SignedTokenProvider: api.WorkspaceAppsProvider, AgentProvider: api.agentProvider, AppSecurityKey: options.AppSecurityKey, - StatsCollector: workspaceapps.NewStatsCollector(workspaceapps.StatsCollectorOptions{ - Logger: workspaceAppsLogger.Named("stats_collector"), - Reporter: workspaceapps.NewStatsDBReporter(options.Database, workspaceapps.DefaultStatsDBReporterBatchSize), - }), + StatsCollector: options.WorkspaceAppsStatsCollector, DisablePathApps: options.DeploymentValues.DisablePathApps.Value(), SecureAuthCookie: options.DeploymentValues.SecureAuthCookie.Value(), diff --git a/coderd/coderdtest/coderdtest.go b/coderd/coderdtest/coderdtest.go index 71e3336ab2e87..775aaed1e7f72 100644 --- a/coderd/coderdtest/coderdtest.go +++ b/coderd/coderdtest/coderdtest.go @@ -144,6 +144,8 @@ type Options struct { // as part of your test. Logger *slog.Logger StatsBatcher *batchstats.Batcher + + WorkspaceAppsStatsCollector *workspaceapps.StatsCollector } // New constructs a codersdk client connected to an in-memory API instance. 
@@ -425,6 +427,7 @@ func NewOptions(t testing.TB, options *Options) (func(http.Handler), context.Can HealthcheckTimeout: options.HealthcheckTimeout, HealthcheckRefresh: options.HealthcheckRefresh, StatsBatcher: options.StatsBatcher, + WorkspaceAppsStatsCollector: options.WorkspaceAppsStatsCollector, } } diff --git a/coderd/workspaceapps/apptest/apptest.go b/coderd/workspaceapps/apptest/apptest.go index f64ba7c30bf31..8cc8607a9fbba 100644 --- a/coderd/workspaceapps/apptest/apptest.go +++ b/coderd/workspaceapps/apptest/apptest.go @@ -15,6 +15,7 @@ import ( "runtime" "strconv" "strings" + "sync" "testing" "time" @@ -1406,4 +1407,51 @@ func Run(t *testing.T, appHostIsPrimary bool, factory DeploymentFactory) { require.Equal(t, []string{"Origin", "X-Foobar"}, deduped) require.Equal(t, []string{"baz"}, resp.Header.Values("X-Foobar")) }) + + t.Run("ReportStats", func(t *testing.T) { + t.Parallel() + + reporter := &fakeStatsReporter{} + collector := workspaceapps.NewStatsCollector(workspaceapps.StatsCollectorOptions{ + Reporter: reporter, + ReportInterval: time.Second, + RollupWindow: time.Minute, + }) + appDetails := setupProxyTest(t, &DeploymentOptions{ + StatsCollector: collector, + }) + + ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) + defer cancel() + + u := appDetails.PathAppURL(appDetails.Apps.Owner) + resp, err := requestWithRetries(ctx, t, appDetails.AppClient(t), http.MethodGet, u.String(), nil) + require.NoError(t, err) + defer resp.Body.Close() + _, err = io.Copy(io.Discard, resp.Body) + require.NoError(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) + + require.Eventually(t, func() bool { + return len(reporter.stats()) > 0 + }, testutil.WaitLong, testutil.IntervalMedium, "stats not reported") + }) +} + +type fakeStatsReporter struct { + mu sync.Mutex + s []workspaceapps.StatsReport +} + +func (r *fakeStatsReporter) stats() []workspaceapps.StatsReport { + r.mu.Lock() + defer r.mu.Unlock() + return r.s +} + +func (r *fakeStatsReporter) Report(_ context.Context, stats []workspaceapps.StatsReport) error { + r.mu.Lock() + r.s = append(r.s, stats...) + r.mu.Unlock() + return nil } diff --git a/coderd/workspaceapps/apptest/setup.go b/coderd/workspaceapps/apptest/setup.go index 4245204268391..e6a13ccb609ea 100644 --- a/coderd/workspaceapps/apptest/setup.go +++ b/coderd/workspaceapps/apptest/setup.go @@ -21,6 +21,7 @@ import ( "cdr.dev/slog/sloggers/slogtest" "github.com/coder/coder/agent" "github.com/coder/coder/coderd/coderdtest" + "github.com/coder/coder/coderd/workspaceapps" "github.com/coder/coder/codersdk" "github.com/coder/coder/codersdk/agentsdk" "github.com/coder/coder/provisioner/echo" @@ -51,6 +52,8 @@ type DeploymentOptions struct { DangerousAllowPathAppSiteOwnerAccess bool ServeHTTPS bool + StatsCollector *workspaceapps.StatsCollector + // The following fields are only used by setupProxyTestWithFactory. 
noWorkspace bool port uint16 diff --git a/coderd/workspaceapps_test.go b/coderd/workspaceapps_test.go index 8f31bed4d27d2..49bf5e6ccb12d 100644 --- a/coderd/workspaceapps_test.go +++ b/coderd/workspaceapps_test.go @@ -275,6 +275,7 @@ func TestWorkspaceApps(t *testing.T) { "CF-Connecting-IP", }, }, + WorkspaceAppsStatsCollector: opts.StatsCollector, }) user := coderdtest.CreateFirstUser(t, client) diff --git a/enterprise/wsproxy/wsproxy_test.go b/enterprise/wsproxy/wsproxy_test.go index afcb3d1f16143..a77875100a0bb 100644 --- a/enterprise/wsproxy/wsproxy_test.go +++ b/enterprise/wsproxy/wsproxy_test.go @@ -596,6 +596,7 @@ func TestWorkspaceProxyWorkspaceApps_Wsconncache(t *testing.T) { "CF-Connecting-IP", }, }, + WorkspaceAppsStatsCollector: opts.StatsCollector, }, LicenseOptions: &coderdenttest.LicenseOptions{ Features: license.Features{ @@ -654,6 +655,7 @@ func TestWorkspaceProxyWorkspaceApps_SingleTailnet(t *testing.T) { "CF-Connecting-IP", }, }, + WorkspaceAppsStatsCollector: opts.StatsCollector, }, LicenseOptions: &coderdenttest.LicenseOptions{ Features: license.Features{ From 8926dd038904eef4919398e054f0df8adda1cc46 Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Mon, 14 Aug 2023 12:54:30 +0000 Subject: [PATCH 10/25] fix migrations --- ...ace_app_stats.down.sql => 000150_workspace_app_stats.down.sql} | 0 ...rkspace_app_stats.up.sql => 000150_workspace_app_stats.up.sql} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename coderd/database/migrations/{000148_workspace_app_stats.down.sql => 000150_workspace_app_stats.down.sql} (100%) rename coderd/database/migrations/{000148_workspace_app_stats.up.sql => 000150_workspace_app_stats.up.sql} (100%) diff --git a/coderd/database/migrations/000148_workspace_app_stats.down.sql b/coderd/database/migrations/000150_workspace_app_stats.down.sql similarity index 100% rename from coderd/database/migrations/000148_workspace_app_stats.down.sql rename to coderd/database/migrations/000150_workspace_app_stats.down.sql diff --git a/coderd/database/migrations/000148_workspace_app_stats.up.sql b/coderd/database/migrations/000150_workspace_app_stats.up.sql similarity index 100% rename from coderd/database/migrations/000148_workspace_app_stats.up.sql rename to coderd/database/migrations/000150_workspace_app_stats.up.sql From 1705138bff24aac542eb8c912b0ac2eec4a8726c Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Mon, 14 Aug 2023 14:01:02 +0000 Subject: [PATCH 11/25] update plumbing to fix wsproxy tests --- coderd/coderd.go | 17 ++--- coderd/coderdtest/coderdtest.go | 66 ++++++++++---------- coderd/workspaceapps/apptest/apptest.go | 22 ++++--- coderd/workspaceapps/apptest/setup.go | 2 +- coderd/workspaceapps/stats.go | 5 +- enterprise/coderd/coderdenttest/proxytest.go | 5 ++ enterprise/coderd/workspaceproxy.go | 11 ++-- enterprise/wsproxy/wsproxy.go | 17 +++-- enterprise/wsproxy/wsproxy_test.go | 4 +- 9 files changed, 88 insertions(+), 61 deletions(-) diff --git a/coderd/coderd.go b/coderd/coderd.go index a7dc443219845..74c669cf8670a 100644 --- a/coderd/coderd.go +++ b/coderd/coderd.go @@ -163,7 +163,7 @@ type Options struct { UpdateAgentMetrics func(ctx context.Context, username, workspaceName, agentName string, metrics []agentsdk.AgentMetric) StatsBatcher *batchstats.Batcher - WorkspaceAppsStatsCollector *workspaceapps.StatsCollector + WorkspaceAppsStatsCollectorOptions workspaceapps.StatsCollectorOptions } // @title Coder API @@ -418,13 +418,16 @@ func New(options *Options) *API { Cache: wsconncache.New(api._dialWorkspaceAgentTailnet, 
0), } } + workspaceAppsLogger := options.Logger.Named("workspaceapps") - if options.WorkspaceAppsStatsCollector == nil { - options.WorkspaceAppsStatsCollector = workspaceapps.NewStatsCollector(workspaceapps.StatsCollectorOptions{ - Logger: workspaceAppsLogger.Named("stats_collector"), - Reporter: workspaceapps.NewStatsDBReporter(options.Database, workspaceapps.DefaultStatsDBReporterBatchSize), - }) + if options.WorkspaceAppsStatsCollectorOptions.Logger == nil { + named := workspaceAppsLogger.Named("stats_collector") + options.WorkspaceAppsStatsCollectorOptions.Logger = &named } + if options.WorkspaceAppsStatsCollectorOptions.Reporter == nil { + options.WorkspaceAppsStatsCollectorOptions.Reporter = workspaceapps.NewStatsDBReporter(options.Database, workspaceapps.DefaultStatsDBReporterBatchSize) + } + api.workspaceAppServer = &workspaceapps.Server{ Logger: workspaceAppsLogger, @@ -437,7 +440,7 @@ func New(options *Options) *API { SignedTokenProvider: api.WorkspaceAppsProvider, AgentProvider: api.agentProvider, AppSecurityKey: options.AppSecurityKey, - StatsCollector: options.WorkspaceAppsStatsCollector, + StatsCollector: workspaceapps.NewStatsCollector(options.WorkspaceAppsStatsCollectorOptions), DisablePathApps: options.DeploymentValues.DisablePathApps.Value(), SecureAuthCookie: options.DeploymentValues.SecureAuthCookie.Value(), diff --git a/coderd/coderdtest/coderdtest.go b/coderd/coderdtest/coderdtest.go index 5dac122fe4f47..a53fc75353cae 100644 --- a/coderd/coderdtest/coderdtest.go +++ b/coderd/coderdtest/coderdtest.go @@ -145,7 +145,7 @@ type Options struct { Logger *slog.Logger StatsBatcher *batchstats.Batcher - WorkspaceAppsStatsCollector *workspaceapps.StatsCollector + WorkspaceAppsStatsCollectorOptions workspaceapps.StatsCollectorOptions } // New constructs a codersdk client connected to an in-memory API instance. 
@@ -396,38 +396,38 @@ func NewOptions(t testing.TB, options *Options) (func(http.Handler), context.Can Pubsub: options.Pubsub, GitAuthConfigs: options.GitAuthConfigs, - Auditor: options.Auditor, - AWSCertificates: options.AWSCertificates, - AzureCertificates: options.AzureCertificates, - GithubOAuth2Config: options.GithubOAuth2Config, - RealIPConfig: options.RealIPConfig, - OIDCConfig: options.OIDCConfig, - GoogleTokenValidator: options.GoogleTokenValidator, - SSHKeygenAlgorithm: options.SSHKeygenAlgorithm, - DERPServer: derpServer, - APIRateLimit: options.APIRateLimit, - LoginRateLimit: options.LoginRateLimit, - FilesRateLimit: options.FilesRateLimit, - Authorizer: options.Authorizer, - Telemetry: telemetry.NewNoop(), - TemplateScheduleStore: &templateScheduleStore, - TLSCertificates: options.TLSCertificates, - TrialGenerator: options.TrialGenerator, - TailnetCoordinator: options.Coordinator, - BaseDERPMap: derpMap, - DERPMapUpdateFrequency: 150 * time.Millisecond, - MetricsCacheRefreshInterval: options.MetricsCacheRefreshInterval, - AgentStatsRefreshInterval: options.AgentStatsRefreshInterval, - DeploymentValues: options.DeploymentValues, - UpdateCheckOptions: options.UpdateCheckOptions, - SwaggerEndpoint: options.SwaggerEndpoint, - AppSecurityKey: AppSecurityKey, - SSHConfig: options.ConfigSSH, - HealthcheckFunc: options.HealthcheckFunc, - HealthcheckTimeout: options.HealthcheckTimeout, - HealthcheckRefresh: options.HealthcheckRefresh, - StatsBatcher: options.StatsBatcher, - WorkspaceAppsStatsCollector: options.WorkspaceAppsStatsCollector, + Auditor: options.Auditor, + AWSCertificates: options.AWSCertificates, + AzureCertificates: options.AzureCertificates, + GithubOAuth2Config: options.GithubOAuth2Config, + RealIPConfig: options.RealIPConfig, + OIDCConfig: options.OIDCConfig, + GoogleTokenValidator: options.GoogleTokenValidator, + SSHKeygenAlgorithm: options.SSHKeygenAlgorithm, + DERPServer: derpServer, + APIRateLimit: options.APIRateLimit, + LoginRateLimit: options.LoginRateLimit, + FilesRateLimit: options.FilesRateLimit, + Authorizer: options.Authorizer, + Telemetry: telemetry.NewNoop(), + TemplateScheduleStore: &templateScheduleStore, + TLSCertificates: options.TLSCertificates, + TrialGenerator: options.TrialGenerator, + TailnetCoordinator: options.Coordinator, + BaseDERPMap: derpMap, + DERPMapUpdateFrequency: 150 * time.Millisecond, + MetricsCacheRefreshInterval: options.MetricsCacheRefreshInterval, + AgentStatsRefreshInterval: options.AgentStatsRefreshInterval, + DeploymentValues: options.DeploymentValues, + UpdateCheckOptions: options.UpdateCheckOptions, + SwaggerEndpoint: options.SwaggerEndpoint, + AppSecurityKey: AppSecurityKey, + SSHConfig: options.ConfigSSH, + HealthcheckFunc: options.HealthcheckFunc, + HealthcheckTimeout: options.HealthcheckTimeout, + HealthcheckRefresh: options.HealthcheckRefresh, + StatsBatcher: options.StatsBatcher, + WorkspaceAppsStatsCollectorOptions: options.WorkspaceAppsStatsCollectorOptions, } } diff --git a/coderd/workspaceapps/apptest/apptest.go b/coderd/workspaceapps/apptest/apptest.go index 8cc8607a9fbba..d85d16ca6aed9 100644 --- a/coderd/workspaceapps/apptest/apptest.go +++ b/coderd/workspaceapps/apptest/apptest.go @@ -1411,14 +1411,17 @@ func Run(t *testing.T, appHostIsPrimary bool, factory DeploymentFactory) { t.Run("ReportStats", func(t *testing.T) { t.Parallel() + flush := make(chan chan<- struct{}, 1) + reporter := &fakeStatsReporter{} - collector := workspaceapps.NewStatsCollector(workspaceapps.StatsCollectorOptions{ - Reporter: reporter, 
- ReportInterval: time.Second, - RollupWindow: time.Minute, - }) appDetails := setupProxyTest(t, &DeploymentOptions{ - StatsCollector: collector, + StatsCollectorOptions: workspaceapps.StatsCollectorOptions{ + Reporter: reporter, + ReportInterval: time.Hour, + RollupWindow: time.Minute, + + Flush: flush, + }, }) ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) @@ -1433,8 +1436,13 @@ func Run(t *testing.T, appHostIsPrimary bool, factory DeploymentFactory) { require.Equal(t, http.StatusOK, resp.StatusCode) require.Eventually(t, func() bool { + // Keep flushing until we get a non-empty stats report. + flushDone := make(chan struct{}, 1) + flush <- flushDone + <-flushDone + return len(reporter.stats()) > 0 - }, testutil.WaitLong, testutil.IntervalMedium, "stats not reported") + }, testutil.WaitLong, testutil.IntervalFast, "stats not reported") }) } diff --git a/coderd/workspaceapps/apptest/setup.go b/coderd/workspaceapps/apptest/setup.go index e6a13ccb609ea..dc3b0e0e1c40f 100644 --- a/coderd/workspaceapps/apptest/setup.go +++ b/coderd/workspaceapps/apptest/setup.go @@ -52,7 +52,7 @@ type DeploymentOptions struct { DangerousAllowPathAppSiteOwnerAccess bool ServeHTTPS bool - StatsCollector *workspaceapps.StatsCollector + StatsCollectorOptions workspaceapps.StatsCollectorOptions // The following fields are only used by setupProxyTestWithFactory. noWorkspace bool diff --git a/coderd/workspaceapps/stats.go b/coderd/workspaceapps/stats.go index e9a0e5ac48f38..c0bf55783de40 100644 --- a/coderd/workspaceapps/stats.go +++ b/coderd/workspaceapps/stats.go @@ -147,7 +147,7 @@ type StatsCollector struct { } type StatsCollectorOptions struct { - Logger slog.Logger + Logger *slog.Logger Reporter StatsReporter // ReportInterval is the interval at which stats are reported, both partial // and fully formed stats. @@ -163,6 +163,9 @@ type StatsCollectorOptions struct { } func NewStatsCollector(opts StatsCollectorOptions) *StatsCollector { + if opts.Logger == nil { + opts.Logger = &slog.Logger{} + } if opts.ReportInterval == 0 { opts.ReportInterval = DefaultStatsCollectorReportInterval } diff --git a/enterprise/coderd/coderdenttest/proxytest.go b/enterprise/coderd/coderdenttest/proxytest.go index c544e14b44a46..a9031e90a5f32 100644 --- a/enterprise/coderd/coderdenttest/proxytest.go +++ b/enterprise/coderd/coderdenttest/proxytest.go @@ -110,6 +110,10 @@ func NewWorkspaceProxy(t *testing.T, coderdAPI *coderd.API, owner *codersdk.Clie }) require.NoError(t, err, "failed to create workspace proxy") + // Inherit collector options from coderd, but keep the wsproxy reporter. 
+ statsCollectorOptions := coderdAPI.Options.WorkspaceAppsStatsCollectorOptions + statsCollectorOptions.Reporter = nil + wssrv, err := wsproxy.New(ctx, &wsproxy.Options{ Logger: slogtest.Make(t, nil).Leveled(slog.LevelDebug), Experiments: options.Experiments, @@ -129,6 +133,7 @@ func NewWorkspaceProxy(t *testing.T, coderdAPI *coderd.API, owner *codersdk.Clie DERPEnabled: !options.DerpDisabled, DERPOnly: options.DerpOnly, DERPServerRelayAddress: accessURL.String(), + StatsCollectorOptions: statsCollectorOptions, }) require.NoError(t, err) t.Cleanup(func() { diff --git a/enterprise/coderd/workspaceproxy.go b/enterprise/coderd/workspaceproxy.go index 871d34d7080fb..8685ca971a2d9 100644 --- a/enterprise/coderd/workspaceproxy.go +++ b/enterprise/coderd/workspaceproxy.go @@ -511,14 +511,15 @@ func (api *API) workspaceProxyReportAppStats(rw http.ResponseWriter, r *http.Req ctx := r.Context() _ = httpmw.WorkspaceProxy(r) // Ensure the proxy is authenticated. - var stats []workspaceapps.StatsReport - if !httpapi.Read(ctx, rw, r, &stats) { + var req wsproxysdk.ReportAppStatsRequest + if !httpapi.Read(ctx, rw, r, &req) { return } - // Reporter has no state, so we can use an ephemeral instance here. - reporter := workspaceapps.NewStatsDBReporter(api.Database, workspaceapps.DefaultStatsDBReporterBatchSize) - if err := reporter.Report(ctx, stats); err != nil { + api.Logger.Debug(ctx, "report app stats", slog.F("stats", req.Stats)) + + reporter := api.WorkspaceAppsStatsCollectorOptions.Reporter + if err := reporter.Report(ctx, req.Stats); err != nil { api.Logger.Error(ctx, "report app stats failed", slog.Error(err)) httpapi.InternalServerError(rw, err) return diff --git a/enterprise/wsproxy/wsproxy.go b/enterprise/wsproxy/wsproxy.go index b675be7c52027..4f80b9eec9ebd 100644 --- a/enterprise/wsproxy/wsproxy.go +++ b/enterprise/wsproxy/wsproxy.go @@ -79,6 +79,8 @@ type Options struct { // By default, CORs is set to accept external requests // from the dashboardURL. This should only be used in development. 
AllowAllCors bool + + StatsCollectorOptions workspaceapps.StatsCollectorOptions } func (o *Options) Validate() error { @@ -262,6 +264,14 @@ func New(ctx context.Context, opts *Options) (*Server, error) { } workspaceAppsLogger := opts.Logger.Named("workspaceapps") + if opts.StatsCollectorOptions.Logger == nil { + named := workspaceAppsLogger.Named("stats_collector") + opts.StatsCollectorOptions.Logger = &named + } + if opts.StatsCollectorOptions.Reporter == nil { + opts.StatsCollectorOptions.Reporter = &appStatsReporter{Client: client} + } + s.AppServer = &workspaceapps.Server{ Logger: workspaceAppsLogger, DashboardURL: opts.DashboardURL, @@ -282,11 +292,8 @@ func New(ctx context.Context, opts *Options) (*Server, error) { DisablePathApps: opts.DisablePathApps, SecureAuthCookie: opts.SecureAuthCookie, - AgentProvider: agentProvider, - StatsCollector: workspaceapps.NewStatsCollector(workspaceapps.StatsCollectorOptions{ - Logger: workspaceAppsLogger.Named("stats_collector"), - Reporter: &appStatsReporter{Client: client}, - }), + AgentProvider: agentProvider, + StatsCollector: workspaceapps.NewStatsCollector(opts.StatsCollectorOptions), } derpHandler := derphttp.Handler(derpServer) diff --git a/enterprise/wsproxy/wsproxy_test.go b/enterprise/wsproxy/wsproxy_test.go index b7fa8191b3949..64fe414fe5fac 100644 --- a/enterprise/wsproxy/wsproxy_test.go +++ b/enterprise/wsproxy/wsproxy_test.go @@ -478,7 +478,7 @@ func TestWorkspaceProxyWorkspaceApps_Wsconncache(t *testing.T) { "CF-Connecting-IP", }, }, - WorkspaceAppsStatsCollector: opts.StatsCollector, + WorkspaceAppsStatsCollectorOptions: opts.StatsCollectorOptions, }, LicenseOptions: &coderdenttest.LicenseOptions{ Features: license.Features{ @@ -537,7 +537,7 @@ func TestWorkspaceProxyWorkspaceApps_SingleTailnet(t *testing.T) { "CF-Connecting-IP", }, }, - WorkspaceAppsStatsCollector: opts.StatsCollector, + WorkspaceAppsStatsCollectorOptions: opts.StatsCollectorOptions, }, LicenseOptions: &coderdenttest.LicenseOptions{ Features: license.Features{ From d709e681379951925aa91d21b92783c93bea0a0f Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Mon, 14 Aug 2023 14:11:25 +0000 Subject: [PATCH 12/25] test the stat output in apptest --- coderd/workspaceapps/apptest/apptest.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/coderd/workspaceapps/apptest/apptest.go b/coderd/workspaceapps/apptest/apptest.go index d85d16ca6aed9..cf644148840ee 100644 --- a/coderd/workspaceapps/apptest/apptest.go +++ b/coderd/workspaceapps/apptest/apptest.go @@ -1435,14 +1435,20 @@ func Run(t *testing.T, appHostIsPrimary bool, factory DeploymentFactory) { require.NoError(t, err) require.Equal(t, http.StatusOK, resp.StatusCode) + var stats []workspaceapps.StatsReport require.Eventually(t, func() bool { // Keep flushing until we get a non-empty stats report. 
flushDone := make(chan struct{}, 1) flush <- flushDone <-flushDone - return len(reporter.stats()) > 0 + stats = reporter.stats() + return len(stats) > 0 }, testutil.WaitLong, testutil.IntervalFast, "stats not reported") + + assert.Equal(t, workspaceapps.AccessMethodPath, stats[0].AccessMethod) + assert.Equal(t, "test-app-owner", stats[0].SlugOrPort) + assert.Equal(t, 1, stats[0].Requests) }) } From 6ec178bec7f100c9a1a0027eb67e9adc1a4f7cf0 Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Mon, 14 Aug 2023 14:39:30 +0000 Subject: [PATCH 13/25] fix issues --- coderd/database/dbfake/dbfake.go | 2 +- coderd/workspaceapps/stats_test.go | 15 ++++++--------- coderd/workspaceapps_test.go | 2 +- enterprise/coderd/workspaceproxy.go | 1 - 4 files changed, 8 insertions(+), 12 deletions(-) diff --git a/coderd/database/dbfake/dbfake.go b/coderd/database/dbfake/dbfake.go index c76775a004bf3..3034c741198fb 100644 --- a/coderd/database/dbfake/dbfake.go +++ b/coderd/database/dbfake/dbfake.go @@ -4309,7 +4309,7 @@ func (q *FakeQuerier) InsertWorkspaceApp(_ context.Context, arg database.InsertW return workspaceApp, nil } -func (q *FakeQuerier) InsertWorkspaceAppStats(ctx context.Context, arg database.InsertWorkspaceAppStatsParams) error { +func (q *FakeQuerier) InsertWorkspaceAppStats(_ context.Context, arg database.InsertWorkspaceAppStatsParams) error { err := validateDatabaseType(arg) if err != nil { return err diff --git a/coderd/workspaceapps/stats_test.go b/coderd/workspaceapps/stats_test.go index 2f806a8652a68..2ad0c5556c52c 100644 --- a/coderd/workspaceapps/stats_test.go +++ b/coderd/workspaceapps/stats_test.go @@ -320,17 +320,14 @@ func TestStatsCollector(t *testing.T) { }, testutil.WaitMedium, testutil.IntervalFast) // Order is not guaranteed. - sortBySessionID := func(a, b workspaceapps.StatsReport) bool { + sortBySessionID := func(a, b workspaceapps.StatsReport) int { if a.SessionID == b.SessionID { - if !a.SessionEndedAt.IsZero() && !b.SessionEndedAt.IsZero() { - return a.SessionEndedAt.Before(b.SessionEndedAt) - } - if a.SessionEndedAt.IsZero() { - return true - } - return false + return int(a.SessionEndedAt.Sub(b.SessionEndedAt)) } - return a.SessionID.String() < b.SessionID.String() + if a.SessionID.String() < b.SessionID.String() { + return -1 + } + return 1 } slices.SortFunc(tt.want, sortBySessionID) slices.SortFunc(gotStats, sortBySessionID) diff --git a/coderd/workspaceapps_test.go b/coderd/workspaceapps_test.go index 49bf5e6ccb12d..1312a9d1e3198 100644 --- a/coderd/workspaceapps_test.go +++ b/coderd/workspaceapps_test.go @@ -275,7 +275,7 @@ func TestWorkspaceApps(t *testing.T) { "CF-Connecting-IP", }, }, - WorkspaceAppsStatsCollector: opts.StatsCollector, + WorkspaceAppsStatsCollectorOptions: opts.StatsCollectorOptions, }) user := coderdtest.CreateFirstUser(t, client) diff --git a/enterprise/coderd/workspaceproxy.go b/enterprise/coderd/workspaceproxy.go index 8685ca971a2d9..87a4f8898872c 100644 --- a/enterprise/coderd/workspaceproxy.go +++ b/enterprise/coderd/workspaceproxy.go @@ -501,7 +501,6 @@ func (api *API) workspaceProxyIssueSignedAppToken(rw http.ResponseWriter, r *htt // @ID report-workspace-app-stats // @Security CoderSessionToken // @Accept json -// @Produce json // @Tags Enterprise // @Param request body wsproxysdk.ReportAppStatsRequest true "Report app stats request" // @Success 204 From 7db34ba479fe53fd1c571b457699d946fa07e2a6 Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Mon, 14 Aug 2023 14:52:37 +0000 Subject: [PATCH 14/25] fix id in dbfake --- 
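Note: before this change the fake store computed the next id once and reused
it for every row in a batch, so a multi-row insert produced duplicate ids.
The loop below advances the counter per inserted row. A minimal sketch of the
intended pattern, with illustrative names only (not the real dbfake types):

    package main

    import "fmt"

    type appStat struct{ ID int64 }

    // insertBatch appends rows with monotonically increasing ids, mirroring
    // how the BIGSERIAL id column behaves in the real database.
    func insertBatch(store []appStat, rows int) []appStat {
        nextID := int64(1)
        if len(store) > 0 {
            nextID = store[len(store)-1].ID + 1
        }
        for i := 0; i < rows; i++ {
            store = append(store, appStat{ID: nextID})
            nextID++ // advance per row; without this every row shares one id
        }
        return store
    }

    func main() {
        fmt.Println(insertBatch(nil, 3)) // [{1} {2} {3}]
    }
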
coderd/database/dbfake/dbfake.go | 1 + 1 file changed, 1 insertion(+) diff --git a/coderd/database/dbfake/dbfake.go b/coderd/database/dbfake/dbfake.go index 3034c741198fb..9e9b38be43366 100644 --- a/coderd/database/dbfake/dbfake.go +++ b/coderd/database/dbfake/dbfake.go @@ -4345,6 +4345,7 @@ InsertWorkspaceAppStatsLoop: } } q.workspaceAppStats = append(q.workspaceAppStats, stat) + nextID++ } return nil From ca83430c8ba73a3ad8551358702985cee685d719 Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Mon, 14 Aug 2023 15:01:30 +0000 Subject: [PATCH 15/25] fix gen --- coderd/apidoc/docs.go | 3 --- coderd/apidoc/swagger.json | 1 - 2 files changed, 4 deletions(-) diff --git a/coderd/apidoc/docs.go b/coderd/apidoc/docs.go index 41c5e23ddb92d..fec23dde6045d 100644 --- a/coderd/apidoc/docs.go +++ b/coderd/apidoc/docs.go @@ -5489,9 +5489,6 @@ const docTemplate = `{ "consumes": [ "application/json" ], - "produces": [ - "application/json" - ], "tags": [ "Enterprise" ], diff --git a/coderd/apidoc/swagger.json b/coderd/apidoc/swagger.json index bef31742a1336..cba1285cf0de7 100644 --- a/coderd/apidoc/swagger.json +++ b/coderd/apidoc/swagger.json @@ -4839,7 +4839,6 @@ } ], "consumes": ["application/json"], - "produces": ["application/json"], "tags": ["Enterprise"], "summary": "Report workspace app stats", "operationId": "report-workspace-app-stats", From 8039a4d635476d4b34667f5411b1f89b7587813a Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Tue, 15 Aug 2023 10:48:18 +0000 Subject: [PATCH 16/25] add fixture --- .../000150_workspace_app_usage_stats.up.sql | 133 ++++++++++++++++++ 1 file changed, 133 insertions(+) create mode 100644 coderd/database/migrations/testdata/fixtures/000150_workspace_app_usage_stats.up.sql diff --git a/coderd/database/migrations/testdata/fixtures/000150_workspace_app_usage_stats.up.sql b/coderd/database/migrations/testdata/fixtures/000150_workspace_app_usage_stats.up.sql new file mode 100644 index 0000000000000..9a9a8f0fa72dc --- /dev/null +++ b/coderd/database/migrations/testdata/fixtures/000150_workspace_app_usage_stats.up.sql @@ -0,0 +1,133 @@ +INSERT INTO public.workspace_app_stats ( + id, + user_id, + workspace_id, + agent_id, + access_method, + slug_or_port, + session_id, + session_started_at, + session_ended_at, + requests +) +VALUES + ( + 1498, + '30095c71-380b-457a-8995-97b8ee6e5307', + '3a9a1feb-e89d-457c-9d53-ac751b198ebe', + '7a1ce5f8-8d00-431c-ad1b-97a846512804', + 'path', + 'code-server', + '562cbfb8-3d9a-4018-9c04-e8159d5aa43e', + '2023-08-14 20:15:00+00', + '2023-08-14 20:16:00+00', + 1 + ), + ( + 59, + '30095c71-380b-457a-8995-97b8ee6e5307', + '3a9a1feb-e89d-457c-9d53-ac751b198ebe', + '7a1ce5f8-8d00-431c-ad1b-97a846512804', + 'terminal', + '', + '281919d0-5d99-48fb-8a93-2c3019010387', + '2023-08-14 14:15:40.085827+00', + '2023-08-14 14:17:41.295989+00', + 1 + ), + ( + 58, + '30095c71-380b-457a-8995-97b8ee6e5307', + '3a9a1feb-e89d-457c-9d53-ac751b198ebe', + '7a1ce5f8-8d00-431c-ad1b-97a846512804', + 'path', + 'code-server', + '5b7c9d43-19e6-4401-997b-c26de2c86c55', + '2023-08-14 14:15:34.620496+00', + '2023-08-14 23:58:37.158964+00', + 1 + ), + ( + 57, + '30095c71-380b-457a-8995-97b8ee6e5307', + '3a9a1feb-e89d-457c-9d53-ac751b198ebe', + '7a1ce5f8-8d00-431c-ad1b-97a846512804', + 'path', + 'code-server', + 'fe546a68-0921-4a2b-bced-5dc5c5635576', + '2023-08-14 14:15:34.129002+00', + '2023-08-14 23:58:37.158901+00', + 1 + ), + ( + 56, + '30095c71-380b-457a-8995-97b8ee6e5307', + '3a9a1feb-e89d-457c-9d53-ac751b198ebe', + 
'7a1ce5f8-8d00-431c-ad1b-97a846512804', + 'path', + 'code-server', + '96e4e857-598c-4881-bc40-e13008b48bb0', + '2023-08-14 14:15:00+00', + '2023-08-14 14:16:00+00', + 36 + ), + ( + 7, + '30095c71-380b-457a-8995-97b8ee6e5307', + '3a9a1feb-e89d-457c-9d53-ac751b198ebe', + '7a1ce5f8-8d00-431c-ad1b-97a846512804', + 'terminal', + '', + '95d22d41-0fde-447b-9743-0b8583edb60a', + '2023-08-14 13:00:28.732837+00', + '2023-08-14 13:09:23.990797+00', + 1 + ), + ( + 4, + '30095c71-380b-457a-8995-97b8ee6e5307', + '3a9a1feb-e89d-457c-9d53-ac751b198ebe', + '7a1ce5f8-8d00-431c-ad1b-97a846512804', + 'path', + 'code-server', + '442688ce-f9e7-46df-ba3d-623ef9a1d30d', + '2023-08-14 13:00:12.843977+00', + '2023-08-14 13:09:26.276696+00', + 1 + ), + ( + 3, + '30095c71-380b-457a-8995-97b8ee6e5307', + '3a9a1feb-e89d-457c-9d53-ac751b198ebe', + '7a1ce5f8-8d00-431c-ad1b-97a846512804', + 'path', + 'code-server', + 'f963c4f0-55b7-4813-8b61-ea58536754db', + '2023-08-14 13:00:12.323196+00', + '2023-08-14 13:09:26.277073+00', + 1 + ), + ( + 2, + '30095c71-380b-457a-8995-97b8ee6e5307', + '3a9a1feb-e89d-457c-9d53-ac751b198ebe', + '7a1ce5f8-8d00-431c-ad1b-97a846512804', + 'terminal', + '', + '5a034459-73e4-4642-91b8-80b0f718f29e', + '2023-08-14 13:00:00+00', + '2023-08-14 13:01:00+00', + 4 + ), + ( + 1, + '30095c71-380b-457a-8995-97b8ee6e5307', + '3a9a1feb-e89d-457c-9d53-ac751b198ebe', + '7a1ce5f8-8d00-431c-ad1b-97a846512804', + 'path', + 'code-server', + 'd7a0d8e1-069e-421d-b876-b5d0ddbcaf6d', + '2023-08-14 13:00:00+00', + '2023-08-14 13:01:00+00', + 36 + ); From 7e2ec9f7221b2722d128031ed36623f00936dffe Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Wed, 16 Aug 2023 09:05:41 +0000 Subject: [PATCH 17/25] use 15s timeout and timer reset --- coderd/workspaceapps/stats.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/coderd/workspaceapps/stats.go b/coderd/workspaceapps/stats.go index c0bf55783de40..0b25490e66830 100644 --- a/coderd/workspaceapps/stats.go +++ b/coderd/workspaceapps/stats.go @@ -348,27 +348,31 @@ func (sc *StatsCollector) start() { }() sc.opts.Logger.Info(sc.ctx, "workspace app stats collector started") - ticker := time.NewTicker(sc.opts.ReportInterval) - defer ticker.Stop() + t := time.NewTimer(sc.opts.ReportInterval) + defer t.Stop() var reportFlushDone chan<- struct{} done := false for !done { select { case <-sc.ctx.Done(): - ticker.Stop() + t.Stop() done = true - case <-ticker.C: + case <-t.C: case reportFlushDone = <-sc.opts.Flush: } // Ensure we don't hold up this request for too long. Add a few // seconds to prevent very short intervals from causing a timeout. - ctx, cancel := context.WithTimeout(context.Background(), sc.opts.ReportInterval+5*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) //nolint:gocritic // Inserting app stats is a system function. _ = sc.flush(dbauthz.AsSystemRestricted(ctx)) cancel() + if !done { + t.Reset(sc.opts.ReportInterval) + } + // For tests. 
if reportFlushDone != nil { reportFlushDone <- struct{}{} From 70f624984c55f4139edbc40380e547299568de66 Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Wed, 16 Aug 2023 09:08:00 +0000 Subject: [PATCH 18/25] pass now to rollup --- coderd/workspaceapps/stats.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/coderd/workspaceapps/stats.go b/coderd/workspaceapps/stats.go index 0b25490e66830..46230a20701e5 100644 --- a/coderd/workspaceapps/stats.go +++ b/coderd/workspaceapps/stats.go @@ -216,7 +216,7 @@ func (sc *StatsCollector) Collect(report StatsReport) { // rollup performs stats rollup for sessions that fall within the // configured rollup window. For sessions longer than the window, // we report them individually. -func (sc *StatsCollector) rollup() []StatsReport { +func (sc *StatsCollector) rollup(now time.Time) []StatsReport { sc.mu.Lock() defer sc.mu.Unlock() @@ -259,7 +259,7 @@ func (sc *StatsCollector) rollup() []StatsReport { rollupChanged = true continue } - if stat.SessionEndedAt.IsZero() && sc.opts.Now().Sub(stat.SessionStartedAt) <= sc.opts.RollupWindow { + if stat.SessionEndedAt.IsZero() && now.Sub(stat.SessionStartedAt) <= sc.opts.RollupWindow { // This is an incomplete session, wait and see if it'll be rolled up or not. newGroup = append(newGroup, stat) continue @@ -272,7 +272,7 @@ func (sc *StatsCollector) rollup() []StatsReport { // Report an end time for incomplete sessions, it will // be updated later. This ensures that data in the DB // will have an end time even if the service is stopped. - r.SessionEndedAt = sc.opts.Now().UTC() // Use UTC like database.Now(). + r.SessionEndedAt = now.UTC() // Use UTC like database.Now(). } report = append(report, r) // Report it (ended or incomplete). if stat.SessionEndedAt.IsZero() { @@ -288,7 +288,7 @@ func (sc *StatsCollector) rollup() []StatsReport { // Keep the group around until the next rollup window has passed // in case data was collected late. 
- if len(newGroup) == 1 && rolledUp.SessionEndedAt.Add(sc.opts.RollupWindow).Before(sc.opts.Now()) { + if len(newGroup) == 1 && rolledUp.SessionEndedAt.Add(sc.opts.RollupWindow).Before(now) { delete(sc.groupedStats, g) } } @@ -321,7 +321,8 @@ func (sc *StatsCollector) flush(ctx context.Context) (err error) { sc.backlog = nil } - stats := sc.rollup() + now := sc.opts.Now() + stats := sc.rollup(now) if len(stats) == 0 { return nil } From 5615e27122696c1a3d3b96df3c511992d8744dab Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Wed, 16 Aug 2023 09:19:01 +0000 Subject: [PATCH 19/25] track last insert id in fakedb --- coderd/database/dbfake/dbfake.go | 65 +++++++++++++++----------------- 1 file changed, 31 insertions(+), 34 deletions(-) diff --git a/coderd/database/dbfake/dbfake.go b/coderd/database/dbfake/dbfake.go index a4579bd4cc42e..8de3ee49be5c1 100644 --- a/coderd/database/dbfake/dbfake.go +++ b/coderd/database/dbfake/dbfake.go @@ -114,34 +114,35 @@ type data struct { userLinks []database.UserLink // New tables - workspaceAgentStats []database.WorkspaceAgentStat - auditLogs []database.AuditLog - files []database.File - gitAuthLinks []database.GitAuthLink - gitSSHKey []database.GitSSHKey - groupMembers []database.GroupMember - groups []database.Group - licenses []database.License - parameterSchemas []database.ParameterSchema - provisionerDaemons []database.ProvisionerDaemon - provisionerJobLogs []database.ProvisionerJobLog - provisionerJobs []database.ProvisionerJob - replicas []database.Replica - templateVersions []database.TemplateVersionTable - templateVersionParameters []database.TemplateVersionParameter - templateVersionVariables []database.TemplateVersionVariable - templates []database.TemplateTable - workspaceAgents []database.WorkspaceAgent - workspaceAgentMetadata []database.WorkspaceAgentMetadatum - workspaceAgentLogs []database.WorkspaceAgentLog - workspaceApps []database.WorkspaceApp - workspaceAppStats []database.WorkspaceAppStat - workspaceBuilds []database.WorkspaceBuildTable - workspaceBuildParameters []database.WorkspaceBuildParameter - workspaceResourceMetadata []database.WorkspaceResourceMetadatum - workspaceResources []database.WorkspaceResource - workspaces []database.Workspace - workspaceProxies []database.WorkspaceProxy + workspaceAgentStats []database.WorkspaceAgentStat + auditLogs []database.AuditLog + files []database.File + gitAuthLinks []database.GitAuthLink + gitSSHKey []database.GitSSHKey + groupMembers []database.GroupMember + groups []database.Group + licenses []database.License + parameterSchemas []database.ParameterSchema + provisionerDaemons []database.ProvisionerDaemon + provisionerJobLogs []database.ProvisionerJobLog + provisionerJobs []database.ProvisionerJob + replicas []database.Replica + templateVersions []database.TemplateVersionTable + templateVersionParameters []database.TemplateVersionParameter + templateVersionVariables []database.TemplateVersionVariable + templates []database.TemplateTable + workspaceAgents []database.WorkspaceAgent + workspaceAgentMetadata []database.WorkspaceAgentMetadatum + workspaceAgentLogs []database.WorkspaceAgentLog + workspaceApps []database.WorkspaceApp + workspaceAppStatsLastInsertID int64 + workspaceAppStats []database.WorkspaceAppStat + workspaceBuilds []database.WorkspaceBuildTable + workspaceBuildParameters []database.WorkspaceBuildParameter + workspaceResourceMetadata []database.WorkspaceResourceMetadatum + workspaceResources []database.WorkspaceResource + workspaces []database.Workspace + 
workspaceProxies []database.WorkspaceProxy // Locks is a map of lock names. Any keys within the map are currently // locked. locks map[int64]struct{} @@ -4346,14 +4347,10 @@ func (q *FakeQuerier) InsertWorkspaceAppStats(_ context.Context, arg database.In q.mutex.Lock() defer q.mutex.Unlock() - nextID := int64(1) - if len(q.workspaceAppStats) > 0 { - nextID = q.workspaceAppStats[len(q.workspaceAppStats)-1].ID + 1 - } InsertWorkspaceAppStatsLoop: for i := 0; i < len(arg.UserID); i++ { stat := database.WorkspaceAppStat{ - ID: nextID, + ID: q.workspaceAppStatsLastInsertID + 1, UserID: arg.UserID[i], WorkspaceID: arg.WorkspaceID[i], AgentID: arg.AgentID[i], @@ -4373,7 +4370,7 @@ InsertWorkspaceAppStatsLoop: } } q.workspaceAppStats = append(q.workspaceAppStats, stat) - nextID++ + q.workspaceAppStatsLastInsertID++ } return nil From 9418abd01742210b5557b6929d33cd054f28d95c Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Wed, 16 Aug 2023 09:20:35 +0000 Subject: [PATCH 20/25] fix migration indentation --- .../migrations/000150_workspace_app_stats.up.sql | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/coderd/database/migrations/000150_workspace_app_stats.up.sql b/coderd/database/migrations/000150_workspace_app_stats.up.sql index 70d7bc6fb70a0..ace09e52760f6 100644 --- a/coderd/database/migrations/000150_workspace_app_stats.up.sql +++ b/coderd/database/migrations/000150_workspace_app_stats.up.sql @@ -1,13 +1,13 @@ CREATE TABLE workspace_app_stats ( - id BIGSERIAL PRIMARY KEY, - user_id uuid NOT NULL REFERENCES users (id), + id BIGSERIAL PRIMARY KEY, + user_id uuid NOT NULL REFERENCES users (id), workspace_id uuid NOT NULL REFERENCES workspaces (id), - agent_id uuid NOT NULL REFERENCES workspace_agents (id), + agent_id uuid NOT NULL REFERENCES workspace_agents (id), access_method text NOT NULL, slug_or_port text NOT NULL, session_id uuid NOT NULL, - session_started_at timestamptz NOT NULL, - session_ended_at timestamptz NOT NULL, + session_started_at timestamptz NOT NULL, + session_ended_at timestamptz NOT NULL, requests integer NOT NULL, -- Set a unique constraint to allow upserting the session_ended_at From a032f8a8b97f346bbb26e7bac9072157ca9dd543 Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Wed, 16 Aug 2023 09:20:54 +0000 Subject: [PATCH 21/25] remove sqlc type alias --- coderd/database/sqlc.yaml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/coderd/database/sqlc.yaml b/coderd/database/sqlc.yaml index f5c6ac6996473..526f62d3a5ca5 100644 --- a/coderd/database/sqlc.yaml +++ b/coderd/database/sqlc.yaml @@ -27,13 +27,6 @@ overrides: - column: "template_with_users.group_acl" go_type: type: "TemplateACL" - # Type alias to allow batch inserts of timestamptz[] arrays containing - # nullable values. 
- - db_type: "nulltimestamptz" - engine: "postgresql" - go_type: - import: "database/sql" - type: "NullTime" rename: template: TemplateTable template_with_user: Template From 94563ef701c727d7f18c2a072457060d7ec1df30 Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Wed, 16 Aug 2023 09:22:30 +0000 Subject: [PATCH 22/25] defer collect stats --- coderd/workspaceapps/proxy.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/coderd/workspaceapps/proxy.go b/coderd/workspaceapps/proxy.go index f02c08c16fcd2..da2f5e6a098f1 100644 --- a/coderd/workspaceapps/proxy.go +++ b/coderd/workspaceapps/proxy.go @@ -594,11 +594,13 @@ func (s *Server) proxyWorkspaceApp(rw http.ResponseWriter, r *http.Request, appT report := newStatsReportFromSignedToken(appToken) s.collectStats(report) + defer func() { + // We must use defer here because ServeHTTP may panic. + report.SessionEndedAt = database.Now() + s.collectStats(report) + }() proxy.ServeHTTP(rw, r) - - report.SessionEndedAt = database.Now() - s.collectStats(report) } // workspaceAgentPTY spawns a PTY and pipes it over a WebSocket. @@ -693,12 +695,13 @@ func (s *Server) workspaceAgentPTY(rw http.ResponseWriter, r *http.Request) { report := newStatsReportFromSignedToken(*appToken) s.collectStats(report) + defer func() { + report.SessionEndedAt = database.Now() + s.collectStats(report) + }() agentssh.Bicopy(ctx, wsNetConn, ptNetConn) log.Debug(ctx, "pty Bicopy finished") - - report.SessionEndedAt = database.Now() - s.collectStats(report) } func (s *Server) collectStats(stats StatsReport) { From e27e9050922827927a1973f3738fd5765c3cbf50 Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Wed, 16 Aug 2023 09:26:09 +0000 Subject: [PATCH 23/25] remove stale comment --- coderd/workspaceapps/stats.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/coderd/workspaceapps/stats.go b/coderd/workspaceapps/stats.go index 46230a20701e5..8182e22c46f9e 100644 --- a/coderd/workspaceapps/stats.go +++ b/coderd/workspaceapps/stats.go @@ -363,8 +363,6 @@ func (sc *StatsCollector) start() { case reportFlushDone = <-sc.opts.Flush: } - // Ensure we don't hold up this request for too long. Add a few - // seconds to prevent very short intervals from causing a timeout. ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) //nolint:gocritic // Inserting app stats is a system function. _ = sc.flush(dbauthz.AsSystemRestricted(ctx)) From be67aef0885cf692942b7d1c2c4c359a09ed0a59 Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Wed, 16 Aug 2023 09:36:08 +0000 Subject: [PATCH 24/25] preallocate batch slices --- coderd/workspaceapps/stats.go | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/coderd/workspaceapps/stats.go b/coderd/workspaceapps/stats.go index 8182e22c46f9e..251e8ec67f350 100644 --- a/coderd/workspaceapps/stats.go +++ b/coderd/workspaceapps/stats.go @@ -72,7 +72,21 @@ func NewStatsDBReporter(db database.Store, batchSize int) *StatsDBReporter { // Report writes the given StatsReports to the database. 
func (r *StatsDBReporter) Report(ctx context.Context, stats []StatsReport) error { err := r.db.InTx(func(tx database.Store) error { - var batch database.InsertWorkspaceAppStatsParams + maxBatchSize := r.batchSize + if len(stats) < maxBatchSize { + maxBatchSize = len(stats) + } + batch := database.InsertWorkspaceAppStatsParams{ + UserID: make([]uuid.UUID, 0, maxBatchSize), + WorkspaceID: make([]uuid.UUID, 0, maxBatchSize), + AgentID: make([]uuid.UUID, 0, maxBatchSize), + AccessMethod: make([]string, 0, maxBatchSize), + SlugOrPort: make([]string, 0, maxBatchSize), + SessionID: make([]uuid.UUID, 0, maxBatchSize), + SessionStartedAt: make([]time.Time, 0, maxBatchSize), + SessionEndedAt: make([]time.Time, 0, maxBatchSize), + Requests: make([]int32, 0, maxBatchSize), + } for _, stat := range stats { batch.UserID = append(batch.UserID, stat.UserID) batch.WorkspaceID = append(batch.WorkspaceID, stat.WorkspaceID) @@ -91,7 +105,15 @@ func (r *StatsDBReporter) Report(ctx context.Context, stats []StatsReport) error } // Reset batch. - batch = database.InsertWorkspaceAppStatsParams{} + batch.UserID = batch.UserID[:0] + batch.WorkspaceID = batch.WorkspaceID[:0] + batch.AgentID = batch.AgentID[:0] + batch.AccessMethod = batch.AccessMethod[:0] + batch.SlugOrPort = batch.SlugOrPort[:0] + batch.SessionID = batch.SessionID[:0] + batch.SessionStartedAt = batch.SessionStartedAt[:0] + batch.SessionEndedAt = batch.SessionEndedAt[:0] + batch.Requests = batch.Requests[:0] } } if len(batch.UserID) > 0 { From c9272c53126702335c616d302e6e38c84f4986db Mon Sep 17 00:00:00 2001 From: Mathias Fredriksson Date: Wed, 16 Aug 2023 10:10:07 +0000 Subject: [PATCH 25/25] lower log level to debug for start/stop --- coderd/workspaceapps/stats.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/coderd/workspaceapps/stats.go b/coderd/workspaceapps/stats.go index 251e8ec67f350..72a8154d5e89c 100644 --- a/coderd/workspaceapps/stats.go +++ b/coderd/workspaceapps/stats.go @@ -367,9 +367,9 @@ func (sc *StatsCollector) Close() error { func (sc *StatsCollector) start() { defer func() { close(sc.done) - sc.opts.Logger.Info(sc.ctx, "workspace app stats collector stopped") + sc.opts.Logger.Debug(sc.ctx, "workspace app stats collector stopped") }() - sc.opts.Logger.Info(sc.ctx, "workspace app stats collector started") + sc.opts.Logger.Debug(sc.ctx, "workspace app stats collector started") t := time.NewTimer(sc.opts.ReportInterval) defer t.Stop()