diff --git a/cli/testdata/coder_list_--output_json.golden b/cli/testdata/coder_list_--output_json.golden index 822998329be5b..ba560a39f59d7 100644 --- a/cli/testdata/coder_list_--output_json.golden +++ b/cli/testdata/coder_list_--output_json.golden @@ -55,7 +55,8 @@ "template_name": "", "template_display_name": "", "template_icon": "" - } + }, + "logs_overflowed": false }, "reason": "initiator", "resources": [], diff --git a/cli/testdata/coder_provisioner_jobs_list_--help.golden b/cli/testdata/coder_provisioner_jobs_list_--help.golden index f380a0334867c..8e22f78e978f2 100644 --- a/cli/testdata/coder_provisioner_jobs_list_--help.golden +++ b/cli/testdata/coder_provisioner_jobs_list_--help.golden @@ -11,7 +11,7 @@ OPTIONS: -O, --org string, $CODER_ORGANIZATION Select which organization (uuid or name) to use. - -c, --column [id|created at|started at|completed at|canceled at|error|error code|status|worker id|worker name|file id|tags|queue position|queue size|organization id|template version id|workspace build id|type|available workers|template version name|template id|template name|template display name|template icon|workspace id|workspace name|organization|queue] (default: created at,id,type,template display name,status,queue,tags) + -c, --column [id|created at|started at|completed at|canceled at|error|error code|status|worker id|worker name|file id|tags|queue position|queue size|organization id|template version id|workspace build id|type|available workers|template version name|template id|template name|template display name|template icon|workspace id|workspace name|logs overflowed|organization|queue] (default: created at,id,type,template display name,status,queue,tags) Columns to display in table output. -l, --limit int, $CODER_PROVISIONER_JOB_LIST_LIMIT (default: 50) diff --git a/cli/testdata/coder_provisioner_jobs_list_--output_json.golden b/cli/testdata/coder_provisioner_jobs_list_--output_json.golden index e36723765b4df..6ccf672360a55 100644 --- a/cli/testdata/coder_provisioner_jobs_list_--output_json.golden +++ b/cli/testdata/coder_provisioner_jobs_list_--output_json.golden @@ -26,6 +26,7 @@ "template_display_name": "", "template_icon": "" }, + "logs_overflowed": false, "organization_name": "Coder" }, { @@ -57,6 +58,7 @@ "workspace_id": "===========[workspace ID]===========", "workspace_name": "test-workspace" }, + "logs_overflowed": false, "organization_name": "Coder" } ] diff --git a/coderd/apidoc/docs.go b/coderd/apidoc/docs.go index 7c723994d38d2..7087b5dcc977f 100644 --- a/coderd/apidoc/docs.go +++ b/coderd/apidoc/docs.go @@ -15261,6 +15261,9 @@ const docTemplate = `{ "input": { "$ref": "#/definitions/codersdk.ProvisionerJobInput" }, + "logs_overflowed": { + "type": "boolean" + }, "metadata": { "$ref": "#/definitions/codersdk.ProvisionerJobMetadata" }, diff --git a/coderd/apidoc/swagger.json b/coderd/apidoc/swagger.json index 28a38ffd32d70..3a1443cd76dec 100644 --- a/coderd/apidoc/swagger.json +++ b/coderd/apidoc/swagger.json @@ -13852,6 +13852,9 @@ "input": { "$ref": "#/definitions/codersdk.ProvisionerJobInput" }, + "logs_overflowed": { + "type": "boolean" + }, "metadata": { "$ref": "#/definitions/codersdk.ProvisionerJobMetadata" }, diff --git a/coderd/database/dbauthz/dbauthz.go b/coderd/database/dbauthz/dbauthz.go index 257cbc6e6b142..72489ea92d572 100644 --- a/coderd/database/dbauthz/dbauthz.go +++ b/coderd/database/dbauthz/dbauthz.go @@ -4489,6 +4489,22 @@ func (q *querier) UpdateProvisionerJobByID(ctx context.Context, arg database.Upd return 
q.db.UpdateProvisionerJobByID(ctx, arg) } +func (q *querier) UpdateProvisionerJobLogsLength(ctx context.Context, arg database.UpdateProvisionerJobLogsLengthParams) error { + // These log bookkeeping writes happen on behalf of provisioner daemons, so reuse the same ActionUpdate check on ResourceProvisionerJobs as UpdateProvisionerJobByID above. + if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceProvisionerJobs); err != nil { + return err + } + return q.db.UpdateProvisionerJobLogsLength(ctx, arg) +} + +func (q *querier) UpdateProvisionerJobLogsOverflowed(ctx context.Context, arg database.UpdateProvisionerJobLogsOverflowedParams) error { + // These log bookkeeping writes happen on behalf of provisioner daemons, so reuse the same ActionUpdate check on ResourceProvisionerJobs as UpdateProvisionerJobByID above. + if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceProvisionerJobs); err != nil { + return err + } + return q.db.UpdateProvisionerJobLogsOverflowed(ctx, arg) +} + +func (q *querier) UpdateProvisionerJobWithCancelByID(ctx context.Context, arg database.UpdateProvisionerJobWithCancelByIDParams) error { + // TODO: Remove this once we have a proper rbac check for provisioner jobs. + // Details in https://github.com/coder/coder/issues/16160 diff --git a/coderd/database/dbauthz/dbauthz_test.go b/coderd/database/dbauthz/dbauthz_test.go index dc86d598617fd..14ab09bada09a 100644 --- a/coderd/database/dbauthz/dbauthz_test.go +++ b/coderd/database/dbauthz/dbauthz_test.go @@ -4341,6 +4341,20 @@ func (s *MethodTestSuite) TestSystemFunctions() { UpdatedAt: time.Now(), }).Asserts(rbac.ResourceProvisionerJobs, policy.ActionUpdate) })) + s.Run("UpdateProvisionerJobLogsLength", s.Subtest(func(db database.Store, check *expects) { + j := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{}) + check.Args(database.UpdateProvisionerJobLogsLengthParams{ + ID: j.ID, + LogsLength: 100, + }).Asserts(rbac.ResourceProvisionerJobs, policy.ActionUpdate) + })) + s.Run("UpdateProvisionerJobLogsOverflowed", s.Subtest(func(db database.Store, check *expects) { + j := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{}) + check.Args(database.UpdateProvisionerJobLogsOverflowedParams{ + ID: j.ID, + LogsOverflowed: true, + }).Asserts(rbac.ResourceProvisionerJobs, policy.ActionUpdate) + })) s.Run("InsertProvisionerJob", s.Subtest(func(db database.Store, check *expects) { dbtestutil.DisableForeignKeysAndTriggers(s.T(), db) check.Args(database.InsertProvisionerJobParams{ diff --git a/coderd/database/dbfake/dbfake.go b/coderd/database/dbfake/dbfake.go index 99d3c72ab4be3..e7c00255d47c2 100644 --- a/coderd/database/dbfake/dbfake.go +++ b/coderd/database/dbfake/dbfake.go @@ -179,6 +179,7 @@ func (b WorkspaceBuildBuilder) Do() WorkspaceResponse { Input: payload, Tags: map[string]string{}, TraceMetadata: pqtype.NullRawMessage{}, + LogsOverflowed: false, }) require.NoError(b.t, err, "insert job") diff --git a/coderd/database/dbgen/dbgen.go b/coderd/database/dbgen/dbgen.go index 71a86c329a5ad..81d9efd1cd3e3 100644 --- a/coderd/database/dbgen/dbgen.go +++ b/coderd/database/dbgen/dbgen.go @@ -775,6 +775,7 @@ func ProvisionerJob(t testing.TB, db database.Store, ps pubsub.Pubsub, orig data Input: takeFirstSlice(orig.Input, []byte("{}")), Tags: tags, TraceMetadata: pqtype.NullRawMessage{}, + LogsOverflowed: false, }) require.NoError(t, err, "insert job") if ps != nil { diff --git a/coderd/database/dbgen/dbgen_test.go b/coderd/database/dbgen/dbgen_test.go index 7653176da8079..872704fa1dce0 100644 --- a/coderd/database/dbgen/dbgen_test.go +++ b/coderd/database/dbgen/dbgen_test.go @@ -168,6 +168,8 @@ func TestGenerator(t *testing.T) { DeletingAt: w.DeletingAt, AutomaticUpdates: w.AutomaticUpdates,
Favorite: w.Favorite, + GroupACL: database.WorkspaceACL{}, + UserACL: database.WorkspaceACL{}, } require.Equal(t, exp, table) }) diff --git a/coderd/database/dbmetrics/querymetrics.go b/coderd/database/dbmetrics/querymetrics.go index 811d945ac7da9..3fffb29966735 100644 --- a/coderd/database/dbmetrics/querymetrics.go +++ b/coderd/database/dbmetrics/querymetrics.go @@ -2784,6 +2784,20 @@ func (m queryMetricsStore) UpdateProvisionerJobByID(ctx context.Context, arg dat return err } +func (m queryMetricsStore) UpdateProvisionerJobLogsLength(ctx context.Context, arg database.UpdateProvisionerJobLogsLengthParams) error { + start := time.Now() + r0 := m.s.UpdateProvisionerJobLogsLength(ctx, arg) + m.queryLatencies.WithLabelValues("UpdateProvisionerJobLogsLength").Observe(time.Since(start).Seconds()) + return r0 +} + +func (m queryMetricsStore) UpdateProvisionerJobLogsOverflowed(ctx context.Context, arg database.UpdateProvisionerJobLogsOverflowedParams) error { + start := time.Now() + r0 := m.s.UpdateProvisionerJobLogsOverflowed(ctx, arg) + m.queryLatencies.WithLabelValues("UpdateProvisionerJobLogsOverflowed").Observe(time.Since(start).Seconds()) + return r0 +} + func (m queryMetricsStore) UpdateProvisionerJobWithCancelByID(ctx context.Context, arg database.UpdateProvisionerJobWithCancelByIDParams) error { start := time.Now() err := m.s.UpdateProvisionerJobWithCancelByID(ctx, arg) diff --git a/coderd/database/dbmock/dbmock.go b/coderd/database/dbmock/dbmock.go index b20c3d06209b5..20bc17117e0eb 100644 --- a/coderd/database/dbmock/dbmock.go +++ b/coderd/database/dbmock/dbmock.go @@ -5958,6 +5958,34 @@ func (mr *MockStoreMockRecorder) UpdateProvisionerJobByID(ctx, arg any) *gomock. return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateProvisionerJobByID", reflect.TypeOf((*MockStore)(nil).UpdateProvisionerJobByID), ctx, arg) } +// UpdateProvisionerJobLogsLength mocks base method. +func (m *MockStore) UpdateProvisionerJobLogsLength(ctx context.Context, arg database.UpdateProvisionerJobLogsLengthParams) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateProvisionerJobLogsLength", ctx, arg) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateProvisionerJobLogsLength indicates an expected call of UpdateProvisionerJobLogsLength. +func (mr *MockStoreMockRecorder) UpdateProvisionerJobLogsLength(ctx, arg any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateProvisionerJobLogsLength", reflect.TypeOf((*MockStore)(nil).UpdateProvisionerJobLogsLength), ctx, arg) +} + +// UpdateProvisionerJobLogsOverflowed mocks base method. +func (m *MockStore) UpdateProvisionerJobLogsOverflowed(ctx context.Context, arg database.UpdateProvisionerJobLogsOverflowedParams) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateProvisionerJobLogsOverflowed", ctx, arg) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateProvisionerJobLogsOverflowed indicates an expected call of UpdateProvisionerJobLogsOverflowed. +func (mr *MockStoreMockRecorder) UpdateProvisionerJobLogsOverflowed(ctx, arg any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateProvisionerJobLogsOverflowed", reflect.TypeOf((*MockStore)(nil).UpdateProvisionerJobLogsOverflowed), ctx, arg) +} + // UpdateProvisionerJobWithCancelByID mocks base method. 
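// Illustrative sketch, not part of the patch: how a unit test might set expectations on the
// MockStore methods generated above. The test name and assertions are assumptions; the mock
// constructor, the recorder methods, and the params struct come from this diff.
package dbmock_test

import (
	"context"
	"testing"

	"github.com/google/uuid"
	"github.com/stretchr/testify/require"
	"go.uber.org/mock/gomock"

	"github.com/coder/coder/v2/coderd/database"
	"github.com/coder/coder/v2/coderd/database/dbmock"
)

func TestUpdateProvisionerJobLogsOverflowedExpectation(t *testing.T) {
	t.Parallel()

	ctrl := gomock.NewController(t)
	store := dbmock.NewMockStore(ctrl)

	// Expect the overflow flag to be written exactly once for this job.
	store.EXPECT().
		UpdateProvisionerJobLogsOverflowed(gomock.Any(), gomock.Any()).
		Return(nil).
		Times(1)

	err := store.UpdateProvisionerJobLogsOverflowed(context.Background(), database.UpdateProvisionerJobLogsOverflowedParams{
		ID:             uuid.New(),
		LogsOverflowed: true,
	})
	require.NoError(t, err)
}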
func (m *MockStore) UpdateProvisionerJobWithCancelByID(ctx context.Context, arg database.UpdateProvisionerJobWithCancelByIDParams) error { m.ctrl.T.Helper() diff --git a/coderd/database/dump.sql b/coderd/database/dump.sql index 49c12b123e998..053b5302d3e38 100644 --- a/coderd/database/dump.sql +++ b/coderd/database/dump.sql @@ -1419,11 +1419,18 @@ CASE WHEN (started_at IS NULL) THEN 'pending'::provisioner_job_status ELSE 'running'::provisioner_job_status END -END) STORED NOT NULL +END) STORED NOT NULL, + logs_length integer DEFAULT 0 NOT NULL, + logs_overflowed boolean DEFAULT false NOT NULL, + CONSTRAINT max_provisioner_logs_length CHECK ((logs_length <= 1048576)) ); COMMENT ON COLUMN provisioner_jobs.job_status IS 'Computed column to track the status of the job.'; +COMMENT ON COLUMN provisioner_jobs.logs_length IS 'Total length of provisioner logs'; + +COMMENT ON COLUMN provisioner_jobs.logs_overflowed IS 'Whether the provisioner logs overflowed in length'; + CREATE TABLE provisioner_keys ( id uuid NOT NULL, created_at timestamp with time zone NOT NULL, @@ -2262,7 +2269,9 @@ CREATE TABLE workspaces ( deleting_at timestamp with time zone, automatic_updates automatic_updates DEFAULT 'never'::automatic_updates NOT NULL, favorite boolean DEFAULT false NOT NULL, - next_start_at timestamp with time zone + next_start_at timestamp with time zone, + group_acl jsonb DEFAULT '{}'::jsonb NOT NULL, + user_acl jsonb DEFAULT '{}'::jsonb NOT NULL ); COMMENT ON COLUMN workspaces.favorite IS 'Favorite is true if the workspace owner has favorited the workspace.'; @@ -2441,6 +2450,8 @@ CREATE VIEW workspaces_expanded AS workspaces.automatic_updates, workspaces.favorite, workspaces.next_start_at, + workspaces.group_acl, + workspaces.user_acl, visible_users.avatar_url AS owner_avatar_url, visible_users.username AS owner_username, visible_users.name AS owner_name, diff --git a/coderd/database/errors.go b/coderd/database/errors.go index 66c702de24445..0388ea2cbff49 100644 --- a/coderd/database/errors.go +++ b/coderd/database/errors.go @@ -79,3 +79,11 @@ func IsWorkspaceAgentLogsLimitError(err error) bool { return false } + +func IsProvisionerJobLogsLimitError(err error) bool { + var pqErr *pq.Error + if errors.As(err, &pqErr) { + return pqErr.Constraint == "max_provisioner_logs_length" && pqErr.Table == "provisioner_jobs" + } + return false +} diff --git a/coderd/database/migrations/000354_workspace_acl.down.sql b/coderd/database/migrations/000354_workspace_acl.down.sql new file mode 100644 index 0000000000000..97f0acc6b03c8 --- /dev/null +++ b/coderd/database/migrations/000354_workspace_acl.down.sql @@ -0,0 +1,40 @@ +DROP VIEW workspaces_expanded; + +ALTER TABLE workspaces + DROP COLUMN group_acl, + DROP COLUMN user_acl; + +CREATE VIEW workspaces_expanded AS + SELECT workspaces.id, + workspaces.created_at, + workspaces.updated_at, + workspaces.owner_id, + workspaces.organization_id, + workspaces.template_id, + workspaces.deleted, + workspaces.name, + workspaces.autostart_schedule, + workspaces.ttl, + workspaces.last_used_at, + workspaces.dormant_at, + workspaces.deleting_at, + workspaces.automatic_updates, + workspaces.favorite, + workspaces.next_start_at, + visible_users.avatar_url AS owner_avatar_url, + visible_users.username AS owner_username, + visible_users.name AS owner_name, + organizations.name AS organization_name, + organizations.display_name AS organization_display_name, + organizations.icon AS organization_icon, + organizations.description AS organization_description, + templates.name AS 
template_name, + templates.display_name AS template_display_name, + templates.icon AS template_icon, + templates.description AS template_description + FROM (((workspaces + JOIN visible_users ON ((workspaces.owner_id = visible_users.id))) + JOIN organizations ON ((workspaces.organization_id = organizations.id))) + JOIN templates ON ((workspaces.template_id = templates.id))); + +COMMENT ON VIEW workspaces_expanded IS 'Joins in the display name information such as username, avatar, and organization name.'; diff --git a/coderd/database/migrations/000354_workspace_acl.up.sql b/coderd/database/migrations/000354_workspace_acl.up.sql new file mode 100644 index 0000000000000..6d6a375679aa5 --- /dev/null +++ b/coderd/database/migrations/000354_workspace_acl.up.sql @@ -0,0 +1,43 @@ +DROP VIEW workspaces_expanded; + +ALTER TABLE workspaces + ADD COLUMN group_acl jsonb not null default '{}'::jsonb, + ADD COLUMN user_acl jsonb not null default '{}'::jsonb; + +-- Recreate the view, now including the new columns +CREATE VIEW workspaces_expanded AS + SELECT workspaces.id, + workspaces.created_at, + workspaces.updated_at, + workspaces.owner_id, + workspaces.organization_id, + workspaces.template_id, + workspaces.deleted, + workspaces.name, + workspaces.autostart_schedule, + workspaces.ttl, + workspaces.last_used_at, + workspaces.dormant_at, + workspaces.deleting_at, + workspaces.automatic_updates, + workspaces.favorite, + workspaces.next_start_at, + workspaces.group_acl, + workspaces.user_acl, + visible_users.avatar_url AS owner_avatar_url, + visible_users.username AS owner_username, + visible_users.name AS owner_name, + organizations.name AS organization_name, + organizations.display_name AS organization_display_name, + organizations.icon AS organization_icon, + organizations.description AS organization_description, + templates.name AS template_name, + templates.display_name AS template_display_name, + templates.icon AS template_icon, + templates.description AS template_description + FROM (((workspaces + JOIN visible_users ON ((workspaces.owner_id = visible_users.id))) + JOIN organizations ON ((workspaces.organization_id = organizations.id))) + JOIN templates ON ((workspaces.template_id = templates.id))); + +COMMENT ON VIEW workspaces_expanded IS 'Joins in the display name information such as username, avatar, and organization name.'; diff --git a/coderd/database/migrations/000355_add_provisioner_logs_overflowed.down.sql b/coderd/database/migrations/000355_add_provisioner_logs_overflowed.down.sql new file mode 100644 index 0000000000000..39f34a2b491ee --- /dev/null +++ b/coderd/database/migrations/000355_add_provisioner_logs_overflowed.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE provisioner_jobs DROP COLUMN logs_length; +ALTER TABLE provisioner_jobs DROP COLUMN logs_overflowed; \ No newline at end of file diff --git a/coderd/database/migrations/000355_add_provisioner_logs_overflowed.up.sql b/coderd/database/migrations/000355_add_provisioner_logs_overflowed.up.sql new file mode 100644 index 0000000000000..80f58cf5c6693 --- /dev/null +++ b/coderd/database/migrations/000355_add_provisioner_logs_overflowed.up.sql @@ -0,0 +1,6 @@ + -- Add logs length tracking and overflow flag, similar to workspace agents + ALTER TABLE provisioner_jobs ADD COLUMN logs_length integer NOT NULL DEFAULT 0 CONSTRAINT max_provisioner_logs_length CHECK (logs_length <= 1048576); + ALTER TABLE provisioner_jobs ADD COLUMN logs_overflowed boolean NOT NULL DEFAULT false; + + COMMENT ON COLUMN provisioner_jobs.logs_length IS 'Total length of 
provisioner logs'; + COMMENT ON COLUMN provisioner_jobs.logs_overflowed IS 'Whether the provisioner logs overflowed in length'; diff --git a/coderd/database/modelmethods.go b/coderd/database/modelmethods.go index b49fa113d4b12..5347e8de37ebe 100644 --- a/coderd/database/modelmethods.go +++ b/coderd/database/modelmethods.go @@ -242,6 +242,8 @@ func (w Workspace) WorkspaceTable() WorkspaceTable { AutomaticUpdates: w.AutomaticUpdates, Favorite: w.Favorite, NextStartAt: w.NextStartAt, + GroupACL: w.GroupACL, + UserACL: w.UserACL, } } diff --git a/coderd/database/modelqueries.go b/coderd/database/modelqueries.go index fc7cda1a506e2..2a0abbccfdd9b 100644 --- a/coderd/database/modelqueries.go +++ b/coderd/database/modelqueries.go @@ -298,6 +298,8 @@ func (q *sqlQuerier) GetAuthorizedWorkspaces(ctx context.Context, arg GetWorkspa &i.AutomaticUpdates, &i.Favorite, &i.NextStartAt, + &i.GroupACL, + &i.UserACL, &i.OwnerAvatarUrl, &i.OwnerUsername, &i.OwnerName, diff --git a/coderd/database/models.go b/coderd/database/models.go index aad50e397950d..8b13c8a8af057 100644 --- a/coderd/database/models.go +++ b/coderd/database/models.go @@ -3384,6 +3384,10 @@ type ProvisionerJob struct { TraceMetadata pqtype.NullRawMessage `db:"trace_metadata" json:"trace_metadata"` // Computed column to track the status of the job. JobStatus ProvisionerJobStatus `db:"job_status" json:"job_status"` + // Total length of provisioner logs + LogsLength int32 `db:"logs_length" json:"logs_length"` + // Whether the provisioner logs overflowed in length + LogsOverflowed bool `db:"logs_overflowed" json:"logs_overflowed"` } type ProvisionerJobLog struct { @@ -3851,6 +3855,8 @@ type Workspace struct { AutomaticUpdates AutomaticUpdates `db:"automatic_updates" json:"automatic_updates"` Favorite bool `db:"favorite" json:"favorite"` NextStartAt sql.NullTime `db:"next_start_at" json:"next_start_at"` + GroupACL WorkspaceACL `db:"group_acl" json:"group_acl"` + UserACL WorkspaceACL `db:"user_acl" json:"user_acl"` OwnerAvatarUrl string `db:"owner_avatar_url" json:"owner_avatar_url"` OwnerUsername string `db:"owner_username" json:"owner_username"` OwnerName string `db:"owner_name" json:"owner_name"` @@ -4272,4 +4278,6 @@ type WorkspaceTable struct { // Favorite is true if the workspace owner has favorited the workspace. 
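// Illustrative sketch, not part of the patch: how the new database.IsProvisionerJobLogsLimitError
// helper (added in coderd/database/errors.go above) pairs with the max_provisioner_logs_length
// CHECK constraint created by this migration. The wrapper function and its name are assumptions;
// the queries, params structs, and the 1048576-byte cap come from this diff.
package example

import (
	"context"

	"github.com/google/uuid"

	"github.com/coder/coder/v2/coderd/database"
)

// bumpJobLogsLength adds the size of newly written logs to provisioner_jobs.logs_length.
// If the CHECK constraint rejects the update, the job is flagged as overflowed so callers
// can stop writing further logs.
func bumpJobLogsLength(ctx context.Context, db database.Store, jobID uuid.UUID, added int32) error {
	err := db.UpdateProvisionerJobLogsLength(ctx, database.UpdateProvisionerJobLogsLengthParams{
		ID:         jobID,
		LogsLength: added,
	})
	if database.IsProvisionerJobLogsLimitError(err) {
		return db.UpdateProvisionerJobLogsOverflowed(ctx, database.UpdateProvisionerJobLogsOverflowedParams{
			ID:             jobID,
			LogsOverflowed: true,
		})
	}
	return err
}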
Favorite bool `db:"favorite" json:"favorite"` NextStartAt sql.NullTime `db:"next_start_at" json:"next_start_at"` + GroupACL WorkspaceACL `db:"group_acl" json:"group_acl"` + UserACL WorkspaceACL `db:"user_acl" json:"user_acl"` } diff --git a/coderd/database/querier.go b/coderd/database/querier.go index baa5d8590b1d7..a2c6cda1afc4b 100644 --- a/coderd/database/querier.go +++ b/coderd/database/querier.go @@ -593,6 +593,8 @@ type sqlcQuerier interface { UpdatePresetPrebuildStatus(ctx context.Context, arg UpdatePresetPrebuildStatusParams) error UpdateProvisionerDaemonLastSeenAt(ctx context.Context, arg UpdateProvisionerDaemonLastSeenAtParams) error UpdateProvisionerJobByID(ctx context.Context, arg UpdateProvisionerJobByIDParams) error + UpdateProvisionerJobLogsLength(ctx context.Context, arg UpdateProvisionerJobLogsLengthParams) error + UpdateProvisionerJobLogsOverflowed(ctx context.Context, arg UpdateProvisionerJobLogsOverflowedParams) error UpdateProvisionerJobWithCancelByID(ctx context.Context, arg UpdateProvisionerJobWithCancelByIDParams) error UpdateProvisionerJobWithCompleteByID(ctx context.Context, arg UpdateProvisionerJobWithCompleteByIDParams) error UpdateProvisionerJobWithCompleteWithStartedAtByID(ctx context.Context, arg UpdateProvisionerJobWithCompleteWithStartedAtByIDParams) error diff --git a/coderd/database/queries.sql.go b/coderd/database/queries.sql.go index 0fff220bb2ba2..6033ab728007d 100644 --- a/coderd/database/queries.sql.go +++ b/coderd/database/queries.sql.go @@ -8514,6 +8514,44 @@ func (q *sqlQuerier) InsertProvisionerJobLogs(ctx context.Context, arg InsertPro return items, nil } +const updateProvisionerJobLogsLength = `-- name: UpdateProvisionerJobLogsLength :exec +UPDATE + provisioner_jobs +SET + logs_length = logs_length + $2 +WHERE + id = $1 +` + +type UpdateProvisionerJobLogsLengthParams struct { + ID uuid.UUID `db:"id" json:"id"` + LogsLength int32 `db:"logs_length" json:"logs_length"` +} + +func (q *sqlQuerier) UpdateProvisionerJobLogsLength(ctx context.Context, arg UpdateProvisionerJobLogsLengthParams) error { + _, err := q.db.ExecContext(ctx, updateProvisionerJobLogsLength, arg.ID, arg.LogsLength) + return err +} + +const updateProvisionerJobLogsOverflowed = `-- name: UpdateProvisionerJobLogsOverflowed :exec +UPDATE + provisioner_jobs +SET + logs_overflowed = $2 +WHERE + id = $1 +` + +type UpdateProvisionerJobLogsOverflowedParams struct { + ID uuid.UUID `db:"id" json:"id"` + LogsOverflowed bool `db:"logs_overflowed" json:"logs_overflowed"` +} + +func (q *sqlQuerier) UpdateProvisionerJobLogsOverflowed(ctx context.Context, arg UpdateProvisionerJobLogsOverflowedParams) error { + _, err := q.db.ExecContext(ctx, updateProvisionerJobLogsOverflowed, arg.ID, arg.LogsOverflowed) + return err +} + const acquireProvisionerJob = `-- name: AcquireProvisionerJob :one UPDATE provisioner_jobs @@ -8543,7 +8581,7 @@ WHERE SKIP LOCKED LIMIT 1 - ) RETURNING id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status + ) RETURNING id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status, logs_length, logs_overflowed ` type AcquireProvisionerJobParams struct { @@ -8589,13 +8627,15 @@ func (q *sqlQuerier) AcquireProvisionerJob(ctx context.Context, arg AcquireProvi &i.ErrorCode, 
&i.TraceMetadata, &i.JobStatus, + &i.LogsLength, + &i.LogsOverflowed, ) return i, err } const getProvisionerJobByID = `-- name: GetProvisionerJobByID :one SELECT - id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status + id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status, logs_length, logs_overflowed FROM provisioner_jobs WHERE @@ -8625,13 +8665,15 @@ func (q *sqlQuerier) GetProvisionerJobByID(ctx context.Context, id uuid.UUID) (P &i.ErrorCode, &i.TraceMetadata, &i.JobStatus, + &i.LogsLength, + &i.LogsOverflowed, ) return i, err } const getProvisionerJobByIDForUpdate = `-- name: GetProvisionerJobByIDForUpdate :one SELECT - id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status + id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status, logs_length, logs_overflowed FROM provisioner_jobs WHERE @@ -8665,6 +8707,8 @@ func (q *sqlQuerier) GetProvisionerJobByIDForUpdate(ctx context.Context, id uuid &i.ErrorCode, &i.TraceMetadata, &i.JobStatus, + &i.LogsLength, + &i.LogsOverflowed, ) return i, err } @@ -8708,7 +8752,7 @@ func (q *sqlQuerier) GetProvisionerJobTimingsByJobID(ctx context.Context, jobID const getProvisionerJobsByIDs = `-- name: GetProvisionerJobsByIDs :many SELECT - id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status + id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status, logs_length, logs_overflowed FROM provisioner_jobs WHERE @@ -8744,6 +8788,8 @@ func (q *sqlQuerier) GetProvisionerJobsByIDs(ctx context.Context, ids []uuid.UUI &i.ErrorCode, &i.TraceMetadata, &i.JobStatus, + &i.LogsLength, + &i.LogsOverflowed, ); err != nil { return nil, err } @@ -8811,7 +8857,7 @@ SELECT -- Step 5: Final SELECT with INNER JOIN provisioner_jobs fj.id, fj.created_at, - pj.id, pj.created_at, pj.updated_at, pj.started_at, pj.canceled_at, pj.completed_at, pj.error, pj.organization_id, pj.initiator_id, pj.provisioner, pj.storage_method, pj.type, pj.input, pj.worker_id, pj.file_id, pj.tags, pj.error_code, pj.trace_metadata, pj.job_status, + pj.id, pj.created_at, pj.updated_at, pj.started_at, pj.canceled_at, pj.completed_at, pj.error, pj.organization_id, pj.initiator_id, pj.provisioner, pj.storage_method, pj.type, pj.input, pj.worker_id, pj.file_id, pj.tags, pj.error_code, pj.trace_metadata, pj.job_status, pj.logs_length, pj.logs_overflowed, fj.queue_position, fj.queue_size FROM @@ -8867,6 +8913,8 @@ func (q *sqlQuerier) GetProvisionerJobsByIDsWithQueuePosition(ctx context.Contex &i.ProvisionerJob.ErrorCode, &i.ProvisionerJob.TraceMetadata, &i.ProvisionerJob.JobStatus, + &i.ProvisionerJob.LogsLength, + &i.ProvisionerJob.LogsOverflowed, &i.QueuePosition, &i.QueueSize, ); err 
!= nil { @@ -8909,7 +8957,7 @@ queue_size AS ( SELECT COUNT(*) AS count FROM pending_jobs ) SELECT - pj.id, pj.created_at, pj.updated_at, pj.started_at, pj.canceled_at, pj.completed_at, pj.error, pj.organization_id, pj.initiator_id, pj.provisioner, pj.storage_method, pj.type, pj.input, pj.worker_id, pj.file_id, pj.tags, pj.error_code, pj.trace_metadata, pj.job_status, + pj.id, pj.created_at, pj.updated_at, pj.started_at, pj.canceled_at, pj.completed_at, pj.error, pj.organization_id, pj.initiator_id, pj.provisioner, pj.storage_method, pj.type, pj.input, pj.worker_id, pj.file_id, pj.tags, pj.error_code, pj.trace_metadata, pj.job_status, pj.logs_length, pj.logs_overflowed, COALESCE(qp.queue_position, 0) AS queue_position, COALESCE(qs.count, 0) AS queue_size, -- Use subquery to utilize ORDER BY in array_agg since it cannot be @@ -9045,6 +9093,8 @@ func (q *sqlQuerier) GetProvisionerJobsByOrganizationAndStatusWithQueuePositionA &i.ProvisionerJob.ErrorCode, &i.ProvisionerJob.TraceMetadata, &i.ProvisionerJob.JobStatus, + &i.ProvisionerJob.LogsLength, + &i.ProvisionerJob.LogsOverflowed, &i.QueuePosition, &i.QueueSize, pq.Array(&i.AvailableWorkers), @@ -9071,7 +9121,7 @@ func (q *sqlQuerier) GetProvisionerJobsByOrganizationAndStatusWithQueuePositionA } const getProvisionerJobsCreatedAfter = `-- name: GetProvisionerJobsCreatedAfter :many -SELECT id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status FROM provisioner_jobs WHERE created_at > $1 +SELECT id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status, logs_length, logs_overflowed FROM provisioner_jobs WHERE created_at > $1 ` func (q *sqlQuerier) GetProvisionerJobsCreatedAfter(ctx context.Context, createdAt time.Time) ([]ProvisionerJob, error) { @@ -9103,6 +9153,8 @@ func (q *sqlQuerier) GetProvisionerJobsCreatedAfter(ctx context.Context, created &i.ErrorCode, &i.TraceMetadata, &i.JobStatus, + &i.LogsLength, + &i.LogsOverflowed, ); err != nil { return nil, err } @@ -9119,7 +9171,7 @@ func (q *sqlQuerier) GetProvisionerJobsCreatedAfter(ctx context.Context, created const getProvisionerJobsToBeReaped = `-- name: GetProvisionerJobsToBeReaped :many SELECT - id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status + id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status, logs_length, logs_overflowed FROM provisioner_jobs WHERE @@ -9176,6 +9228,8 @@ func (q *sqlQuerier) GetProvisionerJobsToBeReaped(ctx context.Context, arg GetPr &i.ErrorCode, &i.TraceMetadata, &i.JobStatus, + &i.LogsLength, + &i.LogsOverflowed, ); err != nil { return nil, err } @@ -9204,10 +9258,11 @@ INSERT INTO "type", "input", tags, - trace_metadata + trace_metadata, + logs_overflowed ) VALUES - ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) RETURNING id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status + 
($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13) RETURNING id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status, logs_length, logs_overflowed ` type InsertProvisionerJobParams struct { @@ -9223,6 +9278,7 @@ type InsertProvisionerJobParams struct { Input json.RawMessage `db:"input" json:"input"` Tags StringMap `db:"tags" json:"tags"` TraceMetadata pqtype.NullRawMessage `db:"trace_metadata" json:"trace_metadata"` + LogsOverflowed bool `db:"logs_overflowed" json:"logs_overflowed"` } func (q *sqlQuerier) InsertProvisionerJob(ctx context.Context, arg InsertProvisionerJobParams) (ProvisionerJob, error) { @@ -9239,6 +9295,7 @@ func (q *sqlQuerier) InsertProvisionerJob(ctx context.Context, arg InsertProvisi arg.Input, arg.Tags, arg.TraceMetadata, + arg.LogsOverflowed, ) var i ProvisionerJob err := row.Scan( @@ -9261,6 +9318,8 @@ func (q *sqlQuerier) InsertProvisionerJob(ctx context.Context, arg InsertProvisi &i.ErrorCode, &i.TraceMetadata, &i.JobStatus, + &i.LogsLength, + &i.LogsOverflowed, ) return i, err } @@ -15382,7 +15441,7 @@ func (q *sqlQuerier) DeleteWorkspaceSubAgentByID(ctx context.Context, id uuid.UU const getWorkspaceAgentAndLatestBuildByAuthToken = `-- name: GetWorkspaceAgentAndLatestBuildByAuthToken :one SELECT - workspaces.id, workspaces.created_at, workspaces.updated_at, workspaces.owner_id, workspaces.organization_id, workspaces.template_id, workspaces.deleted, workspaces.name, workspaces.autostart_schedule, workspaces.ttl, workspaces.last_used_at, workspaces.dormant_at, workspaces.deleting_at, workspaces.automatic_updates, workspaces.favorite, workspaces.next_start_at, + workspaces.id, workspaces.created_at, workspaces.updated_at, workspaces.owner_id, workspaces.organization_id, workspaces.template_id, workspaces.deleted, workspaces.name, workspaces.autostart_schedule, workspaces.ttl, workspaces.last_used_at, workspaces.dormant_at, workspaces.deleting_at, workspaces.automatic_updates, workspaces.favorite, workspaces.next_start_at, workspaces.group_acl, workspaces.user_acl, workspace_agents.id, workspace_agents.created_at, workspace_agents.updated_at, workspace_agents.name, workspace_agents.first_connected_at, workspace_agents.last_connected_at, workspace_agents.disconnected_at, workspace_agents.resource_id, workspace_agents.auth_token, workspace_agents.auth_instance_id, workspace_agents.architecture, workspace_agents.environment_variables, workspace_agents.operating_system, workspace_agents.instance_metadata, workspace_agents.resource_metadata, workspace_agents.directory, workspace_agents.version, workspace_agents.last_connected_replica_id, workspace_agents.connection_timeout_seconds, workspace_agents.troubleshooting_url, workspace_agents.motd_file, workspace_agents.lifecycle_state, workspace_agents.expanded_directory, workspace_agents.logs_length, workspace_agents.logs_overflowed, workspace_agents.started_at, workspace_agents.ready_at, workspace_agents.subsystems, workspace_agents.display_apps, workspace_agents.api_version, workspace_agents.display_order, workspace_agents.parent_id, workspace_agents.api_key_scope, workspace_agents.deleted, workspace_build_with_user.id, workspace_build_with_user.created_at, workspace_build_with_user.updated_at, workspace_build_with_user.workspace_id, workspace_build_with_user.template_version_id, workspace_build_with_user.build_number, workspace_build_with_user.transition, 
workspace_build_with_user.initiator_id, workspace_build_with_user.provisioner_state, workspace_build_with_user.job_id, workspace_build_with_user.deadline, workspace_build_with_user.reason, workspace_build_with_user.daily_cost, workspace_build_with_user.max_deadline, workspace_build_with_user.template_version_preset_id, workspace_build_with_user.has_ai_task, workspace_build_with_user.ai_task_sidebar_app_id, workspace_build_with_user.initiator_by_avatar_url, workspace_build_with_user.initiator_by_username, workspace_build_with_user.initiator_by_name FROM @@ -15444,6 +15503,8 @@ func (q *sqlQuerier) GetWorkspaceAgentAndLatestBuildByAuthToken(ctx context.Cont &i.WorkspaceTable.AutomaticUpdates, &i.WorkspaceTable.Favorite, &i.WorkspaceTable.NextStartAt, + &i.WorkspaceTable.GroupACL, + &i.WorkspaceTable.UserACL, &i.WorkspaceAgent.ID, &i.WorkspaceAgent.CreatedAt, &i.WorkspaceAgent.UpdatedAt, @@ -19534,7 +19595,7 @@ func (q *sqlQuerier) GetDeploymentWorkspaceStats(ctx context.Context) (GetDeploy const getWorkspaceByAgentID = `-- name: GetWorkspaceByAgentID :one SELECT - id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description + id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description FROM workspaces_expanded as workspaces WHERE @@ -19582,6 +19643,8 @@ func (q *sqlQuerier) GetWorkspaceByAgentID(ctx context.Context, agentID uuid.UUI &i.AutomaticUpdates, &i.Favorite, &i.NextStartAt, + &i.GroupACL, + &i.UserACL, &i.OwnerAvatarUrl, &i.OwnerUsername, &i.OwnerName, @@ -19599,7 +19662,7 @@ func (q *sqlQuerier) GetWorkspaceByAgentID(ctx context.Context, agentID uuid.UUI const getWorkspaceByID = `-- name: GetWorkspaceByID :one SELECT - id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description + id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description FROM workspaces_expanded WHERE @@ -19628,6 +19691,8 @@ func (q *sqlQuerier) GetWorkspaceByID(ctx context.Context, id uuid.UUID) (Worksp &i.AutomaticUpdates, &i.Favorite, &i.NextStartAt, + &i.GroupACL, + &i.UserACL, &i.OwnerAvatarUrl, &i.OwnerUsername, &i.OwnerName, @@ -19645,7 +19710,7 @@ func (q *sqlQuerier) GetWorkspaceByID(ctx context.Context, id uuid.UUID) (Worksp 
const getWorkspaceByOwnerIDAndName = `-- name: GetWorkspaceByOwnerIDAndName :one SELECT - id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description + id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description FROM workspaces_expanded as workspaces WHERE @@ -19681,6 +19746,8 @@ func (q *sqlQuerier) GetWorkspaceByOwnerIDAndName(ctx context.Context, arg GetWo &i.AutomaticUpdates, &i.Favorite, &i.NextStartAt, + &i.GroupACL, + &i.UserACL, &i.OwnerAvatarUrl, &i.OwnerUsername, &i.OwnerName, @@ -19698,7 +19765,7 @@ func (q *sqlQuerier) GetWorkspaceByOwnerIDAndName(ctx context.Context, arg GetWo const getWorkspaceByResourceID = `-- name: GetWorkspaceByResourceID :one SELECT - id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description + id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description FROM workspaces_expanded as workspaces WHERE @@ -19741,6 +19808,8 @@ func (q *sqlQuerier) GetWorkspaceByResourceID(ctx context.Context, resourceID uu &i.AutomaticUpdates, &i.Favorite, &i.NextStartAt, + &i.GroupACL, + &i.UserACL, &i.OwnerAvatarUrl, &i.OwnerUsername, &i.OwnerName, @@ -19758,7 +19827,7 @@ func (q *sqlQuerier) GetWorkspaceByResourceID(ctx context.Context, resourceID uu const getWorkspaceByWorkspaceAppID = `-- name: GetWorkspaceByWorkspaceAppID :one SELECT - id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description + id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description FROM workspaces_expanded as workspaces 
WHERE @@ -19813,6 +19882,8 @@ func (q *sqlQuerier) GetWorkspaceByWorkspaceAppID(ctx context.Context, workspace &i.AutomaticUpdates, &i.Favorite, &i.NextStartAt, + &i.GroupACL, + &i.UserACL, &i.OwnerAvatarUrl, &i.OwnerUsername, &i.OwnerName, @@ -19873,7 +19944,7 @@ SELECT ), filtered_workspaces AS ( SELECT - workspaces.id, workspaces.created_at, workspaces.updated_at, workspaces.owner_id, workspaces.organization_id, workspaces.template_id, workspaces.deleted, workspaces.name, workspaces.autostart_schedule, workspaces.ttl, workspaces.last_used_at, workspaces.dormant_at, workspaces.deleting_at, workspaces.automatic_updates, workspaces.favorite, workspaces.next_start_at, workspaces.owner_avatar_url, workspaces.owner_username, workspaces.owner_name, workspaces.organization_name, workspaces.organization_display_name, workspaces.organization_icon, workspaces.organization_description, workspaces.template_name, workspaces.template_display_name, workspaces.template_icon, workspaces.template_description, + workspaces.id, workspaces.created_at, workspaces.updated_at, workspaces.owner_id, workspaces.organization_id, workspaces.template_id, workspaces.deleted, workspaces.name, workspaces.autostart_schedule, workspaces.ttl, workspaces.last_used_at, workspaces.dormant_at, workspaces.deleting_at, workspaces.automatic_updates, workspaces.favorite, workspaces.next_start_at, workspaces.group_acl, workspaces.user_acl, workspaces.owner_avatar_url, workspaces.owner_username, workspaces.owner_name, workspaces.organization_name, workspaces.organization_display_name, workspaces.organization_icon, workspaces.organization_description, workspaces.template_name, workspaces.template_display_name, workspaces.template_icon, workspaces.template_description, latest_build.template_version_id, latest_build.template_version_name, latest_build.completed_at as latest_build_completed_at, @@ -20138,7 +20209,7 @@ WHERE -- @authorize_filter ), filtered_workspaces_order AS ( SELECT - fw.id, fw.created_at, fw.updated_at, fw.owner_id, fw.organization_id, fw.template_id, fw.deleted, fw.name, fw.autostart_schedule, fw.ttl, fw.last_used_at, fw.dormant_at, fw.deleting_at, fw.automatic_updates, fw.favorite, fw.next_start_at, fw.owner_avatar_url, fw.owner_username, fw.owner_name, fw.organization_name, fw.organization_display_name, fw.organization_icon, fw.organization_description, fw.template_name, fw.template_display_name, fw.template_icon, fw.template_description, fw.template_version_id, fw.template_version_name, fw.latest_build_completed_at, fw.latest_build_canceled_at, fw.latest_build_error, fw.latest_build_transition, fw.latest_build_status, fw.latest_build_has_ai_task + fw.id, fw.created_at, fw.updated_at, fw.owner_id, fw.organization_id, fw.template_id, fw.deleted, fw.name, fw.autostart_schedule, fw.ttl, fw.last_used_at, fw.dormant_at, fw.deleting_at, fw.automatic_updates, fw.favorite, fw.next_start_at, fw.group_acl, fw.user_acl, fw.owner_avatar_url, fw.owner_username, fw.owner_name, fw.organization_name, fw.organization_display_name, fw.organization_icon, fw.organization_description, fw.template_name, fw.template_display_name, fw.template_icon, fw.template_description, fw.template_version_id, fw.template_version_name, fw.latest_build_completed_at, fw.latest_build_canceled_at, fw.latest_build_error, fw.latest_build_transition, fw.latest_build_status, fw.latest_build_has_ai_task FROM filtered_workspaces fw ORDER BY @@ -20159,7 +20230,7 @@ WHERE $21 ), filtered_workspaces_order_with_summary AS ( SELECT - fwo.id, fwo.created_at, 
fwo.updated_at, fwo.owner_id, fwo.organization_id, fwo.template_id, fwo.deleted, fwo.name, fwo.autostart_schedule, fwo.ttl, fwo.last_used_at, fwo.dormant_at, fwo.deleting_at, fwo.automatic_updates, fwo.favorite, fwo.next_start_at, fwo.owner_avatar_url, fwo.owner_username, fwo.owner_name, fwo.organization_name, fwo.organization_display_name, fwo.organization_icon, fwo.organization_description, fwo.template_name, fwo.template_display_name, fwo.template_icon, fwo.template_description, fwo.template_version_id, fwo.template_version_name, fwo.latest_build_completed_at, fwo.latest_build_canceled_at, fwo.latest_build_error, fwo.latest_build_transition, fwo.latest_build_status, fwo.latest_build_has_ai_task + fwo.id, fwo.created_at, fwo.updated_at, fwo.owner_id, fwo.organization_id, fwo.template_id, fwo.deleted, fwo.name, fwo.autostart_schedule, fwo.ttl, fwo.last_used_at, fwo.dormant_at, fwo.deleting_at, fwo.automatic_updates, fwo.favorite, fwo.next_start_at, fwo.group_acl, fwo.user_acl, fwo.owner_avatar_url, fwo.owner_username, fwo.owner_name, fwo.organization_name, fwo.organization_display_name, fwo.organization_icon, fwo.organization_description, fwo.template_name, fwo.template_display_name, fwo.template_icon, fwo.template_description, fwo.template_version_id, fwo.template_version_name, fwo.latest_build_completed_at, fwo.latest_build_canceled_at, fwo.latest_build_error, fwo.latest_build_transition, fwo.latest_build_status, fwo.latest_build_has_ai_task FROM filtered_workspaces_order fwo -- Return a technical summary row with total count of workspaces. @@ -20182,6 +20253,8 @@ WHERE 'never'::automatic_updates, -- automatic_updates false, -- favorite '0001-01-01 00:00:00+00'::timestamptz, -- next_start_at + '{}'::jsonb, -- group_acl + '{}'::jsonb, -- user_acl '', -- owner_avatar_url '', -- owner_username '', -- owner_name @@ -20211,7 +20284,7 @@ WHERE filtered_workspaces ) SELECT - fwos.id, fwos.created_at, fwos.updated_at, fwos.owner_id, fwos.organization_id, fwos.template_id, fwos.deleted, fwos.name, fwos.autostart_schedule, fwos.ttl, fwos.last_used_at, fwos.dormant_at, fwos.deleting_at, fwos.automatic_updates, fwos.favorite, fwos.next_start_at, fwos.owner_avatar_url, fwos.owner_username, fwos.owner_name, fwos.organization_name, fwos.organization_display_name, fwos.organization_icon, fwos.organization_description, fwos.template_name, fwos.template_display_name, fwos.template_icon, fwos.template_description, fwos.template_version_id, fwos.template_version_name, fwos.latest_build_completed_at, fwos.latest_build_canceled_at, fwos.latest_build_error, fwos.latest_build_transition, fwos.latest_build_status, fwos.latest_build_has_ai_task, + fwos.id, fwos.created_at, fwos.updated_at, fwos.owner_id, fwos.organization_id, fwos.template_id, fwos.deleted, fwos.name, fwos.autostart_schedule, fwos.ttl, fwos.last_used_at, fwos.dormant_at, fwos.deleting_at, fwos.automatic_updates, fwos.favorite, fwos.next_start_at, fwos.group_acl, fwos.user_acl, fwos.owner_avatar_url, fwos.owner_username, fwos.owner_name, fwos.organization_name, fwos.organization_display_name, fwos.organization_icon, fwos.organization_description, fwos.template_name, fwos.template_display_name, fwos.template_icon, fwos.template_description, fwos.template_version_id, fwos.template_version_name, fwos.latest_build_completed_at, fwos.latest_build_canceled_at, fwos.latest_build_error, fwos.latest_build_transition, fwos.latest_build_status, fwos.latest_build_has_ai_task, tc.count FROM filtered_workspaces_order_with_summary fwos @@ -20262,6 +20335,8 @@ 
type GetWorkspacesRow struct { AutomaticUpdates AutomaticUpdates `db:"automatic_updates" json:"automatic_updates"` Favorite bool `db:"favorite" json:"favorite"` NextStartAt sql.NullTime `db:"next_start_at" json:"next_start_at"` + GroupACL json.RawMessage `db:"group_acl" json:"group_acl"` + UserACL json.RawMessage `db:"user_acl" json:"user_acl"` OwnerAvatarUrl string `db:"owner_avatar_url" json:"owner_avatar_url"` OwnerUsername string `db:"owner_username" json:"owner_username"` OwnerName string `db:"owner_name" json:"owner_name"` @@ -20337,6 +20412,8 @@ func (q *sqlQuerier) GetWorkspaces(ctx context.Context, arg GetWorkspacesParams) &i.AutomaticUpdates, &i.Favorite, &i.NextStartAt, + &i.GroupACL, + &i.UserACL, &i.OwnerAvatarUrl, &i.OwnerUsername, &i.OwnerName, @@ -20451,7 +20528,7 @@ func (q *sqlQuerier) GetWorkspacesAndAgentsByOwnerID(ctx context.Context, ownerI } const getWorkspacesByTemplateID = `-- name: GetWorkspacesByTemplateID :many -SELECT id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at FROM workspaces WHERE template_id = $1 AND deleted = false +SELECT id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl FROM workspaces WHERE template_id = $1 AND deleted = false ` func (q *sqlQuerier) GetWorkspacesByTemplateID(ctx context.Context, templateID uuid.UUID) ([]WorkspaceTable, error) { @@ -20480,6 +20557,8 @@ func (q *sqlQuerier) GetWorkspacesByTemplateID(ctx context.Context, templateID u &i.AutomaticUpdates, &i.Favorite, &i.NextStartAt, + &i.GroupACL, + &i.UserACL, ); err != nil { return nil, err } @@ -20667,7 +20746,7 @@ INSERT INTO next_start_at ) VALUES - ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) RETURNING id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at + ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) RETURNING id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl ` type InsertWorkspaceParams struct { @@ -20718,6 +20797,8 @@ func (q *sqlQuerier) InsertWorkspace(ctx context.Context, arg InsertWorkspacePar &i.AutomaticUpdates, &i.Favorite, &i.NextStartAt, + &i.GroupACL, + &i.UserACL, ) return i, err } @@ -20757,7 +20838,7 @@ SET WHERE id = $1 AND deleted = false -RETURNING id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at +RETURNING id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl ` type UpdateWorkspaceParams struct { @@ -20785,6 +20866,8 @@ func (q *sqlQuerier) UpdateWorkspace(ctx context.Context, arg UpdateWorkspacePar &i.AutomaticUpdates, &i.Favorite, &i.NextStartAt, + &i.GroupACL, + &i.UserACL, ) return i, err } @@ -20873,7 +20956,7 @@ WHERE workspaces.id = $1 AND templates.id = workspaces.template_id RETURNING - workspaces.id, workspaces.created_at, workspaces.updated_at, 
workspaces.owner_id, workspaces.organization_id, workspaces.template_id, workspaces.deleted, workspaces.name, workspaces.autostart_schedule, workspaces.ttl, workspaces.last_used_at, workspaces.dormant_at, workspaces.deleting_at, workspaces.automatic_updates, workspaces.favorite, workspaces.next_start_at + workspaces.id, workspaces.created_at, workspaces.updated_at, workspaces.owner_id, workspaces.organization_id, workspaces.template_id, workspaces.deleted, workspaces.name, workspaces.autostart_schedule, workspaces.ttl, workspaces.last_used_at, workspaces.dormant_at, workspaces.deleting_at, workspaces.automatic_updates, workspaces.favorite, workspaces.next_start_at, workspaces.group_acl, workspaces.user_acl ` type UpdateWorkspaceDormantDeletingAtParams struct { @@ -20901,6 +20984,8 @@ func (q *sqlQuerier) UpdateWorkspaceDormantDeletingAt(ctx context.Context, arg U &i.AutomaticUpdates, &i.Favorite, &i.NextStartAt, + &i.GroupACL, + &i.UserACL, ) return i, err } @@ -20975,7 +21060,7 @@ WHERE template_id = $3 AND dormant_at IS NOT NULL -RETURNING id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at +RETURNING id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl ` type UpdateWorkspacesDormantDeletingAtByTemplateIDParams struct { @@ -21010,6 +21095,8 @@ func (q *sqlQuerier) UpdateWorkspacesDormantDeletingAtByTemplateID(ctx context.C &i.AutomaticUpdates, &i.Favorite, &i.NextStartAt, + &i.GroupACL, + &i.UserACL, ); err != nil { return nil, err } diff --git a/coderd/database/queries/provisionerjoblogs.sql b/coderd/database/queries/provisionerjoblogs.sql index b98cf471f0d1a..c0ef188bdd382 100644 --- a/coderd/database/queries/provisionerjoblogs.sql +++ b/coderd/database/queries/provisionerjoblogs.sql @@ -19,3 +19,19 @@ SELECT unnest(@level :: log_level [ ]) AS LEVEL, unnest(@stage :: VARCHAR(128) [ ]) AS stage, unnest(@output :: VARCHAR(1024) [ ]) AS output RETURNING *; + +-- name: UpdateProvisionerJobLogsOverflowed :exec +UPDATE + provisioner_jobs +SET + logs_overflowed = $2 +WHERE + id = $1; + +-- name: UpdateProvisionerJobLogsLength :exec +UPDATE + provisioner_jobs +SET + logs_length = logs_length + $2 +WHERE + id = $1; diff --git a/coderd/database/queries/provisionerjobs.sql b/coderd/database/queries/provisionerjobs.sql index fcf348e089def..3ba581646689e 100644 --- a/coderd/database/queries/provisionerjobs.sql +++ b/coderd/database/queries/provisionerjobs.sql @@ -247,10 +247,11 @@ INSERT INTO "type", "input", tags, - trace_metadata + trace_metadata, + logs_overflowed ) VALUES - ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) RETURNING *; + ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13) RETURNING *; -- name: UpdateProvisionerJobByID :exec UPDATE diff --git a/coderd/database/queries/workspaces.sql b/coderd/database/queries/workspaces.sql index f166d16f742cd..783cbc56e488c 100644 --- a/coderd/database/queries/workspaces.sql +++ b/coderd/database/queries/workspaces.sql @@ -418,6 +418,8 @@ WHERE 'never'::automatic_updates, -- automatic_updates false, -- favorite '0001-01-01 00:00:00+00'::timestamptz, -- next_start_at + '{}'::jsonb, -- group_acl + '{}'::jsonb, -- user_acl '', -- owner_avatar_url '', -- owner_username '', -- owner_name diff --git a/coderd/database/sqlc.yaml b/coderd/database/sqlc.yaml 
index c8e83e9f859b9..689eb1aaeb53b 100644 --- a/coderd/database/sqlc.yaml +++ b/coderd/database/sqlc.yaml @@ -73,6 +73,18 @@ sql: - column: "template_usage_stats.app_usage_mins" go_type: type: "StringMapOfInt" + - column: "workspaces.user_acl" + go_type: + type: "WorkspaceACL" + - column: "workspaces.group_acl" + go_type: + type: "WorkspaceACL" + - column: "workspaces_expanded.user_acl" + go_type: + type: "WorkspaceACL" + - column: "workspaces_expanded.group_acl" + go_type: + type: "WorkspaceACL" - column: "notification_templates.actions" go_type: type: "[]byte" diff --git a/coderd/database/types.go b/coderd/database/types.go index 6d0f036fe692c..11a0613965b8d 100644 --- a/coderd/database/types.go +++ b/coderd/database/types.go @@ -77,6 +77,28 @@ func (t TemplateACL) Value() (driver.Value, error) { return json.Marshal(t) } +type WorkspaceACL map[string]WorkspaceACLEntry + +func (t *WorkspaceACL) Scan(src interface{}) error { + switch v := src.(type) { + case string: + return json.Unmarshal([]byte(v), &t) + case []byte, json.RawMessage: + //nolint + return json.Unmarshal(v.([]byte), &t) + } + + return xerrors.Errorf("unexpected type %T", src) +} + +func (t WorkspaceACL) Value() (driver.Value, error) { + return json.Marshal(t) +} + +type WorkspaceACLEntry struct { + Permissions []policy.Action `json:"permissions"` +} + type ExternalAuthProvider struct { ID string `json:"id"` Optional bool `json:"optional,omitempty"` diff --git a/coderd/files.go b/coderd/files.go index f82d1aa926c22..eaab00c401481 100644 --- a/coderd/files.go +++ b/coderd/files.go @@ -118,11 +118,23 @@ func (api *API) postFile(rw http.ResponseWriter, r *http.Request) { Data: data, }) if err != nil { - httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ - Message: "Internal error saving file.", - Detail: err.Error(), - }) - return + if database.IsUniqueViolation(err, database.UniqueFilesHashCreatedByKey) { + // The file was uploaded by some concurrent process since the last time we checked for it, fetch it again. + file, err = api.Database.GetFileByHashAndCreator(ctx, database.GetFileByHashAndCreatorParams{ + Hash: hash, + CreatedBy: apiKey.UserID, + }) + api.Logger.Info(ctx, "postFile handler hit UniqueViolation trying to upload file after already checking for the file existence", slog.F("hash", hash), slog.F("created_by_id", apiKey.UserID)) + } + // At this point the first error was either not the UniqueViolation OR there's still an error even after we + // attempt to fetch the file again, so we should return here. 
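// Illustrative sketch, not part of the patch: a round-trip through the WorkspaceACL
// Valuer/Scanner added in coderd/database/types.go above. The test name and the chosen
// permission are assumptions; the WorkspaceACL/WorkspaceACLEntry types and their
// Scan/Value methods are the ones introduced by this diff.
package database_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/coder/coder/v2/coderd/database"
	"github.com/coder/coder/v2/coderd/rbac/policy"
)

func TestWorkspaceACLRoundTrip(t *testing.T) {
	t.Parallel()

	acl := database.WorkspaceACL{
		"group-or-user-uuid": database.WorkspaceACLEntry{
			Permissions: []policy.Action{policy.ActionRead},
		},
	}

	// Value marshals the map to JSON, which is what is stored in the new jsonb columns.
	raw, err := acl.Value()
	require.NoError(t, err)

	// Scan accepts string or []byte, mirroring what lib/pq returns for jsonb.
	var got database.WorkspaceACL
	require.NoError(t, got.Scan(raw))
	require.Equal(t, acl, got)
}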
+ if err != nil { + httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ + Message: "Internal error saving file.", + Detail: err.Error(), + }) + return + } } httpapi.Write(ctx, rw, http.StatusCreated, codersdk.UploadResponse{ diff --git a/coderd/files_test.go b/coderd/files_test.go index 974db6b18fc69..fb13cb30e48f1 100644 --- a/coderd/files_test.go +++ b/coderd/files_test.go @@ -5,6 +5,7 @@ import ( "bytes" "context" "net/http" + "sync" "testing" "github.com/google/uuid" @@ -69,6 +70,30 @@ func TestPostFiles(t *testing.T) { _, err = client.Upload(ctx, codersdk.ContentTypeTar, bytes.NewReader(data)) require.NoError(t, err) }) + t.Run("InsertConcurrent", func(t *testing.T) { + t.Parallel() + client := coderdtest.New(t, nil) + _ = coderdtest.CreateFirstUser(t, client) + + ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) + defer cancel() + + var wg sync.WaitGroup + var end sync.WaitGroup + wg.Add(1) + end.Add(3) + for range 3 { + go func() { + wg.Wait() + data := make([]byte, 1024) + _, err := client.Upload(ctx, codersdk.ContentTypeTar, bytes.NewReader(data)) + end.Done() + require.NoError(t, err) + }() + } + wg.Done() + end.Wait() + }) } func TestDownload(t *testing.T) { diff --git a/coderd/provisionerdserver/provisionerdserver.go b/coderd/provisionerdserver/provisionerdserver.go index 518b48d2fe04b..94173703c467d 100644 --- a/coderd/provisionerdserver/provisionerdserver.go +++ b/coderd/provisionerdserver/provisionerdserver.go @@ -902,29 +902,93 @@ func (s *server) UpdateJob(ctx context.Context, request *proto.UpdateJobRequest) return nil, xerrors.Errorf("update job: %w", err) } - if len(request.Logs) > 0 { + if len(request.Logs) > 0 && !job.LogsOverflowed { //nolint:exhaustruct // We append to the additional fields below. insertParams := database.InsertProvisionerJobLogsParams{ JobID: parsedID, } + + newLogSize := 0 + overflowedErrorMsg := "Provisioner logs exceeded the max size of 1MB. Will not continue to write provisioner logs for workspace build." 
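// Editor's note (illustrative, not part of this change): the overflow check
// below is a running byte budget. For each candidate log line it asks whether
//
//	persisted bytes (job.LogsLength)
//	  + bytes already accepted in this batch (newLogSize)
//	  + the reserved warning message (lenErrMsg)
//	  + the candidate line (len(output))
//
// would exceed 1 MiB (1048576 bytes). Reserving lenErrMsg up front guarantees
// the warning itself can always be written once the limit is reached. A
// minimal sketch of the same predicate, with illustrative names:
//
//	func wouldOverflow(persisted, batch, reserved, next int) bool {
//		return persisted+batch+reserved+next > 1<<20
//	}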
+ lenErrMsg := len(overflowedErrorMsg) + + var ( + createdAt time.Time + level database.LogLevel + stage string + source database.LogSource + output string + ) + for _, log := range request.Logs { - logLevel, err := convertLogLevel(log.Level) + // Build our log params + level, err = convertLogLevel(log.Level) if err != nil { return nil, xerrors.Errorf("convert log level: %w", err) } - logSource, err := convertLogSource(log.Source) + source, err = convertLogSource(log.Source) if err != nil { return nil, xerrors.Errorf("convert log source: %w", err) } - insertParams.CreatedAt = append(insertParams.CreatedAt, time.UnixMilli(log.CreatedAt)) - insertParams.Level = append(insertParams.Level, logLevel) - insertParams.Stage = append(insertParams.Stage, log.Stage) - insertParams.Source = append(insertParams.Source, logSource) - insertParams.Output = append(insertParams.Output, log.Output) + createdAt = time.UnixMilli(log.CreatedAt) + stage = log.Stage + output = log.Output + + // Check if we would overflow the job logs (not leaving enough room for the error message) + willOverflow := int64(job.LogsLength)+int64(newLogSize)+int64(lenErrMsg)+int64(len(output)) > 1048576 + if willOverflow { + s.Logger.Debug(ctx, "provisioner job logs overflowed 1MB size limit in database", slog.F("job_id", parsedID)) + err = s.Database.UpdateProvisionerJobLogsOverflowed(ctx, database.UpdateProvisionerJobLogsOverflowedParams{ + ID: parsedID, + LogsOverflowed: true, + }) + if err != nil { + s.Logger.Error(ctx, "failed to set logs overflowed flag", slog.F("job_id", parsedID), slog.Error(err)) + } + + level = database.LogLevelWarn + output = overflowedErrorMsg + } + + newLogSize += len(output) + + insertParams.CreatedAt = append(insertParams.CreatedAt, createdAt) + insertParams.Level = append(insertParams.Level, level) + insertParams.Stage = append(insertParams.Stage, stage) + insertParams.Source = append(insertParams.Source, source) + insertParams.Output = append(insertParams.Output, output) s.Logger.Debug(ctx, "job log", slog.F("job_id", parsedID), - slog.F("stage", log.Stage), - slog.F("output", log.Output)) + slog.F("stage", stage), + slog.F("output", output)) + + // Don't write any more logs because there's no room. + if willOverflow { + break + } + } + + err = s.Database.UpdateProvisionerJobLogsLength(ctx, database.UpdateProvisionerJobLogsLengthParams{ + ID: parsedID, + LogsLength: int32(newLogSize), // #nosec G115 - Log output length is limited to 1MB (2^20) which fits in an int32. + }) + if err != nil { + // Even though we do the runtime check for the overflow, we still check for the database error + // as well. 
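// Editor's note (descriptive only, not part of this change): the runtime
// budget check above and the database error handling below are deliberately
// redundant. Concurrent UpdateJob calls for the same job can race on the
// job.LogsLength snapshot, so a limit error surfaced by the database
// (IsProvisionerJobLogsLimitError) is handled the same way as an in-memory
// overflow: flag the job as overflowed and return success rather than failing
// the provisioner.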
+ if database.IsProvisionerJobLogsLimitError(err) { + err = s.Database.UpdateProvisionerJobLogsOverflowed(ctx, database.UpdateProvisionerJobLogsOverflowedParams{ + ID: parsedID, + LogsOverflowed: true, + }) + if err != nil { + s.Logger.Error(ctx, "failed to set logs overflowed flag", slog.F("job_id", parsedID), slog.Error(err)) + } + return &proto.UpdateJobResponse{ + Canceled: job.CanceledAt.Valid, + }, nil + } + s.Logger.Error(ctx, "failed to update logs length", slog.F("job_id", parsedID), slog.Error(err)) + return nil, xerrors.Errorf("update logs length: %w", err) } logs, err := s.Database.InsertProvisionerJobLogs(ctx, insertParams) @@ -932,6 +996,7 @@ func (s *server) UpdateJob(ctx context.Context, request *proto.UpdateJobRequest) s.Logger.Error(ctx, "failed to insert job logs", slog.F("job_id", parsedID), slog.Error(err)) return nil, xerrors.Errorf("insert job logs: %w", err) } + // Publish by the lowest log ID inserted so the log stream will fetch // everything from that point. lowestID := logs[0].ID diff --git a/coderd/provisionerdserver/provisionerdserver_test.go b/coderd/provisionerdserver/provisionerdserver_test.go index 66684835650a8..b6f9d82a597e7 100644 --- a/coderd/provisionerdserver/provisionerdserver_test.go +++ b/coderd/provisionerdserver/provisionerdserver_test.go @@ -928,6 +928,141 @@ func TestUpdateJob(t *testing.T) { require.Equal(t, workspaceTags[1].Key, "cat") require.Equal(t, workspaceTags[1].Value, "jinx") }) + + t.Run("LogSizeLimit", func(t *testing.T) { + t.Parallel() + srv, db, _, pd := setup(t, false, &overrides{}) + job := setupJob(t, db, pd.ID, pd.Tags) + + // Create a log message that exceeds the 1MB limit + largeOutput := strings.Repeat("a", 1048577) // 1MB + 1 byte + + _, err := srv.UpdateJob(ctx, &proto.UpdateJobRequest{ + JobId: job.String(), + Logs: []*proto.Log{{ + Source: proto.LogSource_PROVISIONER, + Level: sdkproto.LogLevel_INFO, + Output: largeOutput, + }}, + }) + require.NoError(t, err) // Should succeed but trigger overflow + + // Verify the overflow flag is set + jobResult, err := db.GetProvisionerJobByID(ctx, job) + require.NoError(t, err) + require.True(t, jobResult.LogsOverflowed) + }) + + t.Run("IncrementalLogSizeOverflow", func(t *testing.T) { + t.Parallel() + srv, db, _, pd := setup(t, false, &overrides{}) + job := setupJob(t, db, pd.ID, pd.Tags) + + // Send logs that together exceed the limit + mediumOutput := strings.Repeat("b", 524289) // Half a MB + 1 byte + + // First log - should succeed + _, err := srv.UpdateJob(ctx, &proto.UpdateJobRequest{ + JobId: job.String(), + Logs: []*proto.Log{{ + Source: proto.LogSource_PROVISIONER, + Level: sdkproto.LogLevel_INFO, + Output: mediumOutput, + }}, + }) + require.NoError(t, err) + + // Verify overflow flag not yet set + jobResult, err := db.GetProvisionerJobByID(ctx, job) + require.NoError(t, err) + require.False(t, jobResult.LogsOverflowed) + + // Second log - should trigger overflow + _, err = srv.UpdateJob(ctx, &proto.UpdateJobRequest{ + JobId: job.String(), + Logs: []*proto.Log{{ + Source: proto.LogSource_PROVISIONER, + Level: sdkproto.LogLevel_INFO, + Output: mediumOutput, + }}, + }) + require.NoError(t, err) + + // Verify overflow flag is set + jobResult, err = db.GetProvisionerJobByID(ctx, job) + require.NoError(t, err) + require.True(t, jobResult.LogsOverflowed) + }) + + t.Run("LogSizeTracking", func(t *testing.T) { + t.Parallel() + srv, db, _, pd := setup(t, false, &overrides{}) + job := setupJob(t, db, pd.ID, pd.Tags) + + logOutput := "test log message" + expectedSize := 
int32(len(logOutput)) // #nosec G115 - Log length is 16. + + _, err := srv.UpdateJob(ctx, &proto.UpdateJobRequest{ + JobId: job.String(), + Logs: []*proto.Log{{ + Source: proto.LogSource_PROVISIONER, + Level: sdkproto.LogLevel_INFO, + Output: logOutput, + }}, + }) + require.NoError(t, err) + + // Verify the logs_length is correctly tracked + jobResult, err := db.GetProvisionerJobByID(ctx, job) + require.NoError(t, err) + require.Equal(t, expectedSize, jobResult.LogsLength) + require.False(t, jobResult.LogsOverflowed) + }) + + t.Run("LogOverflowStopsProcessing", func(t *testing.T) { + t.Parallel() + srv, db, _, pd := setup(t, false, &overrides{}) + job := setupJob(t, db, pd.ID, pd.Tags) + + // First: trigger overflow + largeOutput := strings.Repeat("a", 1048577) // 1MB + 1 byte + _, err := srv.UpdateJob(ctx, &proto.UpdateJobRequest{ + JobId: job.String(), + Logs: []*proto.Log{{ + Source: proto.LogSource_PROVISIONER, + Level: sdkproto.LogLevel_INFO, + Output: largeOutput, + }}, + }) + require.NoError(t, err) + + // Get the initial log count + initialLogs, err := db.GetProvisionerLogsAfterID(ctx, database.GetProvisionerLogsAfterIDParams{ + JobID: job, + CreatedAfter: -1, + }) + require.NoError(t, err) + initialCount := len(initialLogs) + + // Second: try to send more logs - should be ignored + _, err = srv.UpdateJob(ctx, &proto.UpdateJobRequest{ + JobId: job.String(), + Logs: []*proto.Log{{ + Source: proto.LogSource_PROVISIONER, + Level: sdkproto.LogLevel_INFO, + Output: "this should be ignored", + }}, + }) + require.NoError(t, err) + + // Verify no new logs were added + finalLogs, err := db.GetProvisionerLogsAfterID(ctx, database.GetProvisionerLogsAfterIDParams{ + JobID: job, + CreatedAfter: -1, + }) + require.NoError(t, err) + require.Equal(t, initialCount, len(finalLogs)) + }) } func TestFailJob(t *testing.T) { diff --git a/coderd/provisionerjobs.go b/coderd/provisionerjobs.go index 800b2916efef3..e9ab5260988d4 100644 --- a/coderd/provisionerjobs.go +++ b/coderd/provisionerjobs.go @@ -363,6 +363,7 @@ func convertProvisionerJob(pj database.GetProvisionerJobsByIDsWithQueuePositionR Tags: provisionerJob.Tags, QueuePosition: int(pj.QueuePosition), QueueSize: int(pj.QueueSize), + LogsOverflowed: provisionerJob.LogsOverflowed, } // Applying values optional to the struct. if provisionerJob.StartedAt.Valid { diff --git a/coderd/rbac/regosql/acl_group_var.go b/coderd/rbac/regosql/acl_group_var.go deleted file mode 100644 index 328dfbcd48d0a..0000000000000 --- a/coderd/rbac/regosql/acl_group_var.go +++ /dev/null @@ -1,104 +0,0 @@ -package regosql - -import ( - "fmt" - - "golang.org/x/xerrors" - - "github.com/open-policy-agent/opa/ast" - - "github.com/coder/coder/v2/coderd/rbac/regosql/sqltypes" -) - -var ( - _ sqltypes.VariableMatcher = ACLGroupVar{} - _ sqltypes.Node = ACLGroupVar{} -) - -// ACLGroupVar is a variable matcher that handles group_acl and user_acl. -// The sql type is a jsonb object with the following structure: -// -// "group_acl": { -// "": [""] -// } -// -// This is a custom variable matcher as json objects have arbitrary complexity. -type ACLGroupVar struct { - StructSQL string - // input.object.group_acl -> ["input", "object", "group_acl"] - StructPath []string - - // FieldReference handles referencing the subfields, which could be - // more variables. We pass one in as the global one might not be correctly - // scoped. 
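// Editor's note (descriptive only, not part of this change): ACLGroupVar,
// deleted here, always emitted lookups of the form
//
//	group_acl->'<key>' ? '<action>'
//
// The ACLMappingVar added below generalizes this: when a Subfield such as
// "permissions" is configured, it instead emits
//
//	group_acl#>array['<key>', 'permissions'] ? '<action>'
//
// which is what the workspace ACL columns need, since their per-key entries
// are objects holding a permissions list rather than bare action arrays.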
- FieldReference sqltypes.VariableMatcher - - // Instance fields - Source sqltypes.RegoSource - GroupNode sqltypes.Node -} - -func ACLGroupMatcher(fieldReference sqltypes.VariableMatcher, structSQL string, structPath []string) ACLGroupVar { - return ACLGroupVar{StructSQL: structSQL, StructPath: structPath, FieldReference: fieldReference} -} - -func (ACLGroupVar) UseAs() sqltypes.Node { return ACLGroupVar{} } - -func (g ACLGroupVar) ConvertVariable(rego ast.Ref) (sqltypes.Node, bool) { - // "left" will be a map of group names to actions in rego. - // { - // "all_users": ["read"] - // } - left, err := sqltypes.RegoVarPath(g.StructPath, rego) - if err != nil { - return nil, false - } - - aclGrp := ACLGroupVar{ - StructSQL: g.StructSQL, - StructPath: g.StructPath, - FieldReference: g.FieldReference, - - Source: sqltypes.RegoSource(rego.String()), - } - - // We expect 1 more term. Either a ref or a string. - if len(left) != 1 { - return nil, false - } - - // If the remaining is a variable, then we need to convert it. - // Assuming we support variable fields. - ref, ok := left[0].Value.(ast.Ref) - if ok && g.FieldReference != nil { - groupNode, ok := g.FieldReference.ConvertVariable(ref) - if ok { - aclGrp.GroupNode = groupNode - return aclGrp, true - } - } - - // If it is a string, we assume it is a literal - groupName, ok := left[0].Value.(ast.String) - if ok { - aclGrp.GroupNode = sqltypes.String(string(groupName)) - return aclGrp, true - } - - // If we have not matched it yet, then it is something we do not recognize. - return nil, false -} - -func (g ACLGroupVar) SQLString(cfg *sqltypes.SQLGenerator) string { - return fmt.Sprintf("%s->%s", g.StructSQL, g.GroupNode.SQLString(cfg)) -} - -func (g ACLGroupVar) ContainsSQL(cfg *sqltypes.SQLGenerator, other sqltypes.Node) (string, error) { - switch other.UseAs().(type) { - // Only supports containing other strings. - case sqltypes.AstString: - return fmt.Sprintf("%s ? %s", g.SQLString(cfg), other.SQLString(cfg)), nil - default: - return "", xerrors.Errorf("unsupported acl group contains %T", other) - } -} diff --git a/coderd/rbac/regosql/acl_mapping_var.go b/coderd/rbac/regosql/acl_mapping_var.go new file mode 100644 index 0000000000000..172ac4cc56915 --- /dev/null +++ b/coderd/rbac/regosql/acl_mapping_var.go @@ -0,0 +1,126 @@ +package regosql + +import ( + "fmt" + + "golang.org/x/xerrors" + + "github.com/open-policy-agent/opa/ast" + + "github.com/coder/coder/v2/coderd/rbac/regosql/sqltypes" +) + +var ( + _ sqltypes.VariableMatcher = ACLMappingVar{} + _ sqltypes.Node = ACLMappingVar{} +) + +// ACLMappingVar is a variable matcher that handles group_acl and user_acl. +// The sql type is a jsonb object with the following structure: +// +// "group_acl": { +// "": [""] +// } +// +// This is a custom variable matcher as json objects have arbitrary complexity. +type ACLMappingVar struct { + // SelectSQL is used to `SELECT` the ACL mapping from the table for the + // given resource. ie. if the full query might look like `SELECT group_acl + // FROM things;` then you would want this to be `"group_acl"`. + SelectSQL string + // IndexMatcher handles variable references when indexing into the mapping. + // (ie. `input.object.acl_group_list[input.object.org_owner]`). We need one + // from the local context because the global one might not be correctly + // scoped. + IndexMatcher sqltypes.VariableMatcher + // Used if the action list isn't directly in the ACL entry. 
For example, in + // the `workspaces.group_acl` and `workspaces.user_acl` columns they're stored + // under a `"permissions"` key. + Subfield string + + // StructPath represents the path of the value in rego + // ie. input.object.group_acl -> ["input", "object", "group_acl"] + StructPath []string + + // Instance fields + Source sqltypes.RegoSource + GroupNode sqltypes.Node +} + +func ACLMappingMatcher(indexMatcher sqltypes.VariableMatcher, selectSQL string, structPath []string) ACLMappingVar { + return ACLMappingVar{IndexMatcher: indexMatcher, SelectSQL: selectSQL, StructPath: structPath} +} + +func (g ACLMappingVar) UsingSubfield(subfield string) ACLMappingVar { + g.Subfield = subfield + return g +} + +func (ACLMappingVar) UseAs() sqltypes.Node { return ACLMappingVar{} } + +func (g ACLMappingVar) ConvertVariable(rego ast.Ref) (sqltypes.Node, bool) { + // "left" will be a map of group names to actions in rego. + // { + // "all_users": ["read"] + // } + left, err := sqltypes.RegoVarPath(g.StructPath, rego) + if err != nil { + return nil, false + } + + aclGrp := ACLMappingVar{ + SelectSQL: g.SelectSQL, + IndexMatcher: g.IndexMatcher, + Subfield: g.Subfield, + + StructPath: g.StructPath, + + Source: sqltypes.RegoSource(rego.String()), + } + + // We expect 1 more term. Either a ref or a string. + if len(left) != 1 { + return nil, false + } + + // If the remaining is a variable, then we need to convert it. + // Assuming we support variable fields. + ref, ok := left[0].Value.(ast.Ref) + if ok && g.IndexMatcher != nil { + groupNode, ok := g.IndexMatcher.ConvertVariable(ref) + if ok { + aclGrp.GroupNode = groupNode + return aclGrp, true + } + } + + // If it is a string, we assume it is a literal + groupName, ok := left[0].Value.(ast.String) + if ok { + aclGrp.GroupNode = sqltypes.String(string(groupName)) + return aclGrp, true + } + + // If we have not matched it yet, then it is something we do not recognize. + return nil, false +} + +func (g ACLMappingVar) SQLString(cfg *sqltypes.SQLGenerator) string { + if g.Subfield != "" { + // We can't use subsequent -> operators because the first one might return + // NULL, which would result in an error like "column does not exist"' from + // the second. + return fmt.Sprintf("%s#>array[%s, '%s']", g.SelectSQL, g.GroupNode.SQLString(cfg), g.Subfield) + } + return fmt.Sprintf("%s->%s", g.SelectSQL, g.GroupNode.SQLString(cfg)) +} + +func (g ACLMappingVar) ContainsSQL(cfg *sqltypes.SQLGenerator, other sqltypes.Node) (string, error) { + switch other.UseAs().(type) { + // Only supports containing other strings. + case sqltypes.AstString: + return fmt.Sprintf("%s ? %s", g.SQLString(cfg), other.SQLString(cfg)), nil + default: + return "", xerrors.Errorf("unsupported acl group contains %T", other) + } +} diff --git a/coderd/rbac/regosql/compile_test.go b/coderd/rbac/regosql/compile_test.go index 07e8e7245a53e..7bea7f76fd485 100644 --- a/coderd/rbac/regosql/compile_test.go +++ b/coderd/rbac/regosql/compile_test.go @@ -193,10 +193,30 @@ func TestRegoQueries(t *testing.T) { `"read" in input.object.acl_user_list["d5389ccc-57a4-4b13-8c3f-31747bcdc9f1"]`, `"*" in input.object.acl_user_list["d5389ccc-57a4-4b13-8c3f-31747bcdc9f1"]`, }, - ExpectedSQL: "((user_acl->'d5389ccc-57a4-4b13-8c3f-31747bcdc9f1' ? 'read') OR " + - "(user_acl->'d5389ccc-57a4-4b13-8c3f-31747bcdc9f1' ? '*'))", + ExpectedSQL: "((user_acl->'d5389ccc-57a4-4b13-8c3f-31747bcdc9f1' ? 'read')" + + " OR (user_acl->'d5389ccc-57a4-4b13-8c3f-31747bcdc9f1' ? 
'*'))", VariableConverter: regosql.DefaultVariableConverter(), }, + { + Name: "UserWorkspaceACLAllow", + Queries: []string{ + `"read" in input.object.acl_user_list["d5389ccc-57a4-4b13-8c3f-31747bcdc9f1"]`, + `"*" in input.object.acl_user_list["d5389ccc-57a4-4b13-8c3f-31747bcdc9f1"]`, + }, + ExpectedSQL: "((workspaces.user_acl#>array['d5389ccc-57a4-4b13-8c3f-31747bcdc9f1', 'permissions'] ? 'read')" + + " OR (workspaces.user_acl#>array['d5389ccc-57a4-4b13-8c3f-31747bcdc9f1', 'permissions'] ? '*'))", + VariableConverter: regosql.WorkspaceConverter(), + }, + { + Name: "GroupWorkspaceACLAllow", + Queries: []string{ + `"read" in input.object.acl_group_list["96c55a0e-73b4-44fc-abac-70d53c35c04c"]`, + `"*" in input.object.acl_group_list["96c55a0e-73b4-44fc-abac-70d53c35c04c"]`, + }, + ExpectedSQL: "((workspaces.group_acl#>array['96c55a0e-73b4-44fc-abac-70d53c35c04c', 'permissions'] ? 'read')" + + " OR (workspaces.group_acl#>array['96c55a0e-73b4-44fc-abac-70d53c35c04c', 'permissions'] ? '*'))", + VariableConverter: regosql.WorkspaceConverter(), + }, { Name: "NoACLConfig", Queries: []string{ diff --git a/coderd/rbac/regosql/configs.go b/coderd/rbac/regosql/configs.go index 69d425d9dba2f..1c1e126ff692e 100644 --- a/coderd/rbac/regosql/configs.go +++ b/coderd/rbac/regosql/configs.go @@ -14,12 +14,12 @@ func userOwnerMatcher() sqltypes.VariableMatcher { return sqltypes.StringVarMatcher("owner_id :: text", []string{"input", "object", "owner"}) } -func groupACLMatcher(m sqltypes.VariableMatcher) sqltypes.VariableMatcher { - return ACLGroupMatcher(m, "group_acl", []string{"input", "object", "acl_group_list"}) +func groupACLMatcher(m sqltypes.VariableMatcher) ACLMappingVar { + return ACLMappingMatcher(m, "group_acl", []string{"input", "object", "acl_group_list"}) } -func userACLMatcher(m sqltypes.VariableMatcher) sqltypes.VariableMatcher { - return ACLGroupMatcher(m, "user_acl", []string{"input", "object", "acl_user_list"}) +func userACLMatcher(m sqltypes.VariableMatcher) ACLMappingVar { + return ACLMappingMatcher(m, "user_acl", []string{"input", "object", "acl_user_list"}) } func TemplateConverter() *sqltypes.VariableConverter { @@ -36,6 +36,20 @@ func TemplateConverter() *sqltypes.VariableConverter { return matcher } +func WorkspaceConverter() *sqltypes.VariableConverter { + matcher := sqltypes.NewVariableConverter().RegisterMatcher( + resourceIDMatcher(), + sqltypes.StringVarMatcher("workspaces.organization_id :: text", []string{"input", "object", "org_owner"}), + userOwnerMatcher(), + ) + matcher.RegisterMatcher( + ACLMappingMatcher(matcher, "workspaces.group_acl", []string{"input", "object", "acl_group_list"}).UsingSubfield("permissions"), + ACLMappingMatcher(matcher, "workspaces.user_acl", []string{"input", "object", "acl_user_list"}).UsingSubfield("permissions"), + ) + + return matcher +} + func AuditLogConverter() *sqltypes.VariableConverter { matcher := sqltypes.NewVariableConverter().RegisterMatcher( resourceIDMatcher(), @@ -81,20 +95,6 @@ func UserConverter() *sqltypes.VariableConverter { return matcher } -func WorkspaceConverter() *sqltypes.VariableConverter { - matcher := sqltypes.NewVariableConverter().RegisterMatcher( - resourceIDMatcher(), - sqltypes.StringVarMatcher("workspaces.organization_id :: text", []string{"input", "object", "org_owner"}), - userOwnerMatcher(), - ) - matcher.RegisterMatcher( - sqltypes.AlwaysFalse(groupACLMatcher(matcher)), - sqltypes.AlwaysFalse(userACLMatcher(matcher)), - ) - - return matcher -} - // NoACLConverter should be used when the target SQL table does not 
contain // group or user ACL columns. func NoACLConverter() *sqltypes.VariableConverter { diff --git a/coderd/templateversions.go b/coderd/templateversions.go index 2a6e09d94978e..2c02268bba0a9 100644 --- a/coderd/templateversions.go +++ b/coderd/templateversions.go @@ -552,6 +552,7 @@ func (api *API) postTemplateVersionDryRun(rw http.ResponseWriter, r *http.Reques Valid: true, RawMessage: metadataRaw, }, + LogsOverflowed: false, }) if err != nil { httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ @@ -1646,6 +1647,7 @@ func (api *API) postTemplateVersionsByOrganization(rw http.ResponseWriter, r *ht Valid: true, RawMessage: traceMetadataRaw, }, + LogsOverflowed: false, }) if err != nil { httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ diff --git a/coderd/wsbuilder/wsbuilder.go b/coderd/wsbuilder/wsbuilder.go index 52567b463baac..73e449ee5bb93 100644 --- a/coderd/wsbuilder/wsbuilder.go +++ b/coderd/wsbuilder/wsbuilder.go @@ -409,6 +409,7 @@ func (b *Builder) buildTx(authFunc func(action policy.Action, object rbac.Object Valid: true, RawMessage: traceMetadataRaw, }, + LogsOverflowed: false, }) if err != nil { return nil, nil, nil, BuildError{http.StatusInternalServerError, "insert provisioner job", err} diff --git a/codersdk/provisionerdaemons.go b/codersdk/provisionerdaemons.go index 5fbda371b8f3f..e36f995f1688e 100644 --- a/codersdk/provisionerdaemons.go +++ b/codersdk/provisionerdaemons.go @@ -188,6 +188,7 @@ type ProvisionerJob struct { Type ProvisionerJobType `json:"type" table:"type"` AvailableWorkers []uuid.UUID `json:"available_workers,omitempty" format:"uuid" table:"available workers"` Metadata ProvisionerJobMetadata `json:"metadata" table:"metadata,recursive_inline"` + LogsOverflowed bool `json:"logs_overflowed" table:"logs overflowed"` } // ProvisionerJobLog represents the provisioner log entry annotated with source and level. diff --git a/docs/admin/security/audit-logs.md b/docs/admin/security/audit-logs.md index 0a4b21a915315..0232c3d45a0c2 100644 --- a/docs/admin/security/audit-logs.md +++ b/docs/admin/security/audit-logs.md @@ -37,7 +37,7 @@ We track the following resources: | User
create, write, delete |  | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody><tr><td>avatar_url</td><td>false</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>deleted</td><td>true</td></tr><tr><td>email</td><td>true</td></tr><tr><td>github_com_user_id</td><td>false</td></tr><tr><td>hashed_one_time_passcode</td><td>false</td></tr><tr><td>hashed_password</td><td>true</td></tr><tr><td>id</td><td>true</td></tr><tr><td>is_system</td><td>true</td></tr><tr><td>last_seen_at</td><td>false</td></tr><tr><td>login_type</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>one_time_passcode_expires_at</td><td>true</td></tr><tr><td>quiet_hours_schedule</td><td>true</td></tr><tr><td>rbac_roles</td><td>true</td></tr><tr><td>status</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>username</td><td>true</td></tr></tbody></table> |
| WorkspaceBuild<br>start, stop |  | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody><tr><td>ai_task_sidebar_app_id</td><td>false</td></tr><tr><td>build_number</td><td>false</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>daily_cost</td><td>false</td></tr><tr><td>deadline</td><td>false</td></tr><tr><td>has_ai_task</td><td>false</td></tr><tr><td>id</td><td>false</td></tr><tr><td>initiator_by_avatar_url</td><td>false</td></tr><tr><td>initiator_by_name</td><td>false</td></tr><tr><td>initiator_by_username</td><td>false</td></tr><tr><td>initiator_id</td><td>false</td></tr><tr><td>job_id</td><td>false</td></tr><tr><td>max_deadline</td><td>false</td></tr><tr><td>provisioner_state</td><td>false</td></tr><tr><td>reason</td><td>false</td></tr><tr><td>template_version_id</td><td>true</td></tr><tr><td>template_version_preset_id</td><td>false</td></tr><tr><td>transition</td><td>false</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>workspace_id</td><td>false</td></tr></tbody></table> |
| WorkspaceProxy<br> |  | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody><tr><td>created_at</td><td>true</td></tr><tr><td>deleted</td><td>false</td></tr><tr><td>derp_enabled</td><td>true</td></tr><tr><td>derp_only</td><td>true</td></tr><tr><td>display_name</td><td>true</td></tr><tr><td>icon</td><td>true</td></tr><tr><td>id</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>region_id</td><td>true</td></tr><tr><td>token_hashed_secret</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>url</td><td>true</td></tr><tr><td>version</td><td>true</td></tr><tr><td>wildcard_hostname</td><td>true</td></tr></tbody></table> |
-| WorkspaceTable<br> |  | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody><tr><td>automatic_updates</td><td>true</td></tr><tr><td>autostart_schedule</td><td>true</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>deleted</td><td>false</td></tr><tr><td>deleting_at</td><td>true</td></tr><tr><td>dormant_at</td><td>true</td></tr><tr><td>favorite</td><td>true</td></tr><tr><td>id</td><td>true</td></tr><tr><td>last_used_at</td><td>false</td></tr><tr><td>name</td><td>true</td></tr><tr><td>next_start_at</td><td>true</td></tr><tr><td>organization_id</td><td>false</td></tr><tr><td>owner_id</td><td>true</td></tr><tr><td>template_id</td><td>true</td></tr><tr><td>ttl</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr></tbody></table> |
+| WorkspaceTable<br> |  | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody><tr><td>automatic_updates</td><td>true</td></tr><tr><td>autostart_schedule</td><td>true</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>deleted</td><td>false</td></tr><tr><td>deleting_at</td><td>true</td></tr><tr><td>dormant_at</td><td>true</td></tr><tr><td>favorite</td><td>true</td></tr><tr><td>group_acl</td><td>true</td></tr><tr><td>id</td><td>true</td></tr><tr><td>last_used_at</td><td>false</td></tr><tr><td>name</td><td>true</td></tr><tr><td>next_start_at</td><td>true</td></tr><tr><td>organization_id</td><td>false</td></tr><tr><td>owner_id</td><td>true</td></tr><tr><td>template_id</td><td>true</td></tr><tr><td>ttl</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>user_acl</td><td>true</td></tr></tbody></table>
| diff --git a/docs/reference/api/builds.md b/docs/reference/api/builds.md index fb491405df362..a465575baeaa3 100644 --- a/docs/reference/api/builds.md +++ b/docs/reference/api/builds.md @@ -52,6 +52,7 @@ curl -X GET http://coder-server:8080/api/v2/users/{user}/workspace/{workspacenam "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", @@ -289,6 +290,7 @@ curl -X GET http://coder-server:8080/api/v2/workspacebuilds/{workspacebuild} \ "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", @@ -1015,6 +1017,7 @@ curl -X GET http://coder-server:8080/api/v2/workspacebuilds/{workspacebuild}/sta "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", @@ -1325,6 +1328,7 @@ curl -X GET http://coder-server:8080/api/v2/workspaces/{workspace}/builds \ "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", @@ -1540,6 +1544,7 @@ Status Code **200** | `»»» error` | string | false | | | | `»»» template_version_id` | string(uuid) | false | | | | `»»» workspace_build_id` | string(uuid) | false | | | +| `»» logs_overflowed` | boolean | false | | | | `»» metadata` | [codersdk.ProvisionerJobMetadata](schemas.md#codersdkprovisionerjobmetadata) | false | | | | `»»» template_display_name` | string | false | | | | `»»» template_icon` | string | false | | | @@ -1816,6 +1821,7 @@ curl -X POST http://coder-server:8080/api/v2/workspaces/{workspace}/builds \ "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", diff --git a/docs/reference/api/organizations.md b/docs/reference/api/organizations.md index 497e3f56d4e47..d418a1fcba106 100644 --- a/docs/reference/api/organizations.md +++ b/docs/reference/api/organizations.md @@ -407,6 +407,7 @@ curl -X GET http://coder-server:8080/api/v2/organizations/{organization}/provisi "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", @@ -457,6 +458,7 @@ Status Code **200** | `»» error` | string | false | | | | `»» template_version_id` | string(uuid) | false | | | | `»» workspace_build_id` | string(uuid) | false | | | +| `» logs_overflowed` | boolean | false | | | | `» metadata` | [codersdk.ProvisionerJobMetadata](schemas.md#codersdkprovisionerjobmetadata) | false | | | | `»» template_display_name` | string | false | | | | `»» template_icon` | string | false | | | @@ -534,6 +536,7 @@ curl -X GET http://coder-server:8080/api/v2/organizations/{organization}/provisi "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { 
"template_display_name": "string", "template_icon": "string", diff --git a/docs/reference/api/schemas.md b/docs/reference/api/schemas.md index 033ef6e196972..dcbd00628e979 100644 --- a/docs/reference/api/schemas.md +++ b/docs/reference/api/schemas.md @@ -5906,6 +5906,7 @@ Only certain features set these fields: - FeatureManagedAgentLimit| "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", @@ -5943,6 +5944,7 @@ Only certain features set these fields: - FeatureManagedAgentLimit| | `file_id` | string | false | | | | `id` | string | false | | | | `input` | [codersdk.ProvisionerJobInput](#codersdkprovisionerjobinput) | false | | | +| `logs_overflowed` | boolean | false | | | | `metadata` | [codersdk.ProvisionerJobMetadata](#codersdkprovisionerjobmetadata) | false | | | | `organization_id` | string | false | | | | `queue_position` | integer | false | | | @@ -7626,6 +7628,7 @@ Restarts will only happen on weekdays in this list on weeks which line up with W "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", @@ -8802,6 +8805,7 @@ If the schedule is empty, the user will be updated to use the default schedule.| "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", @@ -9911,6 +9915,7 @@ If the schedule is empty, the user will be updated to use the default schedule.| "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", @@ -10642,6 +10647,7 @@ If the schedule is empty, the user will be updated to use the default schedule.| "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", diff --git a/docs/reference/api/templates.md b/docs/reference/api/templates.md index 0db4ef8d04879..ea2e2c50cca7f 100644 --- a/docs/reference/api/templates.md +++ b/docs/reference/api/templates.md @@ -479,6 +479,7 @@ curl -X GET http://coder-server:8080/api/v2/organizations/{organization}/templat "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", @@ -577,6 +578,7 @@ curl -X GET http://coder-server:8080/api/v2/organizations/{organization}/templat "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", @@ -699,6 +701,7 @@ curl -X POST http://coder-server:8080/api/v2/organizations/{organization}/templa "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": 
"string", @@ -1264,6 +1267,7 @@ curl -X GET http://coder-server:8080/api/v2/templates/{template}/versions \ "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", @@ -1337,6 +1341,7 @@ Status Code **200** | `»»» error` | string | false | | | | `»»» template_version_id` | string(uuid) | false | | | | `»»» workspace_build_id` | string(uuid) | false | | | +| `»» logs_overflowed` | boolean | false | | | | `»» metadata` | [codersdk.ProvisionerJobMetadata](schemas.md#codersdkprovisionerjobmetadata) | false | | | | `»»» template_display_name` | string | false | | | | `»»» template_icon` | string | false | | | @@ -1543,6 +1548,7 @@ curl -X GET http://coder-server:8080/api/v2/templates/{template}/versions/{templ "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", @@ -1616,6 +1622,7 @@ Status Code **200** | `»»» error` | string | false | | | | `»»» template_version_id` | string(uuid) | false | | | | `»»» workspace_build_id` | string(uuid) | false | | | +| `»» logs_overflowed` | boolean | false | | | | `»» metadata` | [codersdk.ProvisionerJobMetadata](schemas.md#codersdkprovisionerjobmetadata) | false | | | | `»»» template_display_name` | string | false | | | | `»»» template_icon` | string | false | | | @@ -1712,6 +1719,7 @@ curl -X GET http://coder-server:8080/api/v2/templateversions/{templateversion} \ "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", @@ -1819,6 +1827,7 @@ curl -X PATCH http://coder-server:8080/api/v2/templateversions/{templateversion} "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", @@ -2016,6 +2025,7 @@ curl -X POST http://coder-server:8080/api/v2/templateversions/{templateversion}/ "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", @@ -2089,6 +2099,7 @@ curl -X GET http://coder-server:8080/api/v2/templateversions/{templateversion}/d "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", diff --git a/docs/reference/api/workspaces.md b/docs/reference/api/workspaces.md index debcb421e02e3..d7187259b5bb6 100644 --- a/docs/reference/api/workspaces.md +++ b/docs/reference/api/workspaces.md @@ -107,6 +107,7 @@ of the template will be used. 
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", @@ -394,6 +395,7 @@ curl -X GET http://coder-server:8080/api/v2/users/{user}/workspace/{workspacenam "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", @@ -706,6 +708,7 @@ of the template will be used. "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", @@ -996,6 +999,7 @@ curl -X GET http://coder-server:8080/api/v2/workspaces \ "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", @@ -1267,6 +1271,7 @@ curl -X GET http://coder-server:8080/api/v2/workspaces/{workspace} \ "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", @@ -1670,6 +1675,7 @@ curl -X PUT http://coder-server:8080/api/v2/workspaces/{workspace}/dormant \ "template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1", "workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478" }, + "logs_overflowed": true, "metadata": { "template_display_name": "string", "template_icon": "string", diff --git a/docs/reference/cli/provisioner_jobs_list.md b/docs/reference/cli/provisioner_jobs_list.md index 07ad02f419bde..a0bff8554d610 100644 --- a/docs/reference/cli/provisioner_jobs_list.md +++ b/docs/reference/cli/provisioner_jobs_list.md @@ -45,10 +45,10 @@ Select which organization (uuid or name) to use. 
### -c, --column -| | | -|---------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Type | [id\|created at\|started at\|completed at\|canceled at\|error\|error code\|status\|worker id\|worker name\|file id\|tags\|queue position\|queue size\|organization id\|template version id\|workspace build id\|type\|available workers\|template version name\|template id\|template name\|template display name\|template icon\|workspace id\|workspace name\|organization\|queue] | -| Default | created at,id,type,template display name,status,queue,tags | +| | | +|---------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Type | [id\|created at\|started at\|completed at\|canceled at\|error\|error code\|status\|worker id\|worker name\|file id\|tags\|queue position\|queue size\|organization id\|template version id\|workspace build id\|type\|available workers\|template version name\|template id\|template name\|template display name\|template icon\|workspace id\|workspace name\|logs overflowed\|organization\|queue] | +| Default | created at,id,type,template display name,status,queue,tags | Columns to display in table output. diff --git a/enterprise/audit/table.go b/enterprise/audit/table.go index c767e06e228dd..1ad76a1e44ca9 100644 --- a/enterprise/audit/table.go +++ b/enterprise/audit/table.go @@ -173,6 +173,8 @@ var auditableResourcesTypes = map[any]map[string]Action{ "automatic_updates": ActionTrack, "favorite": ActionTrack, "next_start_at": ActionTrack, + "group_acl": ActionTrack, + "user_acl": ActionTrack, }, &database.WorkspaceBuild{}: { "id": ActionIgnore, diff --git a/enterprise/cli/testdata/coder_provisioner_jobs_list_--help.golden b/enterprise/cli/testdata/coder_provisioner_jobs_list_--help.golden index f380a0334867c..8e22f78e978f2 100644 --- a/enterprise/cli/testdata/coder_provisioner_jobs_list_--help.golden +++ b/enterprise/cli/testdata/coder_provisioner_jobs_list_--help.golden @@ -11,7 +11,7 @@ OPTIONS: -O, --org string, $CODER_ORGANIZATION Select which organization (uuid or name) to use. 
- -c, --column [id|created at|started at|completed at|canceled at|error|error code|status|worker id|worker name|file id|tags|queue position|queue size|organization id|template version id|workspace build id|type|available workers|template version name|template id|template name|template display name|template icon|workspace id|workspace name|organization|queue] (default: created at,id,type,template display name,status,queue,tags) + -c, --column [id|created at|started at|completed at|canceled at|error|error code|status|worker id|worker name|file id|tags|queue position|queue size|organization id|template version id|workspace build id|type|available workers|template version name|template id|template name|template display name|template icon|workspace id|workspace name|logs overflowed|organization|queue] (default: created at,id,type,template display name,status,queue,tags) Columns to display in table output. -l, --limit int, $CODER_PROVISIONER_JOB_LIST_LIMIT (default: 50) diff --git a/site/src/api/typesGenerated.ts b/site/src/api/typesGenerated.ts index 6165198c6fa23..3bd870d60159f 100644 --- a/site/src/api/typesGenerated.ts +++ b/site/src/api/typesGenerated.ts @@ -2156,6 +2156,7 @@ export interface ProvisionerJob { readonly type: ProvisionerJobType; readonly available_workers?: readonly string[]; readonly metadata: ProvisionerJobMetadata; + readonly logs_overflowed: boolean; } // From codersdk/provisionerdaemons.go diff --git a/site/src/modules/resources/AgentRow.tsx b/site/src/modules/resources/AgentRow.tsx index 20c551fc73065..ab0e5884c48e9 100644 --- a/site/src/modules/resources/AgentRow.tsx +++ b/site/src/modules/resources/AgentRow.tsx @@ -87,7 +87,8 @@ export const AgentRow: FC = ({ logs.push({ id: -1, level: "error", - output: "Startup logs exceeded the max size of 1MB!", + output: + "Startup logs exceeded the max size of 1MB, and will not continue to be written to the database! 
Logs will continue to be written to the /tmp/coder-startup-script.log file in the workspace.", created_at: new Date().toISOString(), source_id: "", }); diff --git a/site/src/modules/workspaces/WorkspaceBuildLogs/WorkspaceBuildLogs.tsx b/site/src/modules/workspaces/WorkspaceBuildLogs/WorkspaceBuildLogs.tsx index fcf6f0dbee549..20c929406d32c 100644 --- a/site/src/modules/workspaces/WorkspaceBuildLogs/WorkspaceBuildLogs.tsx +++ b/site/src/modules/workspaces/WorkspaceBuildLogs/WorkspaceBuildLogs.tsx @@ -1,9 +1,9 @@ import { type Interpolation, type Theme, useTheme } from "@emotion/react"; -import type { ProvisionerJobLog } from "api/typesGenerated"; +import type { ProvisionerJobLog, WorkspaceBuild } from "api/typesGenerated"; import type { Line } from "components/Logs/LogLine"; import { DEFAULT_LOG_LINE_SIDE_PADDING, Logs } from "components/Logs/Logs"; import dayjs from "dayjs"; -import { type FC, Fragment, type HTMLAttributes } from "react"; +import { type FC, Fragment, type HTMLAttributes, useMemo } from "react"; import { BODY_FONT_FAMILY, MONOSPACE_FONT_FAMILY } from "theme/constants"; const Language = { @@ -42,15 +42,37 @@ interface WorkspaceBuildLogsProps extends HTMLAttributes { hideTimestamps?: boolean; sticky?: boolean; logs: ProvisionerJobLog[]; + build?: WorkspaceBuild; } export const WorkspaceBuildLogs: FC = ({ hideTimestamps, sticky, logs, + build, ...attrs }) => { const theme = useTheme(); + + const processedLogs = useMemo(() => { + const allLogs = logs || []; + + // Add synthetic overflow message if needed + if (build?.job?.logs_overflowed) { + allLogs.push({ + id: -1, + created_at: new Date().toISOString(), + log_level: "error", + log_source: "provisioner", + output: + "Provisioner logs exceeded the max size of 1MB. Will not continue to write provisioner logs for workspace build.", + stage: "overflow", + }); + } + + return allLogs; + }, [logs, build?.job?.logs_overflowed]); + const groupedLogsByStage = groupLogsByStage(logs); return ( diff --git a/site/src/pages/WorkspaceBuildPage/WorkspaceBuildPageView.tsx b/site/src/pages/WorkspaceBuildPage/WorkspaceBuildPageView.tsx index 6add701c8b688..3a45653557dcc 100644 --- a/site/src/pages/WorkspaceBuildPage/WorkspaceBuildPageView.tsx +++ b/site/src/pages/WorkspaceBuildPage/WorkspaceBuildPageView.tsx @@ -212,7 +212,24 @@ export const WorkspaceBuildPageView: FC = ({ )} - {tabState.value === "build" && } + {build?.job?.logs_overflowed && ( + + Provisioner logs exceeded the max size of 1MB. Will not continue + to write provisioner logs for workspace build. + + )} + + {tabState.value === "build" && ( + + )} {tabState.value !== "build" && selectedAgent && ( )} @@ -261,7 +278,10 @@ const ScrollArea: FC> = (props) => { ); }; -const BuildLogsContent: FC<{ logs?: ProvisionerJobLog[] }> = ({ logs }) => { +const BuildLogsContent: FC<{ + logs?: ProvisionerJobLog[]; + build?: WorkspaceBuild; +}> = ({ logs, build }) => { if (!logs) { return ; } @@ -278,6 +298,7 @@ const BuildLogsContent: FC<{ logs?: ProvisionerJobLog[] }> = ({ logs }) => { }, }} logs={sortLogsByCreatedAt(logs)} + build={build} /> ); }; diff --git a/site/src/testHelpers/entities.ts b/site/src/testHelpers/entities.ts index f91a29cb48412..78dd9e4e8687a 100644 --- a/site/src/testHelpers/entities.ts +++ b/site/src/testHelpers/entities.ts @@ -689,6 +689,7 @@ export const MockProvisionerJob: TypesGen.ProvisionerJob = { template_version_name: "test-version", workspace_name: "test-workspace", }, + logs_overflowed: false, }; export const MockFailedProvisionerJob: TypesGen.ProvisionerJob = {