diff --git a/coderd/autobuild/lifecycle_executor_test.go b/coderd/autobuild/lifecycle_executor_test.go index 0229a907cbb2e..babca5431d6b7 100644 --- a/coderd/autobuild/lifecycle_executor_test.go +++ b/coderd/autobuild/lifecycle_executor_test.go @@ -1251,7 +1251,7 @@ func TestExecutorPrebuilds(t *testing.T) { }() // Then: the prebuilt workspace should remain in a start transition - prebuildStats := <-statsCh + prebuildStats := testutil.RequireReceive(ctx, t, statsCh) require.Len(t, prebuildStats.Errors, 0) require.Len(t, prebuildStats.Transitions, 0) require.Equal(t, codersdk.WorkspaceTransitionStart, prebuild.LatestBuild.Transition) @@ -1259,7 +1259,15 @@ func TestExecutorPrebuilds(t *testing.T) { require.Equal(t, codersdk.BuildReasonInitiator, prebuild.LatestBuild.Reason) // Given: a user claims the prebuilt workspace - dbWorkspace := dbgen.ClaimPrebuild(t, db, user.ID, "claimedWorkspace-autostop", preset.ID) + dbWorkspace := dbgen.ClaimPrebuild( + t, db, + clock.Now(), + user.ID, + "claimedWorkspace-autostop", + preset.ID, + sql.NullString{}, + sql.NullTime{}, + sql.NullInt64{}) workspace := coderdtest.MustWorkspace(t, client, dbWorkspace.ID) // When: the autobuild executor ticks *after* the deadline: @@ -1269,7 +1277,7 @@ func TestExecutorPrebuilds(t *testing.T) { }() // Then: the workspace should be stopped - workspaceStats := <-statsCh + workspaceStats := testutil.RequireReceive(ctx, t, statsCh) require.Len(t, workspaceStats.Errors, 0) require.Len(t, workspaceStats.Transitions, 1) require.Contains(t, workspaceStats.Transitions, workspace.ID) @@ -1336,7 +1344,7 @@ func TestExecutorPrebuilds(t *testing.T) { }() // Then: the prebuilt workspace should remain in a stop transition - prebuildStats := <-statsCh + prebuildStats := testutil.RequireReceive(ctx, t, statsCh) require.Len(t, prebuildStats.Errors, 0) require.Len(t, prebuildStats.Transitions, 0) require.Equal(t, codersdk.WorkspaceTransitionStop, prebuild.LatestBuild.Transition) @@ -1353,7 +1361,15 @@ func TestExecutorPrebuilds(t *testing.T) { database.WorkspaceTransitionStart) // Given: a user claims the prebuilt workspace - dbWorkspace := dbgen.ClaimPrebuild(t, db, user.ID, "claimedWorkspace-autostart", preset.ID) + dbWorkspace := dbgen.ClaimPrebuild( + t, db, + clock.Now(), + user.ID, + "claimedWorkspace-autostart", + preset.ID, + autostartSched, + sql.NullTime{}, + sql.NullInt64{}) workspace := coderdtest.MustWorkspace(t, client, dbWorkspace.ID) // Given: the prebuilt workspace goes to a stop status @@ -1374,7 +1390,7 @@ func TestExecutorPrebuilds(t *testing.T) { }() // Then: the workspace should eventually be started - workspaceStats := <-statsCh + workspaceStats := testutil.RequireReceive(ctx, t, statsCh) require.Len(t, workspaceStats.Errors, 0) require.Len(t, workspaceStats.Transitions, 1) require.Contains(t, workspaceStats.Transitions, workspace.ID) @@ -1486,8 +1502,8 @@ func setupTestDBWorkspaceBuild( Architecture: "i386", OperatingSystem: "linux", LifecycleState: database.WorkspaceAgentLifecycleStateReady, - StartedAt: sql.NullTime{Time: time.Now().Add(time.Hour), Valid: true}, - ReadyAt: sql.NullTime{Time: time.Now().Add(-1 * time.Hour), Valid: true}, + StartedAt: sql.NullTime{Time: clock.Now().Add(time.Hour), Valid: true}, + ReadyAt: sql.NullTime{Time: clock.Now().Add(-1 * time.Hour), Valid: true}, APIKeyScope: database.AgentKeyScopeEnumAll, }) @@ -1524,8 +1540,9 @@ func setupTestDBPrebuiltWorkspace( OrganizationID: orgID, OwnerID: database.PrebuildsSystemUserID, Deleted: false, - CreatedAt: time.Now().Add(-time.Hour * 2), 
+ CreatedAt: clock.Now().Add(-time.Hour * 2), AutostartSchedule: options.AutostartSchedule, + LastUsedAt: clock.Now(), }) setupTestDBWorkspaceBuild(ctx, t, clock, db, ps, orgID, workspace.ID, templateVersionID, presetID, buildTransition) diff --git a/coderd/database/dbauthz/dbauthz_test.go b/coderd/database/dbauthz/dbauthz_test.go index a5623fbcbcd36..2201a5455cc4c 100644 --- a/coderd/database/dbauthz/dbauthz_test.go +++ b/coderd/database/dbauthz/dbauthz_test.go @@ -218,25 +218,16 @@ func (s *MethodTestSuite) TestAPIKey() { dbm.EXPECT().GetAPIKeyByID(gomock.Any(), key.ID).Return(key, nil).AnyTimes() check.Args(key.ID).Asserts(key, policy.ActionRead).Returns(key) })) - s.Run("GetAPIKeyByName", s.Subtest(func(db database.Store, check *expects) { - dbtestutil.DisableForeignKeysAndTriggers(s.T(), db) - key, _ := dbgen.APIKey(s.T(), db, database.APIKey{ - TokenName: "marge-cat", - LoginType: database.LoginTypeToken, - }) - check.Args(database.GetAPIKeyByNameParams{ - TokenName: key.TokenName, - UserID: key.UserID, - }).Asserts(key, policy.ActionRead).Returns(key) + s.Run("GetAPIKeyByName", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + key := testutil.Fake(s.T(), faker, database.APIKey{LoginType: database.LoginTypeToken, TokenName: "marge-cat"}) + dbm.EXPECT().GetAPIKeyByName(gomock.Any(), database.GetAPIKeyByNameParams{TokenName: key.TokenName, UserID: key.UserID}).Return(key, nil).AnyTimes() + check.Args(database.GetAPIKeyByNameParams{TokenName: key.TokenName, UserID: key.UserID}).Asserts(key, policy.ActionRead).Returns(key) })) - s.Run("GetAPIKeysByLoginType", s.Subtest(func(db database.Store, check *expects) { - dbtestutil.DisableForeignKeysAndTriggers(s.T(), db) - a, _ := dbgen.APIKey(s.T(), db, database.APIKey{LoginType: database.LoginTypePassword}) - b, _ := dbgen.APIKey(s.T(), db, database.APIKey{LoginType: database.LoginTypePassword}) - _, _ = dbgen.APIKey(s.T(), db, database.APIKey{LoginType: database.LoginTypeGithub}) - check.Args(database.LoginTypePassword). - Asserts(a, policy.ActionRead, b, policy.ActionRead). - Returns(slice.New(a, b)) + s.Run("GetAPIKeysByLoginType", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + a := testutil.Fake(s.T(), faker, database.APIKey{LoginType: database.LoginTypePassword}) + b := testutil.Fake(s.T(), faker, database.APIKey{LoginType: database.LoginTypePassword}) + dbm.EXPECT().GetAPIKeysByLoginType(gomock.Any(), database.LoginTypePassword).Return([]database.APIKey{a, b}, nil).AnyTimes() + check.Args(database.LoginTypePassword).Asserts(a, policy.ActionRead, b, policy.ActionRead).Returns(slice.New(a, b)) })) s.Run("GetAPIKeysByUserID", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { u1 := testutil.Fake(s.T(), faker, database.User{}) @@ -248,228 +239,139 @@ func (s *MethodTestSuite) TestAPIKey() { Asserts(keyA, policy.ActionRead, keyB, policy.ActionRead). Returns(slice.New(keyA, keyB)) })) - s.Run("GetAPIKeysLastUsedAfter", s.Subtest(func(db database.Store, check *expects) { - dbtestutil.DisableForeignKeysAndTriggers(s.T(), db) - a, _ := dbgen.APIKey(s.T(), db, database.APIKey{LastUsed: time.Now().Add(time.Hour)}) - b, _ := dbgen.APIKey(s.T(), db, database.APIKey{LastUsed: time.Now().Add(time.Hour)}) - _, _ = dbgen.APIKey(s.T(), db, database.APIKey{LastUsed: time.Now().Add(-time.Hour)}) - check.Args(time.Now()). - Asserts(a, policy.ActionRead, b, policy.ActionRead). 
- Returns(slice.New(a, b)) + s.Run("GetAPIKeysLastUsedAfter", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) { + now := time.Now() + a := database.APIKey{LastUsed: now.Add(time.Hour)} + b := database.APIKey{LastUsed: now.Add(time.Hour)} + dbm.EXPECT().GetAPIKeysLastUsedAfter(gomock.Any(), gomock.Any()).Return([]database.APIKey{a, b}, nil).AnyTimes() + check.Args(now).Asserts(a, policy.ActionRead, b, policy.ActionRead).Returns(slice.New(a, b)) })) - s.Run("InsertAPIKey", s.Subtest(func(db database.Store, check *expects) { - u := dbgen.User(s.T(), db, database.User{}) - - check.Args(database.InsertAPIKeyParams{ - UserID: u.ID, - LoginType: database.LoginTypePassword, - Scope: database.APIKeyScopeAll, - IPAddress: defaultIPAddress(), - }).Asserts(rbac.ResourceApiKey.WithOwner(u.ID.String()), policy.ActionCreate) + s.Run("InsertAPIKey", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + u := testutil.Fake(s.T(), faker, database.User{}) + arg := database.InsertAPIKeyParams{UserID: u.ID, LoginType: database.LoginTypePassword, Scope: database.APIKeyScopeAll, IPAddress: defaultIPAddress()} + ret := testutil.Fake(s.T(), faker, database.APIKey{UserID: u.ID, LoginType: database.LoginTypePassword}) + dbm.EXPECT().InsertAPIKey(gomock.Any(), arg).Return(ret, nil).AnyTimes() + check.Args(arg).Asserts(rbac.ResourceApiKey.WithOwner(u.ID.String()), policy.ActionCreate) })) - s.Run("UpdateAPIKeyByID", s.Subtest(func(db database.Store, check *expects) { - u := dbgen.User(s.T(), db, database.User{}) - a, _ := dbgen.APIKey(s.T(), db, database.APIKey{UserID: u.ID, IPAddress: defaultIPAddress()}) - check.Args(database.UpdateAPIKeyByIDParams{ - ID: a.ID, - IPAddress: defaultIPAddress(), - LastUsed: time.Now(), - ExpiresAt: time.Now().Add(time.Hour), - }).Asserts(a, policy.ActionUpdate).Returns() - })) - s.Run("DeleteApplicationConnectAPIKeysByUserID", s.Subtest(func(db database.Store, check *expects) { - dbtestutil.DisableForeignKeysAndTriggers(s.T(), db) - a, _ := dbgen.APIKey(s.T(), db, database.APIKey{ - Scope: database.APIKeyScopeApplicationConnect, - }) + s.Run("UpdateAPIKeyByID", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + u := testutil.Fake(s.T(), faker, database.User{}) + a := testutil.Fake(s.T(), faker, database.APIKey{UserID: u.ID, IPAddress: defaultIPAddress()}) + arg := database.UpdateAPIKeyByIDParams{ID: a.ID, IPAddress: defaultIPAddress(), LastUsed: time.Now(), ExpiresAt: time.Now().Add(time.Hour)} + dbm.EXPECT().GetAPIKeyByID(gomock.Any(), a.ID).Return(a, nil).AnyTimes() + dbm.EXPECT().UpdateAPIKeyByID(gomock.Any(), arg).Return(nil).AnyTimes() + check.Args(arg).Asserts(a, policy.ActionUpdate).Returns() + })) + s.Run("DeleteApplicationConnectAPIKeysByUserID", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + a := testutil.Fake(s.T(), faker, database.APIKey{Scope: database.APIKeyScopeApplicationConnect}) + dbm.EXPECT().DeleteApplicationConnectAPIKeysByUserID(gomock.Any(), a.UserID).Return(nil).AnyTimes() check.Args(a.UserID).Asserts(rbac.ResourceApiKey.WithOwner(a.UserID.String()), policy.ActionDelete).Returns() })) - s.Run("DeleteExternalAuthLink", s.Subtest(func(db database.Store, check *expects) { - a := dbgen.ExternalAuthLink(s.T(), db, database.ExternalAuthLink{}) - check.Args(database.DeleteExternalAuthLinkParams{ - ProviderID: a.ProviderID, - UserID: a.UserID, - }).Asserts(rbac.ResourceUserObject(a.UserID), policy.ActionUpdatePersonal).Returns() + 
s.Run("DeleteExternalAuthLink", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + a := testutil.Fake(s.T(), faker, database.ExternalAuthLink{}) + dbm.EXPECT().GetExternalAuthLink(gomock.Any(), database.GetExternalAuthLinkParams{ProviderID: a.ProviderID, UserID: a.UserID}).Return(a, nil).AnyTimes() + dbm.EXPECT().DeleteExternalAuthLink(gomock.Any(), database.DeleteExternalAuthLinkParams{ProviderID: a.ProviderID, UserID: a.UserID}).Return(nil).AnyTimes() + check.Args(database.DeleteExternalAuthLinkParams{ProviderID: a.ProviderID, UserID: a.UserID}).Asserts(a, policy.ActionUpdatePersonal).Returns() })) - s.Run("GetExternalAuthLinksByUserID", s.Subtest(func(db database.Store, check *expects) { - a := dbgen.ExternalAuthLink(s.T(), db, database.ExternalAuthLink{}) - b := dbgen.ExternalAuthLink(s.T(), db, database.ExternalAuthLink{ - UserID: a.UserID, - }) - check.Args(a.UserID).Asserts( - rbac.ResourceUserObject(a.UserID), policy.ActionReadPersonal, - rbac.ResourceUserObject(b.UserID), policy.ActionReadPersonal) + s.Run("GetExternalAuthLinksByUserID", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + a := testutil.Fake(s.T(), faker, database.ExternalAuthLink{}) + b := testutil.Fake(s.T(), faker, database.ExternalAuthLink{UserID: a.UserID}) + dbm.EXPECT().GetExternalAuthLinksByUserID(gomock.Any(), a.UserID).Return([]database.ExternalAuthLink{a, b}, nil).AnyTimes() + check.Args(a.UserID).Asserts(a, policy.ActionReadPersonal, b, policy.ActionReadPersonal) })) } func (s *MethodTestSuite) TestAuditLogs() { - s.Run("InsertAuditLog", s.Subtest(func(db database.Store, check *expects) { - check.Args(database.InsertAuditLogParams{ - ResourceType: database.ResourceTypeOrganization, - Action: database.AuditActionCreate, - Diff: json.RawMessage("{}"), - AdditionalFields: json.RawMessage("{}"), - }).Asserts(rbac.ResourceAuditLog, policy.ActionCreate) - })) - s.Run("GetAuditLogsOffset", s.Subtest(func(db database.Store, check *expects) { - _ = dbgen.AuditLog(s.T(), db, database.AuditLog{}) - _ = dbgen.AuditLog(s.T(), db, database.AuditLog{}) - check.Args(database.GetAuditLogsOffsetParams{ - LimitOpt: 10, - }).Asserts(rbac.ResourceAuditLog, policy.ActionRead).WithNotAuthorized("nil") - })) - s.Run("GetAuthorizedAuditLogsOffset", s.Subtest(func(db database.Store, check *expects) { - dbtestutil.DisableForeignKeysAndTriggers(s.T(), db) - _ = dbgen.AuditLog(s.T(), db, database.AuditLog{}) - _ = dbgen.AuditLog(s.T(), db, database.AuditLog{}) - check.Args(database.GetAuditLogsOffsetParams{ - LimitOpt: 10, - }, emptyPreparedAuthorized{}).Asserts(rbac.ResourceAuditLog, policy.ActionRead) - })) - s.Run("CountAuditLogs", s.Subtest(func(db database.Store, check *expects) { - _ = dbgen.AuditLog(s.T(), db, database.AuditLog{}) - _ = dbgen.AuditLog(s.T(), db, database.AuditLog{}) + s.Run("InsertAuditLog", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) { + arg := database.InsertAuditLogParams{ResourceType: database.ResourceTypeOrganization, Action: database.AuditActionCreate, Diff: json.RawMessage("{}"), AdditionalFields: json.RawMessage("{}")} + dbm.EXPECT().InsertAuditLog(gomock.Any(), arg).Return(database.AuditLog{}, nil).AnyTimes() + check.Args(arg).Asserts(rbac.ResourceAuditLog, policy.ActionCreate) + })) + s.Run("GetAuditLogsOffset", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) { + arg := database.GetAuditLogsOffsetParams{LimitOpt: 10} + dbm.EXPECT().GetAuditLogsOffset(gomock.Any(), 
arg).Return([]database.GetAuditLogsOffsetRow{}, nil).AnyTimes() + dbm.EXPECT().GetAuthorizedAuditLogsOffset(gomock.Any(), arg, gomock.Any()).Return([]database.GetAuditLogsOffsetRow{}, nil).AnyTimes() + check.Args(arg).Asserts(rbac.ResourceAuditLog, policy.ActionRead).WithNotAuthorized("nil") + })) + s.Run("GetAuthorizedAuditLogsOffset", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) { + arg := database.GetAuditLogsOffsetParams{LimitOpt: 10} + dbm.EXPECT().GetAuthorizedAuditLogsOffset(gomock.Any(), arg, gomock.Any()).Return([]database.GetAuditLogsOffsetRow{}, nil).AnyTimes() + dbm.EXPECT().GetAuditLogsOffset(gomock.Any(), arg).Return([]database.GetAuditLogsOffsetRow{}, nil).AnyTimes() + check.Args(arg, emptyPreparedAuthorized{}).Asserts(rbac.ResourceAuditLog, policy.ActionRead) + })) + s.Run("CountAuditLogs", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) { + dbm.EXPECT().CountAuditLogs(gomock.Any(), database.CountAuditLogsParams{}).Return(int64(0), nil).AnyTimes() + dbm.EXPECT().CountAuthorizedAuditLogs(gomock.Any(), database.CountAuditLogsParams{}, gomock.Any()).Return(int64(0), nil).AnyTimes() check.Args(database.CountAuditLogsParams{}).Asserts(rbac.ResourceAuditLog, policy.ActionRead).WithNotAuthorized("nil") })) - s.Run("CountAuthorizedAuditLogs", s.Subtest(func(db database.Store, check *expects) { - _ = dbgen.AuditLog(s.T(), db, database.AuditLog{}) - _ = dbgen.AuditLog(s.T(), db, database.AuditLog{}) + s.Run("CountAuthorizedAuditLogs", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) { + dbm.EXPECT().CountAuthorizedAuditLogs(gomock.Any(), database.CountAuditLogsParams{}, gomock.Any()).Return(int64(0), nil).AnyTimes() + dbm.EXPECT().CountAuditLogs(gomock.Any(), database.CountAuditLogsParams{}).Return(int64(0), nil).AnyTimes() check.Args(database.CountAuditLogsParams{}, emptyPreparedAuthorized{}).Asserts(rbac.ResourceAuditLog, policy.ActionRead) })) - s.Run("DeleteOldAuditLogConnectionEvents", s.Subtest(func(db database.Store, check *expects) { - _ = dbgen.AuditLog(s.T(), db, database.AuditLog{}) + s.Run("DeleteOldAuditLogConnectionEvents", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) { + dbm.EXPECT().DeleteOldAuditLogConnectionEvents(gomock.Any(), database.DeleteOldAuditLogConnectionEventsParams{}).Return(nil).AnyTimes() check.Args(database.DeleteOldAuditLogConnectionEventsParams{}).Asserts(rbac.ResourceSystem, policy.ActionDelete) })) } func (s *MethodTestSuite) TestConnectionLogs() { - createWorkspace := func(t *testing.T, db database.Store) database.WorkspaceTable { - u := dbgen.User(s.T(), db, database.User{}) - o := dbgen.Organization(s.T(), db, database.Organization{}) - tpl := dbgen.Template(s.T(), db, database.Template{ - OrganizationID: o.ID, - CreatedBy: u.ID, - }) - return dbgen.Workspace(s.T(), db, database.WorkspaceTable{ - ID: uuid.New(), - OwnerID: u.ID, - OrganizationID: o.ID, - AutomaticUpdates: database.AutomaticUpdatesNever, - TemplateID: tpl.ID, - }) - } - s.Run("UpsertConnectionLog", s.Subtest(func(db database.Store, check *expects) { - ws := createWorkspace(s.T(), db) - check.Args(database.UpsertConnectionLogParams{ - Ip: defaultIPAddress(), - Type: database.ConnectionTypeSsh, - WorkspaceID: ws.ID, - OrganizationID: ws.OrganizationID, - ConnectionStatus: database.ConnectionStatusConnected, - WorkspaceOwnerID: ws.OwnerID, - }).Asserts(rbac.ResourceConnectionLog, policy.ActionUpdate) - })) - s.Run("GetConnectionLogsOffset", s.Subtest(func(db database.Store, check 
*expects) { - ws := createWorkspace(s.T(), db) - _ = dbgen.ConnectionLog(s.T(), db, database.UpsertConnectionLogParams{ - Ip: defaultIPAddress(), - Type: database.ConnectionTypeSsh, - WorkspaceID: ws.ID, - OrganizationID: ws.OrganizationID, - WorkspaceOwnerID: ws.OwnerID, - }) - _ = dbgen.ConnectionLog(s.T(), db, database.UpsertConnectionLogParams{ - Ip: defaultIPAddress(), - Type: database.ConnectionTypeSsh, - WorkspaceID: ws.ID, - OrganizationID: ws.OrganizationID, - WorkspaceOwnerID: ws.OwnerID, - }) - check.Args(database.GetConnectionLogsOffsetParams{ - LimitOpt: 10, - }).Asserts(rbac.ResourceConnectionLog, policy.ActionRead).WithNotAuthorized("nil") - })) - s.Run("GetAuthorizedConnectionLogsOffset", s.Subtest(func(db database.Store, check *expects) { - ws := createWorkspace(s.T(), db) - _ = dbgen.ConnectionLog(s.T(), db, database.UpsertConnectionLogParams{ - Ip: defaultIPAddress(), - Type: database.ConnectionTypeSsh, - WorkspaceID: ws.ID, - OrganizationID: ws.OrganizationID, - WorkspaceOwnerID: ws.OwnerID, - }) - _ = dbgen.ConnectionLog(s.T(), db, database.UpsertConnectionLogParams{ - Ip: defaultIPAddress(), - Type: database.ConnectionTypeSsh, - WorkspaceID: ws.ID, - OrganizationID: ws.OrganizationID, - WorkspaceOwnerID: ws.OwnerID, - }) - check.Args(database.GetConnectionLogsOffsetParams{ - LimitOpt: 10, - }, emptyPreparedAuthorized{}).Asserts(rbac.ResourceConnectionLog, policy.ActionRead) - })) - s.Run("CountConnectionLogs", s.Subtest(func(db database.Store, check *expects) { - ws := createWorkspace(s.T(), db) - _ = dbgen.ConnectionLog(s.T(), db, database.UpsertConnectionLogParams{ - Type: database.ConnectionTypeSsh, - WorkspaceID: ws.ID, - OrganizationID: ws.OrganizationID, - WorkspaceOwnerID: ws.OwnerID, - }) - _ = dbgen.ConnectionLog(s.T(), db, database.UpsertConnectionLogParams{ - Type: database.ConnectionTypeSsh, - WorkspaceID: ws.ID, - OrganizationID: ws.OrganizationID, - WorkspaceOwnerID: ws.OwnerID, - }) - check.Args(database.CountConnectionLogsParams{}).Asserts( - rbac.ResourceConnectionLog, policy.ActionRead, - ).WithNotAuthorized("nil") - })) - s.Run("CountAuthorizedConnectionLogs", s.Subtest(func(db database.Store, check *expects) { - ws := createWorkspace(s.T(), db) - _ = dbgen.ConnectionLog(s.T(), db, database.UpsertConnectionLogParams{ - Type: database.ConnectionTypeSsh, - WorkspaceID: ws.ID, - OrganizationID: ws.OrganizationID, - WorkspaceOwnerID: ws.OwnerID, - }) - _ = dbgen.ConnectionLog(s.T(), db, database.UpsertConnectionLogParams{ - Type: database.ConnectionTypeSsh, - WorkspaceID: ws.ID, - OrganizationID: ws.OrganizationID, - WorkspaceOwnerID: ws.OwnerID, - }) - check.Args(database.CountConnectionLogsParams{}, emptyPreparedAuthorized{}).Asserts( - rbac.ResourceConnectionLog, policy.ActionRead, - ) + s.Run("UpsertConnectionLog", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + ws := testutil.Fake(s.T(), faker, database.WorkspaceTable{}) + arg := database.UpsertConnectionLogParams{Ip: defaultIPAddress(), Type: database.ConnectionTypeSsh, WorkspaceID: ws.ID, OrganizationID: ws.OrganizationID, ConnectionStatus: database.ConnectionStatusConnected, WorkspaceOwnerID: ws.OwnerID} + dbm.EXPECT().UpsertConnectionLog(gomock.Any(), arg).Return(database.ConnectionLog{}, nil).AnyTimes() + check.Args(arg).Asserts(rbac.ResourceConnectionLog, policy.ActionUpdate) + })) + s.Run("GetConnectionLogsOffset", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) { + arg := database.GetConnectionLogsOffsetParams{LimitOpt: 10} + 
dbm.EXPECT().GetConnectionLogsOffset(gomock.Any(), arg).Return([]database.GetConnectionLogsOffsetRow{}, nil).AnyTimes() + dbm.EXPECT().GetAuthorizedConnectionLogsOffset(gomock.Any(), arg, gomock.Any()).Return([]database.GetConnectionLogsOffsetRow{}, nil).AnyTimes() + check.Args(arg).Asserts(rbac.ResourceConnectionLog, policy.ActionRead).WithNotAuthorized("nil") + })) + s.Run("GetAuthorizedConnectionLogsOffset", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) { + arg := database.GetConnectionLogsOffsetParams{LimitOpt: 10} + dbm.EXPECT().GetAuthorizedConnectionLogsOffset(gomock.Any(), arg, gomock.Any()).Return([]database.GetConnectionLogsOffsetRow{}, nil).AnyTimes() + dbm.EXPECT().GetConnectionLogsOffset(gomock.Any(), arg).Return([]database.GetConnectionLogsOffsetRow{}, nil).AnyTimes() + check.Args(arg, emptyPreparedAuthorized{}).Asserts(rbac.ResourceConnectionLog, policy.ActionRead) + })) + s.Run("CountConnectionLogs", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) { + dbm.EXPECT().CountConnectionLogs(gomock.Any(), database.CountConnectionLogsParams{}).Return(int64(0), nil).AnyTimes() + dbm.EXPECT().CountAuthorizedConnectionLogs(gomock.Any(), database.CountConnectionLogsParams{}, gomock.Any()).Return(int64(0), nil).AnyTimes() + check.Args(database.CountConnectionLogsParams{}).Asserts(rbac.ResourceConnectionLog, policy.ActionRead).WithNotAuthorized("nil") + })) + s.Run("CountAuthorizedConnectionLogs", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) { + dbm.EXPECT().CountAuthorizedConnectionLogs(gomock.Any(), database.CountConnectionLogsParams{}, gomock.Any()).Return(int64(0), nil).AnyTimes() + dbm.EXPECT().CountConnectionLogs(gomock.Any(), database.CountConnectionLogsParams{}).Return(int64(0), nil).AnyTimes() + check.Args(database.CountConnectionLogsParams{}, emptyPreparedAuthorized{}).Asserts(rbac.ResourceConnectionLog, policy.ActionRead) })) } func (s *MethodTestSuite) TestFile() { - s.Run("GetFileByHashAndCreator", s.Subtest(func(db database.Store, check *expects) { - f := dbgen.File(s.T(), db, database.File{}) + s.Run("GetFileByHashAndCreator", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + f := testutil.Fake(s.T(), faker, database.File{}) + dbm.EXPECT().GetFileByHashAndCreator(gomock.Any(), gomock.Any()).Return(f, nil).AnyTimes() + // dbauthz may attempt to check template access on NotAuthorized; ensure mock handles it. 
+ dbm.EXPECT().GetFileTemplates(gomock.Any(), f.ID).Return([]database.GetFileTemplatesRow{}, nil).AnyTimes() check.Args(database.GetFileByHashAndCreatorParams{ Hash: f.Hash, CreatedBy: f.CreatedBy, }).Asserts(f, policy.ActionRead).Returns(f) })) - s.Run("GetFileByID", s.Subtest(func(db database.Store, check *expects) { - f := dbgen.File(s.T(), db, database.File{}) + s.Run("GetFileByID", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + f := testutil.Fake(s.T(), faker, database.File{}) + dbm.EXPECT().GetFileByID(gomock.Any(), f.ID).Return(f, nil).AnyTimes() + dbm.EXPECT().GetFileTemplates(gomock.Any(), f.ID).Return([]database.GetFileTemplatesRow{}, nil).AnyTimes() check.Args(f.ID).Asserts(f, policy.ActionRead).Returns(f) })) - s.Run("GetFileIDByTemplateVersionID", s.Subtest(func(db database.Store, check *expects) { - o := dbgen.Organization(s.T(), db, database.Organization{}) - u := dbgen.User(s.T(), db, database.User{}) - _ = dbgen.OrganizationMember(s.T(), db, database.OrganizationMember{OrganizationID: o.ID, UserID: u.ID}) - f := dbgen.File(s.T(), db, database.File{CreatedBy: u.ID}) - j := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{StorageMethod: database.ProvisionerStorageMethodFile, FileID: f.ID}) - tv := dbgen.TemplateVersion(s.T(), db, database.TemplateVersion{OrganizationID: o.ID, JobID: j.ID, CreatedBy: u.ID}) - check.Args(tv.ID).Asserts(rbac.ResourceFile.WithID(f.ID), policy.ActionRead).Returns(f.ID) + s.Run("GetFileIDByTemplateVersionID", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) { + tvID := uuid.New() + fileID := uuid.New() + dbm.EXPECT().GetFileIDByTemplateVersionID(gomock.Any(), tvID).Return(fileID, nil).AnyTimes() + check.Args(tvID).Asserts(rbac.ResourceFile.WithID(fileID), policy.ActionRead).Returns(fileID) })) - s.Run("InsertFile", s.Subtest(func(db database.Store, check *expects) { - u := dbgen.User(s.T(), db, database.User{}) + s.Run("InsertFile", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + u := testutil.Fake(s.T(), faker, database.User{}) + ret := testutil.Fake(s.T(), faker, database.File{CreatedBy: u.ID}) + dbm.EXPECT().InsertFile(gomock.Any(), gomock.Any()).Return(ret, nil).AnyTimes() check.Args(database.InsertFileParams{ CreatedBy: u.ID, }).Asserts(rbac.ResourceFile.WithOwner(u.ID.String()), policy.ActionCreate) @@ -477,158 +379,150 @@ func (s *MethodTestSuite) TestFile() { } func (s *MethodTestSuite) TestGroup() { - s.Run("DeleteGroupByID", s.Subtest(func(db database.Store, check *expects) { - dbtestutil.DisableForeignKeysAndTriggers(s.T(), db) - g := dbgen.Group(s.T(), db, database.Group{}) + s.Run("DeleteGroupByID", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + g := testutil.Fake(s.T(), faker, database.Group{}) + dbm.EXPECT().GetGroupByID(gomock.Any(), g.ID).Return(g, nil).AnyTimes() + dbm.EXPECT().DeleteGroupByID(gomock.Any(), g.ID).Return(nil).AnyTimes() check.Args(g.ID).Asserts(g, policy.ActionDelete).Returns() })) - s.Run("DeleteGroupMemberFromGroup", s.Subtest(func(db database.Store, check *expects) { - dbtestutil.DisableForeignKeysAndTriggers(s.T(), db) - g := dbgen.Group(s.T(), db, database.Group{}) - u := dbgen.User(s.T(), db, database.User{}) - m := dbgen.GroupMember(s.T(), db, database.GroupMemberTable{ - GroupID: g.ID, - UserID: u.ID, - }) - check.Args(database.DeleteGroupMemberFromGroupParams{ - UserID: m.UserID, - GroupID: g.ID, - }).Asserts(g, policy.ActionUpdate).Returns() + + 
s.Run("DeleteGroupMemberFromGroup", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + g := testutil.Fake(s.T(), faker, database.Group{}) + u := testutil.Fake(s.T(), faker, database.User{}) + m := testutil.Fake(s.T(), faker, database.GroupMember{GroupID: g.ID, UserID: u.ID}) + dbm.EXPECT().GetGroupByID(gomock.Any(), g.ID).Return(g, nil).AnyTimes() + dbm.EXPECT().DeleteGroupMemberFromGroup(gomock.Any(), database.DeleteGroupMemberFromGroupParams{UserID: m.UserID, GroupID: g.ID}).Return(nil).AnyTimes() + check.Args(database.DeleteGroupMemberFromGroupParams{UserID: m.UserID, GroupID: g.ID}).Asserts(g, policy.ActionUpdate).Returns() })) - s.Run("GetGroupByID", s.Subtest(func(db database.Store, check *expects) { - dbtestutil.DisableForeignKeysAndTriggers(s.T(), db) - g := dbgen.Group(s.T(), db, database.Group{}) + + s.Run("GetGroupByID", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + g := testutil.Fake(s.T(), faker, database.Group{}) + dbm.EXPECT().GetGroupByID(gomock.Any(), g.ID).Return(g, nil).AnyTimes() check.Args(g.ID).Asserts(g, policy.ActionRead).Returns(g) })) - s.Run("GetGroupByOrgAndName", s.Subtest(func(db database.Store, check *expects) { - dbtestutil.DisableForeignKeysAndTriggers(s.T(), db) - g := dbgen.Group(s.T(), db, database.Group{}) - check.Args(database.GetGroupByOrgAndNameParams{ - OrganizationID: g.OrganizationID, - Name: g.Name, - }).Asserts(g, policy.ActionRead).Returns(g) + + s.Run("GetGroupByOrgAndName", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + g := testutil.Fake(s.T(), faker, database.Group{}) + dbm.EXPECT().GetGroupByOrgAndName(gomock.Any(), database.GetGroupByOrgAndNameParams{OrganizationID: g.OrganizationID, Name: g.Name}).Return(g, nil).AnyTimes() + check.Args(database.GetGroupByOrgAndNameParams{OrganizationID: g.OrganizationID, Name: g.Name}).Asserts(g, policy.ActionRead).Returns(g) })) - s.Run("GetGroupMembersByGroupID", s.Subtest(func(db database.Store, check *expects) { - dbtestutil.DisableForeignKeysAndTriggers(s.T(), db) - g := dbgen.Group(s.T(), db, database.Group{}) - u := dbgen.User(s.T(), db, database.User{}) - gm := dbgen.GroupMember(s.T(), db, database.GroupMemberTable{GroupID: g.ID, UserID: u.ID}) - check.Args(database.GetGroupMembersByGroupIDParams{ - GroupID: g.ID, - IncludeSystem: false, - }).Asserts(gm, policy.ActionRead) + + s.Run("GetGroupMembersByGroupID", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + g := testutil.Fake(s.T(), faker, database.Group{}) + u := testutil.Fake(s.T(), faker, database.User{}) + gm := testutil.Fake(s.T(), faker, database.GroupMember{GroupID: g.ID, UserID: u.ID}) + arg := database.GetGroupMembersByGroupIDParams{GroupID: g.ID, IncludeSystem: false} + dbm.EXPECT().GetGroupMembersByGroupID(gomock.Any(), arg).Return([]database.GroupMember{gm}, nil).AnyTimes() + check.Args(arg).Asserts(gm, policy.ActionRead) })) - s.Run("GetGroupMembersCountByGroupID", s.Subtest(func(db database.Store, check *expects) { - dbtestutil.DisableForeignKeysAndTriggers(s.T(), db) - g := dbgen.Group(s.T(), db, database.Group{}) - check.Args(database.GetGroupMembersCountByGroupIDParams{ - GroupID: g.ID, - IncludeSystem: false, - }).Asserts(g, policy.ActionRead) + + s.Run("GetGroupMembersCountByGroupID", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + g := testutil.Fake(s.T(), faker, database.Group{}) + arg := database.GetGroupMembersCountByGroupIDParams{GroupID: g.ID, IncludeSystem: false} 
+ dbm.EXPECT().GetGroupByID(gomock.Any(), g.ID).Return(g, nil).AnyTimes() + dbm.EXPECT().GetGroupMembersCountByGroupID(gomock.Any(), arg).Return(int64(0), nil).AnyTimes() + check.Args(arg).Asserts(g, policy.ActionRead) })) - s.Run("GetGroupMembers", s.Subtest(func(db database.Store, check *expects) { - dbtestutil.DisableForeignKeysAndTriggers(s.T(), db) - g := dbgen.Group(s.T(), db, database.Group{}) - u := dbgen.User(s.T(), db, database.User{}) - dbgen.GroupMember(s.T(), db, database.GroupMemberTable{GroupID: g.ID, UserID: u.ID}) + + s.Run("GetGroupMembers", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) { + dbm.EXPECT().GetGroupMembers(gomock.Any(), false).Return([]database.GroupMember{}, nil).AnyTimes() check.Args(false).Asserts(rbac.ResourceSystem, policy.ActionRead) })) - s.Run("System/GetGroups", s.Subtest(func(db database.Store, check *expects) { - dbtestutil.DisableForeignKeysAndTriggers(s.T(), db) - _ = dbgen.Group(s.T(), db, database.Group{}) - check.Args(database.GetGroupsParams{}). - Asserts(rbac.ResourceSystem, policy.ActionRead) + + s.Run("System/GetGroups", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + o := testutil.Fake(s.T(), faker, database.Organization{}) + g := testutil.Fake(s.T(), faker, database.Group{OrganizationID: o.ID}) + row := database.GetGroupsRow{Group: g, OrganizationName: o.Name, OrganizationDisplayName: o.DisplayName} + dbm.EXPECT().GetGroups(gomock.Any(), database.GetGroupsParams{}).Return([]database.GetGroupsRow{row}, nil).AnyTimes() + check.Args(database.GetGroupsParams{}).Asserts(rbac.ResourceSystem, policy.ActionRead) })) - s.Run("GetGroups", s.Subtest(func(db database.Store, check *expects) { - o := dbgen.Organization(s.T(), db, database.Organization{}) - g := dbgen.Group(s.T(), db, database.Group{OrganizationID: o.ID}) - u := dbgen.User(s.T(), db, database.User{}) - gm := dbgen.GroupMember(s.T(), db, database.GroupMemberTable{GroupID: g.ID, UserID: u.ID}) - check.Args(database.GetGroupsParams{ - OrganizationID: g.OrganizationID, - HasMemberID: gm.UserID, - }).Asserts(rbac.ResourceSystem, policy.ActionRead, g, policy.ActionRead). 
- // Fail the system resource skip - FailSystemObjectChecks() + + s.Run("GetGroups", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + o := testutil.Fake(s.T(), faker, database.Organization{}) + g := testutil.Fake(s.T(), faker, database.Group{OrganizationID: o.ID}) + u := testutil.Fake(s.T(), faker, database.User{}) + gm := testutil.Fake(s.T(), faker, database.GroupMember{GroupID: g.ID, UserID: u.ID}) + params := database.GetGroupsParams{OrganizationID: g.OrganizationID, HasMemberID: gm.UserID} + row := database.GetGroupsRow{Group: g, OrganizationName: o.Name, OrganizationDisplayName: o.DisplayName} + dbm.EXPECT().GetGroups(gomock.Any(), params).Return([]database.GetGroupsRow{row}, nil).AnyTimes() + check.Args(params).Asserts(rbac.ResourceSystem, policy.ActionRead, g, policy.ActionRead).FailSystemObjectChecks() })) - s.Run("InsertAllUsersGroup", s.Subtest(func(db database.Store, check *expects) { - o := dbgen.Organization(s.T(), db, database.Organization{}) + + s.Run("InsertAllUsersGroup", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + o := testutil.Fake(s.T(), faker, database.Organization{}) + ret := testutil.Fake(s.T(), faker, database.Group{OrganizationID: o.ID}) + dbm.EXPECT().InsertAllUsersGroup(gomock.Any(), o.ID).Return(ret, nil).AnyTimes() check.Args(o.ID).Asserts(rbac.ResourceGroup.InOrg(o.ID), policy.ActionCreate) })) - s.Run("InsertGroup", s.Subtest(func(db database.Store, check *expects) { - o := dbgen.Organization(s.T(), db, database.Organization{}) - check.Args(database.InsertGroupParams{ - OrganizationID: o.ID, - Name: "test", - }).Asserts(rbac.ResourceGroup.InOrg(o.ID), policy.ActionCreate) + + s.Run("InsertGroup", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + o := testutil.Fake(s.T(), faker, database.Organization{}) + arg := database.InsertGroupParams{OrganizationID: o.ID, Name: "test"} + ret := testutil.Fake(s.T(), faker, database.Group{OrganizationID: o.ID, Name: arg.Name}) + dbm.EXPECT().InsertGroup(gomock.Any(), arg).Return(ret, nil).AnyTimes() + check.Args(arg).Asserts(rbac.ResourceGroup.InOrg(o.ID), policy.ActionCreate) })) - s.Run("InsertGroupMember", s.Subtest(func(db database.Store, check *expects) { - dbtestutil.DisableForeignKeysAndTriggers(s.T(), db) - g := dbgen.Group(s.T(), db, database.Group{}) - check.Args(database.InsertGroupMemberParams{ - UserID: uuid.New(), - GroupID: g.ID, - }).Asserts(g, policy.ActionUpdate).Returns() + + s.Run("InsertGroupMember", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + g := testutil.Fake(s.T(), faker, database.Group{}) + arg := database.InsertGroupMemberParams{UserID: uuid.New(), GroupID: g.ID} + dbm.EXPECT().GetGroupByID(gomock.Any(), g.ID).Return(g, nil).AnyTimes() + dbm.EXPECT().InsertGroupMember(gomock.Any(), arg).Return(nil).AnyTimes() + check.Args(arg).Asserts(g, policy.ActionUpdate).Returns() })) - s.Run("InsertUserGroupsByName", s.Subtest(func(db database.Store, check *expects) { - o := dbgen.Organization(s.T(), db, database.Organization{}) - u1 := dbgen.User(s.T(), db, database.User{}) - g1 := dbgen.Group(s.T(), db, database.Group{OrganizationID: o.ID}) - g2 := dbgen.Group(s.T(), db, database.Group{OrganizationID: o.ID}) - check.Args(database.InsertUserGroupsByNameParams{ - OrganizationID: o.ID, - UserID: u1.ID, - GroupNames: slice.New(g1.Name, g2.Name), - }).Asserts(rbac.ResourceGroup.InOrg(o.ID), policy.ActionUpdate).Returns() + + s.Run("InsertUserGroupsByName", s.Mocked(func(dbm 
*dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + o := testutil.Fake(s.T(), faker, database.Organization{}) + u1 := testutil.Fake(s.T(), faker, database.User{}) + g1 := testutil.Fake(s.T(), faker, database.Group{OrganizationID: o.ID}) + g2 := testutil.Fake(s.T(), faker, database.Group{OrganizationID: o.ID}) + arg := database.InsertUserGroupsByNameParams{OrganizationID: o.ID, UserID: u1.ID, GroupNames: slice.New(g1.Name, g2.Name)} + dbm.EXPECT().InsertUserGroupsByName(gomock.Any(), arg).Return(nil).AnyTimes() + check.Args(arg).Asserts(rbac.ResourceGroup.InOrg(o.ID), policy.ActionUpdate).Returns() })) - s.Run("InsertUserGroupsByID", s.Subtest(func(db database.Store, check *expects) { - o := dbgen.Organization(s.T(), db, database.Organization{}) - u1 := dbgen.User(s.T(), db, database.User{}) - g1 := dbgen.Group(s.T(), db, database.Group{OrganizationID: o.ID}) - g2 := dbgen.Group(s.T(), db, database.Group{OrganizationID: o.ID}) - g3 := dbgen.Group(s.T(), db, database.Group{OrganizationID: o.ID}) - _ = dbgen.GroupMember(s.T(), db, database.GroupMemberTable{GroupID: g1.ID, UserID: u1.ID}) + + s.Run("InsertUserGroupsByID", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + o := testutil.Fake(s.T(), faker, database.Organization{}) + u1 := testutil.Fake(s.T(), faker, database.User{}) + g1 := testutil.Fake(s.T(), faker, database.Group{OrganizationID: o.ID}) + g2 := testutil.Fake(s.T(), faker, database.Group{OrganizationID: o.ID}) + g3 := testutil.Fake(s.T(), faker, database.Group{OrganizationID: o.ID}) returns := slice.New(g2.ID, g3.ID) - if !dbtestutil.WillUsePostgres() { - returns = slice.New(g1.ID, g2.ID, g3.ID) - } - check.Args(database.InsertUserGroupsByIDParams{ - UserID: u1.ID, - GroupIds: slice.New(g1.ID, g2.ID, g3.ID), - }).Asserts(rbac.ResourceSystem, policy.ActionUpdate).Returns(returns) + arg := database.InsertUserGroupsByIDParams{UserID: u1.ID, GroupIds: slice.New(g1.ID, g2.ID, g3.ID)} + dbm.EXPECT().InsertUserGroupsByID(gomock.Any(), arg).Return(returns, nil).AnyTimes() + check.Args(arg).Asserts(rbac.ResourceSystem, policy.ActionUpdate).Returns(returns) })) - s.Run("RemoveUserFromAllGroups", s.Subtest(func(db database.Store, check *expects) { - o := dbgen.Organization(s.T(), db, database.Organization{}) - u1 := dbgen.User(s.T(), db, database.User{}) - g1 := dbgen.Group(s.T(), db, database.Group{OrganizationID: o.ID}) - g2 := dbgen.Group(s.T(), db, database.Group{OrganizationID: o.ID}) - _ = dbgen.GroupMember(s.T(), db, database.GroupMemberTable{GroupID: g1.ID, UserID: u1.ID}) - _ = dbgen.GroupMember(s.T(), db, database.GroupMemberTable{GroupID: g2.ID, UserID: u1.ID}) + + s.Run("RemoveUserFromAllGroups", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + u1 := testutil.Fake(s.T(), faker, database.User{}) + dbm.EXPECT().RemoveUserFromAllGroups(gomock.Any(), u1.ID).Return(nil).AnyTimes() check.Args(u1.ID).Asserts(rbac.ResourceSystem, policy.ActionUpdate).Returns() })) - s.Run("RemoveUserFromGroups", s.Subtest(func(db database.Store, check *expects) { - o := dbgen.Organization(s.T(), db, database.Organization{}) - u1 := dbgen.User(s.T(), db, database.User{}) - g1 := dbgen.Group(s.T(), db, database.Group{OrganizationID: o.ID}) - g2 := dbgen.Group(s.T(), db, database.Group{OrganizationID: o.ID}) - _ = dbgen.GroupMember(s.T(), db, database.GroupMemberTable{GroupID: g1.ID, UserID: u1.ID}) - _ = dbgen.GroupMember(s.T(), db, database.GroupMemberTable{GroupID: g2.ID, UserID: u1.ID}) - 
check.Args(database.RemoveUserFromGroupsParams{ - UserID: u1.ID, - GroupIds: []uuid.UUID{g1.ID, g2.ID}, - }).Asserts(rbac.ResourceSystem, policy.ActionUpdate).Returns(slice.New(g1.ID, g2.ID)) - })) - s.Run("UpdateGroupByID", s.Subtest(func(db database.Store, check *expects) { - dbtestutil.DisableForeignKeysAndTriggers(s.T(), db) - g := dbgen.Group(s.T(), db, database.Group{}) - check.Args(database.UpdateGroupByIDParams{ - ID: g.ID, - }).Asserts(g, policy.ActionUpdate) - })) - s.Run("ValidateGroupIDs", s.Subtest(func(db database.Store, check *expects) { - o := dbgen.Organization(s.T(), db, database.Organization{}) - g := dbgen.Group(s.T(), db, database.Group{OrganizationID: o.ID}) - check.Args([]uuid.UUID{g.ID}).Asserts(rbac.ResourceSystem, policy.ActionRead) + + s.Run("RemoveUserFromGroups", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + o := testutil.Fake(s.T(), faker, database.Organization{}) + u1 := testutil.Fake(s.T(), faker, database.User{}) + g1 := testutil.Fake(s.T(), faker, database.Group{OrganizationID: o.ID}) + g2 := testutil.Fake(s.T(), faker, database.Group{OrganizationID: o.ID}) + arg := database.RemoveUserFromGroupsParams{UserID: u1.ID, GroupIds: []uuid.UUID{g1.ID, g2.ID}} + dbm.EXPECT().RemoveUserFromGroups(gomock.Any(), arg).Return(slice.New(g1.ID, g2.ID), nil).AnyTimes() + check.Args(arg).Asserts(rbac.ResourceSystem, policy.ActionUpdate).Returns(slice.New(g1.ID, g2.ID)) + })) + + s.Run("UpdateGroupByID", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + g := testutil.Fake(s.T(), faker, database.Group{}) + arg := database.UpdateGroupByIDParams{ID: g.ID} + dbm.EXPECT().GetGroupByID(gomock.Any(), g.ID).Return(g, nil).AnyTimes() + dbm.EXPECT().UpdateGroupByID(gomock.Any(), arg).Return(g, nil).AnyTimes() + check.Args(arg).Asserts(g, policy.ActionUpdate) + })) + + s.Run("ValidateGroupIDs", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) { + o := testutil.Fake(s.T(), faker, database.Organization{}) + g := testutil.Fake(s.T(), faker, database.Group{OrganizationID: o.ID}) + ids := []uuid.UUID{g.ID} + dbm.EXPECT().ValidateGroupIDs(gomock.Any(), ids).Return(database.ValidateGroupIDsRow{}, nil).AnyTimes() + check.Args(ids).Asserts(rbac.ResourceSystem, policy.ActionRead) })) } diff --git a/coderd/database/dbgen/dbgen.go b/coderd/database/dbgen/dbgen.go index 11e02d0f651e9..56927507b6109 100644 --- a/coderd/database/dbgen/dbgen.go +++ b/coderd/database/dbgen/dbgen.go @@ -1436,11 +1436,25 @@ func UserSecret(t testing.TB, db database.Store, seed database.UserSecret) datab return userSecret } -func ClaimPrebuild(t testing.TB, db database.Store, newUserID uuid.UUID, newName string, presetID uuid.UUID) database.ClaimPrebuiltWorkspaceRow { +func ClaimPrebuild( + t testing.TB, + db database.Store, + now time.Time, + newUserID uuid.UUID, + newName string, + presetID uuid.UUID, + autostartSchedule sql.NullString, + nextStartAt sql.NullTime, + ttl sql.NullInt64, +) database.ClaimPrebuiltWorkspaceRow { claimedWorkspace, err := db.ClaimPrebuiltWorkspace(genCtx, database.ClaimPrebuiltWorkspaceParams{ - NewUserID: newUserID, - NewName: newName, - PresetID: presetID, + NewUserID: newUserID, + NewName: newName, + Now: now, + PresetID: presetID, + AutostartSchedule: autostartSchedule, + NextStartAt: nextStartAt, + WorkspaceTtl: ttl, }) require.NoError(t, err, "claim prebuilt workspace") diff --git a/coderd/database/queries.sql.go b/coderd/database/queries.sql.go index c039b7f94e8d5..58874cb7ed8c8 100644 --- 
a/coderd/database/queries.sql.go +++ b/coderd/database/queries.sql.go @@ -7122,7 +7122,20 @@ const claimPrebuiltWorkspace = `-- name: ClaimPrebuiltWorkspace :one UPDATE workspaces w SET owner_id = $1::uuid, name = $2::text, - updated_at = NOW() + updated_at = $3::timestamptz, + -- Update autostart_schedule, next_start_at and ttl according to template and workspace-level + -- configurations, allowing the workspace to be managed by the lifecycle executor as expected. + autostart_schedule = $4, + next_start_at = $5, + ttl = $6, + -- Update last_used_at during claim to ensure the claimed workspace is treated as recently used. + -- This avoids unintended dormancy caused by prebuilds having stale usage timestamps. + last_used_at = $3::timestamptz, + -- Clear dormant and deletion timestamps as a safeguard to ensure a clean lifecycle state after claim. + -- These fields should not be set on prebuilds, but we defensively reset them here to prevent + -- accidental dormancy or deletion by the lifecycle executor. + dormant_at = NULL, + deleting_at = NULL WHERE w.id IN ( SELECT p.id FROM workspace_prebuilds p @@ -7133,7 +7146,7 @@ WHERE w.id IN ( -- The prebuilds system should never try to claim a prebuild for an inactive template version. -- Nevertheless, this filter is here as a defensive measure: AND b.template_version_id = t.active_version_id - AND p.current_preset_id = $3::uuid + AND p.current_preset_id = $7::uuid AND p.ready AND NOT t.deleted LIMIT 1 FOR UPDATE OF p SKIP LOCKED -- Ensure that a concurrent request will not select the same prebuild. @@ -7142,9 +7155,13 @@ RETURNING w.id, w.name ` type ClaimPrebuiltWorkspaceParams struct { - NewUserID uuid.UUID `db:"new_user_id" json:"new_user_id"` - NewName string `db:"new_name" json:"new_name"` - PresetID uuid.UUID `db:"preset_id" json:"preset_id"` + NewUserID uuid.UUID `db:"new_user_id" json:"new_user_id"` + NewName string `db:"new_name" json:"new_name"` + Now time.Time `db:"now" json:"now"` + AutostartSchedule sql.NullString `db:"autostart_schedule" json:"autostart_schedule"` + NextStartAt sql.NullTime `db:"next_start_at" json:"next_start_at"` + WorkspaceTtl sql.NullInt64 `db:"workspace_ttl" json:"workspace_ttl"` + PresetID uuid.UUID `db:"preset_id" json:"preset_id"` } type ClaimPrebuiltWorkspaceRow struct { @@ -7153,7 +7170,15 @@ type ClaimPrebuiltWorkspaceRow struct { } func (q *sqlQuerier) ClaimPrebuiltWorkspace(ctx context.Context, arg ClaimPrebuiltWorkspaceParams) (ClaimPrebuiltWorkspaceRow, error) { - row := q.db.QueryRowContext(ctx, claimPrebuiltWorkspace, arg.NewUserID, arg.NewName, arg.PresetID) + row := q.db.QueryRowContext(ctx, claimPrebuiltWorkspace, + arg.NewUserID, + arg.NewName, + arg.Now, + arg.AutostartSchedule, + arg.NextStartAt, + arg.WorkspaceTtl, + arg.PresetID, + ) var i ClaimPrebuiltWorkspaceRow err := row.Scan(&i.ID, &i.Name) return i, err @@ -19180,7 +19205,15 @@ SET deadline = $1::timestamptz, max_deadline = $2::timestamptz, updated_at = $3::timestamptz -WHERE id = $4::uuid +FROM + workspaces +WHERE + workspace_builds.id = $4::uuid + AND workspace_builds.workspace_id = workspaces.id + -- Prebuilt workspaces (identified by having the prebuilds system user as owner_id) + -- are managed by the reconciliation loop, not the lifecycle executor which handles + -- deadline and max_deadline + AND workspaces.owner_id != 'c42fdf75-3097-471c-8c33-fb52454d81c0'::UUID ` type UpdateWorkspaceBuildDeadlineByIDParams struct { @@ -21135,6 +21168,10 @@ SET next_start_at = $3 WHERE id = $1 + -- Prebuilt workspaces (identified by having the 
prebuilds system user as owner_id) + -- are managed by the reconciliation loop, not the lifecycle executor which handles + -- autostart_schedule and next_start_at + AND owner_id != 'c42fdf75-3097-471c-8c33-fb52454d81c0'::UUID ` type UpdateWorkspaceAutostartParams struct { @@ -21191,6 +21228,10 @@ FROM WHERE workspaces.id = $1 AND templates.id = workspaces.template_id + -- Prebuilt workspaces (identified by having the prebuilds system user as owner_id) + -- are managed by the reconciliation loop, not the lifecycle executor which handles + -- dormant_at and deleting_at + AND owner_id != 'c42fdf75-3097-471c-8c33-fb52454d81c0'::UUID RETURNING workspaces.id, workspaces.created_at, workspaces.updated_at, workspaces.owner_id, workspaces.organization_id, workspaces.template_id, workspaces.deleted, workspaces.name, workspaces.autostart_schedule, workspaces.ttl, workspaces.last_used_at, workspaces.dormant_at, workspaces.deleting_at, workspaces.automatic_updates, workspaces.favorite, workspaces.next_start_at, workspaces.group_acl, workspaces.user_acl ` @@ -21252,6 +21293,10 @@ SET next_start_at = $2 WHERE id = $1 + -- Prebuilt workspaces (identified by having the prebuilds system user as owner_id) + -- are managed by the reconciliation loop, not the lifecycle executor which handles + -- next_start_at + AND owner_id != 'c42fdf75-3097-471c-8c33-fb52454d81c0'::UUID ` type UpdateWorkspaceNextStartAtParams struct { @@ -21271,6 +21316,10 @@ SET ttl = $2 WHERE id = $1 + -- Prebuilt workspaces (identified by having the prebuilds system user as owner_id) + -- are managed by the reconciliation loop, not the lifecycle executor which handles + -- ttl + AND owner_id != 'c42fdf75-3097-471c-8c33-fb52454d81c0'::UUID ` type UpdateWorkspaceTTLParams struct { @@ -21349,11 +21398,11 @@ func (q *sqlQuerier) UpdateWorkspacesDormantDeletingAtByTemplateID(ctx context.C const updateWorkspacesTTLByTemplateID = `-- name: UpdateWorkspacesTTLByTemplateID :exec UPDATE - workspaces + workspaces SET - ttl = $2 + ttl = $2 WHERE - template_id = $1 + template_id = $1 ` type UpdateWorkspacesTTLByTemplateIDParams struct { diff --git a/coderd/database/queries/prebuilds.sql b/coderd/database/queries/prebuilds.sql index 37bff9487928e..87a713974c563 100644 --- a/coderd/database/queries/prebuilds.sql +++ b/coderd/database/queries/prebuilds.sql @@ -2,7 +2,20 @@ UPDATE workspaces w SET owner_id = @new_user_id::uuid, name = @new_name::text, - updated_at = NOW() + updated_at = @now::timestamptz, + -- Update autostart_schedule, next_start_at and ttl according to template and workspace-level + -- configurations, allowing the workspace to be managed by the lifecycle executor as expected. + autostart_schedule = @autostart_schedule, + next_start_at = @next_start_at, + ttl = @workspace_ttl, + -- Update last_used_at during claim to ensure the claimed workspace is treated as recently used. + -- This avoids unintended dormancy caused by prebuilds having stale usage timestamps. + last_used_at = @now::timestamptz, + -- Clear dormant and deletion timestamps as a safeguard to ensure a clean lifecycle state after claim. + -- These fields should not be set on prebuilds, but we defensively reset them here to prevent + -- accidental dormancy or deletion by the lifecycle executor. 
+ dormant_at = NULL, + deleting_at = NULL WHERE w.id IN ( SELECT p.id FROM workspace_prebuilds p diff --git a/coderd/database/queries/workspacebuilds.sql b/coderd/database/queries/workspacebuilds.sql index be7bec5fa08f2..40bf0f18cf8c5 100644 --- a/coderd/database/queries/workspacebuilds.sql +++ b/coderd/database/queries/workspacebuilds.sql @@ -127,7 +127,15 @@ SET deadline = @deadline::timestamptz, max_deadline = @max_deadline::timestamptz, updated_at = @updated_at::timestamptz -WHERE id = @id::uuid; +FROM + workspaces +WHERE + workspace_builds.id = @id::uuid + AND workspace_builds.workspace_id = workspaces.id + -- Prebuilt workspaces (identified by having the prebuilds system user as owner_id) + -- are managed by the reconciliation loop, not the lifecycle executor which handles + -- deadline and max_deadline + AND workspaces.owner_id != 'c42fdf75-3097-471c-8c33-fb52454d81c0'::UUID; -- name: UpdateWorkspaceBuildProvisionerStateByID :exec UPDATE diff --git a/coderd/database/queries/workspaces.sql b/coderd/database/queries/workspaces.sql index b6b4f2de0888f..8b9a9e3076555 100644 --- a/coderd/database/queries/workspaces.sql +++ b/coderd/database/queries/workspaces.sql @@ -518,7 +518,11 @@ SET autostart_schedule = $2, next_start_at = $3 WHERE - id = $1; + id = $1 + -- Prebuilt workspaces (identified by having the prebuilds system user as owner_id) + -- are managed by the reconciliation loop, not the lifecycle executor which handles + -- autostart_schedule and next_start_at + AND owner_id != 'c42fdf75-3097-471c-8c33-fb52454d81c0'::UUID; -- name: UpdateWorkspaceNextStartAt :exec UPDATE @@ -526,7 +530,11 @@ UPDATE SET next_start_at = $2 WHERE - id = $1; + id = $1 + -- Prebuilt workspaces (identified by having the prebuilds system user as owner_id) + -- are managed by the reconciliation loop, not the lifecycle executor which handles + -- next_start_at + AND owner_id != 'c42fdf75-3097-471c-8c33-fb52454d81c0'::UUID; -- name: BatchUpdateWorkspaceNextStartAt :exec UPDATE @@ -550,15 +558,19 @@ UPDATE SET ttl = $2 WHERE - id = $1; + id = $1 + -- Prebuilt workspaces (identified by having the prebuilds system user as owner_id) + -- are managed by the reconciliation loop, not the lifecycle executor which handles + -- ttl + AND owner_id != 'c42fdf75-3097-471c-8c33-fb52454d81c0'::UUID; -- name: UpdateWorkspacesTTLByTemplateID :exec UPDATE - workspaces + workspaces SET - ttl = $2 + ttl = $2 WHERE - template_id = $1; + template_id = $1; -- name: UpdateWorkspaceLastUsedAt :exec UPDATE @@ -791,6 +803,10 @@ FROM WHERE workspaces.id = $1 AND templates.id = workspaces.template_id + -- Prebuilt workspaces (identified by having the prebuilds system user as owner_id) + -- are managed by the reconciliation loop, not the lifecycle executor which handles + -- dormant_at and deleting_at + AND owner_id != 'c42fdf75-3097-471c-8c33-fb52454d81c0'::UUID RETURNING workspaces.*; diff --git a/coderd/prebuilds/api.go b/coderd/prebuilds/api.go index 3092d27421d26..1bedeb10130c8 100644 --- a/coderd/prebuilds/api.go +++ b/coderd/prebuilds/api.go @@ -2,6 +2,8 @@ package prebuilds import ( "context" + "database/sql" + "time" "github.com/google/uuid" "golang.org/x/xerrors" @@ -54,6 +56,15 @@ type StateSnapshotter interface { } type Claimer interface { - Claim(ctx context.Context, userID uuid.UUID, name string, presetID uuid.UUID) (*uuid.UUID, error) + Claim( + ctx context.Context, + now time.Time, + userID uuid.UUID, + name string, + presetID uuid.UUID, + autostartSchedule sql.NullString, + nextStartAt sql.NullTime, + ttl sql.NullInt64, 
+ ) (*uuid.UUID, error) Initiator() uuid.UUID } diff --git a/coderd/prebuilds/noop.go b/coderd/prebuilds/noop.go index 3c2dd78a804db..ebb6d6964214e 100644 --- a/coderd/prebuilds/noop.go +++ b/coderd/prebuilds/noop.go @@ -2,6 +2,8 @@ package prebuilds import ( "context" + "database/sql" + "time" "github.com/google/uuid" @@ -28,7 +30,7 @@ var DefaultReconciler ReconciliationOrchestrator = NoopReconciler{} type NoopClaimer struct{} -func (NoopClaimer) Claim(context.Context, uuid.UUID, string, uuid.UUID) (*uuid.UUID, error) { +func (NoopClaimer) Claim(context.Context, time.Time, uuid.UUID, string, uuid.UUID, sql.NullString, sql.NullTime, sql.NullInt64) (*uuid.UUID, error) { // Not entitled to claim prebuilds in AGPL version. return nil, ErrAGPLDoesNotSupportPrebuiltWorkspaces } diff --git a/coderd/provisionerdserver/provisionerdserver.go b/coderd/provisionerdserver/provisionerdserver.go index 1ff6e0f2bb306..83ca7669370ec 100644 --- a/coderd/provisionerdserver/provisionerdserver.go +++ b/coderd/provisionerdserver/provisionerdserver.go @@ -1183,11 +1183,18 @@ func (s *server) FailJob(ctx context.Context, failJob *proto.FailedJob) (*proto. if err != nil { return xerrors.Errorf("update workspace build state: %w", err) } + + deadline := build.Deadline + maxDeadline := build.MaxDeadline + if workspace.IsPrebuild() { + deadline = time.Time{} + maxDeadline = time.Time{} + } err = db.UpdateWorkspaceBuildDeadlineByID(ctx, database.UpdateWorkspaceBuildDeadlineByIDParams{ ID: input.WorkspaceBuildID, UpdatedAt: s.timeNow(), - Deadline: build.Deadline, - MaxDeadline: build.MaxDeadline, + Deadline: deadline, + MaxDeadline: maxDeadline, }) if err != nil { return xerrors.Errorf("update workspace build deadline: %w", err) @@ -1860,38 +1867,47 @@ func (s *server) completeWorkspaceBuildJob(ctx context.Context, job database.Pro return getWorkspaceError } - templateScheduleStore := *s.TemplateScheduleStore.Load() + // Prebuilt workspaces must not have Deadline or MaxDeadline set, + // as they are managed by the prebuild reconciliation loop, not the lifecycle executor + deadline := time.Time{} + maxDeadline := time.Time{} - autoStop, err := schedule.CalculateAutostop(ctx, schedule.CalculateAutostopParams{ - Database: db, - TemplateScheduleStore: templateScheduleStore, - UserQuietHoursScheduleStore: *s.UserQuietHoursScheduleStore.Load(), - // `now` is used below to set the build completion time. - WorkspaceBuildCompletedAt: now, - Workspace: workspace.WorkspaceTable(), - // Allowed to be the empty string. - WorkspaceAutostart: workspace.AutostartSchedule.String, - }) - if err != nil { - return xerrors.Errorf("calculate auto stop: %w", err) - } + if !workspace.IsPrebuild() { + templateScheduleStore := *s.TemplateScheduleStore.Load() - if workspace.AutostartSchedule.Valid { - templateScheduleOptions, err := templateScheduleStore.Get(ctx, db, workspace.TemplateID) + autoStop, err := schedule.CalculateAutostop(ctx, schedule.CalculateAutostopParams{ + Database: db, + TemplateScheduleStore: templateScheduleStore, + UserQuietHoursScheduleStore: *s.UserQuietHoursScheduleStore.Load(), + // `now` is used below to set the build completion time. + WorkspaceBuildCompletedAt: now, + Workspace: workspace.WorkspaceTable(), + // Allowed to be the empty string. 
+ WorkspaceAutostart: workspace.AutostartSchedule.String, + }) if err != nil { - return xerrors.Errorf("get template schedule options: %w", err) + return xerrors.Errorf("calculate auto stop: %w", err) } - nextStartAt, err := schedule.NextAllowedAutostart(now, workspace.AutostartSchedule.String, templateScheduleOptions) - if err == nil { - err = db.UpdateWorkspaceNextStartAt(ctx, database.UpdateWorkspaceNextStartAtParams{ - ID: workspace.ID, - NextStartAt: sql.NullTime{Valid: true, Time: nextStartAt.UTC()}, - }) + if workspace.AutostartSchedule.Valid { + templateScheduleOptions, err := templateScheduleStore.Get(ctx, db, workspace.TemplateID) if err != nil { - return xerrors.Errorf("update workspace next start at: %w", err) + return xerrors.Errorf("get template schedule options: %w", err) + } + + nextStartAt, err := schedule.NextAllowedAutostart(now, workspace.AutostartSchedule.String, templateScheduleOptions) + if err == nil { + err = db.UpdateWorkspaceNextStartAt(ctx, database.UpdateWorkspaceNextStartAtParams{ + ID: workspace.ID, + NextStartAt: sql.NullTime{Valid: true, Time: nextStartAt.UTC()}, + }) + if err != nil { + return xerrors.Errorf("update workspace next start at: %w", err) + } } } + deadline = autoStop.Deadline + maxDeadline = autoStop.MaxDeadline } err = db.UpdateProvisionerJobWithCompleteByID(ctx, database.UpdateProvisionerJobWithCompleteByIDParams{ @@ -1917,8 +1933,8 @@ func (s *server) completeWorkspaceBuildJob(ctx context.Context, job database.Pro } err = db.UpdateWorkspaceBuildDeadlineByID(ctx, database.UpdateWorkspaceBuildDeadlineByIDParams{ ID: workspaceBuild.ID, - Deadline: autoStop.Deadline, - MaxDeadline: autoStop.MaxDeadline, + Deadline: deadline, + MaxDeadline: maxDeadline, UpdatedAt: now, }) if err != nil { diff --git a/coderd/workspaces.go b/coderd/workspaces.go index 6da85c7608ca4..ac5c2d92d628e 100644 --- a/coderd/workspaces.go +++ b/coderd/workspaces.go @@ -635,10 +635,17 @@ func createWorkspace( claimedWorkspace *database.Workspace ) + // Use injected Clock to allow time mocking in tests + now := api.Clock.Now() + // If a template preset was chosen, try claim a prebuilt workspace. if req.TemplateVersionPresetID != uuid.Nil { // Try and claim an eligible prebuild, if available. - claimedWorkspace, err = claimPrebuild(ctx, prebuildsClaimer, db, api.Logger, req, owner) + // On successful claim, initialize all lifecycle fields from template and workspace-level config + // so the newly claimed workspace is properly managed by the lifecycle executor. + claimedWorkspace, err = claimPrebuild( + ctx, prebuildsClaimer, db, api.Logger, now, req, owner, + dbAutostartSchedule, nextStartAt, dbTTL) // If claiming fails with an expected error (no claimable prebuilds or AGPL does not support prebuilds), // we fall back to creating a new workspace. Otherwise, propagate the unexpected error. if err != nil { @@ -666,7 +673,6 @@ func createWorkspace( // No prebuild found; regular flow. if claimedWorkspace == nil { - now := dbtime.Now() // Workspaces are created without any versions. minimumWorkspace, err := db.InsertWorkspace(ctx, database.InsertWorkspaceParams{ ID: uuid.New(), @@ -681,7 +687,7 @@ func createWorkspace( Ttl: dbTTL, // The workspaces page will sort by last used at, and it's useful to // have the newly created workspace at the top of the list! 
- LastUsedAt: dbtime.Now(), + LastUsedAt: now, AutomaticUpdates: dbAU, }) if err != nil { @@ -872,8 +878,19 @@ func requestTemplate(ctx context.Context, rw http.ResponseWriter, req codersdk.C return template, true } -func claimPrebuild(ctx context.Context, claimer prebuilds.Claimer, db database.Store, logger slog.Logger, req codersdk.CreateWorkspaceRequest, owner workspaceOwner) (*database.Workspace, error) { - claimedID, err := claimer.Claim(ctx, owner.ID, req.Name, req.TemplateVersionPresetID) +func claimPrebuild( + ctx context.Context, + claimer prebuilds.Claimer, + db database.Store, + logger slog.Logger, + now time.Time, + req codersdk.CreateWorkspaceRequest, + owner workspaceOwner, + autostartSchedule sql.NullString, + nextStartAt sql.NullTime, + ttl sql.NullInt64, +) (*database.Workspace, error) { + claimedID, err := claimer.Claim(ctx, now, owner.ID, req.Name, req.TemplateVersionPresetID, autostartSchedule, nextStartAt, ttl) if err != nil { // TODO: enhance this by clarifying whether this *specific* prebuild failed or whether there are none to claim. return nil, xerrors.Errorf("claim prebuild: %w", err) diff --git a/enterprise/coderd/prebuilds/claim.go b/enterprise/coderd/prebuilds/claim.go index b6a85ae1fc094..daea281d38d60 100644 --- a/enterprise/coderd/prebuilds/claim.go +++ b/enterprise/coderd/prebuilds/claim.go @@ -4,6 +4,7 @@ import ( "context" "database/sql" "errors" + "time" "github.com/google/uuid" "golang.org/x/xerrors" @@ -24,14 +25,22 @@ func NewEnterpriseClaimer(store database.Store) *EnterpriseClaimer { func (c EnterpriseClaimer) Claim( ctx context.Context, + now time.Time, userID uuid.UUID, name string, presetID uuid.UUID, + autostartSchedule sql.NullString, + nextStartAt sql.NullTime, + ttl sql.NullInt64, ) (*uuid.UUID, error) { result, err := c.store.ClaimPrebuiltWorkspace(ctx, database.ClaimPrebuiltWorkspaceParams{ - NewUserID: userID, - NewName: name, - PresetID: presetID, + NewUserID: userID, + NewName: name, + Now: now, + PresetID: presetID, + AutostartSchedule: autostartSchedule, + NextStartAt: nextStartAt, + WorkspaceTtl: ttl, }) if err != nil { switch { diff --git a/enterprise/coderd/prebuilds/claim_test.go b/enterprise/coderd/prebuilds/claim_test.go index 01195e3485016..9ed7e9ffd19e0 100644 --- a/enterprise/coderd/prebuilds/claim_test.go +++ b/enterprise/coderd/prebuilds/claim_test.go @@ -15,6 +15,8 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/xerrors" + "github.com/coder/coder/v2/coderd/database/dbtime" + "github.com/coder/coder/v2/coderd/files" "github.com/coder/quartz" @@ -132,7 +134,9 @@ func TestClaimPrebuild(t *testing.T) { t.Run(name, func(t *testing.T) { t.Parallel() - // Setup. + // Setup + clock := quartz.NewMock(t) + clock.Set(dbtime.Now()) ctx := testutil.Context(t, testutil.WaitSuperLong) db, pubsub := dbtestutil.NewDB(t) @@ -144,6 +148,7 @@ func TestClaimPrebuild(t *testing.T) { Options: &coderdtest.Options{ Database: spy, Pubsub: pubsub, + Clock: clock, }, LicenseOptions: &coderdenttest.LicenseOptions{ Features: license.Features{ @@ -238,6 +243,7 @@ func TestClaimPrebuild(t *testing.T) { // When: a user creates a new workspace with a preset for which prebuilds are configured. 
workspaceName := strings.ReplaceAll(testutil.GetRandomName(t), "_", "-") params := database.ClaimPrebuiltWorkspaceParams{ + Now: clock.Now(), NewUserID: user.ID, NewName: workspaceName, PresetID: presets[0].ID, diff --git a/enterprise/coderd/schedule/template.go b/enterprise/coderd/schedule/template.go index 313268f2e39ad..203de46db4168 100644 --- a/enterprise/coderd/schedule/template.go +++ b/enterprise/coderd/schedule/template.go @@ -205,7 +205,6 @@ func (s *EnterpriseTemplateScheduleStore) Set(ctx context.Context, db database.S if opts.DefaultTTL != 0 { ttl = sql.NullInt64{Valid: true, Int64: int64(opts.DefaultTTL)} } - if err = tx.UpdateWorkspacesTTLByTemplateID(ctx, database.UpdateWorkspacesTTLByTemplateIDParams{ TemplateID: template.ID, Ttl: ttl, diff --git a/enterprise/coderd/workspaces_test.go b/enterprise/coderd/workspaces_test.go index f8fcddb005e19..a260de9506e82 100644 --- a/enterprise/coderd/workspaces_test.go +++ b/enterprise/coderd/workspaces_test.go @@ -1722,7 +1722,7 @@ func TestTemplateDoesNotAllowUserAutostop(t *testing.T) { }) } -func TestExecutorPrebuilds(t *testing.T) { +func TestPrebuildsAutobuild(t *testing.T) { t.Parallel() if !dbtestutil.WillUsePostgres() { @@ -1800,14 +1800,21 @@ func TestExecutorPrebuilds(t *testing.T) { username string, version codersdk.TemplateVersion, presetID uuid.UUID, + autostartSchedule ...string, ) codersdk.Workspace { t.Helper() + var startSchedule string + if len(autostartSchedule) > 0 { + startSchedule = autostartSchedule[0] + } + workspaceName := strings.ReplaceAll(testutil.GetRandomName(t), "_", "-") userWorkspace, err := userClient.CreateUserWorkspace(ctx, username, codersdk.CreateWorkspaceRequest{ TemplateVersionID: version.ID, Name: workspaceName, TemplateVersionPresetID: presetID, + AutostartSchedule: ptr.Ref(startSchedule), }) require.NoError(t, err) build := coderdtest.AwaitWorkspaceBuildJobCompleted(t, userClient, userWorkspace.LatestBuild.ID) @@ -1820,7 +1827,7 @@ func TestExecutorPrebuilds(t *testing.T) { // Prebuilt workspaces should not be autostopped based on the default TTL. // This test ensures that DefaultTTLMillis is ignored while the workspace is in a prebuild state. - // Once the workspace is claimed, the default autostop timer should take effect. + // Once the workspace is claimed, the default TTL should take effect. 
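+	// Once claimed, the workspace should then be autostopped by the lifecycle executor when its TTL deadline elapses.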
t.Run("DefaultTTLOnlyTriggersAfterClaim", func(t *testing.T) { t.Parallel() @@ -1875,9 +1882,9 @@ func TestExecutorPrebuilds(t *testing.T) { userClient, user := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID, rbac.RoleMember()) version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, templateWithAgentAndPresetsWithPrebuilds(prebuildInstances)) coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID) + // Set a template level TTL to trigger the autostop + // Template level TTL can only be set if autostop is disabled for users coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID, func(ctr *codersdk.CreateTemplateRequest) { - // Set a template level TTL to trigger the autostop - // Template level TTL can only be set if autostop is disabled for users ctr.AllowUserAutostop = ptr.Ref[bool](false) ctr.DefaultTTLMillis = ptr.Ref[int64](ttlTime.Milliseconds()) }) @@ -1890,43 +1897,48 @@ func TestExecutorPrebuilds(t *testing.T) { runningPrebuilds := getRunningPrebuilds(t, ctx, db, int(prebuildInstances)) require.Len(t, runningPrebuilds, int(prebuildInstances)) - // Given: a running prebuilt workspace with a deadline, ready to be claimed + // Given: a running prebuilt workspace, ready to be claimed prebuild := coderdtest.MustWorkspace(t, client, runningPrebuilds[0].ID) require.Equal(t, codersdk.WorkspaceTransitionStart, prebuild.LatestBuild.Transition) - require.NotZero(t, prebuild.LatestBuild.Deadline) - - // When: the autobuild executor ticks *after* the deadline - next := prebuild.LatestBuild.Deadline.Time.Add(time.Minute) - clock.Set(next) + // Prebuilt workspaces should have an empty Deadline and MaxDeadline + // which is equivalent to 0001-01-01 00:00:00 +0000 + require.Zero(t, prebuild.LatestBuild.Deadline) + require.Zero(t, prebuild.LatestBuild.MaxDeadline) + + // When: the autobuild executor ticks *after* the TTL time (10:00 AM UTC) + next := clock.Now().Add(ttlTime).Add(time.Minute) + clock.Set(next) // 10:01 AM UTC go func() { tickCh <- next }() // Then: the prebuilt workspace should remain in a start transition - prebuildStats := <-statsCh + prebuildStats := testutil.RequireReceive(ctx, t, statsCh) require.Len(t, prebuildStats.Errors, 0) require.Len(t, prebuildStats.Transitions, 0) require.Equal(t, codersdk.WorkspaceTransitionStart, prebuild.LatestBuild.Transition) prebuild = coderdtest.MustWorkspace(t, client, prebuild.ID) require.Equal(t, codersdk.BuildReasonInitiator, prebuild.LatestBuild.Reason) + require.Zero(t, prebuild.LatestBuild.Deadline) + require.Zero(t, prebuild.LatestBuild.MaxDeadline) // Given: a user claims the prebuilt workspace sometime later - clock.Set(clock.Now().Add(ttlTime)) + clock.Set(clock.Now().Add(1 * time.Hour)) // 11:01 AM UTC workspace := claimPrebuild(t, ctx, client, userClient, user.Username, version, presets[0].ID) require.Equal(t, prebuild.ID, workspace.ID) - // Workspace deadline must be ttlTime from the time it is claimed + // Workspace deadline must be ttlTime from the time it is claimed (1:01 PM UTC) require.True(t, workspace.LatestBuild.Deadline.Time.Equal(clock.Now().Add(ttlTime))) - // When: the autobuild executor ticks *after* the deadline + // When: the autobuild executor ticks *after* the TTL time (1:01 PM UTC) next = workspace.LatestBuild.Deadline.Time.Add(time.Minute) - clock.Set(next) + clock.Set(next) // 1:02 PM UTC go func() { tickCh <- next close(tickCh) }() // Then: the workspace should be stopped - workspaceStats := <-statsCh + workspaceStats := testutil.RequireReceive(ctx, t, 
statsCh) require.Len(t, workspaceStats.Errors, 0) require.Len(t, workspaceStats.Transitions, 1) require.Contains(t, workspaceStats.Transitions, workspace.ID) @@ -1941,158 +1953,125 @@ func TestExecutorPrebuilds(t *testing.T) { t.Run("AutostopScheduleOnlyTriggersAfterClaim", func(t *testing.T) { t.Parallel() - cases := []struct { - name string - isClaimedBeforeDeadline bool - }{ - // If the prebuild is claimed before the scheduled deadline, - // the claimed workspace should inherit and respect that same deadline. - { - name: "ClaimedBeforeDeadline_UsesSameDeadline", - isClaimedBeforeDeadline: true, + // Set the clock to Monday, January 1st, 2024 at 8:00 AM UTC to keep the test deterministic + clock := quartz.NewMock(t) + clock.Set(time.Date(2024, 1, 1, 8, 0, 0, 0, time.UTC)) + + // Setup + ctx := testutil.Context(t, testutil.WaitSuperLong) + db, pb := dbtestutil.NewDB(t, dbtestutil.WithDumpOnFailure()) + logger := testutil.Logger(t) + tickCh := make(chan time.Time) + statsCh := make(chan autobuild.Stats) + notificationsNoop := notifications.NewNoopEnqueuer() + client, _, api, owner := coderdenttest.NewWithAPI(t, &coderdenttest.Options{ + Options: &coderdtest.Options{ + Database: db, + Pubsub: pb, + AutobuildTicker: tickCh, + IncludeProvisionerDaemon: true, + AutobuildStats: statsCh, + Clock: clock, + TemplateScheduleStore: schedule.NewEnterpriseTemplateScheduleStore( + agplUserQuietHoursScheduleStore(), + notificationsNoop, + logger, + clock, + ), }, - // If the prebuild is claimed after the scheduled deadline, - // the workspace should not stop immediately, but instead respect the next - // valid scheduled deadline (the next day). - { - name: "ClaimedAfterDeadline_SchedulesForNextDay", - isClaimedBeforeDeadline: false, + LicenseOptions: &coderdenttest.LicenseOptions{ + Features: license.Features{codersdk.FeatureAdvancedTemplateScheduling: 1}, }, - } + }) - for _, tc := range cases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - // Set the clock to Monday, January 1st, 2024 at 8:00 AM UTC to keep the test deterministic - clock := quartz.NewMock(t) - clock.Set(time.Date(2024, 1, 1, 8, 0, 0, 0, time.UTC)) - - // Setup - ctx := testutil.Context(t, testutil.WaitSuperLong) - db, pb := dbtestutil.NewDB(t, dbtestutil.WithDumpOnFailure()) - logger := testutil.Logger(t) - tickCh := make(chan time.Time) - statsCh := make(chan autobuild.Stats) - notificationsNoop := notifications.NewNoopEnqueuer() - client, _, api, owner := coderdenttest.NewWithAPI(t, &coderdenttest.Options{ - Options: &coderdtest.Options{ - Database: db, - Pubsub: pb, - AutobuildTicker: tickCh, - IncludeProvisionerDaemon: true, - AutobuildStats: statsCh, - Clock: clock, - TemplateScheduleStore: schedule.NewEnterpriseTemplateScheduleStore( - agplUserQuietHoursScheduleStore(), - notificationsNoop, - logger, - clock, - ), - }, - LicenseOptions: &coderdenttest.LicenseOptions{ - Features: license.Features{codersdk.FeatureAdvancedTemplateScheduling: 1}, - }, - }) + // Setup Prebuild reconciler + cache := files.New(prometheus.NewRegistry(), &coderdtest.FakeAuthorizer{}) + reconciler := prebuilds.NewStoreReconciler( + db, pb, cache, + codersdk.PrebuildsConfig{}, + logger, + clock, + prometheus.NewRegistry(), + notificationsNoop, + api.AGPL.BuildUsageChecker, + ) + var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer(db) + api.AGPL.PrebuildsClaimer.Store(&claimer) - // Setup Prebuild reconciler - cache := files.New(prometheus.NewRegistry(), &coderdtest.FakeAuthorizer{}) - reconciler := 
prebuilds.NewStoreReconciler( - db, pb, cache, - codersdk.PrebuildsConfig{}, - logger, - clock, - prometheus.NewRegistry(), - notificationsNoop, - api.AGPL.BuildUsageChecker, - ) - var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer(db) - api.AGPL.PrebuildsClaimer.Store(&claimer) - - // Setup user, template and template version with a preset with 1 prebuild instance - prebuildInstances := int32(1) - userClient, user := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID, rbac.RoleMember()) - version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, templateWithAgentAndPresetsWithPrebuilds(prebuildInstances)) - coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID) - coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID, func(ctr *codersdk.CreateTemplateRequest) { - // Set a template level Autostop schedule to trigger the autostop daily - ctr.AutostopRequirement = ptr.Ref[codersdk.TemplateAutostopRequirement]( - codersdk.TemplateAutostopRequirement{ - DaysOfWeek: []string{"monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"}, - Weeks: 1, - }) + // Setup user, template and template version with a preset with 1 prebuild instance + prebuildInstances := int32(1) + userClient, user := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID, rbac.RoleMember()) + version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, templateWithAgentAndPresetsWithPrebuilds(prebuildInstances)) + coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID) + // Set a template level Autostop schedule to trigger the autostop daily + coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID, func(ctr *codersdk.CreateTemplateRequest) { + ctr.AutostopRequirement = ptr.Ref[codersdk.TemplateAutostopRequirement]( + codersdk.TemplateAutostopRequirement{ + DaysOfWeek: []string{"monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"}, + Weeks: 1, }) - presets, err := client.TemplateVersionPresets(ctx, version.ID) - require.NoError(t, err) - require.Len(t, presets, 1) - - // Given: Reconciliation loop runs and starts prebuilt workspace - runReconciliationLoop(t, ctx, db, reconciler, presets) - runningPrebuilds := getRunningPrebuilds(t, ctx, db, int(prebuildInstances)) - require.Len(t, runningPrebuilds, int(prebuildInstances)) - - // Given: a running prebuilt workspace with a deadline, ready to be claimed - prebuild := coderdtest.MustWorkspace(t, client, runningPrebuilds[0].ID) - require.Equal(t, codersdk.WorkspaceTransitionStart, prebuild.LatestBuild.Transition) - require.NotZero(t, prebuild.LatestBuild.Deadline) - - next := clock.Now() - if tc.isClaimedBeforeDeadline { - // When: the autobuild executor ticks *before* the deadline: - next = next.Add(time.Minute) - } else { - // When: the autobuild executor ticks *after* the deadline: - next = next.Add(24 * time.Hour) - } + }) + presets, err := client.TemplateVersionPresets(ctx, version.ID) + require.NoError(t, err) + require.Len(t, presets, 1) - clock.Set(next) - go func() { - tickCh <- next - }() - - // Then: the prebuilt workspace should remain in a start transition - prebuildStats := <-statsCh - require.Len(t, prebuildStats.Errors, 0) - require.Len(t, prebuildStats.Transitions, 0) - require.Equal(t, codersdk.WorkspaceTransitionStart, prebuild.LatestBuild.Transition) - prebuild = coderdtest.MustWorkspace(t, client, prebuild.ID) - require.Equal(t, codersdk.BuildReasonInitiator, prebuild.LatestBuild.Reason) - - // 
Given: a user claims the prebuilt workspace - workspace := claimPrebuild(t, ctx, client, userClient, user.Username, version, presets[0].ID) - require.Equal(t, prebuild.ID, workspace.ID) - - if tc.isClaimedBeforeDeadline { - // Then: the claimed workspace should inherit and respect that same deadline. - require.True(t, workspace.LatestBuild.Deadline.Time.Equal(prebuild.LatestBuild.Deadline.Time)) - } else { - // Then: the claimed workspace should respect the next valid scheduled deadline (next day). - require.True(t, workspace.LatestBuild.Deadline.Time.Equal(clock.Now().Truncate(24*time.Hour).Add(24*time.Hour))) - } + // Given: Reconciliation loop runs and starts prebuilt workspace + runReconciliationLoop(t, ctx, db, reconciler, presets) + runningPrebuilds := getRunningPrebuilds(t, ctx, db, int(prebuildInstances)) + require.Len(t, runningPrebuilds, int(prebuildInstances)) - // When: the autobuild executor ticks *after* the deadline: - next = workspace.LatestBuild.Deadline.Time.Add(time.Minute) - clock.Set(next) - go func() { - tickCh <- next - close(tickCh) - }() - - // Then: the workspace should be stopped - workspaceStats := <-statsCh - require.Len(t, workspaceStats.Errors, 0) - require.Len(t, workspaceStats.Transitions, 1) - require.Contains(t, workspaceStats.Transitions, workspace.ID) - require.Equal(t, database.WorkspaceTransitionStop, workspaceStats.Transitions[workspace.ID]) - workspace = coderdtest.MustWorkspace(t, client, workspace.ID) - require.Equal(t, codersdk.BuildReasonAutostop, workspace.LatestBuild.Reason) - }) - } + // Given: a running prebuilt workspace, ready to be claimed + prebuild := coderdtest.MustWorkspace(t, client, runningPrebuilds[0].ID) + require.Equal(t, codersdk.WorkspaceTransitionStart, prebuild.LatestBuild.Transition) + // Prebuilt workspaces should have an empty Deadline and MaxDeadline + // which is equivalent to 0001-01-01 00:00:00 +0000 + require.Zero(t, prebuild.LatestBuild.Deadline) + require.Zero(t, prebuild.LatestBuild.MaxDeadline) + + // When: the autobuild executor ticks *after* the deadline (2024-01-02 0:00 UTC) + next := clock.Now().Truncate(24 * time.Hour).Add(24 * time.Hour).Add(time.Minute) + clock.Set(next) // 2024-01-02 0:01 UTC + go func() { + tickCh <- next + }() + + // Then: the prebuilt workspace should remain in a start transition + prebuildStats := testutil.RequireReceive(ctx, t, statsCh) + require.Len(t, prebuildStats.Errors, 0) + require.Len(t, prebuildStats.Transitions, 0) + require.Equal(t, codersdk.WorkspaceTransitionStart, prebuild.LatestBuild.Transition) + prebuild = coderdtest.MustWorkspace(t, client, prebuild.ID) + require.Equal(t, codersdk.BuildReasonInitiator, prebuild.LatestBuild.Reason) + require.Zero(t, prebuild.LatestBuild.Deadline) + require.Zero(t, prebuild.LatestBuild.MaxDeadline) + + // Given: a user claims the prebuilt workspace + workspace := claimPrebuild(t, ctx, client, userClient, user.Username, version, presets[0].ID) + require.Equal(t, prebuild.ID, workspace.ID) + // Then: the claimed workspace should respect the next valid scheduled deadline (2024-01-03 0:00 UTC) + require.True(t, workspace.LatestBuild.Deadline.Time.Equal(clock.Now().Truncate(24*time.Hour).Add(24*time.Hour))) + + // When: the autobuild executor ticks *after* the deadline (2024-01-03 0:00 UTC) + next = workspace.LatestBuild.Deadline.Time.Add(time.Minute) + clock.Set(next) // 2024-01-03 0:01 UTC + go func() { + tickCh <- next + close(tickCh) + }() + + // Then: the workspace should be stopped + workspaceStats := testutil.RequireReceive(ctx, t, 
statsCh) + require.Len(t, workspaceStats.Errors, 0) + require.Len(t, workspaceStats.Transitions, 1) + require.Contains(t, workspaceStats.Transitions, workspace.ID) + require.Equal(t, database.WorkspaceTransitionStop, workspaceStats.Transitions[workspace.ID]) + workspace = coderdtest.MustWorkspace(t, client, workspace.ID) + require.Equal(t, codersdk.BuildReasonAutostop, workspace.LatestBuild.Reason) }) // Prebuild workspaces should not follow the autostart schedule. // This test verifies that AutostartRequirement (autostart schedule) is ignored while the workspace is a prebuild. + // After being claimed, the workspace should be started according to the autostart schedule. t.Run("AutostartScheduleOnlyTriggersAfterClaim", func(t *testing.T) { t.Parallel() @@ -2146,8 +2125,11 @@ func TestExecutorPrebuilds(t *testing.T) { userClient, user := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID, rbac.RoleMember()) version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, templateWithAgentAndPresetsWithPrebuilds(prebuildInstances)) coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID) + // Template-level autostart config only defines allowed days for workspaces to autostart + // The actual autostart schedule is set at the workspace level + sched, err := cron.Weekly("CRON_TZ=UTC 0 0 * * *") + require.NoError(t, err) coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID, func(ctr *codersdk.CreateTemplateRequest) { - // Set a template level Autostart schedule to trigger the autostart daily ctr.AllowUserAutostart = ptr.Ref[bool](true) ctr.AutostartRequirement = &codersdk.TemplateAutostartRequirement{DaysOfWeek: codersdk.AllDaysOfWeek} }) @@ -2160,14 +2142,11 @@ func TestExecutorPrebuilds(t *testing.T) { runningPrebuilds := getRunningPrebuilds(t, ctx, db, int(prebuildInstances)) require.Len(t, runningPrebuilds, int(prebuildInstances)) - // Given: prebuilt workspace has autostart schedule daily at midnight + // Given: a running prebuilt workspace prebuild := coderdtest.MustWorkspace(t, client, runningPrebuilds[0].ID) - sched, err := cron.Weekly("CRON_TZ=UTC 0 0 * * *") - require.NoError(t, err) - err = client.UpdateWorkspaceAutostart(ctx, prebuild.ID, codersdk.UpdateWorkspaceAutostartRequest{ - Schedule: ptr.Ref(sched.String()), - }) - require.NoError(t, err) + // Prebuilt workspaces should have an empty Autostart Schedule + require.Nil(t, prebuild.AutostartSchedule) + require.Nil(t, prebuild.NextStartAt) // Given: prebuilt workspace is stopped prebuild = coderdtest.MustTransitionWorkspace(t, client, prebuild.ID, codersdk.WorkspaceTransitionStart, codersdk.WorkspaceTransitionStop) @@ -2181,32 +2160,32 @@ func TestExecutorPrebuilds(t *testing.T) { }() // Then: the prebuilt workspace should remain in a stop transition - prebuildStats := <-statsCh + prebuildStats := testutil.RequireReceive(ctx, t, statsCh) require.Len(t, prebuildStats.Errors, 0) require.Len(t, prebuildStats.Transitions, 0) require.Equal(t, codersdk.WorkspaceTransitionStop, prebuild.LatestBuild.Transition) prebuild = coderdtest.MustWorkspace(t, client, prebuild.ID) require.Equal(t, codersdk.BuildReasonInitiator, prebuild.LatestBuild.Reason) + require.Nil(t, prebuild.AutostartSchedule) + require.Nil(t, prebuild.NextStartAt) // Given: a prebuilt workspace that is running and ready to be claimed prebuild = coderdtest.MustTransitionWorkspace(t, client, prebuild.ID, codersdk.WorkspaceTransitionStop, codersdk.WorkspaceTransitionStart) coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, 
prebuild.LatestBuild.ID) - // Make sure the workspace's agent is again ready getRunningPrebuilds(t, ctx, db, int(prebuildInstances)) - // Given: a user claims the prebuilt workspace - workspace := claimPrebuild(t, ctx, client, userClient, user.Username, version, presets[0].ID) + // Given: a user claims the prebuilt workspace with an Autostart schedule request + workspace := claimPrebuild(t, ctx, client, userClient, user.Username, version, presets[0].ID, sched.String()) require.Equal(t, prebuild.ID, workspace.ID) + // Then: newly claimed workspace's AutostartSchedule and NextStartAt should be set + require.NotNil(t, workspace.AutostartSchedule) require.NotNil(t, workspace.NextStartAt) // Given: workspace is stopped workspace = coderdtest.MustTransitionWorkspace(t, client, workspace.ID, codersdk.WorkspaceTransitionStart, codersdk.WorkspaceTransitionStop) coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID) - // Then: the claimed workspace should inherit and respect that same NextStartAt - require.True(t, workspace.NextStartAt.Equal(*prebuild.NextStartAt)) - // Tick at the next scheduled time after the prebuild’s LatestBuild.CreatedAt, // since the next allowed autostart is calculated starting from that point. // When: the autobuild executor ticks after the scheduled time @@ -2215,17 +2194,19 @@ func TestExecutorPrebuilds(t *testing.T) { }() // Then: the workspace should have a NextStartAt equal to the next autostart schedule - workspaceStats := <-statsCh + workspaceStats := testutil.RequireReceive(ctx, t, statsCh) require.Len(t, workspaceStats.Errors, 0) require.Len(t, workspaceStats.Transitions, 1) workspace = coderdtest.MustWorkspace(t, client, workspace.ID) + require.NotNil(t, workspace.AutostartSchedule) require.NotNil(t, workspace.NextStartAt) require.Equal(t, sched.Next(clock.Now()), workspace.NextStartAt.UTC()) }) - // Prebuild workspaces should not transition to dormant when the inactive TTL is reached. - // This test verifies that TimeTilDormantMillis is ignored while the workspace is a prebuild. - // After being claimed, the workspace should become dormant according to the configured inactivity period. + // Prebuild workspaces should not transition to dormant or be deleted due to inactivity. + // This test verifies that both TimeTilDormantMillis and TimeTilDormantAutoDeleteMillis + // are ignored while the workspace is a prebuild. After the workspace is claimed, + // it should respect these inactivity thresholds accordingly. 
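+	// The template sets both TimeTilDormantMillis and TimeTilDormantAutoDeleteMillis, so the claimed
+	// workspace should first become dormant and then be deleted once the deletion TTL elapses.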
t.Run("DormantOnlyAfterClaimed", func(t *testing.T) { t.Parallel() @@ -2276,13 +2257,15 @@ func TestExecutorPrebuilds(t *testing.T) { // Setup user, template and template version with a preset with 1 prebuild instance prebuildInstances := int32(1) - inactiveTTL := 2 * time.Hour + dormantTTL := 2 * time.Hour + deletionTTL := 2 * time.Hour userClient, user := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID, rbac.RoleMember()) version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, templateWithAgentAndPresetsWithPrebuilds(prebuildInstances)) coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID) + // Set a template level dormant TTL to trigger dormancy coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID, func(ctr *codersdk.CreateTemplateRequest) { - // Set a template level inactive TTL to trigger dormancy - ctr.TimeTilDormantMillis = ptr.Ref[int64](inactiveTTL.Milliseconds()) + ctr.TimeTilDormantMillis = ptr.Ref[int64](dormantTTL.Milliseconds()) + ctr.TimeTilDormantAutoDeleteMillis = ptr.Ref[int64](deletionTTL.Milliseconds()) }) presets, err := client.TemplateVersionPresets(ctx, version.ID) require.NoError(t, err) @@ -2296,41 +2279,68 @@ func TestExecutorPrebuilds(t *testing.T) { // Given: a running prebuilt workspace, ready to be claimed prebuild := coderdtest.MustWorkspace(t, client, runningPrebuilds[0].ID) require.Equal(t, codersdk.WorkspaceTransitionStart, prebuild.LatestBuild.Transition) + require.Nil(t, prebuild.DormantAt) + require.Nil(t, prebuild.DeletingAt) - // When: the autobuild executor ticks *after* the inactive TTL + // When: the autobuild executor ticks *after* the dormant TTL (10:00 AM UTC) + next := clock.Now().Add(dormantTTL).Add(time.Minute) + clock.Set(next) // 10:01 AM UTC go func() { - tickCh <- prebuild.LastUsedAt.Add(inactiveTTL).Add(time.Minute) + tickCh <- next }() // Then: the prebuilt workspace should remain in a start transition - prebuildStats := <-statsCh + prebuildStats := testutil.RequireReceive(ctx, t, statsCh) require.Len(t, prebuildStats.Errors, 0) require.Len(t, prebuildStats.Transitions, 0) require.Equal(t, codersdk.WorkspaceTransitionStart, prebuild.LatestBuild.Transition) prebuild = coderdtest.MustWorkspace(t, client, prebuild.ID) require.Equal(t, codersdk.BuildReasonInitiator, prebuild.LatestBuild.Reason) + require.Nil(t, prebuild.DormantAt) + require.Nil(t, prebuild.DeletingAt) // Given: a user claims the prebuilt workspace sometime later - clock.Set(clock.Now().Add(inactiveTTL)) + clock.Set(clock.Now().Add(1 * time.Hour)) // 11:01 AM UTC workspace := claimPrebuild(t, ctx, client, userClient, user.Username, version, presets[0].ID) require.Equal(t, prebuild.ID, workspace.ID) - require.Nil(t, prebuild.DormantAt) + // Then: the claimed workspace should have DormantAt and DeletingAt unset (nil), + // and LastUsedAt updated + require.Nil(t, workspace.DormantAt) + require.Nil(t, workspace.DeletingAt) + require.True(t, workspace.LastUsedAt.After(prebuild.LastUsedAt)) - // When: the autobuild executor ticks *after* the inactive TTL + // When: the autobuild executor ticks *after* the dormant TTL (1:01 PM UTC) + next = clock.Now().Add(dormantTTL).Add(time.Minute) + clock.Set(next) // 1:02 PM UTC go func() { - tickCh <- prebuild.LastUsedAt.Add(inactiveTTL).Add(time.Minute) - close(tickCh) + tickCh <- next }() - // Then: the workspace should transition to stopped state for breaching failure TTL - workspaceStats := <-statsCh + // Then: the workspace should transition to stopped state for breaching 
dormant TTL + workspaceStats := testutil.RequireReceive(ctx, t, statsCh) require.Len(t, workspaceStats.Errors, 0) require.Len(t, workspaceStats.Transitions, 1) require.Contains(t, workspaceStats.Transitions, workspace.ID) require.Equal(t, database.WorkspaceTransitionStop, workspaceStats.Transitions[workspace.ID]) workspace = coderdtest.MustWorkspace(t, client, workspace.ID) + coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID) + workspace = coderdtest.MustWorkspace(t, client, workspace.ID) require.Equal(t, codersdk.BuildReasonDormancy, workspace.LatestBuild.Reason) + require.Equal(t, codersdk.WorkspaceStatusStopped, workspace.LatestBuild.Status) require.NotNil(t, workspace.DormantAt) + require.NotNil(t, workspace.DeletingAt) + + // When: the autobuild executor ticks *after* the deletion TTL + go func() { + tickCh <- workspace.DeletingAt.Add(time.Minute) + }() + + // Then: the workspace should be deleted + dormantWorkspaceStats := testutil.RequireReceive(ctx, t, statsCh) + require.Len(t, dormantWorkspaceStats.Errors, 0) + require.Len(t, dormantWorkspaceStats.Transitions, 1) + require.Contains(t, dormantWorkspaceStats.Transitions, workspace.ID) + require.Equal(t, database.WorkspaceTransitionDelete, dormantWorkspaceStats.Transitions[workspace.ID]) }) // Prebuild workspaces should not be deleted when the failure TTL is reached. @@ -2390,8 +2400,8 @@ func TestExecutorPrebuilds(t *testing.T) { failureTTL := 2 * time.Hour version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, templateWithFailedResponseAndPresetsWithPrebuilds(prebuildInstances)) coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID) + // Set a template level Failure TTL to trigger workspace deletion template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID, func(ctr *codersdk.CreateTemplateRequest) { - // Set a template level Failure TTL to trigger workspace deletion ctr.FailureTTLMillis = ptr.Ref[int64](failureTTL.Milliseconds()) }) presets, err := client.TemplateVersionPresets(ctx, version.ID) @@ -2400,7 +2410,6 @@ func TestExecutorPrebuilds(t *testing.T) { // Given: reconciliation loop runs and starts prebuilt workspace in failed state runReconciliationLoop(t, ctx, db, reconciler, presets) - var failedWorkspaceBuilds []database.GetFailedWorkspaceBuildsByTemplateIDRow require.Eventually(t, func() bool { rows, err := db.GetFailedWorkspaceBuildsByTemplateID(ctx, database.GetFailedWorkspaceBuildsByTemplateIDParams{ @@ -2427,7 +2436,7 @@ func TestExecutorPrebuilds(t *testing.T) { }() // Then: the prebuilt workspace should remain in a start transition - prebuildStats := <-statsCh + prebuildStats := testutil.RequireReceive(ctx, t, statsCh) require.Len(t, prebuildStats.Errors, 0) require.Len(t, prebuildStats.Transitions, 0) require.Equal(t, codersdk.WorkspaceTransitionStart, prebuild.LatestBuild.Transition) @@ -2437,50 +2446,46 @@ func TestExecutorPrebuilds(t *testing.T) { } func templateWithAgentAndPresetsWithPrebuilds(desiredInstances int32) *echo.Responses { - return &echo.Responses{ - Parse: echo.ParseComplete, - ProvisionPlan: []*proto.Response{ - { - Type: &proto.Response_Plan{ - Plan: &proto.PlanComplete{ - Presets: []*proto.Preset{ - { - Name: "preset-test", - Parameters: []*proto.PresetParameter{ - { - Name: "k1", - Value: "v1", - }, - }, - Prebuild: &proto.Prebuild{ - Instances: desiredInstances, - }, - }, - }, - }, + agent := &proto.Agent{ + Name: "smith", + OperatingSystem: "linux", + Architecture: "i386", + } + + resource := 
func(withAgent bool) *proto.Resource { + r := &proto.Resource{Type: "compute", Name: "main"} + if withAgent { + r.Agents = []*proto.Agent{agent} + } + return r + } + + applyResponse := func(withAgent bool) *proto.Response { + return &proto.Response{ + Type: &proto.Response_Apply{ + Apply: &proto.ApplyComplete{ + Resources: []*proto.Resource{resource(withAgent)}, }, }, - }, - ProvisionApply: []*proto.Response{ - { - Type: &proto.Response_Apply{ - Apply: &proto.ApplyComplete{ - Resources: []*proto.Resource{ - { - Type: "compute", - Name: "main", - Agents: []*proto.Agent{ - { - Name: "smith", - OperatingSystem: "linux", - Architecture: "i386", - }, - }, - }, - }, - }, + } + } + + return &echo.Responses{ + Parse: echo.ParseComplete, + ProvisionPlan: []*proto.Response{{ + Type: &proto.Response_Plan{ + Plan: &proto.PlanComplete{ + Presets: []*proto.Preset{{ + Name: "preset-test", + Parameters: []*proto.PresetParameter{{Name: "k1", Value: "v1"}}, + Prebuild: &proto.Prebuild{Instances: desiredInstances}, + }}, }, }, + }}, + ProvisionApplyMap: map[proto.WorkspaceTransition][]*proto.Response{ + proto.WorkspaceTransition_START: {applyResponse(true)}, + proto.WorkspaceTransition_STOP: {applyResponse(false)}, }, } } diff --git a/scripts/develop.sh b/scripts/develop.sh index 5a802735c7c66..23efe67576813 100755 --- a/scripts/develop.sh +++ b/scripts/develop.sh @@ -72,9 +72,25 @@ if [ -n "${CODER_AGENT_URL:-}" ]; then fi # Preflight checks: ensure we have our required dependencies, and make sure nothing is listening on port 3000 or 8080 -dependencies curl git go make pnpm -curl --fail http://127.0.0.1:3000 >/dev/null 2>&1 && echo '== ERROR: something is listening on port 3000. Kill it and re-run this script.' && exit 1 -curl --fail http://127.0.0.1:8080 >/dev/null 2>&1 && echo '== ERROR: something is listening on port 8080. Kill it and re-run this script.' && exit 1 +dependencies curl git go jq make pnpm + +if curl --silent --fail http://127.0.0.1:3000; then + # Check if this is the Coder development server. + if curl --silent --fail http://127.0.0.1:3000/api/v2/buildinfo 2>&1 | jq -r '.version' >/dev/null 2>&1; then + echo '== INFO: Coder development server is already running on port 3000!' && exit 0 + else + echo '== ERROR: something is listening on port 3000. Kill it and re-run this script.' && exit 1 + fi +fi + +if curl --fail http://127.0.0.1:8080 >/dev/null 2>&1; then + # Check if this is the Coder development frontend. + if curl --silent --fail http://127.0.0.1:8080/api/v2/buildinfo 2>&1 | jq -r '.version' >/dev/null 2>&1; then + echo '== INFO: Coder development frontend is already running on port 8080!' && exit 0 + else + echo '== ERROR: something is listening on port 8080. Kill it and re-run this script.' && exit 1 + fi +fi # Compile the CLI binary. This should also compile the frontend and refresh # node_modules if necessary. diff --git a/site/src/theme/icons.json b/site/src/theme/icons.json index 5dee3442e8fe6..79a76b4c8918f 100644 --- a/site/src/theme/icons.json +++ b/site/src/theme/icons.json @@ -89,6 +89,7 @@ "personalize.svg", "php.svg", "phpstorm.svg", + "postgres.svg", "projector.svg", "pycharm.svg", "python.svg", diff --git a/site/static/icon/postgres.svg b/site/static/icon/postgres.svg new file mode 100644 index 0000000000000..ce8789a76a307 --- /dev/null +++ b/site/static/icon/postgres.svg @@ -0,0 +1 @@ +