diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 03394c67b317b..3566f77982c1c 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -451,16 +451,21 @@ jobs: # Postgres tends not to choke. NUM_PARALLEL_PACKAGES=8 NUM_PARALLEL_TESTS=16 + # Only the CLI and Agent are officially supported on Windows and the rest are too flaky + PACKAGES="./cli/... ./enterprise/cli/... ./agent/..." elif [ "${{ runner.os }}" == "macOS" ]; then # Our macOS runners have 8 cores. We set NUM_PARALLEL_TESTS to 16 # because the tests complete faster and Postgres doesn't choke. It seems # that macOS's tmpfs is faster than the one on Windows. NUM_PARALLEL_PACKAGES=8 NUM_PARALLEL_TESTS=16 + # Only the CLI and Agent are officially supported on macOS and the rest are too flaky + PACKAGES="./cli/... ./enterprise/cli/... ./agent/..." elif [ "${{ runner.os }}" == "Linux" ]; then # Our Linux runners have 8 cores. NUM_PARALLEL_PACKAGES=8 NUM_PARALLEL_TESTS=8 + PACKAGES="./..." fi # by default, run tests with cache @@ -477,10 +482,7 @@ jobs: # invalidated. See scripts/normalize_path.sh for more details. normalize_path_with_symlinks "$RUNNER_TEMP/sym" "$(dirname $(which terraform))" - # We rerun failing tests to counteract flakiness coming from Postgres - # choking on macOS and Windows sometimes. - gotestsum --rerun-fails=2 --rerun-fails-max-failures=50 \ - --format standard-quiet --packages "./..." \ + gotestsum --format standard-quiet --packages "$PACKAGES" \ -- -timeout=20m -v -p $NUM_PARALLEL_PACKAGES -parallel=$NUM_PARALLEL_TESTS $TESTCOUNT - name: Upload Go Build Cache @@ -550,7 +552,6 @@ jobs: env: POSTGRES_VERSION: "17" TS_DEBUG_DISCO: "true" - TEST_RETRIES: 2 run: | make test-postgres @@ -604,7 +605,7 @@ jobs: POSTGRES_VERSION: "17" run: | make test-postgres-docker - gotestsum --junitfile="gotests.xml" --packages="./..." --rerun-fails=2 --rerun-fails-abort-on-data-race -- -race -parallel 4 -p 4 + gotestsum --junitfile="gotests.xml" --packages="./..." 
-- -race -parallel 4 -p 4 - name: Upload Test Cache uses: ./.github/actions/test-cache/upload @@ -726,7 +727,6 @@ jobs: if: ${{ !matrix.variant.premium }} env: DEBUG: pw:api - CODER_E2E_TEST_RETRIES: 2 working-directory: site # Run all of the tests with a premium license @@ -736,7 +736,6 @@ jobs: DEBUG: pw:api CODER_E2E_LICENSE: ${{ secrets.CODER_E2E_LICENSE }} CODER_E2E_REQUIRE_PREMIUM_TESTS: "1" - CODER_E2E_TEST_RETRIES: 2 working-directory: site - name: Upload Playwright Failed Tests @@ -1406,7 +1405,7 @@ jobs: uses: google-github-actions/setup-gcloud@77e7a554d41e2ee56fc945c52dfd3f33d12def9a # v2.1.4 - name: Set up Flux CLI - uses: fluxcd/flux2/action@bda4c8187e436462be0d072e728b67afa215c593 # v2.6.3 + uses: fluxcd/flux2/action@6bf37f6a560fd84982d67f853162e4b3c2235edb # v2.6.4 with: # Keep this and the github action up to date with the version of flux installed in dogfood cluster version: "2.5.1" diff --git a/.github/workflows/docs-ci.yaml b/.github/workflows/docs-ci.yaml index f65ab434a9309..39954783f1ba8 100644 --- a/.github/workflows/docs-ci.yaml +++ b/.github/workflows/docs-ci.yaml @@ -28,7 +28,7 @@ jobs: - name: Setup Node uses: ./.github/actions/setup-node - - uses: tj-actions/changed-files@cf79a64fed8a943fb1073260883d08fe0dfb4e56 # v45.0.7 + - uses: tj-actions/changed-files@055970845dd036d7345da7399b7e89f2e10f2b04 # v45.0.7 id: changed-files with: files: | diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 5a1faa9bd1528..1fc379ffbb2b6 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -634,6 +634,29 @@ jobs: - name: ls build run: ls -lh build + - name: Publish Coder CLI binaries and detached signatures to GCS + if: ${{ !inputs.dry_run && github.ref == 'refs/heads/main' && github.repository_owner == 'coder'}} + run: | + set -euxo pipefail + + version="$(./scripts/version.sh)" + + binaries=( + "coder-darwin-amd64" + "coder-darwin-arm64" + "coder-linux-amd64" + "coder-linux-arm64" + 
"coder-linux-armv7" + "coder-windows-amd64.exe" + "coder-windows-arm64.exe" + ) + + for binary in "${binaries[@]}"; do + detached_signature="${binary}.asc" + gcloud storage cp "./site/out/bin/${binary}" "gs://releases.coder.com/coder-cli/${version}/${binary}" + gcloud storage cp "./site/out/bin/${detached_signature}" "gs://releases.coder.com/coder-cli/${version}/${detached_signature}" + done + - name: Publish release run: | set -euo pipefail diff --git a/.github/workflows/start-workspace.yaml b/.github/workflows/start-workspace.yaml index 975acd7e1d939..9c1106a040a0e 100644 --- a/.github/workflows/start-workspace.yaml +++ b/.github/workflows/start-workspace.yaml @@ -19,7 +19,7 @@ jobs: timeout-minutes: 5 steps: - name: Start Coder workspace - uses: coder/start-workspace-action@35a4608cefc7e8cc56573cae7c3b85304575cb72 + uses: coder/start-workspace-action@f97a681b4cc7985c9eef9963750c7cc6ebc93a19 with: github-token: ${{ secrets.GITHUB_TOKEN }} github-username: >- diff --git a/agent/agentcontainers/api.go b/agent/agentcontainers/api.go index d749bf88a522e..dc92a4d38d9a2 100644 --- a/agent/agentcontainers/api.go +++ b/agent/agentcontainers/api.go @@ -2,8 +2,10 @@ package agentcontainers import ( "context" + "encoding/json" "errors" "fmt" + "maps" "net/http" "os" "path" @@ -30,6 +32,7 @@ import ( "github.com/coder/coder/v2/codersdk/agentsdk" "github.com/coder/coder/v2/provisioner" "github.com/coder/quartz" + "github.com/coder/websocket" ) const ( @@ -74,6 +77,7 @@ type API struct { mu sync.RWMutex // Protects the following fields. initDone chan struct{} // Closed by Init. + updateChans []chan struct{} closed bool containers codersdk.WorkspaceAgentListContainersResponse // Output from the last list operation. containersErr error // Error from the last list operation. 
@@ -535,6 +539,7 @@ func (api *API) Routes() http.Handler { r.Use(ensureInitDoneMW) r.Get("/", api.handleList) + r.Get("/watch", api.watchContainers) // TODO(mafredri): Simplify this route as the previous /devcontainers // /-route was dropped. We can drop the /devcontainers prefix here too. r.Route("/devcontainers/{devcontainer}", func(r chi.Router) { @@ -544,6 +549,88 @@ func (api *API) Routes() http.Handler { return r } +func (api *API) broadcastUpdatesLocked() { + // Broadcast state changes to WebSocket listeners. + for _, ch := range api.updateChans { + select { + case ch <- struct{}{}: + default: + } + } +} + +func (api *API) watchContainers(rw http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + conn, err := websocket.Accept(rw, r, nil) + if err != nil { + httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ + Message: "Failed to upgrade connection to websocket.", + Detail: err.Error(), + }) + return + } + + // Here we close the websocket for reading, so that the websocket library will handle pings and + // close frames. 
+ _ = conn.CloseRead(context.Background()) + + ctx, wsNetConn := codersdk.WebsocketNetConn(ctx, conn, websocket.MessageText) + defer wsNetConn.Close() + + go httpapi.Heartbeat(ctx, conn) + + updateCh := make(chan struct{}, 1) + + api.mu.Lock() + api.updateChans = append(api.updateChans, updateCh) + api.mu.Unlock() + + defer func() { + api.mu.Lock() + api.updateChans = slices.DeleteFunc(api.updateChans, func(ch chan struct{}) bool { + return ch == updateCh + }) + close(updateCh) + api.mu.Unlock() + }() + + encoder := json.NewEncoder(wsNetConn) + + ct, err := api.getContainers() + if err != nil { + api.logger.Error(ctx, "unable to get containers", slog.Error(err)) + return + } + + if err := encoder.Encode(ct); err != nil { + api.logger.Error(ctx, "encode container list", slog.Error(err)) + return + } + + for { + select { + case <-api.ctx.Done(): + return + + case <-ctx.Done(): + return + + case <-updateCh: + ct, err := api.getContainers() + if err != nil { + api.logger.Error(ctx, "unable to get containers", slog.Error(err)) + continue + } + + if err := encoder.Encode(ct); err != nil { + api.logger.Error(ctx, "encode container list", slog.Error(err)) + return + } + } + } +} + // handleList handles the HTTP request to list containers. 
func (api *API) handleList(rw http.ResponseWriter, r *http.Request) { ct, err := api.getContainers() @@ -583,8 +670,26 @@ func (api *API) updateContainers(ctx context.Context) error { api.mu.Lock() defer api.mu.Unlock() + var previouslyKnownDevcontainers map[string]codersdk.WorkspaceAgentDevcontainer + if len(api.updateChans) > 0 { + previouslyKnownDevcontainers = maps.Clone(api.knownDevcontainers) + } + api.processUpdatedContainersLocked(ctx, updated) + if len(api.updateChans) > 0 { + statesAreEqual := maps.EqualFunc( + previouslyKnownDevcontainers, + api.knownDevcontainers, + func(dc1, dc2 codersdk.WorkspaceAgentDevcontainer) bool { + return dc1.Equals(dc2) + }) + + if !statesAreEqual { + api.broadcastUpdatesLocked() + } + } + api.logger.Debug(ctx, "containers updated successfully", slog.F("container_count", len(api.containers.Containers)), slog.F("warning_count", len(api.containers.Warnings)), slog.F("devcontainer_count", len(api.knownDevcontainers))) return nil @@ -955,6 +1060,8 @@ func (api *API) handleDevcontainerRecreate(w http.ResponseWriter, r *http.Reques dc.Container = nil dc.Error = "" api.knownDevcontainers[dc.WorkspaceFolder] = dc + api.broadcastUpdatesLocked() + go func() { _ = api.CreateDevcontainer(dc.WorkspaceFolder, dc.ConfigPath, WithRemoveExistingContainer()) }() @@ -1070,6 +1177,7 @@ func (api *API) CreateDevcontainer(workspaceFolder, configPath string, opts ...D dc.Error = "" api.recreateSuccessTimes[dc.WorkspaceFolder] = api.clock.Now("agentcontainers", "recreate", "successTimes") api.knownDevcontainers[dc.WorkspaceFolder] = dc + api.broadcastUpdatesLocked() api.mu.Unlock() // Ensure an immediate refresh to accurately reflect the diff --git a/agent/agentcontainers/api_test.go b/agent/agentcontainers/api_test.go index 37ce66e2c150b..9451461bb3215 100644 --- a/agent/agentcontainers/api_test.go +++ b/agent/agentcontainers/api_test.go @@ -36,6 +36,7 @@ import ( "github.com/coder/coder/v2/pty" "github.com/coder/coder/v2/testutil" 
"github.com/coder/quartz" + "github.com/coder/websocket" ) // fakeContainerCLI implements the agentcontainers.ContainerCLI interface for @@ -441,6 +442,178 @@ func TestAPI(t *testing.T) { logbuf.Reset() }) + t.Run("Watch", func(t *testing.T) { + t.Parallel() + + fakeContainer1 := fakeContainer(t, func(c *codersdk.WorkspaceAgentContainer) { + c.ID = "container1" + c.FriendlyName = "devcontainer1" + c.Image = "busybox:latest" + c.Labels = map[string]string{ + agentcontainers.DevcontainerLocalFolderLabel: "/home/coder/project1", + agentcontainers.DevcontainerConfigFileLabel: "/home/coder/project1/.devcontainer/devcontainer.json", + } + }) + + fakeContainer2 := fakeContainer(t, func(c *codersdk.WorkspaceAgentContainer) { + c.ID = "container2" + c.FriendlyName = "devcontainer2" + c.Image = "ubuntu:latest" + c.Labels = map[string]string{ + agentcontainers.DevcontainerLocalFolderLabel: "/home/coder/project2", + agentcontainers.DevcontainerConfigFileLabel: "/home/coder/project2/.devcontainer/devcontainer.json", + } + }) + + stages := []struct { + containers []codersdk.WorkspaceAgentContainer + expected codersdk.WorkspaceAgentListContainersResponse + }{ + { + containers: []codersdk.WorkspaceAgentContainer{fakeContainer1}, + expected: codersdk.WorkspaceAgentListContainersResponse{ + Containers: []codersdk.WorkspaceAgentContainer{fakeContainer1}, + Devcontainers: []codersdk.WorkspaceAgentDevcontainer{ + { + Name: "project1", + WorkspaceFolder: fakeContainer1.Labels[agentcontainers.DevcontainerLocalFolderLabel], + ConfigPath: fakeContainer1.Labels[agentcontainers.DevcontainerConfigFileLabel], + Status: "running", + Container: &fakeContainer1, + }, + }, + }, + }, + { + containers: []codersdk.WorkspaceAgentContainer{fakeContainer1, fakeContainer2}, + expected: codersdk.WorkspaceAgentListContainersResponse{ + Containers: []codersdk.WorkspaceAgentContainer{fakeContainer1, fakeContainer2}, + Devcontainers: []codersdk.WorkspaceAgentDevcontainer{ + { + Name: "project1", + 
WorkspaceFolder: fakeContainer1.Labels[agentcontainers.DevcontainerLocalFolderLabel], + ConfigPath: fakeContainer1.Labels[agentcontainers.DevcontainerConfigFileLabel], + Status: "running", + Container: &fakeContainer1, + }, + { + Name: "project2", + WorkspaceFolder: fakeContainer2.Labels[agentcontainers.DevcontainerLocalFolderLabel], + ConfigPath: fakeContainer2.Labels[agentcontainers.DevcontainerConfigFileLabel], + Status: "running", + Container: &fakeContainer2, + }, + }, + }, + }, + { + containers: []codersdk.WorkspaceAgentContainer{fakeContainer2}, + expected: codersdk.WorkspaceAgentListContainersResponse{ + Containers: []codersdk.WorkspaceAgentContainer{fakeContainer2}, + Devcontainers: []codersdk.WorkspaceAgentDevcontainer{ + { + Name: "", + WorkspaceFolder: fakeContainer1.Labels[agentcontainers.DevcontainerLocalFolderLabel], + ConfigPath: fakeContainer1.Labels[agentcontainers.DevcontainerConfigFileLabel], + Status: "stopped", + Container: nil, + }, + { + Name: "project2", + WorkspaceFolder: fakeContainer2.Labels[agentcontainers.DevcontainerLocalFolderLabel], + ConfigPath: fakeContainer2.Labels[agentcontainers.DevcontainerConfigFileLabel], + Status: "running", + Container: &fakeContainer2, + }, + }, + }, + }, + } + + var ( + ctx = testutil.Context(t, testutil.WaitShort) + mClock = quartz.NewMock(t) + updaterTickerTrap = mClock.Trap().TickerFunc("updaterLoop") + mCtrl = gomock.NewController(t) + mLister = acmock.NewMockContainerCLI(mCtrl) + logger = slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug) + ) + + // Set up initial state for immediate send on connection + mLister.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{Containers: stages[0].containers}, nil) + mLister.EXPECT().DetectArchitecture(gomock.Any(), gomock.Any()).Return("", nil).AnyTimes() + + api := agentcontainers.NewAPI(logger, + agentcontainers.WithClock(mClock), + agentcontainers.WithContainerCLI(mLister), + 
agentcontainers.WithWatcher(watcher.NewNoop()), + ) + api.Start() + defer api.Close() + + srv := httptest.NewServer(api.Routes()) + defer srv.Close() + + updaterTickerTrap.MustWait(ctx).MustRelease(ctx) + defer updaterTickerTrap.Close() + + client, res, err := websocket.Dial(ctx, srv.URL+"/watch", nil) + require.NoError(t, err) + if res != nil && res.Body != nil { + defer res.Body.Close() + } + + // Read initial state sent immediately on connection + mt, msg, err := client.Read(ctx) + require.NoError(t, err) + require.Equal(t, websocket.MessageText, mt) + + var got codersdk.WorkspaceAgentListContainersResponse + err = json.Unmarshal(msg, &got) + require.NoError(t, err) + + require.Equal(t, stages[0].expected.Containers, got.Containers) + require.Len(t, got.Devcontainers, len(stages[0].expected.Devcontainers)) + for j, expectedDev := range stages[0].expected.Devcontainers { + gotDev := got.Devcontainers[j] + require.Equal(t, expectedDev.Name, gotDev.Name) + require.Equal(t, expectedDev.WorkspaceFolder, gotDev.WorkspaceFolder) + require.Equal(t, expectedDev.ConfigPath, gotDev.ConfigPath) + require.Equal(t, expectedDev.Status, gotDev.Status) + require.Equal(t, expectedDev.Container, gotDev.Container) + } + + // Process remaining stages through updater loop + for i, stage := range stages[1:] { + mLister.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{Containers: stage.containers}, nil) + + // Given: We allow the update loop to progress + _, aw := mClock.AdvanceNext() + aw.MustWait(ctx) + + // When: We attempt to read a message from the socket. + mt, msg, err := client.Read(ctx) + require.NoError(t, err) + require.Equal(t, websocket.MessageText, mt) + + // Then: We expect the receieved message matches the expected response. 
+ var got codersdk.WorkspaceAgentListContainersResponse + err = json.Unmarshal(msg, &got) + require.NoError(t, err) + + require.Equal(t, stages[i+1].expected.Containers, got.Containers) + require.Len(t, got.Devcontainers, len(stages[i+1].expected.Devcontainers)) + for j, expectedDev := range stages[i+1].expected.Devcontainers { + gotDev := got.Devcontainers[j] + require.Equal(t, expectedDev.Name, gotDev.Name) + require.Equal(t, expectedDev.WorkspaceFolder, gotDev.WorkspaceFolder) + require.Equal(t, expectedDev.ConfigPath, gotDev.ConfigPath) + require.Equal(t, expectedDev.Status, gotDev.Status) + require.Equal(t, expectedDev.Container, gotDev.Container) + } + } + }) + // List tests the API.getContainers method using a mock // implementation. It specifically tests caching behavior. t.Run("List", func(t *testing.T) { @@ -2710,8 +2883,12 @@ func TestAPI(t *testing.T) { Op: fsnotify.Write, }) - err = api.RefreshContainers(ctx) - require.NoError(t, err) + require.Eventuallyf(t, func() bool { + err = api.RefreshContainers(ctx) + require.NoError(t, err) + + return len(fakeSAC.agents) == 1 + }, testutil.WaitShort, testutil.IntervalFast, "subagent should be created after config change") t.Log("Phase 2: Cont, waiting for sub agent to exit") exitSubAgentOnce.Do(func() { @@ -2746,8 +2923,12 @@ func TestAPI(t *testing.T) { Op: fsnotify.Write, }) - err = api.RefreshContainers(ctx) - require.NoError(t, err) + require.Eventuallyf(t, func() bool { + err = api.RefreshContainers(ctx) + require.NoError(t, err) + + return len(fakeSAC.agents) == 0 + }, testutil.WaitShort, testutil.IntervalFast, "subagent should be deleted after config change") req = httptest.NewRequest(http.MethodGet, "/", nil).WithContext(ctx) rec = httptest.NewRecorder() diff --git a/agent/agentssh/agentssh_test.go b/agent/agentssh/agentssh_test.go index 08fa02ddb4565..159fe345483d2 100644 --- a/agent/agentssh/agentssh_test.go +++ b/agent/agentssh/agentssh_test.go @@ -453,7 +453,7 @@ func TestSSHServer_ClosesStdin(t 
*testing.T) { // exit code 1 if it hits EOF, which is what we want to test. cmdErrCh := make(chan error, 1) go func() { - cmdErrCh <- sess.Start(fmt.Sprintf("echo started; read; echo \"read exit code: $?\" > %s", filePath)) + cmdErrCh <- sess.Start(fmt.Sprintf(`echo started; echo "read exit code: $(read && echo 0 || echo 1)" > %s`, filePath)) }() cmdErr := testutil.RequireReceive(ctx, t, cmdErrCh) diff --git a/cli/delete_test.go b/cli/delete_test.go index a48ca98627f65..c01893419f80f 100644 --- a/cli/delete_test.go +++ b/cli/delete_test.go @@ -233,9 +233,6 @@ func TestDelete(t *testing.T) { t.Skip("this test requires postgres") } - clock := quartz.NewMock(t) - ctx := testutil.Context(t, testutil.WaitSuperLong) - // Setup db, pb := dbtestutil.NewDB(t, dbtestutil.WithDumpOnFailure()) client, _ := coderdtest.NewWithProvisionerCloser(t, &coderdtest.Options{ @@ -301,6 +298,9 @@ func TestDelete(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + clock := quartz.NewMock(t) + ctx := testutil.Context(t, testutil.WaitSuperLong) + // Create one prebuilt workspace (owned by system user) and one normal workspace (owned by a user) // Each workspace is persisted in the DB along with associated workspace jobs and builds. 
dbPrebuiltWorkspace := setupTestDBWorkspace(t, clock, db, pb, orgID, database.PrebuildsSystemUserID, template.ID, version.ID, preset.ID) diff --git a/cli/testdata/coder_list_--output_json.golden b/cli/testdata/coder_list_--output_json.golden index e97894c4afb21..51c2887cd1e4a 100644 --- a/cli/testdata/coder_list_--output_json.golden +++ b/cli/testdata/coder_list_--output_json.golden @@ -86,6 +86,7 @@ "automatic_updates": "never", "allow_renames": false, "favorite": false, - "next_start_at": "====[timestamp]=====" + "next_start_at": "====[timestamp]=====", + "is_prebuild": false } ] diff --git a/coderd/agentapi/api.go b/coderd/agentapi/api.go index c409f8ea89e9b..dbcb8ea024914 100644 --- a/coderd/agentapi/api.go +++ b/coderd/agentapi/api.go @@ -19,7 +19,7 @@ import ( agentproto "github.com/coder/coder/v2/agent/proto" "github.com/coder/coder/v2/coderd/agentapi/resourcesmonitor" "github.com/coder/coder/v2/coderd/appearance" - "github.com/coder/coder/v2/coderd/audit" + "github.com/coder/coder/v2/coderd/connectionlog" "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/pubsub" "github.com/coder/coder/v2/coderd/externalauth" @@ -50,7 +50,7 @@ type API struct { *ResourcesMonitoringAPI *LogsAPI *ScriptsAPI - *AuditAPI + *ConnLogAPI *SubAgentAPI *tailnet.DRPCService @@ -71,7 +71,7 @@ type Options struct { Database database.Store NotificationsEnqueuer notifications.Enqueuer Pubsub pubsub.Pubsub - Auditor *atomic.Pointer[audit.Auditor] + ConnectionLogger *atomic.Pointer[connectionlog.ConnectionLogger] DerpMapFn func() *tailcfg.DERPMap TailnetCoordinator *atomic.Pointer[tailnet.Coordinator] StatsReporter *workspacestats.Reporter @@ -180,11 +180,11 @@ func New(opts Options) *API { Database: opts.Database, } - api.AuditAPI = &AuditAPI{ - AgentFn: api.agent, - Auditor: opts.Auditor, - Database: opts.Database, - Log: opts.Log, + api.ConnLogAPI = &ConnLogAPI{ + AgentFn: api.agent, + ConnectionLogger: opts.ConnectionLogger, + Database: 
opts.Database, + Log: opts.Log, } api.DRPCService = &tailnet.DRPCService{ diff --git a/coderd/agentapi/audit.go b/coderd/agentapi/audit.go deleted file mode 100644 index 2025b2d6cd92b..0000000000000 --- a/coderd/agentapi/audit.go +++ /dev/null @@ -1,105 +0,0 @@ -package agentapi - -import ( - "context" - "encoding/json" - "strconv" - "sync/atomic" - - "github.com/google/uuid" - "golang.org/x/xerrors" - "google.golang.org/protobuf/types/known/emptypb" - - "cdr.dev/slog" - - agentproto "github.com/coder/coder/v2/agent/proto" - "github.com/coder/coder/v2/coderd/audit" - "github.com/coder/coder/v2/coderd/database" - "github.com/coder/coder/v2/coderd/database/db2sdk" - "github.com/coder/coder/v2/codersdk/agentsdk" -) - -type AuditAPI struct { - AgentFn func(context.Context) (database.WorkspaceAgent, error) - Auditor *atomic.Pointer[audit.Auditor] - Database database.Store - Log slog.Logger -} - -func (a *AuditAPI) ReportConnection(ctx context.Context, req *agentproto.ReportConnectionRequest) (*emptypb.Empty, error) { - // We will use connection ID as request ID, typically this is the - // SSH session ID as reported by the agent. - connectionID, err := uuid.FromBytes(req.GetConnection().GetId()) - if err != nil { - return nil, xerrors.Errorf("connection id from bytes: %w", err) - } - - action, err := db2sdk.AuditActionFromAgentProtoConnectionAction(req.GetConnection().GetAction()) - if err != nil { - return nil, err - } - connectionType, err := agentsdk.ConnectionTypeFromProto(req.GetConnection().GetType()) - if err != nil { - return nil, err - } - - // Fetch contextual data for this audit event. 
- workspaceAgent, err := a.AgentFn(ctx) - if err != nil { - return nil, xerrors.Errorf("get agent: %w", err) - } - workspace, err := a.Database.GetWorkspaceByAgentID(ctx, workspaceAgent.ID) - if err != nil { - return nil, xerrors.Errorf("get workspace by agent id: %w", err) - } - build, err := a.Database.GetLatestWorkspaceBuildByWorkspaceID(ctx, workspace.ID) - if err != nil { - return nil, xerrors.Errorf("get latest workspace build by workspace id: %w", err) - } - - // We pass the below information to the Auditor so that it - // can form a friendly string for the user to view in the UI. - type additionalFields struct { - audit.AdditionalFields - - ConnectionType agentsdk.ConnectionType `json:"connection_type"` - Reason string `json:"reason,omitempty"` - } - resourceInfo := additionalFields{ - AdditionalFields: audit.AdditionalFields{ - WorkspaceID: workspace.ID, - WorkspaceName: workspace.Name, - WorkspaceOwner: workspace.OwnerUsername, - BuildNumber: strconv.FormatInt(int64(build.BuildNumber), 10), - BuildReason: database.BuildReason(string(build.Reason)), - }, - ConnectionType: connectionType, - Reason: req.GetConnection().GetReason(), - } - - riBytes, err := json.Marshal(resourceInfo) - if err != nil { - a.Log.Error(ctx, "marshal resource info for agent connection failed", slog.Error(err)) - riBytes = []byte("{}") - } - - audit.BackgroundAudit(ctx, &audit.BackgroundAuditParams[database.WorkspaceAgent]{ - Audit: *a.Auditor.Load(), - Log: a.Log, - Time: req.GetConnection().GetTimestamp().AsTime(), - OrganizationID: workspace.OrganizationID, - RequestID: connectionID, - Action: action, - New: workspaceAgent, - Old: workspaceAgent, - IP: req.GetConnection().GetIp(), - Status: int(req.GetConnection().GetStatusCode()), - AdditionalFields: riBytes, - - // It's not possible to tell which user connected. Once we have - // the capability, this may be reported by the agent. 
- UserID: uuid.Nil, - }) - - return &emptypb.Empty{}, nil -} diff --git a/coderd/agentapi/connectionlog.go b/coderd/agentapi/connectionlog.go new file mode 100644 index 0000000000000..f26f835746981 --- /dev/null +++ b/coderd/agentapi/connectionlog.go @@ -0,0 +1,106 @@ +package agentapi + +import ( + "context" + "database/sql" + "sync/atomic" + + "github.com/google/uuid" + "golang.org/x/xerrors" + "google.golang.org/protobuf/types/known/emptypb" + + "cdr.dev/slog" + agentproto "github.com/coder/coder/v2/agent/proto" + "github.com/coder/coder/v2/coderd/connectionlog" + "github.com/coder/coder/v2/coderd/database" + "github.com/coder/coder/v2/coderd/database/db2sdk" +) + +type ConnLogAPI struct { + AgentFn func(context.Context) (database.WorkspaceAgent, error) + ConnectionLogger *atomic.Pointer[connectionlog.ConnectionLogger] + Database database.Store + Log slog.Logger +} + +func (a *ConnLogAPI) ReportConnection(ctx context.Context, req *agentproto.ReportConnectionRequest) (*emptypb.Empty, error) { + // We use the connection ID to identify which connection log event to mark + // as closed, when we receive a close action for that ID. + connectionID, err := uuid.FromBytes(req.GetConnection().GetId()) + if err != nil { + return nil, xerrors.Errorf("connection id from bytes: %w", err) + } + + if connectionID == uuid.Nil { + return nil, xerrors.New("connection ID cannot be nil") + } + action, err := db2sdk.ConnectionLogStatusFromAgentProtoConnectionAction(req.GetConnection().GetAction()) + if err != nil { + return nil, err + } + connectionType, err := db2sdk.ConnectionLogConnectionTypeFromAgentProtoConnectionType(req.GetConnection().GetType()) + if err != nil { + return nil, err + } + + var code sql.NullInt32 + if action == database.ConnectionStatusDisconnected { + code = sql.NullInt32{ + Int32: req.GetConnection().GetStatusCode(), + Valid: true, + } + } + + // Fetch contextual data for this connection log event. 
+ workspaceAgent, err := a.AgentFn(ctx) + if err != nil { + return nil, xerrors.Errorf("get agent: %w", err) + } + workspace, err := a.Database.GetWorkspaceByAgentID(ctx, workspaceAgent.ID) + if err != nil { + return nil, xerrors.Errorf("get workspace by agent id: %w", err) + } + + reason := req.GetConnection().GetReason() + connLogger := *a.ConnectionLogger.Load() + err = connLogger.Upsert(ctx, database.UpsertConnectionLogParams{ + ID: uuid.New(), + Time: req.GetConnection().GetTimestamp().AsTime(), + OrganizationID: workspace.OrganizationID, + WorkspaceOwnerID: workspace.OwnerID, + WorkspaceID: workspace.ID, + WorkspaceName: workspace.Name, + AgentName: workspaceAgent.Name, + Type: connectionType, + Code: code, + Ip: database.ParseIP(req.GetConnection().GetIp()), + ConnectionID: uuid.NullUUID{ + UUID: connectionID, + Valid: true, + }, + DisconnectReason: sql.NullString{ + String: reason, + Valid: reason != "", + }, + // We supply the action: + // - So the DB can handle duplicate connections or disconnections properly. + // - To make it clear whether this is a connection or disconnection + // prior to it's insertion into the DB (logs) + ConnectionStatus: action, + + // It's not possible to tell which user connected. Once we have + // the capability, this may be reported by the agent. 
+ UserID: uuid.NullUUID{ + Valid: false, + }, + // N/A + UserAgent: sql.NullString{}, + // N/A + SlugOrPort: sql.NullString{}, + }) + if err != nil { + return nil, xerrors.Errorf("export connection log: %w", err) + } + + return &emptypb.Empty{}, nil +} diff --git a/coderd/agentapi/audit_test.go b/coderd/agentapi/connectionlog_test.go similarity index 62% rename from coderd/agentapi/audit_test.go rename to coderd/agentapi/connectionlog_test.go index b881fde5d22bc..4a060b8f16faf 100644 --- a/coderd/agentapi/audit_test.go +++ b/coderd/agentapi/connectionlog_test.go @@ -2,7 +2,7 @@ package agentapi_test import ( "context" - "encoding/json" + "database/sql" "net" "sync/atomic" "testing" @@ -16,15 +16,14 @@ import ( agentproto "github.com/coder/coder/v2/agent/proto" "github.com/coder/coder/v2/coderd/agentapi" - "github.com/coder/coder/v2/coderd/audit" + "github.com/coder/coder/v2/coderd/connectionlog" "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/db2sdk" "github.com/coder/coder/v2/coderd/database/dbmock" "github.com/coder/coder/v2/coderd/database/dbtime" - "github.com/coder/coder/v2/codersdk/agentsdk" ) -func TestAuditReport(t *testing.T) { +func TestConnectionLog(t *testing.T) { t.Parallel() var ( @@ -38,10 +37,6 @@ func TestAuditReport(t *testing.T) { OwnerID: owner.ID, Name: "cool-workspace", } - build = database.WorkspaceBuild{ - ID: uuid.New(), - WorkspaceID: workspace.ID, - } agent = database.WorkspaceAgent{ ID: uuid.New(), } @@ -62,7 +57,7 @@ func TestAuditReport(t *testing.T) { id: uuid.New(), action: agentproto.Connection_CONNECT.Enum(), typ: agentproto.Connection_SSH.Enum(), - time: time.Now(), + time: dbtime.Now(), ip: "127.0.0.1", status: 200, }, @@ -71,7 +66,7 @@ func TestAuditReport(t *testing.T) { id: uuid.New(), action: agentproto.Connection_CONNECT.Enum(), typ: agentproto.Connection_VSCODE.Enum(), - time: time.Now(), + time: dbtime.Now(), ip: "8.8.8.8", }, { @@ -79,28 +74,28 @@ func TestAuditReport(t *testing.T) { 
id: uuid.New(), action: agentproto.Connection_CONNECT.Enum(), typ: agentproto.Connection_JETBRAINS.Enum(), - time: time.Now(), + time: dbtime.Now(), }, { name: "Reconnecting PTY Connect", id: uuid.New(), action: agentproto.Connection_CONNECT.Enum(), typ: agentproto.Connection_RECONNECTING_PTY.Enum(), - time: time.Now(), + time: dbtime.Now(), }, { name: "SSH Disconnect", id: uuid.New(), action: agentproto.Connection_DISCONNECT.Enum(), typ: agentproto.Connection_SSH.Enum(), - time: time.Now(), + time: dbtime.Now(), }, { name: "SSH Disconnect", id: uuid.New(), action: agentproto.Connection_DISCONNECT.Enum(), typ: agentproto.Connection_SSH.Enum(), - time: time.Now(), + time: dbtime.Now(), status: 500, reason: "because error says so", }, @@ -110,15 +105,14 @@ func TestAuditReport(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - mAudit := audit.NewMock() + connLogger := connectionlog.NewFake() mDB := dbmock.NewMockStore(gomock.NewController(t)) mDB.EXPECT().GetWorkspaceByAgentID(gomock.Any(), agent.ID).Return(workspace, nil) - mDB.EXPECT().GetLatestWorkspaceBuildByWorkspaceID(gomock.Any(), workspace.ID).Return(build, nil) - api := &agentapi.AuditAPI{ - Auditor: asAtomicPointer[audit.Auditor](mAudit), - Database: mDB, + api := &agentapi.ConnLogAPI{ + ConnectionLogger: asAtomicPointer[connectionlog.ConnectionLogger](connLogger), + Database: mDB, AgentFn: func(context.Context) (database.WorkspaceAgent, error) { return agent, nil }, @@ -135,41 +129,48 @@ func TestAuditReport(t *testing.T) { }, }) - require.True(t, mAudit.Contains(t, database.AuditLog{ - Time: dbtime.Time(tt.time).In(time.UTC), - Action: agentProtoConnectionActionToAudit(t, *tt.action), - OrganizationID: workspace.OrganizationID, - UserID: uuid.Nil, - RequestID: tt.id, - ResourceType: database.ResourceTypeWorkspaceAgent, - ResourceID: agent.ID, - ResourceTarget: agent.Name, - Ip: pqtype.Inet{Valid: true, IPNet: net.IPNet{IP: net.ParseIP(tt.ip), Mask: net.CIDRMask(32, 32)}}, - StatusCode: 
tt.status, - })) + require.True(t, connLogger.Contains(t, database.UpsertConnectionLogParams{ + Time: dbtime.Time(tt.time).In(time.UTC), + OrganizationID: workspace.OrganizationID, + WorkspaceOwnerID: workspace.OwnerID, + WorkspaceID: workspace.ID, + WorkspaceName: workspace.Name, + AgentName: agent.Name, + UserID: uuid.NullUUID{ + UUID: uuid.Nil, + Valid: false, + }, + ConnectionStatus: agentProtoConnectionActionToConnectionLog(t, *tt.action), - // Check some additional fields. - var m map[string]any - err := json.Unmarshal(mAudit.AuditLogs()[0].AdditionalFields, &m) - require.NoError(t, err) - require.Equal(t, string(agentProtoConnectionTypeToSDK(t, *tt.typ)), m["connection_type"].(string)) - if tt.reason != "" { - require.Equal(t, tt.reason, m["reason"]) - } + Code: sql.NullInt32{ + Int32: tt.status, + Valid: *tt.action == agentproto.Connection_DISCONNECT, + }, + Ip: pqtype.Inet{Valid: true, IPNet: net.IPNet{IP: net.ParseIP(tt.ip), Mask: net.CIDRMask(32, 32)}}, + Type: agentProtoConnectionTypeToConnectionLog(t, *tt.typ), + DisconnectReason: sql.NullString{ + String: tt.reason, + Valid: tt.reason != "", + }, + ConnectionID: uuid.NullUUID{ + UUID: tt.id, + Valid: tt.id != uuid.Nil, + }, + })) }) } } -func agentProtoConnectionActionToAudit(t *testing.T, action agentproto.Connection_Action) database.AuditAction { - a, err := db2sdk.AuditActionFromAgentProtoConnectionAction(action) +func agentProtoConnectionTypeToConnectionLog(t *testing.T, typ agentproto.Connection_Type) database.ConnectionType { + a, err := db2sdk.ConnectionLogConnectionTypeFromAgentProtoConnectionType(typ) require.NoError(t, err) return a } -func agentProtoConnectionTypeToSDK(t *testing.T, typ agentproto.Connection_Type) agentsdk.ConnectionType { - action, err := agentsdk.ConnectionTypeFromProto(typ) +func agentProtoConnectionActionToConnectionLog(t *testing.T, action agentproto.Connection_Action) database.ConnectionStatus { + a, err := 
db2sdk.ConnectionLogStatusFromAgentProtoConnectionAction(action) require.NoError(t, err) - return action + return a } func asAtomicPointer[T any](v T) *atomic.Pointer[T] { diff --git a/coderd/apidoc/docs.go b/coderd/apidoc/docs.go index 79cff80b1fbc5..7a3bd8a0d913a 100644 --- a/coderd/apidoc/docs.go +++ b/coderd/apidoc/docs.go @@ -383,6 +383,52 @@ const docTemplate = `{ } } }, + "/connectionlog": { + "get": { + "security": [ + { + "CoderSessionToken": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Enterprise" + ], + "summary": "Get connection logs", + "operationId": "get-connection-logs", + "parameters": [ + { + "type": "string", + "description": "Search query", + "name": "q", + "in": "query" + }, + { + "type": "integer", + "description": "Page limit", + "name": "limit", + "in": "query", + "required": true + }, + { + "type": "integer", + "description": "Page offset", + "name": "offset", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/codersdk.ConnectionLogResponse" + } + } + } + } + }, "/csp/reports": { "post": { "security": [ @@ -8778,6 +8824,41 @@ const docTemplate = `{ } } }, + "/workspaceagents/{workspaceagent}/containers/watch": { + "get": { + "security": [ + { + "CoderSessionToken": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Agents" + ], + "summary": "Watch workspace agent for container updates.", + "operationId": "watch-workspace-agent-for-container-updates", + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "Workspace agent ID", + "name": "workspaceagent", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/codersdk.WorkspaceAgentListContainersResponse" + } + } + } + } + }, "/workspaceagents/{workspaceagent}/coordinate": { "get": { "security": [ @@ -11409,6 +11490,139 @@ const docTemplate = `{ } } }, + "codersdk.ConnectionLog": { + 
"type": "object", + "properties": { + "agent_name": { + "type": "string" + }, + "connect_time": { + "type": "string", + "format": "date-time" + }, + "id": { + "type": "string", + "format": "uuid" + }, + "ip": { + "type": "string" + }, + "organization": { + "$ref": "#/definitions/codersdk.MinimalOrganization" + }, + "ssh_info": { + "description": "SSHInfo is only set when ` + "`" + `type` + "`" + ` is one of:\n- ` + "`" + `ConnectionTypeSSH` + "`" + `\n- ` + "`" + `ConnectionTypeReconnectingPTY` + "`" + `\n- ` + "`" + `ConnectionTypeVSCode` + "`" + `\n- ` + "`" + `ConnectionTypeJetBrains` + "`" + `", + "allOf": [ + { + "$ref": "#/definitions/codersdk.ConnectionLogSSHInfo" + } + ] + }, + "type": { + "$ref": "#/definitions/codersdk.ConnectionType" + }, + "web_info": { + "description": "WebInfo is only set when ` + "`" + `type` + "`" + ` is one of:\n- ` + "`" + `ConnectionTypePortForwarding` + "`" + `\n- ` + "`" + `ConnectionTypeWorkspaceApp` + "`" + `", + "allOf": [ + { + "$ref": "#/definitions/codersdk.ConnectionLogWebInfo" + } + ] + }, + "workspace_id": { + "type": "string", + "format": "uuid" + }, + "workspace_name": { + "type": "string" + }, + "workspace_owner_id": { + "type": "string", + "format": "uuid" + }, + "workspace_owner_username": { + "type": "string" + } + } + }, + "codersdk.ConnectionLogResponse": { + "type": "object", + "properties": { + "connection_logs": { + "type": "array", + "items": { + "$ref": "#/definitions/codersdk.ConnectionLog" + } + }, + "count": { + "type": "integer" + } + } + }, + "codersdk.ConnectionLogSSHInfo": { + "type": "object", + "properties": { + "connection_id": { + "type": "string", + "format": "uuid" + }, + "disconnect_reason": { + "description": "DisconnectReason is omitted if a disconnect event with the same connection ID\nhas not yet been seen.", + "type": "string" + }, + "disconnect_time": { + "description": "DisconnectTime is omitted if a disconnect event with the same connection ID\nhas not yet been seen.", + "type": 
"string", + "format": "date-time" + }, + "exit_code": { + "description": "ExitCode is the exit code of the SSH session. It is omitted if a\ndisconnect event with the same connection ID has not yet been seen.", + "type": "integer" + } + } + }, + "codersdk.ConnectionLogWebInfo": { + "type": "object", + "properties": { + "slug_or_port": { + "type": "string" + }, + "status_code": { + "description": "StatusCode is the HTTP status code of the request.", + "type": "integer" + }, + "user": { + "description": "User is omitted if the connection event was from an unauthenticated user.", + "allOf": [ + { + "$ref": "#/definitions/codersdk.User" + } + ] + }, + "user_agent": { + "type": "string" + } + } + }, + "codersdk.ConnectionType": { + "type": "string", + "enum": [ + "ssh", + "vscode", + "jetbrains", + "reconnecting_pty", + "workspace_app", + "port_forwarding" + ], + "x-enum-varnames": [ + "ConnectionTypeSSH", + "ConnectionTypeVSCode", + "ConnectionTypeJetBrains", + "ConnectionTypeReconnectingPTY", + "ConnectionTypeWorkspaceApp", + "ConnectionTypePortForwarding" + ] + }, "codersdk.ConvertLoginRequest": { "type": "object", "required": [ @@ -15329,6 +15543,7 @@ const docTemplate = `{ "assign_org_role", "assign_role", "audit_log", + "connection_log", "crypto_key", "debug_info", "deployment_config", @@ -15368,6 +15583,7 @@ const docTemplate = `{ "ResourceAssignOrgRole", "ResourceAssignRole", "ResourceAuditLog", + "ResourceConnectionLog", "ResourceCryptoKey", "ResourceDebugInfo", "ResourceDeploymentConfig", @@ -17437,6 +17653,10 @@ const docTemplate = `{ "type": "string", "format": "uuid" }, + "is_prebuild": { + "description": "IsPrebuild indicates whether the workspace is a prebuilt workspace.\nPrebuilt workspaces are owned by the prebuilds system user and have specific behavior,\nsuch as being managed differently from regular workspaces.\nOnce a prebuilt workspace is claimed by a user, it transitions to a regular workspace,\nand IsPrebuild returns false.", + "type": "boolean" + 
}, "last_used_at": { "type": "string", "format": "date-time" @@ -19372,7 +19592,7 @@ const docTemplate = `{ "type": "integer" }, "expiry": { - "description": "Expiry is the optional expiration time of the access token.\n\nIf zero, TokenSource implementations will reuse the same\ntoken forever and RefreshToken or equivalent\nmechanisms for that TokenSource will not be used.", + "description": "Expiry is the optional expiration time of the access token.\n\nIf zero, [TokenSource] implementations will reuse the same\ntoken forever and RefreshToken or equivalent\nmechanisms for that TokenSource will not be used.", "type": "string" }, "refresh_token": { diff --git a/coderd/apidoc/swagger.json b/coderd/apidoc/swagger.json index 5fa1d98030cb5..ded07f40f1163 100644 --- a/coderd/apidoc/swagger.json +++ b/coderd/apidoc/swagger.json @@ -323,6 +323,48 @@ } } }, + "/connectionlog": { + "get": { + "security": [ + { + "CoderSessionToken": [] + } + ], + "produces": ["application/json"], + "tags": ["Enterprise"], + "summary": "Get connection logs", + "operationId": "get-connection-logs", + "parameters": [ + { + "type": "string", + "description": "Search query", + "name": "q", + "in": "query" + }, + { + "type": "integer", + "description": "Page limit", + "name": "limit", + "in": "query", + "required": true + }, + { + "type": "integer", + "description": "Page offset", + "name": "offset", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/codersdk.ConnectionLogResponse" + } + } + } + } + }, "/csp/reports": { "post": { "security": [ @@ -7751,6 +7793,37 @@ } } }, + "/workspaceagents/{workspaceagent}/containers/watch": { + "get": { + "security": [ + { + "CoderSessionToken": [] + } + ], + "produces": ["application/json"], + "tags": ["Agents"], + "summary": "Watch workspace agent for container updates.", + "operationId": "watch-workspace-agent-for-container-updates", + "parameters": [ + { + "type": "string", + "format": "uuid", 
+ "description": "Workspace agent ID", + "name": "workspaceagent", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/codersdk.WorkspaceAgentListContainersResponse" + } + } + } + } + }, "/workspaceagents/{workspaceagent}/coordinate": { "get": { "security": [ @@ -10143,6 +10216,139 @@ } } }, + "codersdk.ConnectionLog": { + "type": "object", + "properties": { + "agent_name": { + "type": "string" + }, + "connect_time": { + "type": "string", + "format": "date-time" + }, + "id": { + "type": "string", + "format": "uuid" + }, + "ip": { + "type": "string" + }, + "organization": { + "$ref": "#/definitions/codersdk.MinimalOrganization" + }, + "ssh_info": { + "description": "SSHInfo is only set when `type` is one of:\n- `ConnectionTypeSSH`\n- `ConnectionTypeReconnectingPTY`\n- `ConnectionTypeVSCode`\n- `ConnectionTypeJetBrains`", + "allOf": [ + { + "$ref": "#/definitions/codersdk.ConnectionLogSSHInfo" + } + ] + }, + "type": { + "$ref": "#/definitions/codersdk.ConnectionType" + }, + "web_info": { + "description": "WebInfo is only set when `type` is one of:\n- `ConnectionTypePortForwarding`\n- `ConnectionTypeWorkspaceApp`", + "allOf": [ + { + "$ref": "#/definitions/codersdk.ConnectionLogWebInfo" + } + ] + }, + "workspace_id": { + "type": "string", + "format": "uuid" + }, + "workspace_name": { + "type": "string" + }, + "workspace_owner_id": { + "type": "string", + "format": "uuid" + }, + "workspace_owner_username": { + "type": "string" + } + } + }, + "codersdk.ConnectionLogResponse": { + "type": "object", + "properties": { + "connection_logs": { + "type": "array", + "items": { + "$ref": "#/definitions/codersdk.ConnectionLog" + } + }, + "count": { + "type": "integer" + } + } + }, + "codersdk.ConnectionLogSSHInfo": { + "type": "object", + "properties": { + "connection_id": { + "type": "string", + "format": "uuid" + }, + "disconnect_reason": { + "description": "DisconnectReason is omitted if a 
disconnect event with the same connection ID\nhas not yet been seen.", + "type": "string" + }, + "disconnect_time": { + "description": "DisconnectTime is omitted if a disconnect event with the same connection ID\nhas not yet been seen.", + "type": "string", + "format": "date-time" + }, + "exit_code": { + "description": "ExitCode is the exit code of the SSH session. It is omitted if a\ndisconnect event with the same connection ID has not yet been seen.", + "type": "integer" + } + } + }, + "codersdk.ConnectionLogWebInfo": { + "type": "object", + "properties": { + "slug_or_port": { + "type": "string" + }, + "status_code": { + "description": "StatusCode is the HTTP status code of the request.", + "type": "integer" + }, + "user": { + "description": "User is omitted if the connection event was from an unauthenticated user.", + "allOf": [ + { + "$ref": "#/definitions/codersdk.User" + } + ] + }, + "user_agent": { + "type": "string" + } + } + }, + "codersdk.ConnectionType": { + "type": "string", + "enum": [ + "ssh", + "vscode", + "jetbrains", + "reconnecting_pty", + "workspace_app", + "port_forwarding" + ], + "x-enum-varnames": [ + "ConnectionTypeSSH", + "ConnectionTypeVSCode", + "ConnectionTypeJetBrains", + "ConnectionTypeReconnectingPTY", + "ConnectionTypeWorkspaceApp", + "ConnectionTypePortForwarding" + ] + }, "codersdk.ConvertLoginRequest": { "type": "object", "required": ["password", "to_type"], @@ -13905,6 +14111,7 @@ "assign_org_role", "assign_role", "audit_log", + "connection_log", "crypto_key", "debug_info", "deployment_config", @@ -13944,6 +14151,7 @@ "ResourceAssignOrgRole", "ResourceAssignRole", "ResourceAuditLog", + "ResourceConnectionLog", "ResourceCryptoKey", "ResourceDebugInfo", "ResourceDeploymentConfig", @@ -15908,6 +16116,10 @@ "type": "string", "format": "uuid" }, + "is_prebuild": { + "description": "IsPrebuild indicates whether the workspace is a prebuilt workspace.\nPrebuilt workspaces are owned by the prebuilds system user and have specific 
behavior,\nsuch as being managed differently from regular workspaces.\nOnce a prebuilt workspace is claimed by a user, it transitions to a regular workspace,\nand IsPrebuild returns false.", + "type": "boolean" + }, "last_used_at": { "type": "string", "format": "date-time" @@ -17731,7 +17943,7 @@ "type": "integer" }, "expiry": { - "description": "Expiry is the optional expiration time of the access token.\n\nIf zero, TokenSource implementations will reuse the same\ntoken forever and RefreshToken or equivalent\nmechanisms for that TokenSource will not be used.", + "description": "Expiry is the optional expiration time of the access token.\n\nIf zero, [TokenSource] implementations will reuse the same\ntoken forever and RefreshToken or equivalent\nmechanisms for that TokenSource will not be used.", "type": "string" }, "refresh_token": { diff --git a/coderd/audit.go b/coderd/audit.go index 786707768c05e..e8d7c4dfe9bca 100644 --- a/coderd/audit.go +++ b/coderd/audit.go @@ -40,7 +40,7 @@ func (api *API) auditLogs(rw http.ResponseWriter, r *http.Request) { ctx := r.Context() apiKey := httpmw.APIKey(r) - page, ok := parsePagination(rw, r) + page, ok := ParsePagination(rw, r) if !ok { return } diff --git a/coderd/audit/diff.go b/coderd/audit/diff.go index 56ac9f88ccaae..b8139bb63b290 100644 --- a/coderd/audit/diff.go +++ b/coderd/audit/diff.go @@ -31,9 +31,7 @@ type Auditable interface { database.NotificationTemplate | idpsync.OrganizationSyncSettings | idpsync.GroupSyncSettings | - idpsync.RoleSyncSettings | - database.WorkspaceAgent | - database.WorkspaceApp + idpsync.RoleSyncSettings } // Map is a map of changed fields in an audited resource. 
It maps field names to diff --git a/coderd/audit/request.go b/coderd/audit/request.go index 0fa88fa40e2ea..a973bdb915e3c 100644 --- a/coderd/audit/request.go +++ b/coderd/audit/request.go @@ -6,13 +6,11 @@ import ( "encoding/json" "flag" "fmt" - "net" "net/http" "strconv" "time" "github.com/google/uuid" - "github.com/sqlc-dev/pqtype" "go.opentelemetry.io/otel/baggage" "golang.org/x/xerrors" @@ -133,10 +131,6 @@ func ResourceTarget[T Auditable](tgt T) string { return "Organization Group Sync" case idpsync.RoleSyncSettings: return "Organization Role Sync" - case database.WorkspaceAgent: - return typed.Name - case database.WorkspaceApp: - return typed.Slug default: panic(fmt.Sprintf("unknown resource %T for ResourceTarget", tgt)) } @@ -199,10 +193,6 @@ func ResourceID[T Auditable](tgt T) uuid.UUID { return noID // Org field on audit log has org id case idpsync.RoleSyncSettings: return noID // Org field on audit log has org id - case database.WorkspaceAgent: - return typed.ID - case database.WorkspaceApp: - return typed.ID default: panic(fmt.Sprintf("unknown resource %T for ResourceID", tgt)) } @@ -256,10 +246,6 @@ func ResourceType[T Auditable](tgt T) database.ResourceType { return database.ResourceTypeIdpSyncSettingsRole case idpsync.GroupSyncSettings: return database.ResourceTypeIdpSyncSettingsGroup - case database.WorkspaceAgent: - return database.ResourceTypeWorkspaceAgent - case database.WorkspaceApp: - return database.ResourceTypeWorkspaceApp default: panic(fmt.Sprintf("unknown resource %T for ResourceType", typed)) } @@ -316,10 +302,6 @@ func ResourceRequiresOrgID[T Auditable]() bool { return true case idpsync.RoleSyncSettings: return true - case database.WorkspaceAgent: - return true - case database.WorkspaceApp: - return true default: panic(fmt.Sprintf("unknown resource %T for ResourceRequiresOrgID", tgt)) } @@ -434,7 +416,7 @@ func InitRequest[T Auditable](w http.ResponseWriter, p *RequestParams) (*Request action = req.Action } - ip := 
ParseIP(p.Request.RemoteAddr) + ip := database.ParseIP(p.Request.RemoteAddr) auditLog := database.AuditLog{ ID: uuid.New(), Time: dbtime.Now(), @@ -466,7 +448,7 @@ func InitRequest[T Auditable](w http.ResponseWriter, p *RequestParams) (*Request // BackgroundAudit creates an audit log for a background event. // The audit log is committed upon invocation. func BackgroundAudit[T Auditable](ctx context.Context, p *BackgroundAuditParams[T]) { - ip := ParseIP(p.IP) + ip := database.ParseIP(p.IP) diff := Diff(p.Audit, p.Old, p.New) var err error @@ -581,19 +563,3 @@ func either[T Auditable, R any](old, newVal T, fn func(T) R, auditAction databas panic("both old and new are nil") } } - -func ParseIP(ipStr string) pqtype.Inet { - ip := net.ParseIP(ipStr) - ipNet := net.IPNet{} - if ip != nil { - ipNet = net.IPNet{ - IP: ip, - Mask: net.CIDRMask(len(ip)*8, len(ip)*8), - } - } - - return pqtype.Inet{ - IPNet: ipNet, - Valid: ip != nil, - } -} diff --git a/coderd/audit_test.go b/coderd/audit_test.go index e6fa985038155..13dbc9ccd8406 100644 --- a/coderd/audit_test.go +++ b/coderd/audit_test.go @@ -15,6 +15,7 @@ import ( "github.com/coder/coder/v2/coderd/audit" "github.com/coder/coder/v2/coderd/coderdtest" "github.com/coder/coder/v2/coderd/database" + "github.com/coder/coder/v2/coderd/database/dbgen" "github.com/coder/coder/v2/coderd/rbac" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/provisioner/echo" @@ -531,3 +532,112 @@ func completeWithAgentAndApp() *echo.Responses { }, } } + +// TestDeprecatedConnEvents tests the deprecated connection and disconnection +// events in the audit logs. These events are no longer created, but need to be +// returned by the API. 
+func TestDeprecatedConnEvents(t *testing.T) { + t.Parallel() + var ( + ctx = context.Background() + client, _, api = coderdtest.NewWithAPI(t, &coderdtest.Options{IncludeProvisionerDaemon: true}) + user = coderdtest.CreateFirstUser(t, client) + version = coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, completeWithAgentAndApp()) + template = coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID) + ) + + coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID) + workspace := coderdtest.CreateWorkspace(t, client, template.ID) + workspace.LatestBuild = coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID) + + type additionalFields struct { + audit.AdditionalFields + ConnectionType string `json:"connection_type"` + } + + sshFields := additionalFields{ + AdditionalFields: audit.AdditionalFields{ + WorkspaceName: workspace.Name, + BuildNumber: "999", + BuildReason: "initiator", + WorkspaceOwner: workspace.OwnerName, + WorkspaceID: workspace.ID, + }, + ConnectionType: "SSH", + } + + sshFieldsBytes, err := json.Marshal(sshFields) + require.NoError(t, err) + + appFields := audit.AdditionalFields{ + WorkspaceName: workspace.Name, + // Deliberately empty + BuildNumber: "", + BuildReason: "", + WorkspaceOwner: workspace.OwnerName, + WorkspaceID: workspace.ID, + } + + appFieldsBytes, err := json.Marshal(appFields) + require.NoError(t, err) + + dbgen.AuditLog(t, api.Database, database.AuditLog{ + OrganizationID: user.OrganizationID, + Action: database.AuditActionConnect, + ResourceType: database.ResourceTypeWorkspaceAgent, + ResourceID: workspace.LatestBuild.Resources[0].Agents[0].ID, + ResourceTarget: workspace.LatestBuild.Resources[0].Agents[0].Name, + Time: time.Date(2022, 8, 15, 14, 30, 45, 100, time.UTC), // 2022-8-15 14:30:45 + AdditionalFields: sshFieldsBytes, + }) + + dbgen.AuditLog(t, api.Database, database.AuditLog{ + OrganizationID: user.OrganizationID, + Action: database.AuditActionDisconnect, + 
ResourceType: database.ResourceTypeWorkspaceAgent, + ResourceID: workspace.LatestBuild.Resources[0].Agents[0].ID, + ResourceTarget: workspace.LatestBuild.Resources[0].Agents[0].Name, + Time: time.Date(2022, 8, 15, 14, 35, 0o0, 100, time.UTC), // 2022-8-15 14:35:00 + AdditionalFields: sshFieldsBytes, + }) + + dbgen.AuditLog(t, api.Database, database.AuditLog{ + OrganizationID: user.OrganizationID, + UserID: user.UserID, + Action: database.AuditActionOpen, + ResourceType: database.ResourceTypeWorkspaceApp, + ResourceID: workspace.LatestBuild.Resources[0].Agents[0].Apps[0].ID, + ResourceTarget: workspace.LatestBuild.Resources[0].Agents[0].Apps[0].Slug, + Time: time.Date(2022, 8, 15, 14, 30, 45, 100, time.UTC), // 2022-8-15 14:30:45 + AdditionalFields: appFieldsBytes, + }) + + connLog, err := client.AuditLogs(ctx, codersdk.AuditLogsRequest{ + SearchQuery: "action:connect", + }) + require.NoError(t, err) + require.Len(t, connLog.AuditLogs, 1) + var sshOutFields additionalFields + err = json.Unmarshal(connLog.AuditLogs[0].AdditionalFields, &sshOutFields) + require.NoError(t, err) + require.Equal(t, sshFields, sshOutFields) + + dcLog, err := client.AuditLogs(ctx, codersdk.AuditLogsRequest{ + SearchQuery: "action:disconnect", + }) + require.NoError(t, err) + require.Len(t, dcLog.AuditLogs, 1) + err = json.Unmarshal(dcLog.AuditLogs[0].AdditionalFields, &sshOutFields) + require.NoError(t, err) + require.Equal(t, sshFields, sshOutFields) + + openLog, err := client.AuditLogs(ctx, codersdk.AuditLogsRequest{ + SearchQuery: "action:open", + }) + require.NoError(t, err) + require.Len(t, openLog.AuditLogs, 1) + var appOutFields audit.AdditionalFields + err = json.Unmarshal(openLog.AuditLogs[0].AdditionalFields, &appOutFields) + require.NoError(t, err) + require.Equal(t, appFields, appOutFields) +} diff --git a/coderd/coderd.go b/coderd/coderd.go index 72316d1ea18e5..c3c1fb09cc6cc 100644 --- a/coderd/coderd.go +++ b/coderd/coderd.go @@ -59,6 +59,7 @@ import ( 
"github.com/coder/coder/v2/coderd/appearance" "github.com/coder/coder/v2/coderd/audit" "github.com/coder/coder/v2/coderd/awsidentity" + "github.com/coder/coder/v2/coderd/connectionlog" "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/dbauthz" "github.com/coder/coder/v2/coderd/database/dbrollup" @@ -154,6 +155,7 @@ type Options struct { CacheDir string Auditor audit.Auditor + ConnectionLogger connectionlog.ConnectionLogger AgentConnectionUpdateFrequency time.Duration AgentInactiveDisconnectTimeout time.Duration AWSCertificates awsidentity.Certificates @@ -400,6 +402,9 @@ func New(options *Options) *API { if options.Auditor == nil { options.Auditor = audit.NewNop() } + if options.ConnectionLogger == nil { + options.ConnectionLogger = connectionlog.NewNop() + } if options.SSHConfig.HostnamePrefix == "" { options.SSHConfig.HostnamePrefix = "coder." } @@ -568,6 +573,7 @@ func New(options *Options) *API { }, metricsCache: metricsCache, Auditor: atomic.Pointer[audit.Auditor]{}, + ConnectionLogger: atomic.Pointer[connectionlog.ConnectionLogger]{}, TailnetCoordinator: atomic.Pointer[tailnet.Coordinator]{}, UpdatesProvider: updatesProvider, TemplateScheduleStore: options.TemplateScheduleStore, @@ -589,7 +595,7 @@ func New(options *Options) *API { options.Logger.Named("workspaceapps"), options.AccessURL, options.Authorizer, - &api.Auditor, + &api.ConnectionLogger, options.Database, options.DeploymentValues, oauthConfigs, @@ -691,6 +697,7 @@ func New(options *Options) *API { } api.Auditor.Store(&options.Auditor) + api.ConnectionLogger.Store(&options.ConnectionLogger) api.TailnetCoordinator.Store(&options.TailnetCoordinator) dialer := &InmemTailnetDialer{ CoordPtr: &api.TailnetCoordinator, @@ -1351,6 +1358,7 @@ func New(options *Options) *API { r.Get("/listening-ports", api.workspaceAgentListeningPorts) r.Get("/connection", api.workspaceAgentConnection) r.Get("/containers", api.workspaceAgentListContainers) + r.Get("/containers/watch", 
api.watchWorkspaceAgentContainers) r.Post("/containers/devcontainers/{devcontainer}/recreate", api.workspaceAgentRecreateDevcontainer) r.Get("/coordinate", api.workspaceAgentClientCoordinate) @@ -1612,6 +1620,7 @@ type API struct { // specific replica. ID uuid.UUID Auditor atomic.Pointer[audit.Auditor] + ConnectionLogger atomic.Pointer[connectionlog.ConnectionLogger] WorkspaceClientCoordinateOverride atomic.Pointer[func(rw http.ResponseWriter) bool] TailnetCoordinator atomic.Pointer[tailnet.Coordinator] NetworkTelemetryBatcher *tailnet.NetworkTelemetryBatcher diff --git a/coderd/coderdtest/authorize.go b/coderd/coderdtest/authorize.go index 67551d0e3d2dd..68ab5a27e5a18 100644 --- a/coderd/coderdtest/authorize.go +++ b/coderd/coderdtest/authorize.go @@ -451,6 +451,7 @@ func randomRBACType() string { all := []string{ rbac.ResourceWorkspace.Type, rbac.ResourceAuditLog.Type, + rbac.ResourceConnectionLog.Type, rbac.ResourceTemplate.Type, rbac.ResourceGroup.Type, rbac.ResourceFile.Type, diff --git a/coderd/coderdtest/coderdtest.go b/coderd/coderdtest/coderdtest.go index 4aa968468e146..96030b215e5dd 100644 --- a/coderd/coderdtest/coderdtest.go +++ b/coderd/coderdtest/coderdtest.go @@ -61,6 +61,7 @@ import ( "github.com/coder/coder/v2/coderd/audit" "github.com/coder/coder/v2/coderd/autobuild" "github.com/coder/coder/v2/coderd/awsidentity" + "github.com/coder/coder/v2/coderd/connectionlog" "github.com/coder/coder/v2/coderd/cryptokeys" "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/db2sdk" @@ -125,6 +126,7 @@ type Options struct { TemplateScheduleStore schedule.TemplateScheduleStore Coordinator tailnet.Coordinator CoordinatorResumeTokenProvider tailnet.ResumeTokenProvider + ConnectionLogger connectionlog.ConnectionLogger HealthcheckFunc func(ctx context.Context, apiKey string) *healthsdk.HealthcheckReport HealthcheckTimeout time.Duration @@ -356,6 +358,12 @@ func NewOptions(t testing.TB, options *Options) (func(http.Handler), 
context.Can } auditor.Store(&options.Auditor) + var connectionLogger atomic.Pointer[connectionlog.ConnectionLogger] + if options.ConnectionLogger == nil { + options.ConnectionLogger = connectionlog.NewNop() + } + connectionLogger.Store(&options.ConnectionLogger) + ctx, cancelFunc := context.WithCancel(context.Background()) experiments := coderd.ReadExperiments(*options.Logger, options.DeploymentValues.Experiments) lifecycleExecutor := autobuild.NewExecutor( @@ -543,6 +551,7 @@ func NewOptions(t testing.TB, options *Options) (func(http.Handler), context.Can ExternalAuthConfigs: options.ExternalAuthConfigs, Auditor: options.Auditor, + ConnectionLogger: options.ConnectionLogger, AWSCertificates: options.AWSCertificates, AzureCertificates: options.AzureCertificates, GithubOAuth2Config: options.GithubOAuth2Config, diff --git a/coderd/connectionlog/connectionlog.go b/coderd/connectionlog/connectionlog.go new file mode 100644 index 0000000000000..1b56ffc288fd3 --- /dev/null +++ b/coderd/connectionlog/connectionlog.go @@ -0,0 +1,121 @@ +package connectionlog + +import ( + "context" + "sync" + "testing" + + "github.com/google/uuid" + + "github.com/coder/coder/v2/coderd/database" +) + +type ConnectionLogger interface { + Upsert(ctx context.Context, clog database.UpsertConnectionLogParams) error +} + +type nop struct{} + +func NewNop() ConnectionLogger { + return nop{} +} + +func (nop) Upsert(context.Context, database.UpsertConnectionLogParams) error { + return nil +} + +func NewFake() *FakeConnectionLogger { + return &FakeConnectionLogger{} +} + +type FakeConnectionLogger struct { + mu sync.Mutex + upsertions []database.UpsertConnectionLogParams +} + +func (m *FakeConnectionLogger) Reset() { + m.mu.Lock() + defer m.mu.Unlock() + m.upsertions = make([]database.UpsertConnectionLogParams, 0) +} + +func (m *FakeConnectionLogger) ConnectionLogs() []database.UpsertConnectionLogParams { + m.mu.Lock() + defer m.mu.Unlock() + return m.upsertions +} + +func (m *FakeConnectionLogger) 
Upsert(_ context.Context, clog database.UpsertConnectionLogParams) error { + m.mu.Lock() + defer m.mu.Unlock() + + m.upsertions = append(m.upsertions, clog) + + return nil +} + +func (m *FakeConnectionLogger) Contains(t testing.TB, expected database.UpsertConnectionLogParams) bool { + m.mu.Lock() + defer m.mu.Unlock() + for idx, cl := range m.upsertions { + if expected.ID != uuid.Nil && cl.ID != expected.ID { + t.Logf("connection log %d: expected ID %s, got %s", idx+1, expected.ID, cl.ID) + continue + } + if !expected.Time.IsZero() && expected.Time != cl.Time { + t.Logf("connection log %d: expected Time %s, got %s", idx+1, expected.Time, cl.Time) + continue + } + if expected.OrganizationID != uuid.Nil && cl.OrganizationID != expected.OrganizationID { + t.Logf("connection log %d: expected OrganizationID %s, got %s", idx+1, expected.OrganizationID, cl.OrganizationID) + continue + } + if expected.WorkspaceOwnerID != uuid.Nil && cl.WorkspaceOwnerID != expected.WorkspaceOwnerID { + t.Logf("connection log %d: expected WorkspaceOwnerID %s, got %s", idx+1, expected.WorkspaceOwnerID, cl.WorkspaceOwnerID) + continue + } + if expected.WorkspaceID != uuid.Nil && cl.WorkspaceID != expected.WorkspaceID { + t.Logf("connection log %d: expected WorkspaceID %s, got %s", idx+1, expected.WorkspaceID, cl.WorkspaceID) + continue + } + if expected.WorkspaceName != "" && cl.WorkspaceName != expected.WorkspaceName { + t.Logf("connection log %d: expected WorkspaceName %s, got %s", idx+1, expected.WorkspaceName, cl.WorkspaceName) + continue + } + if expected.AgentName != "" && cl.AgentName != expected.AgentName { + t.Logf("connection log %d: expected AgentName %s, got %s", idx+1, expected.AgentName, cl.AgentName) + continue + } + if expected.Type != "" && cl.Type != expected.Type { + t.Logf("connection log %d: expected Type %s, got %s", idx+1, expected.Type, cl.Type) + continue + } + if expected.Code.Valid && cl.Code.Int32 != expected.Code.Int32 { + t.Logf("connection log %d: expected Code 
%d, got %d", idx+1, expected.Code.Int32, cl.Code.Int32) + continue + } + if expected.Ip.Valid && cl.Ip.IPNet.String() != expected.Ip.IPNet.String() { + t.Logf("connection log %d: expected IP %s, got %s", idx+1, expected.Ip.IPNet, cl.Ip.IPNet) + continue + } + if expected.UserAgent.Valid && cl.UserAgent.String != expected.UserAgent.String { + t.Logf("connection log %d: expected UserAgent %s, got %s", idx+1, expected.UserAgent.String, cl.UserAgent.String) + continue + } + if expected.UserID.Valid && cl.UserID.UUID != expected.UserID.UUID { + t.Logf("connection log %d: expected UserID %s, got %s", idx+1, expected.UserID.UUID, cl.UserID.UUID) + continue + } + if expected.SlugOrPort.Valid && cl.SlugOrPort.String != expected.SlugOrPort.String { + t.Logf("connection log %d: expected SlugOrPort %s, got %s", idx+1, expected.SlugOrPort.String, cl.SlugOrPort.String) + continue + } + if expected.ConnectionID.Valid && cl.ConnectionID.UUID != expected.ConnectionID.UUID { + t.Logf("connection log %d: expected ConnectionID %s, got %s", idx+1, expected.ConnectionID.UUID, cl.ConnectionID.UUID) + continue + } + return true + } + + return false +} diff --git a/coderd/database/db2sdk/db2sdk.go b/coderd/database/db2sdk/db2sdk.go index 5e9be4d61a57c..320a90b09430b 100644 --- a/coderd/database/db2sdk/db2sdk.go +++ b/coderd/database/db2sdk/db2sdk.go @@ -781,26 +781,31 @@ func TemplateRoleActions(role codersdk.TemplateRole) []policy.Action { return []policy.Action{} } -func AuditActionFromAgentProtoConnectionAction(action agentproto.Connection_Action) (database.AuditAction, error) { - switch action { - case agentproto.Connection_CONNECT: - return database.AuditActionConnect, nil - case agentproto.Connection_DISCONNECT: - return database.AuditActionDisconnect, nil +func ConnectionLogConnectionTypeFromAgentProtoConnectionType(typ agentproto.Connection_Type) (database.ConnectionType, error) { + switch typ { + case agentproto.Connection_SSH: + return database.ConnectionTypeSsh, nil + case 
agentproto.Connection_JETBRAINS: + return database.ConnectionTypeJetbrains, nil + case agentproto.Connection_VSCODE: + return database.ConnectionTypeVscode, nil + case agentproto.Connection_RECONNECTING_PTY: + return database.ConnectionTypeReconnectingPty, nil default: - // Also Connection_ACTION_UNSPECIFIED, no mapping. - return "", xerrors.Errorf("unknown agent connection action %q", action) + // Also Connection_TYPE_UNSPECIFIED, no mapping. + return "", xerrors.Errorf("unknown agent connection type %q", typ) } } -func AgentProtoConnectionActionToAuditAction(action database.AuditAction) (agentproto.Connection_Action, error) { +func ConnectionLogStatusFromAgentProtoConnectionAction(action agentproto.Connection_Action) (database.ConnectionStatus, error) { switch action { - case database.AuditActionConnect: - return agentproto.Connection_CONNECT, nil - case database.AuditActionDisconnect: - return agentproto.Connection_DISCONNECT, nil + case agentproto.Connection_CONNECT: + return database.ConnectionStatusConnected, nil + case agentproto.Connection_DISCONNECT: + return database.ConnectionStatusDisconnected, nil default: - return agentproto.Connection_ACTION_UNSPECIFIED, xerrors.Errorf("unknown agent connection action %q", action) + // Also Connection_ACTION_UNSPECIFIED, no mapping. 
+ return "", xerrors.Errorf("unknown agent connection action %q", action) } } diff --git a/coderd/database/dbauthz/dbauthz.go b/coderd/database/dbauthz/dbauthz.go index 55665b4381862..9af6e50764dfd 100644 --- a/coderd/database/dbauthz/dbauthz.go +++ b/coderd/database/dbauthz/dbauthz.go @@ -306,6 +306,24 @@ var ( Scope: rbac.ScopeAll, }.WithCachedASTValue() + subjectConnectionLogger = rbac.Subject{ + Type: rbac.SubjectTypeConnectionLogger, + FriendlyName: "Connection Logger", + ID: uuid.Nil.String(), + Roles: rbac.Roles([]rbac.Role{ + { + Identifier: rbac.RoleIdentifier{Name: "connectionlogger"}, + DisplayName: "Connection Logger", + Site: rbac.Permissions(map[string][]policy.Action{ + rbac.ResourceConnectionLog.Type: {policy.ActionUpdate, policy.ActionRead}, + }), + Org: map[string][]rbac.Permission{}, + User: []rbac.Permission{}, + }, + }), + Scope: rbac.ScopeAll, + }.WithCachedASTValue() + subjectNotifier = rbac.Subject{ Type: rbac.SubjectTypeNotifier, FriendlyName: "Notifier", @@ -521,6 +539,10 @@ func AsKeyReader(ctx context.Context) context.Context { return As(ctx, subjectCryptoKeyReader) } +func AsConnectionLogger(ctx context.Context) context.Context { + return As(ctx, subjectConnectionLogger) +} + // AsNotifier returns a context with an actor that has permissions required for // creating/reading/updating/deleting notifications. func AsNotifier(ctx context.Context) context.Context { @@ -1331,15 +1353,26 @@ func (q *querier) CountAuditLogs(ctx context.Context, arg database.CountAuditLog if err == nil { return q.db.CountAuditLogs(ctx, arg) } - prep, err := prepareSQLFilter(ctx, q.auth, policy.ActionRead, rbac.ResourceAuditLog.Type) if err != nil { return 0, xerrors.Errorf("(dev error) prepare sql filter: %w", err) } - return q.db.CountAuthorizedAuditLogs(ctx, arg, prep) } +func (q *querier) CountConnectionLogs(ctx context.Context, arg database.CountConnectionLogsParams) (int64, error) { + // Just like the actual query, shortcut if the user is an owner. 
+ err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceConnectionLog) + if err == nil { + return q.db.CountConnectionLogs(ctx, arg) + } + prep, err := prepareSQLFilter(ctx, q.auth, policy.ActionRead, rbac.ResourceConnectionLog.Type) + if err != nil { + return 0, xerrors.Errorf("(dev error) prepare sql filter: %w", err) + } + return q.db.CountAuthorizedConnectionLogs(ctx, arg, prep) +} + func (q *querier) CountInProgressPrebuilds(ctx context.Context) ([]database.CountInProgressPrebuildsRow, error) { if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceWorkspace.All()); err != nil { return nil, err @@ -1519,6 +1552,16 @@ func (q *querier) DeleteOAuth2ProviderAppTokensByAppAndUserID(ctx context.Contex return q.db.DeleteOAuth2ProviderAppTokensByAppAndUserID(ctx, arg) } +func (q *querier) DeleteOldAuditLogConnectionEvents(ctx context.Context, threshold database.DeleteOldAuditLogConnectionEventsParams) error { + // `ResourceSystem` is deprecated, but it doesn't make sense to add + // `policy.ActionDelete` to `ResourceAuditLog`, since this is the one and + // only time we'll be deleting from the audit log. + if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceSystem); err != nil { + return err + } + return q.db.DeleteOldAuditLogConnectionEvents(ctx, threshold) +} + func (q *querier) DeleteOldNotificationMessages(ctx context.Context) error { if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceNotificationMessage); err != nil { return err @@ -1856,6 +1899,21 @@ func (q *querier) GetAuthorizationUserRoles(ctx context.Context, userID uuid.UUI return q.db.GetAuthorizationUserRoles(ctx, userID) } +func (q *querier) GetConnectionLogsOffset(ctx context.Context, arg database.GetConnectionLogsOffsetParams) ([]database.GetConnectionLogsOffsetRow, error) { + // Just like with the audit logs query, shortcut if the user is an owner. 
+ err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceConnectionLog) + if err == nil { + return q.db.GetConnectionLogsOffset(ctx, arg) + } + + prep, err := prepareSQLFilter(ctx, q.auth, policy.ActionRead, rbac.ResourceConnectionLog.Type) + if err != nil { + return nil, xerrors.Errorf("(dev error) prepare sql filter: %w", err) + } + + return q.db.GetAuthorizedConnectionLogsOffset(ctx, arg, prep) +} + func (q *querier) GetCoordinatorResumeTokenSigningKey(ctx context.Context) (string, error) { if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceSystem); err != nil { return "", err @@ -5099,6 +5157,13 @@ func (q *querier) UpsertApplicationName(ctx context.Context, value string) error return q.db.UpsertApplicationName(ctx, value) } +func (q *querier) UpsertConnectionLog(ctx context.Context, arg database.UpsertConnectionLogParams) (database.ConnectionLog, error) { + if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceConnectionLog); err != nil { + return database.ConnectionLog{}, err + } + return q.db.UpsertConnectionLog(ctx, arg) +} + func (q *querier) UpsertCoordinatorResumeTokenSigningKey(ctx context.Context, value string) error { if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceSystem); err != nil { return err @@ -5344,3 +5409,11 @@ func (q *querier) GetAuthorizedAuditLogsOffset(ctx context.Context, arg database func (q *querier) CountAuthorizedAuditLogs(ctx context.Context, arg database.CountAuditLogsParams, _ rbac.PreparedAuthorized) (int64, error) { return q.CountAuditLogs(ctx, arg) } + +func (q *querier) GetAuthorizedConnectionLogsOffset(ctx context.Context, arg database.GetConnectionLogsOffsetParams, _ rbac.PreparedAuthorized) ([]database.GetConnectionLogsOffsetRow, error) { + return q.GetConnectionLogsOffset(ctx, arg) +} + +func (q *querier) CountAuthorizedConnectionLogs(ctx context.Context, arg database.CountConnectionLogsParams, _ rbac.PreparedAuthorized) (int64, error) { + return 
q.CountConnectionLogs(ctx, arg) +} diff --git a/coderd/database/dbauthz/dbauthz_test.go b/coderd/database/dbauthz/dbauthz_test.go index fba199b637c06..c153974394650 100644 --- a/coderd/database/dbauthz/dbauthz_test.go +++ b/coderd/database/dbauthz/dbauthz_test.go @@ -337,6 +337,115 @@ func (s *MethodTestSuite) TestAuditLogs() { _ = dbgen.AuditLog(s.T(), db, database.AuditLog{}) check.Args(database.CountAuditLogsParams{}, emptyPreparedAuthorized{}).Asserts(rbac.ResourceAuditLog, policy.ActionRead) })) + s.Run("DeleteOldAuditLogConnectionEvents", s.Subtest(func(db database.Store, check *expects) { + _ = dbgen.AuditLog(s.T(), db, database.AuditLog{}) + check.Args(database.DeleteOldAuditLogConnectionEventsParams{}).Asserts(rbac.ResourceSystem, policy.ActionDelete) + })) +} + +func (s *MethodTestSuite) TestConnectionLogs() { + createWorkspace := func(t *testing.T, db database.Store) database.WorkspaceTable { + u := dbgen.User(s.T(), db, database.User{}) + o := dbgen.Organization(s.T(), db, database.Organization{}) + tpl := dbgen.Template(s.T(), db, database.Template{ + OrganizationID: o.ID, + CreatedBy: u.ID, + }) + return dbgen.Workspace(s.T(), db, database.WorkspaceTable{ + ID: uuid.New(), + OwnerID: u.ID, + OrganizationID: o.ID, + AutomaticUpdates: database.AutomaticUpdatesNever, + TemplateID: tpl.ID, + }) + } + s.Run("UpsertConnectionLog", s.Subtest(func(db database.Store, check *expects) { + ws := createWorkspace(s.T(), db) + check.Args(database.UpsertConnectionLogParams{ + Ip: defaultIPAddress(), + Type: database.ConnectionTypeSsh, + WorkspaceID: ws.ID, + OrganizationID: ws.OrganizationID, + ConnectionStatus: database.ConnectionStatusConnected, + WorkspaceOwnerID: ws.OwnerID, + }).Asserts(rbac.ResourceConnectionLog, policy.ActionUpdate) + })) + s.Run("GetConnectionLogsOffset", s.Subtest(func(db database.Store, check *expects) { + ws := createWorkspace(s.T(), db) + _ = dbgen.ConnectionLog(s.T(), db, database.UpsertConnectionLogParams{ + Ip: defaultIPAddress(), + 
Type: database.ConnectionTypeSsh, + WorkspaceID: ws.ID, + OrganizationID: ws.OrganizationID, + WorkspaceOwnerID: ws.OwnerID, + }) + _ = dbgen.ConnectionLog(s.T(), db, database.UpsertConnectionLogParams{ + Ip: defaultIPAddress(), + Type: database.ConnectionTypeSsh, + WorkspaceID: ws.ID, + OrganizationID: ws.OrganizationID, + WorkspaceOwnerID: ws.OwnerID, + }) + check.Args(database.GetConnectionLogsOffsetParams{ + LimitOpt: 10, + }).Asserts(rbac.ResourceConnectionLog, policy.ActionRead).WithNotAuthorized("nil") + })) + s.Run("GetAuthorizedConnectionLogsOffset", s.Subtest(func(db database.Store, check *expects) { + ws := createWorkspace(s.T(), db) + _ = dbgen.ConnectionLog(s.T(), db, database.UpsertConnectionLogParams{ + Ip: defaultIPAddress(), + Type: database.ConnectionTypeSsh, + WorkspaceID: ws.ID, + OrganizationID: ws.OrganizationID, + WorkspaceOwnerID: ws.OwnerID, + }) + _ = dbgen.ConnectionLog(s.T(), db, database.UpsertConnectionLogParams{ + Ip: defaultIPAddress(), + Type: database.ConnectionTypeSsh, + WorkspaceID: ws.ID, + OrganizationID: ws.OrganizationID, + WorkspaceOwnerID: ws.OwnerID, + }) + check.Args(database.GetConnectionLogsOffsetParams{ + LimitOpt: 10, + }, emptyPreparedAuthorized{}).Asserts(rbac.ResourceConnectionLog, policy.ActionRead) + })) + s.Run("CountConnectionLogs", s.Subtest(func(db database.Store, check *expects) { + ws := createWorkspace(s.T(), db) + _ = dbgen.ConnectionLog(s.T(), db, database.UpsertConnectionLogParams{ + Type: database.ConnectionTypeSsh, + WorkspaceID: ws.ID, + OrganizationID: ws.OrganizationID, + WorkspaceOwnerID: ws.OwnerID, + }) + _ = dbgen.ConnectionLog(s.T(), db, database.UpsertConnectionLogParams{ + Type: database.ConnectionTypeSsh, + WorkspaceID: ws.ID, + OrganizationID: ws.OrganizationID, + WorkspaceOwnerID: ws.OwnerID, + }) + check.Args(database.CountConnectionLogsParams{}).Asserts( + rbac.ResourceConnectionLog, policy.ActionRead, + ).WithNotAuthorized("nil") + })) + s.Run("CountAuthorizedConnectionLogs", 
s.Subtest(func(db database.Store, check *expects) { + ws := createWorkspace(s.T(), db) + _ = dbgen.ConnectionLog(s.T(), db, database.UpsertConnectionLogParams{ + Type: database.ConnectionTypeSsh, + WorkspaceID: ws.ID, + OrganizationID: ws.OrganizationID, + WorkspaceOwnerID: ws.OwnerID, + }) + _ = dbgen.ConnectionLog(s.T(), db, database.UpsertConnectionLogParams{ + Type: database.ConnectionTypeSsh, + WorkspaceID: ws.ID, + OrganizationID: ws.OrganizationID, + WorkspaceOwnerID: ws.OwnerID, + }) + check.Args(database.CountConnectionLogsParams{}, emptyPreparedAuthorized{}).Asserts( + rbac.ResourceConnectionLog, policy.ActionRead, + ) + })) } func (s *MethodTestSuite) TestFile() { diff --git a/coderd/database/dbauthz/setup_test.go b/coderd/database/dbauthz/setup_test.go index 23effafc632e0..d4dacb78a4d50 100644 --- a/coderd/database/dbauthz/setup_test.go +++ b/coderd/database/dbauthz/setup_test.go @@ -318,7 +318,7 @@ func hasEmptyResponse(values []reflect.Value) bool { } } - // Special case for int64, as it's the return type for count query. + // Special case for int64, as it's the return type for count queries. if r.Kind() == reflect.Int64 { if r.Int() == 0 { return true diff --git a/coderd/database/dbgen/dbgen.go b/coderd/database/dbgen/dbgen.go index fda7c6325899f..d5693afe98826 100644 --- a/coderd/database/dbgen/dbgen.go +++ b/coderd/database/dbgen/dbgen.go @@ -65,7 +65,7 @@ func AuditLog(t testing.TB, db database.Store, seed database.AuditLog) database. Action: takeFirst(seed.Action, database.AuditActionCreate), Diff: takeFirstSlice(seed.Diff, []byte("{}")), StatusCode: takeFirst(seed.StatusCode, 200), - AdditionalFields: takeFirstSlice(seed.Diff, []byte("{}")), + AdditionalFields: takeFirstSlice(seed.AdditionalFields, []byte("{}")), RequestID: takeFirst(seed.RequestID, uuid.New()), ResourceIcon: takeFirst(seed.ResourceIcon, ""), }) @@ -73,6 +73,53 @@ func AuditLog(t testing.TB, db database.Store, seed database.AuditLog) database. 
return log } +func ConnectionLog(t testing.TB, db database.Store, seed database.UpsertConnectionLogParams) database.ConnectionLog { + log, err := db.UpsertConnectionLog(genCtx, database.UpsertConnectionLogParams{ + ID: takeFirst(seed.ID, uuid.New()), + Time: takeFirst(seed.Time, dbtime.Now()), + OrganizationID: takeFirst(seed.OrganizationID, uuid.New()), + WorkspaceOwnerID: takeFirst(seed.WorkspaceOwnerID, uuid.New()), + WorkspaceID: takeFirst(seed.WorkspaceID, uuid.New()), + WorkspaceName: takeFirst(seed.WorkspaceName, testutil.GetRandomName(t)), + AgentName: takeFirst(seed.AgentName, testutil.GetRandomName(t)), + Type: takeFirst(seed.Type, database.ConnectionTypeSsh), + Code: sql.NullInt32{ + Int32: takeFirst(seed.Code.Int32, 0), + Valid: takeFirst(seed.Code.Valid, false), + }, + Ip: pqtype.Inet{ + IPNet: net.IPNet{ + IP: net.IPv4(127, 0, 0, 1), + Mask: net.IPv4Mask(255, 255, 255, 255), + }, + Valid: true, + }, + UserAgent: sql.NullString{ + String: takeFirst(seed.UserAgent.String, ""), + Valid: takeFirst(seed.UserAgent.Valid, false), + }, + UserID: uuid.NullUUID{ + UUID: takeFirst(seed.UserID.UUID, uuid.Nil), + Valid: takeFirst(seed.UserID.Valid, false), + }, + SlugOrPort: sql.NullString{ + String: takeFirst(seed.SlugOrPort.String, ""), + Valid: takeFirst(seed.SlugOrPort.Valid, false), + }, + ConnectionID: uuid.NullUUID{ + UUID: takeFirst(seed.ConnectionID.UUID, uuid.Nil), + Valid: takeFirst(seed.ConnectionID.Valid, false), + }, + DisconnectReason: sql.NullString{ + String: takeFirst(seed.DisconnectReason.String, ""), + Valid: takeFirst(seed.DisconnectReason.Valid, false), + }, + ConnectionStatus: takeFirst(seed.ConnectionStatus, database.ConnectionStatusConnected), + }) + require.NoError(t, err, "insert connection log") + return log +} + func Template(t testing.TB, db database.Store, seed database.Template) database.Template { id := takeFirst(seed.ID, uuid.New()) if seed.GroupACL == nil { diff --git a/coderd/database/dbmetrics/querymetrics.go 
b/coderd/database/dbmetrics/querymetrics.go index b8ae92cd9f270..7a7c3cb2d41c6 100644 --- a/coderd/database/dbmetrics/querymetrics.go +++ b/coderd/database/dbmetrics/querymetrics.go @@ -194,6 +194,13 @@ func (m queryMetricsStore) CountAuditLogs(ctx context.Context, arg database.Coun return r0, r1 } +func (m queryMetricsStore) CountConnectionLogs(ctx context.Context, arg database.CountConnectionLogsParams) (int64, error) { + start := time.Now() + r0, r1 := m.s.CountConnectionLogs(ctx, arg) + m.queryLatencies.WithLabelValues("CountConnectionLogs").Observe(time.Since(start).Seconds()) + return r0, r1 +} + func (m queryMetricsStore) CountInProgressPrebuilds(ctx context.Context) ([]database.CountInProgressPrebuildsRow, error) { start := time.Now() r0, r1 := m.s.CountInProgressPrebuilds(ctx) @@ -355,6 +362,13 @@ func (m queryMetricsStore) DeleteOAuth2ProviderAppTokensByAppAndUserID(ctx conte return r0 } +func (m queryMetricsStore) DeleteOldAuditLogConnectionEvents(ctx context.Context, threshold database.DeleteOldAuditLogConnectionEventsParams) error { + start := time.Now() + r0 := m.s.DeleteOldAuditLogConnectionEvents(ctx, threshold) + m.queryLatencies.WithLabelValues("DeleteOldAuditLogConnectionEvents").Observe(time.Since(start).Seconds()) + return r0 +} + func (m queryMetricsStore) DeleteOldNotificationMessages(ctx context.Context) error { start := time.Now() r0 := m.s.DeleteOldNotificationMessages(ctx) @@ -656,6 +670,13 @@ func (m queryMetricsStore) GetAuthorizationUserRoles(ctx context.Context, userID return row, err } +func (m queryMetricsStore) GetConnectionLogsOffset(ctx context.Context, arg database.GetConnectionLogsOffsetParams) ([]database.GetConnectionLogsOffsetRow, error) { + start := time.Now() + r0, r1 := m.s.GetConnectionLogsOffset(ctx, arg) + m.queryLatencies.WithLabelValues("GetConnectionLogsOffset").Observe(time.Since(start).Seconds()) + return r0, r1 +} + func (m queryMetricsStore) GetCoordinatorResumeTokenSigningKey(ctx context.Context) (string, 
error) { start := time.Now() r0, r1 := m.s.GetCoordinatorResumeTokenSigningKey(ctx) @@ -3162,6 +3183,13 @@ func (m queryMetricsStore) UpsertApplicationName(ctx context.Context, value stri return r0 } +func (m queryMetricsStore) UpsertConnectionLog(ctx context.Context, arg database.UpsertConnectionLogParams) (database.ConnectionLog, error) { + start := time.Now() + r0, r1 := m.s.UpsertConnectionLog(ctx, arg) + m.queryLatencies.WithLabelValues("UpsertConnectionLog").Observe(time.Since(start).Seconds()) + return r0, r1 +} + func (m queryMetricsStore) UpsertCoordinatorResumeTokenSigningKey(ctx context.Context, value string) error { start := time.Now() r0 := m.s.UpsertCoordinatorResumeTokenSigningKey(ctx, value) @@ -3392,3 +3420,17 @@ func (m queryMetricsStore) CountAuthorizedAuditLogs(ctx context.Context, arg dat m.queryLatencies.WithLabelValues("CountAuthorizedAuditLogs").Observe(time.Since(start).Seconds()) return r0, r1 } + +func (m queryMetricsStore) GetAuthorizedConnectionLogsOffset(ctx context.Context, arg database.GetConnectionLogsOffsetParams, prepared rbac.PreparedAuthorized) ([]database.GetConnectionLogsOffsetRow, error) { + start := time.Now() + r0, r1 := m.s.GetAuthorizedConnectionLogsOffset(ctx, arg, prepared) + m.queryLatencies.WithLabelValues("GetAuthorizedConnectionLogsOffset").Observe(time.Since(start).Seconds()) + return r0, r1 +} + +func (m queryMetricsStore) CountAuthorizedConnectionLogs(ctx context.Context, arg database.CountConnectionLogsParams, prepared rbac.PreparedAuthorized) (int64, error) { + start := time.Now() + r0, r1 := m.s.CountAuthorizedConnectionLogs(ctx, arg, prepared) + m.queryLatencies.WithLabelValues("CountAuthorizedConnectionLogs").Observe(time.Since(start).Seconds()) + return r0, r1 +} diff --git a/coderd/database/dbmock/dbmock.go b/coderd/database/dbmock/dbmock.go index ec9ca45b195e7..fba3deb45e4be 100644 --- a/coderd/database/dbmock/dbmock.go +++ b/coderd/database/dbmock/dbmock.go @@ -278,6 +278,36 @@ func (mr 
*MockStoreMockRecorder) CountAuthorizedAuditLogs(ctx, arg, prepared any return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountAuthorizedAuditLogs", reflect.TypeOf((*MockStore)(nil).CountAuthorizedAuditLogs), ctx, arg, prepared) } +// CountAuthorizedConnectionLogs mocks base method. +func (m *MockStore) CountAuthorizedConnectionLogs(ctx context.Context, arg database.CountConnectionLogsParams, prepared rbac.PreparedAuthorized) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CountAuthorizedConnectionLogs", ctx, arg, prepared) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CountAuthorizedConnectionLogs indicates an expected call of CountAuthorizedConnectionLogs. +func (mr *MockStoreMockRecorder) CountAuthorizedConnectionLogs(ctx, arg, prepared any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountAuthorizedConnectionLogs", reflect.TypeOf((*MockStore)(nil).CountAuthorizedConnectionLogs), ctx, arg, prepared) +} + +// CountConnectionLogs mocks base method. +func (m *MockStore) CountConnectionLogs(ctx context.Context, arg database.CountConnectionLogsParams) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CountConnectionLogs", ctx, arg) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CountConnectionLogs indicates an expected call of CountConnectionLogs. +func (mr *MockStoreMockRecorder) CountConnectionLogs(ctx, arg any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountConnectionLogs", reflect.TypeOf((*MockStore)(nil).CountConnectionLogs), ctx, arg) +} + // CountInProgressPrebuilds mocks base method. 
func (m *MockStore) CountInProgressPrebuilds(ctx context.Context) ([]database.CountInProgressPrebuildsRow, error) { m.ctrl.T.Helper() @@ -605,6 +635,20 @@ func (mr *MockStoreMockRecorder) DeleteOAuth2ProviderAppTokensByAppAndUserID(ctx return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOAuth2ProviderAppTokensByAppAndUserID", reflect.TypeOf((*MockStore)(nil).DeleteOAuth2ProviderAppTokensByAppAndUserID), ctx, arg) } +// DeleteOldAuditLogConnectionEvents mocks base method. +func (m *MockStore) DeleteOldAuditLogConnectionEvents(ctx context.Context, arg database.DeleteOldAuditLogConnectionEventsParams) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteOldAuditLogConnectionEvents", ctx, arg) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteOldAuditLogConnectionEvents indicates an expected call of DeleteOldAuditLogConnectionEvents. +func (mr *MockStoreMockRecorder) DeleteOldAuditLogConnectionEvents(ctx, arg any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOldAuditLogConnectionEvents", reflect.TypeOf((*MockStore)(nil).DeleteOldAuditLogConnectionEvents), ctx, arg) +} + // DeleteOldNotificationMessages mocks base method. func (m *MockStore) DeleteOldNotificationMessages(ctx context.Context) error { m.ctrl.T.Helper() @@ -1248,6 +1292,21 @@ func (mr *MockStoreMockRecorder) GetAuthorizedAuditLogsOffset(ctx, arg, prepared return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAuthorizedAuditLogsOffset", reflect.TypeOf((*MockStore)(nil).GetAuthorizedAuditLogsOffset), ctx, arg, prepared) } +// GetAuthorizedConnectionLogsOffset mocks base method. 
+func (m *MockStore) GetAuthorizedConnectionLogsOffset(ctx context.Context, arg database.GetConnectionLogsOffsetParams, prepared rbac.PreparedAuthorized) ([]database.GetConnectionLogsOffsetRow, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAuthorizedConnectionLogsOffset", ctx, arg, prepared) + ret0, _ := ret[0].([]database.GetConnectionLogsOffsetRow) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAuthorizedConnectionLogsOffset indicates an expected call of GetAuthorizedConnectionLogsOffset. +func (mr *MockStoreMockRecorder) GetAuthorizedConnectionLogsOffset(ctx, arg, prepared any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAuthorizedConnectionLogsOffset", reflect.TypeOf((*MockStore)(nil).GetAuthorizedConnectionLogsOffset), ctx, arg, prepared) +} + // GetAuthorizedTemplates mocks base method. func (m *MockStore) GetAuthorizedTemplates(ctx context.Context, arg database.GetTemplatesWithFilterParams, prepared rbac.PreparedAuthorized) ([]database.Template, error) { m.ctrl.T.Helper() @@ -1323,6 +1382,21 @@ func (mr *MockStoreMockRecorder) GetAuthorizedWorkspacesAndAgentsByOwnerID(ctx, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAuthorizedWorkspacesAndAgentsByOwnerID", reflect.TypeOf((*MockStore)(nil).GetAuthorizedWorkspacesAndAgentsByOwnerID), ctx, ownerID, prepared) } +// GetConnectionLogsOffset mocks base method. +func (m *MockStore) GetConnectionLogsOffset(ctx context.Context, arg database.GetConnectionLogsOffsetParams) ([]database.GetConnectionLogsOffsetRow, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetConnectionLogsOffset", ctx, arg) + ret0, _ := ret[0].([]database.GetConnectionLogsOffsetRow) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetConnectionLogsOffset indicates an expected call of GetConnectionLogsOffset. 
+func (mr *MockStoreMockRecorder) GetConnectionLogsOffset(ctx, arg any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConnectionLogsOffset", reflect.TypeOf((*MockStore)(nil).GetConnectionLogsOffset), ctx, arg) +} + // GetCoordinatorResumeTokenSigningKey mocks base method. func (m *MockStore) GetCoordinatorResumeTokenSigningKey(ctx context.Context) (string, error) { m.ctrl.T.Helper() @@ -6698,6 +6772,21 @@ func (mr *MockStoreMockRecorder) UpsertApplicationName(ctx, value any) *gomock.C return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertApplicationName", reflect.TypeOf((*MockStore)(nil).UpsertApplicationName), ctx, value) } +// UpsertConnectionLog mocks base method. +func (m *MockStore) UpsertConnectionLog(ctx context.Context, arg database.UpsertConnectionLogParams) (database.ConnectionLog, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpsertConnectionLog", ctx, arg) + ret0, _ := ret[0].(database.ConnectionLog) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpsertConnectionLog indicates an expected call of UpsertConnectionLog. +func (mr *MockStoreMockRecorder) UpsertConnectionLog(ctx, arg any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertConnectionLog", reflect.TypeOf((*MockStore)(nil).UpsertConnectionLog), ctx, arg) +} + // UpsertCoordinatorResumeTokenSigningKey mocks base method. func (m *MockStore) UpsertCoordinatorResumeTokenSigningKey(ctx context.Context, value string) error { m.ctrl.T.Helper() diff --git a/coderd/database/dbpurge/dbpurge.go b/coderd/database/dbpurge/dbpurge.go index b7a308cfd6a06..135d7f40b05dd 100644 --- a/coderd/database/dbpurge/dbpurge.go +++ b/coderd/database/dbpurge/dbpurge.go @@ -18,6 +18,11 @@ import ( const ( delay = 10 * time.Minute maxAgentLogAge = 7 * 24 * time.Hour + // Connection events are now inserted into the `connection_logs` table. 
+ // We'll slowly remove old connection events from the `audit_logs` table, + // but we won't touch the `connection_logs` table. + maxAuditLogConnectionEventAge = 90 * 24 * time.Hour // 90 days + auditLogConnectionEventBatchSize = 1000 ) // New creates a new periodically purging database instance. @@ -63,6 +68,14 @@ func New(ctx context.Context, logger slog.Logger, db database.Store, clk quartz. return xerrors.Errorf("failed to delete old notification messages: %w", err) } + deleteOldAuditLogConnectionEventsBefore := start.Add(-maxAuditLogConnectionEventAge) + if err := tx.DeleteOldAuditLogConnectionEvents(ctx, database.DeleteOldAuditLogConnectionEventsParams{ + BeforeTime: deleteOldAuditLogConnectionEventsBefore, + LimitCount: auditLogConnectionEventBatchSize, + }); err != nil { + return xerrors.Errorf("failed to delete old audit log connection events: %w", err) + } + logger.Debug(ctx, "purged old database entries", slog.F("duration", clk.Since(start))) return nil diff --git a/coderd/database/dbpurge/dbpurge_test.go b/coderd/database/dbpurge/dbpurge_test.go index 4e81868ac73fb..1d57a87e68f48 100644 --- a/coderd/database/dbpurge/dbpurge_test.go +++ b/coderd/database/dbpurge/dbpurge_test.go @@ -490,3 +490,148 @@ func containsProvisionerDaemon(daemons []database.ProvisionerDaemon, name string return d.Name == name }) } + +//nolint:paralleltest // It uses LockIDDBPurge. 
+func TestDeleteOldAuditLogConnectionEvents(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort) + defer cancel() + + clk := quartz.NewMock(t) + now := dbtime.Now() + afterThreshold := now.Add(-91 * 24 * time.Hour) // 91 days ago (older than 90 day threshold) + beforeThreshold := now.Add(-30 * 24 * time.Hour) // 30 days ago (newer than 90 day threshold) + closeBeforeThreshold := now.Add(-89 * 24 * time.Hour) // 89 days ago + clk.Set(now).MustWait(ctx) + + db, _ := dbtestutil.NewDB(t, dbtestutil.WithDumpOnFailure()) + logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}) + user := dbgen.User(t, db, database.User{}) + org := dbgen.Organization(t, db, database.Organization{}) + + oldConnectLog := dbgen.AuditLog(t, db, database.AuditLog{ + UserID: user.ID, + OrganizationID: org.ID, + Time: afterThreshold, + Action: database.AuditActionConnect, + ResourceType: database.ResourceTypeWorkspace, + }) + + oldDisconnectLog := dbgen.AuditLog(t, db, database.AuditLog{ + UserID: user.ID, + OrganizationID: org.ID, + Time: afterThreshold, + Action: database.AuditActionDisconnect, + ResourceType: database.ResourceTypeWorkspace, + }) + + oldOpenLog := dbgen.AuditLog(t, db, database.AuditLog{ + UserID: user.ID, + OrganizationID: org.ID, + Time: afterThreshold, + Action: database.AuditActionOpen, + ResourceType: database.ResourceTypeWorkspace, + }) + + oldCloseLog := dbgen.AuditLog(t, db, database.AuditLog{ + UserID: user.ID, + OrganizationID: org.ID, + Time: afterThreshold, + Action: database.AuditActionClose, + ResourceType: database.ResourceTypeWorkspace, + }) + + recentConnectLog := dbgen.AuditLog(t, db, database.AuditLog{ + UserID: user.ID, + OrganizationID: org.ID, + Time: beforeThreshold, + Action: database.AuditActionConnect, + ResourceType: database.ResourceTypeWorkspace, + }) + + oldNonConnectionLog := dbgen.AuditLog(t, db, database.AuditLog{ + UserID: user.ID, + OrganizationID: org.ID, + Time: afterThreshold, + Action: 
database.AuditActionCreate, + ResourceType: database.ResourceTypeWorkspace, + }) + + nearThresholdConnectLog := dbgen.AuditLog(t, db, database.AuditLog{ + UserID: user.ID, + OrganizationID: org.ID, + Time: closeBeforeThreshold, + Action: database.AuditActionConnect, + ResourceType: database.ResourceTypeWorkspace, + }) + + // Run the purge + done := awaitDoTick(ctx, t, clk) + closer := dbpurge.New(ctx, logger, db, clk) + defer closer.Close() + // Wait for tick + testutil.TryReceive(ctx, t, done) + + // Verify results by querying all audit logs + logs, err := db.GetAuditLogsOffset(ctx, database.GetAuditLogsOffsetParams{}) + require.NoError(t, err) + + // Extract log IDs for comparison + logIDs := make([]uuid.UUID, len(logs)) + for i, log := range logs { + logIDs[i] = log.AuditLog.ID + } + + require.NotContains(t, logIDs, oldConnectLog.ID, "old connect log should be deleted") + require.NotContains(t, logIDs, oldDisconnectLog.ID, "old disconnect log should be deleted") + require.NotContains(t, logIDs, oldOpenLog.ID, "old open log should be deleted") + require.NotContains(t, logIDs, oldCloseLog.ID, "old close log should be deleted") + require.Contains(t, logIDs, recentConnectLog.ID, "recent connect log should be kept") + require.Contains(t, logIDs, nearThresholdConnectLog.ID, "near threshold connect log should be kept") + require.Contains(t, logIDs, oldNonConnectionLog.ID, "old non-connection log should be kept") +} + +func TestDeleteOldAuditLogConnectionEventsLimit(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort) + defer cancel() + + db, _ := dbtestutil.NewDB(t, dbtestutil.WithDumpOnFailure()) + user := dbgen.User(t, db, database.User{}) + org := dbgen.Organization(t, db, database.Organization{}) + + now := dbtime.Now() + threshold := now.Add(-90 * 24 * time.Hour) + + for i := 0; i < 5; i++ { + dbgen.AuditLog(t, db, database.AuditLog{ + UserID: user.ID, + OrganizationID: org.ID, + Time: 
threshold.Add(-time.Duration(i+1) * time.Hour), + Action: database.AuditActionConnect, + ResourceType: database.ResourceTypeWorkspace, + }) + } + + err := db.DeleteOldAuditLogConnectionEvents(ctx, database.DeleteOldAuditLogConnectionEventsParams{ + BeforeTime: threshold, + LimitCount: 1, + }) + require.NoError(t, err) + + logs, err := db.GetAuditLogsOffset(ctx, database.GetAuditLogsOffsetParams{}) + require.NoError(t, err) + + require.Len(t, logs, 4) + + err = db.DeleteOldAuditLogConnectionEvents(ctx, database.DeleteOldAuditLogConnectionEventsParams{ + BeforeTime: threshold, + LimitCount: 100, + }) + require.NoError(t, err) + + logs, err = db.GetAuditLogsOffset(ctx, database.GetAuditLogsOffsetParams{}) + require.NoError(t, err) + + require.Len(t, logs, 0) +} diff --git a/coderd/database/dump.sql b/coderd/database/dump.sql index 54f984294fa4e..26818fbf6c99d 100644 --- a/coderd/database/dump.sql +++ b/coderd/database/dump.sql @@ -38,6 +38,8 @@ CREATE TYPE audit_action AS ENUM ( 'close' ); +COMMENT ON TYPE audit_action IS 'NOTE: `connect`, `disconnect`, `open`, and `close` are deprecated and no longer used - these events are now tracked in the connection_logs table.'; + CREATE TYPE automatic_updates AS ENUM ( 'always', 'never' @@ -52,6 +54,20 @@ CREATE TYPE build_reason AS ENUM ( 'autodelete' ); +CREATE TYPE connection_status AS ENUM ( + 'connected', + 'disconnected' +); + +CREATE TYPE connection_type AS ENUM ( + 'ssh', + 'vscode', + 'jetbrains', + 'reconnecting_pty', + 'workspace_app', + 'port_forwarding' +); + CREATE TYPE crypto_key_feature AS ENUM ( 'workspace_apps_token', 'workspace_apps_api_key', @@ -823,6 +839,39 @@ CREATE TABLE audit_logs ( resource_icon text NOT NULL ); +CREATE TABLE connection_logs ( + id uuid NOT NULL, + connect_time timestamp with time zone NOT NULL, + organization_id uuid NOT NULL, + workspace_owner_id uuid NOT NULL, + workspace_id uuid NOT NULL, + workspace_name text NOT NULL, + agent_name text NOT NULL, + type connection_type NOT NULL, + 
ip inet NOT NULL, + code integer, + user_agent text, + user_id uuid, + slug_or_port text, + connection_id uuid, + disconnect_time timestamp with time zone, + disconnect_reason text +); + +COMMENT ON COLUMN connection_logs.code IS 'Either the HTTP status code of the web request, or the exit code of an SSH connection. For non-web connections, this is Null until we receive a disconnect event for the same connection_id.'; + +COMMENT ON COLUMN connection_logs.user_agent IS 'Null for SSH events. For web connections, this is the User-Agent header from the request.'; + +COMMENT ON COLUMN connection_logs.user_id IS 'Null for SSH events. For web connections, this is the ID of the user that made the request.'; + +COMMENT ON COLUMN connection_logs.slug_or_port IS 'Null for SSH events. For web connections, this is the slug of the app or the port number being forwarded.'; + +COMMENT ON COLUMN connection_logs.connection_id IS 'The SSH connection ID. Used to correlate connections and disconnections. As it originates from the agent, it is not guaranteed to be unique.'; + +COMMENT ON COLUMN connection_logs.disconnect_time IS 'The time the connection was closed. Null for web connections. For other connections, this is null until we receive a disconnect event for the same connection_id.'; + +COMMENT ON COLUMN connection_logs.disconnect_reason IS 'The reason the connection was closed. Null for web connections. 
For other connections, this is null until we receive a disconnect event for the same connection_id.'; + CREATE TABLE crypto_keys ( feature crypto_key_feature NOT NULL, sequence integer NOT NULL, @@ -2413,6 +2462,9 @@ ALTER TABLE ONLY api_keys ALTER TABLE ONLY audit_logs ADD CONSTRAINT audit_logs_pkey PRIMARY KEY (id); +ALTER TABLE ONLY connection_logs + ADD CONSTRAINT connection_logs_pkey PRIMARY KEY (id); + ALTER TABLE ONLY crypto_keys ADD CONSTRAINT crypto_keys_pkey PRIMARY KEY (feature, sequence); @@ -2699,6 +2751,18 @@ CREATE INDEX idx_audit_log_user_id ON audit_logs USING btree (user_id); CREATE INDEX idx_audit_logs_time_desc ON audit_logs USING btree ("time" DESC); +CREATE INDEX idx_connection_logs_connect_time_desc ON connection_logs USING btree (connect_time DESC); + +CREATE UNIQUE INDEX idx_connection_logs_connection_id_workspace_id_agent_name ON connection_logs USING btree (connection_id, workspace_id, agent_name); + +COMMENT ON INDEX idx_connection_logs_connection_id_workspace_id_agent_name IS 'Connection ID is NULL for web events, but present for SSH events. Therefore, this index allows multiple web events for the same workspace & agent. 
For SSH events, the upsertion query handles duplicates on this index by upserting the disconnect_time and disconnect_reason for the same connection_id when the connection is closed.'; + +CREATE INDEX idx_connection_logs_organization_id ON connection_logs USING btree (organization_id); + +CREATE INDEX idx_connection_logs_workspace_id ON connection_logs USING btree (workspace_id); + +CREATE INDEX idx_connection_logs_workspace_owner_id ON connection_logs USING btree (workspace_owner_id); + CREATE INDEX idx_custom_roles_id ON custom_roles USING btree (id); CREATE UNIQUE INDEX idx_custom_roles_name_lower ON custom_roles USING btree (lower(name)); @@ -2906,6 +2970,15 @@ forward without requiring a migration to clean up historical data.'; ALTER TABLE ONLY api_keys ADD CONSTRAINT api_keys_user_id_uuid_fkey FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE; +ALTER TABLE ONLY connection_logs + ADD CONSTRAINT connection_logs_organization_id_fkey FOREIGN KEY (organization_id) REFERENCES organizations(id) ON DELETE CASCADE; + +ALTER TABLE ONLY connection_logs + ADD CONSTRAINT connection_logs_workspace_id_fkey FOREIGN KEY (workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE; + +ALTER TABLE ONLY connection_logs + ADD CONSTRAINT connection_logs_workspace_owner_id_fkey FOREIGN KEY (workspace_owner_id) REFERENCES users(id) ON DELETE CASCADE; + ALTER TABLE ONLY crypto_keys ADD CONSTRAINT crypto_keys_secret_key_id_fkey FOREIGN KEY (secret_key_id) REFERENCES dbcrypt_keys(active_key_digest); diff --git a/coderd/database/foreign_key_constraint.go b/coderd/database/foreign_key_constraint.go index b3b2d631aaa4d..c3aaf7342a97c 100644 --- a/coderd/database/foreign_key_constraint.go +++ b/coderd/database/foreign_key_constraint.go @@ -7,6 +7,9 @@ type ForeignKeyConstraint string // ForeignKeyConstraint enums. 
const ( ForeignKeyAPIKeysUserIDUUID ForeignKeyConstraint = "api_keys_user_id_uuid_fkey" // ALTER TABLE ONLY api_keys ADD CONSTRAINT api_keys_user_id_uuid_fkey FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE; + ForeignKeyConnectionLogsOrganizationID ForeignKeyConstraint = "connection_logs_organization_id_fkey" // ALTER TABLE ONLY connection_logs ADD CONSTRAINT connection_logs_organization_id_fkey FOREIGN KEY (organization_id) REFERENCES organizations(id) ON DELETE CASCADE; + ForeignKeyConnectionLogsWorkspaceID ForeignKeyConstraint = "connection_logs_workspace_id_fkey" // ALTER TABLE ONLY connection_logs ADD CONSTRAINT connection_logs_workspace_id_fkey FOREIGN KEY (workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE; + ForeignKeyConnectionLogsWorkspaceOwnerID ForeignKeyConstraint = "connection_logs_workspace_owner_id_fkey" // ALTER TABLE ONLY connection_logs ADD CONSTRAINT connection_logs_workspace_owner_id_fkey FOREIGN KEY (workspace_owner_id) REFERENCES users(id) ON DELETE CASCADE; ForeignKeyCryptoKeysSecretKeyID ForeignKeyConstraint = "crypto_keys_secret_key_id_fkey" // ALTER TABLE ONLY crypto_keys ADD CONSTRAINT crypto_keys_secret_key_id_fkey FOREIGN KEY (secret_key_id) REFERENCES dbcrypt_keys(active_key_digest); ForeignKeyFkOauth2ProviderAppTokensUserID ForeignKeyConstraint = "fk_oauth2_provider_app_tokens_user_id" // ALTER TABLE ONLY oauth2_provider_app_tokens ADD CONSTRAINT fk_oauth2_provider_app_tokens_user_id FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE; ForeignKeyGitAuthLinksOauthAccessTokenKeyID ForeignKeyConstraint = "git_auth_links_oauth_access_token_key_id_fkey" // ALTER TABLE ONLY external_auth_links ADD CONSTRAINT git_auth_links_oauth_access_token_key_id_fkey FOREIGN KEY (oauth_access_token_key_id) REFERENCES dbcrypt_keys(active_key_digest); diff --git a/coderd/database/migrations/000349_connection_logs.down.sql b/coderd/database/migrations/000349_connection_logs.down.sql new file mode 100644 index 
0000000000000..1a00797086402 --- /dev/null +++ b/coderd/database/migrations/000349_connection_logs.down.sql @@ -0,0 +1,11 @@ +DROP INDEX IF EXISTS idx_connection_logs_workspace_id; +DROP INDEX IF EXISTS idx_connection_logs_workspace_owner_id; +DROP INDEX IF EXISTS idx_connection_logs_organization_id; +DROP INDEX IF EXISTS idx_connection_logs_connect_time_desc; +DROP INDEX IF EXISTS idx_connection_logs_connection_id_workspace_id_agent_name; + +DROP TABLE IF EXISTS connection_logs; + +DROP TYPE IF EXISTS connection_type; + +DROP TYPE IF EXISTS connection_status; diff --git a/coderd/database/migrations/000349_connection_logs.up.sql b/coderd/database/migrations/000349_connection_logs.up.sql new file mode 100644 index 0000000000000..b9d7f0cdda41c --- /dev/null +++ b/coderd/database/migrations/000349_connection_logs.up.sql @@ -0,0 +1,68 @@ +CREATE TYPE connection_status AS ENUM ( + 'connected', + 'disconnected' +); + +CREATE TYPE connection_type AS ENUM ( + -- SSH events + 'ssh', + 'vscode', + 'jetbrains', + 'reconnecting_pty', + -- Web events + 'workspace_app', + 'port_forwarding' +); + +CREATE TABLE connection_logs ( + id uuid NOT NULL, + connect_time timestamp with time zone NOT NULL, + organization_id uuid NOT NULL REFERENCES organizations (id) ON DELETE CASCADE, + workspace_owner_id uuid NOT NULL REFERENCES users (id) ON DELETE CASCADE, + workspace_id uuid NOT NULL REFERENCES workspaces (id) ON DELETE CASCADE, + workspace_name text NOT NULL, + agent_name text NOT NULL, + type connection_type NOT NULL, + ip inet NOT NULL, + code integer, + + -- Only set for web events + user_agent text, + user_id uuid, + slug_or_port text, + + -- Null for web events + connection_id uuid, + disconnect_time timestamp with time zone, -- Null until we upsert a disconnect log for the same connection_id. + disconnect_reason text, + + PRIMARY KEY (id) +); + + +COMMENT ON COLUMN connection_logs.code IS 'Either the HTTP status code of the web request, or the exit code of an SSH connection. 
For non-web connections, this is Null until we receive a disconnect event for the same connection_id.'; + +COMMENT ON COLUMN connection_logs.user_agent IS 'Null for SSH events. For web connections, this is the User-Agent header from the request.'; + +COMMENT ON COLUMN connection_logs.user_id IS 'Null for SSH events. For web connections, this is the ID of the user that made the request.'; + +COMMENT ON COLUMN connection_logs.slug_or_port IS 'Null for SSH events. For web connections, this is the slug of the app or the port number being forwarded.'; + +COMMENT ON COLUMN connection_logs.connection_id IS 'The SSH connection ID. Used to correlate connections and disconnections. As it originates from the agent, it is not guaranteed to be unique.'; + +COMMENT ON COLUMN connection_logs.disconnect_time IS 'The time the connection was closed. Null for web connections. For other connections, this is null until we receive a disconnect event for the same connection_id.'; + +COMMENT ON COLUMN connection_logs.disconnect_reason IS 'The reason the connection was closed. Null for web connections. For other connections, this is null until we receive a disconnect event for the same connection_id.'; + +COMMENT ON TYPE audit_action IS 'NOTE: `connect`, `disconnect`, `open`, and `close` are deprecated and no longer used - these events are now tracked in the connection_logs table.'; + +-- To associate connection closure events with the connection start events. +CREATE UNIQUE INDEX idx_connection_logs_connection_id_workspace_id_agent_name +ON connection_logs (connection_id, workspace_id, agent_name); + +COMMENT ON INDEX idx_connection_logs_connection_id_workspace_id_agent_name IS 'Connection ID is NULL for web events, but present for SSH events. Therefore, this index allows multiple web events for the same workspace & agent. 
For SSH events, the upsertion query handles duplicates on this index by upserting the disconnect_time and disconnect_reason for the same connection_id when the connection is closed.'; + +CREATE INDEX idx_connection_logs_connect_time_desc ON connection_logs USING btree (connect_time DESC); +CREATE INDEX idx_connection_logs_organization_id ON connection_logs USING btree (organization_id); +CREATE INDEX idx_connection_logs_workspace_owner_id ON connection_logs USING btree (workspace_owner_id); +CREATE INDEX idx_connection_logs_workspace_id ON connection_logs USING btree (workspace_id); diff --git a/coderd/database/migrations/testdata/fixtures/000349_connection_logs.up.sql b/coderd/database/migrations/testdata/fixtures/000349_connection_logs.up.sql new file mode 100644 index 0000000000000..bbddf5226bc29 --- /dev/null +++ b/coderd/database/migrations/testdata/fixtures/000349_connection_logs.up.sql @@ -0,0 +1,53 @@ +INSERT INTO connection_logs ( + id, + connect_time, + organization_id, + workspace_owner_id, + workspace_id, + workspace_name, + agent_name, + type, + code, + ip, + user_agent, + user_id, + slug_or_port, + connection_id, + disconnect_time, + disconnect_reason +) VALUES ( + '00000000-0000-0000-0000-000000000001', -- log id + '2023-10-01 12:00:00+00', -- start time + 'bb640d07-ca8a-4869-b6bc-ae61ebb2fda1', -- organization id + 'a0061a8e-7db7-4585-838c-3116a003dd21', -- workspace owner id + '3a9a1feb-e89d-457c-9d53-ac751b198ebe', -- workspace id + 'Test Workspace', -- workspace name + 'test-agent', -- agent name + 'ssh', -- type + 0, -- code + '127.0.0.1', -- ip + NULL, -- user agent + NULL, -- user id + NULL, -- slug or port + '00000000-0000-0000-0000-000000000003', -- connection id + '2023-10-01 12:00:10+00', -- close time + 'server shut down' -- reason +), +( + '00000000-0000-0000-0000-000000000002', -- log id + '2023-10-01 12:05:00+00', -- start time + 'bb640d07-ca8a-4869-b6bc-ae61ebb2fda1', -- organization id + 'a0061a8e-7db7-4585-838c-3116a003dd21', -- 
workspace owner id + '3a9a1feb-e89d-457c-9d53-ac751b198ebe', -- workspace id + 'Test Workspace', -- workspace name + 'test-agent', -- agent name + 'workspace_app', -- type + 200, -- code + '127.0.0.1', + 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36', + 'a0061a8e-7db7-4585-838c-3116a003dd21', -- user id + 'code-server', -- slug or port + NULL, -- connection id (request ID) + NULL, -- close time + NULL -- reason +); diff --git a/coderd/database/modelmethods.go b/coderd/database/modelmethods.go index 07e1f2dc32352..b49fa113d4b12 100644 --- a/coderd/database/modelmethods.go +++ b/coderd/database/modelmethods.go @@ -117,6 +117,19 @@ func (w AuditLog) RBACObject() rbac.Object { return obj } +func (w GetConnectionLogsOffsetRow) RBACObject() rbac.Object { + return w.ConnectionLog.RBACObject() +} + +func (w ConnectionLog) RBACObject() rbac.Object { + obj := rbac.ResourceConnectionLog.WithID(w.ID) + if w.OrganizationID != uuid.Nil { + obj = obj.InOrg(w.OrganizationID) + } + + return obj +} + func (s APIKeyScope) ToRBAC() rbac.ScopeName { switch s { case APIKeyScopeAll: diff --git a/coderd/database/modelqueries.go b/coderd/database/modelqueries.go index 785ccf86afd27..6bb7483847a2e 100644 --- a/coderd/database/modelqueries.go +++ b/coderd/database/modelqueries.go @@ -50,6 +50,7 @@ type customQuerier interface { workspaceQuerier userQuerier auditLogQuerier + connectionLogQuerier } type templateQuerier interface { @@ -611,6 +612,142 @@ func (q *sqlQuerier) CountAuthorizedAuditLogs(ctx context.Context, arg CountAudi return count, nil } +type connectionLogQuerier interface { + GetAuthorizedConnectionLogsOffset(ctx context.Context, arg GetConnectionLogsOffsetParams, prepared rbac.PreparedAuthorized) ([]GetConnectionLogsOffsetRow, error) + CountAuthorizedConnectionLogs(ctx context.Context, arg CountConnectionLogsParams, prepared rbac.PreparedAuthorized) (int64, error) +} + +func (q *sqlQuerier) 
GetAuthorizedConnectionLogsOffset(ctx context.Context, arg GetConnectionLogsOffsetParams, prepared rbac.PreparedAuthorized) ([]GetConnectionLogsOffsetRow, error) { + authorizedFilter, err := prepared.CompileToSQL(ctx, regosql.ConvertConfig{ + VariableConverter: regosql.ConnectionLogConverter(), + }) + if err != nil { + return nil, xerrors.Errorf("compile authorized filter: %w", err) + } + filtered, err := insertAuthorizedFilter(getConnectionLogsOffset, fmt.Sprintf(" AND %s", authorizedFilter)) + if err != nil { + return nil, xerrors.Errorf("insert authorized filter: %w", err) + } + + query := fmt.Sprintf("-- name: GetAuthorizedConnectionLogsOffset :many\n%s", filtered) + rows, err := q.db.QueryContext(ctx, query, + arg.OrganizationID, + arg.WorkspaceOwner, + arg.WorkspaceOwnerID, + arg.WorkspaceOwnerEmail, + arg.Type, + arg.UserID, + arg.Username, + arg.UserEmail, + arg.ConnectedAfter, + arg.ConnectedBefore, + arg.WorkspaceID, + arg.ConnectionID, + arg.Status, + arg.OffsetOpt, + arg.LimitOpt, + ) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetConnectionLogsOffsetRow + for rows.Next() { + var i GetConnectionLogsOffsetRow + if err := rows.Scan( + &i.ConnectionLog.ID, + &i.ConnectionLog.ConnectTime, + &i.ConnectionLog.OrganizationID, + &i.ConnectionLog.WorkspaceOwnerID, + &i.ConnectionLog.WorkspaceID, + &i.ConnectionLog.WorkspaceName, + &i.ConnectionLog.AgentName, + &i.ConnectionLog.Type, + &i.ConnectionLog.Ip, + &i.ConnectionLog.Code, + &i.ConnectionLog.UserAgent, + &i.ConnectionLog.UserID, + &i.ConnectionLog.SlugOrPort, + &i.ConnectionLog.ConnectionID, + &i.ConnectionLog.DisconnectTime, + &i.ConnectionLog.DisconnectReason, + &i.UserUsername, + &i.UserName, + &i.UserEmail, + &i.UserCreatedAt, + &i.UserUpdatedAt, + &i.UserLastSeenAt, + &i.UserStatus, + &i.UserLoginType, + &i.UserRoles, + &i.UserAvatarUrl, + &i.UserDeleted, + &i.UserQuietHoursSchedule, + &i.WorkspaceOwnerUsername, + &i.OrganizationName, + &i.OrganizationDisplayName, + 
&i.OrganizationIcon, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +func (q *sqlQuerier) CountAuthorizedConnectionLogs(ctx context.Context, arg CountConnectionLogsParams, prepared rbac.PreparedAuthorized) (int64, error) { + authorizedFilter, err := prepared.CompileToSQL(ctx, regosql.ConvertConfig{ + VariableConverter: regosql.ConnectionLogConverter(), + }) + if err != nil { + return 0, xerrors.Errorf("compile authorized filter: %w", err) + } + filtered, err := insertAuthorizedFilter(countConnectionLogs, fmt.Sprintf(" AND %s", authorizedFilter)) + if err != nil { + return 0, xerrors.Errorf("insert authorized filter: %w", err) + } + + query := fmt.Sprintf("-- name: CountAuthorizedConnectionLogs :one\n%s", filtered) + rows, err := q.db.QueryContext(ctx, query, + arg.OrganizationID, + arg.WorkspaceOwner, + arg.WorkspaceOwnerID, + arg.WorkspaceOwnerEmail, + arg.Type, + arg.UserID, + arg.Username, + arg.UserEmail, + arg.ConnectedAfter, + arg.ConnectedBefore, + arg.WorkspaceID, + arg.ConnectionID, + arg.Status, + ) + if err != nil { + return 0, err + } + defer rows.Close() + var count int64 + for rows.Next() { + if err := rows.Scan(&count); err != nil { + return 0, err + } + } + if err := rows.Close(); err != nil { + return 0, err + } + if err := rows.Err(); err != nil { + return 0, err + } + return count, nil +} + func insertAuthorizedFilter(query string, replaceWith string) (string, error) { if !strings.Contains(query, authorizedQueryPlaceholder) { return "", xerrors.Errorf("query does not contain authorized replace string, this is not an authorized query") diff --git a/coderd/database/modelqueries_internal_test.go b/coderd/database/modelqueries_internal_test.go index 4f675a1b60785..275ed947a3e4c 100644 --- a/coderd/database/modelqueries_internal_test.go +++ 
b/coderd/database/modelqueries_internal_test.go @@ -76,6 +76,19 @@ func TestAuditLogsQueryConsistency(t *testing.T) { } } +// Same as TestAuditLogsQueryConsistency, but for connection logs. +func TestConnectionLogsQueryConsistency(t *testing.T) { + t.Parallel() + + getWhereClause := extractWhereClause(getConnectionLogsOffset) + require.NotEmpty(t, getWhereClause, "getConnectionLogsOffset query should have a WHERE clause") + + countWhereClause := extractWhereClause(countConnectionLogs) + require.NotEmpty(t, countWhereClause, "countConnectionLogs query should have a WHERE clause") + + require.Equal(t, getWhereClause, countWhereClause, "getConnectionLogsOffset and countConnectionLogs queries should have the same WHERE clause") +} + // extractWhereClause extracts the WHERE clause from a SQL query string func extractWhereClause(query string) string { // Find WHERE and get everything after it diff --git a/coderd/database/models.go b/coderd/database/models.go index 749de51118152..169f6a60be709 100644 --- a/coderd/database/models.go +++ b/coderd/database/models.go @@ -196,6 +196,7 @@ func AllAppSharingLevelValues() []AppSharingLevel { } } +// NOTE: `connect`, `disconnect`, `open`, and `close` are deprecated and no longer used - these events are now tracked in the connection_logs table. 
type AuditAction string const ( @@ -415,6 +416,134 @@ func AllBuildReasonValues() []BuildReason { } } +type ConnectionStatus string + +const ( + ConnectionStatusConnected ConnectionStatus = "connected" + ConnectionStatusDisconnected ConnectionStatus = "disconnected" +) + +func (e *ConnectionStatus) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = ConnectionStatus(s) + case string: + *e = ConnectionStatus(s) + default: + return fmt.Errorf("unsupported scan type for ConnectionStatus: %T", src) + } + return nil +} + +type NullConnectionStatus struct { + ConnectionStatus ConnectionStatus `json:"connection_status"` + Valid bool `json:"valid"` // Valid is true if ConnectionStatus is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullConnectionStatus) Scan(value interface{}) error { + if value == nil { + ns.ConnectionStatus, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.ConnectionStatus.Scan(value) +} + +// Value implements the driver Valuer interface. 
+func (ns NullConnectionStatus) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.ConnectionStatus), nil +} + +func (e ConnectionStatus) Valid() bool { + switch e { + case ConnectionStatusConnected, + ConnectionStatusDisconnected: + return true + } + return false +} + +func AllConnectionStatusValues() []ConnectionStatus { + return []ConnectionStatus{ + ConnectionStatusConnected, + ConnectionStatusDisconnected, + } +} + +type ConnectionType string + +const ( + ConnectionTypeSsh ConnectionType = "ssh" + ConnectionTypeVscode ConnectionType = "vscode" + ConnectionTypeJetbrains ConnectionType = "jetbrains" + ConnectionTypeReconnectingPty ConnectionType = "reconnecting_pty" + ConnectionTypeWorkspaceApp ConnectionType = "workspace_app" + ConnectionTypePortForwarding ConnectionType = "port_forwarding" +) + +func (e *ConnectionType) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = ConnectionType(s) + case string: + *e = ConnectionType(s) + default: + return fmt.Errorf("unsupported scan type for ConnectionType: %T", src) + } + return nil +} + +type NullConnectionType struct { + ConnectionType ConnectionType `json:"connection_type"` + Valid bool `json:"valid"` // Valid is true if ConnectionType is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullConnectionType) Scan(value interface{}) error { + if value == nil { + ns.ConnectionType, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.ConnectionType.Scan(value) +} + +// Value implements the driver Valuer interface. 
+func (ns NullConnectionType) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.ConnectionType), nil +} + +func (e ConnectionType) Valid() bool { + switch e { + case ConnectionTypeSsh, + ConnectionTypeVscode, + ConnectionTypeJetbrains, + ConnectionTypeReconnectingPty, + ConnectionTypeWorkspaceApp, + ConnectionTypePortForwarding: + return true + } + return false +} + +func AllConnectionTypeValues() []ConnectionType { + return []ConnectionType{ + ConnectionTypeSsh, + ConnectionTypeVscode, + ConnectionTypeJetbrains, + ConnectionTypeReconnectingPty, + ConnectionTypeWorkspaceApp, + ConnectionTypePortForwarding, + } +} + type CryptoKeyFeature string const ( @@ -2784,6 +2913,32 @@ type AuditLog struct { ResourceIcon string `db:"resource_icon" json:"resource_icon"` } +type ConnectionLog struct { + ID uuid.UUID `db:"id" json:"id"` + ConnectTime time.Time `db:"connect_time" json:"connect_time"` + OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"` + WorkspaceOwnerID uuid.UUID `db:"workspace_owner_id" json:"workspace_owner_id"` + WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"` + WorkspaceName string `db:"workspace_name" json:"workspace_name"` + AgentName string `db:"agent_name" json:"agent_name"` + Type ConnectionType `db:"type" json:"type"` + Ip pqtype.Inet `db:"ip" json:"ip"` + // Either the HTTP status code of the web request, or the exit code of an SSH connection. For non-web connections, this is Null until we receive a disconnect event for the same connection_id. + Code sql.NullInt32 `db:"code" json:"code"` + // Null for SSH events. For web connections, this is the User-Agent header from the request. + UserAgent sql.NullString `db:"user_agent" json:"user_agent"` + // Null for SSH events. For web connections, this is the ID of the user that made the request. + UserID uuid.NullUUID `db:"user_id" json:"user_id"` + // Null for SSH events. 
For web connections, this is the slug of the app or the port number being forwarded. + SlugOrPort sql.NullString `db:"slug_or_port" json:"slug_or_port"` + // The SSH connection ID. Used to correlate connections and disconnections. As it originates from the agent, it is not guaranteed to be unique. + ConnectionID uuid.NullUUID `db:"connection_id" json:"connection_id"` + // The time the connection was closed. Null for web connections. For other connections, this is null until we receive a disconnect event for the same connection_id. + DisconnectTime sql.NullTime `db:"disconnect_time" json:"disconnect_time"` + // The reason the connection was closed. Null for web connections. For other connections, this is null until we receive a disconnect event for the same connection_id. + DisconnectReason sql.NullString `db:"disconnect_reason" json:"disconnect_reason"` +} + type CryptoKey struct { Feature CryptoKeyFeature `db:"feature" json:"feature"` Sequence int32 `db:"sequence" json:"sequence"` diff --git a/coderd/database/querier.go b/coderd/database/querier.go index b83c7415a60c8..24893a9197815 100644 --- a/coderd/database/querier.go +++ b/coderd/database/querier.go @@ -66,6 +66,7 @@ type sqlcQuerier interface { CleanTailnetLostPeers(ctx context.Context) error CleanTailnetTunnels(ctx context.Context) error CountAuditLogs(ctx context.Context, arg CountAuditLogsParams) (int64, error) + CountConnectionLogs(ctx context.Context, arg CountConnectionLogsParams) (int64, error) // CountInProgressPrebuilds returns the number of in-progress prebuilds, grouped by preset ID and transition. // Prebuild considered in-progress if it's in the "starting", "stopping", or "deleting" state. 
CountInProgressPrebuilds(ctx context.Context) ([]CountInProgressPrebuildsRow, error) @@ -95,6 +96,7 @@ type sqlcQuerier interface { DeleteOAuth2ProviderAppCodesByAppAndUserID(ctx context.Context, arg DeleteOAuth2ProviderAppCodesByAppAndUserIDParams) error DeleteOAuth2ProviderAppSecretByID(ctx context.Context, id uuid.UUID) error DeleteOAuth2ProviderAppTokensByAppAndUserID(ctx context.Context, arg DeleteOAuth2ProviderAppTokensByAppAndUserIDParams) error + DeleteOldAuditLogConnectionEvents(ctx context.Context, arg DeleteOldAuditLogConnectionEventsParams) error // Delete all notification messages which have not been updated for over a week. DeleteOldNotificationMessages(ctx context.Context) error // Delete provisioner daemons that have been created at least a week ago @@ -156,6 +158,7 @@ type sqlcQuerier interface { // This function returns roles for authorization purposes. Implied member roles // are included. GetAuthorizationUserRoles(ctx context.Context, userID uuid.UUID) (GetAuthorizationUserRolesRow, error) + GetConnectionLogsOffset(ctx context.Context, arg GetConnectionLogsOffsetParams) ([]GetConnectionLogsOffsetRow, error) GetCoordinatorResumeTokenSigningKey(ctx context.Context) (string, error) GetCryptoKeyByFeatureAndSequence(ctx context.Context, arg GetCryptoKeyByFeatureAndSequenceParams) (CryptoKey, error) GetCryptoKeys(ctx context.Context) ([]CryptoKey, error) @@ -647,6 +650,7 @@ type sqlcQuerier interface { UpsertAnnouncementBanners(ctx context.Context, value string) error UpsertAppSecurityKey(ctx context.Context, value string) error UpsertApplicationName(ctx context.Context, value string) error + UpsertConnectionLog(ctx context.Context, arg UpsertConnectionLogParams) (ConnectionLog, error) UpsertCoordinatorResumeTokenSigningKey(ctx context.Context, value string) error // The default proxy is implied and not actually stored in the database. // So we need to store it's configuration here for display purposes. 
diff --git a/coderd/database/querier_test.go b/coderd/database/querier_test.go index 789fc85655afb..20b07450364af 100644 --- a/coderd/database/querier_test.go +++ b/coderd/database/querier_test.go @@ -6,6 +6,7 @@ import ( "encoding/json" "errors" "fmt" + "net" "sort" "testing" "time" @@ -13,6 +14,7 @@ import ( "github.com/google/uuid" "github.com/lib/pq" "github.com/prometheus/client_golang/prometheus" + "github.com/sqlc-dev/pqtype" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -30,6 +32,7 @@ import ( "github.com/coder/coder/v2/coderd/provisionerdserver" "github.com/coder/coder/v2/coderd/rbac" "github.com/coder/coder/v2/coderd/rbac/policy" + "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/provisionersdk" "github.com/coder/coder/v2/testutil" ) @@ -2085,6 +2088,785 @@ func auditOnlyIDs[T database.AuditLog | database.GetAuditLogsOffsetRow](logs []T return ids } +func TestGetAuthorizedConnectionLogsOffset(t *testing.T) { + t.Parallel() + + var allLogs []database.ConnectionLog + db, _ := dbtestutil.NewDB(t) + authz := rbac.NewAuthorizer(prometheus.NewRegistry()) + authDb := dbauthz.New(db, authz, slogtest.Make(t, &slogtest.Options{}), coderdtest.AccessControlStorePointer()) + + orgA := dbfake.Organization(t, db).Do() + orgB := dbfake.Organization(t, db).Do() + + user := dbgen.User(t, db, database.User{}) + + tpl := dbgen.Template(t, db, database.Template{ + OrganizationID: orgA.Org.ID, + CreatedBy: user.ID, + }) + + wsID := uuid.New() + createTemplateVersion(t, db, tpl, tvArgs{ + WorkspaceTransition: database.WorkspaceTransitionStart, + Status: database.ProvisionerJobStatusSucceeded, + CreateWorkspace: true, + WorkspaceID: wsID, + }) + + // This map is a simple way to insert a given number of organizations + // and audit logs for each organization. 
+ // map[orgID][]ConnectionLogID + orgConnectionLogs := map[uuid.UUID][]uuid.UUID{ + orgA.Org.ID: {uuid.New(), uuid.New()}, + orgB.Org.ID: {uuid.New(), uuid.New()}, + } + orgIDs := make([]uuid.UUID, 0, len(orgConnectionLogs)) + for orgID := range orgConnectionLogs { + orgIDs = append(orgIDs, orgID) + } + for orgID, ids := range orgConnectionLogs { + for _, id := range ids { + allLogs = append(allLogs, dbgen.ConnectionLog(t, authDb, database.UpsertConnectionLogParams{ + WorkspaceID: wsID, + WorkspaceOwnerID: user.ID, + ID: id, + OrganizationID: orgID, + })) + } + } + + // Now fetch all the logs + ctx := testutil.Context(t, testutil.WaitLong) + auditorRole, err := rbac.RoleByName(rbac.RoleAuditor()) + require.NoError(t, err) + + memberRole, err := rbac.RoleByName(rbac.RoleMember()) + require.NoError(t, err) + + orgAuditorRoles := func(t *testing.T, orgID uuid.UUID) rbac.Role { + t.Helper() + + role, err := rbac.RoleByName(rbac.ScopedRoleOrgAuditor(orgID)) + require.NoError(t, err) + return role + } + + t.Run("NoAccess", func(t *testing.T) { + t.Parallel() + + // Given: A user who is a member of 0 organizations + memberCtx := dbauthz.As(ctx, rbac.Subject{ + FriendlyName: "member", + ID: uuid.NewString(), + Roles: rbac.Roles{memberRole}, + Scope: rbac.ScopeAll, + }) + + // When: The user queries for connection logs + logs, err := authDb.GetConnectionLogsOffset(memberCtx, database.GetConnectionLogsOffsetParams{}) + require.NoError(t, err) + // Then: No logs returned + require.Len(t, logs, 0, "no logs should be returned") + // And: The count matches the number of logs returned + count, err := authDb.CountConnectionLogs(memberCtx, database.CountConnectionLogsParams{}) + require.NoError(t, err) + require.EqualValues(t, len(logs), count) + }) + + t.Run("SiteWideAuditor", func(t *testing.T) { + t.Parallel() + + // Given: A site wide auditor + siteAuditorCtx := dbauthz.As(ctx, rbac.Subject{ + FriendlyName: "owner", + ID: uuid.NewString(), + Roles: rbac.Roles{auditorRole}, + 
Scope: rbac.ScopeAll, + }) + + // When: the auditor queries for connection logs + logs, err := authDb.GetConnectionLogsOffset(siteAuditorCtx, database.GetConnectionLogsOffsetParams{}) + require.NoError(t, err) + // Then: All logs are returned + require.ElementsMatch(t, connectionOnlyIDs(allLogs), connectionOnlyIDs(logs)) + // And: The count matches the number of logs returned + count, err := authDb.CountConnectionLogs(siteAuditorCtx, database.CountConnectionLogsParams{}) + require.NoError(t, err) + require.EqualValues(t, len(logs), count) + }) + + t.Run("SingleOrgAuditor", func(t *testing.T) { + t.Parallel() + + orgID := orgIDs[0] + // Given: An organization scoped auditor + orgAuditCtx := dbauthz.As(ctx, rbac.Subject{ + FriendlyName: "org-auditor", + ID: uuid.NewString(), + Roles: rbac.Roles{orgAuditorRoles(t, orgID)}, + Scope: rbac.ScopeAll, + }) + + // When: The auditor queries for connection logs + logs, err := authDb.GetConnectionLogsOffset(orgAuditCtx, database.GetConnectionLogsOffsetParams{}) + require.NoError(t, err) + // Then: Only the logs for the organization are returned + require.ElementsMatch(t, orgConnectionLogs[orgID], connectionOnlyIDs(logs)) + // And: The count matches the number of logs returned + count, err := authDb.CountConnectionLogs(orgAuditCtx, database.CountConnectionLogsParams{}) + require.NoError(t, err) + require.EqualValues(t, len(logs), count) + }) + + t.Run("TwoOrgAuditors", func(t *testing.T) { + t.Parallel() + + first := orgIDs[0] + second := orgIDs[1] + // Given: A user who is an auditor for two organizations + multiOrgAuditCtx := dbauthz.As(ctx, rbac.Subject{ + FriendlyName: "org-auditor", + ID: uuid.NewString(), + Roles: rbac.Roles{orgAuditorRoles(t, first), orgAuditorRoles(t, second)}, + Scope: rbac.ScopeAll, + }) + + // When: The user queries for connection logs + logs, err := authDb.GetConnectionLogsOffset(multiOrgAuditCtx, database.GetConnectionLogsOffsetParams{}) + require.NoError(t, err) + // Then: All logs for both 
organizations are returned
+		require.ElementsMatch(t, append(orgConnectionLogs[first], orgConnectionLogs[second]...), connectionOnlyIDs(logs))
+		// And: The count matches the number of logs returned
+		count, err := authDb.CountConnectionLogs(multiOrgAuditCtx, database.CountConnectionLogsParams{})
+		require.NoError(t, err)
+		require.EqualValues(t, len(logs), count)
+	})
+
+	t.Run("ErroneousOrg", func(t *testing.T) {
+		t.Parallel()
+
+		// Given: A user who is an auditor for an organization that has 0 logs
+		userCtx := dbauthz.As(ctx, rbac.Subject{
+			FriendlyName: "org-auditor",
+			ID:           uuid.NewString(),
+			Roles:        rbac.Roles{orgAuditorRoles(t, uuid.New())},
+			Scope:        rbac.ScopeAll,
+		})
+
+		// When: The user queries for connection logs
+		logs, err := authDb.GetConnectionLogsOffset(userCtx, database.GetConnectionLogsOffsetParams{})
+		require.NoError(t, err)
+		// Then: No logs are returned
+		require.Len(t, logs, 0, "no logs should be returned")
+		// And: The count matches the number of logs returned
+		count, err := authDb.CountConnectionLogs(userCtx, database.CountConnectionLogsParams{})
+		require.NoError(t, err)
+		require.EqualValues(t, len(logs), count)
+	})
+}
+
+func TestCountConnectionLogs(t *testing.T) {
+	t.Parallel()
+	ctx := testutil.Context(t, testutil.WaitLong)
+
+	db, _ := dbtestutil.NewDB(t)
+
+	orgA := dbfake.Organization(t, db).Do()
+	userA := dbgen.User(t, db, database.User{})
+	tplA := dbgen.Template(t, db, database.Template{OrganizationID: orgA.Org.ID, CreatedBy: userA.ID})
+	wsA := dbgen.Workspace(t, db, database.WorkspaceTable{OwnerID: userA.ID, OrganizationID: orgA.Org.ID, TemplateID: tplA.ID})
+
+	orgB := dbfake.Organization(t, db).Do()
+	userB := dbgen.User(t, db, database.User{})
+	tplB := dbgen.Template(t, db, database.Template{OrganizationID: orgB.Org.ID, CreatedBy: userB.ID})
+	wsB := dbgen.Workspace(t, db, database.WorkspaceTable{OwnerID: userB.ID, OrganizationID: orgB.Org.ID, TemplateID: tplB.ID})
+
+	// Create logs for two different orgs.
+ for i := 0; i < 20; i++ { + dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{ + OrganizationID: wsA.OrganizationID, + WorkspaceOwnerID: wsA.OwnerID, + WorkspaceID: wsA.ID, + Type: database.ConnectionTypeSsh, + }) + } + for i := 0; i < 10; i++ { + dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{ + OrganizationID: wsB.OrganizationID, + WorkspaceOwnerID: wsB.OwnerID, + WorkspaceID: wsB.ID, + Type: database.ConnectionTypeSsh, + }) + } + + // Count with a filter for orgA. + countParams := database.CountConnectionLogsParams{ + OrganizationID: orgA.Org.ID, + } + totalCount, err := db.CountConnectionLogs(ctx, countParams) + require.NoError(t, err) + require.Equal(t, int64(20), totalCount) + + // Get a paginated result for the same filter. + getParams := database.GetConnectionLogsOffsetParams{ + OrganizationID: orgA.Org.ID, + LimitOpt: 5, + OffsetOpt: 10, + } + logs, err := db.GetConnectionLogsOffset(ctx, getParams) + require.NoError(t, err) + require.Len(t, logs, 5) + + // The count with the filter should remain the same, independent of pagination. 
+ countAfterGet, err := db.CountConnectionLogs(ctx, countParams) + require.NoError(t, err) + require.Equal(t, int64(20), countAfterGet) +} + +func TestConnectionLogsOffsetFilters(t *testing.T) { + t.Parallel() + ctx := testutil.Context(t, testutil.WaitLong) + + db, _ := dbtestutil.NewDB(t) + + orgA := dbfake.Organization(t, db).Do() + orgB := dbfake.Organization(t, db).Do() + + user1 := dbgen.User(t, db, database.User{ + Username: "user1", + Email: "user1@test.com", + }) + user2 := dbgen.User(t, db, database.User{ + Username: "user2", + Email: "user2@test.com", + }) + user3 := dbgen.User(t, db, database.User{ + Username: "user3", + Email: "user3@test.com", + }) + + ws1Tpl := dbgen.Template(t, db, database.Template{OrganizationID: orgA.Org.ID, CreatedBy: user1.ID}) + ws1 := dbgen.Workspace(t, db, database.WorkspaceTable{ + OwnerID: user1.ID, + OrganizationID: orgA.Org.ID, + TemplateID: ws1Tpl.ID, + }) + ws2Tpl := dbgen.Template(t, db, database.Template{OrganizationID: orgB.Org.ID, CreatedBy: user2.ID}) + ws2 := dbgen.Workspace(t, db, database.WorkspaceTable{ + OwnerID: user2.ID, + OrganizationID: orgB.Org.ID, + TemplateID: ws2Tpl.ID, + }) + + now := dbtime.Now() + log1ConnID := uuid.New() + log1 := dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{ + Time: now.Add(-4 * time.Hour), + OrganizationID: ws1.OrganizationID, + WorkspaceOwnerID: ws1.OwnerID, + WorkspaceID: ws1.ID, + WorkspaceName: ws1.Name, + Type: database.ConnectionTypeWorkspaceApp, + ConnectionStatus: database.ConnectionStatusConnected, + UserID: uuid.NullUUID{UUID: user1.ID, Valid: true}, + UserAgent: sql.NullString{String: "Mozilla/5.0", Valid: true}, + SlugOrPort: sql.NullString{String: "code-server", Valid: true}, + ConnectionID: uuid.NullUUID{UUID: log1ConnID, Valid: true}, + }) + + log2ConnID := uuid.New() + log2 := dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{ + Time: now.Add(-3 * time.Hour), + OrganizationID: ws1.OrganizationID, + WorkspaceOwnerID: ws1.OwnerID, + 
WorkspaceID: ws1.ID, + WorkspaceName: ws1.Name, + Type: database.ConnectionTypeVscode, + ConnectionStatus: database.ConnectionStatusConnected, + ConnectionID: uuid.NullUUID{UUID: log2ConnID, Valid: true}, + }) + + // Mark log2 as disconnected + log2 = dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{ + Time: now.Add(-2 * time.Hour), + ConnectionID: log2.ConnectionID, + WorkspaceID: ws1.ID, + WorkspaceOwnerID: ws1.OwnerID, + AgentName: log2.AgentName, + ConnectionStatus: database.ConnectionStatusDisconnected, + + OrganizationID: log2.OrganizationID, + }) + + log3ConnID := uuid.New() + log3 := dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{ + Time: now.Add(-2 * time.Hour), + OrganizationID: ws2.OrganizationID, + WorkspaceOwnerID: ws2.OwnerID, + WorkspaceID: ws2.ID, + WorkspaceName: ws2.Name, + Type: database.ConnectionTypeSsh, + ConnectionStatus: database.ConnectionStatusConnected, + UserID: uuid.NullUUID{UUID: user2.ID, Valid: true}, + ConnectionID: uuid.NullUUID{UUID: log3ConnID, Valid: true}, + }) + + // Mark log3 as disconnected + log3 = dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{ + Time: now.Add(-1 * time.Hour), + ConnectionID: log3.ConnectionID, + WorkspaceOwnerID: log3.WorkspaceOwnerID, + WorkspaceID: ws2.ID, + AgentName: log3.AgentName, + ConnectionStatus: database.ConnectionStatusDisconnected, + + OrganizationID: log3.OrganizationID, + }) + + log4 := dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{ + Time: now.Add(-1 * time.Hour), + OrganizationID: ws2.OrganizationID, + WorkspaceOwnerID: ws2.OwnerID, + WorkspaceID: ws2.ID, + WorkspaceName: ws2.Name, + Type: database.ConnectionTypeVscode, + ConnectionStatus: database.ConnectionStatusConnected, + UserID: uuid.NullUUID{UUID: user3.ID, Valid: true}, + }) + + testCases := []struct { + name string + params database.GetConnectionLogsOffsetParams + expectedLogIDs []uuid.UUID + }{ + { + name: "NoFilter", + params: database.GetConnectionLogsOffsetParams{}, + 
expectedLogIDs: []uuid.UUID{ + log1.ID, log2.ID, log3.ID, log4.ID, + }, + }, + { + name: "OrganizationID", + params: database.GetConnectionLogsOffsetParams{ + OrganizationID: orgB.Org.ID, + }, + expectedLogIDs: []uuid.UUID{log3.ID, log4.ID}, + }, + { + name: "WorkspaceOwner", + params: database.GetConnectionLogsOffsetParams{ + WorkspaceOwner: user1.Username, + }, + expectedLogIDs: []uuid.UUID{log1.ID, log2.ID}, + }, + { + name: "WorkspaceOwnerID", + params: database.GetConnectionLogsOffsetParams{ + WorkspaceOwnerID: user1.ID, + }, + expectedLogIDs: []uuid.UUID{log1.ID, log2.ID}, + }, + { + name: "WorkspaceOwnerEmail", + params: database.GetConnectionLogsOffsetParams{ + WorkspaceOwnerEmail: user2.Email, + }, + expectedLogIDs: []uuid.UUID{log3.ID, log4.ID}, + }, + { + name: "Type", + params: database.GetConnectionLogsOffsetParams{ + Type: string(database.ConnectionTypeVscode), + }, + expectedLogIDs: []uuid.UUID{log2.ID, log4.ID}, + }, + { + name: "UserID", + params: database.GetConnectionLogsOffsetParams{ + UserID: user1.ID, + }, + expectedLogIDs: []uuid.UUID{log1.ID}, + }, + { + name: "Username", + params: database.GetConnectionLogsOffsetParams{ + Username: user1.Username, + }, + expectedLogIDs: []uuid.UUID{log1.ID}, + }, + { + name: "UserEmail", + params: database.GetConnectionLogsOffsetParams{ + UserEmail: user3.Email, + }, + expectedLogIDs: []uuid.UUID{log4.ID}, + }, + { + name: "ConnectedAfter", + params: database.GetConnectionLogsOffsetParams{ + ConnectedAfter: now.Add(-90 * time.Minute), // 1.5 hours ago + }, + expectedLogIDs: []uuid.UUID{log4.ID}, + }, + { + name: "ConnectedBefore", + params: database.GetConnectionLogsOffsetParams{ + ConnectedBefore: now.Add(-150 * time.Minute), + }, + expectedLogIDs: []uuid.UUID{log1.ID, log2.ID}, + }, + { + name: "WorkspaceID", + params: database.GetConnectionLogsOffsetParams{ + WorkspaceID: ws2.ID, + }, + expectedLogIDs: []uuid.UUID{log3.ID, log4.ID}, + }, + { + name: "ConnectionID", + params: 
database.GetConnectionLogsOffsetParams{ + ConnectionID: log1.ConnectionID.UUID, + }, + expectedLogIDs: []uuid.UUID{log1.ID}, + }, + { + name: "StatusOngoing", + params: database.GetConnectionLogsOffsetParams{ + Status: string(codersdk.ConnectionLogStatusOngoing), + }, + expectedLogIDs: []uuid.UUID{log4.ID}, + }, + { + name: "StatusCompleted", + params: database.GetConnectionLogsOffsetParams{ + Status: string(codersdk.ConnectionLogStatusCompleted), + }, + expectedLogIDs: []uuid.UUID{log2.ID, log3.ID}, + }, + { + name: "OrganizationAndTypeAndStatus", + params: database.GetConnectionLogsOffsetParams{ + OrganizationID: orgA.Org.ID, + Type: string(database.ConnectionTypeVscode), + Status: string(codersdk.ConnectionLogStatusCompleted), + }, + expectedLogIDs: []uuid.UUID{log2.ID}, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + logs, err := db.GetConnectionLogsOffset(ctx, tc.params) + require.NoError(t, err) + count, err := db.CountConnectionLogs(ctx, database.CountConnectionLogsParams{ + OrganizationID: tc.params.OrganizationID, + WorkspaceOwner: tc.params.WorkspaceOwner, + Type: tc.params.Type, + UserID: tc.params.UserID, + Username: tc.params.Username, + UserEmail: tc.params.UserEmail, + ConnectedAfter: tc.params.ConnectedAfter, + ConnectedBefore: tc.params.ConnectedBefore, + WorkspaceID: tc.params.WorkspaceID, + ConnectionID: tc.params.ConnectionID, + Status: tc.params.Status, + WorkspaceOwnerID: tc.params.WorkspaceOwnerID, + WorkspaceOwnerEmail: tc.params.WorkspaceOwnerEmail, + }) + require.NoError(t, err) + require.ElementsMatch(t, tc.expectedLogIDs, connectionOnlyIDs(logs)) + require.Equal(t, len(tc.expectedLogIDs), int(count), "CountConnectionLogs should match the number of returned logs (no offset or limit)") + }) + } +} + +func connectionOnlyIDs[T database.ConnectionLog | database.GetConnectionLogsOffsetRow](logs []T) []uuid.UUID { + ids := make([]uuid.UUID, 0, len(logs)) + for _, log := range logs { 
+ switch log := any(log).(type) { + case database.ConnectionLog: + ids = append(ids, log.ID) + case database.GetConnectionLogsOffsetRow: + ids = append(ids, log.ConnectionLog.ID) + default: + panic("unreachable") + } + } + return ids +} + +func TestUpsertConnectionLog(t *testing.T) { + t.Parallel() + createWorkspace := func(t *testing.T, db database.Store) database.WorkspaceTable { + u := dbgen.User(t, db, database.User{}) + o := dbgen.Organization(t, db, database.Organization{}) + tpl := dbgen.Template(t, db, database.Template{ + OrganizationID: o.ID, + CreatedBy: u.ID, + }) + return dbgen.Workspace(t, db, database.WorkspaceTable{ + ID: uuid.New(), + OwnerID: u.ID, + OrganizationID: o.ID, + AutomaticUpdates: database.AutomaticUpdatesNever, + TemplateID: tpl.ID, + }) + } + + t.Run("ConnectThenDisconnect", func(t *testing.T) { + t.Parallel() + db, _ := dbtestutil.NewDB(t) + ctx := context.Background() + + ws := createWorkspace(t, db) + + connectionID := uuid.New() + agentName := "test-agent" + + // 1. Insert a 'connect' event. + connectTime := dbtime.Now() + connectParams := database.UpsertConnectionLogParams{ + ID: uuid.New(), + Time: connectTime, + OrganizationID: ws.OrganizationID, + WorkspaceOwnerID: ws.OwnerID, + WorkspaceID: ws.ID, + WorkspaceName: ws.Name, + AgentName: agentName, + Type: database.ConnectionTypeSsh, + ConnectionID: uuid.NullUUID{UUID: connectionID, Valid: true}, + ConnectionStatus: database.ConnectionStatusConnected, + Ip: pqtype.Inet{ + IPNet: net.IPNet{ + IP: net.IPv4(127, 0, 0, 1), + Mask: net.IPv4Mask(255, 255, 255, 255), + }, + Valid: true, + }, + } + + log1, err := db.UpsertConnectionLog(ctx, connectParams) + require.NoError(t, err) + require.Equal(t, connectParams.ID, log1.ID) + require.False(t, log1.DisconnectTime.Valid, "DisconnectTime should not be set on connect") + + // Check that one row exists. 
+ rows, err := db.GetConnectionLogsOffset(ctx, database.GetConnectionLogsOffsetParams{LimitOpt: 10}) + require.NoError(t, err) + require.Len(t, rows, 1) + + // 2. Insert a 'disconnected' event for the same connection. + disconnectTime := connectTime.Add(time.Second) + disconnectParams := database.UpsertConnectionLogParams{ + ConnectionID: uuid.NullUUID{UUID: connectionID, Valid: true}, + WorkspaceID: ws.ID, + AgentName: agentName, + ConnectionStatus: database.ConnectionStatusDisconnected, + + // Updated to: + Time: disconnectTime, + DisconnectReason: sql.NullString{String: "test disconnect", Valid: true}, + Code: sql.NullInt32{Int32: 1, Valid: true}, + + // Ignored + ID: uuid.New(), + OrganizationID: ws.OrganizationID, + WorkspaceOwnerID: ws.OwnerID, + WorkspaceName: ws.Name, + Type: database.ConnectionTypeSsh, + Ip: pqtype.Inet{ + IPNet: net.IPNet{ + IP: net.IPv4(127, 0, 0, 1), + Mask: net.IPv4Mask(255, 255, 255, 254), + }, + Valid: true, + }, + } + + log2, err := db.UpsertConnectionLog(ctx, disconnectParams) + require.NoError(t, err) + + // Updated + require.Equal(t, log1.ID, log2.ID) + require.True(t, log2.DisconnectTime.Valid) + require.True(t, disconnectTime.Equal(log2.DisconnectTime.Time)) + require.Equal(t, disconnectParams.DisconnectReason.String, log2.DisconnectReason.String) + + rows, err = db.GetConnectionLogsOffset(ctx, database.GetConnectionLogsOffsetParams{}) + require.NoError(t, err) + require.Len(t, rows, 1) + }) + + t.Run("ConnectDoesNotUpdate", func(t *testing.T) { + t.Parallel() + db, _ := dbtestutil.NewDB(t) + ctx := context.Background() + + ws := createWorkspace(t, db) + + connectionID := uuid.New() + agentName := "test-agent" + + // 1. Insert a 'connect' event. 
+ connectTime := dbtime.Now() + connectParams := database.UpsertConnectionLogParams{ + ID: uuid.New(), + Time: connectTime, + OrganizationID: ws.OrganizationID, + WorkspaceOwnerID: ws.OwnerID, + WorkspaceID: ws.ID, + WorkspaceName: ws.Name, + AgentName: agentName, + Type: database.ConnectionTypeSsh, + ConnectionID: uuid.NullUUID{UUID: connectionID, Valid: true}, + ConnectionStatus: database.ConnectionStatusConnected, + Ip: pqtype.Inet{ + IPNet: net.IPNet{ + IP: net.IPv4(127, 0, 0, 1), + Mask: net.IPv4Mask(255, 255, 255, 255), + }, + Valid: true, + }, + } + + log, err := db.UpsertConnectionLog(ctx, connectParams) + require.NoError(t, err) + + // 2. Insert another 'connect' event for the same connection. + connectTime2 := connectTime.Add(time.Second) + connectParams2 := database.UpsertConnectionLogParams{ + ConnectionID: uuid.NullUUID{UUID: connectionID, Valid: true}, + WorkspaceID: ws.ID, + AgentName: agentName, + ConnectionStatus: database.ConnectionStatusConnected, + + // Ignored + ID: uuid.New(), + Time: connectTime2, + OrganizationID: ws.OrganizationID, + WorkspaceOwnerID: ws.OwnerID, + WorkspaceName: ws.Name, + Type: database.ConnectionTypeSsh, + Code: sql.NullInt32{Int32: 0, Valid: false}, + Ip: pqtype.Inet{ + IPNet: net.IPNet{ + IP: net.IPv4(127, 0, 0, 1), + Mask: net.IPv4Mask(255, 255, 255, 254), + }, + Valid: true, + }, + } + + origLog, err := db.UpsertConnectionLog(ctx, connectParams2) + require.NoError(t, err) + require.Equal(t, log, origLog, "connect update should be a no-op") + + // Check that still only one row exists. 
+		rows, err := db.GetConnectionLogsOffset(ctx, database.GetConnectionLogsOffsetParams{})
+		require.NoError(t, err)
+		require.Len(t, rows, 1)
+		require.Equal(t, log, rows[0].ConnectionLog)
+	})
+
+	t.Run("DisconnectThenConnect", func(t *testing.T) {
+		t.Parallel()
+
+		db, _ := dbtestutil.NewDB(t)
+		ctx := context.Background()
+
+		ws := createWorkspace(t, db)
+
+		connectionID := uuid.New()
+		agentName := "test-agent"
+
+		// Insert just a 'disconnect' event
+		disconnectTime := dbtime.Now()
+		disconnectParams := database.UpsertConnectionLogParams{
+			ID:               uuid.New(),
+			Time:             disconnectTime,
+			OrganizationID:   ws.OrganizationID,
+			WorkspaceOwnerID: ws.OwnerID,
+			WorkspaceID:      ws.ID,
+			WorkspaceName:    ws.Name,
+			AgentName:        agentName,
+			Type:             database.ConnectionTypeSsh,
+			ConnectionID:     uuid.NullUUID{UUID: connectionID, Valid: true},
+			ConnectionStatus: database.ConnectionStatusDisconnected,
+			DisconnectReason: sql.NullString{String: "server shutting down", Valid: true},
+			Ip: pqtype.Inet{
+				IPNet: net.IPNet{
+					IP:   net.IPv4(127, 0, 0, 1),
+					Mask: net.IPv4Mask(255, 255, 255, 255),
+				},
+				Valid: true,
+			},
+		}
+
+		_, err := db.UpsertConnectionLog(ctx, disconnectParams)
+		require.NoError(t, err)
+
+		firstRows, err := db.GetConnectionLogsOffset(ctx, database.GetConnectionLogsOffsetParams{})
+		require.NoError(t, err)
+		require.Len(t, firstRows, 1)
+
+		// We expect the connection event to be marked as closed with the start
+		// and close time being the same.
+		require.True(t, firstRows[0].ConnectionLog.DisconnectTime.Valid)
+		require.Equal(t, disconnectTime, firstRows[0].ConnectionLog.DisconnectTime.Time.UTC())
+		require.Equal(t, firstRows[0].ConnectionLog.ConnectTime.UTC(), firstRows[0].ConnectionLog.DisconnectTime.Time.UTC())
+
+		// Now insert a 'connect' event for the same connection.
+ // This should be a no op + connectTime := disconnectTime.Add(time.Second) + connectParams := database.UpsertConnectionLogParams{ + ID: uuid.New(), + Time: connectTime, + OrganizationID: ws.OrganizationID, + WorkspaceOwnerID: ws.OwnerID, + WorkspaceID: ws.ID, + WorkspaceName: ws.Name, + AgentName: agentName, + Type: database.ConnectionTypeSsh, + ConnectionID: uuid.NullUUID{UUID: connectionID, Valid: true}, + ConnectionStatus: database.ConnectionStatusConnected, + DisconnectReason: sql.NullString{String: "reconnected", Valid: true}, + Code: sql.NullInt32{Int32: 0, Valid: false}, + Ip: pqtype.Inet{ + IPNet: net.IPNet{ + IP: net.IPv4(127, 0, 0, 1), + Mask: net.IPv4Mask(255, 255, 255, 255), + }, + Valid: true, + }, + } + + _, err = db.UpsertConnectionLog(ctx, connectParams) + require.NoError(t, err) + + secondRows, err := db.GetConnectionLogsOffset(ctx, database.GetConnectionLogsOffsetParams{}) + require.NoError(t, err) + require.Len(t, secondRows, 1) + require.Equal(t, firstRows, secondRows) + + // Upsert a disconnection, which should also be a no op + disconnectParams.DisconnectReason = sql.NullString{ + String: "updated close reason", + Valid: true, + } + _, err = db.UpsertConnectionLog(ctx, disconnectParams) + require.NoError(t, err) + thirdRows, err := db.GetConnectionLogsOffset(ctx, database.GetConnectionLogsOffsetParams{}) + require.NoError(t, err) + require.Len(t, secondRows, 1) + // The close reason shouldn't be updated + require.Equal(t, secondRows, thirdRows) + }) +} + type tvArgs struct { Status database.ProvisionerJobStatus // CreateWorkspace is true if we should create a workspace for the template version diff --git a/coderd/database/queries.sql.go b/coderd/database/queries.sql.go index 04ded71f1242a..0ef4553149465 100644 --- a/coderd/database/queries.sql.go +++ b/coderd/database/queries.sql.go @@ -566,6 +566,33 @@ func (q *sqlQuerier) CountAuditLogs(ctx context.Context, arg CountAuditLogsParam return count, err } +const 
deleteOldAuditLogConnectionEvents = `-- name: DeleteOldAuditLogConnectionEvents :exec +DELETE FROM audit_logs +WHERE id IN ( + SELECT id FROM audit_logs + WHERE + ( + action = 'connect' + OR action = 'disconnect' + OR action = 'open' + OR action = 'close' + ) + AND "time" < $1::timestamp with time zone + ORDER BY "time" ASC + LIMIT $2 +) +` + +type DeleteOldAuditLogConnectionEventsParams struct { + BeforeTime time.Time `db:"before_time" json:"before_time"` + LimitCount int32 `db:"limit_count" json:"limit_count"` +} + +func (q *sqlQuerier) DeleteOldAuditLogConnectionEvents(ctx context.Context, arg DeleteOldAuditLogConnectionEventsParams) error { + _, err := q.db.ExecContext(ctx, deleteOldAuditLogConnectionEvents, arg.BeforeTime, arg.LimitCount) + return err +} + const getAuditLogsOffset = `-- name: GetAuditLogsOffset :many SELECT audit_logs.id, audit_logs.time, audit_logs.user_id, audit_logs.organization_id, audit_logs.ip, audit_logs.user_agent, audit_logs.resource_type, audit_logs.resource_id, audit_logs.resource_target, audit_logs.action, audit_logs.diff, audit_logs.status_code, audit_logs.additional_fields, audit_logs.request_id, audit_logs.resource_icon, -- sqlc.embed(users) would be nice but it does not seem to play well with @@ -880,6 +907,509 @@ func (q *sqlQuerier) InsertAuditLog(ctx context.Context, arg InsertAuditLogParam return i, err } +const countConnectionLogs = `-- name: CountConnectionLogs :one +SELECT + COUNT(*) AS count +FROM + connection_logs +JOIN users AS workspace_owner ON + connection_logs.workspace_owner_id = workspace_owner.id +LEFT JOIN users ON + connection_logs.user_id = users.id +JOIN organizations ON + connection_logs.organization_id = organizations.id +WHERE + -- Filter organization_id + CASE + WHEN $1 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + connection_logs.organization_id = $1 + ELSE true + END + -- Filter by workspace owner username + AND CASE + WHEN $2 :: text != '' THEN + workspace_owner_id = ( + SELECT id 
FROM users + WHERE lower(username) = lower($2) AND deleted = false + ) + ELSE true + END + -- Filter by workspace_owner_id + AND CASE + WHEN $3 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + workspace_owner_id = $3 + ELSE true + END + -- Filter by workspace_owner_email + AND CASE + WHEN $4 :: text != '' THEN + workspace_owner_id = ( + SELECT id FROM users + WHERE email = $4 AND deleted = false + ) + ELSE true + END + -- Filter by type + AND CASE + WHEN $5 :: text != '' THEN + type = $5 :: connection_type + ELSE true + END + -- Filter by user_id + AND CASE + WHEN $6 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + user_id = $6 + ELSE true + END + -- Filter by username + AND CASE + WHEN $7 :: text != '' THEN + user_id = ( + SELECT id FROM users + WHERE lower(username) = lower($7) AND deleted = false + ) + ELSE true + END + -- Filter by user_email + AND CASE + WHEN $8 :: text != '' THEN + users.email = $8 + ELSE true + END + -- Filter by connected_after + AND CASE + WHEN $9 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN + connect_time >= $9 + ELSE true + END + -- Filter by connected_before + AND CASE + WHEN $10 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN + connect_time <= $10 + ELSE true + END + -- Filter by workspace_id + AND CASE + WHEN $11 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + connection_logs.workspace_id = $11 + ELSE true + END + -- Filter by connection_id + AND CASE + WHEN $12 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + connection_logs.connection_id = $12 + ELSE true + END + -- Filter by whether the session has a disconnect_time + AND CASE + WHEN $13 :: text != '' THEN + (($13 = 'ongoing' AND disconnect_time IS NULL) OR + ($13 = 'completed' AND disconnect_time IS NOT NULL)) AND + -- Exclude web events, since we don't know their close time. 
+ "type" NOT IN ('workspace_app', 'port_forwarding') + ELSE true + END + -- Authorize Filter clause will be injected below in + -- CountAuthorizedConnectionLogs + -- @authorize_filter +` + +type CountConnectionLogsParams struct { + OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"` + WorkspaceOwner string `db:"workspace_owner" json:"workspace_owner"` + WorkspaceOwnerID uuid.UUID `db:"workspace_owner_id" json:"workspace_owner_id"` + WorkspaceOwnerEmail string `db:"workspace_owner_email" json:"workspace_owner_email"` + Type string `db:"type" json:"type"` + UserID uuid.UUID `db:"user_id" json:"user_id"` + Username string `db:"username" json:"username"` + UserEmail string `db:"user_email" json:"user_email"` + ConnectedAfter time.Time `db:"connected_after" json:"connected_after"` + ConnectedBefore time.Time `db:"connected_before" json:"connected_before"` + WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"` + ConnectionID uuid.UUID `db:"connection_id" json:"connection_id"` + Status string `db:"status" json:"status"` +} + +func (q *sqlQuerier) CountConnectionLogs(ctx context.Context, arg CountConnectionLogsParams) (int64, error) { + row := q.db.QueryRowContext(ctx, countConnectionLogs, + arg.OrganizationID, + arg.WorkspaceOwner, + arg.WorkspaceOwnerID, + arg.WorkspaceOwnerEmail, + arg.Type, + arg.UserID, + arg.Username, + arg.UserEmail, + arg.ConnectedAfter, + arg.ConnectedBefore, + arg.WorkspaceID, + arg.ConnectionID, + arg.Status, + ) + var count int64 + err := row.Scan(&count) + return count, err +} + +const getConnectionLogsOffset = `-- name: GetConnectionLogsOffset :many +SELECT + connection_logs.id, connection_logs.connect_time, connection_logs.organization_id, connection_logs.workspace_owner_id, connection_logs.workspace_id, connection_logs.workspace_name, connection_logs.agent_name, connection_logs.type, connection_logs.ip, connection_logs.code, connection_logs.user_agent, connection_logs.user_id, connection_logs.slug_or_port, 
connection_logs.connection_id, connection_logs.disconnect_time, connection_logs.disconnect_reason, + -- sqlc.embed(users) would be nice but it does not seem to play well with + -- left joins. This user metadata is necessary for parity with the audit logs + -- API. + users.username AS user_username, + users.name AS user_name, + users.email AS user_email, + users.created_at AS user_created_at, + users.updated_at AS user_updated_at, + users.last_seen_at AS user_last_seen_at, + users.status AS user_status, + users.login_type AS user_login_type, + users.rbac_roles AS user_roles, + users.avatar_url AS user_avatar_url, + users.deleted AS user_deleted, + users.quiet_hours_schedule AS user_quiet_hours_schedule, + workspace_owner.username AS workspace_owner_username, + organizations.name AS organization_name, + organizations.display_name AS organization_display_name, + organizations.icon AS organization_icon +FROM + connection_logs +JOIN users AS workspace_owner ON + connection_logs.workspace_owner_id = workspace_owner.id +LEFT JOIN users ON + connection_logs.user_id = users.id +JOIN organizations ON + connection_logs.organization_id = organizations.id +WHERE + -- Filter organization_id + CASE + WHEN $1 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + connection_logs.organization_id = $1 + ELSE true + END + -- Filter by workspace owner username + AND CASE + WHEN $2 :: text != '' THEN + workspace_owner_id = ( + SELECT id FROM users + WHERE lower(username) = lower($2) AND deleted = false + ) + ELSE true + END + -- Filter by workspace_owner_id + AND CASE + WHEN $3 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + workspace_owner_id = $3 + ELSE true + END + -- Filter by workspace_owner_email + AND CASE + WHEN $4 :: text != '' THEN + workspace_owner_id = ( + SELECT id FROM users + WHERE email = $4 AND deleted = false + ) + ELSE true + END + -- Filter by type + AND CASE + WHEN $5 :: text != '' THEN + type = $5 :: connection_type + ELSE true + END + -- 
Filter by user_id + AND CASE + WHEN $6 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + user_id = $6 + ELSE true + END + -- Filter by username + AND CASE + WHEN $7 :: text != '' THEN + user_id = ( + SELECT id FROM users + WHERE lower(username) = lower($7) AND deleted = false + ) + ELSE true + END + -- Filter by user_email + AND CASE + WHEN $8 :: text != '' THEN + users.email = $8 + ELSE true + END + -- Filter by connected_after + AND CASE + WHEN $9 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN + connect_time >= $9 + ELSE true + END + -- Filter by connected_before + AND CASE + WHEN $10 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN + connect_time <= $10 + ELSE true + END + -- Filter by workspace_id + AND CASE + WHEN $11 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + connection_logs.workspace_id = $11 + ELSE true + END + -- Filter by connection_id + AND CASE + WHEN $12 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + connection_logs.connection_id = $12 + ELSE true + END + -- Filter by whether the session has a disconnect_time + AND CASE + WHEN $13 :: text != '' THEN + (($13 = 'ongoing' AND disconnect_time IS NULL) OR + ($13 = 'completed' AND disconnect_time IS NOT NULL)) AND + -- Exclude web events, since we don't know their close time. + "type" NOT IN ('workspace_app', 'port_forwarding') + ELSE true + END + -- Authorize Filter clause will be injected below in + -- GetAuthorizedConnectionLogsOffset + -- @authorize_filter +ORDER BY + connect_time DESC +LIMIT + -- a limit of 0 means "no limit". The connection log table is unbounded + -- in size, and is expected to be quite large. Implement a default + -- limit of 100 to prevent accidental excessively large queries. 
+ COALESCE(NULLIF($15 :: int, 0), 100) +OFFSET + $14 +` + +type GetConnectionLogsOffsetParams struct { + OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"` + WorkspaceOwner string `db:"workspace_owner" json:"workspace_owner"` + WorkspaceOwnerID uuid.UUID `db:"workspace_owner_id" json:"workspace_owner_id"` + WorkspaceOwnerEmail string `db:"workspace_owner_email" json:"workspace_owner_email"` + Type string `db:"type" json:"type"` + UserID uuid.UUID `db:"user_id" json:"user_id"` + Username string `db:"username" json:"username"` + UserEmail string `db:"user_email" json:"user_email"` + ConnectedAfter time.Time `db:"connected_after" json:"connected_after"` + ConnectedBefore time.Time `db:"connected_before" json:"connected_before"` + WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"` + ConnectionID uuid.UUID `db:"connection_id" json:"connection_id"` + Status string `db:"status" json:"status"` + OffsetOpt int32 `db:"offset_opt" json:"offset_opt"` + LimitOpt int32 `db:"limit_opt" json:"limit_opt"` +} + +type GetConnectionLogsOffsetRow struct { + ConnectionLog ConnectionLog `db:"connection_log" json:"connection_log"` + UserUsername sql.NullString `db:"user_username" json:"user_username"` + UserName sql.NullString `db:"user_name" json:"user_name"` + UserEmail sql.NullString `db:"user_email" json:"user_email"` + UserCreatedAt sql.NullTime `db:"user_created_at" json:"user_created_at"` + UserUpdatedAt sql.NullTime `db:"user_updated_at" json:"user_updated_at"` + UserLastSeenAt sql.NullTime `db:"user_last_seen_at" json:"user_last_seen_at"` + UserStatus NullUserStatus `db:"user_status" json:"user_status"` + UserLoginType NullLoginType `db:"user_login_type" json:"user_login_type"` + UserRoles pq.StringArray `db:"user_roles" json:"user_roles"` + UserAvatarUrl sql.NullString `db:"user_avatar_url" json:"user_avatar_url"` + UserDeleted sql.NullBool `db:"user_deleted" json:"user_deleted"` + UserQuietHoursSchedule sql.NullString 
`db:"user_quiet_hours_schedule" json:"user_quiet_hours_schedule"` + WorkspaceOwnerUsername string `db:"workspace_owner_username" json:"workspace_owner_username"` + OrganizationName string `db:"organization_name" json:"organization_name"` + OrganizationDisplayName string `db:"organization_display_name" json:"organization_display_name"` + OrganizationIcon string `db:"organization_icon" json:"organization_icon"` +} + +func (q *sqlQuerier) GetConnectionLogsOffset(ctx context.Context, arg GetConnectionLogsOffsetParams) ([]GetConnectionLogsOffsetRow, error) { + rows, err := q.db.QueryContext(ctx, getConnectionLogsOffset, + arg.OrganizationID, + arg.WorkspaceOwner, + arg.WorkspaceOwnerID, + arg.WorkspaceOwnerEmail, + arg.Type, + arg.UserID, + arg.Username, + arg.UserEmail, + arg.ConnectedAfter, + arg.ConnectedBefore, + arg.WorkspaceID, + arg.ConnectionID, + arg.Status, + arg.OffsetOpt, + arg.LimitOpt, + ) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetConnectionLogsOffsetRow + for rows.Next() { + var i GetConnectionLogsOffsetRow + if err := rows.Scan( + &i.ConnectionLog.ID, + &i.ConnectionLog.ConnectTime, + &i.ConnectionLog.OrganizationID, + &i.ConnectionLog.WorkspaceOwnerID, + &i.ConnectionLog.WorkspaceID, + &i.ConnectionLog.WorkspaceName, + &i.ConnectionLog.AgentName, + &i.ConnectionLog.Type, + &i.ConnectionLog.Ip, + &i.ConnectionLog.Code, + &i.ConnectionLog.UserAgent, + &i.ConnectionLog.UserID, + &i.ConnectionLog.SlugOrPort, + &i.ConnectionLog.ConnectionID, + &i.ConnectionLog.DisconnectTime, + &i.ConnectionLog.DisconnectReason, + &i.UserUsername, + &i.UserName, + &i.UserEmail, + &i.UserCreatedAt, + &i.UserUpdatedAt, + &i.UserLastSeenAt, + &i.UserStatus, + &i.UserLoginType, + &i.UserRoles, + &i.UserAvatarUrl, + &i.UserDeleted, + &i.UserQuietHoursSchedule, + &i.WorkspaceOwnerUsername, + &i.OrganizationName, + &i.OrganizationDisplayName, + &i.OrganizationIcon, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := 
rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const upsertConnectionLog = `-- name: UpsertConnectionLog :one +INSERT INTO connection_logs ( + id, + connect_time, + organization_id, + workspace_owner_id, + workspace_id, + workspace_name, + agent_name, + type, + code, + ip, + user_agent, + user_id, + slug_or_port, + connection_id, + disconnect_reason, + disconnect_time +) VALUES + ($1, $15, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, + -- If we've only received a disconnect event, mark the event as immediately + -- closed. + CASE + WHEN $16::connection_status = 'disconnected' + THEN $15 :: timestamp with time zone + ELSE NULL + END) +ON CONFLICT (connection_id, workspace_id, agent_name) +DO UPDATE SET + -- No-op if the connection is still open. + disconnect_time = CASE + WHEN $16::connection_status = 'disconnected' + -- Can only be set once + AND connection_logs.disconnect_time IS NULL + THEN EXCLUDED.connect_time + ELSE connection_logs.disconnect_time + END, + disconnect_reason = CASE + WHEN $16::connection_status = 'disconnected' + -- Can only be set once + AND connection_logs.disconnect_reason IS NULL + THEN EXCLUDED.disconnect_reason + ELSE connection_logs.disconnect_reason + END, + code = CASE + WHEN $16::connection_status = 'disconnected' + -- Can only be set once + AND connection_logs.code IS NULL + THEN EXCLUDED.code + ELSE connection_logs.code + END +RETURNING id, connect_time, organization_id, workspace_owner_id, workspace_id, workspace_name, agent_name, type, ip, code, user_agent, user_id, slug_or_port, connection_id, disconnect_time, disconnect_reason +` + +type UpsertConnectionLogParams struct { + ID uuid.UUID `db:"id" json:"id"` + OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"` + WorkspaceOwnerID uuid.UUID `db:"workspace_owner_id" json:"workspace_owner_id"` + WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"` + 
WorkspaceName string `db:"workspace_name" json:"workspace_name"` + AgentName string `db:"agent_name" json:"agent_name"` + Type ConnectionType `db:"type" json:"type"` + Code sql.NullInt32 `db:"code" json:"code"` + Ip pqtype.Inet `db:"ip" json:"ip"` + UserAgent sql.NullString `db:"user_agent" json:"user_agent"` + UserID uuid.NullUUID `db:"user_id" json:"user_id"` + SlugOrPort sql.NullString `db:"slug_or_port" json:"slug_or_port"` + ConnectionID uuid.NullUUID `db:"connection_id" json:"connection_id"` + DisconnectReason sql.NullString `db:"disconnect_reason" json:"disconnect_reason"` + Time time.Time `db:"time" json:"time"` + ConnectionStatus ConnectionStatus `db:"connection_status" json:"connection_status"` +} + +func (q *sqlQuerier) UpsertConnectionLog(ctx context.Context, arg UpsertConnectionLogParams) (ConnectionLog, error) { + row := q.db.QueryRowContext(ctx, upsertConnectionLog, + arg.ID, + arg.OrganizationID, + arg.WorkspaceOwnerID, + arg.WorkspaceID, + arg.WorkspaceName, + arg.AgentName, + arg.Type, + arg.Code, + arg.Ip, + arg.UserAgent, + arg.UserID, + arg.SlugOrPort, + arg.ConnectionID, + arg.DisconnectReason, + arg.Time, + arg.ConnectionStatus, + ) + var i ConnectionLog + err := row.Scan( + &i.ID, + &i.ConnectTime, + &i.OrganizationID, + &i.WorkspaceOwnerID, + &i.WorkspaceID, + &i.WorkspaceName, + &i.AgentName, + &i.Type, + &i.Ip, + &i.Code, + &i.UserAgent, + &i.UserID, + &i.SlugOrPort, + &i.ConnectionID, + &i.DisconnectTime, + &i.DisconnectReason, + ) + return i, err +} + const deleteCryptoKey = `-- name: DeleteCryptoKey :one UPDATE crypto_keys SET secret = NULL, secret_key_id = NULL diff --git a/coderd/database/queries/auditlogs.sql b/coderd/database/queries/auditlogs.sql index 6269f21cd27e4..63e8c721c8e4c 100644 --- a/coderd/database/queries/auditlogs.sql +++ b/coderd/database/queries/auditlogs.sql @@ -237,3 +237,19 @@ WHERE -- Authorize Filter clause will be injected below in CountAuthorizedAuditLogs -- @authorize_filter ; + +-- name: 
DeleteOldAuditLogConnectionEvents :exec +DELETE FROM audit_logs +WHERE id IN ( + SELECT id FROM audit_logs + WHERE + ( + action = 'connect' + OR action = 'disconnect' + OR action = 'open' + OR action = 'close' + ) + AND "time" < @before_time::timestamp with time zone + ORDER BY "time" ASC + LIMIT @limit_count +); diff --git a/coderd/database/queries/connectionlogs.sql b/coderd/database/queries/connectionlogs.sql new file mode 100644 index 0000000000000..eb2d1b0cb171a --- /dev/null +++ b/coderd/database/queries/connectionlogs.sql @@ -0,0 +1,293 @@ +-- name: GetConnectionLogsOffset :many +SELECT + sqlc.embed(connection_logs), + -- sqlc.embed(users) would be nice but it does not seem to play well with + -- left joins. This user metadata is necessary for parity with the audit logs + -- API. + users.username AS user_username, + users.name AS user_name, + users.email AS user_email, + users.created_at AS user_created_at, + users.updated_at AS user_updated_at, + users.last_seen_at AS user_last_seen_at, + users.status AS user_status, + users.login_type AS user_login_type, + users.rbac_roles AS user_roles, + users.avatar_url AS user_avatar_url, + users.deleted AS user_deleted, + users.quiet_hours_schedule AS user_quiet_hours_schedule, + workspace_owner.username AS workspace_owner_username, + organizations.name AS organization_name, + organizations.display_name AS organization_display_name, + organizations.icon AS organization_icon +FROM + connection_logs +JOIN users AS workspace_owner ON + connection_logs.workspace_owner_id = workspace_owner.id +LEFT JOIN users ON + connection_logs.user_id = users.id +JOIN organizations ON + connection_logs.organization_id = organizations.id +WHERE + -- Filter organization_id + CASE + WHEN @organization_id :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + connection_logs.organization_id = @organization_id + ELSE true + END + -- Filter by workspace owner username + AND CASE + WHEN @workspace_owner :: text != '' THEN + 
workspace_owner_id = ( + SELECT id FROM users + WHERE lower(username) = lower(@workspace_owner) AND deleted = false + ) + ELSE true + END + -- Filter by workspace_owner_id + AND CASE + WHEN @workspace_owner_id :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + workspace_owner_id = @workspace_owner_id + ELSE true + END + -- Filter by workspace_owner_email + AND CASE + WHEN @workspace_owner_email :: text != '' THEN + workspace_owner_id = ( + SELECT id FROM users + WHERE email = @workspace_owner_email AND deleted = false + ) + ELSE true + END + -- Filter by type + AND CASE + WHEN @type :: text != '' THEN + type = @type :: connection_type + ELSE true + END + -- Filter by user_id + AND CASE + WHEN @user_id :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + user_id = @user_id + ELSE true + END + -- Filter by username + AND CASE + WHEN @username :: text != '' THEN + user_id = ( + SELECT id FROM users + WHERE lower(username) = lower(@username) AND deleted = false + ) + ELSE true + END + -- Filter by user_email + AND CASE + WHEN @user_email :: text != '' THEN + users.email = @user_email + ELSE true + END + -- Filter by connected_after + AND CASE + WHEN @connected_after :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN + connect_time >= @connected_after + ELSE true + END + -- Filter by connected_before + AND CASE + WHEN @connected_before :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN + connect_time <= @connected_before + ELSE true + END + -- Filter by workspace_id + AND CASE + WHEN @workspace_id :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + connection_logs.workspace_id = @workspace_id + ELSE true + END + -- Filter by connection_id + AND CASE + WHEN @connection_id :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + connection_logs.connection_id = @connection_id + ELSE true + END + -- Filter by whether the session has a disconnect_time + AND CASE + WHEN @status :: text != '' THEN + ((@status = 'ongoing' 
AND disconnect_time IS NULL) OR + (@status = 'completed' AND disconnect_time IS NOT NULL)) AND + -- Exclude web events, since we don't know their close time. + "type" NOT IN ('workspace_app', 'port_forwarding') + ELSE true + END + -- Authorize Filter clause will be injected below in + -- GetAuthorizedConnectionLogsOffset + -- @authorize_filter +ORDER BY + connect_time DESC +LIMIT + -- a limit of 0 means "no limit". The connection log table is unbounded + -- in size, and is expected to be quite large. Implement a default + -- limit of 100 to prevent accidental excessively large queries. + COALESCE(NULLIF(@limit_opt :: int, 0), 100) +OFFSET + @offset_opt; + +-- name: CountConnectionLogs :one +SELECT + COUNT(*) AS count +FROM + connection_logs +JOIN users AS workspace_owner ON + connection_logs.workspace_owner_id = workspace_owner.id +LEFT JOIN users ON + connection_logs.user_id = users.id +JOIN organizations ON + connection_logs.organization_id = organizations.id +WHERE + -- Filter organization_id + CASE + WHEN @organization_id :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + connection_logs.organization_id = @organization_id + ELSE true + END + -- Filter by workspace owner username + AND CASE + WHEN @workspace_owner :: text != '' THEN + workspace_owner_id = ( + SELECT id FROM users + WHERE lower(username) = lower(@workspace_owner) AND deleted = false + ) + ELSE true + END + -- Filter by workspace_owner_id + AND CASE + WHEN @workspace_owner_id :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + workspace_owner_id = @workspace_owner_id + ELSE true + END + -- Filter by workspace_owner_email + AND CASE + WHEN @workspace_owner_email :: text != '' THEN + workspace_owner_id = ( + SELECT id FROM users + WHERE email = @workspace_owner_email AND deleted = false + ) + ELSE true + END + -- Filter by type + AND CASE + WHEN @type :: text != '' THEN + type = @type :: connection_type + ELSE true + END + -- Filter by user_id + AND CASE + WHEN @user_id :: 
uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + user_id = @user_id + ELSE true + END + -- Filter by username + AND CASE + WHEN @username :: text != '' THEN + user_id = ( + SELECT id FROM users + WHERE lower(username) = lower(@username) AND deleted = false + ) + ELSE true + END + -- Filter by user_email + AND CASE + WHEN @user_email :: text != '' THEN + users.email = @user_email + ELSE true + END + -- Filter by connected_after + AND CASE + WHEN @connected_after :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN + connect_time >= @connected_after + ELSE true + END + -- Filter by connected_before + AND CASE + WHEN @connected_before :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN + connect_time <= @connected_before + ELSE true + END + -- Filter by workspace_id + AND CASE + WHEN @workspace_id :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + connection_logs.workspace_id = @workspace_id + ELSE true + END + -- Filter by connection_id + AND CASE + WHEN @connection_id :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + connection_logs.connection_id = @connection_id + ELSE true + END + -- Filter by whether the session has a disconnect_time + AND CASE + WHEN @status :: text != '' THEN + ((@status = 'ongoing' AND disconnect_time IS NULL) OR + (@status = 'completed' AND disconnect_time IS NOT NULL)) AND + -- Exclude web events, since we don't know their close time. 
+ "type" NOT IN ('workspace_app', 'port_forwarding') + ELSE true + END + -- Authorize Filter clause will be injected below in + -- CountAuthorizedConnectionLogs + -- @authorize_filter +; + +-- name: UpsertConnectionLog :one +INSERT INTO connection_logs ( + id, + connect_time, + organization_id, + workspace_owner_id, + workspace_id, + workspace_name, + agent_name, + type, + code, + ip, + user_agent, + user_id, + slug_or_port, + connection_id, + disconnect_reason, + disconnect_time +) VALUES + ($1, @time, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, + -- If we've only received a disconnect event, mark the event as immediately + -- closed. + CASE + WHEN @connection_status::connection_status = 'disconnected' + THEN @time :: timestamp with time zone + ELSE NULL + END) +ON CONFLICT (connection_id, workspace_id, agent_name) +DO UPDATE SET + -- No-op if the connection is still open. + disconnect_time = CASE + WHEN @connection_status::connection_status = 'disconnected' + -- Can only be set once + AND connection_logs.disconnect_time IS NULL + THEN EXCLUDED.connect_time + ELSE connection_logs.disconnect_time + END, + disconnect_reason = CASE + WHEN @connection_status::connection_status = 'disconnected' + -- Can only be set once + AND connection_logs.disconnect_reason IS NULL + THEN EXCLUDED.disconnect_reason + ELSE connection_logs.disconnect_reason + END, + code = CASE + WHEN @connection_status::connection_status = 'disconnected' + -- Can only be set once + AND connection_logs.code IS NULL + THEN EXCLUDED.code + ELSE connection_logs.code + END +RETURNING *; diff --git a/coderd/database/types.go b/coderd/database/types.go index a4a723d02b466..6d0f036fe692c 100644 --- a/coderd/database/types.go +++ b/coderd/database/types.go @@ -4,10 +4,12 @@ import ( "database/sql/driver" "encoding/json" "fmt" + "net" "strings" "time" "github.com/google/uuid" + "github.com/sqlc-dev/pqtype" "golang.org/x/xerrors" "github.com/coder/coder/v2/coderd/rbac/policy" @@ -237,3 +239,19 @@ 
func (a *UserLinkClaims) Scan(src interface{}) error { func (a UserLinkClaims) Value() (driver.Value, error) { return json.Marshal(a) } + +func ParseIP(ipStr string) pqtype.Inet { + ip := net.ParseIP(ipStr) + ipNet := net.IPNet{} + if ip != nil { + ipNet = net.IPNet{ + IP: ip, + Mask: net.CIDRMask(len(ip)*8, len(ip)*8), + } + } + + return pqtype.Inet{ + IPNet: ipNet, + Valid: ip != nil, + } +} diff --git a/coderd/database/unique_constraint.go b/coderd/database/unique_constraint.go index b3af136997c9c..38c95e67410c9 100644 --- a/coderd/database/unique_constraint.go +++ b/coderd/database/unique_constraint.go @@ -9,6 +9,7 @@ const ( UniqueAgentStatsPkey UniqueConstraint = "agent_stats_pkey" // ALTER TABLE ONLY workspace_agent_stats ADD CONSTRAINT agent_stats_pkey PRIMARY KEY (id); UniqueAPIKeysPkey UniqueConstraint = "api_keys_pkey" // ALTER TABLE ONLY api_keys ADD CONSTRAINT api_keys_pkey PRIMARY KEY (id); UniqueAuditLogsPkey UniqueConstraint = "audit_logs_pkey" // ALTER TABLE ONLY audit_logs ADD CONSTRAINT audit_logs_pkey PRIMARY KEY (id); + UniqueConnectionLogsPkey UniqueConstraint = "connection_logs_pkey" // ALTER TABLE ONLY connection_logs ADD CONSTRAINT connection_logs_pkey PRIMARY KEY (id); UniqueCryptoKeysPkey UniqueConstraint = "crypto_keys_pkey" // ALTER TABLE ONLY crypto_keys ADD CONSTRAINT crypto_keys_pkey PRIMARY KEY (feature, sequence); UniqueCustomRolesUniqueKey UniqueConstraint = "custom_roles_unique_key" // ALTER TABLE ONLY custom_roles ADD CONSTRAINT custom_roles_unique_key UNIQUE (name, organization_id); UniqueDbcryptKeysActiveKeyDigestKey UniqueConstraint = "dbcrypt_keys_active_key_digest_key" // ALTER TABLE ONLY dbcrypt_keys ADD CONSTRAINT dbcrypt_keys_active_key_digest_key UNIQUE (active_key_digest); @@ -100,6 +101,7 @@ const ( UniqueWorkspaceResourcesPkey UniqueConstraint = "workspace_resources_pkey" // ALTER TABLE ONLY workspace_resources ADD CONSTRAINT workspace_resources_pkey PRIMARY KEY (id); UniqueWorkspacesPkey UniqueConstraint = 
"workspaces_pkey" // ALTER TABLE ONLY workspaces ADD CONSTRAINT workspaces_pkey PRIMARY KEY (id); UniqueIndexAPIKeyName UniqueConstraint = "idx_api_key_name" // CREATE UNIQUE INDEX idx_api_key_name ON api_keys USING btree (user_id, token_name) WHERE (login_type = 'token'::login_type); + UniqueIndexConnectionLogsConnectionIDWorkspaceIDAgentName UniqueConstraint = "idx_connection_logs_connection_id_workspace_id_agent_name" // CREATE UNIQUE INDEX idx_connection_logs_connection_id_workspace_id_agent_name ON connection_logs USING btree (connection_id, workspace_id, agent_name); UniqueIndexCustomRolesNameLower UniqueConstraint = "idx_custom_roles_name_lower" // CREATE UNIQUE INDEX idx_custom_roles_name_lower ON custom_roles USING btree (lower(name)); UniqueIndexOrganizationNameLower UniqueConstraint = "idx_organization_name_lower" // CREATE UNIQUE INDEX idx_organization_name_lower ON organizations USING btree (lower(name)) WHERE (deleted = false); UniqueIndexProvisionerDaemonsOrgNameOwnerKey UniqueConstraint = "idx_provisioner_daemons_org_name_owner_key" // CREATE UNIQUE INDEX idx_provisioner_daemons_org_name_owner_key ON provisioner_daemons USING btree (organization_id, name, lower(COALESCE((tags ->> 'owner'::text), ''::text))); diff --git a/coderd/dynamicparameters/resolver.go b/coderd/dynamicparameters/resolver.go index bd8e2294cf136..7fc67d29a0d55 100644 --- a/coderd/dynamicparameters/resolver.go +++ b/coderd/dynamicparameters/resolver.go @@ -55,19 +55,21 @@ func ResolveParameters( values[preset.Name] = parameterValue{Source: sourcePreset, Value: preset.Value} } - // originalValues is going to be used to detect if a user tried to change + // originalInputValues is going to be used to detect if a user tried to change // an immutable parameter after the first build. - originalValues := make(map[string]parameterValue, len(values)) + // The actual input values are mutated based on attributes like mutability + // and ephemerality. 
+ originalInputValues := make(map[string]parameterValue, len(values)) for name, value := range values { // Store the original values for later use. - originalValues[name] = value + originalInputValues[name] = value } // Render the parameters using the values that were supplied to the previous build. // // This is how the form should look to the user on their workspace settings page. // This is the original form truth that our validations should initially be based on. - output, diags := renderer.Render(ctx, ownerID, values.ValuesMap()) + output, diags := renderer.Render(ctx, ownerID, previousValuesMap) if diags.HasErrors() { // Top level diagnostics should break the build. Previous values (and new) should // always be valid. If there is a case where this is not true, then this has to @@ -91,22 +93,6 @@ func ResolveParameters( delete(values, parameter.Name) } } - - // Immutable parameters should also not be allowed to be changed from - // the previous build. Remove any values taken from the preset or - // new build params. This forces the value to be the same as it was before. - // - // We do this so the next form render uses the original immutable value. - if !firstBuild && !parameter.Mutable { - delete(values, parameter.Name) - prev, ok := previousValuesMap[parameter.Name] - if ok { - values[parameter.Name] = parameterValue{ - Value: prev, - Source: sourcePrevious, - } - } - } } // This is the final set of values that will be used. Any errors at this stage @@ -116,7 +102,7 @@ func ResolveParameters( return nil, parameterValidationError(diags) } - // parameterNames is going to be used to remove any excess values that were left + // parameterNames is going to be used to remove any excess values left // around without a parameter. 
parameterNames := make(map[string]struct{}, len(output.Parameters)) parameterError := parameterValidationError(nil) @@ -124,15 +110,20 @@ func ResolveParameters( parameterNames[parameter.Name] = struct{}{} if !firstBuild && !parameter.Mutable { - originalValue, ok := originalValues[parameter.Name] + // previousValuesMap should be used over the first render output + // for the previous state of parameters. The previous build + // should emit all values, so the previousValuesMap should be + // complete with all parameter values (user specified and defaults) + originalValue, ok := previousValuesMap[parameter.Name] + // Immutable parameters should not be changed after the first build. - // If the value matches the original value, that is fine. + // If the value matches the previous input value, that is fine. // - // If the original value is not set, that means this is a new parameter. New + // If the previous value is not set, that means this is a new parameter. New // immutable parameters are allowed. This is an opinionated choice to prevent // workspaces failing to update or delete. Ideally we would block this, as // immutable parameters should only be able to be set at creation time. 
- if ok && parameter.Value.AsString() != originalValue.Value { + if ok && parameter.Value.AsString() != originalValue { var src *hcl.Range if parameter.Source != nil { src = ¶meter.Source.HCLBlock().TypeRange diff --git a/coderd/dynamicparameters/resolver_test.go b/coderd/dynamicparameters/resolver_test.go index ec5218613ff03..e6675e6f4c7dc 100644 --- a/coderd/dynamicparameters/resolver_test.go +++ b/coderd/dynamicparameters/resolver_test.go @@ -10,6 +10,7 @@ import ( "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/dynamicparameters" "github.com/coder/coder/v2/coderd/dynamicparameters/rendermock" + "github.com/coder/coder/v2/coderd/httpapi/httperror" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/testutil" "github.com/coder/preview" @@ -56,4 +57,69 @@ func TestResolveParameters(t *testing.T) { require.NoError(t, err) require.Equal(t, map[string]string{"immutable": "foo"}, values) }) + + // Tests a parameter going from mutable -> immutable + t.Run("BecameImmutable", func(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + render := rendermock.NewMockRenderer(ctrl) + + mutable := previewtypes.ParameterData{ + Name: "immutable", + Type: previewtypes.ParameterTypeString, + FormType: provider.ParameterFormTypeInput, + Mutable: true, + DefaultValue: previewtypes.StringLiteral("foo"), + Required: true, + } + immutable := mutable + immutable.Mutable = false + + // A single immutable parameter with no previous value. + render.EXPECT(). + Render(gomock.Any(), gomock.Any(), gomock.Any()). + // Return the mutable param first + Return(&preview.Output{ + Parameters: []previewtypes.Parameter{ + { + ParameterData: mutable, + Value: previewtypes.StringLiteral("foo"), + Diagnostics: nil, + }, + }, + }, nil) + + render.EXPECT(). + Render(gomock.Any(), gomock.Any(), gomock.Any()). 
+ // Then the immutable param + Return(&preview.Output{ + Parameters: []previewtypes.Parameter{ + { + ParameterData: immutable, + // The user set the value to bar + Value: previewtypes.StringLiteral("bar"), + Diagnostics: nil, + }, + }, + }, nil) + + ctx := testutil.Context(t, testutil.WaitShort) + _, err := dynamicparameters.ResolveParameters(ctx, uuid.New(), render, false, + []database.WorkspaceBuildParameter{ + {Name: "immutable", Value: "foo"}, // Previous value foo + }, + []codersdk.WorkspaceBuildParameter{ + {Name: "immutable", Value: "bar"}, // New value + }, + []database.TemplateVersionPresetParameter{}, // No preset values + ) + require.Error(t, err) + resp, ok := httperror.IsResponder(err) + require.True(t, ok) + + _, respErr := resp.Response() + require.Len(t, respErr.Validations, 1) + require.Contains(t, respErr.Validations[0].Error(), "is not mutable") + }) } diff --git a/coderd/members.go b/coderd/members.go index 5a031fe7eab90..0bd5bb1fbc8bd 100644 --- a/coderd/members.go +++ b/coderd/members.go @@ -195,7 +195,7 @@ func (api *API) paginatedMembers(rw http.ResponseWriter, r *http.Request) { var ( ctx = r.Context() organization = httpmw.OrganizationParam(r) - paginationParams, ok = parsePagination(rw, r) + paginationParams, ok = ParsePagination(rw, r) ) if !ok { return diff --git a/coderd/pagination.go b/coderd/pagination.go index 0d01220d195e7..011f8df9e7bd4 100644 --- a/coderd/pagination.go +++ b/coderd/pagination.go @@ -9,9 +9,9 @@ import ( "github.com/coder/coder/v2/codersdk" ) -// parsePagination extracts pagination query params from the http request. +// ParsePagination extracts pagination query params from the http request. // If an error is encountered, the error is written to w and ok is set to false. 
-func parsePagination(w http.ResponseWriter, r *http.Request) (p codersdk.Pagination, ok bool) { +func ParsePagination(w http.ResponseWriter, r *http.Request) (p codersdk.Pagination, ok bool) { ctx := r.Context() queryParams := r.URL.Query() parser := httpapi.NewQueryParamParser() diff --git a/coderd/pagination_internal_test.go b/coderd/pagination_test.go similarity index 96% rename from coderd/pagination_internal_test.go rename to coderd/pagination_test.go index 18d98c2fab319..f6e1aab7067f4 100644 --- a/coderd/pagination_internal_test.go +++ b/coderd/pagination_test.go @@ -1,4 +1,4 @@ -package coderd +package coderd_test import ( "context" @@ -10,6 +10,7 @@ import ( "github.com/google/uuid" "github.com/stretchr/testify/require" + "github.com/coder/coder/v2/coderd" "github.com/coder/coder/v2/codersdk" ) @@ -123,7 +124,7 @@ func TestPagination(t *testing.T) { query.Set("offset", c.Offset) r.URL.RawQuery = query.Encode() - params, ok := parsePagination(rw, r) + params, ok := coderd.ParsePagination(rw, r) if c.ExpectedError == "" { require.True(t, ok, "expect ok") require.Equal(t, c.ExpectedParams, params, "expected params") diff --git a/coderd/rbac/authz.go b/coderd/rbac/authz.go index f57ed2585c068..fcb6621a34cee 100644 --- a/coderd/rbac/authz.go +++ b/coderd/rbac/authz.go @@ -65,6 +65,7 @@ const ( SubjectTypeUser SubjectType = "user" SubjectTypeProvisionerd SubjectType = "provisionerd" SubjectTypeAutostart SubjectType = "autostart" + SubjectTypeConnectionLogger SubjectType = "connection_logger" SubjectTypeJobReaper SubjectType = "job_reaper" SubjectTypeResourceMonitor SubjectType = "resource_monitor" SubjectTypeCryptoKeyRotator SubjectType = "crypto_key_rotator" diff --git a/coderd/rbac/object_gen.go b/coderd/rbac/object_gen.go index d0d5dc4aab0fe..5fb3cc2bd8a3b 100644 --- a/coderd/rbac/object_gen.go +++ b/coderd/rbac/object_gen.go @@ -54,6 +54,14 @@ var ( Type: "audit_log", } + // ResourceConnectionLog + // Valid Actions + // - "ActionRead" :: read connection logs 
+ // - "ActionUpdate" :: upsert connection log entries + ResourceConnectionLog = Object{ + Type: "connection_log", + } + // ResourceCryptoKey // Valid Actions // - "ActionCreate" :: create crypto keys @@ -368,6 +376,7 @@ func AllResources() []Objecter { ResourceAssignOrgRole, ResourceAssignRole, ResourceAuditLog, + ResourceConnectionLog, ResourceCryptoKey, ResourceDebugInfo, ResourceDeploymentConfig, diff --git a/coderd/rbac/policy/policy.go b/coderd/rbac/policy/policy.go index a3ad614439c9a..a10abfb9605ca 100644 --- a/coderd/rbac/policy/policy.go +++ b/coderd/rbac/policy/policy.go @@ -138,6 +138,12 @@ var RBACPermissions = map[string]PermissionDefinition{ ActionCreate: actDef("create new audit log entries"), }, }, + "connection_log": { + Actions: map[Action]ActionDefinition{ + ActionRead: actDef("read connection logs"), + ActionUpdate: actDef("upsert connection log entries"), + }, + }, "deployment_config": { Actions: map[Action]ActionDefinition{ ActionRead: actDef("read deployment config"), diff --git a/coderd/rbac/regosql/configs.go b/coderd/rbac/regosql/configs.go index 2cb03b238f471..69d425d9dba2f 100644 --- a/coderd/rbac/regosql/configs.go +++ b/coderd/rbac/regosql/configs.go @@ -50,6 +50,20 @@ func AuditLogConverter() *sqltypes.VariableConverter { return matcher } +func ConnectionLogConverter() *sqltypes.VariableConverter { + matcher := sqltypes.NewVariableConverter().RegisterMatcher( + resourceIDMatcher(), + sqltypes.StringVarMatcher("COALESCE(connection_logs.organization_id :: text, '')", []string{"input", "object", "org_owner"}), + // Connection logs have no user owner, only owner by an organization. 
+ sqltypes.AlwaysFalse(userOwnerMatcher()), + ) + matcher.RegisterMatcher( + sqltypes.AlwaysFalse(groupACLMatcher(matcher)), + sqltypes.AlwaysFalse(userACLMatcher(matcher)), + ) + return matcher +} + func UserConverter() *sqltypes.VariableConverter { matcher := sqltypes.NewVariableConverter().RegisterMatcher( resourceIDMatcher(), diff --git a/coderd/rbac/roles.go b/coderd/rbac/roles.go index ebc7ff8f12070..b8d3f959ce477 100644 --- a/coderd/rbac/roles.go +++ b/coderd/rbac/roles.go @@ -315,6 +315,7 @@ func ReloadBuiltinRoles(opts *RoleOptions) { Site: Permissions(map[string][]policy.Action{ ResourceAssignOrgRole.Type: {policy.ActionRead}, ResourceAuditLog.Type: {policy.ActionRead}, + ResourceConnectionLog.Type: {policy.ActionRead}, // Allow auditors to see the resources that audit logs reflect. ResourceTemplate.Type: {policy.ActionRead, policy.ActionViewInsights}, ResourceUser.Type: {policy.ActionRead}, @@ -456,7 +457,8 @@ func ReloadBuiltinRoles(opts *RoleOptions) { Site: []Permission{}, Org: map[string][]Permission{ organizationID.String(): Permissions(map[string][]policy.Action{ - ResourceAuditLog.Type: {policy.ActionRead}, + ResourceAuditLog.Type: {policy.ActionRead}, + ResourceConnectionLog.Type: {policy.ActionRead}, // Allow auditors to see the resources that audit logs reflect. 
ResourceTemplate.Type: {policy.ActionRead, policy.ActionViewInsights}, ResourceGroup.Type: {policy.ActionRead}, diff --git a/coderd/rbac/roles_test.go b/coderd/rbac/roles_test.go index 3e6f7d1e330d5..267a99993e642 100644 --- a/coderd/rbac/roles_test.go +++ b/coderd/rbac/roles_test.go @@ -849,6 +849,15 @@ func TestRolePermissions(t *testing.T) { }, }, }, + { + Name: "ConnectionLogs", + Actions: []policy.Action{policy.ActionRead, policy.ActionUpdate}, + Resource: rbac.ResourceConnectionLog, + AuthorizeMap: map[bool][]hasAuthSubjects{ + true: {owner}, + false: {setOtherOrg, setOrgNotMe, memberMe, orgMemberMe, templateAdmin, userAdmin}, + }, + }, } // We expect every permission to be tested above. diff --git a/coderd/searchquery/search.go b/coderd/searchquery/search.go index 634d4b6632ed3..d35f3c94b5ff7 100644 --- a/coderd/searchquery/search.go +++ b/coderd/searchquery/search.go @@ -86,6 +86,63 @@ func AuditLogs(ctx context.Context, db database.Store, query string) (database.G return filter, countFilter, parser.Errors } +func ConnectionLogs(ctx context.Context, db database.Store, query string, apiKey database.APIKey) (database.GetConnectionLogsOffsetParams, database.CountConnectionLogsParams, []codersdk.ValidationError) { + // Always lowercase for all searches. + query = strings.ToLower(query) + values, errors := searchTerms(query, func(term string, values url.Values) error { + values.Add("search", term) + return nil + }) + if len(errors) > 0 { + // nolint:exhaustruct // We don't need to initialize these structs because we return an error. 
+ return database.GetConnectionLogsOffsetParams{}, database.CountConnectionLogsParams{}, errors + } + + parser := httpapi.NewQueryParamParser() + filter := database.GetConnectionLogsOffsetParams{ + OrganizationID: parseOrganization(ctx, db, parser, values, "organization"), + WorkspaceOwner: parser.String(values, "", "workspace_owner"), + WorkspaceOwnerEmail: parser.String(values, "", "workspace_owner_email"), + Type: string(httpapi.ParseCustom(parser, values, "", "type", httpapi.ParseEnum[database.ConnectionType])), + Username: parser.String(values, "", "username"), + UserEmail: parser.String(values, "", "user_email"), + ConnectedAfter: parser.Time3339Nano(values, time.Time{}, "connected_after"), + ConnectedBefore: parser.Time3339Nano(values, time.Time{}, "connected_before"), + WorkspaceID: parser.UUID(values, uuid.Nil, "workspace_id"), + ConnectionID: parser.UUID(values, uuid.Nil, "connection_id"), + Status: string(httpapi.ParseCustom(parser, values, "", "status", httpapi.ParseEnum[codersdk.ConnectionLogStatus])), + } + + if filter.Username == "me" { + filter.UserID = apiKey.UserID + filter.Username = "" + } + + if filter.WorkspaceOwner == "me" { + filter.WorkspaceOwnerID = apiKey.UserID + filter.WorkspaceOwner = "" + } + + // This MUST be kept in sync with the above + countFilter := database.CountConnectionLogsParams{ + OrganizationID: filter.OrganizationID, + WorkspaceOwner: filter.WorkspaceOwner, + WorkspaceOwnerID: filter.WorkspaceOwnerID, + WorkspaceOwnerEmail: filter.WorkspaceOwnerEmail, + Type: filter.Type, + UserID: filter.UserID, + Username: filter.Username, + UserEmail: filter.UserEmail, + ConnectedAfter: filter.ConnectedAfter, + ConnectedBefore: filter.ConnectedBefore, + WorkspaceID: filter.WorkspaceID, + ConnectionID: filter.ConnectionID, + Status: filter.Status, + } + parser.ErrorExcessParams(values) + return filter, countFilter, parser.Errors +} + func Users(query string) (database.GetUsersParams, []codersdk.ValidationError) { // Always lowercase for 
all searches. query = strings.ToLower(query) diff --git a/coderd/searchquery/search_test.go b/coderd/searchquery/search_test.go index ad5f2df966ef9..4744b57edff4a 100644 --- a/coderd/searchquery/search_test.go +++ b/coderd/searchquery/search_test.go @@ -408,6 +408,72 @@ func TestSearchAudit(t *testing.T) { } } +func TestSearchConnectionLogs(t *testing.T) { + t.Parallel() + t.Run("All", func(t *testing.T) { + t.Parallel() + + orgID := uuid.New() + workspaceOwnerID := uuid.New() + workspaceID := uuid.New() + connectionID := uuid.New() + + db, _ := dbtestutil.NewDB(t) + dbgen.Organization(t, db, database.Organization{ + ID: orgID, + Name: "testorg", + }) + dbgen.User(t, db, database.User{ + ID: workspaceOwnerID, + Username: "testowner", + Email: "owner@example.com", + }) + + query := fmt.Sprintf(`organization:testorg workspace_owner:testowner `+ + `workspace_owner_email:owner@example.com type:port_forwarding username:testuser `+ + `user_email:test@example.com connected_after:"2023-01-01T00:00:00Z" `+ + `connected_before:"2023-01-16T12:00:00+12:00" workspace_id:%s connection_id:%s status:ongoing`, + workspaceID.String(), connectionID.String()) + + values, _, errs := searchquery.ConnectionLogs(context.Background(), db, query, database.APIKey{}) + require.Len(t, errs, 0) + + expected := database.GetConnectionLogsOffsetParams{ + OrganizationID: orgID, + WorkspaceOwner: "testowner", + WorkspaceOwnerEmail: "owner@example.com", + Type: string(database.ConnectionTypePortForwarding), + Username: "testuser", + UserEmail: "test@example.com", + ConnectedAfter: time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC), + ConnectedBefore: time.Date(2023, 1, 16, 0, 0, 0, 0, time.UTC), + WorkspaceID: workspaceID, + ConnectionID: connectionID, + Status: string(codersdk.ConnectionLogStatusOngoing), + } + + require.Equal(t, expected, values) + }) + + t.Run("Me", func(t *testing.T) { + t.Parallel() + + userID := uuid.New() + db, _ := dbtestutil.NewDB(t) + + query := `username:me workspace_owner:me` + 
values, _, errs := searchquery.ConnectionLogs(context.Background(), db, query, database.APIKey{UserID: userID}) + require.Len(t, errs, 0) + + expected := database.GetConnectionLogsOffsetParams{ + UserID: userID, + WorkspaceOwnerID: userID, + } + + require.Equal(t, expected, values) + }) +} + func TestSearchUsers(t *testing.T) { t.Parallel() testCases := []struct { diff --git a/coderd/templateversions.go b/coderd/templateversions.go index fa5a7ed1fe757..de069b5ca4723 100644 --- a/coderd/templateversions.go +++ b/coderd/templateversions.go @@ -807,7 +807,7 @@ func (api *API) templateVersionsByTemplate(rw http.ResponseWriter, r *http.Reque ctx := r.Context() template := httpmw.TemplateParam(r) - paginationParams, ok := parsePagination(rw, r) + paginationParams, ok := ParsePagination(rw, r) if !ok { return } diff --git a/coderd/users.go b/coderd/users.go index e2f6fd79c7d75..7fbb8e7d04cdf 100644 --- a/coderd/users.go +++ b/coderd/users.go @@ -290,7 +290,7 @@ func (api *API) GetUsers(rw http.ResponseWriter, r *http.Request) ([]database.Us return nil, -1, false } - paginationParams, ok := parsePagination(rw, r) + paginationParams, ok := ParsePagination(rw, r) if !ok { return nil, -1, false } diff --git a/coderd/workspaceagents.go b/coderd/workspaceagents.go index 0ab28b340a1d1..3ae57d8394d43 100644 --- a/coderd/workspaceagents.go +++ b/coderd/workspaceagents.go @@ -801,6 +801,106 @@ func (api *API) workspaceAgentListeningPorts(rw http.ResponseWriter, r *http.Req httpapi.Write(ctx, rw, http.StatusOK, portsResponse) } +// @Summary Watch workspace agent for container updates. 
+// @ID watch-workspace-agent-for-container-updates +// @Security CoderSessionToken +// @Produce json +// @Tags Agents +// @Param workspaceagent path string true "Workspace agent ID" format(uuid) +// @Success 200 {object} codersdk.WorkspaceAgentListContainersResponse +// @Router /workspaceagents/{workspaceagent}/containers/watch [get] +func (api *API) watchWorkspaceAgentContainers(rw http.ResponseWriter, r *http.Request) { + var ( + ctx = r.Context() + workspaceAgent = httpmw.WorkspaceAgentParam(r) + ) + + // If the agent is unreachable, the request will hang. Assume that if we + // don't get a response after 30s that the agent is unreachable. + dialCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + apiAgent, err := db2sdk.WorkspaceAgent( + api.DERPMap(), + *api.TailnetCoordinator.Load(), + workspaceAgent, + nil, + nil, + nil, + api.AgentInactiveDisconnectTimeout, + api.DeploymentValues.AgentFallbackTroubleshootingURL.String(), + ) + if err != nil { + httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ + Message: "Internal error reading workspace agent.", + Detail: err.Error(), + }) + return + } + if apiAgent.Status != codersdk.WorkspaceAgentConnected { + httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ + Message: fmt.Sprintf("Agent state is %q, it must be in the %q state.", apiAgent.Status, codersdk.WorkspaceAgentConnected), + }) + return + } + + agentConn, release, err := api.agentProvider.AgentConn(dialCtx, workspaceAgent.ID) + if err != nil { + httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ + Message: "Internal error dialing workspace agent.", + Detail: err.Error(), + }) + return + } + defer release() + + watcherLogger := api.Logger.Named("agent_container_watcher").With(slog.F("agent_id", workspaceAgent.ID)) + containersCh, closer, err := agentConn.WatchContainers(ctx, watcherLogger) + if err != nil { + httpapi.Write(ctx, rw, http.StatusInternalServerError, 
codersdk.Response{ + Message: "Internal error watching agent's containers.", + Detail: err.Error(), + }) + return + } + defer closer.Close() + + conn, err := websocket.Accept(rw, r, nil) + if err != nil { + httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ + Message: "Failed to upgrade connection to websocket.", + Detail: err.Error(), + }) + return + } + + // Here we close the websocket for reading, so that the websocket library will handle pings and + // close frames. + _ = conn.CloseRead(context.Background()) + + ctx, wsNetConn := codersdk.WebsocketNetConn(ctx, conn, websocket.MessageText) + defer wsNetConn.Close() + + go httpapi.Heartbeat(ctx, conn) + + encoder := json.NewEncoder(wsNetConn) + + for { + select { + case <-api.ctx.Done(): + return + + case <-ctx.Done(): + return + + case containers := <-containersCh: + if err := encoder.Encode(containers); err != nil { + api.Logger.Error(ctx, "encode containers", slog.Error(err)) + return + } + } + } +} + // @Summary Get running containers for workspace agent // @ID get-running-containers-for-workspace-agent // @Security CoderSessionToken diff --git a/coderd/workspaceagents_test.go b/coderd/workspaceagents_test.go index 899c863cc5fd6..30859cb6391e6 100644 --- a/coderd/workspaceagents_test.go +++ b/coderd/workspaceagents_test.go @@ -1386,6 +1386,192 @@ func TestWorkspaceAgentContainers(t *testing.T) { }) } +func TestWatchWorkspaceAgentDevcontainers(t *testing.T) { + t.Parallel() + + var ( + ctx = testutil.Context(t, testutil.WaitLong) + logger = slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug) + mClock = quartz.NewMock(t) + updaterTickerTrap = mClock.Trap().TickerFunc("updaterLoop") + mCtrl = gomock.NewController(t) + mCCLI = acmock.NewMockContainerCLI(mCtrl) + + client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{Logger: &logger}) + user = coderdtest.CreateFirstUser(t, client) + r = dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{ + 
OrganizationID: user.OrganizationID, + OwnerID: user.UserID, + }).WithAgent(func(agents []*proto.Agent) []*proto.Agent { + return agents + }).Do() + + fakeContainer1 = codersdk.WorkspaceAgentContainer{ + ID: "container1", + CreatedAt: dbtime.Now(), + FriendlyName: "container1", + Image: "busybox:latest", + Labels: map[string]string{ + agentcontainers.DevcontainerLocalFolderLabel: "/home/coder/project1", + agentcontainers.DevcontainerConfigFileLabel: "/home/coder/project1/.devcontainer/devcontainer.json", + }, + Running: true, + Status: "running", + } + + fakeContainer2 = codersdk.WorkspaceAgentContainer{ + ID: "container1", + CreatedAt: dbtime.Now(), + FriendlyName: "container2", + Image: "busybox:latest", + Labels: map[string]string{ + agentcontainers.DevcontainerLocalFolderLabel: "/home/coder/project2", + agentcontainers.DevcontainerConfigFileLabel: "/home/coder/project2/.devcontainer/devcontainer.json", + }, + Running: true, + Status: "running", + } + ) + + stages := []struct { + containers []codersdk.WorkspaceAgentContainer + expected codersdk.WorkspaceAgentListContainersResponse + }{ + { + containers: []codersdk.WorkspaceAgentContainer{fakeContainer1}, + expected: codersdk.WorkspaceAgentListContainersResponse{ + Containers: []codersdk.WorkspaceAgentContainer{fakeContainer1}, + Devcontainers: []codersdk.WorkspaceAgentDevcontainer{ + { + Name: "project1", + WorkspaceFolder: fakeContainer1.Labels[agentcontainers.DevcontainerLocalFolderLabel], + ConfigPath: fakeContainer1.Labels[agentcontainers.DevcontainerConfigFileLabel], + Status: "running", + Container: &fakeContainer1, + }, + }, + }, + }, + { + containers: []codersdk.WorkspaceAgentContainer{fakeContainer1, fakeContainer2}, + expected: codersdk.WorkspaceAgentListContainersResponse{ + Containers: []codersdk.WorkspaceAgentContainer{fakeContainer1, fakeContainer2}, + Devcontainers: []codersdk.WorkspaceAgentDevcontainer{ + { + Name: "project1", + WorkspaceFolder: 
fakeContainer1.Labels[agentcontainers.DevcontainerLocalFolderLabel], + ConfigPath: fakeContainer1.Labels[agentcontainers.DevcontainerConfigFileLabel], + Status: "running", + Container: &fakeContainer1, + }, + { + Name: "project2", + WorkspaceFolder: fakeContainer2.Labels[agentcontainers.DevcontainerLocalFolderLabel], + ConfigPath: fakeContainer2.Labels[agentcontainers.DevcontainerConfigFileLabel], + Status: "running", + Container: &fakeContainer2, + }, + }, + }, + }, + { + containers: []codersdk.WorkspaceAgentContainer{fakeContainer2}, + expected: codersdk.WorkspaceAgentListContainersResponse{ + Containers: []codersdk.WorkspaceAgentContainer{fakeContainer2}, + Devcontainers: []codersdk.WorkspaceAgentDevcontainer{ + { + Name: "", + WorkspaceFolder: fakeContainer1.Labels[agentcontainers.DevcontainerLocalFolderLabel], + ConfigPath: fakeContainer1.Labels[agentcontainers.DevcontainerConfigFileLabel], + Status: "stopped", + Container: nil, + }, + { + Name: "project2", + WorkspaceFolder: fakeContainer2.Labels[agentcontainers.DevcontainerLocalFolderLabel], + ConfigPath: fakeContainer2.Labels[agentcontainers.DevcontainerConfigFileLabel], + Status: "running", + Container: &fakeContainer2, + }, + }, + }, + }, + } + + // Set up initial state for immediate send on connection + mCCLI.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{Containers: stages[0].containers}, nil) + mCCLI.EXPECT().DetectArchitecture(gomock.Any(), gomock.Any()).Return("", nil).AnyTimes() + + _ = agenttest.New(t, client.URL, r.AgentToken, func(o *agent.Options) { + o.Logger = logger.Named("agent") + o.Devcontainers = true + o.DevcontainerAPIOptions = []agentcontainers.Option{ + agentcontainers.WithClock(mClock), + agentcontainers.WithContainerCLI(mCCLI), + agentcontainers.WithWatcher(watcher.NewNoop()), + } + }) + + resources := coderdtest.NewWorkspaceAgentWaiter(t, client, r.Workspace.ID).Wait() + require.Len(t, resources, 1, "expected one resource") + require.Len(t, 
resources[0].Agents, 1, "expected one agent") + agentID := resources[0].Agents[0].ID + + updaterTickerTrap.MustWait(ctx).MustRelease(ctx) + defer updaterTickerTrap.Close() + + containers, closer, err := client.WatchWorkspaceAgentContainers(ctx, agentID) + require.NoError(t, err) + defer func() { + closer.Close() + }() + + // Read initial state sent immediately on connection + var got codersdk.WorkspaceAgentListContainersResponse + select { + case <-ctx.Done(): + case got = <-containers: + } + require.NoError(t, ctx.Err()) + + require.Equal(t, stages[0].expected.Containers, got.Containers) + require.Len(t, got.Devcontainers, len(stages[0].expected.Devcontainers)) + for j, expectedDev := range stages[0].expected.Devcontainers { + gotDev := got.Devcontainers[j] + require.Equal(t, expectedDev.Name, gotDev.Name) + require.Equal(t, expectedDev.WorkspaceFolder, gotDev.WorkspaceFolder) + require.Equal(t, expectedDev.ConfigPath, gotDev.ConfigPath) + require.Equal(t, expectedDev.Status, gotDev.Status) + require.Equal(t, expectedDev.Container, gotDev.Container) + } + + // Process remaining stages through updater loop + for i, stage := range stages[1:] { + mCCLI.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{Containers: stage.containers}, nil) + + _, aw := mClock.AdvanceNext() + aw.MustWait(ctx) + + var got codersdk.WorkspaceAgentListContainersResponse + select { + case <-ctx.Done(): + case got = <-containers: + } + require.NoError(t, ctx.Err()) + + require.Equal(t, stages[i+1].expected.Containers, got.Containers) + require.Len(t, got.Devcontainers, len(stages[i+1].expected.Devcontainers)) + for j, expectedDev := range stages[i+1].expected.Devcontainers { + gotDev := got.Devcontainers[j] + require.Equal(t, expectedDev.Name, gotDev.Name) + require.Equal(t, expectedDev.WorkspaceFolder, gotDev.WorkspaceFolder) + require.Equal(t, expectedDev.ConfigPath, gotDev.ConfigPath) + require.Equal(t, expectedDev.Status, gotDev.Status) + require.Equal(t, 
expectedDev.Container, gotDev.Container) + } + } +} + func TestWorkspaceAgentRecreateDevcontainer(t *testing.T) { t.Parallel() diff --git a/coderd/workspaceagentsrpc.go b/coderd/workspaceagentsrpc.go index 1cbabad8ea622..0806118f2a832 100644 --- a/coderd/workspaceagentsrpc.go +++ b/coderd/workspaceagentsrpc.go @@ -139,7 +139,7 @@ func (api *API) workspaceAgentRPC(rw http.ResponseWriter, r *http.Request) { Database: api.Database, NotificationsEnqueuer: api.NotificationsEnqueuer, Pubsub: api.Pubsub, - Auditor: &api.Auditor, + ConnectionLogger: &api.ConnectionLogger, DerpMapFn: api.DERPMap, TailnetCoordinator: &api.TailnetCoordinator, AppearanceFetcher: &api.AppearanceFetcher, diff --git a/coderd/workspaceapps/db.go b/coderd/workspaceapps/db.go index 0b598a6f0aab9..61a9e218edc7f 100644 --- a/coderd/workspaceapps/db.go +++ b/coderd/workspaceapps/db.go @@ -3,7 +3,6 @@ package workspaceapps import ( "context" "database/sql" - "encoding/json" "fmt" "net/http" "net/url" @@ -18,7 +17,7 @@ import ( "golang.org/x/xerrors" "cdr.dev/slog" - "github.com/coder/coder/v2/coderd/audit" + "github.com/coder/coder/v2/coderd/connectionlog" "github.com/coder/coder/v2/coderd/cryptokeys" "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/dbauthz" @@ -40,7 +39,7 @@ type DBTokenProvider struct { // DashboardURL is the main dashboard access URL for error pages. 
DashboardURL *url.URL Authorizer rbac.Authorizer - Auditor *atomic.Pointer[audit.Auditor] + ConnectionLogger *atomic.Pointer[connectionlog.ConnectionLogger] Database database.Store DeploymentValues *codersdk.DeploymentValues OAuth2Configs *httpmw.OAuth2Configs @@ -54,7 +53,7 @@ var _ SignedTokenProvider = &DBTokenProvider{} func NewDBTokenProvider(log slog.Logger, accessURL *url.URL, authz rbac.Authorizer, - auditor *atomic.Pointer[audit.Auditor], + connectionLogger *atomic.Pointer[connectionlog.ConnectionLogger], db database.Store, cfg *codersdk.DeploymentValues, oauth2Cfgs *httpmw.OAuth2Configs, @@ -73,7 +72,7 @@ func NewDBTokenProvider(log slog.Logger, Logger: log, DashboardURL: accessURL, Authorizer: authz, - Auditor: auditor, + ConnectionLogger: connectionLogger, Database: db, DeploymentValues: cfg, OAuth2Configs: oauth2Cfgs, @@ -95,7 +94,7 @@ func (p *DBTokenProvider) Issue(ctx context.Context, rw http.ResponseWriter, r * // // permissions. dangerousSystemCtx := dbauthz.AsSystemRestricted(ctx) - aReq, commitAudit := p.auditInitRequest(ctx, rw, r) + aReq, commitAudit := p.connLogInitRequest(ctx, rw, r) defer commitAudit() appReq := issueReq.AppRequest.Normalize() @@ -386,20 +385,20 @@ func (p *DBTokenProvider) authorizeRequest(ctx context.Context, roles *rbac.Subj return false, warnings, nil } -type auditRequest struct { +type connLogRequest struct { time time.Time apiKey *database.APIKey dbReq *databaseRequest } -// auditInitRequest creates a new audit session and audit log for the given -// request, if one does not already exist. If an audit session already exists, -// it will be updated with the current timestamp. A session is used to reduce -// the number of audit logs created. +// connLogInitRequest creates a new connection log session and connect log for the +// given request, if one does not already exist. If a connection log session +// already exists, it will be updated with the current timestamp. 
A session is used to +// reduce the number of connection logs created. // // A session is unique to the agent, app, user and users IP. If any of these -// values change, a new session and audit log is created. -func (p *DBTokenProvider) auditInitRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) (aReq *auditRequest, commit func()) { +// values change, a new session and connect log is created. +func (p *DBTokenProvider) connLogInitRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) (aReq *connLogRequest, commit func()) { // Get the status writer from the request context so we can figure // out the HTTP status and autocommit the audit log. sw, ok := w.(*tracing.StatusWriter) @@ -407,12 +406,12 @@ func (p *DBTokenProvider) auditInitRequest(ctx context.Context, w http.ResponseW panic("dev error: http.ResponseWriter is not *tracing.StatusWriter") } - aReq = &auditRequest{ + aReq = &connLogRequest{ time: dbtime.Now(), } - // Set the commit function on the status writer to create an audit - // log, this ensures that the status and response body are available. + // Set the commit function on the status writer to create a connection log + // this ensures that the status and response body are available. var committed bool return aReq, func() { if committed { @@ -422,7 +421,7 @@ func (p *DBTokenProvider) auditInitRequest(ctx context.Context, w http.ResponseW if aReq.dbReq == nil { // App doesn't exist, there's information in the Request - // struct but we need UUIDs for audit logging. + // struct but we need UUIDs for connection logging. return } @@ -434,28 +433,25 @@ func (p *DBTokenProvider) auditInitRequest(ctx context.Context, w http.ResponseW ip := r.RemoteAddr // Approximation of the status code. 
- statusCode := sw.Status + // #nosec G115 - Safe conversion as HTTP status code is expected to be within int32 range (typically 100-599) + var statusCode int32 = int32(sw.Status) if statusCode == 0 { statusCode = http.StatusOK } - type additionalFields struct { - audit.AdditionalFields - SlugOrPort string `json:"slug_or_port,omitempty"` - } - appInfo := additionalFields{ - AdditionalFields: audit.AdditionalFields{ - WorkspaceOwner: aReq.dbReq.Workspace.OwnerUsername, - WorkspaceName: aReq.dbReq.Workspace.Name, - WorkspaceID: aReq.dbReq.Workspace.ID, - }, - } + var ( + connType database.ConnectionType + slugOrPort = aReq.dbReq.AppSlugOrPort + ) + switch { case aReq.dbReq.AccessMethod == AccessMethodTerminal: - appInfo.SlugOrPort = "terminal" + connType = database.ConnectionTypeWorkspaceApp + slugOrPort = "terminal" case aReq.dbReq.App.ID == uuid.Nil: - // If this isn't an app or a terminal, it's a port. - appInfo.SlugOrPort = aReq.dbReq.AppSlugOrPort + connType = database.ConnectionTypePortForwarding + default: + connType = database.ConnectionTypeWorkspaceApp } // If we end up logging, ensure relevant fields are set. @@ -465,7 +461,7 @@ func (p *DBTokenProvider) auditInitRequest(ctx context.Context, w http.ResponseW slog.F("app_id", aReq.dbReq.App.ID), slog.F("user_id", userID), slog.F("user_agent", userAgent), - slog.F("app_slug_or_port", appInfo.SlugOrPort), + slog.F("app_slug_or_port", slugOrPort), slog.F("status_code", statusCode), ) @@ -485,9 +481,8 @@ func (p *DBTokenProvider) auditInitRequest(ctx context.Context, w http.ResponseW UserID: userID, // Can be unset, in which case uuid.Nil is fine. 
Ip: ip, UserAgent: userAgent, - SlugOrPort: appInfo.SlugOrPort, - // #nosec G115 - Safe conversion as HTTP status code is expected to be within int32 range (typically 100-599) - StatusCode: int32(statusCode), + SlugOrPort: slugOrPort, + StatusCode: statusCode, StartedAt: aReq.time, UpdatedAt: aReq.time, }) @@ -500,7 +495,7 @@ func (p *DBTokenProvider) auditInitRequest(ctx context.Context, w http.ResponseW if err != nil { logger.Error(ctx, "update workspace app audit session failed", slog.Error(err)) - // Avoid spamming the audit log if deduplication failed, this should + // Avoid spamming the connection log if deduplication failed, this should // only happen if there are problems communicating with the database. return } @@ -511,51 +506,37 @@ func (p *DBTokenProvider) auditInitRequest(ctx context.Context, w http.ResponseW return } - // Marshal additional fields only if we're writing an audit log entry. - appInfoBytes, err := json.Marshal(appInfo) - if err != nil { - logger.Error(ctx, "marshal additional fields failed", slog.Error(err)) - } + connLogger := *p.ConnectionLogger.Load() + + err = connLogger.Upsert(ctx, database.UpsertConnectionLogParams{ + ID: uuid.New(), + Time: aReq.time, + OrganizationID: aReq.dbReq.Workspace.OrganizationID, + WorkspaceOwnerID: aReq.dbReq.Workspace.OwnerID, + WorkspaceID: aReq.dbReq.Workspace.ID, + WorkspaceName: aReq.dbReq.Workspace.Name, + AgentName: aReq.dbReq.Agent.Name, + Type: connType, + Code: sql.NullInt32{ + Int32: statusCode, + Valid: true, + }, + Ip: database.ParseIP(ip), + UserAgent: sql.NullString{Valid: userAgent != "", String: userAgent}, + UserID: uuid.NullUUID{ + UUID: userID, + Valid: userID != uuid.Nil, + }, + SlugOrPort: sql.NullString{Valid: slugOrPort != "", String: slugOrPort}, + ConnectionStatus: database.ConnectionStatusConnected, - // We use the background audit function instead of init request - // here because we don't know the resource type ahead of time. 
- // This also allows us to log unauthenticated access. - auditor := *p.Auditor.Load() - requestID := httpmw.RequestID(r) - switch { - case aReq.dbReq.App.ID != uuid.Nil: - audit.BackgroundAudit(ctx, &audit.BackgroundAuditParams[database.WorkspaceApp]{ - Audit: auditor, - Log: logger, - - Action: database.AuditActionOpen, - OrganizationID: aReq.dbReq.Workspace.OrganizationID, - UserID: userID, - RequestID: requestID, - Time: aReq.time, - Status: statusCode, - IP: ip, - UserAgent: userAgent, - New: aReq.dbReq.App, - AdditionalFields: appInfoBytes, - }) - default: - // Web terminal, port app, etc. - audit.BackgroundAudit(ctx, &audit.BackgroundAuditParams[database.WorkspaceAgent]{ - Audit: auditor, - Log: logger, - - Action: database.AuditActionOpen, - OrganizationID: aReq.dbReq.Workspace.OrganizationID, - UserID: userID, - RequestID: requestID, - Time: aReq.time, - Status: statusCode, - IP: ip, - UserAgent: userAgent, - New: aReq.dbReq.Agent, - AdditionalFields: appInfoBytes, - }) + // N/A + ConnectionID: uuid.NullUUID{}, + DisconnectReason: sql.NullString{}, + }) + if err != nil { + logger.Error(ctx, "upsert connection log failed", slog.Error(err)) + return } } } diff --git a/coderd/workspaceapps/db_test.go b/coderd/workspaceapps/db_test.go index a1f3fb452fbe5..e78762c035565 100644 --- a/coderd/workspaceapps/db_test.go +++ b/coderd/workspaceapps/db_test.go @@ -3,7 +3,6 @@ package workspaceapps_test import ( "context" "database/sql" - "encoding/json" "fmt" "io" "net" @@ -22,10 +21,9 @@ import ( "github.com/stretchr/testify/require" "github.com/coder/coder/v2/agent/agenttest" - "github.com/coder/coder/v2/coderd/audit" "github.com/coder/coder/v2/coderd/coderdtest" + "github.com/coder/coder/v2/coderd/connectionlog" "github.com/coder/coder/v2/coderd/database" - "github.com/coder/coder/v2/coderd/database/dbauthz" "github.com/coder/coder/v2/coderd/httpmw" "github.com/coder/coder/v2/coderd/jwtutils" "github.com/coder/coder/v2/coderd/tracing" @@ -83,12 +81,12 @@ func 
Test_ResolveRequest(t *testing.T) { deploymentValues.Dangerous.AllowPathAppSharing = true deploymentValues.Dangerous.AllowPathAppSiteOwnerAccess = true - auditor := audit.NewMock() + connLogger := connectionlog.NewFake() t.Cleanup(func() { if t.Failed() { return } - assert.Len(t, auditor.AuditLogs(), 0, "one or more test cases produced unexpected audit logs, did you replace the auditor or forget to call ResetLogs?") + assert.Len(t, connLogger.ConnectionLogs(), 0, "one or more test cases produced unexpected connection logs, did you replace the auditor or forget to call ResetLogs?") }) client, closer, api := coderdtest.NewWithAPI(t, &coderdtest.Options{ AppHostname: "*.test.coder.com", @@ -105,7 +103,7 @@ func Test_ResolveRequest(t *testing.T) { "CF-Connecting-IP", }, }, - Auditor: auditor, + ConnectionLogger: connLogger, }) t.Cleanup(func() { _ = closer.Close() @@ -231,23 +229,8 @@ func Test_ResolveRequest(t *testing.T) { } require.NotEqual(t, uuid.Nil, agentID) - //nolint:gocritic // This is a test, allow dbauthz.AsSystemRestricted. - agent, err := api.Database.GetWorkspaceAgentByID(dbauthz.AsSystemRestricted(ctx), agentID) - require.NoError(t, err) - - //nolint:gocritic // This is a test, allow dbauthz.AsSystemRestricted. - apps, err := api.Database.GetWorkspaceAppsByAgentID(dbauthz.AsSystemRestricted(ctx), agentID) - require.NoError(t, err) - appsBySlug := make(map[string]database.WorkspaceApp, len(apps)) - for _, app := range apps { - appsBySlug[app.Slug] = app - } - // Reset audit logs so cleanup check can pass. 
- auditor.ResetLogs() - - assertAuditAgent := auditAsserter[database.WorkspaceAgent](workspace) - assertAuditApp := auditAsserter[database.WorkspaceApp](workspace) + connLogger.Reset() t.Run("OK", func(t *testing.T) { t.Parallel() @@ -285,9 +268,9 @@ func Test_ResolveRequest(t *testing.T) { AppSlugOrPort: app, }).Normalize() - auditor := audit.NewMock() + connLogger := connectionlog.NewFake() auditableIP := testutil.RandomIPv6(t) - auditableUA := "Tidua" + auditableUA := "Noitcennoc" t.Log("app", app) rw := httptest.NewRecorder() @@ -297,7 +280,7 @@ func Test_ResolveRequest(t *testing.T) { r.Header.Set("User-Agent", auditableUA) // Try resolving the request without a token. - token, ok := workspaceappsResolveRequest(t, auditor, rw, r, workspaceapps.ResolveRequestOptions{ + token, ok := workspaceappsResolveRequest(t, connLogger, rw, r, workspaceapps.ResolveRequestOptions{ Logger: api.Logger, SignedTokenProvider: api.WorkspaceAppsProvider, DashboardURL: api.AccessURL, @@ -333,8 +316,8 @@ func Test_ResolveRequest(t *testing.T) { require.Equal(t, codersdk.SignedAppTokenCookie, cookie.Name) require.Equal(t, req.BasePath, cookie.Path) - assertAuditApp(t, rw, r, auditor, appsBySlug[app], me.ID, nil) - require.Len(t, auditor.AuditLogs(), 1, "audit log count") + assertConnLogContains(t, rw, r, connLogger, workspace, agentName, app, database.ConnectionTypeWorkspaceApp, me.ID) + require.Len(t, connLogger.ConnectionLogs(), 1) var parsedToken workspaceapps.SignedToken err := jwtutils.Verify(ctx, api.AppSigningKeyCache, cookie.Value, &parsedToken) @@ -350,7 +333,7 @@ func Test_ResolveRequest(t *testing.T) { r.AddCookie(cookie) r.RemoteAddr = auditableIP - secondToken, ok := workspaceappsResolveRequest(t, auditor, rw, r, workspaceapps.ResolveRequestOptions{ + secondToken, ok := workspaceappsResolveRequest(t, connLogger, rw, r, workspaceapps.ResolveRequestOptions{ Logger: api.Logger, SignedTokenProvider: api.WorkspaceAppsProvider, DashboardURL: api.AccessURL, @@ -363,7 +346,7 @@ 
func Test_ResolveRequest(t *testing.T) { require.WithinDuration(t, token.Expiry.Time(), secondToken.Expiry.Time(), 2*time.Second) secondToken.Expiry = token.Expiry require.Equal(t, token, secondToken) - require.Len(t, auditor.AuditLogs(), 1, "no new audit log, FromRequest returned the same token and is not audited") + require.Len(t, connLogger.ConnectionLogs(), 1, "no new connection log, FromRequest returned the same token and is not logged") } }) } @@ -382,7 +365,7 @@ func Test_ResolveRequest(t *testing.T) { AppSlugOrPort: app, }).Normalize() - auditor := audit.NewMock() + connLogger := connectionlog.NewFake() auditableIP := testutil.RandomIPv6(t) t.Log("app", app) @@ -391,7 +374,7 @@ func Test_ResolveRequest(t *testing.T) { r.Header.Set(codersdk.SessionTokenHeader, secondUserClient.SessionToken()) r.RemoteAddr = auditableIP - token, ok := workspaceappsResolveRequest(t, auditor, rw, r, workspaceapps.ResolveRequestOptions{ + token, ok := workspaceappsResolveRequest(t, connLogger, rw, r, workspaceapps.ResolveRequestOptions{ Logger: api.Logger, SignedTokenProvider: api.WorkspaceAppsProvider, DashboardURL: api.AccessURL, @@ -406,14 +389,15 @@ func Test_ResolveRequest(t *testing.T) { require.Nil(t, token) require.NotZero(t, w.StatusCode) require.Equal(t, http.StatusNotFound, w.StatusCode) + require.Len(t, connLogger.ConnectionLogs(), 1) return } require.True(t, ok) require.NotNil(t, token) require.Zero(t, w.StatusCode) - assertAuditApp(t, rw, r, auditor, appsBySlug[app], secondUser.ID, nil) - require.Len(t, auditor.AuditLogs(), 1, "single audit log") + assertConnLogContains(t, rw, r, connLogger, workspace, agentName, app, database.ConnectionTypeWorkspaceApp, secondUser.ID) + require.Len(t, connLogger.ConnectionLogs(), 1) } }) @@ -430,14 +414,14 @@ func Test_ResolveRequest(t *testing.T) { AppSlugOrPort: app, }).Normalize() - auditor := audit.NewMock() + connLogger := connectionlog.NewFake() auditableIP := testutil.RandomIPv6(t) t.Log("app", app) rw := 
httptest.NewRecorder() r := httptest.NewRequest("GET", "/app", nil) r.RemoteAddr = auditableIP - token, ok := workspaceappsResolveRequest(t, auditor, rw, r, workspaceapps.ResolveRequestOptions{ + token, ok := workspaceappsResolveRequest(t, connLogger, rw, r, workspaceapps.ResolveRequestOptions{ Logger: api.Logger, SignedTokenProvider: api.WorkspaceAppsProvider, DashboardURL: api.AccessURL, @@ -452,8 +436,8 @@ func Test_ResolveRequest(t *testing.T) { require.NotZero(t, rw.Code) require.NotEqual(t, http.StatusOK, rw.Code) - assertAuditApp(t, rw, r, auditor, appsBySlug[app], uuid.Nil, nil) - require.Len(t, auditor.AuditLogs(), 1, "audit log for unauthenticated requests") + assertConnLogContains(t, rw, r, connLogger, workspace, agentName, app, database.ConnectionTypeWorkspaceApp, uuid.Nil) + require.Len(t, connLogger.ConnectionLogs(), 1) } else { if !assert.True(t, ok) { dump, err := httputil.DumpResponse(w, true) @@ -466,8 +450,8 @@ func Test_ResolveRequest(t *testing.T) { t.Fatalf("expected 200 (or unset) response code, got %d", rw.Code) } - assertAuditApp(t, rw, r, auditor, appsBySlug[app], uuid.Nil, nil) - require.Len(t, auditor.AuditLogs(), 1, "single audit log") + assertConnLogContains(t, rw, r, connLogger, workspace, agentName, app, database.ConnectionTypeWorkspaceApp, uuid.Nil) + require.Len(t, connLogger.ConnectionLogs(), 1) } _ = w.Body.Close() } @@ -479,12 +463,12 @@ func Test_ResolveRequest(t *testing.T) { req := (workspaceapps.Request{ AccessMethod: "invalid", }).Normalize() - auditor := audit.NewMock() + connLogger := connectionlog.NewFake() auditableIP := testutil.RandomIPv6(t) rw := httptest.NewRecorder() r := httptest.NewRequest("GET", "/app", nil) r.RemoteAddr = auditableIP - token, ok := workspaceappsResolveRequest(t, auditor, rw, r, workspaceapps.ResolveRequestOptions{ + token, ok := workspaceappsResolveRequest(t, connLogger, rw, r, workspaceapps.ResolveRequestOptions{ Logger: api.Logger, SignedTokenProvider: api.WorkspaceAppsProvider, DashboardURL: 
api.AccessURL, @@ -494,7 +478,7 @@ func Test_ResolveRequest(t *testing.T) { }) require.False(t, ok) require.Nil(t, token) - require.Len(t, auditor.AuditLogs(), 0, "no audit logs for invalid requests") + require.Len(t, connLogger.ConnectionLogs(), 0) }) t.Run("SplitWorkspaceAndAgent", func(t *testing.T) { @@ -562,7 +546,7 @@ func Test_ResolveRequest(t *testing.T) { AppSlugOrPort: appNamePublic, }).Normalize() - auditor := audit.NewMock() + connLogger := connectionlog.NewFake() auditableIP := testutil.RandomIPv6(t) rw := httptest.NewRecorder() @@ -570,7 +554,7 @@ func Test_ResolveRequest(t *testing.T) { r.Header.Set(codersdk.SessionTokenHeader, client.SessionToken()) r.RemoteAddr = auditableIP - token, ok := workspaceappsResolveRequest(t, auditor, rw, r, workspaceapps.ResolveRequestOptions{ + token, ok := workspaceappsResolveRequest(t, connLogger, rw, r, workspaceapps.ResolveRequestOptions{ Logger: api.Logger, SignedTokenProvider: api.WorkspaceAppsProvider, DashboardURL: api.AccessURL, @@ -591,11 +575,11 @@ func Test_ResolveRequest(t *testing.T) { require.Equal(t, token.AgentNameOrID, c.agent) require.Equal(t, token.WorkspaceID, workspace.ID) require.Equal(t, token.AgentID, agentID) - assertAuditApp(t, rw, r, auditor, appsBySlug[token.AppSlugOrPort], me.ID, nil) - require.Len(t, auditor.AuditLogs(), 1, "single audit log") + assertConnLogContains(t, rw, r, connLogger, workspace, agentName, token.AppSlugOrPort, database.ConnectionTypeWorkspaceApp, me.ID) + require.Len(t, connLogger.ConnectionLogs(), 1) } else { require.Nil(t, token) - require.Len(t, auditor.AuditLogs(), 0, "no audit logs") + require.Len(t, connLogger.ConnectionLogs(), 0) } _ = w.Body.Close() }) @@ -637,7 +621,7 @@ func Test_ResolveRequest(t *testing.T) { AppSlugOrPort: appNameOwner, }).Normalize() - auditor := audit.NewMock() + connLogger := connectionlog.NewFake() auditableIP := testutil.RandomIPv6(t) rw := httptest.NewRecorder() @@ -651,7 +635,7 @@ func Test_ResolveRequest(t *testing.T) { // Even 
though the token is invalid, we should still perform request // resolution without failure since we'll just ignore the bad token. - token, ok := workspaceappsResolveRequest(t, auditor, rw, r, workspaceapps.ResolveRequestOptions{ + token, ok := workspaceappsResolveRequest(t, connLogger, rw, r, workspaceapps.ResolveRequestOptions{ Logger: api.Logger, SignedTokenProvider: api.WorkspaceAppsProvider, DashboardURL: api.AccessURL, @@ -676,8 +660,8 @@ func Test_ResolveRequest(t *testing.T) { require.NoError(t, err) require.Equal(t, appNameOwner, parsedToken.AppSlugOrPort) - assertAuditApp(t, rw, r, auditor, appsBySlug[appNameOwner], me.ID, nil) - require.Len(t, auditor.AuditLogs(), 1, "single audit log") + assertConnLogContains(t, rw, r, connLogger, workspace, agentName, appNameOwner, database.ConnectionTypeWorkspaceApp, me.ID) + require.Len(t, connLogger.ConnectionLogs(), 1) }) t.Run("PortPathBlocked", func(t *testing.T) { @@ -692,7 +676,7 @@ func Test_ResolveRequest(t *testing.T) { AppSlugOrPort: "8080", }).Normalize() - auditor := audit.NewMock() + connLogger := connectionlog.NewFake() auditableIP := testutil.RandomIPv6(t) rw := httptest.NewRecorder() @@ -700,7 +684,7 @@ func Test_ResolveRequest(t *testing.T) { r.Header.Set(codersdk.SessionTokenHeader, client.SessionToken()) r.RemoteAddr = auditableIP - token, ok := workspaceappsResolveRequest(t, auditor, rw, r, workspaceapps.ResolveRequestOptions{ + token, ok := workspaceappsResolveRequest(t, connLogger, rw, r, workspaceapps.ResolveRequestOptions{ Logger: api.Logger, SignedTokenProvider: api.WorkspaceAppsProvider, DashboardURL: api.AccessURL, @@ -715,7 +699,7 @@ func Test_ResolveRequest(t *testing.T) { _ = w.Body.Close() // TODO(mafredri): Verify this is the correct status code. 
require.Equal(t, http.StatusInternalServerError, w.StatusCode) - require.Len(t, auditor.AuditLogs(), 0, "no audit logs for port path blocked requests") + require.Len(t, connLogger.ConnectionLogs(), 0, "no connection logs for port path blocked requests") }) t.Run("PortSubdomain", func(t *testing.T) { @@ -730,7 +714,7 @@ func Test_ResolveRequest(t *testing.T) { AppSlugOrPort: "9090", }).Normalize() - auditor := audit.NewMock() + connLogger := connectionlog.NewFake() auditableIP := testutil.RandomIPv6(t) rw := httptest.NewRecorder() @@ -738,7 +722,7 @@ func Test_ResolveRequest(t *testing.T) { r.Header.Set(codersdk.SessionTokenHeader, client.SessionToken()) r.RemoteAddr = auditableIP - token, ok := workspaceappsResolveRequest(t, auditor, rw, r, workspaceapps.ResolveRequestOptions{ + token, ok := workspaceappsResolveRequest(t, connLogger, rw, r, workspaceapps.ResolveRequestOptions{ Logger: api.Logger, SignedTokenProvider: api.WorkspaceAppsProvider, DashboardURL: api.AccessURL, @@ -749,11 +733,8 @@ func Test_ResolveRequest(t *testing.T) { require.True(t, ok) require.Equal(t, req.AppSlugOrPort, token.AppSlugOrPort) require.Equal(t, "http://127.0.0.1:9090", token.AppURL) - - assertAuditAgent(t, rw, r, auditor, agent, me.ID, map[string]any{ - "slug_or_port": "9090", - }) - require.Len(t, auditor.AuditLogs(), 1, "single audit log") + assertConnLogContains(t, rw, r, connLogger, workspace, agentName, "9090", database.ConnectionTypePortForwarding, me.ID) + require.Len(t, connLogger.ConnectionLogs(), 1) }) t.Run("PortSubdomainHTTPSS", func(t *testing.T) { @@ -768,7 +749,7 @@ func Test_ResolveRequest(t *testing.T) { AppSlugOrPort: "9090ss", }).Normalize() - auditor := audit.NewMock() + connLogger := connectionlog.NewFake() auditableIP := testutil.RandomIPv6(t) rw := httptest.NewRecorder() @@ -776,7 +757,7 @@ func Test_ResolveRequest(t *testing.T) { r.Header.Set(codersdk.SessionTokenHeader, client.SessionToken()) r.RemoteAddr = auditableIP - _, ok := workspaceappsResolveRequest(t, 
auditor, rw, r, workspaceapps.ResolveRequestOptions{ + _, ok := workspaceappsResolveRequest(t, connLogger, rw, r, workspaceapps.ResolveRequestOptions{ Logger: api.Logger, SignedTokenProvider: api.WorkspaceAppsProvider, DashboardURL: api.AccessURL, @@ -792,7 +773,7 @@ func Test_ResolveRequest(t *testing.T) { require.NoError(t, err) require.Contains(t, string(b), "404 - Application Not Found") require.Equal(t, http.StatusNotFound, w.StatusCode) - require.Len(t, auditor.AuditLogs(), 0, "no audit logs for invalid requests") + require.Len(t, connLogger.ConnectionLogs(), 0) }) t.Run("SubdomainEndsInS", func(t *testing.T) { @@ -807,7 +788,7 @@ func Test_ResolveRequest(t *testing.T) { AppSlugOrPort: appNameEndsInS, }).Normalize() - auditor := audit.NewMock() + connLogger := connectionlog.NewFake() auditableIP := testutil.RandomIPv6(t) rw := httptest.NewRecorder() @@ -815,7 +796,7 @@ func Test_ResolveRequest(t *testing.T) { r.Header.Set(codersdk.SessionTokenHeader, client.SessionToken()) r.RemoteAddr = auditableIP - token, ok := workspaceappsResolveRequest(t, auditor, rw, r, workspaceapps.ResolveRequestOptions{ + token, ok := workspaceappsResolveRequest(t, connLogger, rw, r, workspaceapps.ResolveRequestOptions{ Logger: api.Logger, SignedTokenProvider: api.WorkspaceAppsProvider, DashboardURL: api.AccessURL, @@ -825,8 +806,8 @@ func Test_ResolveRequest(t *testing.T) { }) require.True(t, ok) require.Equal(t, req.AppSlugOrPort, token.AppSlugOrPort) - assertAuditApp(t, rw, r, auditor, appsBySlug[appNameEndsInS], me.ID, nil) - require.Len(t, auditor.AuditLogs(), 1, "single audit log") + assertConnLogContains(t, rw, r, connLogger, workspace, agentName, appNameEndsInS, database.ConnectionTypeWorkspaceApp, me.ID) + require.Len(t, connLogger.ConnectionLogs(), 1) }) t.Run("Terminal", func(t *testing.T) { @@ -838,7 +819,7 @@ func Test_ResolveRequest(t *testing.T) { AgentNameOrID: agentID.String(), }).Normalize() - auditor := audit.NewMock() + connLogger := connectionlog.NewFake() 
auditableIP := testutil.RandomIPv6(t) rw := httptest.NewRecorder() @@ -846,7 +827,7 @@ func Test_ResolveRequest(t *testing.T) { r.Header.Set(codersdk.SessionTokenHeader, client.SessionToken()) r.RemoteAddr = auditableIP - token, ok := workspaceappsResolveRequest(t, auditor, rw, r, workspaceapps.ResolveRequestOptions{ + token, ok := workspaceappsResolveRequest(t, connLogger, rw, r, workspaceapps.ResolveRequestOptions{ Logger: api.Logger, SignedTokenProvider: api.WorkspaceAppsProvider, DashboardURL: api.AccessURL, @@ -862,10 +843,8 @@ func Test_ResolveRequest(t *testing.T) { require.Equal(t, req.AgentNameOrID, token.Request.AgentNameOrID) require.Empty(t, token.AppSlugOrPort) require.Empty(t, token.AppURL) - assertAuditAgent(t, rw, r, auditor, agent, me.ID, map[string]any{ - "slug_or_port": "terminal", - }) - require.Len(t, auditor.AuditLogs(), 1, "single audit log") + assertConnLogContains(t, rw, r, connLogger, workspace, agentName, "terminal", database.ConnectionTypeWorkspaceApp, me.ID) + require.Len(t, connLogger.ConnectionLogs(), 1) }) t.Run("InsufficientPermissions", func(t *testing.T) { @@ -880,7 +859,7 @@ func Test_ResolveRequest(t *testing.T) { AppSlugOrPort: appNameOwner, }).Normalize() - auditor := audit.NewMock() + connLogger := connectionlog.NewFake() auditableIP := testutil.RandomIPv6(t) rw := httptest.NewRecorder() @@ -888,7 +867,7 @@ func Test_ResolveRequest(t *testing.T) { r.Header.Set(codersdk.SessionTokenHeader, secondUserClient.SessionToken()) r.RemoteAddr = auditableIP - token, ok := workspaceappsResolveRequest(t, auditor, rw, r, workspaceapps.ResolveRequestOptions{ + token, ok := workspaceappsResolveRequest(t, connLogger, rw, r, workspaceapps.ResolveRequestOptions{ Logger: api.Logger, SignedTokenProvider: api.WorkspaceAppsProvider, DashboardURL: api.AccessURL, @@ -898,8 +877,8 @@ func Test_ResolveRequest(t *testing.T) { }) require.False(t, ok) require.Nil(t, token) - assertAuditApp(t, rw, r, auditor, appsBySlug[appNameOwner], secondUser.ID, nil) 
- require.Len(t, auditor.AuditLogs(), 1, "single audit log") + assertConnLogContains(t, rw, r, connLogger, workspace, agentName, appNameOwner, database.ConnectionTypeWorkspaceApp, secondUser.ID) + require.Len(t, connLogger.ConnectionLogs(), 1) }) t.Run("UserNotFound", func(t *testing.T) { @@ -913,7 +892,7 @@ func Test_ResolveRequest(t *testing.T) { AppSlugOrPort: appNameOwner, }).Normalize() - auditor := audit.NewMock() + connLogger := connectionlog.NewFake() auditableIP := testutil.RandomIPv6(t) rw := httptest.NewRecorder() @@ -921,7 +900,7 @@ func Test_ResolveRequest(t *testing.T) { r.Header.Set(codersdk.SessionTokenHeader, client.SessionToken()) r.RemoteAddr = auditableIP - token, ok := workspaceappsResolveRequest(t, auditor, rw, r, workspaceapps.ResolveRequestOptions{ + token, ok := workspaceappsResolveRequest(t, connLogger, rw, r, workspaceapps.ResolveRequestOptions{ Logger: api.Logger, SignedTokenProvider: api.WorkspaceAppsProvider, DashboardURL: api.AccessURL, @@ -931,7 +910,7 @@ func Test_ResolveRequest(t *testing.T) { }) require.False(t, ok) require.Nil(t, token) - require.Len(t, auditor.AuditLogs(), 0, "no audit logs for user not found") + require.Len(t, connLogger.ConnectionLogs(), 0) }) t.Run("RedirectSubdomainAuth", func(t *testing.T) { @@ -946,7 +925,7 @@ func Test_ResolveRequest(t *testing.T) { AppSlugOrPort: appNameOwner, }).Normalize() - auditor := audit.NewMock() + connLogger := connectionlog.NewFake() auditableIP := testutil.RandomIPv6(t) rw := httptest.NewRecorder() @@ -955,7 +934,7 @@ func Test_ResolveRequest(t *testing.T) { r.Host = "app.com" r.RemoteAddr = auditableIP - token, ok := workspaceappsResolveRequest(t, auditor, rw, r, workspaceapps.ResolveRequestOptions{ + token, ok := workspaceappsResolveRequest(t, connLogger, rw, r, workspaceapps.ResolveRequestOptions{ Logger: api.Logger, SignedTokenProvider: api.WorkspaceAppsProvider, DashboardURL: api.AccessURL, @@ -972,8 +951,8 @@ func Test_ResolveRequest(t *testing.T) { require.Equal(t, 
http.StatusSeeOther, w.StatusCode) // Note that we don't capture the owner UUID here because the apiKey // check/authorization exits early. - assertAuditApp(t, rw, r, auditor, appsBySlug[appNameOwner], uuid.Nil, nil) - require.Len(t, auditor.AuditLogs(), 1, "autit log entry for redirect") + assertConnLogContains(t, rw, r, connLogger, workspace, agentName, appNameOwner, database.ConnectionTypeWorkspaceApp, uuid.Nil) + require.Len(t, connLogger.ConnectionLogs(), 1) loc, err := w.Location() require.NoError(t, err) @@ -1012,7 +991,7 @@ func Test_ResolveRequest(t *testing.T) { AppSlugOrPort: appNameAgentUnhealthy, }).Normalize() - auditor := audit.NewMock() + connLogger := connectionlog.NewFake() auditableIP := testutil.RandomIPv6(t) rw := httptest.NewRecorder() @@ -1020,7 +999,7 @@ func Test_ResolveRequest(t *testing.T) { r.Header.Set(codersdk.SessionTokenHeader, client.SessionToken()) r.RemoteAddr = auditableIP - token, ok := workspaceappsResolveRequest(t, auditor, rw, r, workspaceapps.ResolveRequestOptions{ + token, ok := workspaceappsResolveRequest(t, connLogger, rw, r, workspaceapps.ResolveRequestOptions{ Logger: api.Logger, SignedTokenProvider: api.WorkspaceAppsProvider, DashboardURL: api.AccessURL, @@ -1034,8 +1013,8 @@ func Test_ResolveRequest(t *testing.T) { w := rw.Result() defer w.Body.Close() require.Equal(t, http.StatusBadGateway, w.StatusCode) - assertAuditApp(t, rw, r, auditor, appsBySlug[appNameAgentUnhealthy], me.ID, nil) - require.Len(t, auditor.AuditLogs(), 1, "single audit log") + assertConnLogContains(t, rw, r, connLogger, workspace, agentNameUnhealthy, appNameAgentUnhealthy, database.ConnectionTypeWorkspaceApp, me.ID) + require.Len(t, connLogger.ConnectionLogs(), 1) body, err := io.ReadAll(w.Body) require.NoError(t, err) @@ -1075,7 +1054,7 @@ func Test_ResolveRequest(t *testing.T) { AppSlugOrPort: appNameInitializing, }).Normalize() - auditor := audit.NewMock() + connLogger := connectionlog.NewFake() auditableIP := testutil.RandomIPv6(t) rw := 
httptest.NewRecorder() @@ -1083,7 +1062,7 @@ func Test_ResolveRequest(t *testing.T) { r.Header.Set(codersdk.SessionTokenHeader, client.SessionToken()) r.RemoteAddr = auditableIP - token, ok := workspaceappsResolveRequest(t, auditor, rw, r, workspaceapps.ResolveRequestOptions{ + token, ok := workspaceappsResolveRequest(t, connLogger, rw, r, workspaceapps.ResolveRequestOptions{ Logger: api.Logger, SignedTokenProvider: api.WorkspaceAppsProvider, DashboardURL: api.AccessURL, @@ -1093,8 +1072,8 @@ func Test_ResolveRequest(t *testing.T) { }) require.True(t, ok, "ResolveRequest failed, should pass even though app is initializing") require.NotNil(t, token) - assertAuditApp(t, rw, r, auditor, appsBySlug[token.AppSlugOrPort], me.ID, nil) - require.Len(t, auditor.AuditLogs(), 1, "single audit log") + assertConnLogContains(t, rw, r, connLogger, workspace, agentName, token.AppSlugOrPort, database.ConnectionTypeWorkspaceApp, me.ID) + require.Len(t, connLogger.ConnectionLogs(), 1) }) // Unhealthy apps are now permitted to connect anyways. 
This wasn't always @@ -1133,7 +1112,7 @@ func Test_ResolveRequest(t *testing.T) { AppSlugOrPort: appNameUnhealthy, }).Normalize() - auditor := audit.NewMock() + connLogger := connectionlog.NewFake() auditableIP := testutil.RandomIPv6(t) rw := httptest.NewRecorder() @@ -1141,7 +1120,7 @@ func Test_ResolveRequest(t *testing.T) { r.Header.Set(codersdk.SessionTokenHeader, client.SessionToken()) r.RemoteAddr = auditableIP - token, ok := workspaceappsResolveRequest(t, auditor, rw, r, workspaceapps.ResolveRequestOptions{ + token, ok := workspaceappsResolveRequest(t, connLogger, rw, r, workspaceapps.ResolveRequestOptions{ Logger: api.Logger, SignedTokenProvider: api.WorkspaceAppsProvider, DashboardURL: api.AccessURL, @@ -1151,11 +1130,11 @@ func Test_ResolveRequest(t *testing.T) { }) require.True(t, ok, "ResolveRequest failed, should pass even though app is unhealthy") require.NotNil(t, token) - assertAuditApp(t, rw, r, auditor, appsBySlug[token.AppSlugOrPort], me.ID, nil) - require.Len(t, auditor.AuditLogs(), 1, "single audit log") + assertConnLogContains(t, rw, r, connLogger, workspace, agentName, token.AppSlugOrPort, database.ConnectionTypeWorkspaceApp, me.ID) + require.Len(t, connLogger.ConnectionLogs(), 1) }) - t.Run("AuditLogging", func(t *testing.T) { + t.Run("ConnectionLogging", func(t *testing.T) { t.Parallel() for _, app := range allApps { @@ -1168,18 +1147,18 @@ func Test_ResolveRequest(t *testing.T) { AppSlugOrPort: app, }).Normalize() - auditor := audit.NewMock() + connLogger := connectionlog.NewFake() auditableIP := testutil.RandomIPv6(t) t.Log("app", app) - // First request, new audit log. + // First request, new connection log. 
rw := httptest.NewRecorder() r := httptest.NewRequest("GET", "/app", nil) r.Header.Set(codersdk.SessionTokenHeader, client.SessionToken()) r.RemoteAddr = auditableIP - _, ok := workspaceappsResolveRequest(t, auditor, rw, r, workspaceapps.ResolveRequestOptions{ + _, ok := workspaceappsResolveRequest(t, connLogger, rw, r, workspaceapps.ResolveRequestOptions{ Logger: api.Logger, SignedTokenProvider: api.WorkspaceAppsProvider, DashboardURL: api.AccessURL, @@ -1188,8 +1167,8 @@ func Test_ResolveRequest(t *testing.T) { AppRequest: req, }) require.True(t, ok) - assertAuditApp(t, rw, r, auditor, appsBySlug[app], me.ID, nil) - require.Len(t, auditor.AuditLogs(), 1, "single audit log") + assertConnLogContains(t, rw, r, connLogger, workspace, agentName, app, database.ConnectionTypeWorkspaceApp, me.ID) + require.Len(t, connLogger.ConnectionLogs(), 1) // Second request, no audit log because the session is active. rw = httptest.NewRecorder() @@ -1197,7 +1176,7 @@ func Test_ResolveRequest(t *testing.T) { r.Header.Set(codersdk.SessionTokenHeader, client.SessionToken()) r.RemoteAddr = auditableIP - _, ok = workspaceappsResolveRequest(t, auditor, rw, r, workspaceapps.ResolveRequestOptions{ + _, ok = workspaceappsResolveRequest(t, connLogger, rw, r, workspaceapps.ResolveRequestOptions{ Logger: api.Logger, SignedTokenProvider: api.WorkspaceAppsProvider, DashboardURL: api.AccessURL, @@ -1206,7 +1185,7 @@ func Test_ResolveRequest(t *testing.T) { AppRequest: req, }) require.True(t, ok) - require.Len(t, auditor.AuditLogs(), 1, "single audit log, previous session active") + require.Len(t, connLogger.ConnectionLogs(), 1, "single connection log, previous session active") // Third request, session timed out, new audit log. 
rw = httptest.NewRecorder() @@ -1214,7 +1193,7 @@ func Test_ResolveRequest(t *testing.T) { r.Header.Set(codersdk.SessionTokenHeader, client.SessionToken()) r.RemoteAddr = auditableIP - sessionTimeoutTokenProvider := signedTokenProviderWithAuditor(t, api.WorkspaceAppsProvider, auditor, 0) + sessionTimeoutTokenProvider := signedTokenProviderWithConnLogger(t, api.WorkspaceAppsProvider, connLogger, 0) _, ok = workspaceappsResolveRequest(t, nil, rw, r, workspaceapps.ResolveRequestOptions{ Logger: api.Logger, SignedTokenProvider: sessionTimeoutTokenProvider, @@ -1224,8 +1203,8 @@ func Test_ResolveRequest(t *testing.T) { AppRequest: req, }) require.True(t, ok) - assertAuditApp(t, rw, r, auditor, appsBySlug[app], me.ID, nil) - require.Len(t, auditor.AuditLogs(), 2, "two audit logs, session timed out") + assertConnLogContains(t, rw, r, connLogger, workspace, agentName, app, database.ConnectionTypeWorkspaceApp, me.ID) + require.Len(t, connLogger.ConnectionLogs(), 2, "two connection logs, session timed out") // Fourth request, new IP produces new audit log. 
auditableIP = testutil.RandomIPv6(t) @@ -1234,7 +1213,7 @@ func Test_ResolveRequest(t *testing.T) { r.Header.Set(codersdk.SessionTokenHeader, client.SessionToken()) r.RemoteAddr = auditableIP - _, ok = workspaceappsResolveRequest(t, auditor, rw, r, workspaceapps.ResolveRequestOptions{ + _, ok = workspaceappsResolveRequest(t, connLogger, rw, r, workspaceapps.ResolveRequestOptions{ Logger: api.Logger, SignedTokenProvider: api.WorkspaceAppsProvider, DashboardURL: api.AccessURL, @@ -1243,16 +1222,16 @@ func Test_ResolveRequest(t *testing.T) { AppRequest: req, }) require.True(t, ok) - assertAuditApp(t, rw, r, auditor, appsBySlug[app], me.ID, nil) - require.Len(t, auditor.AuditLogs(), 3, "three audit logs, new IP") + assertConnLogContains(t, rw, r, connLogger, workspace, agentName, app, database.ConnectionTypeWorkspaceApp, me.ID) + require.Len(t, connLogger.ConnectionLogs(), 3, "three connection logs, new IP") } }) } -func workspaceappsResolveRequest(t testing.TB, auditor audit.Auditor, w http.ResponseWriter, r *http.Request, opts workspaceapps.ResolveRequestOptions) (token *workspaceapps.SignedToken, ok bool) { +func workspaceappsResolveRequest(t testing.TB, connLogger connectionlog.ConnectionLogger, w http.ResponseWriter, r *http.Request, opts workspaceapps.ResolveRequestOptions) (token *workspaceapps.SignedToken, ok bool) { t.Helper() - if opts.SignedTokenProvider != nil && auditor != nil { - opts.SignedTokenProvider = signedTokenProviderWithAuditor(t, opts.SignedTokenProvider, auditor, time.Hour) + if opts.SignedTokenProvider != nil && connLogger != nil { + opts.SignedTokenProvider = signedTokenProviderWithConnLogger(t, opts.SignedTokenProvider, connLogger, time.Hour) } tracing.StatusWriterMiddleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -1264,52 +1243,41 @@ func workspaceappsResolveRequest(t testing.TB, auditor audit.Auditor, w http.Res return token, ok } -func signedTokenProviderWithAuditor(t testing.TB, provider 
workspaceapps.SignedTokenProvider, auditor audit.Auditor, sessionTimeout time.Duration) workspaceapps.SignedTokenProvider { +func signedTokenProviderWithConnLogger(t testing.TB, provider workspaceapps.SignedTokenProvider, connLogger connectionlog.ConnectionLogger, sessionTimeout time.Duration) workspaceapps.SignedTokenProvider { t.Helper() p, ok := provider.(*workspaceapps.DBTokenProvider) require.True(t, ok, "provider is not a DBTokenProvider") shallowCopy := *p - shallowCopy.Auditor = &atomic.Pointer[audit.Auditor]{} - shallowCopy.Auditor.Store(&auditor) + shallowCopy.ConnectionLogger = &atomic.Pointer[connectionlog.ConnectionLogger]{} + shallowCopy.ConnectionLogger.Store(&connLogger) shallowCopy.WorkspaceAppAuditSessionTimeout = sessionTimeout return &shallowCopy } -func auditAsserter[T audit.Auditable](workspace codersdk.Workspace) func(t testing.TB, rr *httptest.ResponseRecorder, r *http.Request, auditor *audit.MockAuditor, auditable T, userID uuid.UUID, additionalFields map[string]any) { - return func(t testing.TB, rr *httptest.ResponseRecorder, r *http.Request, auditor *audit.MockAuditor, auditable T, userID uuid.UUID, additionalFields map[string]any) { - t.Helper() - - resp := rr.Result() - defer resp.Body.Close() - - require.True(t, auditor.Contains(t, database.AuditLog{ - OrganizationID: workspace.OrganizationID, - Action: database.AuditActionOpen, - ResourceType: audit.ResourceType(auditable), - ResourceID: audit.ResourceID(auditable), - ResourceTarget: audit.ResourceTarget(auditable), - UserID: userID, - Ip: audit.ParseIP(r.RemoteAddr), - UserAgent: sql.NullString{Valid: r.UserAgent() != "", String: r.UserAgent()}, - StatusCode: int32(resp.StatusCode), //nolint:gosec - }), "audit log") - - // Verify additional fields, assume the last log entry. - alog := auditor.AuditLogs()[len(auditor.AuditLogs())-1] - - // Contains does not verify uuid.Nil. 
- if userID == uuid.Nil { - require.Equal(t, uuid.Nil, alog.UserID, "unauthenticated user") - } +func assertConnLogContains(t *testing.T, rr *httptest.ResponseRecorder, r *http.Request, connLogger *connectionlog.FakeConnectionLogger, workspace codersdk.Workspace, agentName string, slugOrPort string, typ database.ConnectionType, userID uuid.UUID) { + t.Helper() - add := make(map[string]any) - if len(alog.AdditionalFields) > 0 { - err := json.Unmarshal([]byte(alog.AdditionalFields), &add) - require.NoError(t, err, "audit log unmarhsal additional fields") - } - for k, v := range additionalFields { - require.Equal(t, v, add[k], "audit log additional field %s: additional fields: %v", k, add) - } - } + resp := rr.Result() + defer resp.Body.Close() + + require.True(t, connLogger.Contains(t, database.UpsertConnectionLogParams{ + OrganizationID: workspace.OrganizationID, + WorkspaceOwnerID: workspace.OwnerID, + WorkspaceID: workspace.ID, + WorkspaceName: workspace.Name, + AgentName: agentName, + Type: typ, + Ip: database.ParseIP(r.RemoteAddr), + UserAgent: sql.NullString{Valid: r.UserAgent() != "", String: r.UserAgent()}, + Code: sql.NullInt32{ + Int32: int32(resp.StatusCode), //nolint:gosec + Valid: true, + }, + UserID: uuid.NullUUID{ + UUID: userID, + Valid: true, + }, + SlugOrPort: sql.NullString{Valid: slugOrPort != "", String: slugOrPort}, + })) } diff --git a/coderd/workspacebuilds.go b/coderd/workspacebuilds.go index c8b1008280b09..88774c63368ca 100644 --- a/coderd/workspacebuilds.go +++ b/coderd/workspacebuilds.go @@ -119,7 +119,7 @@ func (api *API) workspaceBuilds(rw http.ResponseWriter, r *http.Request) { ctx := r.Context() workspace := httpmw.WorkspaceParam(r) - paginationParams, ok := parsePagination(rw, r) + paginationParams, ok := ParsePagination(rw, r) if !ok { return } diff --git a/coderd/workspacebuilds_test.go b/coderd/workspacebuilds_test.go index ebab0770b71b4..0855d6091f7e4 100644 --- a/coderd/workspacebuilds_test.go +++ 
b/coderd/workspacebuilds_test.go @@ -494,11 +494,14 @@ func TestWorkspaceBuildsProvisionerState(t *testing.T) { require.NoError(t, err) coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, build.ID) - // Validate that the deletion was audited. - require.True(t, auditor.Contains(t, database.AuditLog{ - ResourceID: build.ID, - Action: database.AuditActionDelete, - })) + // Validate that the deletion was audited. This happens after the transaction + // is committed, so it may not show up in the mock auditor immediately. + testutil.Eventually(ctx, t, func(context.Context) bool { + return auditor.Contains(t, database.AuditLog{ + ResourceID: build.ID, + Action: database.AuditActionDelete, + }) + }, testutil.IntervalFast) }) t.Run("NoProvisioners", func(t *testing.T) { @@ -535,11 +538,14 @@ func TestWorkspaceBuildsProvisionerState(t *testing.T) { require.Empty(t, ws) require.Equal(t, http.StatusGone, coderdtest.SDKError(t, err).StatusCode()) - // Validate that the deletion was audited. - require.True(t, auditor.Contains(t, database.AuditLog{ - ResourceID: build.ID, - Action: database.AuditActionDelete, - })) + // Validate that the deletion was audited. This happens after the transaction + // is committed, so it may not show up in the mock auditor immediately. 
+ testutil.Eventually(ctx, t, func(context.Context) bool { + return auditor.Contains(t, database.AuditLog{ + ResourceID: build.ID, + Action: database.AuditActionDelete, + }) + }, testutil.IntervalFast) }) }) } diff --git a/coderd/workspaces.go b/coderd/workspaces.go index ecb624d1bc09f..32b412946907e 100644 --- a/coderd/workspaces.go +++ b/coderd/workspaces.go @@ -146,7 +146,7 @@ func (api *API) workspaces(rw http.ResponseWriter, r *http.Request) { ctx := r.Context() apiKey := httpmw.APIKey(r) - page, ok := parsePagination(rw, r) + page, ok := ParsePagination(rw, r) if !ok { return } @@ -2231,6 +2231,7 @@ func convertWorkspace( if latestAppStatus.ID == uuid.Nil { appStatus = nil } + return codersdk.Workspace{ ID: workspace.ID, CreatedAt: workspace.CreatedAt, @@ -2265,6 +2266,7 @@ func convertWorkspace( AllowRenames: allowRenames, Favorite: requesterFavorite, NextStartAt: nextStartAt, + IsPrebuild: workspace.IsPrebuild(), }, nil } diff --git a/codersdk/agentsdk/agentsdk.go b/codersdk/agentsdk/agentsdk.go index f44c19b998e21..a78ee3c5608dd 100644 --- a/codersdk/agentsdk/agentsdk.go +++ b/codersdk/agentsdk/agentsdk.go @@ -37,18 +37,6 @@ import ( // log-source. This should be removed in the future. var ExternalLogSourceID = uuid.MustParse("3b579bf4-1ed8-4b99-87a8-e9a1e3410410") -// ConnectionType is the type of connection that the agent is receiving. -type ConnectionType string - -// Connection type enums. -const ( - ConnectionTypeUnspecified ConnectionType = "Unspecified" - ConnectionTypeSSH ConnectionType = "SSH" - ConnectionTypeVSCode ConnectionType = "VS Code" - ConnectionTypeJetBrains ConnectionType = "JetBrains" - ConnectionTypeReconnectingPTY ConnectionType = "Web Terminal" -) - // New returns a client that is used to interact with the // Coder API from a workspace agent. 
func New(serverURL *url.URL) *Client { diff --git a/codersdk/agentsdk/convert.go b/codersdk/agentsdk/convert.go index d01c9e527fce9..775ce06c73c69 100644 --- a/codersdk/agentsdk/convert.go +++ b/codersdk/agentsdk/convert.go @@ -408,40 +408,6 @@ func ProtoFromLifecycleState(s codersdk.WorkspaceAgentLifecycle) (proto.Lifecycl return proto.Lifecycle_State(caps), nil } -func ConnectionTypeFromProto(typ proto.Connection_Type) (ConnectionType, error) { - switch typ { - case proto.Connection_TYPE_UNSPECIFIED: - return ConnectionTypeUnspecified, nil - case proto.Connection_SSH: - return ConnectionTypeSSH, nil - case proto.Connection_VSCODE: - return ConnectionTypeVSCode, nil - case proto.Connection_JETBRAINS: - return ConnectionTypeJetBrains, nil - case proto.Connection_RECONNECTING_PTY: - return ConnectionTypeReconnectingPTY, nil - default: - return "", xerrors.Errorf("unknown connection type %q", typ) - } -} - -func ProtoFromConnectionType(typ ConnectionType) (proto.Connection_Type, error) { - switch typ { - case ConnectionTypeUnspecified: - return proto.Connection_TYPE_UNSPECIFIED, nil - case ConnectionTypeSSH: - return proto.Connection_SSH, nil - case ConnectionTypeVSCode: - return proto.Connection_VSCODE, nil - case ConnectionTypeJetBrains: - return proto.Connection_JETBRAINS, nil - case ConnectionTypeReconnectingPTY: - return proto.Connection_RECONNECTING_PTY, nil - default: - return 0, xerrors.Errorf("unknown connection type %q", typ) - } -} - func DevcontainersFromProto(pdcs []*proto.WorkspaceAgentDevcontainer) ([]codersdk.WorkspaceAgentDevcontainer, error) { ret := make([]codersdk.WorkspaceAgentDevcontainer, len(pdcs)) for i, pdc := range pdcs { diff --git a/codersdk/audit.go b/codersdk/audit.go index 49e597845b964..1e529202b5285 100644 --- a/codersdk/audit.go +++ b/codersdk/audit.go @@ -38,8 +38,12 @@ const ( ResourceTypeIdpSyncSettingsOrganization ResourceType = "idp_sync_settings_organization" ResourceTypeIdpSyncSettingsGroup ResourceType = 
"idp_sync_settings_group" ResourceTypeIdpSyncSettingsRole ResourceType = "idp_sync_settings_role" - ResourceTypeWorkspaceAgent ResourceType = "workspace_agent" - ResourceTypeWorkspaceApp ResourceType = "workspace_app" + // Deprecated: Workspace Agent connections are now included in the + // connection log. + ResourceTypeWorkspaceAgent ResourceType = "workspace_agent" + // Deprecated: Workspace App connections are now included in the + // connection log. + ResourceTypeWorkspaceApp ResourceType = "workspace_app" ) func (r ResourceType) FriendlyString() string { @@ -113,10 +117,17 @@ const ( AuditActionLogout AuditAction = "logout" AuditActionRegister AuditAction = "register" AuditActionRequestPasswordReset AuditAction = "request_password_reset" - AuditActionConnect AuditAction = "connect" - AuditActionDisconnect AuditAction = "disconnect" - AuditActionOpen AuditAction = "open" - AuditActionClose AuditAction = "close" + // Deprecated: Workspace connections are now included in the + // connection log. + AuditActionConnect AuditAction = "connect" + // Deprecated: Workspace disconnections are now included in the + // connection log. + AuditActionDisconnect AuditAction = "disconnect" + // Deprecated: Workspace App connections are now included in the + // connection log. + AuditActionOpen AuditAction = "open" + // Deprecated: This action is unused. 
+ AuditActionClose AuditAction = "close" ) func (a AuditAction) Friendly() string { diff --git a/codersdk/connectionlog.go b/codersdk/connectionlog.go new file mode 100644 index 0000000000000..9dd78694b4e08 --- /dev/null +++ b/codersdk/connectionlog.go @@ -0,0 +1,126 @@ +package codersdk + +import ( + "context" + "encoding/json" + "net/http" + "net/netip" + "strings" + "time" + + "github.com/google/uuid" +) + +type ConnectionLog struct { + ID uuid.UUID `json:"id" format:"uuid"` + ConnectTime time.Time `json:"connect_time" format:"date-time"` + Organization MinimalOrganization `json:"organization"` + WorkspaceOwnerID uuid.UUID `json:"workspace_owner_id" format:"uuid"` + WorkspaceOwnerUsername string `json:"workspace_owner_username"` + WorkspaceID uuid.UUID `json:"workspace_id" format:"uuid"` + WorkspaceName string `json:"workspace_name"` + AgentName string `json:"agent_name"` + IP netip.Addr `json:"ip"` + Type ConnectionType `json:"type"` + + // WebInfo is only set when `type` is one of: + // - `ConnectionTypePortForwarding` + // - `ConnectionTypeWorkspaceApp` + WebInfo *ConnectionLogWebInfo `json:"web_info,omitempty"` + + // SSHInfo is only set when `type` is one of: + // - `ConnectionTypeSSH` + // - `ConnectionTypeReconnectingPTY` + // - `ConnectionTypeVSCode` + // - `ConnectionTypeJetBrains` + SSHInfo *ConnectionLogSSHInfo `json:"ssh_info,omitempty"` +} + +// ConnectionType is the type of connection that the agent is receiving. +type ConnectionType string + +const ( + ConnectionTypeSSH ConnectionType = "ssh" + ConnectionTypeVSCode ConnectionType = "vscode" + ConnectionTypeJetBrains ConnectionType = "jetbrains" + ConnectionTypeReconnectingPTY ConnectionType = "reconnecting_pty" + ConnectionTypeWorkspaceApp ConnectionType = "workspace_app" + ConnectionTypePortForwarding ConnectionType = "port_forwarding" +) + +// ConnectionLogStatus is the status of a connection log entry. +// It's the argument to the `status` filter when fetching connection logs. 
+type ConnectionLogStatus string + +const ( + ConnectionLogStatusOngoing ConnectionLogStatus = "ongoing" + ConnectionLogStatusCompleted ConnectionLogStatus = "completed" +) + +func (s ConnectionLogStatus) Valid() bool { + switch s { + case ConnectionLogStatusOngoing, ConnectionLogStatusCompleted: + return true + default: + return false + } +} + +type ConnectionLogWebInfo struct { + UserAgent string `json:"user_agent"` + // User is omitted if the connection event was from an unauthenticated user. + User *User `json:"user"` + SlugOrPort string `json:"slug_or_port"` + // StatusCode is the HTTP status code of the request. + StatusCode int32 `json:"status_code"` +} + +type ConnectionLogSSHInfo struct { + ConnectionID uuid.UUID `json:"connection_id" format:"uuid"` + // DisconnectTime is omitted if a disconnect event with the same connection ID + // has not yet been seen. + DisconnectTime *time.Time `json:"disconnect_time,omitempty" format:"date-time"` + // DisconnectReason is omitted if a disconnect event with the same connection ID + // has not yet been seen. + DisconnectReason string `json:"disconnect_reason,omitempty"` + // ExitCode is the exit code of the SSH session. It is omitted if a + // disconnect event with the same connection ID has not yet been seen. 
+ ExitCode *int32 `json:"exit_code,omitempty"` +} + +type ConnectionLogsRequest struct { + SearchQuery string `json:"q,omitempty"` + Pagination +} + +type ConnectionLogResponse struct { + ConnectionLogs []ConnectionLog `json:"connection_logs"` + Count int64 `json:"count"` +} + +func (c *Client) ConnectionLogs(ctx context.Context, req ConnectionLogsRequest) (ConnectionLogResponse, error) { + res, err := c.Request(ctx, http.MethodGet, "/api/v2/connectionlog", nil, req.Pagination.asRequestOption(), func(r *http.Request) { + q := r.URL.Query() + var params []string + if req.SearchQuery != "" { + params = append(params, req.SearchQuery) + } + q.Set("q", strings.Join(params, " ")) + r.URL.RawQuery = q.Encode() + }) + if err != nil { + return ConnectionLogResponse{}, err + } + defer res.Body.Close() + + if res.StatusCode != http.StatusOK { + return ConnectionLogResponse{}, ReadBodyAsError(res) + } + + var logRes ConnectionLogResponse + err = json.NewDecoder(res.Body).Decode(&logRes) + if err != nil { + return ConnectionLogResponse{}, err + } + return logRes, nil +} diff --git a/codersdk/deployment.go b/codersdk/deployment.go index 266baaee8cd9a..61c3c805a29a9 100644 --- a/codersdk/deployment.go +++ b/codersdk/deployment.go @@ -67,6 +67,7 @@ type FeatureName string const ( FeatureUserLimit FeatureName = "user_limit" FeatureAuditLog FeatureName = "audit_log" + FeatureConnectionLog FeatureName = "connection_log" FeatureBrowserOnly FeatureName = "browser_only" FeatureSCIM FeatureName = "scim" FeatureTemplateRBAC FeatureName = "template_rbac" @@ -90,6 +91,7 @@ const ( var FeatureNames = []FeatureName{ FeatureUserLimit, FeatureAuditLog, + FeatureConnectionLog, FeatureBrowserOnly, FeatureSCIM, FeatureTemplateRBAC, diff --git a/codersdk/rbacresources_gen.go b/codersdk/rbacresources_gen.go index 5ffcfed6b4c35..3e22d29c73297 100644 --- a/codersdk/rbacresources_gen.go +++ b/codersdk/rbacresources_gen.go @@ -9,6 +9,7 @@ const ( ResourceAssignOrgRole RBACResource = "assign_org_role" 
ResourceAssignRole RBACResource = "assign_role" ResourceAuditLog RBACResource = "audit_log" + ResourceConnectionLog RBACResource = "connection_log" ResourceCryptoKey RBACResource = "crypto_key" ResourceDebugInfo RBACResource = "debug_info" ResourceDeploymentConfig RBACResource = "deployment_config" @@ -72,6 +73,7 @@ var RBACResourceActions = map[RBACResource][]RBACAction{ ResourceAssignOrgRole: {ActionAssign, ActionCreate, ActionDelete, ActionRead, ActionUnassign, ActionUpdate}, ResourceAssignRole: {ActionAssign, ActionRead, ActionUnassign}, ResourceAuditLog: {ActionCreate, ActionRead}, + ResourceConnectionLog: {ActionRead, ActionUpdate}, ResourceCryptoKey: {ActionCreate, ActionDelete, ActionRead, ActionUpdate}, ResourceDebugInfo: {ActionRead}, ResourceDeploymentConfig: {ActionRead, ActionUpdate}, diff --git a/codersdk/workspaceagents.go b/codersdk/workspaceagents.go index 2bfae8aac36cf..1eb37bb07c989 100644 --- a/codersdk/workspaceagents.go +++ b/codersdk/workspaceagents.go @@ -421,6 +421,19 @@ type WorkspaceAgentDevcontainer struct { Error string `json:"error,omitempty"` } +func (d WorkspaceAgentDevcontainer) Equals(other WorkspaceAgentDevcontainer) bool { + return d.ID == other.ID && + d.Name == other.Name && + d.WorkspaceFolder == other.WorkspaceFolder && + d.Status == other.Status && + d.Dirty == other.Dirty && + (d.Container == nil && other.Container == nil || + (d.Container != nil && other.Container != nil && d.Container.ID == other.Container.ID)) && + (d.Agent == nil && other.Agent == nil || + (d.Agent != nil && other.Agent != nil && *d.Agent == *other.Agent)) && + d.Error == other.Error +} + // WorkspaceAgentDevcontainerAgent represents the sub agent for a // devcontainer. type WorkspaceAgentDevcontainerAgent struct { @@ -520,6 +533,40 @@ func (c *Client) WorkspaceAgentListContainers(ctx context.Context, agentID uuid. 
return cr, json.NewDecoder(res.Body).Decode(&cr) } +func (c *Client) WatchWorkspaceAgentContainers(ctx context.Context, agentID uuid.UUID) (<-chan WorkspaceAgentListContainersResponse, io.Closer, error) { + reqURL, err := c.URL.Parse(fmt.Sprintf("/api/v2/workspaceagents/%s/containers/watch", agentID)) + if err != nil { + return nil, nil, err + } + + jar, err := cookiejar.New(nil) + if err != nil { + return nil, nil, xerrors.Errorf("create cookie jar: %w", err) + } + + jar.SetCookies(reqURL, []*http.Cookie{{ + Name: SessionTokenCookie, + Value: c.SessionToken(), + }}) + + conn, res, err := websocket.Dial(ctx, reqURL.String(), &websocket.DialOptions{ + CompressionMode: websocket.CompressionDisabled, + HTTPClient: &http.Client{ + Jar: jar, + Transport: c.HTTPClient.Transport, + }, + }) + if err != nil { + if res == nil { + return nil, nil, err + } + return nil, nil, ReadBodyAsError(res) + } + + d := wsjson.NewDecoder[WorkspaceAgentListContainersResponse](conn, websocket.MessageText, c.logger) + return d.Chan(), d, nil +} + // WorkspaceAgentRecreateDevcontainer recreates the devcontainer with the given ID. func (c *Client) WorkspaceAgentRecreateDevcontainer(ctx context.Context, agentID uuid.UUID, devcontainerID string) (Response, error) { res, err := c.Request(ctx, http.MethodPost, fmt.Sprintf("/api/v2/workspaceagents/%s/containers/devcontainers/%s/recreate", agentID, devcontainerID), nil) diff --git a/codersdk/workspaces.go b/codersdk/workspaces.go index c776f2cf5a473..871a9d5b3fd31 100644 --- a/codersdk/workspaces.go +++ b/codersdk/workspaces.go @@ -66,6 +66,12 @@ type Workspace struct { AllowRenames bool `json:"allow_renames"` Favorite bool `json:"favorite"` NextStartAt *time.Time `json:"next_start_at" format:"date-time"` + // IsPrebuild indicates whether the workspace is a prebuilt workspace. + // Prebuilt workspaces are owned by the prebuilds system user and have specific behavior, + // such as being managed differently from regular workspaces. 
+ // Once a prebuilt workspace is claimed by a user, it transitions to a regular workspace, + // and IsPrebuild returns false. + IsPrebuild bool `json:"is_prebuild"` } func (w Workspace) FullName() string { diff --git a/codersdk/workspacesdk/agentconn.go b/codersdk/workspacesdk/agentconn.go index ee0b36e5a0c23..ce66d5e1b8a70 100644 --- a/codersdk/workspacesdk/agentconn.go +++ b/codersdk/workspacesdk/agentconn.go @@ -20,10 +20,14 @@ import ( "tailscale.com/ipn/ipnstate" "tailscale.com/net/speedtest" + "cdr.dev/slog" + "github.com/coder/coder/v2/coderd/tracing" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/codersdk/healthsdk" + "github.com/coder/coder/v2/codersdk/wsjson" "github.com/coder/coder/v2/tailnet" + "github.com/coder/websocket" ) // NewAgentConn creates a new WorkspaceAgentConn. `conn` may be unique @@ -387,6 +391,30 @@ func (c *AgentConn) ListContainers(ctx context.Context) (codersdk.WorkspaceAgent return resp, json.NewDecoder(res.Body).Decode(&resp) } +func (c *AgentConn) WatchContainers(ctx context.Context, logger slog.Logger) (<-chan codersdk.WorkspaceAgentListContainersResponse, io.Closer, error) { + ctx, span := tracing.StartSpan(ctx) + defer span.End() + + host := net.JoinHostPort(c.agentAddress().String(), strconv.Itoa(AgentHTTPAPIServerPort)) + url := fmt.Sprintf("http://%s%s", host, "/api/v0/containers/watch") + + conn, res, err := websocket.Dial(ctx, url, &websocket.DialOptions{ + HTTPClient: c.apiClient(), + }) + if err != nil { + if res == nil { + return nil, nil, err + } + return nil, nil, codersdk.ReadBodyAsError(res) + } + if res != nil && res.Body != nil { + defer res.Body.Close() + } + + d := wsjson.NewDecoder[codersdk.WorkspaceAgentListContainersResponse](conn, websocket.MessageText, logger) + return d.Chan(), d, nil +} + // RecreateDevcontainer recreates a devcontainer with the given container. // This is a blocking call and will wait for the container to be recreated. 
func (c *AgentConn) RecreateDevcontainer(ctx context.Context, devcontainerID string) (codersdk.Response, error) { diff --git a/docs/about/contributing/backend.md b/docs/about/contributing/backend.md index 1ffafa62fb324..ad5d91bcda879 100644 --- a/docs/about/contributing/backend.md +++ b/docs/about/contributing/backend.md @@ -16,9 +16,9 @@ Need help or have questions? Join the conversation on our [Discord server](https To understand how the backend fits into the broader system, we recommend reviewing the following resources: -* [General Concepts](../admin/infrastructure/validated-architectures/index.md#general-concepts): Essential concepts and language used to describe how Coder is structured and operated. +* [General Concepts](../../admin/infrastructure/validated-architectures/index.md#general-concepts): Essential concepts and language used to describe how Coder is structured and operated. -* [Architecture](../admin/infrastructure/architecture.md): A high-level overview of the infrastructure layout, key services, and how components interact. +* [Architecture](../../admin/infrastructure/architecture.md): A high-level overview of the infrastructure layout, key services, and how components interact. These sections provide the necessary context for navigating and contributing to the backend effectively. @@ -168,9 +168,9 @@ There are two types of fixtures that are used to test that migrations don't break existing Coder deployments: * Partial fixtures - [`migrations/testdata/fixtures`](../../coderd/database/migrations/testdata/fixtures) + [`migrations/testdata/fixtures`](../../../coderd/database/migrations/testdata/fixtures) * Full database dumps - [`migrations/testdata/full_dumps`](../../coderd/database/migrations/testdata/full_dumps) + [`migrations/testdata/full_dumps`](../../../coderd/database/migrations/testdata/full_dumps) Both types behave like database migrations (they also [`migrate`](https://github.com/golang-migrate/migrate)). 
Their behavior mirrors @@ -193,7 +193,7 @@ To add a new partial fixture, run the following command: ``` Then add some queries to insert data and commit the file to the repo. See -[`000024_example.up.sql`](../../coderd/database/migrations/testdata/fixtures/000024_example.up.sql) +[`000024_example.up.sql`](../../../coderd/database/migrations/testdata/fixtures/000024_example.up.sql) for an example. To create a full dump, run a fully fledged Coder deployment and use it to diff --git a/docs/about/screenshots.md b/docs/about/screenshots.md index ddf71b823f7fc..dff7ea75946d8 100644 --- a/docs/about/screenshots.md +++ b/docs/about/screenshots.md @@ -2,19 +2,19 @@ ## Log in -![Install Coder in your cloud or air-gapped on-premises. Developers simply log in via their browser to access their Workspaces.](../images/screenshots/login.png) +![Install Coder in your cloud or air-gapped on-premises. Developers simply log in via their browser to access their Workspaces.](../images/screenshots/coder-login.png) Install Coder in your cloud or air-gapped on-premises. Developers simply log in via their browser to access their Workspaces. ## Templates -![Developers provision their own ephemeral Workspaces in minutes using pre-defined Templates that include approved tooling and infrastructure.](../images/screenshots/templates_listing.png) +![Developers provision their own ephemeral Workspaces in minutes using pre-defined Templates that include approved tooling and infrastructure.](../images/screenshots/templates-listing.png) Developers provision their own ephemeral Workspaces in minutes using pre-defined Templates that include approved tooling and infrastructure. 
-![Template administrators can either create a new Template from scratch or choose a Starter Template](../images/screenshots/starter_templates.png) +![Template administrators can either create a new Template from scratch or choose a Starter Template](../images/screenshots/starter-templates.png) Template administrators can either create a new Template from scratch or choose a Starter Template. @@ -26,25 +26,25 @@ underlying infrastructure that Coder Workspaces run on. ## Workspaces -![Developers create and delete their own workspaces. Coder administrators can easily enforce Workspace scheduling and autostop policies to ensure idle Workspaces don’t burn unnecessary cloud budget.](../images/screenshots/workspaces_listing.png) +![Developers create and delete their own workspaces. Coder administrators can easily enforce Workspace scheduling and autostop policies to ensure idle Workspaces don’t burn unnecessary cloud budget.](../images/screenshots/workspaces-listing.png) Developers create and delete their own workspaces. Coder administrators can easily enforce Workspace scheduling and autostop policies to ensure idle Workspaces don’t burn unnecessary cloud budget. -![Developers launch their favorite web-based or desktop IDE, browse files, or access their Workspace’s Terminal.](../images/screenshots/workspace_launch.png) +![Developers launch their favorite web-based or desktop IDE, browse files, or access their Workspace’s Terminal.](../images/screenshots/workspace-running-with-topbar.png) Developers launch their favorite web-based or desktop IDE, browse files, or access their Workspace’s Terminal. 
## Administration -![Coder administrators can access Template usage insights to understand which Templates are most popular and how well they perform for developers.](../images/screenshots/templates_insights.png) +![Coder administrators can access Template usage insights to understand which Templates are most popular and how well they perform for developers.](../images/screenshots/template-insights.png) Coder administrators can access Template usage insights to understand which Templates are most popular and how well they perform for developers. -![Coder administrators can control *every* aspect of their Coder deployment.](../images/screenshots/settings.png) +![Coder administrators can control *every* aspect of their Coder deployment.](../images/screenshots/admin-settings.png) Coder administrators can control *every* aspect of their Coder deployment. diff --git a/docs/admin/infrastructure/validated-architectures/index.md b/docs/admin/infrastructure/validated-architectures/index.md index fee01e777fbfe..6bd18f7f3c132 100644 --- a/docs/admin/infrastructure/validated-architectures/index.md +++ b/docs/admin/infrastructure/validated-architectures/index.md @@ -313,6 +313,44 @@ considerations: active users. - Enable High Availability mode for database engine for large scale deployments. +#### Recommended instance types by cloud provider + +For production deployments, we recommend using dedicated compute instances rather than burstable instances (like AWS t-family) which provide inconsistent CPU performance. 
Below are recommended instance types for each major cloud provider: + +##### AWS (RDS/Aurora PostgreSQL) + +- **Small deployments (<1000 users)**: `db.m6i.large` (2 vCPU, 8 GB RAM) or `db.r6i.large` (2 vCPU, 16 GB RAM) +- **Medium deployments (1000-2000 users)**: `db.m6i.xlarge` (4 vCPU, 16 GB RAM) or `db.r6i.xlarge` (4 vCPU, 32 GB RAM) +- **Large deployments (2000+ users)**: `db.m6i.2xlarge` (8 vCPU, 32 GB RAM) or `db.r6i.2xlarge` (8 vCPU, 64 GB RAM) + +[Comparison](https://instances.vantage.sh/rds?memory_expr=%3E%3D0&vcpus_expr=%3E%3D0&memory_per_vcpu_expr=%3E%3D0&gpu_memory_expr=%3E%3D0&gpus_expr=%3E%3D0&maxips_expr=%3E%3D0&storage_expr=%3E%3D0&filter=db.r6i.large%7Cdb.m6i.large%7Cdb.m6i.xlarge%7Cdb.r6i.xlarge%7Cdb.r6i.2xlarge%7Cdb.m6i.2xlarge®ion=us-east-1&pricing_unit=instance&cost_duration=hourly&reserved_term=yrTerm1Standard.noUpfront&compare_on=true) + +##### Azure (Azure Database for PostgreSQL) + +- **Small deployments (<1000 users)**: `Standard_D2s_v5` (2 vCPU, 8 GB RAM) or `Standard_E2s_v5` (2 vCPU, 16 GB RAM) +- **Medium deployments (1000-2000 users)**: `Standard_D4s_v5` (4 vCPU, 16 GB RAM) or `Standard_E4s_v5` (4 vCPU, 32 GB RAM) +- **Large deployments (2000+ users)**: `Standard_D8s_v5` (8 vCPU, 32 GB RAM) or `Standard_E8s_v5` (8 vCPU, 64 GB RAM) + +[Comparison](https://instances.vantage.sh/azure?memory_expr=%3E%3D0&vcpus_expr=%3E%3D0&memory_per_vcpu_expr=%3E%3D0&gpu_memory_expr=%3E%3D0&gpus_expr=%3E%3D0&maxips_expr=%3E%3D0&storage_expr=%3E%3D0&filter=d2s-v5%7Ce2s-v5%7Cd4s-v5%7Ce4s-v5%7Ce8s-v5%7Cd8s-v5®ion=us-east&pricing_unit=instance&cost_duration=hourly&reserved_term=yrTerm1Standard.allUpfront&compare_on=true) + +##### Google Cloud (Cloud SQL for PostgreSQL) + +- **Small deployments (<1000 users)**: `db-perf-optimized-N-2` (2 vCPU, 16 GB RAM) +- **Medium deployments (1000-2000 users)**: `db-perf-optimized-N-4` (4 vCPU, 32 GB RAM) +- **Large deployments (2000+ users)**: `db-perf-optimized-N-8` (8 vCPU, 64 GB RAM) + 
+[Comparison](https://cloud.google.com/sql/docs/postgres/machine-series-overview#n2) + +##### Storage recommendations + +For optimal database performance, use the following storage types: + +- **AWS RDS/Aurora**: Use `gp3` (General Purpose SSD) volumes with at least 3,000 IOPS for production workloads. For high-performance requirements, consider `io1` or `io2` volumes with provisioned IOPS. + +- **Azure Database for PostgreSQL**: Use Premium SSD (P-series) with appropriate IOPS and throughput provisioning. Standard SSD can be used for development/test environments. + +- **Google Cloud SQL**: Use SSD persistent disks for production workloads. Standard (HDD) persistent disks are suitable only for development or low-performance requirements. + If you enable [database encryption](../../../admin/security/database-encryption.md) in Coder, consider allocating an additional CPU core to every `coderd` replica. diff --git a/docs/admin/monitoring/connection-logs.md b/docs/admin/monitoring/connection-logs.md new file mode 100644 index 0000000000000..b69bb2db186a8 --- /dev/null +++ b/docs/admin/monitoring/connection-logs.md @@ -0,0 +1,111 @@ +# Connection Logs + +> [!NOTE] +> Connection logs require a +> [Premium license](https://coder.com/pricing#compare-plans). +> For more details, [contact your account team](https://coder.com/contact). + +The **Connection Log** page in the dashboard allows Auditors to monitor workspace agent connections. + +## Workspace App Connections + +The connection log contains a complete record of all workspace app connections. +These originate from within the Coder deployment, and thus the connection log +is a source of truth for these events. + +## Browser Port Forwarding + +The connection log contains a complete record of all workspace port forwarding +performed via the dashboard. + +## SSH and IDE Sessions + +The connection log aims to capture a record of all workspace SSH and IDE sessions. 
+These events are reported by workspace agents, and their receipt by the server +is not guaranteed. + +## How to Filter Connection Logs + +You can filter connection logs by the following parameters: + +- `organization` - The name or ID of the organization of the workspace being + connected to. +- `workspace_owner` - The username of the owner of the workspace being connected + to. +- `type` - The type of the connection, such as SSH, VS Code, or workspace app. + For more connection types, refer to the + [CoderSDK documentation](https://pkg.go.dev/github.com/coder/coder/v2/codersdk#ConnectionType). +- `username`: The name of the user who initiated the connection. + Results will not include SSH or IDE sessions. +- `user_email`: The email of the user who initiated the connection. + Results will not include SSH or IDE sessions. +- `connected_after`: The time after which the connection started. + Uses the RFC3339Nano format. +- `connected_before`: The time before which the connection started. + Uses the RFC3339Nano format. +- `workspace_id`: The ID of the workspace being connected to. +- `connection_id`: The ID of the connection. +- `status`: The status of the connection, either `ongoing` or `completed`. + Some events are neither ongoing nor completed, such as the opening of a + workspace app. + +## Capturing/Exporting Connection Logs + +In addition to the Coder dashboard, there are multiple ways to consume or query +connection events. + +### REST API + +You can retrieve connection logs via the Coder API. +Visit the +[`get-connection-logs` endpoint documentation](../../reference/api/enterprise.md#get-connection-logs) +for details. + +### Service Logs + +Connection events are also dispatched as service logs and can be captured and +categorized using any log management tool such as [Splunk](https://splunk.com). 
+ +Example of a [JSON formatted](../../reference/cli/server.md#--log-json) +connection log entry, when an SSH connection is made: + +```json +{ + "ts": "2025-07-03T05:09:41.929840747Z", + "level": "INFO", + "msg": "connection_log", + "caller": "/home/coder/coder/enterprise/audit/backends/slog.go:38", + "func": "github.com/coder/coder/v2/enterprise/audit/backends.(*SlogExporter).ExportStruct", + "logger_names": ["coderd"], + "fields": { + "request_id": "916ad077-e120-4861-8640-f449d56d2bae", + "ID": "ca5dfc63-dc43-463a-bb3e-38526866fd4b", + "OrganizationID": "1a2bb67e-0117-4168-92e0-58138989a7f5", + "WorkspaceOwnerID": "fe8f4bab-3128-41f1-8fec-1cc0755affe5", + "WorkspaceID": "05567e23-31e2-4c00-bd05-4d499d437347", + "WorkspaceName": "dev", + "AgentName": "main", + "Type": "ssh", + "Code": null, + "Ip": "fd7a:115c:a1e0:4b86:9046:80e:6c70:33b7", + "UserAgent": "", + "UserID": null, + "SlugOrPort": "", + "ConnectionID": "7a6fafdc-e3d0-43cb-a1b7-1f19802d7908", + "DisconnectReason": "", + "Time": "2025-07-10T10:14:38.942776145Z", + "ConnectionStatus": "connected" + } +} +``` + +Example of a [human readable](../../reference/cli/server.md#--log-human) +connection log entry, when `code-server` is opened: + +```console +[API] 2025-07-03 06:57:16.157 [info] coderd: connection_log request_id=de3f6004-6cc1-4880-a296-d7c6ca1abf75 ID=f0249951-d454-48f6-9504-e73340fa07b7 Time="2025-07-03T06:57:16.144719Z" OrganizationID=0665a54f-0b77-4a58-94aa-59646fa38a74 WorkspaceOwnerID=6dea5f8c-ecec-4cf0-a5bd-bc2c63af2efa WorkspaceID=3c0b37c8-e58c-4980-b9a1-2732410480a5 WorkspaceName=dev AgentName=main Type=workspace_app Code=200 Ip=127.0.0.1 UserAgent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36" UserID=6dea5f8c-ecec-4cf0-a5bd-bc2c63af2efa SlugOrPort=code-server ConnectionID= DisconnectReason="" ConnectionStatus=connected +``` + +## How to Enable Connection Logs + +This feature is only available with a [Premium 
license](../licensing/index.md). diff --git a/docs/admin/security/audit-logs.md b/docs/admin/security/audit-logs.md index af033d02df2d5..9aca854e46b85 100644 --- a/docs/admin/security/audit-logs.md +++ b/docs/admin/security/audit-logs.md @@ -1,6 +1,11 @@ # Audit Logs -Audit Logs allows **Auditors** to monitor user operations in their deployment. +**Audit Logs** allows Auditors to monitor user operations in their deployment. + +> [!NOTE] +> Audit logs require a +> [Premium license](https://coder.com/pricing#compare-plans). +> For more details, [contact your account team](https://coder.com/contact). ## Tracked Events @@ -30,55 +35,49 @@ We track the following resources: | Template
write, delete | |
FieldTracked
active_version_idtrue
activity_bumptrue
allow_user_autostarttrue
allow_user_autostoptrue
allow_user_cancel_workspace_jobstrue
autostart_block_days_of_weektrue
autostop_requirement_days_of_weektrue
autostop_requirement_weekstrue
created_atfalse
created_bytrue
created_by_avatar_urlfalse
created_by_namefalse
created_by_usernamefalse
default_ttltrue
deletedfalse
deprecatedtrue
descriptiontrue
display_nametrue
failure_ttltrue
group_acltrue
icontrue
idtrue
max_port_sharing_leveltrue
nametrue
organization_display_namefalse
organization_iconfalse
organization_idfalse
organization_namefalse
provisionertrue
require_active_versiontrue
time_til_dormanttrue
time_til_dormant_autodeletetrue
updated_atfalse
use_classic_parameter_flowtrue
user_acltrue
| | TemplateVersion
create, write | |
FieldTracked
archivedtrue
created_atfalse
created_bytrue
created_by_avatar_urlfalse
created_by_namefalse
created_by_usernamefalse
external_auth_providersfalse
has_ai_taskfalse
idtrue
job_idfalse
messagefalse
nametrue
organization_idfalse
readmetrue
source_example_idfalse
template_idtrue
updated_atfalse
| | User
create, write, delete | |
FieldTracked
avatar_urlfalse
created_atfalse
deletedtrue
emailtrue
github_com_user_idfalse
hashed_one_time_passcodefalse
hashed_passwordtrue
idtrue
is_systemtrue
last_seen_atfalse
login_typetrue
nametrue
one_time_passcode_expires_attrue
quiet_hours_scheduletrue
rbac_rolestrue
statustrue
updated_atfalse
usernametrue
| -| WorkspaceAgent
connect, disconnect | |
FieldTracked
api_key_scopefalse
api_versionfalse
architecturefalse
auth_instance_idfalse
auth_tokenfalse
connection_timeout_secondsfalse
created_atfalse
deletedfalse
directoryfalse
disconnected_atfalse
display_appsfalse
display_orderfalse
environment_variablesfalse
expanded_directoryfalse
first_connected_atfalse
idfalse
instance_metadatafalse
last_connected_atfalse
last_connected_replica_idfalse
lifecycle_statefalse
logs_lengthfalse
logs_overflowedfalse
motd_filefalse
namefalse
operating_systemfalse
parent_idfalse
ready_atfalse
resource_idfalse
resource_metadatafalse
started_atfalse
subsystemsfalse
troubleshooting_urlfalse
updated_atfalse
versionfalse
| -| WorkspaceApp
open, close | |
FieldTracked
agent_idfalse
commandfalse
created_atfalse
display_groupfalse
display_namefalse
display_orderfalse
externalfalse
healthfalse
healthcheck_intervalfalse
healthcheck_thresholdfalse
healthcheck_urlfalse
hiddenfalse
iconfalse
idfalse
open_infalse
sharing_levelfalse
slugfalse
subdomainfalse
urlfalse
| | WorkspaceBuild
start, stop | |
FieldTracked
ai_task_sidebar_app_idfalse
build_numberfalse
created_atfalse
daily_costfalse
deadlinefalse
has_ai_taskfalse
idfalse
initiator_by_avatar_urlfalse
initiator_by_namefalse
initiator_by_usernamefalse
initiator_idfalse
job_idfalse
max_deadlinefalse
provisioner_statefalse
reasonfalse
template_version_idtrue
template_version_preset_idfalse
transitionfalse
updated_atfalse
workspace_idfalse
| | WorkspaceProxy
| |
FieldTracked
created_attrue
deletedfalse
derp_enabledtrue
derp_onlytrue
display_nametrue
icontrue
idtrue
nametrue
region_idtrue
token_hashed_secrettrue
updated_atfalse
urltrue
versiontrue
wildcard_hostnametrue
| | WorkspaceTable
| |
FieldTracked
automatic_updatestrue
autostart_scheduletrue
created_atfalse
deletedfalse
deleting_attrue
dormant_attrue
favoritetrue
idtrue
last_used_atfalse
nametrue
next_start_attrue
organization_idfalse
owner_idtrue
template_idtrue
ttltrue
updated_atfalse
| -## Filtering logs - -In the Coder UI you can filter your audit logs using the pre-defined filter or -by using the Coder's filter query like the examples below: +## How to Filter Audit Logs -- `resource_type:workspace action:delete` to find deleted workspaces -- `resource_type:template action:create` to find created templates +You can filter audit logs by the following parameters: -The supported filters are: - -- `resource_type` - The type of the resource. It can be a workspace, template, - user, etc. You can - [find here](https://pkg.go.dev/github.com/coder/coder/v2/codersdk#ResourceType) - all the resource types that are supported. +- `resource_type` - The type of the resource, such as a workspace, template, + or user. For more resource types, refer to the + [CoderSDK package documentation](https://pkg.go.dev/github.com/coder/coder/v2/codersdk#ResourceType). - `resource_id` - The ID of the resource. - `resource_target` - The name of the resource. Can be used instead of `resource_id`. -- `action`- The action applied to a resource. You can - [find here](https://pkg.go.dev/github.com/coder/coder/v2/codersdk#AuditAction) - all the actions that are supported. +- `action`- The action applied to a resource, such as `create` or `delete`. + For more actions, refer to the + [CoderSDK package documentation](https://pkg.go.dev/github.com/coder/coder/v2/codersdk#AuditAction). - `username` - The username of the user who triggered the action. You can also use `me` as a convenient alias for the logged-in user. - `email` - The email of the user who triggered the action. - `date_from` - The inclusive start date with format `YYYY-MM-DD`. - `date_to` - The inclusive end date with format `YYYY-MM-DD`. -- `build_reason` - To be used with `resource_type:workspace_build`, the - [initiator](https://pkg.go.dev/github.com/coder/coder/v2/codersdk#BuildReason) - behind the build start or stop. 
+- `build_reason` - The reason for the workspace build, if `resource_type` is + `workspace_build`. Refer to the + [CoderSDK package documentation](https://pkg.go.dev/github.com/coder/coder/v2/codersdk#BuildReason) + for a list of valid build reasons. ## Capturing/Exporting Audit Logs -In addition to the user interface, there are multiple ways to consume or query +In addition to the Coder dashboard, there are multiple ways to consume or query audit trails. -## REST API +### REST API + +You can retrieve audit logs via the Coder API. -Audit logs can be accessed through our REST API. You can find detailed -information about this in our -[endpoint documentation](../../reference/api/audit.md#get-audit-logs). +Visit the +[`get-audit-logs` endpoint documentation](../../reference/api/audit.md#get-audit-logs) +for details. -## Service Logs +### Service Logs Audit trails are also dispatched as service logs and can be captured and categorized using any log management tool such as [Splunk](https://splunk.com). 
@@ -91,16 +90,16 @@ log entry: "ts": "2023-06-13T03:45:37.294730279Z", "level": "INFO", "msg": "audit_log", - "caller": "/home/runner/work/coder/coder/enterprise/audit/backends/slog.go:36", - "func": "github.com/coder/coder/enterprise/audit/backends.slogBackend.Export", + "caller": "/home/coder/coder/enterprise/audit/backends/slog.go:38", + "func": "github.com/coder/coder/v2/enterprise/audit/backends.(*SlogExporter).ExportStruct", "logger_names": ["coderd"], "fields": { "ID": "033a9ffa-b54d-4c10-8ec3-2aaf9e6d741a", "Time": "2023-06-13T03:45:37.288506Z", "UserID": "6c405053-27e3-484a-9ad7-bcb64e7bfde6", "OrganizationID": "00000000-0000-0000-0000-000000000000", - "Ip": "{IPNet:{IP:\u003cnil\u003e Mask:\u003cnil\u003e} Valid:false}", - "UserAgent": "{String: Valid:false}", + "Ip": null, + "UserAgent": null, "ResourceType": "workspace_build", "ResourceID": "ca5647e0-ef50-4202-a246-717e04447380", "ResourceTarget": "", @@ -126,7 +125,6 @@ log entry: 2023-06-13 03:43:29.233 [info] coderd: audit_log ID=95f7c392-da3e-480c-a579-8909f145fbe2 Time="2023-06-13T03:43:29.230422Z" UserID=6c405053-27e3-484a-9ad7-bcb64e7bfde6 OrganizationID=00000000-0000-0000-0000-000000000000 Ip= UserAgent= ResourceType=workspace_build ResourceID=988ae133-5b73-41e3-a55e-e1e9d3ef0b66 ResourceTarget="" Action=start Diff="{}" StatusCode=200 AdditionalFields="{\"workspace_name\":\"linux-container\",\"build_number\":\"7\",\"build_reason\":\"initiator\",\"workspace_owner\":\"\"}" RequestID=9682b1b5-7b9f-4bf2-9a39-9463f8e41cd6 ResourceIcon="" ``` -## Enabling this feature +## How to Enable Audit Logs -This feature is only available with a premium license. -[Learn more](../licensing/index.md) +This feature is only available with a [Premium license](../licensing/index.md). 
diff --git a/docs/admin/templates/extending-templates/parameters.md b/docs/admin/templates/extending-templates/parameters.md index d29cf8c29c194..5b380645c1b36 100644 --- a/docs/admin/templates/extending-templates/parameters.md +++ b/docs/admin/templates/extending-templates/parameters.md @@ -207,8 +207,8 @@ data "coder_parameter" "dotfiles_url" { Immutable parameters can only be set in these situations: - Creating a workspace for the first time. -- Updating a workspace to a new template version. This sets the initial value - for required parameters. +- Updating a workspace to a new template version. + This sets the initial value for required parameters. The idea is to prevent users from modifying fragile or persistent workspace resources like volumes, regions, and so on. @@ -224,9 +224,8 @@ data "coder_parameter" "region" { } ``` -You can modify a parameter's `mutable` attribute state anytime. In case of -emergency, you can temporarily allow for changing immutable parameters to fix an -operational issue, but it is not advised to overuse this opportunity. +If a required parameter is empty or if the workspace creation page detects an incompatibility between selected +parameters, the **Create workspace** button is disabled until the issues are resolved. 
## Ephemeral parameters diff --git a/docs/images/platforms/docker/create-workspace.png b/docs/images/platforms/docker/create-workspace.png deleted file mode 100644 index 9959244a96f1c..0000000000000 Binary files a/docs/images/platforms/docker/create-workspace.png and /dev/null differ diff --git a/docs/images/platforms/docker/ides.png b/docs/images/platforms/docker/ides.png deleted file mode 100755 index 2293b7af636f1..0000000000000 Binary files a/docs/images/platforms/docker/ides.png and /dev/null differ diff --git a/docs/images/platforms/docker/login.png b/docs/images/platforms/docker/login.png deleted file mode 100755 index c5bad763e92a8..0000000000000 Binary files a/docs/images/platforms/docker/login.png and /dev/null differ diff --git a/docs/images/platforms/kubernetes/region-picker.png b/docs/images/platforms/kubernetes/region-picker.png deleted file mode 100644 index f40a3379010d7..0000000000000 Binary files a/docs/images/platforms/kubernetes/region-picker.png and /dev/null differ diff --git a/docs/images/platforms/kubernetes/starter-template.png b/docs/images/platforms/kubernetes/starter-template.png deleted file mode 100644 index ff81645d73f73..0000000000000 Binary files a/docs/images/platforms/kubernetes/starter-template.png and /dev/null differ diff --git a/docs/images/platforms/kubernetes/template-variables.png b/docs/images/platforms/kubernetes/template-variables.png deleted file mode 100644 index 2d0a9993e4385..0000000000000 Binary files a/docs/images/platforms/kubernetes/template-variables.png and /dev/null differ diff --git a/docs/images/screenshots/admin-settings.png b/docs/images/screenshots/admin-settings.png new file mode 100644 index 0000000000000..0b5c249544e83 Binary files /dev/null and b/docs/images/screenshots/admin-settings.png differ diff --git a/docs/images/screenshots/audit.png b/docs/images/screenshots/audit.png index 5538c67afd8e3..1340179ebc141 100644 Binary files a/docs/images/screenshots/audit.png and 
b/docs/images/screenshots/audit.png differ diff --git a/docs/images/screenshots/coder-login.png b/docs/images/screenshots/coder-login.png new file mode 100644 index 0000000000000..2757c225afff5 Binary files /dev/null and b/docs/images/screenshots/coder-login.png differ diff --git a/docs/images/screenshots/create-template.png b/docs/images/screenshots/create-template.png index e442a8557c42b..ef54f45d47319 100644 Binary files a/docs/images/screenshots/create-template.png and b/docs/images/screenshots/create-template.png differ diff --git a/docs/images/screenshots/healthcheck.png b/docs/images/screenshots/healthcheck.png index 5b42f716ca7b6..73143fbc9f1d7 100644 Binary files a/docs/images/screenshots/healthcheck.png and b/docs/images/screenshots/healthcheck.png differ diff --git a/docs/images/screenshots/login.png b/docs/images/screenshots/login.png deleted file mode 100644 index 9bfe85e9f4cea..0000000000000 Binary files a/docs/images/screenshots/login.png and /dev/null differ diff --git a/docs/images/screenshots/settings.png b/docs/images/screenshots/settings.png deleted file mode 100644 index cf3f19116fb13..0000000000000 Binary files a/docs/images/screenshots/settings.png and /dev/null differ diff --git a/docs/images/screenshots/starter-templates.png b/docs/images/screenshots/starter-templates.png new file mode 100644 index 0000000000000..51ac42c4bce5f Binary files /dev/null and b/docs/images/screenshots/starter-templates.png differ diff --git a/docs/images/screenshots/starter_templates.png b/docs/images/screenshots/starter_templates.png deleted file mode 100644 index 1eab19f2901cd..0000000000000 Binary files a/docs/images/screenshots/starter_templates.png and /dev/null differ diff --git a/docs/images/screenshots/template-insights.png b/docs/images/screenshots/template-insights.png new file mode 100644 index 0000000000000..605f49d780d8e Binary files /dev/null and b/docs/images/screenshots/template-insights.png differ diff --git 
a/docs/images/screenshots/templates-listing.png b/docs/images/screenshots/templates-listing.png new file mode 100644 index 0000000000000..e70158a4d7733 Binary files /dev/null and b/docs/images/screenshots/templates-listing.png differ diff --git a/docs/images/screenshots/templates_insights.png b/docs/images/screenshots/templates_insights.png deleted file mode 100644 index 8375661da2603..0000000000000 Binary files a/docs/images/screenshots/templates_insights.png and /dev/null differ diff --git a/docs/images/screenshots/templates_listing.png b/docs/images/screenshots/templates_listing.png deleted file mode 100644 index e887de4f4e2aa..0000000000000 Binary files a/docs/images/screenshots/templates_listing.png and /dev/null differ diff --git a/docs/images/screenshots/terraform.png b/docs/images/screenshots/terraform.png index d8780d650ea1f..654acb936bbd6 100644 Binary files a/docs/images/screenshots/terraform.png and b/docs/images/screenshots/terraform.png differ diff --git a/docs/images/screenshots/welcome-create-admin-user.png b/docs/images/screenshots/welcome-create-admin-user.png index fcb099bf888d2..c2fb24ebd9730 100644 Binary files a/docs/images/screenshots/welcome-create-admin-user.png and b/docs/images/screenshots/welcome-create-admin-user.png differ diff --git a/docs/images/screenshots/workspace-running-with-topbar.png b/docs/images/screenshots/workspace-running-with-topbar.png index ab3f6a78a9e6e..62b32d46bc3fa 100644 Binary files a/docs/images/screenshots/workspace-running-with-topbar.png and b/docs/images/screenshots/workspace-running-with-topbar.png differ diff --git a/docs/images/screenshots/workspace_launch.png b/docs/images/screenshots/workspace_launch.png deleted file mode 100644 index ab2092e7f5d7d..0000000000000 Binary files a/docs/images/screenshots/workspace_launch.png and /dev/null differ diff --git a/docs/images/screenshots/workspaces-listing.png b/docs/images/screenshots/workspaces-listing.png new file mode 100644 index 
0000000000000..078dfbb4f6532 Binary files /dev/null and b/docs/images/screenshots/workspaces-listing.png differ diff --git a/docs/images/screenshots/workspaces_listing.png b/docs/images/screenshots/workspaces_listing.png deleted file mode 100644 index ee206c100f5ba..0000000000000 Binary files a/docs/images/screenshots/workspaces_listing.png and /dev/null differ diff --git a/docs/images/start/blank-workspaces.png b/docs/images/start/blank-workspaces.png deleted file mode 100644 index 3dcc74020e4b8..0000000000000 Binary files a/docs/images/start/blank-workspaces.png and /dev/null differ diff --git a/docs/images/templates/coder-login-web.png b/docs/images/templates/coder-login-web.png deleted file mode 100644 index 854c305d1b162..0000000000000 Binary files a/docs/images/templates/coder-login-web.png and /dev/null differ diff --git a/docs/manifest.json b/docs/manifest.json index 65555caa0df4f..93f8282c26c4a 100644 --- a/docs/manifest.json +++ b/docs/manifest.json @@ -765,6 +765,12 @@ "description": "Learn about Coder's automated health checks", "path": "./admin/monitoring/health-check.md" }, + { + "title": "Connection Logs", + "description": "Monitor connections to workspaces", + "path": "./admin/monitoring/connection-logs.md", + "state": ["premium"] + }, { "title": "Notifications", "description": "Configure notifications for your deployment", diff --git a/docs/reference/api/agents.md b/docs/reference/api/agents.md index cff5fef6f3f8a..54e9b0e6ad628 100644 --- a/docs/reference/api/agents.md +++ b/docs/reference/api/agents.md @@ -899,6 +899,111 @@ curl -X POST http://coder-server:8080/api/v2/workspaceagents/{workspaceagent}/co To perform this operation, you must be authenticated. [Learn more](authentication.md). 
+## Watch workspace agent for container updates + +### Code samples + +```shell +# Example request using curl +curl -X GET http://coder-server:8080/api/v2/workspaceagents/{workspaceagent}/containers/watch \ + -H 'Accept: application/json' \ + -H 'Coder-Session-Token: API_KEY' +``` + +`GET /workspaceagents/{workspaceagent}/containers/watch` + +### Parameters + +| Name | In | Type | Required | Description | +|------------------|------|--------------|----------|--------------------| +| `workspaceagent` | path | string(uuid) | true | Workspace agent ID | + +### Example responses + +> 200 Response + +```json +{ + "containers": [ + { + "created_at": "2019-08-24T14:15:22Z", + "id": "string", + "image": "string", + "labels": { + "property1": "string", + "property2": "string" + }, + "name": "string", + "ports": [ + { + "host_ip": "string", + "host_port": 0, + "network": "string", + "port": 0 + } + ], + "running": true, + "status": "string", + "volumes": { + "property1": "string", + "property2": "string" + } + } + ], + "devcontainers": [ + { + "agent": { + "directory": "string", + "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", + "name": "string" + }, + "config_path": "string", + "container": { + "created_at": "2019-08-24T14:15:22Z", + "id": "string", + "image": "string", + "labels": { + "property1": "string", + "property2": "string" + }, + "name": "string", + "ports": [ + { + "host_ip": "string", + "host_port": 0, + "network": "string", + "port": 0 + } + ], + "running": true, + "status": "string", + "volumes": { + "property1": "string", + "property2": "string" + } + }, + "dirty": true, + "error": "string", + "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", + "name": "string", + "status": "running", + "workspace_folder": "string" + } + ], + "warnings": [ + "string" + ] +} +``` + +### Responses + +| Status | Meaning | Description | Schema | 
+|--------|---------------------------------------------------------|-------------|----------------------------------------------------------------------------------------------------------| +| 200 | [OK](https://tools.ietf.org/html/rfc7231#section-6.3.1) | OK | [codersdk.WorkspaceAgentListContainersResponse](schemas.md#codersdkworkspaceagentlistcontainersresponse) | + +To perform this operation, you must be authenticated. [Learn more](authentication.md). + ## Coordinate workspace agent ### Code samples diff --git a/docs/reference/api/enterprise.md b/docs/reference/api/enterprise.md index 70821aa64f063..38e22bd85e277 100644 --- a/docs/reference/api/enterprise.md +++ b/docs/reference/api/enterprise.md @@ -207,6 +207,98 @@ curl -X PUT http://coder-server:8080/api/v2/appearance \ To perform this operation, you must be authenticated. [Learn more](authentication.md). +## Get connection logs + +### Code samples + +```shell +# Example request using curl +curl -X GET http://coder-server:8080/api/v2/connectionlog?limit=0 \ + -H 'Accept: application/json' \ + -H 'Coder-Session-Token: API_KEY' +``` + +`GET /connectionlog` + +### Parameters + +| Name | In | Type | Required | Description | +|----------|-------|---------|----------|--------------| +| `q` | query | string | false | Search query | +| `limit` | query | integer | true | Page limit | +| `offset` | query | integer | false | Page offset | + +### Example responses + +> 200 Response + +```json +{ + "connection_logs": [ + { + "agent_name": "string", + "connect_time": "2019-08-24T14:15:22Z", + "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", + "ip": "string", + "organization": { + "display_name": "string", + "icon": "string", + "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", + "name": "string" + }, + "ssh_info": { + "connection_id": "d3547de1-d1f2-4344-b4c2-17169b7526f9", + "disconnect_reason": "string", + "disconnect_time": "2019-08-24T14:15:22Z", + "exit_code": 0 + }, + "type": "ssh", + "web_info": { + "slug_or_port": 
"string", + "status_code": 0, + "user": { + "avatar_url": "http://example.com", + "created_at": "2019-08-24T14:15:22Z", + "email": "user@example.com", + "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", + "last_seen_at": "2019-08-24T14:15:22Z", + "login_type": "", + "name": "string", + "organization_ids": [ + "497f6eca-6276-4993-bfeb-53cbbbba6f08" + ], + "roles": [ + { + "display_name": "string", + "name": "string", + "organization_id": "string" + } + ], + "status": "active", + "theme_preference": "string", + "updated_at": "2019-08-24T14:15:22Z", + "username": "string" + }, + "user_agent": "string" + }, + "workspace_id": "0967198e-ec7b-4c6b-b4d3-f71244cadbe9", + "workspace_name": "string", + "workspace_owner_id": "e7078695-5279-4c86-8774-3ac2367a2fc7", + "workspace_owner_username": "string" + } + ], + "count": 0 +} +``` + +### Responses + +| Status | Meaning | Description | Schema | +|--------|---------------------------------------------------------|-------------|----------------------------------------------------------------------------| +| 200 | [OK](https://tools.ietf.org/html/rfc7231#section-6.3.1) | OK | [codersdk.ConnectionLogResponse](schemas.md#codersdkconnectionlogresponse) | + +To perform this operation, you must be authenticated. [Learn more](authentication.md). 
+ ## Get entitlements ### Code samples diff --git a/docs/reference/api/members.md b/docs/reference/api/members.md index b19c859aa10c1..4b0adbf45e338 100644 --- a/docs/reference/api/members.md +++ b/docs/reference/api/members.md @@ -187,6 +187,7 @@ Status Code **200** | `resource_type` | `assign_org_role` | | `resource_type` | `assign_role` | | `resource_type` | `audit_log` | +| `resource_type` | `connection_log` | | `resource_type` | `crypto_key` | | `resource_type` | `debug_info` | | `resource_type` | `deployment_config` | @@ -356,6 +357,7 @@ Status Code **200** | `resource_type` | `assign_org_role` | | `resource_type` | `assign_role` | | `resource_type` | `audit_log` | +| `resource_type` | `connection_log` | | `resource_type` | `crypto_key` | | `resource_type` | `debug_info` | | `resource_type` | `deployment_config` | @@ -525,6 +527,7 @@ Status Code **200** | `resource_type` | `assign_org_role` | | `resource_type` | `assign_role` | | `resource_type` | `audit_log` | +| `resource_type` | `connection_log` | | `resource_type` | `crypto_key` | | `resource_type` | `debug_info` | | `resource_type` | `deployment_config` | @@ -663,6 +666,7 @@ Status Code **200** | `resource_type` | `assign_org_role` | | `resource_type` | `assign_role` | | `resource_type` | `audit_log` | +| `resource_type` | `connection_log` | | `resource_type` | `crypto_key` | | `resource_type` | `debug_info` | | `resource_type` | `deployment_config` | @@ -1023,6 +1027,7 @@ Status Code **200** | `resource_type` | `assign_org_role` | | `resource_type` | `assign_role` | | `resource_type` | `audit_log` | +| `resource_type` | `connection_log` | | `resource_type` | `crypto_key` | | `resource_type` | `debug_info` | | `resource_type` | `deployment_config` | diff --git a/docs/reference/api/schemas.md b/docs/reference/api/schemas.md index 6ca1cfb9dfe51..053a738413060 100644 --- a/docs/reference/api/schemas.md +++ b/docs/reference/api/schemas.md @@ -1085,6 +1085,228 @@ AuthorizationObject can represent a "set" of 
objects, such as: all workspaces in | `p50` | number | false | | | | `p95` | number | false | | | +## codersdk.ConnectionLog + +```json +{ + "agent_name": "string", + "connect_time": "2019-08-24T14:15:22Z", + "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", + "ip": "string", + "organization": { + "display_name": "string", + "icon": "string", + "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", + "name": "string" + }, + "ssh_info": { + "connection_id": "d3547de1-d1f2-4344-b4c2-17169b7526f9", + "disconnect_reason": "string", + "disconnect_time": "2019-08-24T14:15:22Z", + "exit_code": 0 + }, + "type": "ssh", + "web_info": { + "slug_or_port": "string", + "status_code": 0, + "user": { + "avatar_url": "http://example.com", + "created_at": "2019-08-24T14:15:22Z", + "email": "user@example.com", + "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", + "last_seen_at": "2019-08-24T14:15:22Z", + "login_type": "", + "name": "string", + "organization_ids": [ + "497f6eca-6276-4993-bfeb-53cbbbba6f08" + ], + "roles": [ + { + "display_name": "string", + "name": "string", + "organization_id": "string" + } + ], + "status": "active", + "theme_preference": "string", + "updated_at": "2019-08-24T14:15:22Z", + "username": "string" + }, + "user_agent": "string" + }, + "workspace_id": "0967198e-ec7b-4c6b-b4d3-f71244cadbe9", + "workspace_name": "string", + "workspace_owner_id": "e7078695-5279-4c86-8774-3ac2367a2fc7", + "workspace_owner_username": "string" +} +``` + +### Properties + +| Name | Type | Required | Restrictions | Description | +|----------------------------|----------------------------------------------------------------|----------|--------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `agent_name` | string | false | | | +| `connect_time` | string | false | | | +| `id` | string | false | | | +| `ip` | string | false | | | +| `organization` | 
[codersdk.MinimalOrganization](#codersdkminimalorganization) | false | | | +| `ssh_info` | [codersdk.ConnectionLogSSHInfo](#codersdkconnectionlogsshinfo) | false | | Ssh info is only set when `type` is one of: - `ConnectionTypeSSH` - `ConnectionTypeReconnectingPTY` - `ConnectionTypeVSCode` - `ConnectionTypeJetBrains` | +| `type` | [codersdk.ConnectionType](#codersdkconnectiontype) | false | | | +| `web_info` | [codersdk.ConnectionLogWebInfo](#codersdkconnectionlogwebinfo) | false | | Web info is only set when `type` is one of: - `ConnectionTypePortForwarding` - `ConnectionTypeWorkspaceApp` | +| `workspace_id` | string | false | | | +| `workspace_name` | string | false | | | +| `workspace_owner_id` | string | false | | | +| `workspace_owner_username` | string | false | | | + +## codersdk.ConnectionLogResponse + +```json +{ + "connection_logs": [ + { + "agent_name": "string", + "connect_time": "2019-08-24T14:15:22Z", + "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", + "ip": "string", + "organization": { + "display_name": "string", + "icon": "string", + "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", + "name": "string" + }, + "ssh_info": { + "connection_id": "d3547de1-d1f2-4344-b4c2-17169b7526f9", + "disconnect_reason": "string", + "disconnect_time": "2019-08-24T14:15:22Z", + "exit_code": 0 + }, + "type": "ssh", + "web_info": { + "slug_or_port": "string", + "status_code": 0, + "user": { + "avatar_url": "http://example.com", + "created_at": "2019-08-24T14:15:22Z", + "email": "user@example.com", + "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", + "last_seen_at": "2019-08-24T14:15:22Z", + "login_type": "", + "name": "string", + "organization_ids": [ + "497f6eca-6276-4993-bfeb-53cbbbba6f08" + ], + "roles": [ + { + "display_name": "string", + "name": "string", + "organization_id": "string" + } + ], + "status": "active", + "theme_preference": "string", + "updated_at": "2019-08-24T14:15:22Z", + "username": "string" + }, + "user_agent": "string" + }, + "workspace_id": 
"0967198e-ec7b-4c6b-b4d3-f71244cadbe9", + "workspace_name": "string", + "workspace_owner_id": "e7078695-5279-4c86-8774-3ac2367a2fc7", + "workspace_owner_username": "string" + } + ], + "count": 0 +} +``` + +### Properties + +| Name | Type | Required | Restrictions | Description | +|-------------------|-----------------------------------------------------------|----------|--------------|-------------| +| `connection_logs` | array of [codersdk.ConnectionLog](#codersdkconnectionlog) | false | | | +| `count` | integer | false | | | + +## codersdk.ConnectionLogSSHInfo + +```json +{ + "connection_id": "d3547de1-d1f2-4344-b4c2-17169b7526f9", + "disconnect_reason": "string", + "disconnect_time": "2019-08-24T14:15:22Z", + "exit_code": 0 +} +``` + +### Properties + +| Name | Type | Required | Restrictions | Description | +|---------------------|---------|----------|--------------|---------------------------------------------------------------------------------------------------------------------------------------| +| `connection_id` | string | false | | | +| `disconnect_reason` | string | false | | Disconnect reason is omitted if a disconnect event with the same connection ID has not yet been seen. | +| `disconnect_time` | string | false | | Disconnect time is omitted if a disconnect event with the same connection ID has not yet been seen. | +| `exit_code` | integer | false | | Exit code is the exit code of the SSH session. It is omitted if a disconnect event with the same connection ID has not yet been seen. 
| + +## codersdk.ConnectionLogWebInfo + +```json +{ + "slug_or_port": "string", + "status_code": 0, + "user": { + "avatar_url": "http://example.com", + "created_at": "2019-08-24T14:15:22Z", + "email": "user@example.com", + "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", + "last_seen_at": "2019-08-24T14:15:22Z", + "login_type": "", + "name": "string", + "organization_ids": [ + "497f6eca-6276-4993-bfeb-53cbbbba6f08" + ], + "roles": [ + { + "display_name": "string", + "name": "string", + "organization_id": "string" + } + ], + "status": "active", + "theme_preference": "string", + "updated_at": "2019-08-24T14:15:22Z", + "username": "string" + }, + "user_agent": "string" +} +``` + +### Properties + +| Name | Type | Required | Restrictions | Description | +|----------------|--------------------------------|----------|--------------|---------------------------------------------------------------------------| +| `slug_or_port` | string | false | | | +| `status_code` | integer | false | | Status code is the HTTP status code of the request. | +| `user` | [codersdk.User](#codersdkuser) | false | | User is omitted if the connection event was from an unauthenticated user. 
| +| `user_agent` | string | false | | | + +## codersdk.ConnectionType + +```json +"ssh" +``` + +### Properties + +#### Enumerated Values + +| Value | +|--------------------| +| `ssh` | +| `vscode` | +| `jetbrains` | +| `reconnecting_pty` | +| `workspace_app` | +| `port_forwarding` | + ## codersdk.ConvertLoginRequest ```json @@ -6052,6 +6274,7 @@ Git clone makes use of this by parsing the URL from: 'Username for "https://gith | `assign_org_role` | | `assign_role` | | `audit_log` | +| `connection_log` | | `crypto_key` | | `debug_info` | | `deployment_config` | @@ -8444,6 +8667,7 @@ If the schedule is empty, the user will be updated to use the default schedule.| "healthy": false }, "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", + "is_prebuild": true, "last_used_at": "2019-08-24T14:15:22Z", "latest_app_status": { "agent_id": "2b1e3b65-2c04-4fa2-a2d7-467901e98978", @@ -8683,38 +8907,39 @@ If the schedule is empty, the user will be updated to use the default schedule.| ### Properties -| Name | Type | Required | Restrictions | Description | -|---------------------------------------------|------------------------------------------------------------|----------|--------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `allow_renames` | boolean | false | | | -| `automatic_updates` | [codersdk.AutomaticUpdates](#codersdkautomaticupdates) | false | | | -| `autostart_schedule` | string | false | | | -| `created_at` | string | false | | | -| `deleting_at` | string | false | | Deleting at indicates the time at which the workspace will be permanently deleted. A workspace is eligible for deletion if it is dormant (a non-nil dormant_at value) and a value has been specified for time_til_dormant_autodelete on its template. 
| -| `dormant_at` | string | false | | Dormant at being non-nil indicates a workspace that is dormant. A dormant workspace is no longer accessible must be activated. It is subject to deletion if it breaches the duration of the time_til_ field on its template. | -| `favorite` | boolean | false | | | -| `health` | [codersdk.WorkspaceHealth](#codersdkworkspacehealth) | false | | Health shows the health of the workspace and information about what is causing an unhealthy status. | -| `id` | string | false | | | -| `last_used_at` | string | false | | | -| `latest_app_status` | [codersdk.WorkspaceAppStatus](#codersdkworkspaceappstatus) | false | | | -| `latest_build` | [codersdk.WorkspaceBuild](#codersdkworkspacebuild) | false | | | -| `name` | string | false | | | -| `next_start_at` | string | false | | | -| `organization_id` | string | false | | | -| `organization_name` | string | false | | | -| `outdated` | boolean | false | | | -| `owner_avatar_url` | string | false | | | -| `owner_id` | string | false | | | -| `owner_name` | string | false | | Owner name is the username of the owner of the workspace. 
| -| `template_active_version_id` | string | false | | | -| `template_allow_user_cancel_workspace_jobs` | boolean | false | | | -| `template_display_name` | string | false | | | -| `template_icon` | string | false | | | -| `template_id` | string | false | | | -| `template_name` | string | false | | | -| `template_require_active_version` | boolean | false | | | -| `template_use_classic_parameter_flow` | boolean | false | | | -| `ttl_ms` | integer | false | | | -| `updated_at` | string | false | | | +| Name | Type | Required | Restrictions | Description | +|---------------------------------------------|------------------------------------------------------------|----------|--------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `allow_renames` | boolean | false | | | +| `automatic_updates` | [codersdk.AutomaticUpdates](#codersdkautomaticupdates) | false | | | +| `autostart_schedule` | string | false | | | +| `created_at` | string | false | | | +| `deleting_at` | string | false | | Deleting at indicates the time at which the workspace will be permanently deleted. A workspace is eligible for deletion if it is dormant (a non-nil dormant_at value) and a value has been specified for time_til_dormant_autodelete on its template. | +| `dormant_at` | string | false | | Dormant at being non-nil indicates a workspace that is dormant. A dormant workspace is no longer accessible must be activated. It is subject to deletion if it breaches the duration of the time_til_ field on its template. 
| +| `favorite` | boolean | false | | | +| `health` | [codersdk.WorkspaceHealth](#codersdkworkspacehealth) | false | | Health shows the health of the workspace and information about what is causing an unhealthy status. | +| `id` | string | false | | | +| `is_prebuild` | boolean | false | | Is prebuild indicates whether the workspace is a prebuilt workspace. Prebuilt workspaces are owned by the prebuilds system user and have specific behavior, such as being managed differently from regular workspaces. Once a prebuilt workspace is claimed by a user, it transitions to a regular workspace, and IsPrebuild returns false. | +| `last_used_at` | string | false | | | +| `latest_app_status` | [codersdk.WorkspaceAppStatus](#codersdkworkspaceappstatus) | false | | | +| `latest_build` | [codersdk.WorkspaceBuild](#codersdkworkspacebuild) | false | | | +| `name` | string | false | | | +| `next_start_at` | string | false | | | +| `organization_id` | string | false | | | +| `organization_name` | string | false | | | +| `outdated` | boolean | false | | | +| `owner_avatar_url` | string | false | | | +| `owner_id` | string | false | | | +| `owner_name` | string | false | | Owner name is the username of the owner of the workspace. 
| +| `template_active_version_id` | string | false | | | +| `template_allow_user_cancel_workspace_jobs` | boolean | false | | | +| `template_display_name` | string | false | | | +| `template_icon` | string | false | | | +| `template_id` | string | false | | | +| `template_name` | string | false | | | +| `template_require_active_version` | boolean | false | | | +| `template_use_classic_parameter_flow` | boolean | false | | | +| `ttl_ms` | integer | false | | | +| `updated_at` | string | false | | | #### Enumerated Values @@ -10282,6 +10507,7 @@ If the schedule is empty, the user will be updated to use the default schedule.| "healthy": false }, "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", + "is_prebuild": true, "last_used_at": "2019-08-24T14:15:22Z", "latest_app_status": { "agent_id": "2b1e3b65-2c04-4fa2-a2d7-467901e98978", @@ -11927,7 +12153,7 @@ None | `access_token` | string | false | | Access token is the token that authorizes and authenticates the requests. | | `expires_in` | integer | false | | Expires in is the OAuth2 wire format "expires_in" field, which specifies how many seconds later the token expires, relative to an unknown time base approximately around "now". It is the application's responsibility to populate `Expiry` from `ExpiresIn` when required. | |`expiry`|string|false||Expiry is the optional expiration time of the access token. -If zero, TokenSource implementations will reuse the same token forever and RefreshToken or equivalent mechanisms for that TokenSource will not be used.| +If zero, [TokenSource] implementations will reuse the same token forever and RefreshToken or equivalent mechanisms for that TokenSource will not be used.| |`refresh_token`|string|false||Refresh token is a token that's used by the application (as opposed to the user) to refresh the access token if it expires.| |`token_type`|string|false||Token type is the type of token. 
The Type method returns either this or "Bearer", the default.| diff --git a/docs/reference/api/workspaces.md b/docs/reference/api/workspaces.md index a43a5f2c8fe18..debcb421e02e3 100644 --- a/docs/reference/api/workspaces.md +++ b/docs/reference/api/workspaces.md @@ -67,6 +67,7 @@ of the template will be used. "healthy": false }, "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", + "is_prebuild": true, "last_used_at": "2019-08-24T14:15:22Z", "latest_app_status": { "agent_id": "2b1e3b65-2c04-4fa2-a2d7-467901e98978", @@ -353,6 +354,7 @@ curl -X GET http://coder-server:8080/api/v2/users/{user}/workspace/{workspacenam "healthy": false }, "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", + "is_prebuild": true, "last_used_at": "2019-08-24T14:15:22Z", "latest_app_status": { "agent_id": "2b1e3b65-2c04-4fa2-a2d7-467901e98978", @@ -664,6 +666,7 @@ of the template will be used. "healthy": false }, "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", + "is_prebuild": true, "last_used_at": "2019-08-24T14:15:22Z", "latest_app_status": { "agent_id": "2b1e3b65-2c04-4fa2-a2d7-467901e98978", @@ -953,6 +956,7 @@ curl -X GET http://coder-server:8080/api/v2/workspaces \ "healthy": false }, "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", + "is_prebuild": true, "last_used_at": "2019-08-24T14:15:22Z", "latest_app_status": { "agent_id": "2b1e3b65-2c04-4fa2-a2d7-467901e98978", @@ -1223,6 +1227,7 @@ curl -X GET http://coder-server:8080/api/v2/workspaces/{workspace} \ "healthy": false }, "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", + "is_prebuild": true, "last_used_at": "2019-08-24T14:15:22Z", "latest_app_status": { "agent_id": "2b1e3b65-2c04-4fa2-a2d7-467901e98978", @@ -1625,6 +1630,7 @@ curl -X PUT http://coder-server:8080/api/v2/workspaces/{workspace}/dormant \ "healthy": false }, "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", + "is_prebuild": true, "last_used_at": "2019-08-24T14:15:22Z", "latest_app_status": { "agent_id": "2b1e3b65-2c04-4fa2-a2d7-467901e98978", diff --git a/docs/tutorials/quickstart.md 
b/docs/tutorials/quickstart.md index 595414fd63ccd..7f684fd28c266 100644 --- a/docs/tutorials/quickstart.md +++ b/docs/tutorials/quickstart.md @@ -116,7 +116,7 @@ is installed. ![Create template](../images/screenshots/create-template.png)_Create template_ -1. Select **Create template**. +1. Select **Save**. 1. After the template is ready, select **Create Workspace**. diff --git a/docs/tutorials/template-from-scratch.md b/docs/tutorials/template-from-scratch.md index 22c4c5392001e..3abfdbf940c10 100644 --- a/docs/tutorials/template-from-scratch.md +++ b/docs/tutorials/template-from-scratch.md @@ -351,11 +351,11 @@ use the Coder CLI. 1. In your web browser, enter your credentials: - Log in to your Coder deployment + ![Log in to your Coder deployment](../images/screenshots/coder-login.png) 1. Copy the session token to the clipboard: - Copy session token + ![Copy session token](../images/templates/coder-session-token.png) 1. Paste it into the CLI: diff --git a/docs/user-guides/desktop/index.md b/docs/user-guides/desktop/index.md index d47c2d2a604de..116f7d4d6de69 100644 --- a/docs/user-guides/desktop/index.md +++ b/docs/user-guides/desktop/index.md @@ -1,13 +1,19 @@ # Coder Desktop Coder Desktop provides seamless access to your remote workspaces without the need to install a CLI or configure manual port forwarding. -Connect to workspace services using simple hostnames like `myworkspace.coder`, launch native applications with one click, and synchronize files between local and remote environments. +Connect to workspace services using simple hostnames like `myworkspace.coder`, launch native applications with one click, +and synchronize files between local and remote environments. -> [!NOTE] -> Coder Desktop requires a Coder deployment running [v2.20.0](https://github.com/coder/coder/releases/tag/v2.20.0) or later. +Coder Desktop requires a Coder deployment running [v2.20.0](https://github.com/coder/coder/releases/tag/v2.20.0) or later. 
## Install Coder Desktop +> [!IMPORTANT] +> Coder Desktop can't connect through a corporate VPN. +> +> Due to a [known issue](#coder-desktop-cant-connect-through-another-vpn), +> if your Coder deployment requires that you connect through a corporate VPN, Desktop will timeout when it tries to connect. +
You can install Coder Desktop on macOS or Windows. @@ -113,7 +119,7 @@ Before you can use Coder Desktop, you will need to sign in. ![Coder Desktop on Windows - enable Coder Connect](../../images/user-guides/desktop/coder-desktop-win-enable-coder-connect.png) - This may take a few moments, as Coder Desktop will download the necessary components from the Coder server if they have been updated. + This may take a few moments, because Coder Desktop will download the necessary components from the Coder server if they have been updated. 1. macOS: You may be prompted to enter your password to allow Coder Connect to start. @@ -121,7 +127,26 @@ Before you can use Coder Desktop, you will need to sign in. ## Troubleshooting -Do not install more than one copy of Coder Desktop. To avoid system VPN configuration conflicts, only one copy of `Coder Desktop.app` should exist on your Mac, and it must remain in `/Applications`. +If you encounter an issue with Coder Desktop that is not listed here, file an issue in the GitHub repository for +Coder Desktop for [macOS](https://github.com/coder/coder-desktop-macos/issues) or +[Windows](https://github.com/coder/coder-desktop-windows/issues), in the +[main Coder repository](https://github.com/coder/coder/issues), or consult the +[community on Discord](https://coder.com/chat). + +### Known Issues + +#### macOS: Do not install more than one copy of Coder Desktop + +To avoid system VPN configuration conflicts, only one copy of `Coder Desktop.app` should exist on your Mac, and it must remain in `/Applications`. + +#### Coder Desktop can't connect through another VPN + +If the logged in Coder deployment requires a corporate VPN to connect, Coder Connect can't establish communication +through the VPN, and will time out. + +This is due to known issues with [macOS](https://github.com/coder/coder-desktop-macos/issues/201) and +[Windows](https://github.com/coder/coder-desktop-windows/issues/147) networking. +A resolution is in progress. 
## Next Steps diff --git a/enterprise/audit/backends/slog.go b/enterprise/audit/backends/slog.go index c49ebae296ff0..7418070b49c38 100644 --- a/enterprise/audit/backends/slog.go +++ b/enterprise/audit/backends/slog.go @@ -12,38 +12,34 @@ import ( "github.com/coder/coder/v2/enterprise/audit" ) -type slogBackend struct { +type SlogExporter struct { log slog.Logger } -func NewSlog(logger slog.Logger) audit.Backend { - return &slogBackend{log: logger} +func NewSlogExporter(logger slog.Logger) *SlogExporter { + return &SlogExporter{log: logger} } -func (*slogBackend) Decision() audit.FilterDecision { - return audit.FilterDecisionExport -} - -func (b *slogBackend) Export(ctx context.Context, alog database.AuditLog, details audit.BackendDetails) error { +func (e *SlogExporter) ExportStruct(ctx context.Context, data any, message string, extraFields ...slog.Field) error { // We don't use structs.Map because we don't want to recursively convert // fields into maps. When we keep the type information, slog can more // pleasantly format the output. For example, the clean result of // (*NullString).Value() may be printed instead of {String: "foo", Valid: true}. - sfs := structs.Fields(alog) + sfs := structs.Fields(data) var fields []any for _, sf := range sfs { - fields = append(fields, b.fieldToSlog(sf)) + fields = append(fields, e.fieldToSlog(sf)) } - if details.Actor != nil { - fields = append(fields, slog.F("actor", details.Actor)) + for _, field := range extraFields { + fields = append(fields, field) } - b.log.Info(ctx, "audit_log", fields...) + e.log.Info(ctx, message, fields...) 
return nil } -func (*slogBackend) fieldToSlog(field *structs.Field) slog.Field { +func (*SlogExporter) fieldToSlog(field *structs.Field) slog.Field { val := field.Value() switch ty := field.Value().(type) { @@ -55,3 +51,26 @@ func (*slogBackend) fieldToSlog(field *structs.Field) slog.Field { return slog.F(field.Name(), val) } + +type auditSlogBackend struct { + exporter *SlogExporter +} + +func NewSlog(logger slog.Logger) audit.Backend { + return &auditSlogBackend{ + exporter: NewSlogExporter(logger), + } +} + +func (*auditSlogBackend) Decision() audit.FilterDecision { + return audit.FilterDecisionExport +} + +func (b *auditSlogBackend) Export(ctx context.Context, alog database.AuditLog, details audit.BackendDetails) error { + var extraFields []slog.Field + if details.Actor != nil { + extraFields = append(extraFields, slog.F("actor", details.Actor)) + } + + return b.exporter.ExportStruct(ctx, alog, "audit_log", extraFields...) +} diff --git a/enterprise/audit/backends/slog_test.go b/enterprise/audit/backends/slog_test.go index 5fe3cf70c519a..99be36b3f9d15 100644 --- a/enterprise/audit/backends/slog_test.go +++ b/enterprise/audit/backends/slog_test.go @@ -24,7 +24,7 @@ import ( "github.com/coder/coder/v2/enterprise/audit/backends" ) -func TestSlogBackend(t *testing.T) { +func TestSlogExporter(t *testing.T) { t.Parallel() t.Run("OK", func(t *testing.T) { t.Parallel() @@ -32,30 +32,29 @@ func TestSlogBackend(t *testing.T) { var ( ctx, cancel = context.WithCancel(context.Background()) - sink = &fakeSink{} - logger = slog.Make(sink) - backend = backends.NewSlog(logger) + sink = &fakeSink{} + logger = slog.Make(sink) + exporter = backends.NewSlogExporter(logger) alog = audittest.RandomLog() ) defer cancel() - err := backend.Export(ctx, alog, audit.BackendDetails{}) + err := exporter.ExportStruct(ctx, alog, "audit_log") require.NoError(t, err) require.Len(t, sink.entries, 1) require.Equal(t, sink.entries[0].Message, "audit_log") require.Len(t, sink.entries[0].Fields, 
len(structs.Fields(alog))) }) - t.Run("FormatsCorrectly", func(t *testing.T) { t.Parallel() var ( ctx, cancel = context.WithCancel(context.Background()) - buf = bytes.NewBuffer(nil) - logger = slog.Make(slogjson.Sink(buf)) - backend = backends.NewSlog(logger) + buf = bytes.NewBuffer(nil) + logger = slog.Make(slogjson.Sink(buf)) + exporter = backends.NewSlogExporter(logger) _, inet, _ = net.ParseCIDR("127.0.0.1/32") alog = database.AuditLog{ @@ -81,11 +80,11 @@ func TestSlogBackend(t *testing.T) { ) defer cancel() - err := backend.Export(ctx, alog, audit.BackendDetails{Actor: &audit.Actor{ + err := exporter.ExportStruct(ctx, alog, "audit_log", slog.F("actor", &audit.Actor{ ID: uuid.UUID{2}, Username: "coadler", Email: "doug@coder.com", - }}) + })) require.NoError(t, err) logger.Sync() diff --git a/enterprise/audit/table.go b/enterprise/audit/table.go index 2a563946dc347..6c1f907abfa00 100644 --- a/enterprise/audit/table.go +++ b/enterprise/audit/table.go @@ -27,8 +27,6 @@ var AuditActionMap = map[string][]codersdk.AuditAction{ "Group": {codersdk.AuditActionCreate, codersdk.AuditActionWrite, codersdk.AuditActionDelete}, "APIKey": {codersdk.AuditActionLogin, codersdk.AuditActionLogout, codersdk.AuditActionRegister, codersdk.AuditActionCreate, codersdk.AuditActionDelete}, "License": {codersdk.AuditActionCreate, codersdk.AuditActionDelete}, - "WorkspaceAgent": {codersdk.AuditActionConnect, codersdk.AuditActionDisconnect}, - "WorkspaceApp": {codersdk.AuditActionOpen, codersdk.AuditActionClose}, } type Action string @@ -343,63 +341,6 @@ var auditableResourcesTypes = map[any]map[string]Action{ "field": ActionTrack, "mapping": ActionTrack, }, - &database.WorkspaceAgent{}: { - "id": ActionIgnore, - "created_at": ActionIgnore, - "updated_at": ActionIgnore, - "name": ActionIgnore, - "first_connected_at": ActionIgnore, - "last_connected_at": ActionIgnore, - "disconnected_at": ActionIgnore, - "resource_id": ActionIgnore, - "auth_token": ActionIgnore, - "auth_instance_id": 
ActionIgnore, - "architecture": ActionIgnore, - "environment_variables": ActionIgnore, - "operating_system": ActionIgnore, - "instance_metadata": ActionIgnore, - "resource_metadata": ActionIgnore, - "directory": ActionIgnore, - "version": ActionIgnore, - "last_connected_replica_id": ActionIgnore, - "connection_timeout_seconds": ActionIgnore, - "troubleshooting_url": ActionIgnore, - "motd_file": ActionIgnore, - "lifecycle_state": ActionIgnore, - "expanded_directory": ActionIgnore, - "logs_length": ActionIgnore, - "logs_overflowed": ActionIgnore, - "started_at": ActionIgnore, - "ready_at": ActionIgnore, - "subsystems": ActionIgnore, - "display_apps": ActionIgnore, - "api_version": ActionIgnore, - "display_order": ActionIgnore, - "parent_id": ActionIgnore, - "api_key_scope": ActionIgnore, - "deleted": ActionIgnore, - }, - &database.WorkspaceApp{}: { - "id": ActionIgnore, - "created_at": ActionIgnore, - "agent_id": ActionIgnore, - "display_name": ActionIgnore, - "icon": ActionIgnore, - "command": ActionIgnore, - "url": ActionIgnore, - "healthcheck_url": ActionIgnore, - "healthcheck_interval": ActionIgnore, - "healthcheck_threshold": ActionIgnore, - "health": ActionIgnore, - "subdomain": ActionIgnore, - "sharing_level": ActionIgnore, - "slug": ActionIgnore, - "external": ActionIgnore, - "display_group": ActionIgnore, - "display_order": ActionIgnore, - "hidden": ActionIgnore, - "open_in": ActionIgnore, - }, } // auditMap converts a map of struct pointers to a map of struct names as diff --git a/enterprise/cli/server.go b/enterprise/cli/server.go index 1bf4f31a8506b..3b1fd63ab1c4c 100644 --- a/enterprise/cli/server.go +++ b/enterprise/cli/server.go @@ -87,6 +87,7 @@ func (r *RootCmd) Server(_ func()) *serpent.Command { o := &coderd.Options{ Options: options, AuditLogging: true, + ConnectionLogging: true, BrowserOnly: options.DeploymentValues.BrowserOnly.Value(), SCIMAPIKey: []byte(options.DeploymentValues.SCIMAPIKey.Value()), RBAC: true, diff --git 
a/enterprise/coderd/coderd.go b/enterprise/coderd/coderd.go index bd128b25e2ef4..0d176567713a2 100644 --- a/enterprise/coderd/coderd.go +++ b/enterprise/coderd/coderd.go @@ -22,6 +22,7 @@ import ( agplportsharing "github.com/coder/coder/v2/coderd/portsharing" agplprebuilds "github.com/coder/coder/v2/coderd/prebuilds" "github.com/coder/coder/v2/coderd/rbac/policy" + "github.com/coder/coder/v2/enterprise/coderd/connectionlog" "github.com/coder/coder/v2/enterprise/coderd/enidpsync" "github.com/coder/coder/v2/enterprise/coderd/portsharing" @@ -36,6 +37,7 @@ import ( "github.com/coder/coder/v2/coderd" agplaudit "github.com/coder/coder/v2/coderd/audit" + agplconnectionlog "github.com/coder/coder/v2/coderd/connectionlog" agpldbauthz "github.com/coder/coder/v2/coderd/database/dbauthz" "github.com/coder/coder/v2/coderd/database/dbtime" "github.com/coder/coder/v2/coderd/healthcheck" @@ -123,6 +125,13 @@ func New(ctx context.Context, options *Options) (_ *API, err error) { options.IDPSync = enidpsync.NewSync(options.Logger, options.RuntimeConfig, options.Entitlements, idpsync.FromDeploymentValues(options.DeploymentValues)) } + if options.ConnectionLogger == nil { + options.ConnectionLogger = connectionlog.NewConnectionLogger( + connectionlog.NewDBBackend(options.Database), + connectionlog.NewSlogBackend(options.Logger), + ) + } + api := &API{ ctx: ctx, cancel: cancelFunc, @@ -217,6 +226,13 @@ func New(ctx context.Context, options *Options) (_ *API, err error) { r.Use(apiKeyMiddleware) r.Get("/", api.replicas) }) + r.Route("/connectionlog", func(r chi.Router) { + r.Use( + apiKeyMiddleware, + api.RequireFeatureMW(codersdk.FeatureConnectionLog), + ) + r.Get("/", api.connectionLogs) + }) r.Route("/licenses", func(r chi.Router) { r.Use(apiKeyMiddleware) r.Post("/refresh-entitlements", api.postRefreshEntitlements) @@ -593,8 +609,9 @@ func New(ctx context.Context, options *Options) (_ *API, err error) { type Options struct { *coderd.Options - RBAC bool - AuditLogging bool + RBAC 
bool + AuditLogging bool + ConnectionLogging bool // Whether to block non-browser connections. BrowserOnly bool SCIMAPIKey []byte @@ -695,6 +712,7 @@ func (api *API) updateEntitlements(ctx context.Context) error { ctx, api.Database, len(agedReplicas), len(api.ExternalAuthConfigs), api.LicenseKeys, map[codersdk.FeatureName]bool{ codersdk.FeatureAuditLog: api.AuditLogging, + codersdk.FeatureConnectionLog: api.ConnectionLogging, codersdk.FeatureBrowserOnly: api.BrowserOnly, codersdk.FeatureSCIM: len(api.SCIMAPIKey) != 0, codersdk.FeatureMultipleExternalAuth: len(api.ExternalAuthConfigs) > 1, @@ -733,6 +751,14 @@ func (api *API) updateEntitlements(ctx context.Context) error { api.AGPL.Auditor.Store(&auditor) } + if initial, changed, enabled := featureChanged(codersdk.FeatureConnectionLog); shouldUpdate(initial, changed, enabled) { + connectionLogger := agplconnectionlog.NewNop() + if enabled { + connectionLogger = api.AGPL.Options.ConnectionLogger + } + api.AGPL.ConnectionLogger.Store(&connectionLogger) + } + if initial, changed, enabled := featureChanged(codersdk.FeatureBrowserOnly); shouldUpdate(initial, changed, enabled) { var handler func(rw http.ResponseWriter) bool if enabled { diff --git a/enterprise/coderd/coderdenttest/coderdenttest.go b/enterprise/coderd/coderdenttest/coderdenttest.go index e4088e83d09f5..54dcb9c582628 100644 --- a/enterprise/coderd/coderdenttest/coderdenttest.go +++ b/enterprise/coderd/coderdenttest/coderdenttest.go @@ -59,6 +59,7 @@ func init() { type Options struct { *coderdtest.Options + ConnectionLogging bool AuditLogging bool BrowserOnly bool EntitlementsUpdateInterval time.Duration @@ -100,6 +101,7 @@ func NewWithAPI(t *testing.T, options *Options) ( setHandler, cancelFunc, serverURL, oop := coderdtest.NewOptions(t, options.Options) coderAPI, err := coderd.New(context.Background(), &coderd.Options{ RBAC: true, + ConnectionLogging: options.ConnectionLogging, AuditLogging: options.AuditLogging, BrowserOnly: options.BrowserOnly, 
SCIMAPIKey: options.SCIMAPIKey, diff --git a/enterprise/coderd/connectionlog.go b/enterprise/coderd/connectionlog.go new file mode 100644 index 0000000000000..21f0420f0652d --- /dev/null +++ b/enterprise/coderd/connectionlog.go @@ -0,0 +1,167 @@ +package coderd + +import ( + "net/http" + "net/netip" + + "github.com/google/uuid" + + agpl "github.com/coder/coder/v2/coderd" + "github.com/coder/coder/v2/coderd/database" + "github.com/coder/coder/v2/coderd/database/db2sdk" + "github.com/coder/coder/v2/coderd/database/dbauthz" + "github.com/coder/coder/v2/coderd/httpapi" + "github.com/coder/coder/v2/coderd/httpmw" + "github.com/coder/coder/v2/coderd/searchquery" + "github.com/coder/coder/v2/codersdk" +) + +// @Summary Get connection logs +// @ID get-connection-logs +// @Security CoderSessionToken +// @Produce json +// @Tags Enterprise +// @Param q query string false "Search query" +// @Param limit query int true "Page limit" +// @Param offset query int false "Page offset" +// @Success 200 {object} codersdk.ConnectionLogResponse +// @Router /connectionlog [get] +func (api *API) connectionLogs(rw http.ResponseWriter, r *http.Request) { + ctx := r.Context() + apiKey := httpmw.APIKey(r) + + page, ok := agpl.ParsePagination(rw, r) + if !ok { + return + } + + queryStr := r.URL.Query().Get("q") + filter, countFilter, errs := searchquery.ConnectionLogs(ctx, api.Database, queryStr, apiKey) + if len(errs) > 0 { + httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ + Message: "Invalid connection search query.", + Validations: errs, + }) + return + } + // #nosec G115 - Safe conversion as pagination offset is expected to be within int32 range + filter.OffsetOpt = int32(page.Offset) + // #nosec G115 - Safe conversion as pagination limit is expected to be within int32 range + filter.LimitOpt = int32(page.Limit) + + count, err := api.Database.CountConnectionLogs(ctx, countFilter) + if dbauthz.IsNotAuthorizedError(err) { + httpapi.Forbidden(rw) + return + } + if err != nil { 
+ httpapi.InternalServerError(rw, err) + return + } + + if count == 0 { + httpapi.Write(ctx, rw, http.StatusOK, codersdk.ConnectionLogResponse{ + ConnectionLogs: []codersdk.ConnectionLog{}, + Count: 0, + }) + return + } + + dblogs, err := api.Database.GetConnectionLogsOffset(ctx, filter) + if dbauthz.IsNotAuthorizedError(err) { + httpapi.Forbidden(rw) + return + } + if err != nil { + httpapi.InternalServerError(rw, err) + return + } + + httpapi.Write(ctx, rw, http.StatusOK, codersdk.ConnectionLogResponse{ + ConnectionLogs: convertConnectionLogs(dblogs), + Count: count, + }) +} + +func convertConnectionLogs(dblogs []database.GetConnectionLogsOffsetRow) []codersdk.ConnectionLog { + clogs := make([]codersdk.ConnectionLog, 0, len(dblogs)) + + for _, dblog := range dblogs { + clogs = append(clogs, convertConnectionLog(dblog)) + } + return clogs +} + +func convertConnectionLog(dblog database.GetConnectionLogsOffsetRow) codersdk.ConnectionLog { + ip, _ := netip.AddrFromSlice(dblog.ConnectionLog.Ip.IPNet.IP) + + var user *codersdk.User + if dblog.ConnectionLog.UserID.Valid { + sdkUser := db2sdk.User(database.User{ + ID: dblog.ConnectionLog.UserID.UUID, + Email: dblog.UserEmail.String, + Username: dblog.UserUsername.String, + CreatedAt: dblog.UserCreatedAt.Time, + UpdatedAt: dblog.UserUpdatedAt.Time, + Status: dblog.UserStatus.UserStatus, + RBACRoles: dblog.UserRoles, + LoginType: dblog.UserLoginType.LoginType, + AvatarURL: dblog.UserAvatarUrl.String, + Deleted: dblog.UserDeleted.Bool, + LastSeenAt: dblog.UserLastSeenAt.Time, + QuietHoursSchedule: dblog.UserQuietHoursSchedule.String, + Name: dblog.UserName.String, + }, []uuid.UUID{}) + user = &sdkUser + } + + var ( + webInfo *codersdk.ConnectionLogWebInfo + sshInfo *codersdk.ConnectionLogSSHInfo + ) + + switch dblog.ConnectionLog.Type { + case database.ConnectionTypeWorkspaceApp, + database.ConnectionTypePortForwarding: + webInfo = &codersdk.ConnectionLogWebInfo{ + UserAgent: dblog.ConnectionLog.UserAgent.String, + User: 
user, + SlugOrPort: dblog.ConnectionLog.SlugOrPort.String, + StatusCode: dblog.ConnectionLog.Code.Int32, + } + case database.ConnectionTypeSsh, + database.ConnectionTypeReconnectingPty, + database.ConnectionTypeJetbrains, + database.ConnectionTypeVscode: + sshInfo = &codersdk.ConnectionLogSSHInfo{ + ConnectionID: dblog.ConnectionLog.ConnectionID.UUID, + DisconnectReason: dblog.ConnectionLog.DisconnectReason.String, + } + if dblog.ConnectionLog.DisconnectTime.Valid { + sshInfo.DisconnectTime = &dblog.ConnectionLog.DisconnectTime.Time + } + if dblog.ConnectionLog.Code.Valid { + sshInfo.ExitCode = &dblog.ConnectionLog.Code.Int32 + } + } + + return codersdk.ConnectionLog{ + ID: dblog.ConnectionLog.ID, + ConnectTime: dblog.ConnectionLog.ConnectTime, + Organization: codersdk.MinimalOrganization{ + ID: dblog.ConnectionLog.OrganizationID, + Name: dblog.OrganizationName, + DisplayName: dblog.OrganizationDisplayName, + Icon: dblog.OrganizationIcon, + }, + WorkspaceOwnerID: dblog.ConnectionLog.WorkspaceOwnerID, + WorkspaceOwnerUsername: dblog.WorkspaceOwnerUsername, + WorkspaceID: dblog.ConnectionLog.WorkspaceID, + WorkspaceName: dblog.ConnectionLog.WorkspaceName, + AgentName: dblog.ConnectionLog.AgentName, + Type: codersdk.ConnectionType(dblog.ConnectionLog.Type), + IP: ip, + WebInfo: webInfo, + SSHInfo: sshInfo, + } +} diff --git a/enterprise/coderd/connectionlog/connectionlog.go b/enterprise/coderd/connectionlog/connectionlog.go new file mode 100644 index 0000000000000..e428a13baf183 --- /dev/null +++ b/enterprise/coderd/connectionlog/connectionlog.go @@ -0,0 +1,66 @@ +package connectionlog + +import ( + "context" + + "github.com/hashicorp/go-multierror" + + "cdr.dev/slog" + agpl "github.com/coder/coder/v2/coderd/connectionlog" + "github.com/coder/coder/v2/coderd/database" + "github.com/coder/coder/v2/coderd/database/dbauthz" + auditbackends "github.com/coder/coder/v2/enterprise/audit/backends" +) + +type Backend interface { + Upsert(ctx context.Context, clog 
database.UpsertConnectionLogParams) error +} + +func NewConnectionLogger(backends ...Backend) agpl.ConnectionLogger { + return &connectionLogger{ + backends: backends, + } +} + +type connectionLogger struct { + backends []Backend +} + +func (c *connectionLogger) Upsert(ctx context.Context, clog database.UpsertConnectionLogParams) error { + var errs error + for _, backend := range c.backends { + err := backend.Upsert(ctx, clog) + if err != nil { + errs = multierror.Append(errs, err) + } + } + return errs +} + +type dbBackend struct { + db database.Store +} + +func NewDBBackend(db database.Store) Backend { + return &dbBackend{db: db} +} + +func (b *dbBackend) Upsert(ctx context.Context, clog database.UpsertConnectionLogParams) error { + //nolint:gocritic // This is the Connection Logger + _, err := b.db.UpsertConnectionLog(dbauthz.AsConnectionLogger(ctx), clog) + return err +} + +type connectionSlogBackend struct { + exporter *auditbackends.SlogExporter +} + +func NewSlogBackend(logger slog.Logger) Backend { + return &connectionSlogBackend{ + exporter: auditbackends.NewSlogExporter(logger), + } +} + +func (b *connectionSlogBackend) Upsert(ctx context.Context, clog database.UpsertConnectionLogParams) error { + return b.exporter.ExportStruct(ctx, clog, "connection_log") +} diff --git a/enterprise/coderd/connectionlog_test.go b/enterprise/coderd/connectionlog_test.go new file mode 100644 index 0000000000000..59ff1b780e7b6 --- /dev/null +++ b/enterprise/coderd/connectionlog_test.go @@ -0,0 +1,255 @@ +package coderd_test + +import ( + "context" + "database/sql" + "fmt" + "net" + "testing" + "time" + + "github.com/google/uuid" + "github.com/sqlc-dev/pqtype" + "github.com/stretchr/testify/require" + + "github.com/coder/coder/v2/coderd/database" + "github.com/coder/coder/v2/coderd/database/dbgen" + "github.com/coder/coder/v2/coderd/database/dbtime" + "github.com/coder/coder/v2/codersdk" + "github.com/coder/coder/v2/enterprise/coderd/coderdenttest" + 
"github.com/coder/coder/v2/enterprise/coderd/license" +) + +func TestConnectionLogs(t *testing.T) { + t.Parallel() + + createWorkspace := func(t *testing.T, db database.Store) database.WorkspaceTable { + u := dbgen.User(t, db, database.User{}) + o := dbgen.Organization(t, db, database.Organization{}) + tpl := dbgen.Template(t, db, database.Template{ + OrganizationID: o.ID, + CreatedBy: u.ID, + }) + return dbgen.Workspace(t, db, database.WorkspaceTable{ + ID: uuid.New(), + OwnerID: u.ID, + OrganizationID: o.ID, + AutomaticUpdates: database.AutomaticUpdatesNever, + TemplateID: tpl.ID, + }) + } + + t.Run("OK", func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + client, db, _ := coderdenttest.NewWithDatabase(t, &coderdenttest.Options{ + ConnectionLogging: true, + LicenseOptions: &coderdenttest.LicenseOptions{ + Features: license.Features{ + codersdk.FeatureAuditLog: 1, + codersdk.FeatureConnectionLog: 1, + }, + }, + }) + + ws := createWorkspace(t, db) + _ = dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{ + Type: database.ConnectionTypeSsh, + WorkspaceID: ws.ID, + OrganizationID: ws.OrganizationID, + WorkspaceOwnerID: ws.OwnerID, + }) + + logs, err := client.ConnectionLogs(ctx, codersdk.ConnectionLogsRequest{}) + require.NoError(t, err) + + require.Len(t, logs.ConnectionLogs, 1) + require.EqualValues(t, 1, logs.Count) + require.Equal(t, codersdk.ConnectionTypeSSH, logs.ConnectionLogs[0].Type) + }) + + t.Run("Empty", func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + client, _, _ := coderdenttest.NewWithDatabase(t, &coderdenttest.Options{ + ConnectionLogging: true, + LicenseOptions: &coderdenttest.LicenseOptions{ + Features: license.Features{ + codersdk.FeatureAuditLog: 1, + codersdk.FeatureConnectionLog: 1, + }, + }, + }) + + logs, err := client.ConnectionLogs(ctx, codersdk.ConnectionLogsRequest{}) + require.NoError(t, err) + require.EqualValues(t, 0, logs.Count) + require.Len(t, logs.ConnectionLogs, 0) + }) + + 
t.Run("ByOrganizationIDAndName", func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + client, db, _ := coderdenttest.NewWithDatabase(t, &coderdenttest.Options{ + ConnectionLogging: true, + LicenseOptions: &coderdenttest.LicenseOptions{ + Features: license.Features{ + codersdk.FeatureAuditLog: 1, + codersdk.FeatureConnectionLog: 1, + }, + }, + }) + + org := dbgen.Organization(t, db, database.Organization{}) + ws := createWorkspace(t, db) + _ = dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{ + Type: database.ConnectionTypeSsh, + WorkspaceID: ws.ID, + OrganizationID: org.ID, + WorkspaceOwnerID: ws.OwnerID, + }) + _ = dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{ + Type: database.ConnectionTypeSsh, + WorkspaceID: ws.ID, + OrganizationID: ws.OrganizationID, + WorkspaceOwnerID: ws.OwnerID, + }) + + // By name + logs, err := client.ConnectionLogs(ctx, codersdk.ConnectionLogsRequest{ + SearchQuery: fmt.Sprintf("organization:%s", org.Name), + }) + require.NoError(t, err) + + require.Len(t, logs.ConnectionLogs, 1) + require.Equal(t, org.ID, logs.ConnectionLogs[0].Organization.ID) + + // By ID + logs, err = client.ConnectionLogs(ctx, codersdk.ConnectionLogsRequest{ + SearchQuery: fmt.Sprintf("organization:%s", ws.OrganizationID), + }) + require.NoError(t, err) + + require.Len(t, logs.ConnectionLogs, 1) + require.EqualValues(t, 1, logs.Count) + require.Equal(t, ws.OrganizationID, logs.ConnectionLogs[0].Organization.ID) + }) + + t.Run("WebInfo", func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + client, db, _ := coderdenttest.NewWithDatabase(t, &coderdenttest.Options{ + ConnectionLogging: true, + LicenseOptions: &coderdenttest.LicenseOptions{ + Features: license.Features{ + codersdk.FeatureAuditLog: 1, + codersdk.FeatureConnectionLog: 1, + }, + }, + }) + + now := dbtime.Now() + connID := uuid.New() + ws := createWorkspace(t, db) + clog := dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{ + Time: 
now.Add(-time.Hour), + Type: database.ConnectionTypeWorkspaceApp, + WorkspaceID: ws.ID, + OrganizationID: ws.OrganizationID, + WorkspaceOwnerID: ws.OwnerID, + ConnectionID: uuid.NullUUID{UUID: connID, Valid: true}, + UserAgent: sql.NullString{String: "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36", Valid: true}, + UserID: uuid.NullUUID{UUID: ws.OwnerID, Valid: true}, + SlugOrPort: sql.NullString{String: "code-server", Valid: true}, + }) + + logs, err := client.ConnectionLogs(ctx, codersdk.ConnectionLogsRequest{}) + require.NoError(t, err) + + require.Len(t, logs.ConnectionLogs, 1) + require.EqualValues(t, 1, logs.Count) + require.NotNil(t, logs.ConnectionLogs[0].WebInfo) + require.Equal(t, clog.SlugOrPort.String, logs.ConnectionLogs[0].WebInfo.SlugOrPort) + require.Equal(t, clog.UserAgent.String, logs.ConnectionLogs[0].WebInfo.UserAgent) + require.Equal(t, ws.OwnerID, logs.ConnectionLogs[0].WebInfo.User.ID) + }) + + t.Run("SSHInfo", func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + client, db, _ := coderdenttest.NewWithDatabase(t, &coderdenttest.Options{ + ConnectionLogging: true, + LicenseOptions: &coderdenttest.LicenseOptions{ + Features: license.Features{ + codersdk.FeatureAuditLog: 1, + codersdk.FeatureConnectionLog: 1, + }, + }, + }) + + now := dbtime.Now() + connID := uuid.New() + ws := createWorkspace(t, db) + clog := dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{ + Time: now.Add(-time.Hour), + Type: database.ConnectionTypeSsh, + WorkspaceID: ws.ID, + OrganizationID: ws.OrganizationID, + WorkspaceOwnerID: ws.OwnerID, + ConnectionID: uuid.NullUUID{UUID: connID, Valid: true}, + }) + + logs, err := client.ConnectionLogs(ctx, codersdk.ConnectionLogsRequest{}) + require.NoError(t, err) + + require.Len(t, logs.ConnectionLogs, 1) + require.NotNil(t, logs.ConnectionLogs[0].SSHInfo) + require.Empty(t, logs.ConnectionLogs[0].WebInfo) + require.Empty(t, 
logs.ConnectionLogs[0].SSHInfo.ExitCode) + require.Empty(t, logs.ConnectionLogs[0].SSHInfo.DisconnectTime) + require.Empty(t, logs.ConnectionLogs[0].SSHInfo.DisconnectReason) + + // Mark log as closed + updatedClog := dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{ + Time: now, + OrganizationID: clog.OrganizationID, + Type: clog.Type, + WorkspaceID: clog.WorkspaceID, + WorkspaceOwnerID: clog.WorkspaceOwnerID, + WorkspaceName: clog.WorkspaceName, + AgentName: clog.AgentName, + Code: sql.NullInt32{ + Int32: 0, + Valid: false, + }, + Ip: pqtype.Inet{IPNet: net.IPNet{ + IP: net.ParseIP("192.168.0.1"), + Mask: net.CIDRMask(8, 32), + }, Valid: true}, + + ConnectionID: clog.ConnectionID, + ConnectionStatus: database.ConnectionStatusDisconnected, + DisconnectReason: sql.NullString{ + String: "example close reason", + Valid: true, + }, + }) + + logs, err = client.ConnectionLogs(ctx, codersdk.ConnectionLogsRequest{}) + require.NoError(t, err) + + require.Len(t, logs.ConnectionLogs, 1) + require.EqualValues(t, 1, logs.Count) + require.NotNil(t, logs.ConnectionLogs[0].SSHInfo) + require.Nil(t, logs.ConnectionLogs[0].WebInfo) + require.Equal(t, codersdk.ConnectionTypeSSH, logs.ConnectionLogs[0].Type) + require.Equal(t, clog.ConnectionID.UUID, logs.ConnectionLogs[0].SSHInfo.ConnectionID) + require.True(t, logs.ConnectionLogs[0].SSHInfo.DisconnectTime.Equal(now)) + require.Equal(t, updatedClog.DisconnectReason.String, logs.ConnectionLogs[0].SSHInfo.DisconnectReason) + }) +} diff --git a/enterprise/coderd/dynamicparameters_test.go b/enterprise/coderd/dynamicparameters_test.go index e13d370a059ad..94a4158dc8354 100644 --- a/enterprise/coderd/dynamicparameters_test.go +++ b/enterprise/coderd/dynamicparameters_test.go @@ -338,7 +338,6 @@ func TestDynamicParameterBuild(t *testing.T) { bld, err := templateAdmin.CreateWorkspaceBuild(ctx, wrk.ID, codersdk.CreateWorkspaceBuildRequest{ TemplateVersionID: immutable.ID, // Use the new template version with the immutable 
parameter Transition: codersdk.WorkspaceTransitionDelete, - DryRun: false, }) require.NoError(t, err) coderdtest.AwaitWorkspaceBuildJobCompleted(t, templateAdmin, bld.ID) @@ -354,6 +353,75 @@ func TestDynamicParameterBuild(t *testing.T) { require.NoError(t, err) require.Equal(t, wrk.ID, deleted.ID, "workspace should be deleted") }) + + t.Run("PreviouslyImmutable", func(t *testing.T) { + // Ok this is a weird test to document how things are working. + // What if a parameter flips it's immutability based on a value? + // The current behavior is to source immutability from the new state. + // So the value is allowed to be changed. + t.Parallel() + + ctx := testutil.Context(t, testutil.WaitShort) + // Start with a new template that has 1 parameter that is immutable + immutable, _ := coderdtest.DynamicParameterTemplate(t, templateAdmin, orgID, coderdtest.DynamicParameterTemplateParams{ + MainTF: "# PreviouslyImmutable\n" + string(must(os.ReadFile("testdata/parameters/dynamicimmutable/main.tf"))), + }) + + // Create the workspace with the immutable parameter + wrk, err := templateAdmin.CreateUserWorkspace(ctx, codersdk.Me, codersdk.CreateWorkspaceRequest{ + TemplateID: immutable.ID, + Name: coderdtest.RandomUsername(t), + RichParameterValues: []codersdk.WorkspaceBuildParameter{ + {Name: "isimmutable", Value: "true"}, + {Name: "immutable", Value: "coder"}, + }, + }) + require.NoError(t, err) + coderdtest.AwaitWorkspaceBuildJobCompleted(t, templateAdmin, wrk.LatestBuild.ID) + + // Try new values + _, err = templateAdmin.CreateWorkspaceBuild(ctx, wrk.ID, codersdk.CreateWorkspaceBuildRequest{ + Transition: codersdk.WorkspaceTransitionStart, + RichParameterValues: []codersdk.WorkspaceBuildParameter{ + {Name: "isimmutable", Value: "false"}, + {Name: "immutable", Value: "not-coder"}, + }, + }) + require.NoError(t, err) + }) + + t.Run("PreviouslyMutable", func(t *testing.T) { + // The value cannot be changed because it becomes immutable. 
+ t.Parallel() + + ctx := testutil.Context(t, testutil.WaitShort) + immutable, _ := coderdtest.DynamicParameterTemplate(t, templateAdmin, orgID, coderdtest.DynamicParameterTemplateParams{ + MainTF: "# PreviouslyMutable\n" + string(must(os.ReadFile("testdata/parameters/dynamicimmutable/main.tf"))), + }) + + // Create the workspace with the mutable parameter + wrk, err := templateAdmin.CreateUserWorkspace(ctx, codersdk.Me, codersdk.CreateWorkspaceRequest{ + TemplateID: immutable.ID, + Name: coderdtest.RandomUsername(t), + RichParameterValues: []codersdk.WorkspaceBuildParameter{ + {Name: "isimmutable", Value: "false"}, + {Name: "immutable", Value: "coder"}, + }, + }) + require.NoError(t, err) + coderdtest.AwaitWorkspaceBuildJobCompleted(t, templateAdmin, wrk.LatestBuild.ID) + + // Switch it to immutable, which breaks the validation + _, err = templateAdmin.CreateWorkspaceBuild(ctx, wrk.ID, codersdk.CreateWorkspaceBuildRequest{ + Transition: codersdk.WorkspaceTransitionStart, + RichParameterValues: []codersdk.WorkspaceBuildParameter{ + {Name: "isimmutable", Value: "true"}, + {Name: "immutable", Value: "not-coder"}, + }, + }) + require.Error(t, err) + require.ErrorContains(t, err, "is not mutable") + }) }) } diff --git a/enterprise/coderd/license/license_test.go b/enterprise/coderd/license/license_test.go index d23dc617817f5..5ec28ffa9c294 100644 --- a/enterprise/coderd/license/license_test.go +++ b/enterprise/coderd/license/license_test.go @@ -655,6 +655,7 @@ func TestLicenseEntitlements(t *testing.T) { // maybe some should be moved to "AlwaysEnabled" instead. 
defaultEnablements := map[codersdk.FeatureName]bool{ codersdk.FeatureAuditLog: true, + codersdk.FeatureConnectionLog: true, codersdk.FeatureBrowserOnly: true, codersdk.FeatureSCIM: true, codersdk.FeatureMultipleExternalAuth: true, diff --git a/enterprise/coderd/testdata/parameters/dynamicimmutable/main.tf b/enterprise/coderd/testdata/parameters/dynamicimmutable/main.tf new file mode 100644 index 0000000000000..08bdd3336faa9 --- /dev/null +++ b/enterprise/coderd/testdata/parameters/dynamicimmutable/main.tf @@ -0,0 +1,23 @@ +terraform { + required_providers { + coder = { + source = "coder/coder" + } + } +} + +data "coder_workspace_owner" "me" {} + +data "coder_parameter" "isimmutable" { + name = "isimmutable" + type = "bool" + mutable = true + default = "true" +} + +data "coder_parameter" "immutable" { + name = "immutable" + type = "string" + mutable = data.coder_parameter.isimmutable.value == "false" + default = "Hello World" +} diff --git a/go.mod b/go.mod index 886515cf29dbf..a6d64e1bf5383 100644 --- a/go.mod +++ b/go.mod @@ -130,7 +130,7 @@ require ( github.com/go-logr/logr v1.4.3 github.com/go-playground/validator/v10 v10.27.0 github.com/gofrs/flock v0.12.0 - github.com/gohugoio/hugo v0.147.0 + github.com/gohugoio/hugo v0.148.1 github.com/golang-jwt/jwt/v4 v4.5.2 github.com/golang-migrate/migrate/v4 v4.18.1 github.com/gomarkdown/markdown v0.0.0-20240930133441-72d49d9543d8 @@ -198,18 +198,18 @@ require ( go.uber.org/goleak v1.3.1-0.20240429205332-517bace7cc29 go.uber.org/mock v0.5.0 go4.org/netipx v0.0.0-20230728180743-ad4cb58a6516 - golang.org/x/crypto v0.39.0 + golang.org/x/crypto v0.40.0 golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 - golang.org/x/mod v0.25.0 - golang.org/x/net v0.41.0 - golang.org/x/oauth2 v0.29.0 - golang.org/x/sync v0.15.0 - golang.org/x/sys v0.33.0 - golang.org/x/term v0.32.0 - golang.org/x/text v0.26.0 - golang.org/x/tools v0.33.0 + golang.org/x/mod v0.26.0 + golang.org/x/net v0.42.0 + golang.org/x/oauth2 v0.30.0 + golang.org/x/sync 
v0.16.0 + golang.org/x/sys v0.34.0 + golang.org/x/term v0.33.0 + golang.org/x/text v0.27.0 + golang.org/x/tools v0.34.0 golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da - google.golang.org/api v0.231.0 + google.golang.org/api v0.241.0 google.golang.org/grpc v1.73.0 google.golang.org/protobuf v1.36.6 gopkg.in/DataDog/dd-trace-go.v1 v1.74.0 @@ -222,10 +222,10 @@ require ( ) require ( - cloud.google.com/go/auth v0.16.1 // indirect + cloud.google.com/go/auth v0.16.2 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/logging v1.13.0 // indirect - cloud.google.com/go/longrunning v0.6.4 // indirect + cloud.google.com/go/longrunning v0.6.7 // indirect dario.cat/mergo v1.0.1 // indirect filippo.io/edwards25519 v1.1.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect @@ -251,14 +251,14 @@ require ( github.com/agext/levenshtein v1.2.3 // indirect github.com/agnivade/levenshtein v1.2.1 // indirect github.com/akutz/memconn v0.1.0 // indirect - github.com/alecthomas/chroma/v2 v2.17.0 // indirect + github.com/alecthomas/chroma/v2 v2.19.0 // indirect github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 // indirect github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect github.com/apparentlymart/go-cidr v1.1.0 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/armon/go-radix v1.0.1-0.20221118154546-54df44f2176c // indirect github.com/atotto/clipboard v0.1.4 // indirect - github.com/aws/aws-sdk-go-v2 v1.36.3 + github.com/aws/aws-sdk-go-v2 v1.36.4 github.com/aws/aws-sdk-go-v2/config v1.29.14 github.com/aws/aws-sdk-go-v2/credentials v1.17.67 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect @@ -329,7 +329,7 @@ require ( github.com/google/s2a-go v0.1.9 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect - 
github.com/googleapis/gax-go/v2 v2.14.1 // indirect + github.com/googleapis/gax-go/v2 v2.14.2 // indirect github.com/gorilla/css v1.0.1 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 // indirect @@ -341,7 +341,7 @@ require ( github.com/hashicorp/go-terraform-address v0.0.0-20240523040243-ccea9d309e0c github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/hcl v1.0.1-vault-7 // indirect - github.com/hashicorp/hcl/v2 v2.23.0 + github.com/hashicorp/hcl/v2 v2.24.0 github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-plugin-go v0.27.0 // indirect github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect @@ -383,7 +383,7 @@ require ( github.com/muesli/cancelreader v0.2.2 // indirect github.com/muesli/reflow v0.3.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/niklasfasching/go-org v1.7.0 // indirect + github.com/niklasfasching/go-org v1.8.0 // indirect github.com/oklog/run v1.1.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect @@ -406,7 +406,7 @@ require ( github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/cast v1.8.0 // indirect + github.com/spf13/cast v1.9.2 // indirect github.com/swaggo/files/v2 v2.0.0 // indirect github.com/tadvi/systray v0.0.0-20190226123456-11a2b8fa57af // indirect github.com/tailscale/certstore v0.1.1-0.20220316223106-78d6e1c49d8d // indirect @@ -417,8 +417,7 @@ require ( github.com/tailscale/wireguard-go v0.0.0-20231121184858-cc193a0b3272 github.com/tchap/go-patricia/v2 v2.3.2 // indirect github.com/tcnksm/go-httpstat v0.2.0 // indirect - github.com/tdewolff/parse/v2 v2.7.15 // indirect - github.com/tdewolff/test v1.0.11-0.20240106005702-7de5f7df4739 // indirect + github.com/tdewolff/parse/v2 
v2.8.1 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect github.com/tinylib/msgp v1.2.5 // indirect @@ -436,7 +435,7 @@ require ( github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect github.com/yashtewari/glob-intersection v0.2.0 // indirect - github.com/yuin/goldmark v1.7.10 // indirect + github.com/yuin/goldmark v1.7.12 // indirect github.com/yuin/goldmark-emoji v1.0.6 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/zclconf/go-cty v1.16.3 @@ -447,20 +446,20 @@ require ( go.opentelemetry.io/collector/pdata/pprofile v0.121.0 // indirect go.opentelemetry.io/collector/semconv v0.123.0 // indirect go.opentelemetry.io/contrib v1.19.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect go.opentelemetry.io/otel/metric v1.37.0 // indirect go.opentelemetry.io/proto/otlp v1.5.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect go4.org/mem v0.0.0-20220726221520-4f986261bf13 // indirect - golang.org/x/time v0.11.0 // indirect + golang.org/x/time v0.12.0 // indirect golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6 // indirect golang.zx2c4.com/wireguard/windows v0.5.3 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250425173222-7b384671a197 // indirect + google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250505200425-f936aa4a68b2 // indirect + google.golang.org/genproto/googleapis/rpc 
v0.0.0-20250603155806-513f23925822 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect howett.net/plist v1.0.0 // indirect @@ -485,14 +484,14 @@ require ( github.com/coder/aisdk-go v0.0.9 github.com/coder/preview v1.0.3-0.20250701142654-c3d6e86b9393 github.com/fsnotify/fsnotify v1.9.0 - github.com/mark3labs/mcp-go v0.32.0 + github.com/mark3labs/mcp-go v0.33.0 ) require ( cel.dev/expr v0.23.0 // indirect cloud.google.com/go v0.120.0 // indirect - cloud.google.com/go/iam v1.4.1 // indirect - cloud.google.com/go/monitoring v1.24.0 // indirect + cloud.google.com/go/iam v1.5.2 // indirect + cloud.google.com/go/monitoring v1.24.2 // indirect cloud.google.com/go/storage v1.50.0 // indirect git.sr.ht/~jackmordaunt/go-toast v1.1.2 // indirect github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.64.2 // indirect @@ -533,7 +532,7 @@ require ( github.com/yosida95/uritemplate/v3 v3.0.2 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect go.opentelemetry.io/contrib/detectors/gcp v1.35.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect google.golang.org/genai v1.12.0 // indirect k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect diff --git a/go.sum b/go.sum index ded3464d585b3..9ec986a7ed7ff 100644 --- a/go.sum +++ b/go.sum @@ -101,8 +101,8 @@ cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVo cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/auth v0.16.1 h1:XrXauHMd30LhQYVRHLGvJiYeczweKQXZxsTbV9TiguU= 
-cloud.google.com/go/auth v0.16.1/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI= +cloud.google.com/go/auth v0.16.2 h1:QvBAGFPLrDeoiNjyfVunhQ10HKNYuOwZ5noee0M5df4= +cloud.google.com/go/auth v0.16.2/go.mod h1:sRBas2Y1fB1vZTdurouM0AzuYQBMZinrUYL8EufhtEA= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= @@ -319,8 +319,8 @@ cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGE cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/iam v1.4.1 h1:cFC25Nv+u5BkTR/BT1tXdoF2daiVbZ1RLx2eqfQ9RMM= -cloud.google.com/go/iam v1.4.1/go.mod h1:2vUEJpUG3Q9p2UdsyksaKpDzlwOrnMzS30isdReIcLM= +cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= +cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= @@ -355,8 +355,8 @@ cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhX cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= -cloud.google.com/go/longrunning v0.6.4 h1:3tyw9rO3E2XVXzSApn1gyEEnH2K9SynNQjMlBi3uHLg= -cloud.google.com/go/longrunning 
v0.6.4/go.mod h1:ttZpLCe6e7EXvn9OxpBRx7kZEB0efv8yBO6YnVMfhJs= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= @@ -380,8 +380,8 @@ cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhI cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= -cloud.google.com/go/monitoring v1.24.0 h1:csSKiCJ+WVRgNkRzzz3BPoGjFhjPY23ZTcaenToJxMM= -cloud.google.com/go/monitoring v1.24.0/go.mod h1:Bd1PRK5bmQBQNnuGwHBfUamAV1ys9049oEPHnn4pcsc= +cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= +cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= @@ -565,8 +565,8 @@ cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= -cloud.google.com/go/trace v1.11.3 
h1:c+I4YFjxRQjvAhRmSsmjpASUKq88chOX854ied0K/pE= -cloud.google.com/go/trace v1.11.3/go.mod h1:pt7zCYiDSQjC9Y2oqCsh9jF4GStB/hmjrYLsxRR27q8= +cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4= +cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI= cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= @@ -754,8 +754,8 @@ github.com/awalterschulze/gographviz v2.0.3+incompatible/go.mod h1:GEV5wmg4YquNw github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE= github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= -github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM= -github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= +github.com/aws/aws-sdk-go-v2 v1.36.4 h1:GySzjhVvx0ERP6eyfAbAuAXLtAda5TEy19E5q5W8I9E= +github.com/aws/aws-sdk-go-v2 v1.36.4/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= github.com/aws/aws-sdk-go-v2/config v1.29.14 h1:f+eEi/2cKCg9pqKBoAIwRGzVb70MRKqWX4dg1BDcSJM= github.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g= github.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM= @@ -796,8 +796,8 @@ github.com/bep/clocks v0.5.0 h1:hhvKVGLPQWRVsBP/UB7ErrHYIO42gINVbvqxvYTPVps= github.com/bep/clocks v0.5.0/go.mod h1:SUq3q+OOq41y2lRQqH5fsOoxN8GbxSiT6jvoVVLCVhU= github.com/bep/debounce v1.2.0 h1:wXds8Kq8qRfwAOpAxHrJDbCXgC5aHSzgQb/0gKsHQqo= github.com/bep/debounce v1.2.0/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0= -github.com/bep/gitmap v1.6.0 
h1:sDuQMm9HoTL0LtlrfxjbjgAg2wHQd4nkMup2FInYzhA= -github.com/bep/gitmap v1.6.0/go.mod h1:n+3W1f/rot2hynsqEGxGMErPRgT41n9CkGuzPvz9cIw= +github.com/bep/gitmap v1.9.0 h1:2pyb1ex+cdwF6c4tsrhEgEKfyNfxE34d5K+s2sa9byc= +github.com/bep/gitmap v1.9.0/go.mod h1:Juq6e1qqCRvc1W7nzgadPGI9IGV13ZncEebg5atj4Vo= github.com/bep/goat v0.5.0 h1:S8jLXHCVy/EHIoCY+btKkmcxcXFd34a0Q63/0D4TKeA= github.com/bep/goat v0.5.0/go.mod h1:Md9x7gRxiWKs85yHlVTvHQw9rg86Bm+Y4SuYE8CTH7c= github.com/bep/godartsass/v2 v2.5.0 h1:tKRvwVdyjCIr48qgtLa4gHEdtRkPF8H1OeEhJAEv7xg= @@ -1041,8 +1041,8 @@ github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6 github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM= github.com/esiqveland/notify v0.13.3 h1:QCMw6o1n+6rl+oLUfg8P1IIDSFsDEb2WlXvVvIJbI/o= github.com/esiqveland/notify v0.13.3/go.mod h1:hesw/IRYTO0x99u1JPweAl4+5mwXJibQVUcP0Iu5ORE= -github.com/evanw/esbuild v0.25.3 h1:4JKyUsm/nHDhpxis4IyWXAi8GiyTwG1WdEp6OhGVE8U= -github.com/evanw/esbuild v0.25.3/go.mod h1:D2vIQZqV/vIf/VRHtViaUtViZmG7o+kKmlBfVQuRi48= +github.com/evanw/esbuild v0.25.6 h1:LBEfbUJ7Krynyks4JzBjLS2sWUxrD9zcQEKnrscEHqA= +github.com/evanw/esbuild v0.25.6/go.mod h1:D2vIQZqV/vIf/VRHtViaUtViZmG7o+kKmlBfVQuRi48= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= @@ -1075,8 +1075,8 @@ github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3G github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8= github.com/gen2brain/beeep v0.11.1 h1:EbSIhrQZFDj1K2fzlMpAYlFOzV8YuNe721A58XcCTYI= github.com/gen2brain/beeep v0.11.1/go.mod h1:jQVvuwnLuwOcdctHn/uyh8horSBNJ8uGb9Cn2W4tvoc= -github.com/getkin/kin-openapi v0.131.0 h1:NO2UeHnFKRYhZ8wg6Nyh5Cq7dHk4suQQr72a4pMrDxE= 
-github.com/getkin/kin-openapi v0.131.0/go.mod h1:3OlG51PCYNsPByuiMB0t4fjnNlIDnaEDsjiKUV8nL58= +github.com/getkin/kin-openapi v0.132.0 h1:3ISeLMsQzcb5v26yeJrBcdTCEQTag36ZjaGk7MIRUwk= +github.com/getkin/kin-openapi v0.132.0/go.mod h1:3OlG51PCYNsPByuiMB0t4fjnNlIDnaEDsjiKUV8nL58= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/github/fakeca v0.1.0 h1:Km/MVOFvclqxPM9dZBC4+QE564nU4gz4iZ0D9pMw28I= @@ -1175,8 +1175,8 @@ github.com/gohugoio/hashstructure v0.5.0 h1:G2fjSBU36RdwEJBWJ+919ERvOVqAg9tfcYp4 github.com/gohugoio/hashstructure v0.5.0/go.mod h1:Ser0TniXuu/eauYmrwM4o64EBvySxNzITEOLlm4igec= github.com/gohugoio/httpcache v0.7.0 h1:ukPnn04Rgvx48JIinZvZetBfHaWE7I01JR2Q2RrQ3Vs= github.com/gohugoio/httpcache v0.7.0/go.mod h1:fMlPrdY/vVJhAriLZnrF5QpN3BNAcoBClgAyQd+lGFI= -github.com/gohugoio/hugo v0.147.0 h1:o9i3fbSRBksHLGBZvEfV/TlTTxszMECr2ktQaen1Y+8= -github.com/gohugoio/hugo v0.147.0/go.mod h1:5Fpy/TaZoP558OTBbttbVKa/Ty6m/ojfc2FlKPRhg8M= +github.com/gohugoio/hugo v0.148.1 h1:mOKLD5Ucyb77tEEILJkRzgHmGW0/x4x19Kpu3K11ROE= +github.com/gohugoio/hugo v0.148.1/go.mod h1:z/FL0CwJm9Ue/xFdMEVO4VOqogDuSlknCG9UnjBKkRk= github.com/gohugoio/hugo-goldmark-extensions/extras v0.3.0 h1:gj49kTR5Z4Hnm0ZaQrgPVazL3DUkppw+x6XhHCmh+Wk= github.com/gohugoio/hugo-goldmark-extensions/extras v0.3.0/go.mod h1:IMMj7xiUbLt1YNJ6m7AM4cnsX4cFnnfkleO/lBHGzUg= github.com/gohugoio/hugo-goldmark-extensions/passthrough v0.3.1 h1:nUzXfRTszLliZuN0JTKeunXTRaiFX6ksaWP0puLLYAY= @@ -1322,8 +1322,8 @@ github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqE github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= -github.com/googleapis/gax-go/v2 
v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= -github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= +github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0= +github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8= github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0= @@ -1376,8 +1376,8 @@ github.com/hashicorp/hc-install v0.9.2 h1:v80EtNX4fCVHqzL9Lg/2xkp62bbvQMnvPQ0G+O github.com/hashicorp/hc-install v0.9.2/go.mod h1:XUqBQNnuT4RsxoxiM9ZaUk0NX8hi2h+Lb6/c0OZnC/I= github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I= github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= -github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos= -github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/hashicorp/hcl/v2 v2.24.0 h1:2QJdZ454DSsYGoaE6QheQZjtKZSUs9Nh2izTWiwQxvE= +github.com/hashicorp/hcl/v2 v2.24.0/go.mod h1:oGoO1FIQYfn/AgyOhlg9qLC6/nOJPX3qGbkZpYAcqfM= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/terraform-exec v0.23.0 h1:MUiBM1s0CNlRFsCLJuM5wXZrzA3MnPYEsiXmzATMW/I= @@ -1503,8 +1503,8 @@ github.com/makeworld-the-better-one/dither/v2 v2.4.0 h1:Az/dYXiTcwcRSe59Hzw4RI1r github.com/makeworld-the-better-one/dither/v2 v2.4.0/go.mod h1:VBtN8DXO7SNtyGmLiGA7IsFeKrBkQPze1/iAeM95arc= github.com/marekm4/color-extractor v1.2.1 h1:3Zb2tQsn6bITZ8MBVhc33Qn1k5/SEuZ18mrXGUqIwn0= github.com/marekm4/color-extractor v1.2.1/go.mod 
h1:90VjmiHI6M8ez9eYUaXLdcKnS+BAOp7w+NpwBdkJmpA= -github.com/mark3labs/mcp-go v0.32.0 h1:fgwmbfL2gbd67obg57OfV2Dnrhs1HtSdlY/i5fn7MU8= -github.com/mark3labs/mcp-go v0.32.0/go.mod h1:rXqOudj/djTORU/ThxYx8fqEVj/5pvTuuebQ2RC7uk4= +github.com/mark3labs/mcp-go v0.33.0 h1:naxhjnTIs/tyPZmWUZFuG0lDmdA6sUyYGGf3gsHvTCc= +github.com/mark3labs/mcp-go v0.33.0/go.mod h1:rXqOudj/djTORU/ThxYx8fqEVj/5pvTuuebQ2RC7uk4= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -1597,16 +1597,20 @@ github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0 github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM= github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ= github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8= -github.com/niklasfasching/go-org v1.7.0 h1:vyMdcMWWTe/XmANk19F4k8XGBYg0GQ/gJGMimOjGMek= -github.com/niklasfasching/go-org v1.7.0/go.mod h1:WuVm4d45oePiE0eX25GqTDQIt/qPW1T9DGkRscqLW5o= +github.com/niklasfasching/go-org v1.8.0 h1:WyGLaajLLp8JbQzkmapZ1y0MOzKuKV47HkZRloi+HGY= +github.com/niklasfasching/go-org v1.8.0/go.mod h1:e2A9zJs7cdONrEGs3gvxCcaAEpwwPNPG7csDpXckMNg= github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 h1:G7ERwszslrBzRxj//JalHPu/3yz+De2J+4aLtSRlHiY= github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037/go.mod h1:2bpvgLBZEtENV5scfDFEtB/5+1M4hkQhDQrccEJ/qGw= github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 h1:bQx3WeLcUWy+RletIKwUIt4x3t8n2SxavmoclizMb8c= github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o= github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= 
github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= -github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/olekukonko/errors v0.0.0-20250405072817-4e6d85265da6 h1:r3FaAI0NZK3hSmtTDrBVREhKULp8oUeqLT5Eyl2mSPo= +github.com/olekukonko/errors v0.0.0-20250405072817-4e6d85265da6/go.mod h1:ppzxA5jBKcO1vIpCXQ9ZqgDh8iwODz6OXIGKU8r5m4Y= +github.com/olekukonko/ll v0.0.8 h1:sbGZ1Fx4QxJXEqL/6IG8GEFnYojUSQ45dJVwN2FH2fc= +github.com/olekukonko/ll v0.0.8/go.mod h1:En+sEW0JNETl26+K8eZ6/W4UQ7CYSrrgg/EdIYT2H8g= +github.com/olekukonko/tablewriter v1.0.8 h1:f6wJzHg4QUtJdvrVPKco4QTrAylgaU0+b9br/lJxEiQ= +github.com/olekukonko/tablewriter v1.0.8/go.mod h1:H428M+HzoUXC6JU2Abj9IT9ooRmdq9CxuDmKMtrOCMs= github.com/open-policy-agent/opa v1.4.2 h1:ag4upP7zMsa4WE2p1pwAFeG4Pn3mNwfAx9DLhhJfbjU= github.com/open-policy-agent/opa v1.4.2/go.mod h1:DNzZPKqKh4U0n0ANxcCVlw8lCSv2c+h5G/3QvSYdWZ8= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.120.1 h1:lK/3zr73guK9apbXTcnDnYrC0YCQ25V3CIULYz3k2xU= @@ -1733,8 +1737,8 @@ github.com/sosedoff/gitkit v0.4.0/go.mod h1:V3EpGZ0nvCBhXerPsbDeqtyReNb48cwP9Ktk github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.8.0 h1:gEN9K4b8Xws4EX0+a0reLmhq8moKn7ntRlQYgjPeCDk= -github.com/spf13/cast v1.8.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.9.2 h1:SsGfm7M8QOFtEzumm7UZrZdLLquNdzFYfIbEXntcFbE= +github.com/spf13/cast v1.9.2/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag 
v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= @@ -1785,13 +1789,12 @@ github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA= github.com/tc-hib/winres v0.2.1/go.mod h1:C/JaNhH3KBvhNKVbvdlDWkbMDO9H4fKKDaN7/07SSuk= github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM= github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= -github.com/tdewolff/minify/v2 v2.20.37 h1:Q97cx4STXCh1dlWDlNHZniE8BJ2EBL0+2b0n92BJQhw= -github.com/tdewolff/minify/v2 v2.20.37/go.mod h1:L1VYef/jwKw6Wwyk5A+T0mBjjn3mMPgmjjA688RNsxU= -github.com/tdewolff/parse/v2 v2.7.15 h1:hysDXtdGZIRF5UZXwpfn3ZWRbm+ru4l53/ajBRGpCTw= -github.com/tdewolff/parse/v2 v2.7.15/go.mod h1:3FbJWZp3XT9OWVN3Hmfp0p/a08v4h8J9W1aghka0soA= -github.com/tdewolff/test v1.0.11-0.20231101010635-f1265d231d52/go.mod h1:6DAvZliBAAnD7rhVgwaM7DE5/d9NMOAJ09SqYqeK4QE= -github.com/tdewolff/test v1.0.11-0.20240106005702-7de5f7df4739 h1:IkjBCtQOOjIn03u/dMQK9g+Iw9ewps4mCl1nB8Sscbo= -github.com/tdewolff/test v1.0.11-0.20240106005702-7de5f7df4739/go.mod h1:XPuWBzvdUzhCuxWO1ojpXsyzsA5bFoS3tO/Q3kFuTG8= +github.com/tdewolff/minify/v2 v2.23.8 h1:tvjHzRer46kwOfpdCBCWsDblCw3QtnLJRd61pTVkyZ8= +github.com/tdewolff/minify/v2 v2.23.8/go.mod h1:VW3ISUd3gDOZuQ/jwZr4sCzsuX+Qvsx87FDMjk6Rvno= +github.com/tdewolff/parse/v2 v2.8.1 h1:J5GSHru6o3jF1uLlEKVXkDxxcVx6yzOlIVIotK4w2po= +github.com/tdewolff/parse/v2 v2.8.1/go.mod h1:Hwlni2tiVNKyzR1o6nUs4FOF07URA+JLBLd6dlIXYqo= +github.com/tdewolff/test v1.0.11 h1:FdLbwQVHxqG16SlkGveC0JVyrJN62COWTRyUFzfbtBE= +github.com/tdewolff/test v1.0.11/go.mod h1:XPuWBzvdUzhCuxWO1ojpXsyzsA5bFoS3tO/Q3kFuTG8= github.com/testcontainers/testcontainers-go v0.37.0 h1:L2Qc0vkTw2EHWQ08djon0D2uw7Z/PtHS/QzZZ5Ra/hg= github.com/testcontainers/testcontainers-go v0.37.0/go.mod h1:QPzbxZhQ6Bclip9igjLFj6z0hs01bU8lrl2dHQmgFGM= 
github.com/testcontainers/testcontainers-go/modules/localstack v0.37.0 h1:nPuxUYseqS0eYJg7KDJd95PhoMhdpTnSNtkDLwWFngo= @@ -1882,8 +1885,8 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yuin/goldmark v1.7.10 h1:S+LrtBjRmqMac2UdtB6yyCEJm+UILZ2fefI4p7o0QpI= -github.com/yuin/goldmark v1.7.10/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg= +github.com/yuin/goldmark v1.7.12 h1:YwGP/rrea2/CnCtUHgjuolG/PnMxdQtPMO5PvaE2/nY= +github.com/yuin/goldmark v1.7.12/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg= github.com/yuin/goldmark-emoji v1.0.6 h1:QWfF2FYaXwL74tfGOW5izeiZepUDroDJfWubQI9HTHs= github.com/yuin/goldmark-emoji v1.0.6/go.mod h1:ukxJDKFpdFb5x0a5HqbdlcKtebh086iJpI31LTKmWuA= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= @@ -1939,10 +1942,10 @@ go.opentelemetry.io/contrib v1.19.0 h1:rnYI7OEPMWFeM4QCqWQ3InMJ0arWMR1i0Cx9A5hcj go.opentelemetry.io/contrib v1.19.0/go.mod h1:gIzjwWFoGazJmtCaDgViqOSJPde2mCWzv60o0bWPcZs= go.opentelemetry.io/contrib/detectors/gcp v1.35.0 h1:bGvFt68+KTiAKFlacHW6AhA56GF2rS0bdD3aJYEnmzA= go.opentelemetry.io/contrib/detectors/gcp v1.35.0/go.mod h1:qGWP8/+ILwMRIUf9uIVLloR1uo5ZYAslM4O6OqUi1DA= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod 
h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= @@ -2001,8 +2004,8 @@ golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDf golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= -golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= -golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= +golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2033,8 +2036,8 @@ golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeap golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod 
h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.26.0 h1:4XjIFEZWQmCZi6Wv8BoxsDhRU3RVnLX04dToTDAEPlY= -golang.org/x/image v0.26.0/go.mod h1:lcxbMFAovzpnJxzXS3nyL83K27tmqtKzIJpctK8YO5c= +golang.org/x/image v0.28.0 h1:gdem5JW1OLS4FbkWgLO+7ZeFzYtL3xClb97GaUzYMFE= +golang.org/x/image v0.28.0/go.mod h1:GUJYXtnGKEUgggyzh+Vxt+AviiCcyiwpsl8iQ8MvwGY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -2067,8 +2070,8 @@ golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= -golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg= +golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2131,8 +2134,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net 
v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= +golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2162,8 +2165,8 @@ golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= -golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= -golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2185,8 +2188,8 @@ golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod 
h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= -golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2283,8 +2286,8 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= +golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2303,8 +2306,8 @@ golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod 
h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= -golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= -golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= +golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= +golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2327,16 +2330,16 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= -golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= -golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= +golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= -golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2401,8 +2404,8 @@ golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= -golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= +golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= +golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2484,8 +2487,8 @@ google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/ google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= google.golang.org/api v0.111.0/go.mod 
h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= -google.golang.org/api v0.231.0 h1:LbUD5FUl0C4qwia2bjXhCMH65yz1MLPzA/0OYEsYY7Q= -google.golang.org/api v0.231.0/go.mod h1:H52180fPI/QQlUc0F4xWfGZILdv09GCWKt2bcsn164A= +google.golang.org/api v0.241.0 h1:QKwqWQlkc6O895LchPEDUSYr22Xp3NCxpQRiWTB6avE= +google.golang.org/api v0.241.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2626,12 +2629,12 @@ google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOl google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= -google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE= -google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE= -google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 h1:hE3bRWtU6uceqlh4fhrSnUyjKHMKB9KrTLLG+bc0ddM= -google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463/go.mod h1:U90ffi8eUL9MwPcrJylN5+Mk2v3vuPDptd5yyNUiRR8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250425173222-7b384671a197 h1:29cjnHVylHwTzH66WfFZqgSQgnxzvWE+jvBwpZCLRxY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250425173222-7b384671a197/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto 
v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= +google.golang.org/genproto/googleapis/api v0.0.0-20250505200425-f936aa4a68b2 h1:vPV0tzlsK6EzEDHNNH5sa7Hs9bd7iXR7B1tSiPepkV0= +google.golang.org/genproto/googleapis/api v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:pKLAc5OolXC3ViWGI62vvC0n10CpwAtRcTNCFwTKBEw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= diff --git a/site/package.json b/site/package.json index 1512a803b0a96..e3a99b9d8eebf 100644 --- a/site/package.json +++ b/site/package.json @@ -120,6 +120,7 @@ "undici": "6.21.2", "unique-names-generator": "4.7.1", "uuid": "9.0.1", + "websocket-ts": "2.2.1", "yup": "1.6.1" }, "devDependencies": { diff --git a/site/pnpm-lock.yaml b/site/pnpm-lock.yaml index 62cdc6176092a..3c7f5176b5b6b 100644 --- a/site/pnpm-lock.yaml +++ b/site/pnpm-lock.yaml @@ -274,6 +274,9 @@ importers: uuid: specifier: 9.0.1 version: 9.0.1 + websocket-ts: + specifier: 2.2.1 + version: 2.2.1 yup: specifier: 1.6.1 version: 1.6.1 @@ -6344,6 +6347,9 @@ packages: webpack-virtual-modules@0.5.0: resolution: {integrity: sha512-kyDivFZ7ZM0BVOUteVbDFhlRt7Ah/CSPwJdi8hBpkK7QLumUqdLtVfm/PX/hkcnrvr0i77fO5+TjZ94Pe+C9iw==, tarball: https://registry.npmjs.org/webpack-virtual-modules/-/webpack-virtual-modules-0.5.0.tgz} + websocket-ts@2.2.1: + resolution: {integrity: 
sha512-YKPDfxlK5qOheLZ2bTIiktZO1bpfGdNCPJmTEaPW7G9UXI1GKjDdeacOrsULUS000OPNxDVOyAuKLuIWPqWM0Q==, tarball: https://registry.npmjs.org/websocket-ts/-/websocket-ts-2.2.1.tgz} + whatwg-encoding@2.0.0: resolution: {integrity: sha512-p41ogyeMUrw3jWclHWTQg1k05DSVXPLcVxRTYsXUk+ZooOCZLcoYgPZ/HL/D/N+uQPOtcp1me1WhBEaX02mhWg==, tarball: https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-2.0.0.tgz} engines: {node: '>=12'} @@ -13266,6 +13272,8 @@ snapshots: webpack-virtual-modules@0.5.0: {} + websocket-ts@2.2.1: {} + whatwg-encoding@2.0.0: dependencies: iconv-lite: 0.6.3 diff --git a/site/src/api/api.ts b/site/src/api/api.ts index dd8d3d77998d2..013c018d5c656 100644 --- a/site/src/api/api.ts +++ b/site/src/api/api.ts @@ -129,6 +129,14 @@ export const watchWorkspace = ( }); }; +export const watchAgentContainers = ( + agentId: string, +): OneWayWebSocket => { + return new OneWayWebSocket({ + apiRoute: `/api/v2/workspaceagents/${agentId}/containers/watch`, + }); +}; + type WatchInboxNotificationsParams = Readonly<{ read_status?: "read" | "unread" | "all"; }>; @@ -1805,6 +1813,14 @@ class ApiMethods { return response.data; }; + getConnectionLogs = async ( + options: TypesGen.ConnectionLogsRequest, + ): Promise => { + const url = getURLWithSearchParams("/api/v2/connectionlog", options); + const response = await this.axios.get(url); + return response.data; + }; + getTemplateDAUs = async ( templateId: string, ): Promise => { diff --git a/site/src/api/queries/connectionlog.ts b/site/src/api/queries/connectionlog.ts new file mode 100644 index 0000000000000..9fbeb3f9e783d --- /dev/null +++ b/site/src/api/queries/connectionlog.ts @@ -0,0 +1,24 @@ +import { API } from "api/api"; +import type { ConnectionLogResponse } from "api/typesGenerated"; +import { useFilterParamsKey } from "components/Filter/Filter"; +import type { UsePaginatedQueryOptions } from "hooks/usePaginatedQuery"; + +export function paginatedConnectionLogs( + searchParams: URLSearchParams, +): 
UsePaginatedQueryOptions { + return { + searchParams, + queryPayload: () => searchParams.get(useFilterParamsKey) ?? "", + queryKey: ({ payload, pageNumber }) => { + return ["connectionLogs", payload, pageNumber] as const; + }, + queryFn: ({ payload, limit, offset }) => { + return API.getConnectionLogs({ + offset, + limit, + q: payload, + }); + }, + prefetch: false, + }; +} diff --git a/site/src/api/rbacresourcesGenerated.ts b/site/src/api/rbacresourcesGenerated.ts index de09b245ff049..5d632d57fad95 100644 --- a/site/src/api/rbacresourcesGenerated.ts +++ b/site/src/api/rbacresourcesGenerated.ts @@ -31,6 +31,10 @@ export const RBACResourceActions: Partial< create: "create new audit log entries", read: "read audit logs", }, + connection_log: { + read: "read connection logs", + update: "upsert connection log entries", + }, crypto_key: { create: "create crypto keys", delete: "delete crypto keys", diff --git a/site/src/api/typesGenerated.ts b/site/src/api/typesGenerated.ts index 53dc919df2df3..47a2984d374a2 100644 --- a/site/src/api/typesGenerated.ts +++ b/site/src/api/typesGenerated.ts @@ -322,6 +322,75 @@ export interface ConnectionLatency { readonly p95: number; } +// From codersdk/connectionlog.go +export interface ConnectionLog { + readonly id: string; + readonly connect_time: string; + readonly organization: MinimalOrganization; + readonly workspace_owner_id: string; + readonly workspace_owner_username: string; + readonly workspace_id: string; + readonly workspace_name: string; + readonly agent_name: string; + readonly ip: string; + readonly type: ConnectionType; + readonly web_info?: ConnectionLogWebInfo; + readonly ssh_info?: ConnectionLogSSHInfo; +} + +// From codersdk/connectionlog.go +export interface ConnectionLogResponse { + readonly connection_logs: readonly ConnectionLog[]; + readonly count: number; +} + +// From codersdk/connectionlog.go +export interface ConnectionLogSSHInfo { + readonly connection_id: string; + readonly disconnect_time?: string; + 
readonly disconnect_reason?: string; + readonly exit_code?: number; +} + +// From codersdk/connectionlog.go +export type ConnectionLogStatus = "completed" | "ongoing"; + +export const ConnectionLogStatuses: ConnectionLogStatus[] = [ + "completed", + "ongoing", +]; + +// From codersdk/connectionlog.go +export interface ConnectionLogWebInfo { + readonly user_agent: string; + readonly user: User | null; + readonly slug_or_port: string; + readonly status_code: number; +} + +// From codersdk/connectionlog.go +export interface ConnectionLogsRequest extends Pagination { + readonly q?: string; +} + +// From codersdk/connectionlog.go +export type ConnectionType = + | "jetbrains" + | "port_forwarding" + | "reconnecting_pty" + | "ssh" + | "vscode" + | "workspace_app"; + +export const ConnectionTypes: ConnectionType[] = [ + "jetbrains", + "port_forwarding", + "reconnecting_pty", + "ssh", + "vscode", + "workspace_app", +]; + // From codersdk/files.go export const ContentTypeTar = "application/x-tar"; @@ -920,6 +989,7 @@ export type FeatureName = | "appearance" | "audit_log" | "browser_only" + | "connection_log" | "control_shared_ports" | "custom_roles" | "external_provisioner_daemons" @@ -941,6 +1011,7 @@ export const FeatureNames: FeatureName[] = [ "appearance", "audit_log", "browser_only", + "connection_log", "control_shared_ports", "custom_roles", "external_provisioner_daemons", @@ -2241,6 +2312,7 @@ export type RBACResource = | "assign_org_role" | "assign_role" | "audit_log" + | "connection_log" | "crypto_key" | "debug_info" | "deployment_config" @@ -2280,6 +2352,7 @@ export const RBACResources: RBACResource[] = [ "assign_org_role", "assign_role", "audit_log", + "connection_log", "crypto_key", "debug_info", "deployment_config", @@ -3375,6 +3448,7 @@ export interface Workspace { readonly allow_renames: boolean; readonly favorite: boolean; readonly next_start_at: string | null; + readonly is_prebuild: boolean; } // From codersdk/workspaceagents.go diff --git 
a/site/src/components/Filter/UserFilter.tsx b/site/src/components/Filter/UserFilter.tsx index 3dc591cd4a284..0663d3d8d97d0 100644 --- a/site/src/components/Filter/UserFilter.tsx +++ b/site/src/components/Filter/UserFilter.tsx @@ -82,14 +82,15 @@ export type UserFilterMenu = ReturnType; interface UserMenuProps { menu: UserFilterMenu; + placeholder?: string; width?: number; } -export const UserMenu: FC = ({ menu, width }) => { +export const UserMenu: FC = ({ menu, width, placeholder }) => { return ( = ({ + code, + isHttpCode, + label, +}) => { + const pill = ( + + {code.toString()} + + ); + if (!label) { + return pill; + } + return ( + + + {pill} + {label} + + + ); +}; diff --git a/site/src/hooks/index.ts b/site/src/hooks/index.ts index 4453e36fa4bb4..901fee8a50ded 100644 --- a/site/src/hooks/index.ts +++ b/site/src/hooks/index.ts @@ -3,4 +3,3 @@ export * from "./useClickable"; export * from "./useClickableTableRow"; export * from "./useClipboard"; export * from "./usePagination"; -export * from "./useWithRetry"; diff --git a/site/src/hooks/useWithRetry.test.ts b/site/src/hooks/useWithRetry.test.ts deleted file mode 100644 index 7ed7b4331f21e..0000000000000 --- a/site/src/hooks/useWithRetry.test.ts +++ /dev/null @@ -1,329 +0,0 @@ -import { act, renderHook } from "@testing-library/react"; -import { useWithRetry } from "./useWithRetry"; - -// Mock timers -jest.useFakeTimers(); - -describe("useWithRetry", () => { - let mockFn: jest.Mock; - - beforeEach(() => { - mockFn = jest.fn(); - jest.clearAllTimers(); - }); - - afterEach(() => { - jest.clearAllMocks(); - }); - - it("should initialize with correct default state", () => { - const { result } = renderHook(() => useWithRetry(mockFn)); - - expect(result.current.isLoading).toBe(false); - expect(result.current.nextRetryAt).toBe(undefined); - }); - - it("should execute function successfully on first attempt", async () => { - mockFn.mockResolvedValue(undefined); - - const { result } = renderHook(() => useWithRetry(mockFn)); 
- - await act(async () => { - await result.current.call(); - }); - - expect(mockFn).toHaveBeenCalledTimes(1); - expect(result.current.isLoading).toBe(false); - expect(result.current.nextRetryAt).toBe(undefined); - }); - - it("should set isLoading to true during execution", async () => { - let resolvePromise: () => void; - const promise = new Promise((resolve) => { - resolvePromise = resolve; - }); - mockFn.mockReturnValue(promise); - - const { result } = renderHook(() => useWithRetry(mockFn)); - - act(() => { - result.current.call(); - }); - - expect(result.current.isLoading).toBe(true); - - await act(async () => { - resolvePromise!(); - await promise; - }); - - expect(result.current.isLoading).toBe(false); - }); - - it("should retry on failure with exponential backoff", async () => { - mockFn - .mockRejectedValueOnce(new Error("First failure")) - .mockRejectedValueOnce(new Error("Second failure")) - .mockResolvedValueOnce(undefined); - - const { result } = renderHook(() => useWithRetry(mockFn)); - - // Start the call - await act(async () => { - await result.current.call(); - }); - - expect(mockFn).toHaveBeenCalledTimes(1); - expect(result.current.isLoading).toBe(false); - expect(result.current.nextRetryAt).not.toBe(null); - - // Fast-forward to first retry (1 second) - await act(async () => { - jest.advanceTimersByTime(1000); - }); - - expect(mockFn).toHaveBeenCalledTimes(2); - expect(result.current.isLoading).toBe(false); - expect(result.current.nextRetryAt).not.toBe(null); - - // Fast-forward to second retry (2 seconds) - await act(async () => { - jest.advanceTimersByTime(2000); - }); - - expect(mockFn).toHaveBeenCalledTimes(3); - expect(result.current.isLoading).toBe(false); - expect(result.current.nextRetryAt).toBe(undefined); - }); - - it("should continue retrying without limit", async () => { - mockFn.mockRejectedValue(new Error("Always fails")); - - const { result } = renderHook(() => useWithRetry(mockFn)); - - // Start the call - await act(async () => { - 
await result.current.call(); - }); - - expect(mockFn).toHaveBeenCalledTimes(1); - expect(result.current.isLoading).toBe(false); - expect(result.current.nextRetryAt).not.toBe(null); - - // Fast-forward through multiple retries to verify it continues - for (let i = 1; i < 15; i++) { - const delay = Math.min(1000 * 2 ** (i - 1), 600000); // exponential backoff with max delay - await act(async () => { - jest.advanceTimersByTime(delay); - }); - expect(mockFn).toHaveBeenCalledTimes(i + 1); - expect(result.current.isLoading).toBe(false); - expect(result.current.nextRetryAt).not.toBe(null); - } - - // Should still be retrying after 15 attempts - expect(result.current.nextRetryAt).not.toBe(null); - }); - - it("should respect max delay of 10 minutes", async () => { - mockFn.mockRejectedValue(new Error("Always fails")); - - const { result } = renderHook(() => useWithRetry(mockFn)); - - // Start the call - await act(async () => { - await result.current.call(); - }); - - expect(result.current.isLoading).toBe(false); - - // Fast-forward through several retries to reach max delay - // After attempt 9, delay would be 1000 * 2^9 = 512000ms, which is less than 600000ms (10 min) - // After attempt 10, delay would be 1000 * 2^10 = 1024000ms, which should be capped at 600000ms - - // Skip to attempt 9 (delay calculation: 1000 * 2^8 = 256000ms) - for (let i = 1; i < 9; i++) { - const delay = 1000 * 2 ** (i - 1); - await act(async () => { - jest.advanceTimersByTime(delay); - }); - } - - expect(mockFn).toHaveBeenCalledTimes(9); - expect(result.current.nextRetryAt).not.toBe(null); - - // The 9th retry should use max delay (600000ms = 10 minutes) - await act(async () => { - jest.advanceTimersByTime(600000); - }); - - expect(mockFn).toHaveBeenCalledTimes(10); - expect(result.current.isLoading).toBe(false); - expect(result.current.nextRetryAt).not.toBe(null); - - // Continue with more retries at max delay to verify it continues indefinitely - await act(async () => { - 
jest.advanceTimersByTime(600000); - }); - - expect(mockFn).toHaveBeenCalledTimes(11); - expect(result.current.nextRetryAt).not.toBe(null); - }); - - it("should cancel previous retry when call is invoked again", async () => { - mockFn - .mockRejectedValueOnce(new Error("First failure")) - .mockResolvedValueOnce(undefined); - - const { result } = renderHook(() => useWithRetry(mockFn)); - - // Start the first call - await act(async () => { - await result.current.call(); - }); - - expect(mockFn).toHaveBeenCalledTimes(1); - expect(result.current.isLoading).toBe(false); - expect(result.current.nextRetryAt).not.toBe(null); - - // Call again before retry happens - await act(async () => { - await result.current.call(); - }); - - expect(mockFn).toHaveBeenCalledTimes(2); - expect(result.current.isLoading).toBe(false); - expect(result.current.nextRetryAt).toBe(undefined); - - // Advance time to ensure previous retry was cancelled - await act(async () => { - jest.advanceTimersByTime(5000); - }); - - expect(mockFn).toHaveBeenCalledTimes(2); // Should not have been called again - }); - - it("should set nextRetryAt when scheduling retry", async () => { - mockFn - .mockRejectedValueOnce(new Error("Failure")) - .mockResolvedValueOnce(undefined); - - const { result } = renderHook(() => useWithRetry(mockFn)); - - // Start the call - await act(async () => { - await result.current.call(); - }); - - const nextRetryAt = result.current.nextRetryAt; - expect(nextRetryAt).not.toBe(null); - expect(nextRetryAt).toBeInstanceOf(Date); - - // nextRetryAt should be approximately 1 second in the future - const expectedTime = Date.now() + 1000; - const actualTime = nextRetryAt!.getTime(); - expect(Math.abs(actualTime - expectedTime)).toBeLessThan(100); // Allow 100ms tolerance - - // Advance past retry time - await act(async () => { - jest.advanceTimersByTime(1000); - }); - - expect(result.current.nextRetryAt).toBe(undefined); - }); - - it("should cleanup timer on unmount", async () => { - 
mockFn.mockRejectedValue(new Error("Failure")); - - const { result, unmount } = renderHook(() => useWithRetry(mockFn)); - - // Start the call to create timer - await act(async () => { - await result.current.call(); - }); - - expect(result.current.isLoading).toBe(false); - expect(result.current.nextRetryAt).not.toBe(null); - - // Unmount should cleanup timer - unmount(); - - // Advance time to ensure timer was cleared - await act(async () => { - jest.advanceTimersByTime(5000); - }); - - // Function should not have been called again - expect(mockFn).toHaveBeenCalledTimes(1); - }); - - it("should prevent scheduling retries when function completes after unmount", async () => { - let rejectPromise: (error: Error) => void; - const promise = new Promise((_, reject) => { - rejectPromise = reject; - }); - mockFn.mockReturnValue(promise); - - const { result, unmount } = renderHook(() => useWithRetry(mockFn)); - - // Start the call - this will make the function in-flight - act(() => { - result.current.call(); - }); - - expect(result.current.isLoading).toBe(true); - - // Unmount while function is still in-flight - unmount(); - - // Function completes with error after unmount - await act(async () => { - rejectPromise!(new Error("Failed after unmount")); - await promise.catch(() => {}); // Suppress unhandled rejection - }); - - // Advance time to ensure no retry timers were scheduled - await act(async () => { - jest.advanceTimersByTime(5000); - }); - - // Function should only have been called once (no retries after unmount) - expect(mockFn).toHaveBeenCalledTimes(1); - }); - - it("should do nothing when call() is invoked while function is already loading", async () => { - let resolvePromise: () => void; - const promise = new Promise((resolve) => { - resolvePromise = resolve; - }); - mockFn.mockReturnValue(promise); - - const { result } = renderHook(() => useWithRetry(mockFn)); - - // Start the first call - this will set isLoading to true - act(() => { - result.current.call(); - 
}); - - expect(result.current.isLoading).toBe(true); - expect(mockFn).toHaveBeenCalledTimes(1); - - // Try to call again while loading - should do nothing - act(() => { - result.current.call(); - }); - - // Function should not have been called again - expect(mockFn).toHaveBeenCalledTimes(1); - expect(result.current.isLoading).toBe(true); - - // Complete the original promise - await act(async () => { - resolvePromise!(); - await promise; - }); - - expect(result.current.isLoading).toBe(false); - expect(mockFn).toHaveBeenCalledTimes(1); - }); -}); diff --git a/site/src/hooks/useWithRetry.ts b/site/src/hooks/useWithRetry.ts deleted file mode 100644 index 1310da221efc5..0000000000000 --- a/site/src/hooks/useWithRetry.ts +++ /dev/null @@ -1,106 +0,0 @@ -import { useCallback, useEffect, useRef, useState } from "react"; -import { useEffectEvent } from "./hookPolyfills"; - -const DELAY_MS = 1_000; -const MAX_DELAY_MS = 600_000; // 10 minutes -// Determines how much the delay between retry attempts increases after each -// failure. 
-const MULTIPLIER = 2; - -interface UseWithRetryResult { - call: () => void; - nextRetryAt: Date | undefined; - isLoading: boolean; -} - -interface RetryState { - isLoading: boolean; - nextRetryAt: Date | undefined; -} - -/** - * Hook that wraps a function with automatic retry functionality - * Provides a simple interface for executing functions with exponential backoff retry - */ -export function useWithRetry(fn: () => Promise): UseWithRetryResult { - const [state, setState] = useState({ - isLoading: false, - nextRetryAt: undefined, - }); - - const timeoutRef = useRef(null); - const mountedRef = useRef(true); - - const clearTimeout = useCallback(() => { - if (timeoutRef.current) { - window.clearTimeout(timeoutRef.current); - timeoutRef.current = null; - } - }, []); - - const stableFn = useEffectEvent(fn); - - const call = useCallback(() => { - if (state.isLoading) { - return; - } - - clearTimeout(); - - const executeAttempt = async (attempt = 0): Promise => { - if (!mountedRef.current) { - return; - } - setState({ - isLoading: true, - nextRetryAt: undefined, - }); - - try { - await stableFn(); - if (mountedRef.current) { - setState({ isLoading: false, nextRetryAt: undefined }); - } - } catch (error) { - if (!mountedRef.current) { - return; - } - const delayMs = Math.min( - DELAY_MS * MULTIPLIER ** attempt, - MAX_DELAY_MS, - ); - - setState({ - isLoading: false, - nextRetryAt: new Date(Date.now() + delayMs), - }); - - timeoutRef.current = window.setTimeout(() => { - if (!mountedRef.current) { - return; - } - setState({ - isLoading: false, - nextRetryAt: undefined, - }); - executeAttempt(attempt + 1); - }, delayMs); - } - }; - - executeAttempt(); - }, [state.isLoading, stableFn, clearTimeout]); - - useEffect(() => { - return () => { - mountedRef.current = false; - clearTimeout(); - }; - }, [clearTimeout]); - - return { - call, - nextRetryAt: state.nextRetryAt, - isLoading: state.isLoading, - }; -} diff --git a/site/src/modules/dashboard/Navbar/DeploymentDropdown.tsx 
b/site/src/modules/dashboard/Navbar/DeploymentDropdown.tsx index 9659a70ea32b3..f7376d99dd387 100644 --- a/site/src/modules/dashboard/Navbar/DeploymentDropdown.tsx +++ b/site/src/modules/dashboard/Navbar/DeploymentDropdown.tsx @@ -16,6 +16,7 @@ interface DeploymentDropdownProps { canViewDeployment: boolean; canViewOrganizations: boolean; canViewAuditLog: boolean; + canViewConnectionLog: boolean; canViewHealth: boolean; } @@ -23,12 +24,14 @@ export const DeploymentDropdown: FC = ({ canViewDeployment, canViewOrganizations, canViewAuditLog, + canViewConnectionLog, canViewHealth, }) => { const theme = useTheme(); if ( !canViewAuditLog && + !canViewConnectionLog && !canViewOrganizations && !canViewDeployment && !canViewHealth @@ -59,6 +62,7 @@ export const DeploymentDropdown: FC = ({ canViewDeployment={canViewDeployment} canViewOrganizations={canViewOrganizations} canViewAuditLog={canViewAuditLog} + canViewConnectionLog={canViewConnectionLog} canViewHealth={canViewHealth} /> @@ -71,6 +75,7 @@ const DeploymentDropdownContent: FC = ({ canViewOrganizations, canViewAuditLog, canViewHealth, + canViewConnectionLog, }) => { const popover = usePopover(); @@ -108,6 +113,16 @@ const DeploymentDropdownContent: FC = ({ Audit Logs )} + {canViewConnectionLog && ( + + Connection Logs + + )} {canViewHealth && ( = ({ canViewDeployment, canViewOrganizations, canViewAuditLog, + canViewConnectionLog, canViewHealth, }) => { const [open, setOpen] = useState(false); @@ -237,6 +239,14 @@ const AdminSettingsSub: FC = ({ Audit logs )} + {canViewConnectionLog && ( + + Connection logs + + )} {canViewHealth && ( { const canViewHealth = permissions.viewDebugInfo; const canViewAuditLog = featureVisibility.audit_log && permissions.viewAnyAuditLog; + const canViewConnectionLog = + featureVisibility.connection_log && permissions.viewAnyConnectionLog; return ( { canViewOrganizations={canViewOrganizations} canViewHealth={canViewHealth} canViewAuditLog={canViewAuditLog} + 
canViewConnectionLog={canViewConnectionLog} proxyContextValue={proxyContextValue} /> ); diff --git a/site/src/modules/dashboard/Navbar/NavbarView.test.tsx b/site/src/modules/dashboard/Navbar/NavbarView.test.tsx index 358b717b492a4..4c43e6a0877f9 100644 --- a/site/src/modules/dashboard/Navbar/NavbarView.test.tsx +++ b/site/src/modules/dashboard/Navbar/NavbarView.test.tsx @@ -33,6 +33,7 @@ describe("NavbarView", () => { canViewOrganizations canViewHealth canViewAuditLog + canViewConnectionLog />, ); const workspacesLink = @@ -50,6 +51,7 @@ describe("NavbarView", () => { canViewOrganizations canViewHealth canViewAuditLog + canViewConnectionLog />, ); const templatesLink = @@ -67,6 +69,7 @@ describe("NavbarView", () => { canViewOrganizations canViewHealth canViewAuditLog + canViewConnectionLog />, ); const deploymentMenu = await screen.findByText("Admin settings"); @@ -85,6 +88,7 @@ describe("NavbarView", () => { canViewOrganizations canViewHealth canViewAuditLog + canViewConnectionLog />, ); const deploymentMenu = await screen.findByText("Admin settings"); diff --git a/site/src/modules/dashboard/Navbar/NavbarView.tsx b/site/src/modules/dashboard/Navbar/NavbarView.tsx index d83b0e8b694a4..7b1bd9fc535ed 100644 --- a/site/src/modules/dashboard/Navbar/NavbarView.tsx +++ b/site/src/modules/dashboard/Navbar/NavbarView.tsx @@ -24,6 +24,7 @@ interface NavbarViewProps { canViewDeployment: boolean; canViewOrganizations: boolean; canViewAuditLog: boolean; + canViewConnectionLog: boolean; canViewHealth: boolean; proxyContextValue?: ProxyContextValue; } @@ -44,6 +45,7 @@ export const NavbarView: FC = ({ canViewOrganizations, canViewHealth, canViewAuditLog, + canViewConnectionLog, proxyContextValue, }) => { const webPush = useWebpushNotifications(); @@ -73,6 +75,7 @@ export const NavbarView: FC = ({ canViewOrganizations={canViewOrganizations} canViewDeployment={canViewDeployment} canViewHealth={canViewHealth} + canViewConnectionLog={canViewConnectionLog} />
@@ -124,6 +127,7 @@ export const NavbarView: FC = ({ supportLinks={supportLinks} onSignOut={onSignOut} canViewAuditLog={canViewAuditLog} + canViewConnectionLog={canViewConnectionLog} canViewOrganizations={canViewOrganizations} canViewDeployment={canViewDeployment} canViewHealth={canViewHealth} diff --git a/site/src/modules/management/DeploymentSidebarView.stories.tsx b/site/src/modules/management/DeploymentSidebarView.stories.tsx index d7fee99bc2ade..2465556110e98 100644 --- a/site/src/modules/management/DeploymentSidebarView.stories.tsx +++ b/site/src/modules/management/DeploymentSidebarView.stories.tsx @@ -1,5 +1,9 @@ import type { Meta, StoryObj } from "@storybook/react"; -import { MockNoPermissions, MockPermissions } from "testHelpers/entities"; +import { + MockBuildInfo, + MockNoPermissions, + MockPermissions, +} from "testHelpers/entities"; import { withDashboardProvider } from "testHelpers/storybook"; import { DeploymentSidebarView } from "./DeploymentSidebarView"; @@ -10,6 +14,8 @@ const meta: Meta = { parameters: { showOrganizations: true }, args: { permissions: MockPermissions, + experiments: [], + buildInfo: MockBuildInfo, }, }; diff --git a/site/src/modules/permissions/index.ts b/site/src/modules/permissions/index.ts index 16d01d113f8ee..db48e61411d18 100644 --- a/site/src/modules/permissions/index.ts +++ b/site/src/modules/permissions/index.ts @@ -156,6 +156,13 @@ export const permissionChecks = { }, action: "read", }, + viewAnyConnectionLog: { + object: { + resource_type: "connection_log", + any_org: true, + }, + action: "read", + }, viewDebugInfo: { object: { resource_type: "debug_info", diff --git a/site/src/modules/resources/AgentDevcontainerCard.tsx b/site/src/modules/resources/AgentDevcontainerCard.tsx index c7516dde15c39..bd2f05b123cad 100644 --- a/site/src/modules/resources/AgentDevcontainerCard.tsx +++ b/site/src/modules/resources/AgentDevcontainerCard.tsx @@ -130,12 +130,6 @@ export const AgentDevcontainerCard: FC = ({ return { previousData 
}; }, - onSuccess: async () => { - // Invalidate the containers query to refetch updated data. - await queryClient.invalidateQueries({ - queryKey: ["agents", parentAgent.id, "containers"], - }); - }, onError: (error, _, context) => { // If the mutation fails, use the context returned from // onMutate to roll back. diff --git a/site/src/modules/resources/AgentRow.tsx b/site/src/modules/resources/AgentRow.tsx index 3d0888f7872b1..0b5d8a5dc15c3 100644 --- a/site/src/modules/resources/AgentRow.tsx +++ b/site/src/modules/resources/AgentRow.tsx @@ -2,14 +2,12 @@ import type { Interpolation, Theme } from "@emotion/react"; import Collapse from "@mui/material/Collapse"; import Divider from "@mui/material/Divider"; import Skeleton from "@mui/material/Skeleton"; -import { API } from "api/api"; import type { Template, Workspace, WorkspaceAgent, WorkspaceAgentMetadata, } from "api/typesGenerated"; -import { isAxiosError } from "axios"; import { Button } from "components/Button/Button"; import { DropdownArrow } from "components/DropdownArrow/DropdownArrow"; import { Stack } from "components/Stack/Stack"; @@ -25,7 +23,6 @@ import { useRef, useState, } from "react"; -import { useQuery } from "react-query"; import AutoSizer from "react-virtualized-auto-sizer"; import type { FixedSizeList as List, ListOnScrollProps } from "react-window"; import { AgentApps, organizeAgentApps } from "./AgentApps/AgentApps"; @@ -41,6 +38,7 @@ import { PortForwardButton } from "./PortForwardButton"; import { AgentSSHButton } from "./SSHButton/SSHButton"; import { TerminalLink } from "./TerminalLink/TerminalLink"; import { VSCodeDesktopButton } from "./VSCodeDesktopButton/VSCodeDesktopButton"; +import { useAgentContainers } from "./useAgentContainers"; import { useAgentLogs } from "./useAgentLogs"; interface AgentRowProps { @@ -133,20 +131,7 @@ export const AgentRow: FC = ({ setBottomOfLogs(distanceFromBottom < AGENT_LOG_LINE_HEIGHT); }, []); - const { data: devcontainers } = useQuery({ - queryKey: 
["agents", agent.id, "containers"], - queryFn: () => API.getAgentContainers(agent.id), - enabled: agent.status === "connected", - select: (res) => res.devcontainers, - // TODO: Implement a websocket connection to get updates on containers - // without having to poll. - refetchInterval: ({ state }) => { - const { error } = state; - return isAxiosError(error) && error.response?.status === 403 - ? false - : 10_000; - }, - }); + const devcontainers = useAgentContainers(agent); // This is used to show the parent apps of the devcontainer. const [showParentApps, setShowParentApps] = useState(false); diff --git a/site/src/modules/resources/useAgentContainers.test.tsx b/site/src/modules/resources/useAgentContainers.test.tsx new file mode 100644 index 0000000000000..dbdcdf6f21293 --- /dev/null +++ b/site/src/modules/resources/useAgentContainers.test.tsx @@ -0,0 +1,214 @@ +import { renderHook, waitFor } from "@testing-library/react"; +import * as API from "api/api"; +import type { WorkspaceAgentListContainersResponse } from "api/typesGenerated"; +import * as GlobalSnackbar from "components/GlobalSnackbar/utils"; +import { http, HttpResponse } from "msw"; +import type { FC, PropsWithChildren } from "react"; +import { QueryClient, QueryClientProvider } from "react-query"; +import { + MockWorkspaceAgent, + MockWorkspaceAgentDevcontainer, +} from "testHelpers/entities"; +import { server } from "testHelpers/server"; +import type { OneWayWebSocket } from "utils/OneWayWebSocket"; +import { useAgentContainers } from "./useAgentContainers"; + +const createWrapper = (): FC => { + const queryClient = new QueryClient({ + defaultOptions: { + queries: { + retry: false, + }, + }, + }); + return ({ children }) => ( + {children} + ); +}; + +describe("useAgentContainers", () => { + it("returns containers when agent is connected", async () => { + server.use( + http.get( + `/api/v2/workspaceagents/${MockWorkspaceAgent.id}/containers`, + () => { + return HttpResponse.json({ + devcontainers: 
[MockWorkspaceAgentDevcontainer], + containers: [], + }); + }, + ), + ); + + const { result } = renderHook( + () => useAgentContainers(MockWorkspaceAgent), + { + wrapper: createWrapper(), + }, + ); + + await waitFor(() => { + expect(result.current).toEqual([MockWorkspaceAgentDevcontainer]); + }); + }); + + it("returns undefined when agent is not connected", () => { + const disconnectedAgent = { + ...MockWorkspaceAgent, + status: "disconnected" as const, + }; + + const { result } = renderHook(() => useAgentContainers(disconnectedAgent), { + wrapper: createWrapper(), + }); + + expect(result.current).toBeUndefined(); + }); + + it("handles API errors gracefully", async () => { + server.use( + http.get( + `/api/v2/workspaceagents/${MockWorkspaceAgent.id}/containers`, + () => { + return HttpResponse.error(); + }, + ), + ); + + const { result } = renderHook( + () => useAgentContainers(MockWorkspaceAgent), + { + wrapper: createWrapper(), + }, + ); + + await waitFor(() => { + expect(result.current).toBeUndefined(); + }); + }); + + it("handles parsing errors from WebSocket", async () => { + const displayErrorSpy = jest.spyOn(GlobalSnackbar, "displayError"); + const watchAgentContainersSpy = jest.spyOn(API, "watchAgentContainers"); + + const mockSocket = { + addEventListener: jest.fn(), + close: jest.fn(), + }; + watchAgentContainersSpy.mockReturnValue( + mockSocket as unknown as OneWayWebSocket, + ); + + server.use( + http.get( + `/api/v2/workspaceagents/${MockWorkspaceAgent.id}/containers`, + () => { + return HttpResponse.json({ + devcontainers: [MockWorkspaceAgentDevcontainer], + containers: [], + }); + }, + ), + ); + + const { unmount } = renderHook( + () => useAgentContainers(MockWorkspaceAgent), + { + wrapper: createWrapper(), + }, + ); + + // Simulate message event with parsing error + const messageHandler = mockSocket.addEventListener.mock.calls.find( + (call) => call[0] === "message", + )?.[1]; + + if (messageHandler) { + messageHandler({ + parseError: new 
Error("Parse error"), + parsedMessage: null, + }); + } + + await waitFor(() => { + expect(displayErrorSpy).toHaveBeenCalledWith( + "Failed to update containers", + "Please try refreshing the page", + ); + }); + + unmount(); + displayErrorSpy.mockRestore(); + watchAgentContainersSpy.mockRestore(); + }); + + it("handles WebSocket errors", async () => { + const displayErrorSpy = jest.spyOn(GlobalSnackbar, "displayError"); + const watchAgentContainersSpy = jest.spyOn(API, "watchAgentContainers"); + + const mockSocket = { + addEventListener: jest.fn(), + close: jest.fn(), + }; + watchAgentContainersSpy.mockReturnValue( + mockSocket as unknown as OneWayWebSocket, + ); + + server.use( + http.get( + `/api/v2/workspaceagents/${MockWorkspaceAgent.id}/containers`, + () => { + return HttpResponse.json({ + devcontainers: [MockWorkspaceAgentDevcontainer], + containers: [], + }); + }, + ), + ); + + const { unmount } = renderHook( + () => useAgentContainers(MockWorkspaceAgent), + { + wrapper: createWrapper(), + }, + ); + + // Simulate error event + const errorHandler = mockSocket.addEventListener.mock.calls.find( + (call) => call[0] === "error", + )?.[1]; + + if (errorHandler) { + errorHandler(new Error("WebSocket error")); + } + + await waitFor(() => { + expect(displayErrorSpy).toHaveBeenCalledWith( + "Failed to load containers", + "Please try refreshing the page", + ); + }); + + unmount(); + displayErrorSpy.mockRestore(); + watchAgentContainersSpy.mockRestore(); + }); + + it("does not establish WebSocket connection when agent is not connected", () => { + const watchAgentContainersSpy = jest.spyOn(API, "watchAgentContainers"); + + const disconnectedAgent = { + ...MockWorkspaceAgent, + status: "disconnected" as const, + }; + + const { result } = renderHook(() => useAgentContainers(disconnectedAgent), { + wrapper: createWrapper(), + }); + + expect(watchAgentContainersSpy).not.toHaveBeenCalled(); + expect(result.current).toBeUndefined(); + + watchAgentContainersSpy.mockRestore(); + 
}); +}); diff --git a/site/src/modules/resources/useAgentContainers.ts b/site/src/modules/resources/useAgentContainers.ts new file mode 100644 index 0000000000000..e2239fe4666f1 --- /dev/null +++ b/site/src/modules/resources/useAgentContainers.ts @@ -0,0 +1,63 @@ +import { API, watchAgentContainers } from "api/api"; +import type { + WorkspaceAgent, + WorkspaceAgentDevcontainer, + WorkspaceAgentListContainersResponse, +} from "api/typesGenerated"; +import { displayError } from "components/GlobalSnackbar/utils"; +import { useEffectEvent } from "hooks/hookPolyfills"; +import { useEffect } from "react"; +import { useQuery, useQueryClient } from "react-query"; + +export function useAgentContainers( + agent: WorkspaceAgent, +): readonly WorkspaceAgentDevcontainer[] | undefined { + const queryClient = useQueryClient(); + + const { data: devcontainers } = useQuery({ + queryKey: ["agents", agent.id, "containers"], + queryFn: () => API.getAgentContainers(agent.id), + enabled: agent.status === "connected", + select: (res) => res.devcontainers, + staleTime: Number.POSITIVE_INFINITY, + }); + + const updateDevcontainersCache = useEffectEvent( + async (data: WorkspaceAgentListContainersResponse) => { + const queryKey = ["agents", agent.id, "containers"]; + + queryClient.setQueryData(queryKey, data); + }, + ); + + useEffect(() => { + if (agent.status !== "connected") { + return; + } + + const socket = watchAgentContainers(agent.id); + + socket.addEventListener("message", (event) => { + if (event.parseError) { + displayError( + "Failed to update containers", + "Please try refreshing the page", + ); + return; + } + + updateDevcontainersCache(event.parsedMessage); + }); + + socket.addEventListener("error", () => { + displayError( + "Failed to load containers", + "Please try refreshing the page", + ); + }); + + return () => socket.close(); + }, [agent.id, agent.status, updateDevcontainersCache]); + + return devcontainers; +} diff --git a/site/src/pages/AuditPage/AuditFilter.tsx 
b/site/src/pages/AuditPage/AuditFilter.tsx index a1c1bc57d8549..c625a7d60797e 100644 --- a/site/src/pages/AuditPage/AuditFilter.tsx +++ b/site/src/pages/AuditPage/AuditFilter.tsx @@ -82,10 +82,17 @@ export const useActionFilterMenu = ({ value, onChange, }: Pick) => { - const actionOptions: SelectFilterOption[] = AuditActions.map((action) => ({ - value: action, - label: capitalize(action), - })); + const actionOptions: SelectFilterOption[] = AuditActions + // TODO(ethanndickson): Logs with these action types are no longer produced. + // Until we remove them from the database and API, we shouldn't suggest them + // in the filter dropdown. + .filter( + (action) => !["connect", "disconnect", "open", "close"].includes(action), + ) + .map((action) => ({ + value: action, + label: capitalize(action), + })); return useFilterMenu({ onChange, value, diff --git a/site/src/pages/AuditPage/AuditLogRow/AuditLogRow.tsx b/site/src/pages/AuditPage/AuditLogRow/AuditLogRow.tsx index a123e83214775..73ab52da5cd1a 100644 --- a/site/src/pages/AuditPage/AuditLogRow/AuditLogRow.tsx +++ b/site/src/pages/AuditPage/AuditLogRow/AuditLogRow.tsx @@ -6,14 +6,13 @@ import Tooltip from "@mui/material/Tooltip"; import type { AuditLog } from "api/typesGenerated"; import { Avatar } from "components/Avatar/Avatar"; import { DropdownArrow } from "components/DropdownArrow/DropdownArrow"; -import { Pill } from "components/Pill/Pill"; import { Stack } from "components/Stack/Stack"; +import { StatusPill } from "components/StatusPill/StatusPill"; import { TimelineEntry } from "components/Timeline/TimelineEntry"; import { InfoIcon } from "lucide-react"; import { NetworkIcon } from "lucide-react"; import { type FC, useState } from "react"; import { Link as RouterLink } from "react-router-dom"; -import type { ThemeRole } from "theme/roles"; import userAgentParser from "ua-parser-js"; import { AuditLogDescription } from "./AuditLogDescription/AuditLogDescription"; import { AuditLogDiff } from 
"./AuditLogDiff/AuditLogDiff"; @@ -22,21 +21,6 @@ import { determineIdPSyncMappingDiff, } from "./AuditLogDiff/auditUtils"; -const httpStatusColor = (httpStatus: number): ThemeRole => { - // Treat server errors (500) as errors - if (httpStatus >= 500) { - return "error"; - } - - // Treat client errors (400) as warnings - if (httpStatus >= 400) { - return "warning"; - } - - // OK (200) and redirects (300) are successful - return "success"; -}; - interface AuditLogRowProps { auditLog: AuditLog; // Useful for Storybook @@ -139,7 +123,7 @@ export const AuditLogRow: FC = ({ - + {/* With multi-org, there is not enough space so show everything in a tooltip. */} @@ -243,19 +227,6 @@ export const AuditLogRow: FC = ({ ); }; -function StatusPill({ code }: { code: number }) { - const isHttp = code >= 100; - - return ( - - {code.toString()} - - ); -} - const styles = { auditLogCell: { padding: "0 !important", @@ -311,14 +282,6 @@ const styles = { width: "100%", }, - statusCodePill: { - fontSize: 10, - height: 20, - paddingLeft: 10, - paddingRight: 10, - fontWeight: 600, - }, - deletedLabel: (theme) => ({ ...(theme.typography.caption as CSSObject), color: theme.palette.text.secondary, diff --git a/site/src/pages/ConnectionLogPage/ConnectionLogFilter.tsx b/site/src/pages/ConnectionLogPage/ConnectionLogFilter.tsx new file mode 100644 index 0000000000000..9d049c4e6865b --- /dev/null +++ b/site/src/pages/ConnectionLogPage/ConnectionLogFilter.tsx @@ -0,0 +1,157 @@ +import { ConnectionLogStatuses, ConnectionTypes } from "api/typesGenerated"; +import { Filter, MenuSkeleton, type useFilter } from "components/Filter/Filter"; +import { + SelectFilter, + type SelectFilterOption, +} from "components/Filter/SelectFilter"; +import { type UserFilterMenu, UserMenu } from "components/Filter/UserFilter"; +import { + type UseFilterMenuOptions, + useFilterMenu, +} from "components/Filter/menu"; +import capitalize from "lodash/capitalize"; +import { + type OrganizationsFilterMenu, + 
OrganizationsMenu, +} from "modules/tableFiltering/options"; +import type { FC } from "react"; +import { connectionTypeToFriendlyName } from "utils/connection"; +import { docs } from "utils/docs"; + +const PRESET_FILTERS = [ + { + query: "status:connected type:ssh", + name: "Active SSH connections", + }, +]; + +interface ConnectionLogFilterProps { + filter: ReturnType; + error?: unknown; + menus: { + user: UserFilterMenu; + status: StatusFilterMenu; + type: TypeFilterMenu; + // The organization menu is only provided in a multi-org setup. + organization?: OrganizationsFilterMenu; + }; +} + +export const ConnectionLogFilter: FC = ({ + filter, + error, + menus, +}) => { + const width = menus.organization ? 175 : undefined; + + return ( + + + + + {menus.organization && ( + + )} + + } + optionsSkeleton={ + <> + + + + {menus.organization && } + + } + /> + ); +}; + +export const useStatusFilterMenu = ({ + value, + onChange, +}: Pick) => { + const statusOptions: SelectFilterOption[] = ConnectionLogStatuses.map( + (status) => ({ + value: status, + label: capitalize(status), + }), + ); + return useFilterMenu({ + onChange, + value, + id: "status", + getSelectedOption: async () => + statusOptions.find((option) => option.value === value) ?? null, + getOptions: async () => statusOptions, + }); +}; + +type StatusFilterMenu = ReturnType; + +interface StatusMenuProps { + menu: StatusFilterMenu; + width?: number; +} + +const StatusMenu: FC = ({ menu, width }) => { + return ( + + ); +}; + +export const useTypeFilterMenu = ({ + value, + onChange, +}: Pick) => { + const typeOptions: SelectFilterOption[] = ConnectionTypes.map((type) => { + const label: string = connectionTypeToFriendlyName(type); + return { + value: type, + label, + }; + }); + return useFilterMenu({ + onChange, + value, + id: "connection_type", + getSelectedOption: async () => + typeOptions.find((option) => option.value === value) ?? 
null, + getOptions: async () => typeOptions, + }); +}; + +type TypeFilterMenu = ReturnType; + +interface TypeMenuProps { + menu: TypeFilterMenu; + width?: number; +} + +const TypeMenu: FC = ({ menu, width }) => { + return ( + + ); +}; diff --git a/site/src/pages/ConnectionLogPage/ConnectionLogHelpTooltip.tsx b/site/src/pages/ConnectionLogPage/ConnectionLogHelpTooltip.tsx new file mode 100644 index 0000000000000..be87c6e8a8b17 --- /dev/null +++ b/site/src/pages/ConnectionLogPage/ConnectionLogHelpTooltip.tsx @@ -0,0 +1,35 @@ +import { + HelpTooltip, + HelpTooltipContent, + HelpTooltipLink, + HelpTooltipLinksGroup, + HelpTooltipText, + HelpTooltipTitle, + HelpTooltipTrigger, +} from "components/HelpTooltip/HelpTooltip"; +import type { FC } from "react"; +import { docs } from "utils/docs"; + +const Language = { + title: "Why are some events missing?", + body: "The connection log is a best-effort log of workspace access. Some events are reported by workspace agents, and receipt of these events by the server is not guaranteed.", + docs: "Connection log documentation", +}; + +export const ConnectionLogHelpTooltip: FC = () => { + return ( + + + + + {Language.title} + {Language.body} + + + {Language.docs} + + + + + ); +}; diff --git a/site/src/pages/ConnectionLogPage/ConnectionLogPage.test.tsx b/site/src/pages/ConnectionLogPage/ConnectionLogPage.test.tsx new file mode 100644 index 0000000000000..7beea3f033e30 --- /dev/null +++ b/site/src/pages/ConnectionLogPage/ConnectionLogPage.test.tsx @@ -0,0 +1,129 @@ +import { screen, waitFor } from "@testing-library/react"; +import userEvent from "@testing-library/user-event"; +import { API } from "api/api"; +import { DEFAULT_RECORDS_PER_PAGE } from "components/PaginationWidget/utils"; +import { http, HttpResponse } from "msw"; +import { + MockConnectedSSHConnectionLog, + MockDisconnectedSSHConnectionLog, + MockEntitlementsWithConnectionLog, +} from "testHelpers/entities"; +import { + renderWithAuth, + waitForLoaderToBeRemoved, +} 
from "testHelpers/renderHelpers"; +import { server } from "testHelpers/server"; +import * as CreateDayString from "utils/createDayString"; +import ConnectionLogPage from "./ConnectionLogPage"; + +interface RenderPageOptions { + filter?: string; + page?: number; +} + +const renderPage = async ({ filter, page }: RenderPageOptions = {}) => { + let route = "/connectionlog"; + const params = new URLSearchParams(); + + if (filter) { + params.set("filter", filter); + } + + if (page) { + params.set("page", page.toString()); + } + + if (Array.from(params).length > 0) { + route += `?${params.toString()}`; + } + + renderWithAuth(, { + route, + path: "/connectionlog", + }); + await waitForLoaderToBeRemoved(); +}; + +describe("ConnectionLogPage", () => { + beforeEach(() => { + // Mocking the dayjs module within the createDayString file + const mock = jest.spyOn(CreateDayString, "createDayString"); + mock.mockImplementation(() => "a minute ago"); + + // Mock the entitlements + server.use( + http.get("/api/v2/entitlements", () => { + return HttpResponse.json(MockEntitlementsWithConnectionLog); + }), + ); + }); + + it("renders page 5", async () => { + // Given + const page = 5; + const getConnectionLogsSpy = jest + .spyOn(API, "getConnectionLogs") + .mockResolvedValue({ + connection_logs: [ + MockConnectedSSHConnectionLog, + MockDisconnectedSSHConnectionLog, + ], + count: 2, + }); + + // When + await renderPage({ page: page }); + + // Then + expect(getConnectionLogsSpy).toHaveBeenCalledWith({ + limit: DEFAULT_RECORDS_PER_PAGE, + offset: DEFAULT_RECORDS_PER_PAGE * (page - 1), + q: "", + }); + screen.getByTestId( + `connection-log-row-${MockConnectedSSHConnectionLog.id}`, + ); + screen.getByTestId( + `connection-log-row-${MockDisconnectedSSHConnectionLog.id}`, + ); + }); + + describe("Filtering", () => { + it("filters by URL", async () => { + const getConnectionLogsSpy = jest + .spyOn(API, "getConnectionLogs") + .mockResolvedValue({ + connection_logs: 
[MockConnectedSSHConnectionLog], + count: 1, + }); + + const query = "type:ssh status:connected"; + await renderPage({ filter: query }); + + expect(getConnectionLogsSpy).toHaveBeenCalledWith({ + limit: DEFAULT_RECORDS_PER_PAGE, + offset: 0, + q: query, + }); + }); + + it("resets page to 1 when filter is changed", async () => { + await renderPage({ page: 2 }); + + const getConnectionLogsSpy = jest.spyOn(API, "getConnectionLogs"); + getConnectionLogsSpy.mockClear(); + + const filterField = screen.getByLabelText("Filter"); + const query = "type:ssh status:connected"; + await userEvent.type(filterField, query); + + await waitFor(() => + expect(getConnectionLogsSpy).toHaveBeenCalledWith({ + limit: DEFAULT_RECORDS_PER_PAGE, + offset: 0, + q: query, + }), + ); + }); + }); +}); diff --git a/site/src/pages/ConnectionLogPage/ConnectionLogPage.tsx b/site/src/pages/ConnectionLogPage/ConnectionLogPage.tsx new file mode 100644 index 0000000000000..9cd27bac95bf4 --- /dev/null +++ b/site/src/pages/ConnectionLogPage/ConnectionLogPage.tsx @@ -0,0 +1,99 @@ +import { paginatedConnectionLogs } from "api/queries/connectionlog"; +import { useFilter } from "components/Filter/Filter"; +import { useUserFilterMenu } from "components/Filter/UserFilter"; +import { isNonInitialPage } from "components/PaginationWidget/utils"; +import { usePaginatedQuery } from "hooks/usePaginatedQuery"; +import { useDashboard } from "modules/dashboard/useDashboard"; +import { useFeatureVisibility } from "modules/dashboard/useFeatureVisibility"; +import { useOrganizationsFilterMenu } from "modules/tableFiltering/options"; +import type { FC } from "react"; +import { Helmet } from "react-helmet-async"; +import { useSearchParams } from "react-router-dom"; +import { pageTitle } from "utils/page"; +import { useStatusFilterMenu, useTypeFilterMenu } from "./ConnectionLogFilter"; +import { ConnectionLogPageView } from "./ConnectionLogPageView"; + +const ConnectionLogPage: FC = () => { + const feats = 
useFeatureVisibility(); + + // The "else false" is required if connection_log is undefined, which may + // happen if the license is removed. + // + // see: https://github.com/coder/coder/issues/14798 + const isConnectionLogVisible = feats.connection_log || false; + + const { showOrganizations } = useDashboard(); + + const [searchParams, setSearchParams] = useSearchParams(); + const connectionlogsQuery = usePaginatedQuery( + paginatedConnectionLogs(searchParams), + ); + const filter = useFilter({ + searchParamsResult: [searchParams, setSearchParams], + onUpdate: connectionlogsQuery.goToFirstPage, + }); + + const userMenu = useUserFilterMenu({ + value: filter.values.workspace_owner, + onChange: (option) => + filter.update({ + ...filter.values, + workspace_owner: option?.value, + }), + }); + + const statusMenu = useStatusFilterMenu({ + value: filter.values.status, + onChange: (option) => + filter.update({ + ...filter.values, + status: option?.value, + }), + }); + + const typeMenu = useTypeFilterMenu({ + value: filter.values.type, + onChange: (option) => + filter.update({ + ...filter.values, + type: option?.value, + }), + }); + + const organizationsMenu = useOrganizationsFilterMenu({ + value: filter.values.organization, + onChange: (option) => + filter.update({ + ...filter.values, + organization: option?.value, + }), + }); + + return ( + <> + + {pageTitle("Connection Log")} + + + + + ); +}; + +export default ConnectionLogPage; diff --git a/site/src/pages/ConnectionLogPage/ConnectionLogPageView.stories.tsx b/site/src/pages/ConnectionLogPage/ConnectionLogPageView.stories.tsx new file mode 100644 index 0000000000000..393127280409b --- /dev/null +++ b/site/src/pages/ConnectionLogPage/ConnectionLogPageView.stories.tsx @@ -0,0 +1,95 @@ +import type { Meta, StoryObj } from "@storybook/react"; +import { + MockMenu, + getDefaultFilterProps, +} from "components/Filter/storyHelpers"; +import { + mockInitialRenderResult, + mockSuccessResult, +} from 
"components/PaginationWidget/PaginationContainer.mocks"; +import type { UsePaginatedQueryResult } from "hooks/usePaginatedQuery"; +import type { ComponentProps } from "react"; +import { chromaticWithTablet } from "testHelpers/chromatic"; +import { + MockConnectedSSHConnectionLog, + MockDisconnectedSSHConnectionLog, + MockUserOwner, +} from "testHelpers/entities"; +import { ConnectionLogPageView } from "./ConnectionLogPageView"; + +type FilterProps = ComponentProps["filterProps"]; + +const defaultFilterProps = getDefaultFilterProps({ + query: `username:${MockUserOwner.username}`, + values: { + username: MockUserOwner.username, + status: undefined, + type: undefined, + organization: undefined, + }, + menus: { + user: MockMenu, + status: MockMenu, + type: MockMenu, + }, +}); + +const meta: Meta = { + title: "pages/ConnectionLogPage", + component: ConnectionLogPageView, + args: { + connectionLogs: [ + MockConnectedSSHConnectionLog, + MockDisconnectedSSHConnectionLog, + ], + isConnectionLogVisible: true, + filterProps: defaultFilterProps, + }, +}; + +export default meta; +type Story = StoryObj; + +export const ConnectionLog: Story = { + parameters: { chromatic: chromaticWithTablet }, + args: { + connectionLogsQuery: mockSuccessResult, + }, +}; + +export const Loading: Story = { + args: { + connectionLogs: undefined, + isNonInitialPage: false, + connectionLogsQuery: mockInitialRenderResult, + }, +}; + +export const EmptyPage: Story = { + args: { + connectionLogs: [], + isNonInitialPage: true, + connectionLogsQuery: { + ...mockSuccessResult, + totalRecords: 0, + } as UsePaginatedQueryResult, + }, +}; + +export const NoLogs: Story = { + args: { + connectionLogs: [], + isNonInitialPage: false, + connectionLogsQuery: { + ...mockSuccessResult, + totalRecords: 0, + } as UsePaginatedQueryResult, + }, +}; + +export const NotVisible: Story = { + args: { + isConnectionLogVisible: false, + connectionLogsQuery: mockInitialRenderResult, + }, +}; diff --git 
a/site/src/pages/ConnectionLogPage/ConnectionLogPageView.tsx b/site/src/pages/ConnectionLogPage/ConnectionLogPageView.tsx new file mode 100644 index 0000000000000..fe3840d098aaa --- /dev/null +++ b/site/src/pages/ConnectionLogPage/ConnectionLogPageView.tsx @@ -0,0 +1,146 @@ +import Table from "@mui/material/Table"; +import TableBody from "@mui/material/TableBody"; +import TableCell from "@mui/material/TableCell"; +import TableContainer from "@mui/material/TableContainer"; +import TableRow from "@mui/material/TableRow"; +import type { ConnectionLog } from "api/typesGenerated"; +import { ChooseOne, Cond } from "components/Conditionals/ChooseOne"; +import { EmptyState } from "components/EmptyState/EmptyState"; +import { Margins } from "components/Margins/Margins"; +import { + PageHeader, + PageHeaderSubtitle, + PageHeaderTitle, +} from "components/PageHeader/PageHeader"; +import { + PaginationContainer, + type PaginationResult, +} from "components/PaginationWidget/PaginationContainer"; +import { Paywall } from "components/Paywall/Paywall"; +import { Stack } from "components/Stack/Stack"; +import { TableLoader } from "components/TableLoader/TableLoader"; +import { Timeline } from "components/Timeline/Timeline"; +import type { ComponentProps, FC } from "react"; +import { docs } from "utils/docs"; +import { ConnectionLogFilter } from "./ConnectionLogFilter"; +import { ConnectionLogHelpTooltip } from "./ConnectionLogHelpTooltip"; +import { ConnectionLogRow } from "./ConnectionLogRow/ConnectionLogRow"; + +const Language = { + title: "Connection Log", + subtitle: "View workspace connection events.", +}; + +interface ConnectionLogPageViewProps { + connectionLogs?: readonly ConnectionLog[]; + isNonInitialPage: boolean; + isConnectionLogVisible: boolean; + error?: unknown; + filterProps: ComponentProps; + connectionLogsQuery: PaginationResult; +} + +export const ConnectionLogPageView: FC = ({ + connectionLogs, + isNonInitialPage, + isConnectionLogVisible, + error, + 
filterProps, + connectionLogsQuery: paginationResult, +}) => { + const isLoading = + (connectionLogs === undefined || + paginationResult.totalRecords === undefined) && + !error; + + const isEmpty = !isLoading && connectionLogs?.length === 0; + + return ( + + + + + {Language.title} + + + + {Language.subtitle} + + + + + + + + + + + + {/* Error condition should just show an empty table. */} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + {connectionLogs && ( + new Date(log.connect_time)} + row={(log) => ( + + )} + /> + )} + + + +
+
+
+
+ + + + +
+
+ ); +}; diff --git a/site/src/pages/ConnectionLogPage/ConnectionLogRow/ConnectionLogDescription/ConnectionLogDescription.stories.tsx b/site/src/pages/ConnectionLogPage/ConnectionLogRow/ConnectionLogDescription/ConnectionLogDescription.stories.tsx new file mode 100644 index 0000000000000..8c8263e7dbc68 --- /dev/null +++ b/site/src/pages/ConnectionLogPage/ConnectionLogRow/ConnectionLogDescription/ConnectionLogDescription.stories.tsx @@ -0,0 +1,105 @@ +import type { Meta, StoryObj } from "@storybook/react"; +import { + MockConnectedSSHConnectionLog, + MockWebConnectionLog, +} from "testHelpers/entities"; +import { ConnectionLogDescription } from "./ConnectionLogDescription"; + +const meta: Meta = { + title: "pages/ConnectionLogPage/ConnectionLogDescription", + component: ConnectionLogDescription, +}; + +export default meta; +type Story = StoryObj; + +export const SSH: Story = { + args: { + connectionLog: MockConnectedSSHConnectionLog, + }, +}; + +export const App: Story = { + args: { + connectionLog: { + ...MockWebConnectionLog, + }, + }, +}; + +export const AppUnauthenticated: Story = { + args: { + connectionLog: { + ...MockWebConnectionLog, + web_info: { + ...MockWebConnectionLog.web_info!, + user: null, + }, + }, + }, +}; + +export const AppAuthenticatedFail: Story = { + args: { + connectionLog: { + ...MockWebConnectionLog, + web_info: { + ...MockWebConnectionLog.web_info!, + status_code: 404, + }, + }, + }, +}; + +export const PortForwardingAuthenticated: Story = { + args: { + connectionLog: { + ...MockWebConnectionLog, + type: "port_forwarding", + web_info: { + ...MockWebConnectionLog.web_info!, + slug_or_port: "8080", + }, + }, + }, +}; + +export const AppUnauthenticatedRedirect: Story = { + args: { + connectionLog: { + ...MockWebConnectionLog, + web_info: { + ...MockWebConnectionLog.web_info!, + user: null, + status_code: 303, + }, + }, + }, +}; + +export const VSCode: Story = { + args: { + connectionLog: { + ...MockWebConnectionLog, + type: "vscode", + }, + 
}, +}; + +export const JetBrains: Story = { + args: { + connectionLog: { + ...MockWebConnectionLog, + type: "jetbrains", + }, + }, +}; + +export const WebTerminal: Story = { + args: { + connectionLog: { + ...MockWebConnectionLog, + type: "reconnecting_pty", + }, + }, +}; diff --git a/site/src/pages/ConnectionLogPage/ConnectionLogRow/ConnectionLogDescription/ConnectionLogDescription.tsx b/site/src/pages/ConnectionLogPage/ConnectionLogRow/ConnectionLogDescription/ConnectionLogDescription.tsx new file mode 100644 index 0000000000000..b862134624189 --- /dev/null +++ b/site/src/pages/ConnectionLogPage/ConnectionLogRow/ConnectionLogDescription/ConnectionLogDescription.tsx @@ -0,0 +1,95 @@ +import Link from "@mui/material/Link"; +import type { ConnectionLog } from "api/typesGenerated"; +import type { FC, ReactNode } from "react"; +import { Link as RouterLink } from "react-router-dom"; +import { connectionTypeToFriendlyName } from "utils/connection"; + +interface ConnectionLogDescriptionProps { + connectionLog: ConnectionLog; +} + +export const ConnectionLogDescription: FC = ({ + connectionLog, +}) => { + const { type, workspace_owner_username, workspace_name, web_info } = + connectionLog; + + switch (type) { + case "port_forwarding": + case "workspace_app": { + if (!web_info) return null; + + const { user, slug_or_port, status_code } = web_info; + const isPortForward = type === "port_forwarding"; + const presentAction = isPortForward ? "access" : "open"; + const pastAction = isPortForward ? "accessed" : "opened"; + + const target: ReactNode = isPortForward ? ( + <> + port {slug_or_port} + + ) : ( + {slug_or_port} + ); + + const actionText: ReactNode = (() => { + if (status_code === 303) { + return ( + <> + was redirected attempting to {presentAction} {target} + + ); + } + if ((status_code ?? 
0) >= 400) { + return ( + <> + unsuccessfully attempted to {presentAction} {target} + + ); + } + return ( + <> + {pastAction} {target} + + ); + })(); + + const isOwnWorkspace = user + ? workspace_owner_username === user.username + : false; + + return ( + + {user ? user.username : "Unauthenticated user"} {actionText} in{" "} + {isOwnWorkspace ? "their" : `${workspace_owner_username}'s`}{" "} + + {workspace_name} + {" "} + workspace + + ); + } + + case "reconnecting_pty": + case "ssh": + case "jetbrains": + case "vscode": { + const friendlyType = connectionTypeToFriendlyName(type); + return ( + + {friendlyType} session to {workspace_owner_username}'s{" "} + + {workspace_name} + {" "} + workspace{" "} + + ); + } + } +}; diff --git a/site/src/pages/ConnectionLogPage/ConnectionLogRow/ConnectionLogRow.stories.tsx b/site/src/pages/ConnectionLogPage/ConnectionLogRow/ConnectionLogRow.stories.tsx new file mode 100644 index 0000000000000..4e9dd49ed3edf --- /dev/null +++ b/site/src/pages/ConnectionLogPage/ConnectionLogRow/ConnectionLogRow.stories.tsx @@ -0,0 +1,74 @@ +import TableContainer from "@mui/material/TableContainer"; +import type { Meta, StoryObj } from "@storybook/react"; +import { Table, TableBody } from "components/Table/Table"; +import { + MockConnectedSSHConnectionLog, + MockDisconnectedSSHConnectionLog, + MockWebConnectionLog, +} from "testHelpers/entities"; +import { ConnectionLogRow } from "./ConnectionLogRow"; + +const meta: Meta = { + title: "pages/ConnectionLogPage/ConnectionLogRow", + component: ConnectionLogRow, + decorators: [ + (Story) => ( + + + + + +
+
+ ), + ], +}; + +export default meta; +type Story = StoryObj; + +export const Web: Story = { + args: { + connectionLog: MockWebConnectionLog, + }, +}; + +export const WebUnauthenticatedFail: Story = { + args: { + connectionLog: { + ...MockWebConnectionLog, + web_info: { + status_code: 404, + user_agent: MockWebConnectionLog.web_info!.user_agent, + user: null, // Unauthenticated connection attempt + slug_or_port: MockWebConnectionLog.web_info!.slug_or_port, + }, + }, + }, +}; + +export const ConnectedSSH: Story = { + args: { + connectionLog: MockConnectedSSHConnectionLog, + }, +}; + +export const DisconnectedSSH: Story = { + args: { + connectionLog: { + ...MockDisconnectedSSHConnectionLog, + }, + }, +}; + +export const DisconnectedSSHError: Story = { + args: { + connectionLog: { + ...MockDisconnectedSSHConnectionLog, + ssh_info: { + ...MockDisconnectedSSHConnectionLog.ssh_info!, + exit_code: 130, // 128 + SIGINT + }, + }, + }, +}; diff --git a/site/src/pages/ConnectionLogPage/ConnectionLogRow/ConnectionLogRow.tsx b/site/src/pages/ConnectionLogPage/ConnectionLogRow/ConnectionLogRow.tsx new file mode 100644 index 0000000000000..ac847cff73b39 --- /dev/null +++ b/site/src/pages/ConnectionLogPage/ConnectionLogRow/ConnectionLogRow.tsx @@ -0,0 +1,195 @@ +import type { CSSObject, Interpolation, Theme } from "@emotion/react"; +import Link from "@mui/material/Link"; +import TableCell from "@mui/material/TableCell"; +import Tooltip from "@mui/material/Tooltip"; +import type { ConnectionLog } from "api/typesGenerated"; +import { Avatar } from "components/Avatar/Avatar"; +import { Stack } from "components/Stack/Stack"; +import { StatusPill } from "components/StatusPill/StatusPill"; +import { TimelineEntry } from "components/Timeline/TimelineEntry"; +import { InfoIcon } from "lucide-react"; +import { NetworkIcon } from "lucide-react"; +import type { FC } from "react"; +import { Link as RouterLink } from "react-router-dom"; +import userAgentParser from "ua-parser-js"; +import { 
connectionTypeIsWeb } from "utils/connection"; +import { ConnectionLogDescription } from "./ConnectionLogDescription/ConnectionLogDescription"; + +interface ConnectionLogRowProps { + connectionLog: ConnectionLog; +} + +export const ConnectionLogRow: FC = ({ + connectionLog, +}) => { + const userAgent = connectionLog.web_info?.user_agent + ? userAgentParser(connectionLog.web_info?.user_agent) + : undefined; + const isWeb = connectionTypeIsWeb(connectionLog.type); + const code = + connectionLog.web_info?.status_code ?? connectionLog.ssh_info?.exit_code; + + return ( + + + + + {/* Non-web logs don't have an associated user, so we + * display a default network icon instead */} + {connectionLog.web_info?.user ? ( + + ) : ( + + + + )} + + + + + + {new Date(connectionLog.connect_time).toLocaleTimeString()} + {connectionLog.ssh_info?.disconnect_time && + ` → ${new Date(connectionLog.ssh_info.disconnect_time).toLocaleTimeString()}`} + + + + + {code !== undefined && ( + + )} + + {connectionLog.ip && ( +
+

IP:

+
{connectionLog.ip}
+
+ )} + {userAgent?.os.name && ( +
+

OS:

+
{userAgent.os.name}
+
+ )} + {userAgent?.browser.name && ( +
+

Browser:

+
+ {userAgent.browser.name} {userAgent.browser.version} +
+
+ )} + {connectionLog.organization && ( +
+

+ Organization: +

+ + {connectionLog.organization.display_name || + connectionLog.organization.name} + +
+ )} + {connectionLog.ssh_info?.disconnect_reason && ( +
+

+ Close Reason: +

+
{connectionLog.ssh_info?.disconnect_reason}
+
+ )} + + } + > + ({ + color: theme.palette.info.light, + })} + /> +
+
+
+
+
+
+
+ ); +}; + +const styles = { + connectionLogCell: { + padding: "0 !important", + border: 0, + }, + + connectionLogHeader: { + padding: "16px 32px", + }, + + connectionLogHeaderInfo: { + flex: 1, + }, + + connectionLogSummary: (theme) => ({ + ...(theme.typography.body1 as CSSObject), + fontFamily: "inherit", + }), + + connectionLogTime: (theme) => ({ + color: theme.palette.text.secondary, + fontSize: 12, + }), + + connectionLogInfoheader: (theme) => ({ + margin: 0, + color: theme.palette.text.primary, + fontSize: 14, + lineHeight: "150%", + fontWeight: 600, + }), + + connectionLogInfoTooltip: { + display: "flex", + flexDirection: "column", + gap: 8, + }, + + fullWidth: { + width: "100%", + }, +} satisfies Record>; diff --git a/site/src/pages/TerminalPage/TerminalAlerts.tsx b/site/src/pages/TerminalPage/TerminalAlerts.tsx index 07740135769f3..6a06a76964128 100644 --- a/site/src/pages/TerminalPage/TerminalAlerts.tsx +++ b/site/src/pages/TerminalPage/TerminalAlerts.tsx @@ -170,14 +170,16 @@ const TerminalAlert: FC = (props) => { ); }; +// Since the terminal connection is always trying to reconnect, we show this +// alert to indicate that the terminal is trying to connect. const DisconnectedAlert: FC = (props) => { return ( } > - Disconnected + Trying to connect... 
); }; diff --git a/site/src/pages/TerminalPage/TerminalPage.test.tsx b/site/src/pages/TerminalPage/TerminalPage.test.tsx index 7600fa5257d43..4591190ad9904 100644 --- a/site/src/pages/TerminalPage/TerminalPage.test.tsx +++ b/site/src/pages/TerminalPage/TerminalPage.test.tsx @@ -85,7 +85,7 @@ describe("TerminalPage", () => { await expectTerminalText(container, Language.workspaceErrorMessagePrefix); }); - it("shows an error if the websocket fails", async () => { + it("shows reconnect message when websocket fails", async () => { server.use( http.get("/api/v2/workspaceagents/:agentId/pty", () => { return HttpResponse.json({}, { status: 500 }); @@ -94,7 +94,9 @@ describe("TerminalPage", () => { const { container } = await renderTerminal(); - await expectTerminalText(container, Language.websocketErrorMessagePrefix); + await waitFor(() => { + expect(container.textContent).toContain("Trying to connect..."); + }); }); it("renders data from the backend", async () => { diff --git a/site/src/pages/TerminalPage/TerminalPage.tsx b/site/src/pages/TerminalPage/TerminalPage.tsx index 2023bdb0eeb29..5c13e89c30005 100644 --- a/site/src/pages/TerminalPage/TerminalPage.tsx +++ b/site/src/pages/TerminalPage/TerminalPage.tsx @@ -26,6 +26,13 @@ import { openMaybePortForwardedURL } from "utils/portForward"; import { terminalWebsocketUrl } from "utils/terminal"; import { getMatchingAgentOrFirst } from "utils/workspace"; import { v4 as uuidv4 } from "uuid"; +// Use websocket-ts for better WebSocket handling and auto-reconnection. +import { + ExponentialBackoff, + type Websocket, + WebsocketBuilder, + WebsocketEvent, +} from "websocket-ts"; import { TerminalAlerts } from "./TerminalAlerts"; import type { ConnectionStatus } from "./types"; @@ -221,7 +228,7 @@ const TerminalPage: FC = () => { } // Hook up terminal events to the websocket. 
- let websocket: WebSocket | null; + let websocket: Websocket | null; const disposers = [ terminal.onData((data) => { websocket?.send( @@ -259,9 +266,11 @@ const TerminalPage: FC = () => { if (disposed) { return; // Unmounted while we waited for the async call. } - websocket = new WebSocket(url); + websocket = new WebsocketBuilder(url) + .withBackoff(new ExponentialBackoff(1000, 6)) + .build(); websocket.binaryType = "arraybuffer"; - websocket.addEventListener("open", () => { + websocket.addEventListener(WebsocketEvent.open, () => { // Now that we are connected, allow user input. terminal.options = { disableStdin: false, @@ -278,18 +287,16 @@ const TerminalPage: FC = () => { ); setConnectionStatus("connected"); }); - websocket.addEventListener("error", () => { + websocket.addEventListener(WebsocketEvent.error, (_, event) => { + console.error("WebSocket error:", event); terminal.options.disableStdin = true; - terminal.writeln( - `${Language.websocketErrorMessagePrefix}socket errored`, - ); setConnectionStatus("disconnected"); }); - websocket.addEventListener("close", () => { + websocket.addEventListener(WebsocketEvent.close, () => { terminal.options.disableStdin = true; setConnectionStatus("disconnected"); }); - websocket.addEventListener("message", (event) => { + websocket.addEventListener(WebsocketEvent.message, (_, event) => { if (typeof event.data === "string") { // This exclusively occurs when testing. // "jest-websocket-mock" doesn't support ArrayBuffer. @@ -298,12 +305,25 @@ const TerminalPage: FC = () => { terminal.write(new Uint8Array(event.data)); } }); + websocket.addEventListener(WebsocketEvent.reconnect, () => { + if (websocket) { + websocket.binaryType = "arraybuffer"; + websocket.send( + new TextEncoder().encode( + JSON.stringify({ + height: terminal.rows, + width: terminal.cols, + }), + ), + ); + } + }); }) .catch((error) => { if (disposed) { return; // Unmounted while we waited for the async call. 
} - terminal.writeln(Language.websocketErrorMessagePrefix + error.message); + console.error("WebSocket connection failed:", error); setConnectionStatus("disconnected"); }); diff --git a/site/src/pages/WorkspaceSettingsPage/WorkspaceSchedulePage/WorkspaceSchedulePage.stories.tsx b/site/src/pages/WorkspaceSettingsPage/WorkspaceSchedulePage/WorkspaceSchedulePage.stories.tsx new file mode 100644 index 0000000000000..e576e479d27c7 --- /dev/null +++ b/site/src/pages/WorkspaceSettingsPage/WorkspaceSchedulePage/WorkspaceSchedulePage.stories.tsx @@ -0,0 +1,93 @@ +import type { Meta, StoryObj } from "@storybook/react"; +import { getAuthorizationKey } from "api/queries/authCheck"; +import { templateByNameKey } from "api/queries/templates"; +import { workspaceByOwnerAndNameKey } from "api/queries/workspaces"; +import type { Workspace } from "api/typesGenerated"; +import { + reactRouterNestedAncestors, + reactRouterParameters, +} from "storybook-addon-remix-react-router"; +import { + MockPrebuiltWorkspace, + MockTemplate, + MockUserOwner, + MockWorkspace, +} from "testHelpers/entities"; +import { withAuthProvider, withDashboardProvider } from "testHelpers/storybook"; +import { WorkspaceSettingsLayout } from "../WorkspaceSettingsLayout"; +import WorkspaceSchedulePage from "./WorkspaceSchedulePage"; + +const meta = { + title: "pages/WorkspaceSchedulePage", + component: WorkspaceSchedulePage, + decorators: [withAuthProvider, withDashboardProvider], + parameters: { + layout: "fullscreen", + user: MockUserOwner, + }, +} satisfies Meta; + +export default meta; +type Story = StoryObj; + +export const RegularWorkspace: Story = { + parameters: { + reactRouter: workspaceRouterParameters(MockWorkspace), + queries: workspaceQueries(MockWorkspace), + }, +}; + +export const PrebuiltWorkspace: Story = { + parameters: { + reactRouter: workspaceRouterParameters(MockPrebuiltWorkspace), + queries: workspaceQueries(MockPrebuiltWorkspace), + }, +}; + +function workspaceRouterParameters(workspace: 
Workspace) { + return reactRouterParameters({ + location: { + pathParams: { + username: `@${workspace.owner_name}`, + workspace: workspace.name, + }, + }, + routing: reactRouterNestedAncestors( + { + path: "/:username/:workspace/settings/schedule", + }, + , + ), + }); +} + +function workspaceQueries(workspace: Workspace) { + return [ + { + key: workspaceByOwnerAndNameKey(workspace.owner_name, workspace.name), + data: workspace, + }, + { + key: getAuthorizationKey({ + checks: { + updateWorkspace: { + object: { + resource_type: "workspace", + resource_id: MockWorkspace.id, + owner_id: MockWorkspace.owner_id, + }, + action: "update", + }, + }, + }), + data: { updateWorkspace: true }, + }, + { + key: templateByNameKey( + MockWorkspace.organization_id, + MockWorkspace.template_name, + ), + data: MockTemplate, + }, + ]; +} diff --git a/site/src/pages/WorkspaceSettingsPage/WorkspaceSchedulePage/WorkspaceSchedulePage.tsx b/site/src/pages/WorkspaceSettingsPage/WorkspaceSchedulePage/WorkspaceSchedulePage.tsx index 597b20173fafa..4c8526a4cda6b 100644 --- a/site/src/pages/WorkspaceSettingsPage/WorkspaceSchedulePage/WorkspaceSchedulePage.tsx +++ b/site/src/pages/WorkspaceSettingsPage/WorkspaceSchedulePage/WorkspaceSchedulePage.tsx @@ -7,6 +7,7 @@ import { Alert } from "components/Alert/Alert"; import { ErrorAlert } from "components/Alert/ErrorAlert"; import { ConfirmDialog } from "components/Dialogs/ConfirmDialog/ConfirmDialog"; import { displayError, displaySuccess } from "components/GlobalSnackbar/utils"; +import { Link } from "components/Link/Link"; import { Loader } from "components/Loader/Loader"; import { PageHeader, PageHeaderTitle } from "components/PageHeader/PageHeader"; import dayjs from "dayjs"; @@ -20,6 +21,7 @@ import { type FC, useState } from "react"; import { Helmet } from "react-helmet-async"; import { useMutation, useQuery, useQueryClient } from "react-query"; import { useNavigate, useParams } from "react-router-dom"; +import { docs } from "utils/docs"; 
import { pageTitle } from "utils/page"; import { WorkspaceScheduleForm } from "./WorkspaceScheduleForm"; import { @@ -32,7 +34,7 @@ const permissionsToCheck = (workspace: TypesGen.Workspace) => updateWorkspace: { object: { resource_type: "workspace", - resourceId: workspace.id, + resource_id: workspace.id, owner_id: workspace.owner_id, }, action: "update", @@ -94,42 +96,62 @@ const WorkspaceSchedulePage: FC = () => { )} - {template && ( - { - navigate(`/@${username}/${workspaceName}`); - }} - onSubmit={async (values) => { - const data = { - workspace, - autostart: formValuesToAutostartRequest(values), - ttl: formValuesToTTLRequest(values), - autostartChanged: scheduleChanged( - getAutostart(workspace), - values, - ), - autostopChanged: scheduleChanged(getAutostop(workspace), values), - }; - - await submitScheduleMutation.mutateAsync(data); - - if ( - data.autostopChanged && - getAutostop(workspace).autostopEnabled - ) { - setIsConfirmingApply(true); - } - }} - /> - )} + {template && + (workspace.is_prebuild ? ( + + Prebuilt workspaces ignore workspace-level scheduling until they are + claimed. For prebuilt workspace specific scheduling refer to the{" "} + + Prebuilt Workspaces Scheduling + + documentation page. 
+ + ) : ( + { + navigate(`/@${username}/${workspaceName}`); + }} + onSubmit={async (values) => { + const data = { + workspace, + autostart: formValuesToAutostartRequest(values), + ttl: formValuesToTTLRequest(values), + autostartChanged: scheduleChanged( + getAutostart(workspace), + values, + ), + autostopChanged: scheduleChanged( + getAutostop(workspace), + values, + ), + }; + + await submitScheduleMutation.mutateAsync(data); + + if ( + data.autostopChanged && + getAutostop(workspace).autostopEnabled + ) { + setIsConfirmingApply(true); + } + }} + /> + ))} } /> + } /> + } /> }> diff --git a/site/src/testHelpers/entities.ts b/site/src/testHelpers/entities.ts index 22dc47ae2390f..045d6ad06ddeb 100644 --- a/site/src/testHelpers/entities.ts +++ b/site/src/testHelpers/entities.ts @@ -1411,6 +1411,14 @@ export const MockWorkspace: TypesGen.Workspace = { deleting_at: null, dormant_at: null, next_start_at: null, + is_prebuild: false, +}; + +export const MockPrebuiltWorkspace = { + ...MockWorkspace, + owner_name: "prebuilds", + name: "prebuilt-workspace", + is_prebuild: true, }; export const MockFavoriteWorkspace: TypesGen.Workspace = { @@ -2450,6 +2458,21 @@ export const MockEntitlementsWithAuditLog: TypesGen.Entitlements = { }), }; +export const MockEntitlementsWithConnectionLog: TypesGen.Entitlements = { + errors: [], + warnings: [], + has_license: true, + require_telemetry: false, + trial: false, + refreshed_at: "2022-05-20T16:45:57.122Z", + features: withDefaultFeatures({ + connection_log: { + enabled: true, + entitlement: "entitled", + }, + }), +}; + export const MockEntitlementsWithScheduling: TypesGen.Entitlements = { errors: [], warnings: [], @@ -2718,6 +2741,79 @@ export const MockAuditLogRequestPasswordReset: TypesGen.AuditLog = { }, }; +export const MockWebConnectionLog: TypesGen.ConnectionLog = { + id: "497dcba3-ecbf-4587-a2dd-5eb0665e6880", + connect_time: "2022-05-19T16:45:57.122Z", + organization: { + id: MockOrganization.id, + name: MockOrganization.name, + 
display_name: MockOrganization.display_name,
+		icon: MockOrganization.icon,
+	},
+	workspace_owner_id: MockUserMember.id,
+	workspace_owner_username: MockUserMember.username,
+	workspace_id: MockWorkspace.id,
+	workspace_name: MockWorkspace.name,
+	agent_name: "dev",
+	ip: "127.0.0.1",
+	type: "workspace_app",
+	web_info: {
+		user_agent:
+			'"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36"',
+		user: MockUserMember,
+		slug_or_port: "code-server",
+		status_code: 200,
+	},
+};
+
+export const MockConnectedSSHConnectionLog: TypesGen.ConnectionLog = {
+	id: "7884a866-4ae1-4945-9fba-b2b8d2b7c5a9",
+	connect_time: "2022-05-19T16:45:57.122Z",
+	organization: {
+		id: MockOrganization.id,
+		name: MockOrganization.name,
+		display_name: MockOrganization.display_name,
+		icon: MockOrganization.icon,
+	},
+	workspace_owner_id: MockUserMember.id,
+	workspace_owner_username: MockUserMember.username,
+	workspace_id: MockWorkspace.id,
+	workspace_name: MockWorkspace.name,
+	agent_name: "dev",
+	ip: "127.0.0.1",
+	type: "ssh",
+	ssh_info: {
+		connection_id: "026c8c11-fc5c-4df8-a286-5fe6d7f54f98",
+		disconnect_reason: undefined,
+		disconnect_time: undefined,
+		exit_code: undefined,
+	},
+};
+
+export const MockDisconnectedSSHConnectionLog: TypesGen.ConnectionLog = {
+	id: "893e75e0-1518-4ac8-9629-35923a39533a",
+	connect_time: "2022-05-19T16:45:57.122Z",
+	organization: {
+		id: MockOrganization.id,
+		name: MockOrganization.name,
+		display_name: MockOrganization.display_name,
+		icon: MockOrganization.icon,
+	},
+	workspace_owner_id: MockUserMember.id,
+	workspace_owner_username: MockUserMember.username,
+	workspace_id: MockWorkspace.id,
+	workspace_name: MockWorkspace.name,
+	agent_name: "dev",
+	ip: "127.0.0.1",
+	type: "ssh",
+	ssh_info: {
+		connection_id: "026c8c11-fc5c-4df8-a286-5fe6d7f54f98",
+		disconnect_reason: "server shut down",
+		disconnect_time: "2022-05-19T16:49:57.122Z",
+		exit_code: 0,
+	},
+};
+
 export const MockWorkspaceQuota: TypesGen.WorkspaceQuota = {
 	credits_consumed: 0,
 	budget: 100,
@@ -2882,6 +2978,7 @@ export const MockPermissions: Permissions = {
 	viewAllUsers: true,
 	updateUsers: true,
 	viewAnyAuditLog: true,
+	viewAnyConnectionLog: true,
 	viewDeploymentConfig: true,
 	editDeploymentConfig: true,
 	viewDeploymentStats: true,
@@ -2909,6 +3006,7 @@ export const MockNoPermissions: Permissions = {
 	viewAllUsers: false,
 	updateUsers: false,
 	viewAnyAuditLog: false,
+	viewAnyConnectionLog: false,
 	viewDeploymentConfig: false,
 	editDeploymentConfig: false,
 	viewDeploymentStats: false,
diff --git a/site/src/utils/connection.ts b/site/src/utils/connection.ts
new file mode 100644
index 0000000000000..0150fa333e158
--- /dev/null
+++ b/site/src/utils/connection.ts
@@ -0,0 +1,20 @@
+import type { ConnectionType } from "api/typesGenerated";
+
+// Display label for each connection type. A Record keyed by the full
+// ConnectionType union keeps the mapping exhaustive: adding a new variant
+// without a label is a compile error, same guarantee as the switch form.
+const connectionTypeFriendlyNames: Record<ConnectionType, string> = {
+	jetbrains: "JetBrains",
+	port_forwarding: "Port Forwarding",
+	reconnecting_pty: "Web Terminal",
+	ssh: "SSH",
+	vscode: "VS Code",
+	workspace_app: "Workspace App",
+};
+
+export const connectionTypeToFriendlyName = (type: ConnectionType): string =>
+	connectionTypeFriendlyNames[type];
+
+// Whether the connection type is made through the browser.
+export const connectionTypeIsWeb = (type: ConnectionType): boolean =>
+	type === "port_forwarding" || type === "workspace_app";
diff --git a/site/src/utils/http.ts b/site/src/utils/http.ts
new file mode 100644
index 0000000000000..5ea00dbd18e01
--- /dev/null
+++ b/site/src/utils/http.ts
@@ -0,0 +1,13 @@
+import type { ThemeRole } from "theme/roles";
+
+// Theme role used to render an HTTP status code:
+// 5xx (server errors) -> error, 4xx (client errors) -> warning,
+// everything else (2xx success, 3xx redirects) -> success.
+export const httpStatusColor = (httpStatus: number): ThemeRole => {
+	if (httpStatus >= 500) {
+		return "error";
+	}
+	if (httpStatus >= 400) {
+		return "warning";
+	}
+	return "success";
+};