diff --git a/.github/actions/setup-go/action.yaml b/.github/actions/setup-go/action.yaml index c53ce28c3c591..e3610c76d4085 100644 --- a/.github/actions/setup-go/action.yaml +++ b/.github/actions/setup-go/action.yaml @@ -4,12 +4,12 @@ description: | inputs: version: description: "The Go version to use." - default: "1.22.3" + default: "1.22.4" runs: using: "composite" steps: - name: Setup Go - uses: buildjet/setup-go@v5 + uses: actions/setup-go@v5 with: go-version: ${{ inputs.version }} diff --git a/.github/actions/setup-node/action.yaml b/.github/actions/setup-node/action.yaml index c0a5477ec143b..9d439a67bb499 100644 --- a/.github/actions/setup-node/action.yaml +++ b/.github/actions/setup-node/action.yaml @@ -15,7 +15,7 @@ runs: with: version: 8 - name: Setup Node - uses: buildjet/setup-node@v4.0.1 + uses: actions/setup-node@v4.0.1 with: node-version: 18.19.0 # See https://github.com/actions/setup-node#caching-global-packages-data diff --git a/.github/actions/setup-tf/action.yaml b/.github/actions/setup-tf/action.yaml index 0fa40bdbfdefc..e660e6f3c3f5f 100644 --- a/.github/actions/setup-tf/action.yaml +++ b/.github/actions/setup-tf/action.yaml @@ -7,5 +7,5 @@ runs: - name: Install Terraform uses: hashicorp/setup-terraform@v3 with: - terraform_version: 1.7.5 + terraform_version: 1.8.4 terraform_wrapper: false diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml index fecbe9ba959cb..0f8f8849a84c2 100644 --- a/.github/dependabot.yaml +++ b/.github/dependabot.yaml @@ -61,7 +61,9 @@ updates: - dependency-name: "terraform" - package-ecosystem: "npm" - directory: "/site/" + directories: + - "/site" + - "/offlinedocs" schedule: interval: "monthly" time: "06:00" @@ -82,33 +84,3 @@ updates: update-types: - version-update:semver-major open-pull-requests-limit: 15 - groups: - site: - patterns: - - "*" - - - package-ecosystem: "npm" - directory: "/offlinedocs/" - schedule: - interval: "monthly" - time: "06:00" - timezone: "America/Chicago" - reviewers: - - "coder/ts" - commit-message: - prefix: "chore" - labels: [] - ignore: - # Ignore patch updates for all dependencies - - dependency-name: "*" - update-types: - - version-update:semver-patch - # Ignore major updates to Node.js types, because they need to - # correspond to the Node.js engine version - - dependency-name: "@types/node" - update-types: - - version-update:semver-major - groups: - offlinedocs: - patterns: - - "*" diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index e092cef28ab02..bcb58924e7cba 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -120,7 +120,7 @@ jobs: update-flake: needs: changes if: needs.changes.outputs.gomod == 'true' - runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }} + runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }} steps: - name: Checkout uses: actions/checkout@v4 @@ -139,7 +139,7 @@ jobs: lint: needs: changes if: needs.changes.outputs.offlinedocs-only == 'false' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main' - runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }} + runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }} steps: - name: Checkout uses: actions/checkout@v4 @@ -160,7 +160,7 @@ jobs: echo "LINT_CACHE_DIR=$dir" >> $GITHUB_ENV - name: golangci-lint cache - uses: buildjet/cache@v4 + uses: actions/cache@v4 with: path: | ${{ env.LINT_CACHE_DIR }} @@ 
-170,7 +170,7 @@ jobs: # Check for any typos - name: Check for typos - uses: crate-ci/typos@v1.21.0 + uses: crate-ci/typos@v1.22.9 with: config: .github/workflows/typos.toml @@ -191,9 +191,15 @@ jobs: run: | make --output-sync=line -j lint + - name: Check workflow files + run: | + bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash) 1.6.22 + ./actionlint -color -shellcheck= -ignore "set-output" + shell: bash + gen: timeout-minutes: 8 - runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }} + runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }} needs: changes if: needs.changes.outputs.docs-only == 'false' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main' steps: @@ -243,7 +249,7 @@ jobs: fmt: needs: changes if: needs.changes.outputs.offlinedocs-only == 'false' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main' - runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }} + runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }} timeout-minutes: 7 steps: - name: Checkout @@ -254,12 +260,9 @@ jobs: - name: Setup Node uses: ./.github/actions/setup-node + # Use default Go version - name: Setup Go - uses: buildjet/setup-go@v5 - with: - # This doesn't need caching. It's super fast anyways! - cache: false - go-version: 1.21.9 + uses: ./.github/actions/setup-go - name: Install shfmt run: go install mvdan.cc/sh/v3/cmd/shfmt@v3.7.0 @@ -273,7 +276,7 @@ jobs: run: ./scripts/check_unstaged.sh test-go: - runs-on: ${{ matrix.os == 'ubuntu-latest' && github.repository_owner == 'coder' && 'buildjet-4vcpu-ubuntu-2204' || matrix.os == 'macos-latest' && github.repository_owner == 'coder' && 'macos-latest-xlarge' || matrix.os == 'windows-2022' && github.repository_owner == 'coder' && 'windows-latest-16-cores' || matrix.os }} + runs-on: ${{ matrix.os == 'ubuntu-latest' && github.repository_owner == 'coder' && 'depot-ubuntu-22.04-4' || matrix.os == 'macos-latest' && github.repository_owner == 'coder' && 'macos-latest-xlarge' || matrix.os == 'windows-2022' && github.repository_owner == 'coder' && 'windows-latest-16-cores' || matrix.os }} needs: changes if: needs.changes.outputs.go == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main' timeout-minutes: 20 @@ -329,7 +332,7 @@ jobs: api-key: ${{ secrets.DATADOG_API_KEY }} test-go-pg: - runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }} + runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }} needs: - changes if: needs.changes.outputs.go == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main' @@ -351,8 +354,50 @@ jobs: uses: ./.github/actions/setup-tf - name: Test with PostgreSQL Database + env: + POSTGRES_VERSION: "13" + TS_DEBUG_DISCO: "true" + run: | + make test-postgres + + - name: Upload test stats to Datadog + timeout-minutes: 1 + continue-on-error: true + uses: ./.github/actions/upload-datadog + if: success() || failure() + with: + api-key: ${{ secrets.DATADOG_API_KEY }} + + # NOTE: this could instead be defined as a matrix strategy, but we want to + # only block merging if tests on postgres 13 fail. Using a matrix strategy + # here makes the check in the above `required` job rather complicated. 
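Both Postgres jobs reach `make test-postgres` through a single knob, the `POSTGRES_VERSION` environment variable; the Makefile changes later in this diff default it to 16 and splice it into both the container name and the image tag. A small Go sketch of that lookup-with-default pattern, where `postgresImage` is an invented helper rather than anything in the repository:

```go
package main

import (
	"fmt"
	"os"
)

// postgresImage mirrors the Makefile's `POSTGRES_VERSION ?= 16` default and the
// image reference `gcr.io/coder-dev-1/postgres:${POSTGRES_VERSION}` used by
// `make test-postgres-docker`. It is an illustrative helper only.
func postgresImage() string {
	version := os.Getenv("POSTGRES_VERSION")
	if version == "" {
		version = "16" // Makefile default introduced in this diff
	}
	return "gcr.io/coder-dev-1/postgres:" + version
}

func main() {
	// CI sets POSTGRES_VERSION to "13" (required job) or "16" (extra job).
	fmt.Println(postgresImage())
}
```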
+ test-go-pg-16: + runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }} + needs: + - changes + if: needs.changes.outputs.go == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main' + # This timeout must be greater than the timeout set by `go test` in + # `make test-postgres` to ensure we receive a trace of running + # goroutines. Setting this to the timeout +5m should work quite well + # even if some of the preceding steps are slow. + timeout-minutes: 25 + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 1 + + - name: Setup Go + uses: ./.github/actions/setup-go + + - name: Setup Terraform + uses: ./.github/actions/setup-tf + + - name: Test with PostgreSQL Database + env: + POSTGRES_VERSION: "16" + TS_DEBUG_DISCO: "true" run: | - export TS_DEBUG_DISCO=true make test-postgres - name: Upload test stats to Datadog @@ -364,7 +409,7 @@ jobs: api-key: ${{ secrets.DATADOG_API_KEY }} test-go-race: - runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }} + runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }} needs: changes if: needs.changes.outputs.go == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main' timeout-minutes: 25 @@ -399,7 +444,7 @@ jobs: # These tests are skipped in the main go test jobs because they require root # and mess with networking. test-go-tailnet-integration: - runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }} + runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }} needs: changes # Unnecessary to run on main for now if: needs.changes.outputs.tailnet-integration == 'true' || needs.changes.outputs.ci == 'true' @@ -421,7 +466,7 @@ jobs: run: make test-tailnet-integration test-js: - runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }} + runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }} needs: changes if: needs.changes.outputs.ts == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main' timeout-minutes: 20 @@ -438,7 +483,7 @@ jobs: working-directory: site test-e2e: - runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-16vcpu-ubuntu-2204' || 'ubuntu-latest' }} + runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-16' || 'ubuntu-latest' }} needs: changes if: needs.changes.outputs.go == 'true' || needs.changes.outputs.ts == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main' timeout-minutes: 20 @@ -582,7 +627,7 @@ jobs: offlinedocs: name: offlinedocs needs: changes - runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }} + runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }} if: needs.changes.outputs.offlinedocs == 'true' || needs.changes.outputs.ci == 'true' || needs.changes.outputs.docs == 'true' steps: @@ -680,11 +725,10 @@ jobs: build: # This builds and publishes ghcr.io/coder/coder-preview:main for each commit - # to main branch. We are only building this for amd64 platform. (>95% pulls - # are for amd64) + # to main branch. 
needs: changes if: github.ref == 'refs/heads/main' && needs.changes.outputs.docs-only == 'false' && !github.event.pull_request.head.repo.fork - runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }} + runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }} env: DOCKER_CLI_EXPERIMENTAL: "enabled" outputs: @@ -890,7 +934,7 @@ jobs: # runs sqlc-vet to ensure all queries are valid. This catches any mistakes # in migrations or sqlc queries that makes a query unable to be prepared. sqlc-vet: - runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }} + runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }} needs: changes if: needs.changes.outputs.db == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main' steps: @@ -922,7 +966,7 @@ jobs: uses: actions/dependency-review-action@v4.3.2 with: allow-licenses: Apache-2.0, BSD-2-Clause, BSD-3-Clause, CC0-1.0, ISC, MIT, MIT-0, MPL-2.0 - allow-dependencies-licenses: "pkg:golang/github.com/coder/wgtunnel@0.1.13-0.20240522110300-ade90dfb2da0" + allow-dependencies-licenses: "pkg:golang/github.com/coder/wgtunnel@0.1.13-0.20240522110300-ade90dfb2da0, pkg:npm/pako@1.0.11" license-check: true vulnerability-check: false - name: "Report" diff --git a/.github/workflows/nightly-gauntlet.yaml b/.github/workflows/nightly-gauntlet.yaml index 592abe921c013..4d04f824e9cfc 100644 --- a/.github/workflows/nightly-gauntlet.yaml +++ b/.github/workflows/nightly-gauntlet.yaml @@ -11,7 +11,7 @@ jobs: # While GitHub's toaster runners are likelier to flake, we want consistency # between this environment and the regular test environment for DataDog # statistics and to only show real workflow threats. - runs-on: "buildjet-8vcpu-ubuntu-2204" + runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }} # This runner costs 0.016 USD per minute, # so 0.016 * 240 = 3.84 USD per run. timeout-minutes: 240 @@ -40,7 +40,7 @@ jobs: go-timing: # We run these tests with p=1 so we don't need a lot of compute. - runs-on: "buildjet-2vcpu-ubuntu-2204" + runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04' || 'ubuntu-latest' }} timeout-minutes: 10 steps: - name: Checkout diff --git a/.github/workflows/pr-auto-assign.yaml b/.github/workflows/pr-auto-assign.yaml index e042124d04d14..d8210637f1061 100644 --- a/.github/workflows/pr-auto-assign.yaml +++ b/.github/workflows/pr-auto-assign.yaml @@ -14,4 +14,4 @@ jobs: runs-on: ubuntu-latest steps: - name: Assign author - uses: toshimaru/auto-author-assign@v2.1.0 + uses: toshimaru/auto-author-assign@v2.1.1 diff --git a/.github/workflows/pr-deploy.yaml b/.github/workflows/pr-deploy.yaml index 68693fe29ce04..5fff7cafe0d25 100644 --- a/.github/workflows/pr-deploy.yaml +++ b/.github/workflows/pr-deploy.yaml @@ -189,7 +189,7 @@ jobs: needs: get_info # Run build job only if there are changes in the files that we care about or if the workflow is manually triggered with --build flag if: needs.get_info.outputs.BUILD == 'true' - runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }} + runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }} # This concurrency only cancels build jobs if a new build is triggred. It will avoid cancelling the current deployemtn in case of docs chnages. 
concurrency: group: build-${{ github.workflow }}-${{ github.ref }}-${{ needs.get_info.outputs.BUILD }} diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index faa6593452e25..a13bbbe3fd91b 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -39,7 +39,7 @@ env: jobs: release: name: Build and publish - runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }} + runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }} env: # Necessary for Docker manifest DOCKER_CLI_EXPERIMENTAL: "enabled" @@ -180,7 +180,7 @@ jobs: - name: Test migrations from current ref to main run: | - make test-migrations + POSTGRES_VERSION=13 make test-migrations # Setup GCloud for signing Windows binaries. - name: Authenticate to Google Cloud @@ -297,7 +297,7 @@ jobs: # build Docker images for each architecture version="$(./scripts/version.sh)" - make -j build/coder_"$version"_linux_{amd64,arm64,armv7}.tag + make build/coder_"$version"_linux_{amd64,arm64,armv7}.tag # we can't build multi-arch if the images aren't pushed, so quit now # if dry-running @@ -308,7 +308,7 @@ jobs: # build and push multi-arch manifest, this depends on the other images # being pushed so will automatically push them. - make -j push/build/coder_"$version"_linux.tag + make push/build/coder_"$version"_linux.tag # if the current version is equal to the highest (according to semver) # version in the repo, also create a multi-arch image as ":latest" and diff --git a/.github/workflows/security.yaml b/.github/workflows/security.yaml index 1bf0bf4b63180..c4420ce688446 100644 --- a/.github/workflows/security.yaml +++ b/.github/workflows/security.yaml @@ -23,7 +23,7 @@ concurrency: jobs: codeql: - runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }} + runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }} steps: - name: Checkout uses: actions/checkout@v4 @@ -56,7 +56,7 @@ jobs: "${{ secrets.SLACK_SECURITY_FAILURE_WEBHOOK_URL }}" trivy: - runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }} + runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }} steps: - name: Checkout uses: actions/checkout@v4 @@ -114,7 +114,7 @@ jobs: echo "image=$(cat "$image_job")" >> $GITHUB_OUTPUT - name: Run Trivy vulnerability scanner - uses: aquasecurity/trivy-action@fd25fed6972e341ff0007ddb61f77e88103953c2 + uses: aquasecurity/trivy-action@7c2007bcb556501da015201bcba5aa14069b74e2 with: image-ref: ${{ steps.build.outputs.image }} format: sarif diff --git a/.github/workflows/typos.toml b/.github/workflows/typos.toml index 559260e0f7f32..7ee9554f0cdc3 100644 --- a/.github/workflows/typos.toml +++ b/.github/workflows/typos.toml @@ -33,4 +33,5 @@ extend-exclude = [ "**/pnpm-lock.yaml", "tailnet/testdata/**", "site/src/pages/SetupPage/countries.tsx", + "provisioner/terraform/testdata/**", ] diff --git a/.gitignore b/.gitignore index 5e5631409ce86..29081a803f217 100644 --- a/.gitignore +++ b/.gitignore @@ -68,3 +68,6 @@ result # Filebrowser.db **/filebrowser.db + +# pnpm +.pnpm-store/ diff --git a/.prettierignore b/.prettierignore index 9be32290acf05..f0bb6e214de4c 100644 --- a/.prettierignore +++ b/.prettierignore @@ -71,6 +71,9 @@ result # Filebrowser.db **/filebrowser.db + +# pnpm +.pnpm-store/ # .prettierignore.include: # Helm templates contain variables that are 
invalid YAML and can't be formatted # by Prettier. diff --git a/Makefile b/Makefile index 47cdea7cb653a..c3059800c7515 100644 --- a/Makefile +++ b/Makefile @@ -36,6 +36,7 @@ GOOS := $(shell go env GOOS) GOARCH := $(shell go env GOARCH) GOOS_BIN_EXT := $(if $(filter windows, $(GOOS)),.exe,) VERSION := $(shell ./scripts/version.sh) +POSTGRES_VERSION ?= 16 # Use the highest ZSTD compression level in CI. ifdef CI @@ -615,10 +616,10 @@ site/src/theme/icons.json: $(wildcard scripts/gensite/*) $(wildcard site/static/ examples/examples.gen.json: scripts/examplegen/main.go examples/examples.go $(shell find ./examples/templates) go run ./scripts/examplegen/main.go > examples/examples.gen.json -coderd/rbac/object_gen.go: scripts/rbacgen/main.go coderd/rbac/object.go +coderd/rbac/object_gen.go: scripts/rbacgen/rbacobject.gotmpl scripts/rbacgen/main.go coderd/rbac/object.go go run scripts/rbacgen/main.go rbac > coderd/rbac/object_gen.go -codersdk/rbacresources_gen.go: scripts/rbacgen/main.go coderd/rbac/object.go +codersdk/rbacresources_gen.go: scripts/rbacgen/codersdk.gotmpl scripts/rbacgen/main.go coderd/rbac/object.go go run scripts/rbacgen/main.go codersdk > codersdk/rbacresources_gen.go docs/admin/prometheus.md: scripts/metricsdocgen/main.go scripts/metricsdocgen/metrics @@ -814,7 +815,7 @@ test-migrations: test-postgres-docker # NOTE: we set --memory to the same size as a GitHub runner. test-postgres-docker: - docker rm -f test-postgres-docker || true + docker rm -f test-postgres-docker-${POSTGRES_VERSION} || true docker run \ --env POSTGRES_PASSWORD=postgres \ --env POSTGRES_USER=postgres \ @@ -822,11 +823,11 @@ test-postgres-docker: --env PGDATA=/tmp \ --tmpfs /tmp \ --publish 5432:5432 \ - --name test-postgres-docker \ + --name test-postgres-docker-${POSTGRES_VERSION} \ --restart no \ --detach \ --memory 16GB \ - gcr.io/coder-dev-1/postgres:13 \ + gcr.io/coder-dev-1/postgres:${POSTGRES_VERSION} \ -c shared_buffers=1GB \ -c work_mem=1GB \ -c effective_cache_size=1GB \ @@ -865,3 +866,7 @@ test-tailnet-integration: test-clean: go clean -testcache .PHONY: test-clean + +.PHONY: test-e2e +test-e2e: + cd ./site && DEBUG=pw:api pnpm playwright:test --forbid-only --workers 1 diff --git a/README.md b/README.md index a39b8219074b2..7bf1cd92b954e 100644 --- a/README.md +++ b/README.md @@ -20,17 +20,17 @@

-[Quickstart](#quickstart) | [Docs](https://coder.com/docs) | [Why Coder](https://coder.com/why) | [Enterprise](https://coder.com/docs/v2/latest/enterprise) +[Quickstart](#quickstart) | [Docs](https://coder.com/docs) | [Why Coder](https://coder.com/why) | [Enterprise](https://coder.com/docs/enterprise) [![discord](https://img.shields.io/discord/747933592273027093?label=discord)](https://discord.gg/coder) [![release](https://img.shields.io/github/v/release/coder/coder)](https://github.com/coder/coder/releases/latest) [![godoc](https://pkg.go.dev/badge/github.com/coder/coder.svg)](https://pkg.go.dev/github.com/coder/coder) -[![Go Report Card](https://goreportcard.com/badge/github.com/coder/coder)](https://goreportcard.com/report/github.com/coder/coder) +[![Go Report Card](https://goreportcard.com/badge/github.com/coder/coder/v2)](https://goreportcard.com/report/github.com/coder/coder/v2) [![license](https://img.shields.io/github/license/coder/coder)](./LICENSE) -[Coder](https://coder.com) enables organizations to set up development environments in their public or private cloud infrastructure. Cloud development environments are defined with Terraform, connected through a secure high-speed Wireguard® tunnel, and are automatically shut down when not in use to save on costs. Coder gives engineering teams the flexibility to use the cloud for workloads that are most beneficial to them. +[Coder](https://coder.com) enables organizations to set up development environments in their public or private cloud infrastructure. Cloud development environments are defined with Terraform, connected through a secure high-speed Wireguard® tunnel, and automatically shut down when not used to save on costs. Coder gives engineering teams the flexibility to use the cloud for workloads most beneficial to them. - Define cloud development environments in Terraform - EC2 VMs, Kubernetes Pods, Docker Containers, etc. @@ -53,7 +53,7 @@ curl -L https://coder.com/install.sh | sh coder server # Navigate to http://localhost:3000 to create your initial user, -# create a Docker template, and provision a workspace +# create a Docker template and provision a workspace ``` ## Install @@ -69,7 +69,7 @@ curl -L https://coder.com/install.sh | sh You can run the install script with `--dry-run` to see the commands that will be used to install without executing them. Run the install script with `--help` for additional flags. -> See [install](https://coder.com/docs/v2/latest/install) for additional methods. +> See [install](https://coder.com/docs/install) for additional methods. Once installed, you can start a production deployment with a single command: @@ -81,27 +81,27 @@ coder server coder server --postgres-url --access-url ``` -Use `coder --help` to get a list of flags and environment variables. Use our [install guides](https://coder.com/docs/v2/latest/install) for a full walkthrough. +Use `coder --help` to get a list of flags and environment variables. Use our [install guides](https://coder.com/docs/install) for a complete walkthrough. 
## Documentation -Browse our docs [here](https://coder.com/docs/v2) or visit a specific section below: +Browse our docs [here](https://coder.com/docs) or visit a specific section below: -- [**Templates**](https://coder.com/docs/v2/latest/templates): Templates are written in Terraform and describe the infrastructure for workspaces -- [**Workspaces**](https://coder.com/docs/v2/latest/workspaces): Workspaces contain the IDEs, dependencies, and configuration information needed for software development -- [**IDEs**](https://coder.com/docs/v2/latest/ides): Connect your existing editor to a workspace -- [**Administration**](https://coder.com/docs/v2/latest/admin): Learn how to operate Coder -- [**Enterprise**](https://coder.com/docs/v2/latest/enterprise): Learn about our paid features built for large teams +- [**Templates**](https://coder.com/docs/templates): Templates are written in Terraform and describe the infrastructure for workspaces +- [**Workspaces**](https://coder.com/docs/workspaces): Workspaces contain the IDEs, dependencies, and configuration information needed for software development +- [**IDEs**](https://coder.com/docs/ides): Connect your existing editor to a workspace +- [**Administration**](https://coder.com/docs/admin): Learn how to operate Coder +- [**Enterprise**](https://coder.com/docs/enterprise): Learn about our paid features built for large teams ## Support Feel free to [open an issue](https://github.com/coder/coder/issues/new) if you have questions, run into bugs, or have a feature request. -[Join our Discord](https://discord.gg/coder) to provide feedback on in-progress features, and chat with the community using Coder! +[Join our Discord](https://discord.gg/coder) to provide feedback on in-progress features and chat with the community using Coder! ## Integrations -We are always working on new integrations. Feel free to open an issue to request an integration. Contributions are welcome in any official or community repositories. +We are always working on new integrations. Please feel free to open an issue and ask for an integration. Contributions are welcome in any official or community repositories. ### Official @@ -120,9 +120,9 @@ We are always working on new integrations. Feel free to open an issue to request ## Contributing We are always happy to see new contributors to Coder. If you are new to the Coder codebase, we have -[a guide on how to get started](https://coder.com/docs/v2/latest/CONTRIBUTING). We'd love to see your +[a guide on how to get started](https://coder.com/docs/CONTRIBUTING). We'd love to see your contributions! ## Hiring -Apply [here](https://cdr.co/github-apply) if you're interested in joining our team. +Apply [here](https://jobs.ashbyhq.com/coder?utm_source=github&utm_medium=readme&utm_campaign=unknown) if you're interested in joining our team. diff --git a/agent/agent.go b/agent/agent.go index c7a785f8d5da1..5512f04db28ea 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -91,6 +91,7 @@ type Options struct { ModifiedProcesses chan []*agentproc.Process // ProcessManagementTick is used for testing process priority management. 
ProcessManagementTick <-chan time.Time + BlockFileTransfer bool } type Client interface { @@ -184,6 +185,7 @@ func New(options Options) Agent { modifiedProcs: options.ModifiedProcesses, processManagementTick: options.ProcessManagementTick, logSender: agentsdk.NewLogSender(options.Logger), + blockFileTransfer: options.BlockFileTransfer, prometheusRegistry: prometheusRegistry, metrics: newAgentMetrics(prometheusRegistry), @@ -239,6 +241,7 @@ type agent struct { sessionToken atomic.Pointer[string] sshServer *agentssh.Server sshMaxTimeout time.Duration + blockFileTransfer bool lifecycleUpdate chan struct{} lifecycleReported chan codersdk.WorkspaceAgentLifecycle @@ -277,6 +280,7 @@ func (a *agent) init() { AnnouncementBanners: func() *[]codersdk.BannerConfig { return a.announcementBanners.Load() }, UpdateEnv: a.updateCommandEnv, WorkingDirectory: func() string { return a.manifest.Load().Directory }, + BlockFileTransfer: a.blockFileTransfer, }) if err != nil { panic(err) diff --git a/agent/agent_test.go b/agent/agent_test.go index a008a60a2362e..4b0712bcf93c6 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -970,6 +970,99 @@ func TestAgent_SCP(t *testing.T) { require.NoError(t, err) } +func TestAgent_FileTransferBlocked(t *testing.T) { + t.Parallel() + + assertFileTransferBlocked := func(t *testing.T, errorMessage string) { + // NOTE: Checking content of the error message is flaky. Most likely there is a race condition, which results + // in stopping the client in different phases, and returning different errors: + // - client read the full error message: File transfer has been disabled. + // - client's stream was terminated before reading the error message: EOF + // - client just read the error code (Windows): Process exited with status 65 + isErr := strings.Contains(errorMessage, agentssh.BlockedFileTransferErrorMessage) || + strings.Contains(errorMessage, "EOF") || + strings.Contains(errorMessage, "Process exited with status 65") + require.True(t, isErr, fmt.Sprintf("Message: "+errorMessage)) + } + + t.Run("SFTP", func(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) + defer cancel() + + //nolint:dogsled + conn, _, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) { + o.BlockFileTransfer = true + }) + sshClient, err := conn.SSHClient(ctx) + require.NoError(t, err) + defer sshClient.Close() + _, err = sftp.NewClient(sshClient) + require.Error(t, err) + assertFileTransferBlocked(t, err.Error()) + }) + + t.Run("SCP with go-scp package", func(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) + defer cancel() + + //nolint:dogsled + conn, _, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) { + o.BlockFileTransfer = true + }) + sshClient, err := conn.SSHClient(ctx) + require.NoError(t, err) + defer sshClient.Close() + scpClient, err := scp.NewClientBySSH(sshClient) + require.NoError(t, err) + defer scpClient.Close() + tempFile := filepath.Join(t.TempDir(), "scp") + err = scpClient.CopyFile(context.Background(), strings.NewReader("hello world"), tempFile, "0755") + require.Error(t, err) + assertFileTransferBlocked(t, err.Error()) + }) + + t.Run("Forbidden commands", func(t *testing.T) { + t.Parallel() + + for _, c := range agentssh.BlockedFileTransferCommands { + t.Run(c, func(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithTimeout(context.Background(), 
testutil.WaitLong) + defer cancel() + + //nolint:dogsled + conn, _, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) { + o.BlockFileTransfer = true + }) + sshClient, err := conn.SSHClient(ctx) + require.NoError(t, err) + defer sshClient.Close() + + session, err := sshClient.NewSession() + require.NoError(t, err) + defer session.Close() + + stdout, err := session.StdoutPipe() + require.NoError(t, err) + + //nolint:govet // we don't need `c := c` in Go 1.22 + err = session.Start(c) + require.NoError(t, err) + defer session.Close() + + msg, err := io.ReadAll(stdout) + require.NoError(t, err) + assertFileTransferBlocked(t, string(msg)) + }) + } + }) +} + func TestAgent_EnvironmentVariables(t *testing.T) { t.Parallel() key := "EXAMPLE" diff --git a/agent/agentssh/agentssh.go b/agent/agentssh/agentssh.go index 54e5a3f41223e..5903220975b8c 100644 --- a/agent/agentssh/agentssh.go +++ b/agent/agentssh/agentssh.go @@ -52,8 +52,16 @@ const ( // MagicProcessCmdlineJetBrains is a string in a process's command line that // uniquely identifies it as JetBrains software. MagicProcessCmdlineJetBrains = "idea.vendor.name=JetBrains" + + // BlockedFileTransferErrorCode indicates that SSH server restricted the raw command from performing + // the file transfer. + BlockedFileTransferErrorCode = 65 // Error code: host not allowed to connect + BlockedFileTransferErrorMessage = "File transfer has been disabled." ) +// BlockedFileTransferCommands contains a list of restricted file transfer commands. +var BlockedFileTransferCommands = []string{"nc", "rsync", "scp", "sftp"} + // Config sets configuration parameters for the agent SSH server. type Config struct { // MaxTimeout sets the absolute connection timeout, none if empty. If set to @@ -74,6 +82,8 @@ type Config struct { // X11SocketDir is the directory where X11 sockets are created. Default is // /tmp/.X11-unix. X11SocketDir string + // BlockFileTransfer restricts use of file transfer applications. + BlockFileTransfer bool } type Server struct { @@ -272,6 +282,18 @@ func (s *Server) sessionHandler(session ssh.Session) { extraEnv = append(extraEnv, fmt.Sprintf("DISPLAY=:%d.0", x11.ScreenNumber)) } + if s.fileTransferBlocked(session) { + s.logger.Warn(ctx, "file transfer blocked", slog.F("session_subsystem", session.Subsystem()), slog.F("raw_command", session.RawCommand())) + + if session.Subsystem() == "" { // sftp does not expect error, otherwise it fails with "package too long" + // Response format: \n + errorMessage := fmt.Sprintf("\x02%s\n", BlockedFileTransferErrorMessage) + _, _ = session.Write([]byte(errorMessage)) + } + _ = session.Exit(BlockedFileTransferErrorCode) + return + } + switch ss := session.Subsystem(); ss { case "": case "sftp": @@ -322,6 +344,37 @@ func (s *Server) sessionHandler(session ssh.Session) { _ = session.Exit(0) } +// fileTransferBlocked method checks if the file transfer commands should be blocked. +// +// Warning: consider this mechanism as "Do not trespass" sign, as a violator can still ssh to the host, +// smuggle the `scp` binary, or just manually send files outside with `curl` or `ftp`. +// If a user needs a more sophisticated and battle-proof solution, consider full endpoint security. +func (s *Server) fileTransferBlocked(session ssh.Session) bool { + if !s.config.BlockFileTransfer { + return false // file transfers are permitted + } + // File transfers are restricted. 
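The check continues below: the SFTP subsystem is always refused, and otherwise the basename of the command's first word is compared against `BlockedFileTransferCommands`, so absolute paths like `/usr/sbin/scp` are still caught. A standalone sketch of that matching idea, separate from the agent (the `isFileTransfer` helper and its string splitting are illustrative; the agent works from `session.Command()` instead):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// blockedCommands mirrors agentssh.BlockedFileTransferCommands from this diff.
var blockedCommands = []string{"nc", "rsync", "scp", "sftp"}

// isFileTransfer reports whether a raw command line looks like one of the
// blocked file-transfer programs, matching on the basename of the first word
// so that absolute paths such as /usr/bin/scp are caught too.
func isFileTransfer(rawCommand string) bool {
	fields := strings.Fields(rawCommand)
	if len(fields) == 0 {
		return false
	}
	name := filepath.Base(fields[0])
	for _, blocked := range blockedCommands {
		if blocked == name {
			return true
		}
	}
	return false
}

func main() {
	for _, cmd := range []string{"/usr/bin/scp -t /tmp", "ls -la", "rsync -av . host:"} {
		fmt.Printf("%-24q blocked=%v\n", cmd, isFileTransfer(cmd))
	}
}
```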
+ + if session.Subsystem() == "sftp" { + return true + } + + cmd := session.Command() + if len(cmd) == 0 { + return false // no command? + } + + c := cmd[0] + c = filepath.Base(c) // in case the binary is absolute path, /usr/sbin/scp + + for _, cmd := range BlockedFileTransferCommands { + if cmd == c { + return true + } + } + return false +} + func (s *Server) sessionStart(logger slog.Logger, session ssh.Session, extraEnv []string) (retErr error) { ctx := session.Context() env := append(session.Environ(), extraEnv...) diff --git a/agent/apphealth.go b/agent/apphealth.go index 1badc0f361376..0b7e87e57df68 100644 --- a/agent/apphealth.go +++ b/agent/apphealth.go @@ -10,14 +10,11 @@ import ( "golang.org/x/xerrors" "cdr.dev/slog" + "github.com/coder/coder/v2/clock" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/codersdk/agentsdk" - "github.com/coder/retry" ) -// WorkspaceAgentApps fetches the workspace apps. -type WorkspaceAgentApps func(context.Context) ([]codersdk.WorkspaceApp, error) - // PostWorkspaceAgentAppHealth updates the workspace app health. type PostWorkspaceAgentAppHealth func(context.Context, agentsdk.PostAppHealthsRequest) error @@ -26,15 +23,26 @@ type WorkspaceAppHealthReporter func(ctx context.Context) // NewWorkspaceAppHealthReporter creates a WorkspaceAppHealthReporter that reports app health to coderd. func NewWorkspaceAppHealthReporter(logger slog.Logger, apps []codersdk.WorkspaceApp, postWorkspaceAgentAppHealth PostWorkspaceAgentAppHealth) WorkspaceAppHealthReporter { + return NewAppHealthReporterWithClock(logger, apps, postWorkspaceAgentAppHealth, clock.NewReal()) +} + +// NewAppHealthReporterWithClock is only called directly by test code. Product code should call +// NewAppHealthReporter. +func NewAppHealthReporterWithClock( + logger slog.Logger, + apps []codersdk.WorkspaceApp, + postWorkspaceAgentAppHealth PostWorkspaceAgentAppHealth, + clk clock.Clock, +) WorkspaceAppHealthReporter { logger = logger.Named("apphealth") - runHealthcheckLoop := func(ctx context.Context) error { + return func(ctx context.Context) { ctx, cancel := context.WithCancel(ctx) defer cancel() // no need to run this loop if no apps for this workspace. if len(apps) == 0 { - return nil + return } hasHealthchecksEnabled := false @@ -49,7 +57,7 @@ func NewWorkspaceAppHealthReporter(logger slog.Logger, apps []codersdk.Workspace // no need to run this loop if no health checks are configured. if !hasHealthchecksEnabled { - return nil + return } // run a ticker for each app health check. @@ -61,25 +69,29 @@ func NewWorkspaceAppHealthReporter(logger slog.Logger, apps []codersdk.Workspace } app := nextApp go func() { - t := time.NewTicker(time.Duration(app.Healthcheck.Interval) * time.Second) - defer t.Stop() - - for { - select { - case <-ctx.Done(): - return - case <-t.C: - } - // we set the http timeout to the healthcheck interval to prevent getting too backed up. - client := &http.Client{ - Timeout: time.Duration(app.Healthcheck.Interval) * time.Second, - } + _ = clk.TickerFunc(ctx, time.Duration(app.Healthcheck.Interval)*time.Second, func() error { + // We time out at the healthcheck interval to prevent getting too backed up, but + // set it 1ms early so that it's not simultaneous with the next tick in testing, + // which makes the test easier to understand. + // + // It would be idiomatic to use the http.Client.Timeout or a context.WithTimeout, + // but we are passing this off to the native http library, which is not aware + // of the clock library we are using. 
That means in testing, with a mock clock + // it will compare mocked times with real times, and we will get strange results. + // So, we just implement the timeout as a context we cancel with an AfterFunc + reqCtx, reqCancel := context.WithCancel(ctx) + timeout := clk.AfterFunc( + time.Duration(app.Healthcheck.Interval)*time.Second-time.Millisecond, + reqCancel, + "timeout", app.Slug) + defer timeout.Stop() + err := func() error { - req, err := http.NewRequestWithContext(ctx, http.MethodGet, app.Healthcheck.URL, nil) + req, err := http.NewRequestWithContext(reqCtx, http.MethodGet, app.Healthcheck.URL, nil) if err != nil { return err } - res, err := client.Do(req) + res, err := http.DefaultClient.Do(req) if err != nil { return err } @@ -118,54 +130,36 @@ func NewWorkspaceAppHealthReporter(logger slog.Logger, apps []codersdk.Workspace mu.Unlock() logger.Debug(ctx, "workspace app healthy", slog.F("id", app.ID.String()), slog.F("slug", app.Slug)) } - - t.Reset(time.Duration(app.Healthcheck.Interval) * time.Second) - } + return nil + }, "healthcheck", app.Slug) }() } mu.Lock() lastHealth := copyHealth(health) mu.Unlock() - reportTicker := time.NewTicker(time.Second) - defer reportTicker.Stop() - // every second we check if the health values of the apps have changed - // and if there is a change we will report the new values. - for { - select { - case <-ctx.Done(): + reportTicker := clk.TickerFunc(ctx, time.Second, func() error { + mu.RLock() + changed := healthChanged(lastHealth, health) + mu.RUnlock() + if !changed { return nil - case <-reportTicker.C: - mu.RLock() - changed := healthChanged(lastHealth, health) - mu.RUnlock() - if !changed { - continue - } - - mu.Lock() - lastHealth = copyHealth(health) - mu.Unlock() - err := postWorkspaceAgentAppHealth(ctx, agentsdk.PostAppHealthsRequest{ - Healths: lastHealth, - }) - if err != nil { - logger.Error(ctx, "failed to report workspace app health", slog.Error(err)) - } else { - logger.Debug(ctx, "sent workspace app health", slog.F("health", lastHealth)) - } } - } - } - return func(ctx context.Context) { - for r := retry.New(time.Second, 30*time.Second); r.Wait(ctx); { - err := runHealthcheckLoop(ctx) - if err == nil || xerrors.Is(err, context.Canceled) || xerrors.Is(err, context.DeadlineExceeded) { - return + mu.Lock() + lastHealth = copyHealth(health) + mu.Unlock() + err := postWorkspaceAgentAppHealth(ctx, agentsdk.PostAppHealthsRequest{ + Healths: lastHealth, + }) + if err != nil { + logger.Error(ctx, "failed to report workspace app health", slog.Error(err)) + } else { + logger.Debug(ctx, "sent workspace app health", slog.F("health", lastHealth)) } - logger.Error(ctx, "failed running workspace app reporter", slog.Error(err)) - } + return nil + }, "report") + _ = reportTicker.Wait() // only possible error is context done } } diff --git a/agent/apphealth_test.go b/agent/apphealth_test.go index b8be5c1fa227f..ff411433e3821 100644 --- a/agent/apphealth_test.go +++ b/agent/apphealth_test.go @@ -4,14 +4,12 @@ import ( "context" "net/http" "net/http/httptest" + "slices" "strings" - "sync" - "sync/atomic" "testing" "time" "github.com/google/uuid" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "cdr.dev/slog" @@ -19,6 +17,7 @@ import ( "github.com/coder/coder/v2/agent" "github.com/coder/coder/v2/agent/agenttest" "github.com/coder/coder/v2/agent/proto" + "github.com/coder/coder/v2/clock" "github.com/coder/coder/v2/coderd/httpapi" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/codersdk/agentsdk" @@ -27,15 
+26,17 @@ import ( func TestAppHealth_Healthy(t *testing.T) { t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) + ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort) defer cancel() apps := []codersdk.WorkspaceApp{ { + ID: uuid.UUID{1}, Slug: "app1", Healthcheck: codersdk.Healthcheck{}, Health: codersdk.WorkspaceAppHealthDisabled, }, { + ID: uuid.UUID{2}, Slug: "app2", Healthcheck: codersdk.Healthcheck{ // URL: We don't set the URL for this test because the setup will @@ -46,6 +47,7 @@ func TestAppHealth_Healthy(t *testing.T) { Health: codersdk.WorkspaceAppHealthInitializing, }, { + ID: uuid.UUID{3}, Slug: "app3", Healthcheck: codersdk.Healthcheck{ Interval: 2, @@ -54,36 +56,71 @@ func TestAppHealth_Healthy(t *testing.T) { Health: codersdk.WorkspaceAppHealthInitializing, }, } + checks2 := 0 + checks3 := 0 handlers := []http.Handler{ nil, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + checks2++ httpapi.Write(r.Context(), w, http.StatusOK, nil) }), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + checks3++ httpapi.Write(r.Context(), w, http.StatusOK, nil) }), } - getApps, closeFn := setupAppReporter(ctx, t, apps, handlers) + mClock := clock.NewMock(t) + healthcheckTrap := mClock.Trap().TickerFunc("healthcheck") + defer healthcheckTrap.Close() + reportTrap := mClock.Trap().TickerFunc("report") + defer reportTrap.Close() + + fakeAPI, closeFn := setupAppReporter(ctx, t, slices.Clone(apps), handlers, mClock) defer closeFn() - apps, err := getApps(ctx) - require.NoError(t, err) - require.EqualValues(t, codersdk.WorkspaceAppHealthDisabled, apps[0].Health) - require.Eventually(t, func() bool { - apps, err := getApps(ctx) - if err != nil { - return false - } + healthchecksStarted := make([]string, 2) + for i := 0; i < 2; i++ { + c := healthcheckTrap.MustWait(ctx) + c.Release() + healthchecksStarted[i] = c.Tags[1] + } + slices.Sort(healthchecksStarted) + require.Equal(t, []string{"app2", "app3"}, healthchecksStarted) + + // advance the clock 1ms before the report ticker starts, so that it's not + // simultaneous with the checks. 
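These tests replace real sleeps with the mock clock: traps intercept `TickerFunc`/`AfterFunc` registration, and `Advance`/`Set` move time deterministically while `MustWait` blocks until the triggered callbacks finish. A minimal, self-contained sketch of that trap-then-advance pattern against the same `clock` package as used in this diff (the `countTicks` helper and its `"countTicks"` tag are invented for illustration):

```go
package example_test

import (
	"context"
	"sync/atomic"
	"testing"
	"time"

	"github.com/coder/coder/v2/clock"
)

// countTicks is an illustrative helper: it bumps the counter once per interval
// until ctx is canceled. It takes a clock.Clock so tests can pass a mock.
func countTicks(ctx context.Context, clk clock.Clock, interval time.Duration, n *atomic.Int64) {
	tkr := clk.TickerFunc(ctx, interval, func() error {
		n.Add(1)
		return nil
	}, "countTicks") // the tag is what mClock.Trap().TickerFunc(...) matches on
	_ = tkr.Wait() // returns only once ctx is done
}

func TestCountTicks(t *testing.T) {
	t.Parallel()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	mClock := clock.NewMock(t)
	trap := mClock.Trap().TickerFunc("countTicks")
	defer trap.Close()

	var n atomic.Int64
	go countTicks(ctx, mClock, time.Second, &n)

	// Block until the ticker is registered, then release it, so no tick can
	// fire before we start advancing the clock.
	trap.MustWait(ctx).Release()

	mClock.Advance(time.Second).MustWait(ctx)
	mClock.Advance(time.Second).MustWait(ctx)
	if n.Load() != 2 {
		t.Fatalf("expected 2 ticks, got %d", n.Load())
	}
}
```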
+ mClock.Advance(time.Millisecond).MustWait(ctx) + reportTrap.MustWait(ctx).Release() + + mClock.Advance(999 * time.Millisecond).MustWait(ctx) // app2 is now healthy + + mClock.Advance(time.Millisecond).MustWait(ctx) // report gets triggered + update := testutil.RequireRecvCtx(ctx, t, fakeAPI.AppHealthCh()) + require.Len(t, update.GetUpdates(), 2) + applyUpdate(t, apps, update) + require.Equal(t, codersdk.WorkspaceAppHealthHealthy, apps[1].Health) + require.Equal(t, codersdk.WorkspaceAppHealthInitializing, apps[2].Health) + + mClock.Advance(999 * time.Millisecond).MustWait(ctx) // app3 is now healthy - return apps[1].Health == codersdk.WorkspaceAppHealthHealthy && apps[2].Health == codersdk.WorkspaceAppHealthHealthy - }, testutil.WaitLong, testutil.IntervalSlow) + mClock.Advance(time.Millisecond).MustWait(ctx) // report gets triggered + update = testutil.RequireRecvCtx(ctx, t, fakeAPI.AppHealthCh()) + require.Len(t, update.GetUpdates(), 2) + applyUpdate(t, apps, update) + require.Equal(t, codersdk.WorkspaceAppHealthHealthy, apps[1].Health) + require.Equal(t, codersdk.WorkspaceAppHealthHealthy, apps[2].Health) + + // ensure we aren't spamming + require.Equal(t, 2, checks2) + require.Equal(t, 1, checks3) } func TestAppHealth_500(t *testing.T) { t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) + ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort) defer cancel() apps := []codersdk.WorkspaceApp{ { + ID: uuid.UUID{2}, Slug: "app2", Healthcheck: codersdk.Healthcheck{ // URL: We don't set the URL for this test because the setup will @@ -99,24 +136,40 @@ func TestAppHealth_500(t *testing.T) { httpapi.Write(r.Context(), w, http.StatusInternalServerError, nil) }), } - getApps, closeFn := setupAppReporter(ctx, t, apps, handlers) + + mClock := clock.NewMock(t) + healthcheckTrap := mClock.Trap().TickerFunc("healthcheck") + defer healthcheckTrap.Close() + reportTrap := mClock.Trap().TickerFunc("report") + defer reportTrap.Close() + + fakeAPI, closeFn := setupAppReporter(ctx, t, slices.Clone(apps), handlers, mClock) defer closeFn() - require.Eventually(t, func() bool { - apps, err := getApps(ctx) - if err != nil { - return false - } + healthcheckTrap.MustWait(ctx).Release() + // advance the clock 1ms before the report ticker starts, so that it's not + // simultaneous with the checks. 
+ mClock.Advance(time.Millisecond).MustWait(ctx) + reportTrap.MustWait(ctx).Release() - return apps[0].Health == codersdk.WorkspaceAppHealthUnhealthy - }, testutil.WaitLong, testutil.IntervalSlow) + mClock.Advance(999 * time.Millisecond).MustWait(ctx) // check gets triggered + mClock.Advance(time.Millisecond).MustWait(ctx) // report gets triggered, but unsent since we are at the threshold + + mClock.Advance(999 * time.Millisecond).MustWait(ctx) // 2nd check, crosses threshold + mClock.Advance(time.Millisecond).MustWait(ctx) // 2nd report, sends update + + update := testutil.RequireRecvCtx(ctx, t, fakeAPI.AppHealthCh()) + require.Len(t, update.GetUpdates(), 1) + applyUpdate(t, apps, update) + require.Equal(t, codersdk.WorkspaceAppHealthUnhealthy, apps[0].Health) } func TestAppHealth_Timeout(t *testing.T) { t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) + ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort) defer cancel() apps := []codersdk.WorkspaceApp{ { + ID: uuid.UUID{2}, Slug: "app2", Healthcheck: codersdk.Healthcheck{ // URL: We don't set the URL for this test because the setup will @@ -127,63 +180,66 @@ func TestAppHealth_Timeout(t *testing.T) { Health: codersdk.WorkspaceAppHealthInitializing, }, } + handlers := []http.Handler{ - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // sleep longer than the interval to cause the health check to time out - time.Sleep(2 * time.Second) - httpapi.Write(r.Context(), w, http.StatusOK, nil) + http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) { + // allow the request to time out + <-r.Context().Done() }), } - getApps, closeFn := setupAppReporter(ctx, t, apps, handlers) - defer closeFn() - require.Eventually(t, func() bool { - apps, err := getApps(ctx) - if err != nil { - return false - } + mClock := clock.NewMock(t) + start := mClock.Now() - return apps[0].Health == codersdk.WorkspaceAppHealthUnhealthy - }, testutil.WaitLong, testutil.IntervalSlow) -} - -func TestAppHealth_NotSpamming(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() - apps := []codersdk.WorkspaceApp{ - { - Slug: "app2", - Healthcheck: codersdk.Healthcheck{ - // URL: We don't set the URL for this test because the setup will - // create a httptest server for us and set it for us. - Interval: 1, - Threshold: 1, - }, - Health: codersdk.WorkspaceAppHealthInitializing, - }, + // for this test, it's easier to think in the number of milliseconds elapsed + // since start. + ms := func(n int) time.Time { + return start.Add(time.Duration(n) * time.Millisecond) } + healthcheckTrap := mClock.Trap().TickerFunc("healthcheck") + defer healthcheckTrap.Close() + reportTrap := mClock.Trap().TickerFunc("report") + defer reportTrap.Close() + timeoutTrap := mClock.Trap().AfterFunc("timeout") + defer timeoutTrap.Close() - counter := new(int32) - handlers := []http.Handler{ - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - atomic.AddInt32(counter, 1) - }), - } - _, closeFn := setupAppReporter(ctx, t, apps, handlers) + fakeAPI, closeFn := setupAppReporter(ctx, t, apps, handlers, mClock) defer closeFn() - // Ensure we haven't made more than 2 (expected 1 + 1 for buffer) requests in the last second. - // if there is a bug where we are spamming the healthcheck route this will catch it. 
- time.Sleep(time.Second) - require.LessOrEqual(t, atomic.LoadInt32(counter), int32(2)) + healthcheckTrap.MustWait(ctx).Release() + // advance the clock 1ms before the report ticker starts, so that it's not + // simultaneous with the checks. + mClock.Set(ms(1)).MustWait(ctx) + reportTrap.MustWait(ctx).Release() + + w := mClock.Set(ms(1000)) // 1st check starts + timeoutTrap.MustWait(ctx).Release() + mClock.Set(ms(1001)).MustWait(ctx) // report tick, no change + mClock.Set(ms(1999)) // timeout pops + w.MustWait(ctx) // 1st check finished + w = mClock.Set(ms(2000)) // 2nd check starts + timeoutTrap.MustWait(ctx).Release() + mClock.Set(ms(2001)).MustWait(ctx) // report tick, no change + mClock.Set(ms(2999)) // timeout pops + w.MustWait(ctx) // 2nd check finished + // app is now unhealthy after 2 timeouts + mClock.Set(ms(3000)) // 3rd check starts + timeoutTrap.MustWait(ctx).Release() + mClock.Set(ms(3001)).MustWait(ctx) // report tick, sends changes + + update := testutil.RequireRecvCtx(ctx, t, fakeAPI.AppHealthCh()) + require.Len(t, update.GetUpdates(), 1) + applyUpdate(t, apps, update) + require.Equal(t, codersdk.WorkspaceAppHealthUnhealthy, apps[0].Health) } -func setupAppReporter(ctx context.Context, t *testing.T, apps []codersdk.WorkspaceApp, handlers []http.Handler) (agent.WorkspaceAgentApps, func()) { +func setupAppReporter( + ctx context.Context, t *testing.T, + apps []codersdk.WorkspaceApp, + handlers []http.Handler, + clk clock.Clock, +) (*agenttest.FakeAgentAPI, func()) { closers := []func(){} - for i, app := range apps { - if app.ID == uuid.Nil { - app.ID = uuid.New() - apps[i] = app - } + for _, app := range apps { + require.NotEqual(t, uuid.Nil, app.ID, "all apps must have ID set") } for i, handler := range handlers { if handler == nil { @@ -196,14 +252,6 @@ func setupAppReporter(ctx context.Context, t *testing.T, apps []codersdk.Workspa closers = append(closers, ts.Close) } - var mu sync.Mutex - workspaceAgentApps := func(context.Context) ([]codersdk.WorkspaceApp, error) { - mu.Lock() - defer mu.Unlock() - var newApps []codersdk.WorkspaceApp - return append(newApps, apps...), nil - } - // We don't care about manifest or stats in this test since it's not using // a full agent and these RPCs won't get called. // @@ -212,38 +260,31 @@ func setupAppReporter(ctx context.Context, t *testing.T, apps []codersdk.Workspa // post function. fakeAAPI := agenttest.NewFakeAgentAPI(t, slogtest.Make(t, nil), nil, nil) - // Process events from the channel and update the health of the apps. 
- go func() { - appHealthCh := fakeAAPI.AppHealthCh() - for { - select { - case <-ctx.Done(): - return - case req := <-appHealthCh: - mu.Lock() - for _, update := range req.Updates { - updateID, err := uuid.FromBytes(update.Id) - assert.NoError(t, err) - updateHealth := codersdk.WorkspaceAppHealth(strings.ToLower(proto.AppHealth_name[int32(update.Health)])) - - for i, app := range apps { - if app.ID != updateID { - continue - } - app.Health = updateHealth - apps[i] = app - } - } - mu.Unlock() - } - } - }() - - go agent.NewWorkspaceAppHealthReporter(slogtest.Make(t, nil).Leveled(slog.LevelDebug), apps, agentsdk.AppHealthPoster(fakeAAPI))(ctx) + go agent.NewAppHealthReporterWithClock( + slogtest.Make(t, nil).Leveled(slog.LevelDebug), + apps, agentsdk.AppHealthPoster(fakeAAPI), clk, + )(ctx) - return workspaceAgentApps, func() { + return fakeAAPI, func() { for _, closeFn := range closers { closeFn() } } } + +func applyUpdate(t *testing.T, apps []codersdk.WorkspaceApp, req *proto.BatchUpdateAppHealthRequest) { + t.Helper() + for _, update := range req.Updates { + updateID, err := uuid.FromBytes(update.Id) + require.NoError(t, err) + updateHealth := codersdk.WorkspaceAppHealth(strings.ToLower(proto.AppHealth_name[int32(update.Health)])) + + for i, app := range apps { + if app.ID != updateID { + continue + } + app.Health = updateHealth + apps[i] = app + } + } +} diff --git a/cli/agent.go b/cli/agent.go index 1f91f1c98bb8d..5465aeedd9302 100644 --- a/cli/agent.go +++ b/cli/agent.go @@ -27,6 +27,7 @@ import ( "cdr.dev/slog/sloggers/slogstackdriver" "github.com/coder/coder/v2/agent" "github.com/coder/coder/v2/agent/agentproc" + "github.com/coder/coder/v2/agent/agentssh" "github.com/coder/coder/v2/agent/reaper" "github.com/coder/coder/v2/buildinfo" "github.com/coder/coder/v2/codersdk" @@ -48,6 +49,7 @@ func (r *RootCmd) workspaceAgent() *serpent.Command { slogHumanPath string slogJSONPath string slogStackdriverPath string + blockFileTransfer bool ) cmd := &serpent.Command{ Use: "agent", @@ -314,6 +316,8 @@ func (r *RootCmd) workspaceAgent() *serpent.Command { // Intentionally set this to nil. It's mainly used // for testing. ModifiedProcesses: nil, + + BlockFileTransfer: blockFileTransfer, }) promHandler := agent.PrometheusMetricsHandler(prometheusRegistry, logger) @@ -417,6 +421,13 @@ func (r *RootCmd) workspaceAgent() *serpent.Command { Default: "", Value: serpent.StringOf(&slogStackdriverPath), }, + { + Flag: "block-file-transfer", + Default: "false", + Env: "CODER_AGENT_BLOCK_FILE_TRANSFER", + Description: fmt.Sprintf("Block file transfer using known applications: %s.", strings.Join(agentssh.BlockedFileTransferCommands, ",")), + Value: serpent.BoolOf(&blockFileTransfer), + }, } return cmd diff --git a/cli/cliui/output.go b/cli/cliui/output.go index 9f06d0ba5d2cb..d15d18b63fe18 100644 --- a/cli/cliui/output.go +++ b/cli/cliui/output.go @@ -7,6 +7,7 @@ import ( "reflect" "strings" + "github.com/jedib0t/go-pretty/v6/table" "golang.org/x/xerrors" "github.com/coder/serpent" @@ -143,7 +144,11 @@ func (f *tableFormat) AttachOptions(opts *serpent.OptionSet) { // Format implements OutputFormat. 
func (f *tableFormat) Format(_ context.Context, data any) (string, error) { - return DisplayTable(data, f.sort, f.columns) + headers := make(table.Row, len(f.allColumns)) + for i, header := range f.allColumns { + headers[i] = header + } + return renderTable(data, f.sort, headers, f.columns) } type jsonFormat struct{} diff --git a/cli/cliui/parameter.go b/cli/cliui/parameter.go index 897ddec4de4d6..8080ef1a96906 100644 --- a/cli/cliui/parameter.go +++ b/cli/cliui/parameter.go @@ -43,7 +43,10 @@ func RichParameter(inv *serpent.Invocation, templateVersionParameter codersdk.Te return "", err } - values, err := MultiSelect(inv, options) + values, err := MultiSelect(inv, MultiSelectOptions{ + Options: options, + Defaults: options, + }) if err == nil { v, err := json.Marshal(&values) if err != nil { diff --git a/cli/cliui/select.go b/cli/cliui/select.go index 3ae27ee811e50..7d190b4bccf3c 100644 --- a/cli/cliui/select.go +++ b/cli/cliui/select.go @@ -14,48 +14,11 @@ import ( "github.com/coder/serpent" ) -func init() { - survey.SelectQuestionTemplate = ` -{{- define "option"}} - {{- " " }}{{- if eq .SelectedIndex .CurrentIndex }}{{color "green" }}{{ .Config.Icons.SelectFocus.Text }} {{else}}{{color "default"}} {{end}} - {{- .CurrentOpt.Value}} - {{- color "reset"}} -{{end}} - -{{- if not .ShowAnswer }} -{{- if .Config.Icons.Help.Text }} -{{- if .FilterMessage }}{{ "Search:" }}{{ .FilterMessage }} -{{- else }} -{{- color "black+h"}}{{- "Type to search" }}{{color "reset"}} -{{- end }} -{{- "\n" }} -{{- end }} -{{- "\n" }} -{{- range $ix, $option := .PageEntries}} - {{- template "option" $.IterateOption $ix $option}} -{{- end}} -{{- end }}` - - survey.MultiSelectQuestionTemplate = ` -{{- define "option"}} - {{- if eq .SelectedIndex .CurrentIndex }}{{color .Config.Icons.SelectFocus.Format }}{{ .Config.Icons.SelectFocus.Text }}{{color "reset"}}{{else}} {{end}} - {{- if index .Checked .CurrentOpt.Index }}{{color .Config.Icons.MarkedOption.Format }} {{ .Config.Icons.MarkedOption.Text }} {{else}}{{color .Config.Icons.UnmarkedOption.Format }} {{ .Config.Icons.UnmarkedOption.Text }} {{end}} - {{- color "reset"}} - {{- " "}}{{- .CurrentOpt.Value}} -{{end}} -{{- if .ShowHelp }}{{- color .Config.Icons.Help.Format }}{{ .Config.Icons.Help.Text }} {{ .Help }}{{color "reset"}}{{"\n"}}{{end}} -{{- if not .ShowAnswer }} - {{- "\n"}} - {{- range $ix, $option := .PageEntries}} - {{- template "option" $.IterateOption $ix $option}} - {{- end}} -{{- end}}` -} - type SelectOptions struct { Options []string // Default will be highlighted first if it's a valid option. 
Default string + Message string Size int HideSearch bool } @@ -122,6 +85,7 @@ func Select(inv *serpent.Invocation, opts SelectOptions) (string, error) { Options: opts.Options, Default: defaultOption, PageSize: opts.Size, + Message: opts.Message, }, &value, survey.WithIcons(func(is *survey.IconSet) { is.Help.Text = "Type to search" if opts.HideSearch { @@ -138,15 +102,22 @@ func Select(inv *serpent.Invocation, opts SelectOptions) (string, error) { return value, err } -func MultiSelect(inv *serpent.Invocation, items []string) ([]string, error) { +type MultiSelectOptions struct { + Message string + Options []string + Defaults []string +} + +func MultiSelect(inv *serpent.Invocation, opts MultiSelectOptions) ([]string, error) { // Similar hack is applied to Select() if flag.Lookup("test.v") != nil { - return items, nil + return opts.Defaults, nil } prompt := &survey.MultiSelect{ - Options: items, - Default: items, + Options: opts.Options, + Default: opts.Defaults, + Message: opts.Message, } var values []string diff --git a/cli/cliui/select_test.go b/cli/cliui/select_test.go index c399121adb6ec..c0da49714fc40 100644 --- a/cli/cliui/select_test.go +++ b/cli/cliui/select_test.go @@ -107,7 +107,10 @@ func newMultiSelect(ptty *ptytest.PTY, items []string) ([]string, error) { var values []string cmd := &serpent.Command{ Handler: func(inv *serpent.Invocation) error { - selectedItems, err := cliui.MultiSelect(inv, items) + selectedItems, err := cliui.MultiSelect(inv, cliui.MultiSelectOptions{ + Options: items, + Defaults: items, + }) if err == nil { values = selectedItems } diff --git a/cli/cliui/table.go b/cli/cliui/table.go index 9962678be902a..c9f3ee69936b4 100644 --- a/cli/cliui/table.go +++ b/cli/cliui/table.go @@ -22,6 +22,13 @@ func Table() table.Writer { return tableWriter } +// This type can be supplied as part of a slice to DisplayTable +// or to a `TableFormat` `Format` call to render a separator. +// Leading separators are not supported and trailing separators +// are ignored by the table formatter. +// e.g. `[]any{someRow, TableSeparator, someRow}` +type TableSeparator struct{} + // filterTableColumns returns configurations to hide columns // that are not provided in the array. If the array is empty, // no filtering will occur! @@ -47,8 +54,12 @@ func filterTableColumns(header table.Row, columns []string) []table.ColumnConfig return columnConfigs } -// DisplayTable renders a table as a string. The input argument must be a slice -// of structs. At least one field in the struct must have a `table:""` tag +// DisplayTable renders a table as a string. The input argument can be: +// - a struct slice. +// - an interface slice, where the first element is a struct, +// and all other elements are of the same type, or a TableSeparator. +// +// At least one field in the struct must have a `table:""` tag // containing the name of the column in the outputted table. 
// // If `sort` is not specified, the field with the `table:"$NAME,default_sort"` @@ -66,11 +77,20 @@ func DisplayTable(out any, sort string, filterColumns []string) (string, error) v := reflect.Indirect(reflect.ValueOf(out)) if v.Kind() != reflect.Slice { - return "", xerrors.Errorf("DisplayTable called with a non-slice type") + return "", xerrors.New("DisplayTable called with a non-slice type") + } + var tableType reflect.Type + if v.Type().Elem().Kind() == reflect.Interface { + if v.Len() == 0 { + return "", xerrors.New("DisplayTable called with empty interface slice") + } + tableType = reflect.Indirect(reflect.ValueOf(v.Index(0).Interface())).Type() + } else { + tableType = v.Type().Elem() } // Get the list of table column headers. - headersRaw, defaultSort, err := typeToTableHeaders(v.Type().Elem(), true) + headersRaw, defaultSort, err := typeToTableHeaders(tableType, true) if err != nil { return "", xerrors.Errorf("get table headers recursively for type %q: %w", v.Type().Elem().String(), err) } @@ -82,9 +102,8 @@ func DisplayTable(out any, sort string, filterColumns []string) (string, error) } headers := make(table.Row, len(headersRaw)) for i, header := range headersRaw { - headers[i] = header + headers[i] = strings.ReplaceAll(header, "_", " ") } - // Verify that the given sort column and filter columns are valid. if sort != "" || len(filterColumns) != 0 { headersMap := make(map[string]string, len(headersRaw)) @@ -130,6 +149,11 @@ func DisplayTable(out any, sort string, filterColumns []string) (string, error) return "", xerrors.Errorf("specified sort column %q not found in table headers, available columns are %q", sort, strings.Join(headersRaw, `", "`)) } } + return renderTable(out, sort, headers, filterColumns) +} + +func renderTable(out any, sort string, headers table.Row, filterColumns []string) (string, error) { + v := reflect.Indirect(reflect.ValueOf(out)) // Setup the table formatter. tw := Table() @@ -143,15 +167,22 @@ func DisplayTable(out any, sort string, filterColumns []string) (string, error) // Write each struct to the table. for i := 0; i < v.Len(); i++ { + cur := v.Index(i).Interface() + _, ok := cur.(TableSeparator) + if ok { + tw.AppendSeparator() + continue + } // Format the row as a slice. - rowMap, err := valueToTableMap(v.Index(i)) + // ValueToTableMap does what `reflect.Indirect` does + rowMap, err := valueToTableMap(reflect.ValueOf(cur)) if err != nil { return "", xerrors.Errorf("get table row map %v: %w", i, err) } rowSlice := make([]any, len(headers)) - for i, h := range headersRaw { - v, ok := rowMap[h] + for i, h := range headers { + v, ok := rowMap[h.(string)] if !ok { v = nil } @@ -174,6 +205,24 @@ func DisplayTable(out any, sort string, filterColumns []string) (string, error) } } + // Guard against nil dereferences + if v != nil { + rt := reflect.TypeOf(v) + switch rt.Kind() { + case reflect.Slice: + // By default, the behavior is '%v', which just returns a string like + // '[a b c]'. This will add commas in between each value. + strs := make([]string, 0) + vt := reflect.ValueOf(v) + for i := 0; i < vt.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", vt.Index(i).Interface())) + } + v = "[" + strings.Join(strs, ", ") + "]" + default: + // Leave it as it is + } + } + rowSlice[i] = v } @@ -188,25 +237,28 @@ func DisplayTable(out any, sort string, filterColumns []string) (string, error) // returned. If the table tag is malformed, an error is returned. // // The returned name is transformed from "snake_case" to "normal text". 
-func parseTableStructTag(field reflect.StructField) (name string, defaultSort, recursive bool, skipParentName bool, err error) { +func parseTableStructTag(field reflect.StructField) (name string, defaultSort, noSortOpt, recursive, skipParentName bool, err error) { tags, err := structtag.Parse(string(field.Tag)) if err != nil { - return "", false, false, false, xerrors.Errorf("parse struct field tag %q: %w", string(field.Tag), err) + return "", false, false, false, false, xerrors.Errorf("parse struct field tag %q: %w", string(field.Tag), err) } tag, err := tags.Get("table") if err != nil || tag.Name == "-" { // tags.Get only returns an error if the tag is not found. - return "", false, false, false, nil + return "", false, false, false, false, nil } defaultSortOpt := false + noSortOpt = false recursiveOpt := false skipParentNameOpt := false for _, opt := range tag.Options { switch opt { case "default_sort": defaultSortOpt = true + case "nosort": + noSortOpt = true case "recursive": recursiveOpt = true case "recursive_inline": @@ -216,11 +268,11 @@ func parseTableStructTag(field reflect.StructField) (name string, defaultSort, r recursiveOpt = true skipParentNameOpt = true default: - return "", false, false, false, xerrors.Errorf("unknown option %q in struct field tag", opt) + return "", false, false, false, false, xerrors.Errorf("unknown option %q in struct field tag", opt) } } - return strings.ReplaceAll(tag.Name, "_", " "), defaultSortOpt, recursiveOpt, skipParentNameOpt, nil + return strings.ReplaceAll(tag.Name, "_", " "), defaultSortOpt, noSortOpt, recursiveOpt, skipParentNameOpt, nil } func isStructOrStructPointer(t reflect.Type) bool { @@ -244,12 +296,16 @@ func typeToTableHeaders(t reflect.Type, requireDefault bool) ([]string, string, headers := []string{} defaultSortName := "" + noSortOpt := false for i := 0; i < t.NumField(); i++ { field := t.Field(i) - name, defaultSort, recursive, skip, err := parseTableStructTag(field) + name, defaultSort, noSort, recursive, skip, err := parseTableStructTag(field) if err != nil { return nil, "", xerrors.Errorf("parse struct tags for field %q in type %q: %w", field.Name, t.String(), err) } + if requireDefault && noSort { + noSortOpt = true + } if name == "" && (recursive && skip) { return nil, "", xerrors.Errorf("a name is required for the field %q. 
"+ @@ -292,8 +348,8 @@ func typeToTableHeaders(t reflect.Type, requireDefault bool) ([]string, string, headers = append(headers, name) } - if defaultSortName == "" && requireDefault { - return nil, "", xerrors.Errorf("no field marked as default_sort in type %q", t.String()) + if defaultSortName == "" && requireDefault && !noSortOpt { + return nil, "", xerrors.Errorf("no field marked as default_sort or nosort in type %q", t.String()) } return headers, defaultSortName, nil @@ -320,7 +376,7 @@ func valueToTableMap(val reflect.Value) (map[string]any, error) { for i := 0; i < val.NumField(); i++ { field := val.Type().Field(i) fieldVal := val.Field(i) - name, _, recursive, skip, err := parseTableStructTag(field) + name, _, _, recursive, skip, err := parseTableStructTag(field) if err != nil { return nil, xerrors.Errorf("parse struct tags for field %q in type %T: %w", field.Name, val, err) } diff --git a/cli/cliui/table_test.go b/cli/cliui/table_test.go index bb0b6c658fe45..bb46219c3c80e 100644 --- a/cli/cliui/table_test.go +++ b/cli/cliui/table_test.go @@ -138,10 +138,10 @@ func Test_DisplayTable(t *testing.T) { t.Parallel() expected := ` -NAME AGE ROLES SUB 1 NAME SUB 1 AGE SUB 2 NAME SUB 2 AGE SUB 3 INNER NAME SUB 3 INNER AGE SUB 4 TIME TIME PTR -bar 20 [a] bar1 21 bar3 23 {bar4 24 } 2022-08-02T15:49:10Z -baz 30 [] baz1 31 baz3 33 {baz4 34 } 2022-08-02T15:49:10Z -foo 10 [a b c] foo1 11 foo2 12 foo3 13 {foo4 14 } 2022-08-02T15:49:10Z 2022-08-02T15:49:10Z +NAME AGE ROLES SUB 1 NAME SUB 1 AGE SUB 2 NAME SUB 2 AGE SUB 3 INNER NAME SUB 3 INNER AGE SUB 4 TIME TIME PTR +bar 20 [a] bar1 21 bar3 23 {bar4 24 } 2022-08-02T15:49:10Z +baz 30 [] baz1 31 baz3 33 {baz4 34 } 2022-08-02T15:49:10Z +foo 10 [a, b, c] foo1 11 foo2 12 foo3 13 {foo4 14 } 2022-08-02T15:49:10Z 2022-08-02T15:49:10Z ` // Test with non-pointer values. 
@@ -165,10 +165,10 @@ foo 10 [a b c] foo1 11 foo2 12 foo3 t.Parallel() expected := ` -NAME AGE ROLES SUB 1 NAME SUB 1 AGE SUB 2 NAME SUB 2 AGE SUB 3 INNER NAME SUB 3 INNER AGE SUB 4 TIME TIME PTR -foo 10 [a b c] foo1 11 foo2 12 foo3 13 {foo4 14 } 2022-08-02T15:49:10Z 2022-08-02T15:49:10Z -bar 20 [a] bar1 21 bar3 23 {bar4 24 } 2022-08-02T15:49:10Z -baz 30 [] baz1 31 baz3 33 {baz4 34 } 2022-08-02T15:49:10Z +NAME AGE ROLES SUB 1 NAME SUB 1 AGE SUB 2 NAME SUB 2 AGE SUB 3 INNER NAME SUB 3 INNER AGE SUB 4 TIME TIME PTR +foo 10 [a, b, c] foo1 11 foo2 12 foo3 13 {foo4 14 } 2022-08-02T15:49:10Z 2022-08-02T15:49:10Z +bar 20 [a] bar1 21 bar3 23 {bar4 24 } 2022-08-02T15:49:10Z +baz 30 [] baz1 31 baz3 33 {baz4 34 } 2022-08-02T15:49:10Z ` out, err := cliui.DisplayTable(in, "age", nil) @@ -218,6 +218,42 @@ Alice 25 compareTables(t, expected, out) }) + // This test ensures we can display dynamically typed slices + t.Run("Interfaces", func(t *testing.T) { + t.Parallel() + + in := []any{tableTest1{}} + out, err := cliui.DisplayTable(in, "", nil) + t.Log("rendered table:\n" + out) + require.NoError(t, err) + other := []tableTest1{{}} + expected, err := cliui.DisplayTable(other, "", nil) + require.NoError(t, err) + compareTables(t, expected, out) + }) + + t.Run("WithSeparator", func(t *testing.T) { + t.Parallel() + expected := ` +NAME AGE ROLES SUB 1 NAME SUB 1 AGE SUB 2 NAME SUB 2 AGE SUB 3 INNER NAME SUB 3 INNER AGE SUB 4 TIME TIME PTR +bar 20 [a] bar1 21 bar3 23 {bar4 24 } 2022-08-02T15:49:10Z +--------------------------------------------------------------------------------------------------------------------------------------------------------------- +baz 30 [] baz1 31 baz3 33 {baz4 34 } 2022-08-02T15:49:10Z +--------------------------------------------------------------------------------------------------------------------------------------------------------------- +foo 10 [a, b, c] foo1 11 foo2 12 foo3 13 {foo4 14 } 2022-08-02T15:49:10Z 2022-08-02T15:49:10Z + ` + + var inlineIn []any + for _, v := range in { + inlineIn = append(inlineIn, v) + inlineIn = append(inlineIn, cliui.TableSeparator{}) + } + out, err := cliui.DisplayTable(inlineIn, "", nil) + t.Log("rendered table:\n" + out) + require.NoError(t, err) + compareTables(t, expected, out) + }) + // This test ensures that safeties against invalid use of `table` tags // causes errors (even without data). 
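// Illustrative sketch (not part of the patch): the new nosort tag option
// pairs with TableFormat-based formatters, where the struct declares a fixed
// column order and has no natural default_sort column. The command and row
// type here are invented; the speedtest and organization members commands in
// this patch wire up their formatters the same way.
package example

import (
	"fmt"

	"github.com/coder/coder/v2/cli/cliui"
	"github.com/coder/serpent"
)

type checkRow struct {
	// nosort removes the requirement that some column carry default_sort.
	Check  string `table:"Check,nosort"`
	Result string `table:"Result"`
}

func newChecksCmd() *serpent.Command {
	formatter := cliui.NewOutputFormatter(
		cliui.TableFormat([]checkRow{}, []string{"Check", "Result"}),
		cliui.JSONFormat(),
	)
	cmd := &serpent.Command{
		Use: "checks",
		Handler: func(inv *serpent.Invocation) error {
			rows := []checkRow{
				{Check: "database", Result: "ok"},
				{Check: "derp", Result: "ok"},
			}
			out, err := formatter.Format(inv.Context(), rows)
			if err != nil {
				return err
			}
			_, err = fmt.Fprintln(inv.Stdout, out)
			return err
		},
	}
	// Registers the output-format options on the command, as the new commands
	// in this patch do.
	formatter.AttachOptions(&cmd.Options)
	return cmd
}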
t.Run("Errors", func(t *testing.T) { @@ -255,14 +291,6 @@ Alice 25 _, err := cliui.DisplayTable(in, "", nil) require.Error(t, err) }) - - t.Run("WithData", func(t *testing.T) { - t.Parallel() - - in := []any{tableTest1{}} - _, err := cliui.DisplayTable(in, "", nil) - require.Error(t, err) - }) }) t.Run("NotStruct", func(t *testing.T) { diff --git a/cli/exp.go b/cli/exp.go index 3d63057638829..5c72d0f9fcd20 100644 --- a/cli/exp.go +++ b/cli/exp.go @@ -13,6 +13,7 @@ func (r *RootCmd) expCmd() *serpent.Command { Children: []*serpent.Command{ r.scaletestCmd(), r.errorExample(), + r.promptExample(), }, } return cmd diff --git a/cli/login.go b/cli/login.go index 65a94d8a4ec3e..7dde98b118c5d 100644 --- a/cli/login.go +++ b/cli/login.go @@ -58,6 +58,21 @@ func promptFirstUsername(inv *serpent.Invocation) (string, error) { return username, nil } +func promptFirstName(inv *serpent.Invocation) (string, error) { + name, err := cliui.Prompt(inv, cliui.PromptOptions{ + Text: "(Optional) What " + pretty.Sprint(cliui.DefaultStyles.Field, "name") + " would you like?", + Default: "", + }) + if err != nil { + if errors.Is(err, cliui.Canceled) { + return "", nil + } + return "", err + } + + return name, nil +} + func promptFirstPassword(inv *serpent.Invocation) (string, error) { retry: password, err := cliui.Prompt(inv, cliui.PromptOptions{ @@ -130,6 +145,7 @@ func (r *RootCmd) login() *serpent.Command { var ( email string username string + name string password string trial bool useTokenForSession bool @@ -191,6 +207,7 @@ func (r *RootCmd) login() *serpent.Command { _, _ = fmt.Fprintf(inv.Stdout, "Attempting to authenticate with %s URL: '%s'\n", urlSource, serverURL) + // nolint: nestif if !hasFirstUser { _, _ = fmt.Fprintf(inv.Stdout, Caret+"Your Coder deployment hasn't been set up!\n") @@ -212,6 +229,10 @@ func (r *RootCmd) login() *serpent.Command { if err != nil { return err } + name, err = promptFirstName(inv) + if err != nil { + return err + } } if email == "" { @@ -239,7 +260,7 @@ func (r *RootCmd) login() *serpent.Command { if !inv.ParsedFlags().Changed("first-user-trial") && os.Getenv(firstUserTrialEnv) == "" { v, _ := cliui.Prompt(inv, cliui.PromptOptions{ - Text: "Start a 30-day trial of Enterprise?", + Text: "Start a trial of Enterprise?", IsConfirm: true, Default: "yes", }) @@ -249,6 +270,7 @@ func (r *RootCmd) login() *serpent.Command { _, err = client.CreateFirstUser(ctx, codersdk.CreateFirstUserRequest{ Email: email, Username: username, + Name: name, Password: password, Trial: trial, }) @@ -336,6 +358,13 @@ func (r *RootCmd) login() *serpent.Command { return xerrors.Errorf("write server url: %w", err) } + // If the current organization cannot be fetched, then reset the organization context. + // Otherwise, organization cli commands will fail. + _, err = CurrentOrganization(r, inv, client) + if err != nil { + _ = config.Organization().Delete() + } + _, _ = fmt.Fprintf(inv.Stdout, Caret+"Welcome to Coder, %s! 
You're authenticated.\n", pretty.Sprint(cliui.DefaultStyles.Keyword, resp.Username)) return nil }, @@ -353,6 +382,12 @@ func (r *RootCmd) login() *serpent.Command { Description: "Specifies a username to use if creating the first user for the deployment.", Value: serpent.StringOf(&username), }, + { + Flag: "first-user-full-name", + Env: "CODER_FIRST_USER_FULL_NAME", + Description: "Specifies a human-readable name for the first user of the deployment.", + Value: serpent.StringOf(&name), + }, { Flag: "first-user-password", Env: "CODER_FIRST_USER_PASSWORD", diff --git a/cli/login_test.go b/cli/login_test.go index 3cf9dc1945b57..b2f93ad5e6813 100644 --- a/cli/login_test.go +++ b/cli/login_test.go @@ -5,9 +5,11 @@ import ( "fmt" "net/http" "net/http/httptest" + "os" "runtime" "testing" + "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -18,6 +20,7 @@ import ( "github.com/coder/coder/v2/coderd/coderdtest" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/pty/ptytest" + "github.com/coder/coder/v2/testutil" ) func TestLogin(t *testing.T) { @@ -89,10 +92,11 @@ func TestLogin(t *testing.T) { matches := []string{ "first user?", "yes", - "username", "testuser", - "email", "user@coder.com", - "password", "SomeSecurePassword!", - "password", "SomeSecurePassword!", // Confirm. + "username", coderdtest.FirstUserParams.Username, + "name", coderdtest.FirstUserParams.Name, + "email", coderdtest.FirstUserParams.Email, + "password", coderdtest.FirstUserParams.Password, + "password", coderdtest.FirstUserParams.Password, // confirm "trial", "yes", } for i := 0; i < len(matches); i += 2 { @@ -103,6 +107,64 @@ func TestLogin(t *testing.T) { } pty.ExpectMatch("Welcome to Coder") <-doneChan + ctx := testutil.Context(t, testutil.WaitShort) + resp, err := client.LoginWithPassword(ctx, codersdk.LoginWithPasswordRequest{ + Email: coderdtest.FirstUserParams.Email, + Password: coderdtest.FirstUserParams.Password, + }) + require.NoError(t, err) + client.SetSessionToken(resp.SessionToken) + me, err := client.User(ctx, codersdk.Me) + require.NoError(t, err) + assert.Equal(t, coderdtest.FirstUserParams.Username, me.Username) + assert.Equal(t, coderdtest.FirstUserParams.Name, me.Name) + assert.Equal(t, coderdtest.FirstUserParams.Email, me.Email) + }) + + t.Run("InitialUserTTYNameOptional", func(t *testing.T) { + t.Parallel() + client := coderdtest.New(t, nil) + // The --force-tty flag is required on Windows, because the `isatty` library does not + // accurately detect Windows ptys when they are not attached to a process: + // https://github.com/mattn/go-isatty/issues/59 + doneChan := make(chan struct{}) + root, _ := clitest.New(t, "login", "--force-tty", client.URL.String()) + pty := ptytest.New(t).Attach(root) + go func() { + defer close(doneChan) + err := root.Run() + assert.NoError(t, err) + }() + + matches := []string{ + "first user?", "yes", + "username", coderdtest.FirstUserParams.Username, + "name", "", + "email", coderdtest.FirstUserParams.Email, + "password", coderdtest.FirstUserParams.Password, + "password", coderdtest.FirstUserParams.Password, // confirm + "trial", "yes", + } + for i := 0; i < len(matches); i += 2 { + match := matches[i] + value := matches[i+1] + pty.ExpectMatch(match) + pty.WriteLine(value) + } + pty.ExpectMatch("Welcome to Coder") + <-doneChan + ctx := testutil.Context(t, testutil.WaitShort) + resp, err := client.LoginWithPassword(ctx, codersdk.LoginWithPasswordRequest{ + Email: coderdtest.FirstUserParams.Email, + Password: 
coderdtest.FirstUserParams.Password, + }) + require.NoError(t, err) + client.SetSessionToken(resp.SessionToken) + me, err := client.User(ctx, codersdk.Me) + require.NoError(t, err) + assert.Equal(t, coderdtest.FirstUserParams.Username, me.Username) + assert.Equal(t, coderdtest.FirstUserParams.Email, me.Email) + assert.Empty(t, me.Name) }) t.Run("InitialUserTTYFlag", func(t *testing.T) { @@ -119,10 +181,11 @@ func TestLogin(t *testing.T) { pty.ExpectMatch(fmt.Sprintf("Attempting to authenticate with flag URL: '%s'", client.URL.String())) matches := []string{ "first user?", "yes", - "username", "testuser", - "email", "user@coder.com", - "password", "SomeSecurePassword!", - "password", "SomeSecurePassword!", // Confirm. + "username", coderdtest.FirstUserParams.Username, + "name", coderdtest.FirstUserParams.Name, + "email", coderdtest.FirstUserParams.Email, + "password", coderdtest.FirstUserParams.Password, + "password", coderdtest.FirstUserParams.Password, // confirm "trial", "yes", } for i := 0; i < len(matches); i += 2 { @@ -132,6 +195,18 @@ func TestLogin(t *testing.T) { pty.WriteLine(value) } pty.ExpectMatch("Welcome to Coder") + ctx := testutil.Context(t, testutil.WaitShort) + resp, err := client.LoginWithPassword(ctx, codersdk.LoginWithPasswordRequest{ + Email: coderdtest.FirstUserParams.Email, + Password: coderdtest.FirstUserParams.Password, + }) + require.NoError(t, err) + client.SetSessionToken(resp.SessionToken) + me, err := client.User(ctx, codersdk.Me) + require.NoError(t, err) + assert.Equal(t, coderdtest.FirstUserParams.Username, me.Username) + assert.Equal(t, coderdtest.FirstUserParams.Name, me.Name) + assert.Equal(t, coderdtest.FirstUserParams.Email, me.Email) }) t.Run("InitialUserFlags", func(t *testing.T) { @@ -139,13 +214,56 @@ func TestLogin(t *testing.T) { client := coderdtest.New(t, nil) inv, _ := clitest.New( t, "login", client.URL.String(), - "--first-user-username", "testuser", "--first-user-email", "user@coder.com", - "--first-user-password", "SomeSecurePassword!", "--first-user-trial", + "--first-user-username", coderdtest.FirstUserParams.Username, + "--first-user-full-name", coderdtest.FirstUserParams.Name, + "--first-user-email", coderdtest.FirstUserParams.Email, + "--first-user-password", coderdtest.FirstUserParams.Password, + "--first-user-trial", ) pty := ptytest.New(t).Attach(inv) w := clitest.StartWithWaiter(t, inv) pty.ExpectMatch("Welcome to Coder") w.RequireSuccess() + ctx := testutil.Context(t, testutil.WaitShort) + resp, err := client.LoginWithPassword(ctx, codersdk.LoginWithPasswordRequest{ + Email: coderdtest.FirstUserParams.Email, + Password: coderdtest.FirstUserParams.Password, + }) + require.NoError(t, err) + client.SetSessionToken(resp.SessionToken) + me, err := client.User(ctx, codersdk.Me) + require.NoError(t, err) + assert.Equal(t, coderdtest.FirstUserParams.Username, me.Username) + assert.Equal(t, coderdtest.FirstUserParams.Name, me.Name) + assert.Equal(t, coderdtest.FirstUserParams.Email, me.Email) + }) + + t.Run("InitialUserFlagsNameOptional", func(t *testing.T) { + t.Parallel() + client := coderdtest.New(t, nil) + inv, _ := clitest.New( + t, "login", client.URL.String(), + "--first-user-username", coderdtest.FirstUserParams.Username, + "--first-user-email", coderdtest.FirstUserParams.Email, + "--first-user-password", coderdtest.FirstUserParams.Password, + "--first-user-trial", + ) + pty := ptytest.New(t).Attach(inv) + w := clitest.StartWithWaiter(t, inv) + pty.ExpectMatch("Welcome to Coder") + w.RequireSuccess() + ctx := testutil.Context(t, 
testutil.WaitShort) + resp, err := client.LoginWithPassword(ctx, codersdk.LoginWithPasswordRequest{ + Email: coderdtest.FirstUserParams.Email, + Password: coderdtest.FirstUserParams.Password, + }) + require.NoError(t, err) + client.SetSessionToken(resp.SessionToken) + me, err := client.User(ctx, codersdk.Me) + require.NoError(t, err) + assert.Equal(t, coderdtest.FirstUserParams.Username, me.Username) + assert.Equal(t, coderdtest.FirstUserParams.Email, me.Email) + assert.Empty(t, me.Name) }) t.Run("InitialUserTTYConfirmPasswordFailAndReprompt", func(t *testing.T) { @@ -167,10 +285,11 @@ func TestLogin(t *testing.T) { matches := []string{ "first user?", "yes", - "username", "testuser", - "email", "user@coder.com", - "password", "MyFirstSecurePassword!", - "password", "MyNonMatchingSecurePassword!", // Confirm. + "username", coderdtest.FirstUserParams.Username, + "name", coderdtest.FirstUserParams.Name, + "email", coderdtest.FirstUserParams.Email, + "password", coderdtest.FirstUserParams.Password, + "password", "something completely different", } for i := 0; i < len(matches); i += 2 { match := matches[i] @@ -183,9 +302,9 @@ func TestLogin(t *testing.T) { pty.ExpectMatch("Passwords do not match") pty.ExpectMatch("Enter a " + pretty.Sprint(cliui.DefaultStyles.Field, "password")) - pty.WriteLine("SomeSecurePassword!") + pty.WriteLine(coderdtest.FirstUserParams.Password) pty.ExpectMatch("Confirm") - pty.WriteLine("SomeSecurePassword!") + pty.WriteLine(coderdtest.FirstUserParams.Password) pty.ExpectMatch("trial") pty.WriteLine("yes") pty.ExpectMatch("Welcome to Coder") @@ -304,4 +423,48 @@ func TestLogin(t *testing.T) { // This **should not be equal** to the token we passed in. require.NotEqual(t, client.SessionToken(), sessionFile) }) + + // Login should reset the configured organization if the user is not a member + t.Run("ResetOrganization", func(t *testing.T) { + t.Parallel() + client := coderdtest.New(t, nil) + coderdtest.CreateFirstUser(t, client) + root, cfg := clitest.New(t, "login", client.URL.String(), "--token", client.SessionToken()) + + notRealOrg := uuid.NewString() + err := cfg.Organization().Write(notRealOrg) + require.NoError(t, err, "write bad org to config") + + err = root.Run() + require.NoError(t, err) + sessionFile, err := cfg.Session().Read() + require.NoError(t, err) + require.NotEqual(t, client.SessionToken(), sessionFile) + + // Organization config should be deleted since the org does not exist + selected, err := cfg.Organization().Read() + require.ErrorIs(t, err, os.ErrNotExist) + require.NotEqual(t, selected, notRealOrg) + }) + + t.Run("KeepOrganizationContext", func(t *testing.T) { + t.Parallel() + client := coderdtest.New(t, nil) + first := coderdtest.CreateFirstUser(t, client) + root, cfg := clitest.New(t, "login", client.URL.String(), "--token", client.SessionToken()) + + err := cfg.Organization().Write(first.OrganizationID.String()) + require.NoError(t, err, "write bad org to config") + + err = root.Run() + require.NoError(t, err) + sessionFile, err := cfg.Session().Read() + require.NoError(t, err) + require.NotEqual(t, client.SessionToken(), sessionFile) + + // Organization config should be deleted since the org does not exist + selected, err := cfg.Organization().Read() + require.NoError(t, err) + require.Equal(t, selected, first.OrganizationID.String()) + }) } diff --git a/cli/netcheck.go b/cli/netcheck.go index fb4042b600920..490ed25ce20b2 100644 --- a/cli/netcheck.go +++ b/cli/netcheck.go @@ -10,6 +10,7 @@ import ( 
"github.com/coder/coder/v2/coderd/healthcheck/derphealth" "github.com/coder/coder/v2/codersdk" + "github.com/coder/coder/v2/codersdk/healthsdk" "github.com/coder/coder/v2/codersdk/workspacesdk" "github.com/coder/serpent" ) @@ -34,11 +35,21 @@ func (r *RootCmd) netcheck() *serpent.Command { _, _ = fmt.Fprint(inv.Stderr, "Gathering a network report. This may take a few seconds...\n\n") - var report derphealth.Report - report.Run(ctx, &derphealth.ReportOptions{ + var derpReport derphealth.Report + derpReport.Run(ctx, &derphealth.ReportOptions{ DERPMap: connInfo.DERPMap, }) + ifReport, err := healthsdk.RunInterfacesReport() + if err != nil { + return xerrors.Errorf("failed to run interfaces report: %w", err) + } + + report := healthsdk.ClientNetcheckReport{ + DERP: healthsdk.DERPHealthReport(derpReport), + Interfaces: ifReport, + } + raw, err := json.MarshalIndent(report, "", " ") if err != nil { return err diff --git a/cli/netcheck_test.go b/cli/netcheck_test.go index 45166861db04f..bf124fc77896b 100644 --- a/cli/netcheck_test.go +++ b/cli/netcheck_test.go @@ -5,7 +5,6 @@ import ( "encoding/json" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/coder/coder/v2/cli/clitest" @@ -27,12 +26,13 @@ func TestNetcheck(t *testing.T) { b := out.Bytes() t.Log(string(b)) - var report healthsdk.DERPHealthReport + var report healthsdk.ClientNetcheckReport require.NoError(t, json.Unmarshal(b, &report)) - assert.True(t, report.Healthy) - require.Len(t, report.Regions, 1+1) // 1 built-in region + 1 test-managed STUN region - for _, v := range report.Regions { + // We do not assert that the report is healthy, just that + // it has the expected number of reports per region. + require.Len(t, report.DERP.Regions, 1+1) // 1 built-in region + 1 test-managed STUN region + for _, v := range report.DERP.Regions { require.Len(t, v.NodeReports, len(v.Region.Nodes)) } } diff --git a/cli/organization.go b/cli/organization.go index beb52cb5df8f2..44f9c3308139e 100644 --- a/cli/organization.go +++ b/cli/organization.go @@ -18,11 +18,10 @@ import ( func (r *RootCmd) organizations() *serpent.Command { cmd := &serpent.Command{ - Annotations: workspaceCommand, - Use: "organizations [subcommand]", - Short: "Organization related commands", - Aliases: []string{"organization", "org", "orgs"}, - Hidden: true, // Hidden until these commands are complete. + Use: "organizations [subcommand]", + Short: "Organization related commands", + Aliases: []string{"organization", "org", "orgs"}, + Hidden: true, // Hidden until these commands are complete. 
Handler: func(inv *serpent.Invocation) error { return inv.Command.HelpHandler(inv) }, @@ -30,6 +29,7 @@ func (r *RootCmd) organizations() *serpent.Command { r.currentOrganization(), r.switchOrganization(), r.createOrganization(), + r.organizationMembers(), r.organizationRoles(), }, } diff --git a/cli/organizationmembers.go b/cli/organizationmembers.go new file mode 100644 index 0000000000000..521ec5bfb7d37 --- /dev/null +++ b/cli/organizationmembers.go @@ -0,0 +1,176 @@ +package cli + +import ( + "fmt" + "strings" + + "golang.org/x/xerrors" + + "github.com/coder/coder/v2/cli/cliui" + "github.com/coder/coder/v2/codersdk" + "github.com/coder/serpent" +) + +func (r *RootCmd) organizationMembers() *serpent.Command { + cmd := &serpent.Command{ + Use: "members", + Aliases: []string{"member"}, + Short: "Manage organization members", + Children: []*serpent.Command{ + r.listOrganizationMembers(), + r.assignOrganizationRoles(), + r.addOrganizationMember(), + r.removeOrganizationMember(), + }, + Handler: func(inv *serpent.Invocation) error { + return inv.Command.HelpHandler(inv) + }, + } + + return cmd +} + +func (r *RootCmd) removeOrganizationMember() *serpent.Command { + client := new(codersdk.Client) + + cmd := &serpent.Command{ + Use: "remove ", + Short: "Remove a new member to the current organization", + Middleware: serpent.Chain( + r.InitClient(client), + serpent.RequireNArgs(1), + ), + Handler: func(inv *serpent.Invocation) error { + ctx := inv.Context() + organization, err := CurrentOrganization(r, inv, client) + if err != nil { + return err + } + user := inv.Args[0] + + err = client.DeleteOrganizationMember(ctx, organization.ID, user) + if err != nil { + return xerrors.Errorf("could not remove member from organization %q: %w", organization.HumanName(), err) + } + + _, _ = fmt.Fprintf(inv.Stdout, "Organization member removed from %q\n", organization.HumanName()) + return nil + }, + } + + return cmd +} + +func (r *RootCmd) addOrganizationMember() *serpent.Command { + client := new(codersdk.Client) + + cmd := &serpent.Command{ + Use: "add ", + Short: "Add a new member to the current organization", + Middleware: serpent.Chain( + r.InitClient(client), + serpent.RequireNArgs(1), + ), + Handler: func(inv *serpent.Invocation) error { + ctx := inv.Context() + organization, err := CurrentOrganization(r, inv, client) + if err != nil { + return err + } + user := inv.Args[0] + + _, err = client.PostOrganizationMember(ctx, organization.ID, user) + if err != nil { + return xerrors.Errorf("could not add member to organization %q: %w", organization.HumanName(), err) + } + + _, _ = fmt.Fprintf(inv.Stdout, "Organization member added to %q\n", organization.HumanName()) + return nil + }, + } + + return cmd +} + +func (r *RootCmd) assignOrganizationRoles() *serpent.Command { + client := new(codersdk.Client) + + cmd := &serpent.Command{ + Use: "edit-roles [roles...]", + Aliases: []string{"edit-role"}, + Short: "Edit organization member's roles", + Middleware: serpent.Chain( + r.InitClient(client), + ), + Handler: func(inv *serpent.Invocation) error { + ctx := inv.Context() + organization, err := CurrentOrganization(r, inv, client) + if err != nil { + return err + } + + if len(inv.Args) < 1 { + return xerrors.Errorf("user_id or username is required as the first argument") + } + userIdentifier := inv.Args[0] + roles := inv.Args[1:] + + member, err := client.UpdateOrganizationMemberRoles(ctx, organization.ID, userIdentifier, codersdk.UpdateRoles{ + Roles: roles, + }) + if err != nil { + return xerrors.Errorf("update 
member roles: %w", err) + } + + updatedTo := make([]string, 0) + for _, role := range member.Roles { + updatedTo = append(updatedTo, role.String()) + } + + _, _ = fmt.Fprintf(inv.Stdout, "Member roles updated to [%s]\n", strings.Join(updatedTo, ", ")) + return nil + }, + } + + return cmd +} + +func (r *RootCmd) listOrganizationMembers() *serpent.Command { + formatter := cliui.NewOutputFormatter( + cliui.TableFormat([]codersdk.OrganizationMemberWithName{}, []string{"username", "organization_roles"}), + cliui.JSONFormat(), + ) + + client := new(codersdk.Client) + cmd := &serpent.Command{ + Use: "list", + Short: "List all organization members", + Middleware: serpent.Chain( + serpent.RequireNArgs(0), + r.InitClient(client), + ), + Handler: func(inv *serpent.Invocation) error { + ctx := inv.Context() + organization, err := CurrentOrganization(r, inv, client) + if err != nil { + return err + } + + res, err := client.OrganizationMembers(ctx, organization.ID) + if err != nil { + return xerrors.Errorf("fetch members: %w", err) + } + + out, err := formatter.Format(inv.Context(), res) + if err != nil { + return err + } + + _, err = fmt.Fprintln(inv.Stdout, out) + return err + }, + } + formatter.AttachOptions(&cmd.Options) + + return cmd +} diff --git a/cli/organizationmembers_test.go b/cli/organizationmembers_test.go new file mode 100644 index 0000000000000..bb0029d77a98b --- /dev/null +++ b/cli/organizationmembers_test.go @@ -0,0 +1,120 @@ +package cli_test + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/coder/coder/v2/cli/clitest" + "github.com/coder/coder/v2/coderd/coderdtest" + "github.com/coder/coder/v2/coderd/rbac" + "github.com/coder/coder/v2/codersdk" + "github.com/coder/coder/v2/testutil" +) + +func TestListOrganizationMembers(t *testing.T) { + t.Parallel() + + t.Run("OK", func(t *testing.T) { + t.Parallel() + + ownerClient := coderdtest.New(t, &coderdtest.Options{}) + owner := coderdtest.CreateFirstUser(t, ownerClient) + client, user := coderdtest.CreateAnotherUser(t, ownerClient, owner.OrganizationID, rbac.RoleUserAdmin()) + + ctx := testutil.Context(t, testutil.WaitMedium) + inv, root := clitest.New(t, "organization", "members", "list", "-c", "user_id,username,roles") + clitest.SetupConfig(t, client, root) + + buf := new(bytes.Buffer) + inv.Stdout = buf + err := inv.WithContext(ctx).Run() + require.NoError(t, err) + require.Contains(t, buf.String(), user.Username) + require.Contains(t, buf.String(), owner.UserID.String()) + }) +} + +func TestAddOrganizationMembers(t *testing.T) { + t.Parallel() + + t.Run("OK", func(t *testing.T) { + t.Parallel() + + ownerClient := coderdtest.New(t, &coderdtest.Options{}) + owner := coderdtest.CreateFirstUser(t, ownerClient) + _, user := coderdtest.CreateAnotherUser(t, ownerClient, owner.OrganizationID) + + ctx := testutil.Context(t, testutil.WaitMedium) + //nolint:gocritic // must be an owner, only owners can create orgs + otherOrg, err := ownerClient.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ + Name: "Other", + DisplayName: "", + Description: "", + Icon: "", + }) + require.NoError(t, err, "create another organization") + + inv, root := clitest.New(t, "organization", "members", "add", "--organization", otherOrg.ID.String(), user.Username) + //nolint:gocritic // must be an owner + clitest.SetupConfig(t, ownerClient, root) + + buf := new(bytes.Buffer) + inv.Stdout = buf + err = inv.WithContext(ctx).Run() + require.NoError(t, err) + + //nolint:gocritic // must be an owner + members, err := 
ownerClient.OrganizationMembers(ctx, otherOrg.ID) + require.NoError(t, err) + + require.Len(t, members, 2) + }) +} + +func TestRemoveOrganizationMembers(t *testing.T) { + t.Parallel() + + t.Run("OK", func(t *testing.T) { + t.Parallel() + + ownerClient := coderdtest.New(t, &coderdtest.Options{}) + owner := coderdtest.CreateFirstUser(t, ownerClient) + orgAdminClient, _ := coderdtest.CreateAnotherUser(t, ownerClient, owner.OrganizationID, rbac.ScopedRoleOrgAdmin(owner.OrganizationID)) + _, user := coderdtest.CreateAnotherUser(t, ownerClient, owner.OrganizationID) + + ctx := testutil.Context(t, testutil.WaitMedium) + + inv, root := clitest.New(t, "organization", "members", "remove", "--organization", owner.OrganizationID.String(), user.Username) + clitest.SetupConfig(t, orgAdminClient, root) + + buf := new(bytes.Buffer) + inv.Stdout = buf + err := inv.WithContext(ctx).Run() + require.NoError(t, err) + + members, err := orgAdminClient.OrganizationMembers(ctx, owner.OrganizationID) + require.NoError(t, err) + + require.Len(t, members, 2) + }) + + t.Run("UserNotExists", func(t *testing.T) { + t.Parallel() + + ownerClient := coderdtest.New(t, &coderdtest.Options{}) + owner := coderdtest.CreateFirstUser(t, ownerClient) + orgAdminClient, _ := coderdtest.CreateAnotherUser(t, ownerClient, owner.OrganizationID, rbac.ScopedRoleOrgAdmin(owner.OrganizationID)) + + ctx := testutil.Context(t, testutil.WaitMedium) + + inv, root := clitest.New(t, "organization", "members", "remove", "--organization", owner.OrganizationID.String(), "random_name") + clitest.SetupConfig(t, orgAdminClient, root) + + buf := new(bytes.Buffer) + inv.Stdout = buf + err := inv.WithContext(ctx).Run() + require.ErrorContains(t, err, "must be an existing uuid or username") + }) +} diff --git a/cli/organizationroles.go b/cli/organizationroles.go index 91d1b20f54dd4..75cf048198b30 100644 --- a/cli/organizationroles.go +++ b/cli/organizationroles.go @@ -1,13 +1,17 @@ package cli import ( + "encoding/json" "fmt" + "io" "slices" "strings" + "github.com/google/uuid" "golang.org/x/xerrors" "github.com/coder/coder/v2/cli/cliui" + "github.com/coder/coder/v2/coderd/util/slice" "github.com/coder/coder/v2/codersdk" "github.com/coder/serpent" ) @@ -23,6 +27,7 @@ func (r *RootCmd) organizationRoles() *serpent.Command { Hidden: true, Children: []*serpent.Command{ r.showOrganizationRoles(), + r.editOrganizationRole(), }, } return cmd @@ -31,25 +36,19 @@ func (r *RootCmd) organizationRoles() *serpent.Command { func (r *RootCmd) showOrganizationRoles() *serpent.Command { formatter := cliui.NewOutputFormatter( cliui.ChangeFormatterData( - cliui.TableFormat([]assignableRolesTableRow{}, []string{"name", "display_name", "built_in", "site_permissions", "org_permissions", "user_permissions"}), + cliui.TableFormat([]roleTableRow{}, []string{"name", "display_name", "site_permissions", "organization_permissions", "user_permissions"}), func(data any) (any, error) { - input, ok := data.([]codersdk.AssignableRoles) + inputs, ok := data.([]codersdk.AssignableRoles) if !ok { return nil, xerrors.Errorf("expected []codersdk.AssignableRoles got %T", data) } - rows := make([]assignableRolesTableRow, 0, len(input)) - for _, role := range input { - rows = append(rows, assignableRolesTableRow{ - Name: role.Name, - DisplayName: role.DisplayName, - SitePermissions: fmt.Sprintf("%d permissions", len(role.SitePermissions)), - OrganizationPermissions: fmt.Sprintf("%d organizations", len(role.OrganizationPermissions)), - UserPermissions: fmt.Sprintf("%d permissions", 
len(role.UserPermissions)), - Assignable: role.Assignable, - BuiltIn: role.BuiltIn, - }) + + tableRows := make([]roleTableRow, 0) + for _, input := range inputs { + tableRows = append(tableRows, roleToTableView(input.Role)) } - return rows, nil + + return tableRows, nil }, ), cliui.JSONFormat(), @@ -101,13 +100,297 @@ func (r *RootCmd) showOrganizationRoles() *serpent.Command { return cmd } -type assignableRolesTableRow struct { +func (r *RootCmd) editOrganizationRole() *serpent.Command { + formatter := cliui.NewOutputFormatter( + cliui.ChangeFormatterData( + cliui.TableFormat([]roleTableRow{}, []string{"name", "display_name", "site_permissions", "organization_permissions", "user_permissions"}), + func(data any) (any, error) { + typed, _ := data.(codersdk.Role) + return []roleTableRow{roleToTableView(typed)}, nil + }, + ), + cliui.JSONFormat(), + ) + + var ( + dryRun bool + jsonInput bool + ) + + client := new(codersdk.Client) + cmd := &serpent.Command{ + Use: "edit ", + Short: "Edit an organization custom role", + Long: FormatExamples( + Example{ + Description: "Run with an input.json file", + Command: "coder roles edit --stdin < role.json", + }, + ), + Options: []serpent.Option{ + cliui.SkipPromptOption(), + { + Name: "dry-run", + Description: "Does all the work, but does not submit the final updated role.", + Flag: "dry-run", + Value: serpent.BoolOf(&dryRun), + }, + { + Name: "stdin", + Description: "Reads stdin for the json role definition to upload.", + Flag: "stdin", + Value: serpent.BoolOf(&jsonInput), + }, + }, + Middleware: serpent.Chain( + serpent.RequireRangeArgs(0, 1), + r.InitClient(client), + ), + Handler: func(inv *serpent.Invocation) error { + ctx := inv.Context() + org, err := CurrentOrganization(r, inv, client) + if err != nil { + return err + } + + var customRole codersdk.Role + if jsonInput { + // JSON Upload mode + bytes, err := io.ReadAll(inv.Stdin) + if err != nil { + return xerrors.Errorf("reading stdin: %w", err) + } + + err = json.Unmarshal(bytes, &customRole) + if err != nil { + return xerrors.Errorf("parsing stdin json: %w", err) + } + + if customRole.Name == "" { + arr := make([]json.RawMessage, 0) + err = json.Unmarshal(bytes, &arr) + if err == nil && len(arr) > 0 { + return xerrors.Errorf("the input appears to be an array, only 1 role can be sent at a time") + } + return xerrors.Errorf("json input does not appear to be a valid role") + } + } else { + if len(inv.Args) == 0 { + return xerrors.Errorf("missing role name argument, usage: \"coder organizations roles edit \"") + } + + interactiveRole, err := interactiveOrgRoleEdit(inv, org.ID, client) + if err != nil { + return xerrors.Errorf("editing role: %w", err) + } + + customRole = *interactiveRole + + preview := fmt.Sprintf("permissions: %d site, %d org, %d user", + len(customRole.SitePermissions), len(customRole.OrganizationPermissions), len(customRole.UserPermissions)) + _, err = cliui.Prompt(inv, cliui.PromptOptions{ + Text: "Are you sure you wish to update the role? 
" + preview, + Default: "yes", + IsConfirm: true, + }) + if err != nil { + return xerrors.Errorf("abort: %w", err) + } + } + + var updated codersdk.Role + if dryRun { + // Do not actually post + updated = customRole + } else { + updated, err = client.PatchOrganizationRole(ctx, org.ID, customRole) + if err != nil { + return xerrors.Errorf("patch role: %w", err) + } + } + + output, err := formatter.Format(ctx, updated) + if err != nil { + return xerrors.Errorf("formatting: %w", err) + } + + _, err = fmt.Fprintln(inv.Stdout, output) + return err + }, + } + + formatter.AttachOptions(&cmd.Options) + return cmd +} + +func interactiveOrgRoleEdit(inv *serpent.Invocation, orgID uuid.UUID, client *codersdk.Client) (*codersdk.Role, error) { + ctx := inv.Context() + roles, err := client.ListOrganizationRoles(ctx, orgID) + if err != nil { + return nil, xerrors.Errorf("listing roles: %w", err) + } + + // Make sure the role actually exists first + var originalRole codersdk.AssignableRoles + for _, r := range roles { + if strings.EqualFold(inv.Args[0], r.Name) { + originalRole = r + break + } + } + + if originalRole.Name == "" { + _, err = cliui.Prompt(inv, cliui.PromptOptions{ + Text: "No organization role exists with that name, do you want to create one?", + Default: "yes", + IsConfirm: true, + }) + if err != nil { + return nil, xerrors.Errorf("abort: %w", err) + } + + originalRole.Role = codersdk.Role{ + Name: inv.Args[0], + OrganizationID: orgID.String(), + } + } + + // Some checks since interactive mode is limited in what it currently sees + if len(originalRole.SitePermissions) > 0 { + return nil, xerrors.Errorf("unable to edit role in interactive mode, it contains site wide permissions") + } + + if len(originalRole.UserPermissions) > 0 { + return nil, xerrors.Errorf("unable to edit role in interactive mode, it contains user permissions") + } + + role := &originalRole.Role + allowedResources := []codersdk.RBACResource{ + codersdk.ResourceTemplate, + codersdk.ResourceWorkspace, + codersdk.ResourceUser, + codersdk.ResourceGroup, + } + + const done = "Finish and submit changes" + const abort = "Cancel changes" + + // Now starts the role editing "game". +customRoleLoop: + for { + selected, err := cliui.Select(inv, cliui.SelectOptions{ + Message: "Select which resources to edit permissions", + Options: append(permissionPreviews(role, allowedResources), done, abort), + }) + if err != nil { + return role, xerrors.Errorf("selecting resource: %w", err) + } + switch selected { + case done: + break customRoleLoop + case abort: + return role, xerrors.Errorf("edit role %q aborted", role.Name) + default: + strs := strings.Split(selected, "::") + resource := strings.TrimSpace(strs[0]) + + actions, err := cliui.MultiSelect(inv, cliui.MultiSelectOptions{ + Message: fmt.Sprintf("Select actions to allow across the whole deployment for resources=%q", resource), + Options: slice.ToStrings(codersdk.RBACResourceActions[codersdk.RBACResource(resource)]), + Defaults: defaultActions(role, resource), + }) + if err != nil { + return role, xerrors.Errorf("selecting actions for resource %q: %w", resource, err) + } + applyOrgResourceActions(role, resource, actions) + // back to resources! + } + } + // This println is required because the prompt ends us on the same line as some text. 
+ _, _ = fmt.Println() + + return role, nil +} + +func applyOrgResourceActions(role *codersdk.Role, resource string, actions []string) { + if role.OrganizationPermissions == nil { + role.OrganizationPermissions = make([]codersdk.Permission, 0) + } + + // Construct new site perms with only new perms for the resource + keep := make([]codersdk.Permission, 0) + for _, perm := range role.OrganizationPermissions { + perm := perm + if string(perm.ResourceType) != resource { + keep = append(keep, perm) + } + } + + // Add new perms + for _, action := range actions { + keep = append(keep, codersdk.Permission{ + Negate: false, + ResourceType: codersdk.RBACResource(resource), + Action: codersdk.RBACAction(action), + }) + } + + role.OrganizationPermissions = keep +} + +func defaultActions(role *codersdk.Role, resource string) []string { + if role.OrganizationPermissions == nil { + role.OrganizationPermissions = []codersdk.Permission{} + } + + defaults := make([]string, 0) + for _, perm := range role.OrganizationPermissions { + if string(perm.ResourceType) == resource { + defaults = append(defaults, string(perm.Action)) + } + } + return defaults +} + +func permissionPreviews(role *codersdk.Role, resources []codersdk.RBACResource) []string { + previews := make([]string, 0, len(resources)) + for _, resource := range resources { + previews = append(previews, permissionPreview(role, resource)) + } + return previews +} + +func permissionPreview(role *codersdk.Role, resource codersdk.RBACResource) string { + if role.OrganizationPermissions == nil { + role.OrganizationPermissions = []codersdk.Permission{} + } + + count := 0 + for _, perm := range role.OrganizationPermissions { + if perm.ResourceType == resource { + count++ + } + } + return fmt.Sprintf("%s :: %d permissions", resource, count) +} + +func roleToTableView(role codersdk.Role) roleTableRow { + return roleTableRow{ + Name: role.Name, + DisplayName: role.DisplayName, + OrganizationID: role.OrganizationID, + SitePermissions: fmt.Sprintf("%d permissions", len(role.SitePermissions)), + OrganizationPermissions: fmt.Sprintf("%d permissions", len(role.OrganizationPermissions)), + UserPermissions: fmt.Sprintf("%d permissions", len(role.UserPermissions)), + } +} + +type roleTableRow struct { Name string `table:"name,default_sort"` DisplayName string `table:"display_name"` + OrganizationID string `table:"organization_id"` SitePermissions string ` table:"site_permissions"` // map[] -> Permissions - OrganizationPermissions string `table:"org_permissions"` + OrganizationPermissions string `table:"organization_permissions"` UserPermissions string `table:"user_permissions"` - Assignable bool `table:"assignable"` - BuiltIn bool `table:"built_in"` } diff --git a/cli/prompts.go b/cli/prompts.go new file mode 100644 index 0000000000000..a9dab5f34a1ec --- /dev/null +++ b/cli/prompts.go @@ -0,0 +1,147 @@ +package cli + +import ( + "fmt" + "strings" + + "golang.org/x/xerrors" + + "github.com/coder/coder/v2/cli/cliui" + "github.com/coder/coder/v2/codersdk" + "github.com/coder/serpent" +) + +func (RootCmd) promptExample() *serpent.Command { + promptCmd := func(use string, prompt func(inv *serpent.Invocation) error, options ...serpent.Option) *serpent.Command { + return &serpent.Command{ + Use: use, + Options: options, + Handler: func(inv *serpent.Invocation) error { + return prompt(inv) + }, + } + } + + var useSearch bool + useSearchOption := serpent.Option{ + Name: "search", + Description: "Show the search.", + Required: false, + Flag: "search", + Value: 
serpent.BoolOf(&useSearch), + } + cmd := &serpent.Command{ + Use: "prompt-example", + Short: "Example of various prompt types used within coder cli.", + Long: "Example of various prompt types used within coder cli. " + + "This command exists to aid in adjusting visuals of command prompts.", + Handler: func(inv *serpent.Invocation) error { + return inv.Command.HelpHandler(inv) + }, + Children: []*serpent.Command{ + promptCmd("confirm", func(inv *serpent.Invocation) error { + value, err := cliui.Prompt(inv, cliui.PromptOptions{ + Text: "Basic confirmation prompt.", + Default: "yes", + IsConfirm: true, + }) + _, _ = fmt.Fprintf(inv.Stdout, "%s\n", value) + return err + }), + promptCmd("validation", func(inv *serpent.Invocation) error { + value, err := cliui.Prompt(inv, cliui.PromptOptions{ + Text: "Input a string that starts with a capital letter.", + Default: "", + Secret: false, + IsConfirm: false, + Validate: func(s string) error { + if len(s) == 0 { + return xerrors.Errorf("an input string is required") + } + if strings.ToUpper(string(s[0])) != string(s[0]) { + return xerrors.Errorf("input string must start with a capital letter") + } + return nil + }, + }) + _, _ = fmt.Fprintf(inv.Stdout, "%s\n", value) + return err + }), + promptCmd("secret", func(inv *serpent.Invocation) error { + value, err := cliui.Prompt(inv, cliui.PromptOptions{ + Text: "Input a secret", + Default: "", + Secret: true, + IsConfirm: false, + Validate: func(s string) error { + if len(s) == 0 { + return xerrors.Errorf("an input string is required") + } + return nil + }, + }) + _, _ = fmt.Fprintf(inv.Stdout, "Your secret of length %d is safe with me\n", len(value)) + return err + }), + promptCmd("select", func(inv *serpent.Invocation) error { + value, err := cliui.Select(inv, cliui.SelectOptions{ + Options: []string{ + "Blue", "Green", "Yellow", "Red", "Something else", + }, + Default: "", + Message: "Select your favorite color:", + Size: 5, + HideSearch: !useSearch, + }) + if value == "Something else" { + _, _ = fmt.Fprint(inv.Stdout, "I would have picked blue.\n") + } else { + _, _ = fmt.Fprintf(inv.Stdout, "%s is a nice color.\n", value) + } + return err + }, useSearchOption), + promptCmd("multi-select", func(inv *serpent.Invocation) error { + values, err := cliui.MultiSelect(inv, cliui.MultiSelectOptions{ + Message: "Select some things:", + Options: []string{ + "Code", "Chair", "Whale", "Diamond", "Carrot", + }, + Defaults: []string{"Code"}, + }) + _, _ = fmt.Fprintf(inv.Stdout, "%q are nice choices.\n", strings.Join(values, ", ")) + return err + }), + promptCmd("rich-parameter", func(inv *serpent.Invocation) error { + value, err := cliui.RichSelect(inv, cliui.RichSelectOptions{ + Options: []codersdk.TemplateVersionParameterOption{ + { + Name: "Blue", + Description: "Like the ocean.", + Value: "blue", + Icon: "/logo/blue.png", + }, + { + Name: "Red", + Description: "Like a clown's nose.", + Value: "red", + Icon: "/logo/red.png", + }, + { + Name: "Yellow", + Description: "Like a bumblebee. 
", + Value: "yellow", + Icon: "/logo/yellow.png", + }, + }, + Default: "blue", + Size: 5, + HideSearch: useSearch, + }) + _, _ = fmt.Fprintf(inv.Stdout, "%s is a good choice.\n", value.Name) + return err + }, useSearchOption), + }, + } + + return cmd +} diff --git a/cli/root.go b/cli/root.go index 2c7443cde5749..073486c640744 100644 --- a/cli/root.go +++ b/cli/root.go @@ -657,7 +657,7 @@ func CurrentOrganization(r *RootCmd, inv *serpent.Invocation, client *codersdk.C }) if index < 0 { - return codersdk.Organization{}, xerrors.Errorf("organization %q not found, are you sure you are a member of this organization?", selected) + return codersdk.Organization{}, xerrors.Errorf("organization %q not found, are you sure you are a member of this organization? If unsure, run 'coder organizations set \"\" ' to reset your current context.", selected) } return orgs[index], nil } diff --git a/cli/server.go b/cli/server.go index 3706b2ee1bc92..79d2b132ad6e3 100644 --- a/cli/server.go +++ b/cli/server.go @@ -62,7 +62,6 @@ import ( "github.com/coder/coder/v2/cli/config" "github.com/coder/coder/v2/coderd" "github.com/coder/coder/v2/coderd/autobuild" - "github.com/coder/coder/v2/coderd/batchstats" "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/awsiamrds" "github.com/coder/coder/v2/coderd/database/dbmem" @@ -87,7 +86,7 @@ import ( stringutil "github.com/coder/coder/v2/coderd/util/strings" "github.com/coder/coder/v2/coderd/workspaceapps" "github.com/coder/coder/v2/coderd/workspaceapps/appurl" - "github.com/coder/coder/v2/coderd/workspaceusage" + "github.com/coder/coder/v2/coderd/workspacestats" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/codersdk/drpc" "github.com/coder/coder/v2/cryptorand" @@ -169,6 +168,7 @@ func createOIDCConfig(ctx context.Context, vals *codersdk.DeploymentValues) (*co EmailDomain: vals.OIDC.EmailDomain, AllowSignups: vals.OIDC.AllowSignups.Value(), UsernameField: vals.OIDC.UsernameField.String(), + NameField: vals.OIDC.NameField.String(), EmailField: vals.OIDC.EmailField.String(), AuthURLParams: vals.OIDC.AuthURLParams.Value, IgnoreUserInfo: vals.OIDC.IgnoreUserInfo.Value(), @@ -796,31 +796,18 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd. 
cliui.Infof(inv.Stdout, "\n==> Logs will stream in below (press ctrl+c to gracefully exit):") if vals.Telemetry.Enable { - gitAuth := make([]telemetry.GitAuth, 0) - // TODO: - var gitAuthConfigs []codersdk.ExternalAuthConfig - for _, cfg := range gitAuthConfigs { - gitAuth = append(gitAuth, telemetry.GitAuth{ - Type: cfg.Type, - }) + vals, err := vals.WithoutSecrets() + if err != nil { + return xerrors.Errorf("remove secrets from deployment values: %w", err) } - options.Telemetry, err = telemetry.New(telemetry.Options{ - BuiltinPostgres: builtinPostgres, - DeploymentID: deploymentID, - Database: options.Database, - Logger: logger.Named("telemetry"), - URL: vals.Telemetry.URL.Value(), - Wildcard: vals.WildcardAccessURL.String() != "", - DERPServerRelayURL: vals.DERP.Server.RelayURL.String(), - GitAuth: gitAuth, - GitHubOAuth: vals.OAuth2.Github.ClientID != "", - OIDCAuth: vals.OIDC.ClientID != "", - OIDCIssuerURL: vals.OIDC.IssuerURL.String(), - Prometheus: vals.Prometheus.Enable.Value(), - STUN: len(vals.DERP.Server.STUNAddresses) != 0, - Tunnel: tunnel != nil, - Experiments: vals.Experiments.Value(), + BuiltinPostgres: builtinPostgres, + DeploymentID: deploymentID, + Database: options.Database, + Logger: logger.Named("telemetry"), + URL: vals.Telemetry.URL.Value(), + Tunnel: tunnel != nil, + DeploymentConfig: vals, ParseLicenseJWT: func(lic *telemetry.License) error { // This will be nil when running in AGPL-only mode. if options.ParseLicenseClaims == nil { @@ -869,9 +856,9 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd. options.SwaggerEndpoint = vals.Swagger.Enable.Value() } - batcher, closeBatcher, err := batchstats.New(ctx, - batchstats.WithLogger(options.Logger.Named("batchstats")), - batchstats.WithStore(options.Database), + batcher, closeBatcher, err := workspacestats.NewBatcher(ctx, + workspacestats.BatcherWithLogger(options.Logger.Named("batchstats")), + workspacestats.BatcherWithStore(options.Database), ) if err != nil { return xerrors.Errorf("failed to create agent stats batcher: %w", err) @@ -976,8 +963,8 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd. defer purger.Close() // Updates workspace usage - tracker := workspaceusage.New(options.Database, - workspaceusage.WithLogger(logger.Named("workspace_usage_tracker")), + tracker := workspacestats.NewTracker(options.Database, + workspacestats.TrackerWithLogger(logger.Named("workspace_usage_tracker")), ) options.WorkspaceUsageTracker = tracker defer tracker.Close() diff --git a/cli/server_createadminuser.go b/cli/server_createadminuser.go index 278ecafb0644a..19326ba728ce6 100644 --- a/cli/server_createadminuser.go +++ b/cli/server_createadminuser.go @@ -85,6 +85,7 @@ func (r *RootCmd) newCreateAdminUserCommand() *serpent.Command { // Use the validator tags so we match the API's validation. 
req := codersdk.CreateUserRequest{ Username: "username", + Name: "Admin User", Email: "email@coder.com", Password: "ValidPa$$word123!", OrganizationID: uuid.New(), @@ -116,6 +117,7 @@ func (r *RootCmd) newCreateAdminUserCommand() *serpent.Command { return err } } + if newUserEmail == "" { newUserEmail, err = cliui.Prompt(inv, cliui.PromptOptions{ Text: "Email", @@ -189,10 +191,11 @@ func (r *RootCmd) newCreateAdminUserCommand() *serpent.Command { ID: uuid.New(), Email: newUserEmail, Username: newUserUsername, + Name: "Admin User", HashedPassword: []byte(hashedPassword), CreatedAt: dbtime.Now(), UpdatedAt: dbtime.Now(), - RBACRoles: []string{rbac.RoleOwner()}, + RBACRoles: []string{rbac.RoleOwner().String()}, LoginType: database.LoginTypePassword, }) if err != nil { @@ -222,7 +225,7 @@ func (r *RootCmd) newCreateAdminUserCommand() *serpent.Command { UserID: newUser.ID, CreatedAt: dbtime.Now(), UpdatedAt: dbtime.Now(), - Roles: []string{rbac.RoleOrgAdmin(org.ID)}, + Roles: []string{rbac.RoleOrgAdmin()}, }) if err != nil { return xerrors.Errorf("insert organization member: %w", err) diff --git a/cli/server_createadminuser_test.go b/cli/server_createadminuser_test.go index 67ce74fd237a3..6e3939ea298d6 100644 --- a/cli/server_createadminuser_test.go +++ b/cli/server_createadminuser_test.go @@ -17,6 +17,7 @@ import ( "github.com/coder/coder/v2/coderd/database/dbtime" "github.com/coder/coder/v2/coderd/rbac" "github.com/coder/coder/v2/coderd/userpassword" + "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/pty/ptytest" "github.com/coder/coder/v2/testutil" ) @@ -56,7 +57,7 @@ func TestServerCreateAdminUser(t *testing.T) { require.NoError(t, err) require.True(t, ok, "password does not match") - require.EqualValues(t, []string{rbac.RoleOwner()}, user.RBACRoles, "user does not have owner role") + require.EqualValues(t, []string{codersdk.RoleOwner}, user.RBACRoles, "user does not have owner role") // Check that user is admin in every org. orgs, err := db.GetOrganizations(ctx) @@ -66,12 +67,12 @@ func TestServerCreateAdminUser(t *testing.T) { orgIDs[org.ID] = struct{}{} } - orgMemberships, err := db.GetOrganizationMembershipsByUserID(ctx, user.ID) + orgMemberships, err := db.OrganizationMembers(ctx, database.OrganizationMembersParams{UserID: user.ID}) require.NoError(t, err) orgIDs2 := make(map[uuid.UUID]struct{}, len(orgMemberships)) for _, membership := range orgMemberships { - orgIDs2[membership.OrganizationID] = struct{}{} - assert.Equal(t, []string{rbac.RoleOrgAdmin(membership.OrganizationID)}, membership.Roles, "user is not org admin") + orgIDs2[membership.OrganizationMember.OrganizationID] = struct{}{} + assert.Equal(t, []string{rbac.RoleOrgAdmin()}, membership.OrganizationMember.Roles, "user is not org admin") } require.Equal(t, orgIDs, orgIDs2, "user is not in all orgs") diff --git a/cli/server_test.go b/cli/server_test.go index 3ca57cf0ce162..b163713cff303 100644 --- a/cli/server_test.go +++ b/cli/server_test.go @@ -967,26 +967,32 @@ func TestServer(t *testing.T) { assert.NoError(t, err) // nolint:bodyclose res, err = http.DefaultClient.Do(req) - return err == nil - }, testutil.WaitShort, testutil.IntervalFast) - defer res.Body.Close() - - scanner := bufio.NewScanner(res.Body) - hasActiveUsers := false - for scanner.Scan() { - // This metric is manually registered to be tracked in the server. That's - // why we test it's tracked here. 
- if strings.HasPrefix(scanner.Text(), "coderd_api_active_users_duration_hour") { - hasActiveUsers = true - continue + if err != nil { + return false } - if strings.HasPrefix(scanner.Text(), "coderd_db_query_latencies_seconds") { - t.Fatal("db metrics should not be tracked when --prometheus-collect-db-metrics is not enabled") + defer res.Body.Close() + + scanner := bufio.NewScanner(res.Body) + hasActiveUsers := false + for scanner.Scan() { + // This metric is manually registered to be tracked in the server. That's + // why we test it's tracked here. + if strings.HasPrefix(scanner.Text(), "coderd_api_active_users_duration_hour") { + hasActiveUsers = true + continue + } + if strings.HasPrefix(scanner.Text(), "coderd_db_query_latencies_seconds") { + t.Fatal("db metrics should not be tracked when --prometheus-collect-db-metrics is not enabled") + } + t.Logf("scanned %s", scanner.Text()) } - t.Logf("scanned %s", scanner.Text()) - } - require.NoError(t, scanner.Err()) - require.True(t, hasActiveUsers) + if scanner.Err() != nil { + t.Logf("scanner err: %s", scanner.Err().Error()) + return false + } + + return hasActiveUsers + }, testutil.WaitShort, testutil.IntervalFast, "didn't find coderd_api_active_users_duration_hour in time") }) t.Run("DBMetricsEnabled", func(t *testing.T) { @@ -1017,20 +1023,25 @@ func TestServer(t *testing.T) { assert.NoError(t, err) // nolint:bodyclose res, err = http.DefaultClient.Do(req) - return err == nil - }, testutil.WaitShort, testutil.IntervalFast) - defer res.Body.Close() - - scanner := bufio.NewScanner(res.Body) - hasDBMetrics := false - for scanner.Scan() { - if strings.HasPrefix(scanner.Text(), "coderd_db_query_latencies_seconds") { - hasDBMetrics = true + if err != nil { + return false } - t.Logf("scanned %s", scanner.Text()) - } - require.NoError(t, scanner.Err()) - require.True(t, hasDBMetrics) + defer res.Body.Close() + + scanner := bufio.NewScanner(res.Body) + hasDBMetrics := false + for scanner.Scan() { + if strings.HasPrefix(scanner.Text(), "coderd_db_query_latencies_seconds") { + hasDBMetrics = true + } + t.Logf("scanned %s", scanner.Text()) + } + if scanner.Err() != nil { + t.Logf("scanner err: %s", scanner.Err().Error()) + return false + } + return hasDBMetrics + }, testutil.WaitShort, testutil.IntervalFast, "didn't find coderd_db_query_latencies_seconds in time") }) }) t.Run("GitHubOAuth", func(t *testing.T) { @@ -1347,7 +1358,7 @@ func TestServer(t *testing.T) { } return lastStat.Size() > 0 }, - testutil.WaitShort, + dur, //nolint:gocritic testutil.IntervalFast, "file at %s should exist, last stat: %+v", fiName, lastStat, diff --git a/cli/speedtest.go b/cli/speedtest.go index 9f8090ef99731..42fe7604c6dc4 100644 --- a/cli/speedtest.go +++ b/cli/speedtest.go @@ -6,7 +6,6 @@ import ( "os" "time" - "github.com/jedib0t/go-pretty/v6/table" "golang.org/x/xerrors" tsspeedtest "tailscale.com/net/speedtest" "tailscale.com/wgengine/capture" @@ -19,12 +18,51 @@ import ( "github.com/coder/serpent" ) +type SpeedtestResult struct { + Overall SpeedtestResultInterval `json:"overall"` + Intervals []SpeedtestResultInterval `json:"intervals"` +} + +type SpeedtestResultInterval struct { + StartTimeSeconds float64 `json:"start_time_seconds"` + EndTimeSeconds float64 `json:"end_time_seconds"` + ThroughputMbits float64 `json:"throughput_mbits"` +} + +type speedtestTableItem struct { + Interval string `table:"Interval,nosort"` + Throughput string `table:"Throughput"` +} + func (r *RootCmd) speedtest() *serpent.Command { var ( direct bool duration time.Duration direction 
string pcapFile string + formatter = cliui.NewOutputFormatter( + cliui.ChangeFormatterData(cliui.TableFormat([]speedtestTableItem{}, []string{"Interval", "Throughput"}), func(data any) (any, error) { + res, ok := data.(SpeedtestResult) + if !ok { + // This should never happen + return "", xerrors.Errorf("expected speedtestResult, got %T", data) + } + tableRows := make([]any, len(res.Intervals)+2) + for i, r := range res.Intervals { + tableRows[i] = speedtestTableItem{ + Interval: fmt.Sprintf("%.2f-%.2f sec", r.StartTimeSeconds, r.EndTimeSeconds), + Throughput: fmt.Sprintf("%.4f Mbits/sec", r.ThroughputMbits), + } + } + tableRows[len(res.Intervals)] = cliui.TableSeparator{} + tableRows[len(res.Intervals)+1] = speedtestTableItem{ + Interval: fmt.Sprintf("%.2f-%.2f sec", res.Overall.StartTimeSeconds, res.Overall.EndTimeSeconds), + Throughput: fmt.Sprintf("%.4f Mbits/sec", res.Overall.ThroughputMbits), + } + return tableRows, nil + }), + cliui.JSONFormat(), + ) ) client := new(codersdk.Client) cmd := &serpent.Command{ @@ -101,14 +139,14 @@ func (r *RootCmd) speedtest() *serpent.Command { } peer := status.Peer[status.Peers()[0]] if !p2p && direct { - cliui.Infof(inv.Stdout, "Waiting for a direct connection... (%dms via %s)", dur.Milliseconds(), peer.Relay) + cliui.Infof(inv.Stderr, "Waiting for a direct connection... (%dms via %s)", dur.Milliseconds(), peer.Relay) continue } via := peer.Relay if via == "" { via = "direct" } - cliui.Infof(inv.Stdout, "%dms via %s", dur.Milliseconds(), via) + cliui.Infof(inv.Stderr, "%dms via %s", dur.Milliseconds(), via) break } } else { @@ -124,24 +162,32 @@ func (r *RootCmd) speedtest() *serpent.Command { default: return xerrors.Errorf("invalid direction: %q", direction) } - cliui.Infof(inv.Stdout, "Starting a %ds %s test...", int(duration.Seconds()), tsDir) + cliui.Infof(inv.Stderr, "Starting a %ds %s test...", int(duration.Seconds()), tsDir) results, err := conn.Speedtest(ctx, tsDir, duration) if err != nil { return err } - tableWriter := cliui.Table() - tableWriter.AppendHeader(table.Row{"Interval", "Throughput"}) + var outputResult SpeedtestResult startTime := results[0].IntervalStart - for _, r := range results { + outputResult.Intervals = make([]SpeedtestResultInterval, len(results)-1) + for i, r := range results { + interval := SpeedtestResultInterval{ + StartTimeSeconds: r.IntervalStart.Sub(startTime).Seconds(), + EndTimeSeconds: r.IntervalEnd.Sub(startTime).Seconds(), + ThroughputMbits: r.MBitsPerSecond(), + } if r.Total { - tableWriter.AppendSeparator() + interval.StartTimeSeconds = 0 + outputResult.Overall = interval + } else { + outputResult.Intervals[i] = interval } - tableWriter.AppendRow(table.Row{ - fmt.Sprintf("%.2f-%.2f sec", r.IntervalStart.Sub(startTime).Seconds(), r.IntervalEnd.Sub(startTime).Seconds()), - fmt.Sprintf("%.4f Mbits/sec", r.MBitsPerSecond()), - }) } - _, err = fmt.Fprintln(inv.Stdout, tableWriter.Render()) + out, err := formatter.Format(inv.Context(), outputResult) + if err != nil { + return err + } + _, err = fmt.Fprintln(inv.Stdout, out) return err }, } @@ -173,5 +219,6 @@ func (r *RootCmd) speedtest() *serpent.Command { Value: serpent.StringOf(&pcapFile), }, } + formatter.AttachOptions(&cmd.Options) return cmd } diff --git a/cli/speedtest_test.go b/cli/speedtest_test.go index 9878ff04ab527..281fdcc1488d0 100644 --- a/cli/speedtest_test.go +++ b/cli/speedtest_test.go @@ -1,7 +1,9 @@ package cli_test import ( + "bytes" "context" + "encoding/json" "testing" "github.com/stretchr/testify/assert" @@ -10,6 +12,7 @@ import ( 
"cdr.dev/slog" "cdr.dev/slog/sloggers/slogtest" "github.com/coder/coder/v2/agent/agenttest" + "github.com/coder/coder/v2/cli" "github.com/coder/coder/v2/cli/clitest" "github.com/coder/coder/v2/coderd/coderdtest" "github.com/coder/coder/v2/codersdk" @@ -56,3 +59,45 @@ func TestSpeedtest(t *testing.T) { }) <-cmdDone } + +func TestSpeedtestJson(t *testing.T) { + t.Parallel() + t.Skip("Potentially flaky test - see https://github.com/coder/coder/issues/6321") + if testing.Short() { + t.Skip("This test takes a minimum of 5ms per a hardcoded value in Tailscale!") + } + client, workspace, agentToken := setupWorkspaceForAgent(t) + _ = agenttest.New(t, client.URL, agentToken) + coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID) + + ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) + defer cancel() + + require.Eventually(t, func() bool { + ws, err := client.Workspace(ctx, workspace.ID) + if !assert.NoError(t, err) { + return false + } + a := ws.LatestBuild.Resources[0].Agents[0] + return a.Status == codersdk.WorkspaceAgentConnected && + a.LifecycleState == codersdk.WorkspaceAgentLifecycleReady + }, testutil.WaitLong, testutil.IntervalFast, "agent is not ready") + + inv, root := clitest.New(t, "speedtest", "--output=json", workspace.Name) + clitest.SetupConfig(t, client, root) + out := bytes.NewBuffer(nil) + inv.Stdout = out + ctx, cancel = context.WithTimeout(context.Background(), testutil.WaitLong) + defer cancel() + + inv.Logger = slogtest.Make(t, nil).Named("speedtest").Leveled(slog.LevelDebug) + cmdDone := tGo(t, func() { + err := inv.WithContext(ctx).Run() + assert.NoError(t, err) + }) + <-cmdDone + + var result cli.SpeedtestResult + require.NoError(t, json.Unmarshal(out.Bytes(), &result)) + require.Len(t, result.Intervals, 5) +} diff --git a/cli/ssh.go b/cli/ssh.go index aa8bdadb9d0dd..e4e9fadf5e8e8 100644 --- a/cli/ssh.go +++ b/cli/ssh.go @@ -12,6 +12,7 @@ import ( "os" "os/exec" "path/filepath" + "slices" "strings" "sync" "time" @@ -40,6 +41,10 @@ import ( "github.com/coder/serpent" ) +const ( + disableUsageApp = "disable" +) + var ( workspacePollInterval = time.Minute autostopNotifyCountdown = []time.Duration{30 * time.Minute} @@ -57,6 +62,7 @@ func (r *RootCmd) ssh() *serpent.Command { logDirPath string remoteForwards []string env []string + usageApp string disableAutostart bool ) client := new(codersdk.Client) @@ -251,6 +257,15 @@ func (r *RootCmd) ssh() *serpent.Command { stopPolling := tryPollWorkspaceAutostop(ctx, client, workspace) defer stopPolling() + usageAppName := getUsageAppName(usageApp) + if usageAppName != "" { + closeUsage := client.UpdateWorkspaceUsageWithBodyContext(ctx, workspace.ID, codersdk.PostWorkspaceUsageRequest{ + AgentID: workspaceAgent.ID, + AppName: usageAppName, + }) + defer closeUsage() + } + if stdio { rawSSH, err := conn.SSH(ctx) if err != nil { @@ -509,6 +524,13 @@ func (r *RootCmd) ssh() *serpent.Command { FlagShorthand: "e", Value: serpent.StringArrayOf(&env), }, + { + Flag: "usage-app", + Description: "Specifies the usage app to use for workspace activity tracking.", + Env: "CODER_SSH_USAGE_APP", + Value: serpent.StringOf(&usageApp), + Hidden: true, + }, sshDisableAutostartOption(serpent.BoolOf(&disableAutostart)), } return cmd @@ -711,12 +733,12 @@ func tryPollWorkspaceAutostop(ctx context.Context, client *codersdk.Client, work lock := flock.New(filepath.Join(os.TempDir(), "coder-autostop-notify-"+workspace.ID.String())) conditionCtx, cancelCondition := context.WithCancel(ctx) condition := notifyCondition(conditionCtx, 
client, workspace.ID, lock) - stopFunc := notify.Notify(condition, workspacePollInterval, autostopNotifyCountdown...) + notifier := notify.New(condition, workspacePollInterval, autostopNotifyCountdown) return func() { // With many "ssh" processes running, `lock.TryLockContext` can be hanging until the context canceled. // Without this cancellation, a CLI process with failed remote-forward could be hanging indefinitely. cancelCondition() - stopFunc() + notifier.Close() } } @@ -1044,3 +1066,20 @@ func (r stdioErrLogReader) Read(_ []byte) (int, error) { r.l.Error(context.Background(), "reading from stdin in stdio mode is not allowed") return 0, io.EOF } + +func getUsageAppName(usageApp string) codersdk.UsageAppName { + if usageApp == disableUsageApp { + return "" + } + + allowedUsageApps := []string{ + string(codersdk.UsageAppNameSSH), + string(codersdk.UsageAppNameVscode), + string(codersdk.UsageAppNameJetbrains), + } + if slices.Contains(allowedUsageApps, usageApp) { + return codersdk.UsageAppName(usageApp) + } + + return codersdk.UsageAppNameSSH +} diff --git a/cli/ssh_test.go b/cli/ssh_test.go index 8c3c1a4e40fd1..ae93c4b0cea05 100644 --- a/cli/ssh_test.go +++ b/cli/ssh_test.go @@ -36,6 +36,7 @@ import ( "github.com/coder/coder/v2/agent" "github.com/coder/coder/v2/agent/agentssh" "github.com/coder/coder/v2/agent/agenttest" + agentproto "github.com/coder/coder/v2/agent/proto" "github.com/coder/coder/v2/cli/clitest" "github.com/coder/coder/v2/cli/cliui" "github.com/coder/coder/v2/coderd/coderdtest" @@ -43,6 +44,7 @@ import ( "github.com/coder/coder/v2/coderd/database/dbfake" "github.com/coder/coder/v2/coderd/database/dbtestutil" "github.com/coder/coder/v2/coderd/rbac" + "github.com/coder/coder/v2/coderd/workspacestats/workspacestatstest" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/provisioner/echo" "github.com/coder/coder/v2/provisionersdk/proto" @@ -1292,6 +1294,115 @@ func TestSSH(t *testing.T) { require.NoError(t, err) require.Len(t, ents, 1, "expected one file in logdir %s", logDir) }) + t.Run("UpdateUsage", func(t *testing.T) { + t.Parallel() + + type testCase struct { + name string + experiment bool + usageAppName string + expectedCalls int + expectedCountSSH int + expectedCountJetbrains int + expectedCountVscode int + } + tcs := []testCase{ + { + name: "NoExperiment", + }, + { + name: "Empty", + experiment: true, + expectedCalls: 1, + expectedCountSSH: 1, + }, + { + name: "SSH", + experiment: true, + usageAppName: "ssh", + expectedCalls: 1, + expectedCountSSH: 1, + }, + { + name: "Jetbrains", + experiment: true, + usageAppName: "jetbrains", + expectedCalls: 1, + expectedCountJetbrains: 1, + }, + { + name: "Vscode", + experiment: true, + usageAppName: "vscode", + expectedCalls: 1, + expectedCountVscode: 1, + }, + { + name: "InvalidDefaultsToSSH", + experiment: true, + usageAppName: "invalid", + expectedCalls: 1, + expectedCountSSH: 1, + }, + { + name: "Disable", + experiment: true, + usageAppName: "disable", + }, + } + + for _, tc := range tcs { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + dv := coderdtest.DeploymentValues(t) + if tc.experiment { + dv.Experiments = []string{string(codersdk.ExperimentWorkspaceUsage)} + } + batcher := &workspacestatstest.StatsBatcher{ + LastStats: &agentproto.Stats{}, + } + admin, store := coderdtest.NewWithDatabase(t, &coderdtest.Options{ + DeploymentValues: dv, + StatsBatcher: batcher, + }) + admin.SetLogger(slogtest.Make(t, nil).Named("client").Leveled(slog.LevelDebug)) + first := 
coderdtest.CreateFirstUser(t, admin) + client, user := coderdtest.CreateAnotherUser(t, admin, first.OrganizationID) + r := dbfake.WorkspaceBuild(t, store, database.Workspace{ + OrganizationID: first.OrganizationID, + OwnerID: user.ID, + }).WithAgent().Do() + workspace := r.Workspace + agentToken := r.AgentToken + inv, root := clitest.New(t, "ssh", workspace.Name, fmt.Sprintf("--usage-app=%s", tc.usageAppName)) + clitest.SetupConfig(t, client, root) + pty := ptytest.New(t).Attach(inv) + + ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) + defer cancel() + + cmdDone := tGo(t, func() { + err := inv.WithContext(ctx).Run() + assert.NoError(t, err) + }) + pty.ExpectMatch("Waiting") + + _ = agenttest.New(t, client.URL, agentToken) + coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID) + + // Shells on Mac, Windows, and Linux all exit shells with the "exit" command. + pty.WriteLine("exit") + <-cmdDone + + require.EqualValues(t, tc.expectedCalls, batcher.Called) + require.EqualValues(t, tc.expectedCountSSH, batcher.LastStats.SessionCountSsh) + require.EqualValues(t, tc.expectedCountJetbrains, batcher.LastStats.SessionCountJetbrains) + require.EqualValues(t, tc.expectedCountVscode, batcher.LastStats.SessionCountVscode) + }) + } + }) } //nolint:paralleltest // This test uses t.Setenv, parent test MUST NOT be parallel. diff --git a/cli/support.go b/cli/support.go index f66bcda13ba6f..5dfe7a45a151b 100644 --- a/cli/support.go +++ b/cli/support.go @@ -254,6 +254,7 @@ func writeBundle(src *support.Bundle, dest *zip.Writer) error { "deployment/health.json": src.Deployment.HealthReport, "network/connection_info.json": src.Network.ConnectionInfo, "network/netcheck.json": src.Network.Netcheck, + "network/interfaces.json": src.Network.Interfaces, "workspace/template.json": src.Workspace.Template, "workspace/template_version.json": src.Workspace.TemplateVersion, "workspace/parameters.json": src.Workspace.Parameters, diff --git a/cli/support_test.go b/cli/support_test.go index d9bee0fb2fb20..d53aac66c820c 100644 --- a/cli/support_test.go +++ b/cli/support_test.go @@ -197,6 +197,10 @@ func assertBundleContents(t *testing.T, path string, wantWorkspace bool, wantAge var v derphealth.Report decodeJSONFromZip(t, f, &v) require.NotEmpty(t, v, "netcheck should not be empty") + case "network/interfaces.json": + var v healthsdk.InterfacesReport + decodeJSONFromZip(t, f, &v) + require.NotEmpty(t, v, "interfaces should not be empty") case "workspace/workspace.json": var v codersdk.Workspace decodeJSONFromZip(t, f, &v) diff --git a/cli/templatepush.go b/cli/templatepush.go index e360aca9f77a7..b4ff8e50eb5ed 100644 --- a/cli/templatepush.go +++ b/cli/templatepush.go @@ -100,6 +100,16 @@ func (r *RootCmd) templatePush() *serpent.Command { return err } + // If user hasn't provided new provisioner tags, inherit ones from the active template version. 
+ if len(tags) == 0 && template.ActiveVersionID != uuid.Nil { + templateVersion, err := client.TemplateVersion(inv.Context(), template.ActiveVersionID) + if err != nil { + return err + } + tags = templateVersion.Job.Tags + inv.Logger.Info(inv.Context(), "reusing existing provisioner tags", "tags", tags) + } + userVariableValues, err := ParseUserVariableValues( varsFiles, variablesFile, @@ -407,9 +417,8 @@ func createValidTemplateVersion(inv *serpent.Invocation, args createValidTemplat if errors.As(err, &jobErr) && !codersdk.JobIsMissingParameterErrorCode(jobErr.Code) { return nil, err } - if err != nil { - return nil, err - } + + return nil, err } version, err = client.TemplateVersion(inv.Context(), version.ID) if err != nil { diff --git a/cli/templatepush_test.go b/cli/templatepush_test.go index 13c9fbc1f35c4..4e9c8613961e5 100644 --- a/cli/templatepush_test.go +++ b/cli/templatepush_test.go @@ -403,6 +403,135 @@ func TestTemplatePush(t *testing.T) { assert.NotEqual(t, template.ActiveVersionID, templateVersions[1].ID) }) + t.Run("ProvisionerTags", func(t *testing.T) { + t.Parallel() + + t.Run("ChangeTags", func(t *testing.T) { + t.Parallel() + + // Start the first provisioner + client, provisionerDocker, api := coderdtest.NewWithAPI(t, &coderdtest.Options{ + IncludeProvisionerDaemon: true, + ProvisionerDaemonTags: map[string]string{ + "docker": "true", + }, + }) + defer provisionerDocker.Close() + + // Start the second provisioner + provisionerFoobar := coderdtest.NewTaggedProvisionerDaemon(t, api, "provisioner-foobar", map[string]string{ + "foobar": "foobaz", + }) + defer provisionerFoobar.Close() + + owner := coderdtest.CreateFirstUser(t, client) + templateAdmin, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID, rbac.RoleTemplateAdmin()) + + // Create the template with initial tagged template version. + templateVersion := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, nil, func(ctvr *codersdk.CreateTemplateVersionRequest) { + ctvr.ProvisionerTags = map[string]string{ + "docker": "true", + } + }) + templateVersion = coderdtest.AwaitTemplateVersionJobCompleted(t, client, templateVersion.ID) + template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, templateVersion.ID) + + // Push new template version without provisioner tags. CLI should reuse tags from the previous version. 
+ source := clitest.CreateTemplateVersionSource(t, &echo.Responses{ + Parse: echo.ParseComplete, + ProvisionApply: echo.ApplyComplete, + }) + inv, root := clitest.New(t, "templates", "push", template.Name, "--directory", source, "--test.provisioner", string(database.ProvisionerTypeEcho), "--name", template.Name, + "--provisioner-tag", "foobar=foobaz") + clitest.SetupConfig(t, templateAdmin, root) + pty := ptytest.New(t).Attach(inv) + + execDone := make(chan error) + go func() { + execDone <- inv.Run() + }() + + matches := []struct { + match string + write string + }{ + {match: "Upload", write: "yes"}, + } + for _, m := range matches { + pty.ExpectMatch(m.match) + pty.WriteLine(m.write) + } + + require.NoError(t, <-execDone) + + // Verify template version tags + template, err := client.Template(context.Background(), template.ID) + require.NoError(t, err) + + templateVersion, err = client.TemplateVersion(context.Background(), template.ActiveVersionID) + require.NoError(t, err) + require.EqualValues(t, map[string]string{"foobar": "foobaz", "owner": "", "scope": "organization"}, templateVersion.Job.Tags) + }) + + t.Run("DoNotChangeTags", func(t *testing.T) { + t.Parallel() + + // Start the tagged provisioner + client := coderdtest.New(t, &coderdtest.Options{ + IncludeProvisionerDaemon: true, + ProvisionerDaemonTags: map[string]string{ + "docker": "true", + }, + }) + owner := coderdtest.CreateFirstUser(t, client) + templateAdmin, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID, rbac.RoleTemplateAdmin()) + + // Create the template with initial tagged template version. + templateVersion := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, nil, func(ctvr *codersdk.CreateTemplateVersionRequest) { + ctvr.ProvisionerTags = map[string]string{ + "docker": "true", + } + }) + templateVersion = coderdtest.AwaitTemplateVersionJobCompleted(t, client, templateVersion.ID) + template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, templateVersion.ID) + + // Push new template version without provisioner tags. CLI should reuse tags from the previous version. 
+ source := clitest.CreateTemplateVersionSource(t, &echo.Responses{ + Parse: echo.ParseComplete, + ProvisionApply: echo.ApplyComplete, + }) + inv, root := clitest.New(t, "templates", "push", template.Name, "--directory", source, "--test.provisioner", string(database.ProvisionerTypeEcho), "--name", template.Name) + clitest.SetupConfig(t, templateAdmin, root) + pty := ptytest.New(t).Attach(inv) + + execDone := make(chan error) + go func() { + execDone <- inv.Run() + }() + + matches := []struct { + match string + write string + }{ + {match: "Upload", write: "yes"}, + } + for _, m := range matches { + pty.ExpectMatch(m.match) + pty.WriteLine(m.write) + } + + require.NoError(t, <-execDone) + + // Verify template version tags + template, err := client.Template(context.Background(), template.ID) + require.NoError(t, err) + + templateVersion, err = client.TemplateVersion(context.Background(), template.ActiveVersionID) + require.NoError(t, err) + require.EqualValues(t, map[string]string{"docker": "true", "owner": "", "scope": "organization"}, templateVersion.Job.Tags) + }) + }) + t.Run("Variables", func(t *testing.T) { t.Parallel() diff --git a/cli/templateversionarchive.go b/cli/templateversionarchive.go index b63cf2e2441d7..f9ae87e330be0 100644 --- a/cli/templateversionarchive.go +++ b/cli/templateversionarchive.go @@ -166,7 +166,7 @@ func (r *RootCmd) archiveTemplateVersions() *serpent.Command { inv.Stdout, fmt.Sprintf("Archived %d versions from "+pretty.Sprint(cliui.DefaultStyles.Keyword, template.Name)+" at "+cliui.Timestamp(time.Now()), len(resp.ArchivedIDs)), ) - if ok, _ := inv.ParsedFlags().GetBool("verbose"); err == nil && ok { + if ok, _ := inv.ParsedFlags().GetBool("verbose"); ok { data, err := json.Marshal(resp) if err != nil { return xerrors.Errorf("marshal verbose response: %w", err) diff --git a/cli/testdata/coder_agent_--help.golden b/cli/testdata/coder_agent_--help.golden index 372395c4ba5fe..d6982fda18e7c 100644 --- a/cli/testdata/coder_agent_--help.golden +++ b/cli/testdata/coder_agent_--help.golden @@ -18,6 +18,9 @@ OPTIONS: --auth string, $CODER_AGENT_AUTH (default: token) Specify the authentication type to use for the agent. + --block-file-transfer bool, $CODER_AGENT_BLOCK_FILE_TRANSFER (default: false) + Block file transfer using known applications: nc,rsync,scp,sftp. + --debug-address string, $CODER_AGENT_DEBUG_ADDRESS (default: 127.0.0.1:2113) The bind address to serve a debug HTTP server. diff --git a/cli/testdata/coder_login_--help.golden b/cli/testdata/coder_login_--help.golden index f6fe15dc07273..e4109a494ed39 100644 --- a/cli/testdata/coder_login_--help.golden +++ b/cli/testdata/coder_login_--help.golden @@ -10,6 +10,9 @@ OPTIONS: Specifies an email address to use if creating the first user for the deployment. + --first-user-full-name string, $CODER_FIRST_USER_FULL_NAME + Specifies a human-readable name for the first user of the deployment. + --first-user-password string, $CODER_FIRST_USER_PASSWORD Specifies a password to use if creating the first user for the deployment. diff --git a/cli/testdata/coder_server_--help.golden b/cli/testdata/coder_server_--help.golden index 6d8f866c11c0b..acd2c62ead445 100644 --- a/cli/testdata/coder_server_--help.golden +++ b/cli/testdata/coder_server_--help.golden @@ -407,6 +407,9 @@ OIDC OPTIONS: --oidc-issuer-url string, $CODER_OIDC_ISSUER_URL Issuer URL to use for Login with OIDC. + --oidc-name-field string, $CODER_OIDC_NAME_FIELD (default: name) + OIDC claim field to use as the name. 
+ --oidc-group-regex-filter regexp, $CODER_OIDC_GROUP_REGEX_FILTER (default: .*) If provided any group name not matching the regex is ignored. This allows for filtering out groups that are not needed. This filter is diff --git a/cli/testdata/coder_speedtest_--help.golden b/cli/testdata/coder_speedtest_--help.golden index 60eb4026b1028..538c955fae252 100644 --- a/cli/testdata/coder_speedtest_--help.golden +++ b/cli/testdata/coder_speedtest_--help.golden @@ -6,6 +6,10 @@ USAGE: Run upload and download tests from your machine to a workspace OPTIONS: + -c, --column string-array (default: Interval,Throughput) + Columns to display in table output. Available columns: Interval, + Throughput. + -d, --direct bool Specifies whether to wait for a direct connection before testing speed. @@ -14,6 +18,9 @@ OPTIONS: Specifies whether to run in reverse mode where the client receives and the server sends. + -o, --output string (default: table) + Output format. Available formats: table, json. + --pcap-file string Specifies a file to write a network capture to. diff --git a/cli/testdata/coder_users_create_--help.golden b/cli/testdata/coder_users_create_--help.golden index 5216e00f3467b..d55d522181c95 100644 --- a/cli/testdata/coder_users_create_--help.golden +++ b/cli/testdata/coder_users_create_--help.golden @@ -7,6 +7,9 @@ OPTIONS: -e, --email string Specifies an email address for the new user. + -n, --full-name string + Specifies an optional human-readable name for the new user. + --login-type string Optionally specify the login type for the user. Valid values are: password, none, github, oidc. Using 'none' prevents the user from diff --git a/cli/testdata/coder_users_list_--output_json.golden b/cli/testdata/coder_users_list_--output_json.golden index b62ce009922f6..3c7ff44b6675a 100644 --- a/cli/testdata/coder_users_list_--output_json.golden +++ b/cli/testdata/coder_users_list_--output_json.golden @@ -3,7 +3,7 @@ "id": "[first user ID]", "username": "testuser", "avatar_url": "", - "name": "", + "name": "Test User", "email": "testuser@coder.com", "created_at": "[timestamp]", "last_seen_at": "[timestamp]", diff --git a/cli/testdata/server-config.yaml.golden b/cli/testdata/server-config.yaml.golden index bf49239bc4e63..9a34d6be56b20 100644 --- a/cli/testdata/server-config.yaml.golden +++ b/cli/testdata/server-config.yaml.golden @@ -306,6 +306,9 @@ oidc: # OIDC claim field to use as the username. # (default: preferred_username, type: string) usernameField: preferred_username + # OIDC claim field to use as the name. + # (default: name, type: string) + nameField: name # OIDC claim field to use as the email. # (default: email, type: string) emailField: email diff --git a/cli/usercreate.go b/cli/usercreate.go index 28cc3c0fe7049..3c4a43b33bc2d 100644 --- a/cli/usercreate.go +++ b/cli/usercreate.go @@ -10,6 +10,7 @@ import ( "github.com/coder/pretty" "github.com/coder/coder/v2/cli/cliui" + "github.com/coder/coder/v2/coderd/httpapi" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/cryptorand" "github.com/coder/serpent" @@ -19,6 +20,7 @@ func (r *RootCmd) userCreate() *serpent.Command { var ( email string username string + name string password string disableLogin bool loginType string @@ -35,6 +37,9 @@ func (r *RootCmd) userCreate() *serpent.Command { if err != nil { return err } + // We only prompt for the full name if both username and email have not + // been set. This is to avoid breaking existing non-interactive usage. 
+ shouldPromptName := username == "" && email == "" if username == "" { username, err = cliui.Prompt(inv, cliui.PromptOptions{ Text: "Username:", @@ -58,6 +63,18 @@ func (r *RootCmd) userCreate() *serpent.Command { return err } } + if name == "" && shouldPromptName { + rawName, err := cliui.Prompt(inv, cliui.PromptOptions{ + Text: "Full name (optional):", + }) + if err != nil { + return err + } + name = httpapi.NormalizeRealUsername(rawName) + if !strings.EqualFold(rawName, name) { + cliui.Warnf(inv.Stderr, "Normalized name to %q", name) + } + } userLoginType := codersdk.LoginTypePassword if disableLogin && loginType != "" { return xerrors.New("You cannot specify both --disable-login and --login-type") @@ -79,6 +96,7 @@ func (r *RootCmd) userCreate() *serpent.Command { _, err = client.CreateUser(inv.Context(), codersdk.CreateUserRequest{ Email: email, Username: username, + Name: name, Password: password, OrganizationID: organization.ID, UserLoginType: userLoginType, @@ -127,6 +145,12 @@ Create a workspace `+pretty.Sprint(cliui.DefaultStyles.Code, "coder create")+`! Description: "Specifies a username for the new user.", Value: serpent.StringOf(&username), }, + { + Flag: "full-name", + FlagShorthand: "n", + Description: "Specifies an optional human-readable name for the new user.", + Value: serpent.StringOf(&name), + }, { Flag: "password", FlagShorthand: "p", diff --git a/cli/usercreate_test.go b/cli/usercreate_test.go index 5726cc84d25b5..66f7975d0bcdf 100644 --- a/cli/usercreate_test.go +++ b/cli/usercreate_test.go @@ -4,16 +4,19 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/coder/coder/v2/cli/clitest" "github.com/coder/coder/v2/coderd/coderdtest" "github.com/coder/coder/v2/pty/ptytest" + "github.com/coder/coder/v2/testutil" ) func TestUserCreate(t *testing.T) { t.Parallel() t.Run("Prompts", func(t *testing.T) { t.Parallel() + ctx := testutil.Context(t, testutil.WaitLong) client := coderdtest.New(t, nil) coderdtest.CreateFirstUser(t, client) inv, root := clitest.New(t, "users", "create") @@ -28,6 +31,7 @@ func TestUserCreate(t *testing.T) { matches := []string{ "Username", "dean", "Email", "dean@coder.com", + "Full name (optional):", "Mr. 
Dean Deanington", } for i := 0; i < len(matches); i += 2 { match := matches[i] @@ -35,6 +39,89 @@ func TestUserCreate(t *testing.T) { pty.ExpectMatch(match) pty.WriteLine(value) } - <-doneChan + _ = testutil.RequireRecvCtx(ctx, t, doneChan) + created, err := client.User(ctx, matches[1]) + require.NoError(t, err) + assert.Equal(t, matches[1], created.Username) + assert.Equal(t, matches[3], created.Email) + assert.Equal(t, matches[5], created.Name) + }) + + t.Run("PromptsNoName", func(t *testing.T) { + t.Parallel() + ctx := testutil.Context(t, testutil.WaitLong) + client := coderdtest.New(t, nil) + coderdtest.CreateFirstUser(t, client) + inv, root := clitest.New(t, "users", "create") + clitest.SetupConfig(t, client, root) + doneChan := make(chan struct{}) + pty := ptytest.New(t).Attach(inv) + go func() { + defer close(doneChan) + err := inv.Run() + assert.NoError(t, err) + }() + matches := []string{ + "Username", "noname", + "Email", "noname@coder.com", + "Full name (optional):", "", + } + for i := 0; i < len(matches); i += 2 { + match := matches[i] + value := matches[i+1] + pty.ExpectMatch(match) + pty.WriteLine(value) + } + _ = testutil.RequireRecvCtx(ctx, t, doneChan) + created, err := client.User(ctx, matches[1]) + require.NoError(t, err) + assert.Equal(t, matches[1], created.Username) + assert.Equal(t, matches[3], created.Email) + assert.Empty(t, created.Name) + }) + + t.Run("Args", func(t *testing.T) { + t.Parallel() + client := coderdtest.New(t, nil) + coderdtest.CreateFirstUser(t, client) + args := []string{ + "users", "create", + "-e", "dean@coder.com", + "-u", "dean", + "-n", "Mr. Dean Deanington", + "-p", "1n5ecureP4ssw0rd!", + } + inv, root := clitest.New(t, args...) + clitest.SetupConfig(t, client, root) + err := inv.Run() + require.NoError(t, err) + ctx := testutil.Context(t, testutil.WaitShort) + created, err := client.User(ctx, "dean") + require.NoError(t, err) + assert.Equal(t, args[3], created.Email) + assert.Equal(t, args[5], created.Username) + assert.Equal(t, args[7], created.Name) + }) + + t.Run("ArgsNoName", func(t *testing.T) { + t.Parallel() + client := coderdtest.New(t, nil) + coderdtest.CreateFirstUser(t, client) + args := []string{ + "users", "create", + "-e", "dean@coder.com", + "-u", "dean", + "-p", "1n5ecureP4ssw0rd!", + } + inv, root := clitest.New(t, args...) + clitest.SetupConfig(t, client, root) + err := inv.Run() + require.NoError(t, err) + ctx := testutil.Context(t, testutil.WaitShort) + created, err := client.User(ctx, args[5]) + require.NoError(t, err) + assert.Equal(t, args[3], created.Email) + assert.Equal(t, args[5], created.Username) + assert.Empty(t, created.Name) }) } diff --git a/cli/userlist.go b/cli/userlist.go index 955154ce30f62..616126699cc03 100644 --- a/cli/userlist.go +++ b/cli/userlist.go @@ -137,6 +137,7 @@ func (*userShowFormat) Format(_ context.Context, out interface{}) (string, error // Add rows for each of the user's fields. 
addRow("ID", user.ID.String()) addRow("Username", user.Username) + addRow("Full name", user.Name) addRow("Email", user.Email) addRow("Status", user.Status) addRow("Created At", user.CreatedAt.Format(time.Stamp)) diff --git a/cli/userlist_test.go b/cli/userlist_test.go index feca8746df32c..1a4409bb898ac 100644 --- a/cli/userlist_test.go +++ b/cli/userlist_test.go @@ -57,7 +57,14 @@ func TestUserList(t *testing.T) { err := json.Unmarshal(buf.Bytes(), &users) require.NoError(t, err, "unmarshal JSON output") require.Len(t, users, 2) - require.Contains(t, users[0].Email, "coder.com") + for _, u := range users { + assert.NotEmpty(t, u.ID) + assert.NotEmpty(t, u.Email) + assert.NotEmpty(t, u.Username) + assert.NotEmpty(t, u.Name) + assert.NotEmpty(t, u.CreatedAt) + assert.NotEmpty(t, u.Status) + } }) t.Run("NoURLFileErrorHasHelperText", func(t *testing.T) { t.Parallel() @@ -133,5 +140,6 @@ func TestUserShow(t *testing.T) { require.Equal(t, otherUser.ID, newUser.ID) require.Equal(t, otherUser.Username, newUser.Username) require.Equal(t, otherUser.Email, newUser.Email) + require.Equal(t, otherUser.Name, newUser.Name) }) } diff --git a/cli/vscodessh.go b/cli/vscodessh.go index 147436374b1f6..558b50c00fe95 100644 --- a/cli/vscodessh.go +++ b/cli/vscodessh.go @@ -110,7 +110,7 @@ func (r *RootCmd) vscodeSSH() *serpent.Command { // will call this command after the workspace is started. autostart := false - _, workspaceAgent, err := getWorkspaceAndAgent(ctx, inv, client, autostart, fmt.Sprintf("%s/%s", owner, name)) + workspace, workspaceAgent, err := getWorkspaceAndAgent(ctx, inv, client, autostart, fmt.Sprintf("%s/%s", owner, name)) if err != nil { return xerrors.Errorf("find workspace and agent: %w", err) } @@ -176,6 +176,13 @@ func (r *RootCmd) vscodeSSH() *serpent.Command { defer agentConn.Close() agentConn.AwaitReachable(ctx) + + closeUsage := client.UpdateWorkspaceUsageWithBodyContext(ctx, workspace.ID, codersdk.PostWorkspaceUsageRequest{ + AgentID: workspaceAgent.ID, + AppName: codersdk.UsageAppNameVscode, + }) + defer closeUsage() + rawSSH, err := agentConn.SSH(ctx) if err != nil { return err diff --git a/cli/vscodessh_test.go b/cli/vscodessh_test.go index a4f6ca19132c6..f80b6b0b6029e 100644 --- a/cli/vscodessh_test.go +++ b/cli/vscodessh_test.go @@ -9,9 +9,16 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "cdr.dev/slog" + "cdr.dev/slog/sloggers/slogtest" + "github.com/coder/coder/v2/agent/agenttest" + agentproto "github.com/coder/coder/v2/agent/proto" "github.com/coder/coder/v2/cli/clitest" "github.com/coder/coder/v2/coderd/coderdtest" + "github.com/coder/coder/v2/coderd/database" + "github.com/coder/coder/v2/coderd/database/dbfake" + "github.com/coder/coder/v2/coderd/workspacestats/workspacestatstest" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/pty/ptytest" "github.com/coder/coder/v2/testutil" @@ -22,7 +29,25 @@ import ( func TestVSCodeSSH(t *testing.T) { t.Parallel() ctx := testutil.Context(t, testutil.WaitLong) - client, workspace, agentToken := setupWorkspaceForAgent(t) + dv := coderdtest.DeploymentValues(t) + dv.Experiments = []string{string(codersdk.ExperimentWorkspaceUsage)} + batcher := &workspacestatstest.StatsBatcher{ + LastStats: &agentproto.Stats{}, + } + admin, store := coderdtest.NewWithDatabase(t, &coderdtest.Options{ + DeploymentValues: dv, + StatsBatcher: batcher, + }) + admin.SetLogger(slogtest.Make(t, nil).Named("client").Leveled(slog.LevelDebug)) + first := coderdtest.CreateFirstUser(t, admin) + client, user := 
coderdtest.CreateAnotherUser(t, admin, first.OrganizationID) + r := dbfake.WorkspaceBuild(t, store, database.Workspace{ + OrganizationID: first.OrganizationID, + OwnerID: user.ID, + }).WithAgent().Do() + workspace := r.Workspace + agentToken := r.AgentToken + user, err := client.User(ctx, codersdk.Me) require.NoError(t, err) @@ -65,4 +90,7 @@ func TestVSCodeSSH(t *testing.T) { if err := waiter.Wait(); err != nil { waiter.RequireIs(context.Canceled) } + + require.EqualValues(t, 1, batcher.Called) + require.EqualValues(t, 1, batcher.LastStats.SessionCountVscode) } diff --git a/clock/README.md b/clock/README.md new file mode 100644 index 0000000000000..34f72444884a0 --- /dev/null +++ b/clock/README.md @@ -0,0 +1,635 @@ +# Quartz + +A Go time testing library for writing deterministic unit tests + +_Note: Quartz is the name I'm targeting for the standalone open source project when we spin this +out._ + +Our high level goal is to write unit tests that + +1. execute quickly +2. don't flake +3. are straightforward to write and understand + +For tests to execute quickly without flakes, we want to focus on _determinism_: the test should run +the same each time, and it should be easy to force the system into a known state (no races) before +executing test assertions. `time.Sleep`, `runtime.Gosched()`, and +polling/[Eventually](https://pkg.go.dev/github.com/stretchr/testify/assert#Eventually) are all +symptoms of an inability to do this easily. + +## Usage + +### `Clock` interface + +In your application code, maintain a reference to a `quartz.Clock` instance to start timers and +tickers, instead of the bare `time` standard library. + +```go +import "github.com/coder/quartz" + +type Component struct { + ... + + // for testing + clock quartz.Clock +} +``` + +Whenever you would call into `time` to start a timer or ticker, call `Component`'s `clock` instead. + +In production, set this clock to `quartz.NewReal()` to create a clock that just transparently passes +through to the standard `time` library. + +### Mocking + +In your tests, you can use a `*Mock` to control the tickers and timers your code under test gets. + +```go +import ( + "testing" + "github.com/coder/quartz" +) + +func TestComponent(t *testing.T) { + mClock := quartz.NewMock(t) + comp := &Component{ + ... + clock: mClock, + } +} +``` + +The `*Mock` clock starts at Jan 1, 2024, 00:00 UTC by default, but you can set any start time you'd like prior to your test. + +```go +mClock := quartz.NewMock(t) +mClock.Set(time.Date(2021, 6, 18, 12, 0, 0, 0, time.UTC)) // June 18, 2021 @ 12pm UTC +``` + +#### Advancing the clock + +Once you begin setting timers or tickers, you cannot change the time backward, only advance it +forward. You may continue to use `Set()`, but it is often easier and clearer to use `Advance()`. + +For example, with a timer: + +```go +fired := false + +tmr := mClock.Afterfunc(time.Second, func() { + fired = true +}) +mClock.Advance(time.Second) +``` + +When you call `Advance()` it immediately moves the clock forward the given amount, and triggers any +tickers or timers that are scheduled to happen at that time. Any triggered events happen on separate +goroutines, so _do not_ immediately assert the results: + +```go +fired := false + +tmr := mClock.Afterfunc(time.Second, func() { + fired = true +}) +mClock.Advance(time.Second) + +// RACE CONDITION, DO NOT DO THIS! 
+if !fired { + t.Fatal("didn't fire") +} +``` + +`Advance()` (and `Set()` for that matter) return an `AdvanceWaiter` object you can use to wait for +all triggered events to complete. + +```go +fired := false +// set a test timeout so we don't wait the default `go test` timeout for a failure +ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + +tmr := mClock.Afterfunc(time.Second, func() { + fired = true +}) + +w := mClock.Advance(time.Second) +err := w.Wait(ctx) +if err != nil { + t.Fatal("AfterFunc f never completed") +} +if !fired { + t.Fatal("didn't fire") +} +``` + +The construction of waiting for the triggered events and failing the test if they don't complete is +very common, so there is a shorthand: + +```go +w := mClock.Advance(time.Second) +err := w.Wait(ctx) +if err != nil { + t.Fatal("AfterFunc f never completed") +} +``` + +is equivalent to: + +```go +w := mClock.Advance(time.Second) +w.MustWait(ctx) +``` + +or even more briefly: + +```go +mClock.Advance(time.Second).MustWait(ctx) +``` + +### Advance only to the next event + +One important restriction on advancing the clock is that you may only advance forward to the next +timer or ticker event and no further. The following will result in a test failure: + +```go +func TestAdvanceTooFar(t *testing.T) { + ctx, cancel := context.WithTimeout(10*time.Second) + defer cancel() + mClock := quartz.NewMock(t) + var firedAt time.Time + mClock.AfterFunc(time.Second, func() { + firedAt := mClock.Now() + }) + mClock.Advance(2*time.Second).MustWait(ctx) +} +``` + +This is a deliberate design decision to allow `Advance()` to immediately and synchronously move the +clock forward (even without calling `Wait()` on returned waiter). This helps meet Quartz's design +goals of writing deterministic and easy to understand unit tests. It also allows the clock to be +advanced, deterministically _during_ the execution of a tick or timer function, as explained in the +next sections on Traps. + +Advancing multiple events can be accomplished via looping. E.g. if you have a 1-second ticker + +```go +for i := 0; i < 10; i++ { + mClock.Advance(time.Second).MustWait(ctx) +} +``` + +will advance 10 ticks. + +If you don't know or don't want to compute the time to the next event, you can use `AdvanceNext()`. + +```go +d, w := mClock.AdvanceNext() +w.MustWait(ctx) +// d contains the duration we advanced +``` + +`d, ok := Peek()` returns the duration until the next event, if any (`ok` is `true`). You can use +this to advance a specific time, regardless of the tickers and timer events: + +```go +desired := time.Minute // time to advance +for desired > 0 { + p, ok := mClock.Peek() + if !ok || p > desired { + mClock.Advance(desired).MustWait(ctx) + break + } + mClock.Advance(p).MustWait(ctx) + desired -= p +} +``` + +### Traps + +A trap allows you to match specific calls into the library while mocking, block their return, +inspect their arguments, then release them to allow them to return. They help you write +deterministic unit tests even when the code under test executes asynchronously from the test. + +You set your traps prior to executing code under test, and then wait for them to be triggered. 
+ +```go +func TestTrap(t *testing.T) { + ctx, cancel := context.WithTimeout(10*time.Second) + defer cancel() + mClock := quartz.NewMock(t) + trap := mClock.Trap().AfterFunc() + defer trap.Close() // stop trapping AfterFunc calls + + count := 0 + go mClock.AfterFunc(time.Hour, func(){ + count++ + }) + call := trap.MustWait(ctx) + call.Release() + if call.Duration != time.Hour { + t.Fatal("wrong duration") + } + + // Now that the async call to AfterFunc has occurred, we can advance the clock to trigger it + mClock.Advance(call.Duration).MustWait(ctx) + if count != 1 { + t.Fatal("wrong count") + } +} +``` + +In this test, the trap serves 2 purposes. Firstly, it allows us to capture and assert the duration +passed to the `AfterFunc` call. Secondly, it prevents a race between setting the timer and advancing +it. Since these things happen on different goroutines, if `Advance()` completes before +`AfterFunc()` is called, then the timer never pops in this test. + +Any untrapped calls immediately complete using the current time, and calling `Close()` on a trap +causes the mock clock to stop trapping those calls. + +You may also `Advance()` the clock between trapping a call and releasing it. The call uses the +current (mocked) time at the moment it is released. + +```go +func TestTrap2(t *testing.T) { + ctx, cancel := context.WithTimeout(10*time.Second) + defer cancel() + mClock := quartz.NewMock(t) + trap := mClock.Trap().Now() + defer trap.Close() // stop trapping AfterFunc calls + + var logs []string + done := make(chan struct{}) + go func(clk quartz.Clock){ + defer close(done) + start := clk.Now() + phase1() + p1end := clk.Now() + logs = append(fmt.Sprintf("Phase 1 took %s", p1end.Sub(start).String())) + phase2() + p2end := clk.Now() + logs = append(fmt.Sprintf("Phase 2 took %s", p2end.Sub(p1end).String())) + }(mClock) + + // start + trap.MustWait(ctx).Release() + // phase 1 + call := trap.MustWait(ctx) + mClock.Advance(3*time.Second).MustWait(ctx) + call.Release() + // phase 2 + call = trap.MustWait(ctx) + mClock.Advance(5*time.Second).MustWait(ctx) + call.Release() + + <-done + // Now logs contains []string{"Phase 1 took 3s", "Phase 2 took 5s"} +} +``` + +### Tags + +When multiple goroutines in the code under test call into the Clock, you can use `tags` to +distinguish them in your traps. + +```go +trap := mClock.Trap.Now("foo") // traps any calls that contain "foo" +defer trap.Close() + +foo := make(chan time.Time) +go func(){ + foo <- mClock.Now("foo", "bar") +}() +baz := make(chan time.Time) +go func(){ + baz <- mClock.Now("baz") +}() +call := trap.MustWait(ctx) +mClock.Advance(time.Second).MustWait(ctx) +call.Release() +// call.Tags contains []string{"foo", "bar"} + +gotFoo := <-foo // 1s after start +gotBaz := <-baz // ?? never trapped, so races with Advance() +``` + +Tags appear as an optional suffix on all `Clock` methods (type `...string`) and are ignored entirely +by the real clock. They also appear on all methods on returned timers and tickers. + +## Recommended Patterns + +### Options + +We use the Option pattern to inject the mock clock for testing, keeping the call signature in +production clean. The option pattern is compatible with other optional fields as well. + +```go +type Option func(*Thing) + +// WithTestClock is used in tests to inject a mock Clock +func WithTestClock(clk quartz.Clock) Option { + return func(t *Thing) { + t.clock = clk + } +} + +func NewThing(, opts ...Option) *Thing { + t := &Thing{ + ... 
+ clock: quartz.NewReal() + } + for _, o := range opts { + o(t) + } + return t +} +``` + +In tests, this becomes + +```go +func TestThing(t *testing.T) { + mClock := quartz.NewMock(t) + thing := NewThing(, WithTestClock(mClock)) + ... +} +``` + +### Tagging convention + +Tag your `Clock` method calls as: + +```go +func (c *Component) Method() { + now := c.clock.Now("Component", "Method") +} +``` + +or + +```go +func (c *Component) Method() { + start := c.clock.Now("Component", "Method", "start") + ... + end := c.clock.Now("Component", "Method", "end") +} +``` + +This makes it much less likely that code changes that introduce new components or methods will spoil +existing unit tests. + +## Why another time testing library? + +Writing good unit tests for components and functions that use the `time` package is difficult, even +though several open source libraries exist. In building Quartz, we took some inspiration from + +- [github.com/benbjohnson/clock](https://github.com/benbjohnson/clock) +- Tailscale's [tstest.Clock](https://github.com/coder/tailscale/blob/main/tstest/clock.go) +- [github.com/aspenmesh/tock](https://github.com/aspenmesh/tock) + +Quartz shares the high level design of a `Clock` interface that closely resembles the functions in +the `time` standard library, and a "real" clock passes thru to the standard library in production, +while a mock clock gives precise control in testing. + +As mentioned in our introduction, our high level goal is to write unit tests that + +1. execute quickly +2. don't flake +3. are straightforward to write and understand + +For several reasons, this is a tall order when it comes to code that depends on time, and we found +the existing libraries insufficient for our goals. + +### Preventing test flakes + +The following example comes from the README from benbjohnson/clock: + +```go +mock := clock.NewMock() +count := 0 + +// Kick off a timer to increment every 1 mock second. +go func() { + ticker := mock.Ticker(1 * time.Second) + for { + <-ticker.C + count++ + } +}() +runtime.Gosched() + +// Move the clock forward 10 seconds. +mock.Add(10 * time.Second) + +// This prints 10. +fmt.Println(count) +``` + +The first race condition is fairly obvious: moving the clock forward 10 seconds may generate 10 +ticks on the `ticker.C` channel, but there is no guarantee that `count++` executes before +`fmt.Println(count)`. + +The second race condition is more subtle, but `runtime.Gosched()` is the tell. Since the ticker +is started on a separate goroutine, there is no guarantee that `mock.Ticker()` executes before +`mock.Add()`. `runtime.Gosched()` is an attempt to get this to happen, but it makes no hard +promises. On a busy system, especially when running tests in parallel, this can flake, advance the +time 10 seconds first, then start the ticker and never generate a tick. + +Let's talk about how Quartz tackles these problems. + +In our experience, an extremely common use case is creating a ticker then doing a 2-arm `select` +with ticks in one and context expiring in another, i.e. 
+ +```go +t := time.NewTicker(duration) +for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.C: + err := do() + if err != nil { + return err + } + } +} +``` + +In Quartz, we refactor this to be more compact and testing friendly: + +```go +t := clock.TickerFunc(ctx, duration, do) +return t.Wait() +``` + +This affords the mock `Clock` the ability to explicitly know when processing of a tick is finished +because it's wrapped in the function passed to `TickerFunc` (`do()` in this example). + +In Quartz, when you advance the clock, you are returned an object you can `Wait()` on to ensure all +ticks and timers triggered are finished. This solves the first race condition in the example. + +(As an aside, we still support a traditional standard library-style `Ticker`. You may find it useful +if you want to keep your code as close as possible to the standard library, or if you need to use +the channel in a larger `select` block. In that case, you'll have to find some other mechanism to +sync tick processing to your test code.) + +To prevent race conditions related to the starting of the ticker, Quartz allows you to set "traps" +for calls that access the clock. + +```go +func TestTicker(t *testing.T) { + mClock := quartz.NewMock(t) + trap := mClock.Trap().TickerFunc() + defer trap.Close() // stop trapping at end + go runMyTicker(mClock) // async calls TickerFunc() + call := trap.Wait(context.Background()) // waits for a call and blocks its return + call.Release() // allow the TickerFunc() call to return + // optionally check the duration using call.Duration + // Move the clock forward 1 tick + mClock.Advance(time.Second).MustWait(context.Background()) + // assert results of the tick +} +``` + +Trapping and then releasing the call to `TickerFunc()` ensures the ticker is started at a +deterministic time, so our calls to `Advance()` will have a predictable effect. + +Take a look at `TestExampleTickerFunc` in `example_test.go` for a complete worked example. + +### Complex time dependence + +Another difficult issue to handle when unit testing is when some code under test makes multiple +calls that depend on the time, and you want to simulate some time passing between them. + +A very basic example is measuring how long something took: + +```go +var measurement time.Duration +go func(clock quartz.Clock) { + start := clock.Now() + doSomething() + measurement = clock.Since(start) +}(mClock) + +// how to get measurement to be, say, 5 seconds? +``` + +The two calls into the clock happen asynchronously, so we need to be able to advance the clock after +the first call to `Now()` but before the call to `Since()`. Doing this with the libraries we +mentioned above means that you have to be able to mock out or otherwise block the completion of +`doSomething()`. + +But, with the trap functionality we mentioned in the previous section, you can deterministically +control the time each call sees. + +```go +trap := mClock.Trap().Since() +var measurement time.Duration +go func(clock quartz.Clock) { + start := clock.Now() + doSomething() + measurement = clock.Since(start) +}(mClock) + +c := trap.Wait(ctx) +mClock.Advance(5*time.Second) +c.Release() +``` + +We wait until we trap the `clock.Since()` call, which implies that `clock.Now()` has completed, then +advance the mock clock 5 seconds. Finally, we release the `clock.Since()` call. Any changes to the +clock that happen _before_ we release the call will be included in the time used for the +`clock.Since()` call. 
+ +As a more involved example, consider an inactivity timeout: we want something to happen if there is +no activity recorded for some period, say 10 minutes in the following example: + +```go +type InactivityTimer struct { + mu sync.Mutex + activity time.Time + clock quartz.Clock +} + +func (i *InactivityTimer) Start() { + i.mu.Lock() + defer i.mu.Unlock() + next := i.clock.Until(i.activity.Add(10*time.Minute)) + t := i.clock.AfterFunc(next, func() { + i.mu.Lock() + defer i.mu.Unlock() + next := i.clock.Until(i.activity.Add(10*time.Minute)) + if next == 0 { + i.timeoutLocked() + return + } + t.Reset(next) + }) +} +``` + +The actual contents of `timeoutLocked()` doesn't matter for this example, and assume there are other +functions that record the latest `activity`. + +We found that some time testing libraries hold a lock on the mock clock while calling the function +passed to `AfterFunc`, resulting in a deadlock if you made clock calls from within. + +Others allow this sort of thing, but don't have the flexibility to test edge cases. There is a +subtle bug in our `Start()` function. The timer may pop a little late, and/or some measurable real +time may elapse before `Until()` gets called inside the `AfterFunc`. If there hasn't been activity, +`next` might be negative. + +To test this in Quartz, we'll use a trap. We only want to trap the inner `Until()` call, not the +initial one, so to make testing easier we can "tag" the call we want. Like this: + +```go +func (i *InactivityTimer) Start() { + i.mu.Lock() + defer i.mu.Unlock() + next := i.clock.Until(i.activity.Add(10*time.Minute)) + t := i.clock.AfterFunc(next, func() { + i.mu.Lock() + defer i.mu.Unlock() + next := i.clock.Until(i.activity.Add(10*time.Minute), "inner") + if next == 0 { + i.timeoutLocked() + return + } + t.Reset(next) + }) +} +``` + +All Quartz `Clock` functions, and functions on returned timers and tickers support zero or more +string tags that allow traps to match on them. + +```go +func TestInactivityTimer_Late(t *testing.T) { + // set a timeout on the test itself, so that if Wait functions get blocked, we don't have to + // wait for the default test timeout of 10 minutes. + ctx, cancel := context.WithTimeout(10*time.Second) + defer cancel() + mClock := quartz.NewMock(t) + trap := mClock.Trap.Until("inner") + defer trap.Close() + + it := &InactivityTimer{ + activity: mClock.Now(), + clock: mClock, + } + it.Start() + + // Trigger the AfterFunc + w := mClock.Advance(10*time.Minute) + c := trap.Wait(ctx) + // Advance the clock a few ms to simulate a busy system + mClock.Advance(3*time.Millisecond) + c.Release() // Until() returns + w.MustWait(ctx) // Wait for the AfterFunc to wrap up + + // Assert that the timeoutLocked() function was called +} +``` + +This test case will fail with our bugged implementation, since the triggered AfterFunc won't call +`timeoutLocked()` and instead will reset the timer with a negative number. The fix is easy, use +`next <= 0` as the comparison. diff --git a/clock/clock.go b/clock/clock.go new file mode 100644 index 0000000000000..ae550334844c2 --- /dev/null +++ b/clock/clock.go @@ -0,0 +1,43 @@ +// Package clock is a library for testing time related code. It exports an interface Clock that +// mimics the standard library time package functions. In production, an implementation that calls +// thru to the standard library is used. In testing, a Mock clock is used to precisely control and +// intercept time functions. 
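+//
+// Illustrative test sketch (t, ctx, and the code under test are assumed; see README.md and
+// example_test.go in this package for complete, runnable examples):
+//
+//	mClock := clock.NewMock(t)
+//	trap := mClock.Trap().TickerFunc()
+//	defer trap.Close()
+//	go runTickerLoop(mClock)                  // hypothetical code under test that starts a TickerFunc
+//	trap.MustWait(ctx).Release()              // the ticker is now registered
+//	mClock.Advance(time.Second).MustWait(ctx) // deterministically trigger one tick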
+package clock + +import ( + "context" + "time" +) + +type Clock interface { + // NewTicker returns a new Ticker containing a channel that will send the current time on the + // channel after each tick. The period of the ticks is specified by the duration argument. The + // ticker will adjust the time interval or drop ticks to make up for slow receivers. The + // duration d must be greater than zero; if not, NewTicker will panic. Stop the ticker to + // release associated resources. + NewTicker(d time.Duration, tags ...string) *Ticker + // TickerFunc is a convenience function that calls f on the interval d until either the given + // context expires or f returns an error. Callers may call Wait() on the returned Waiter to + // wait until this happens and obtain the error. The duration d must be greater than zero; if + // not, TickerFunc will panic. + TickerFunc(ctx context.Context, d time.Duration, f func() error, tags ...string) Waiter + // NewTimer creates a new Timer that will send the current time on its channel after at least + // duration d. + NewTimer(d time.Duration, tags ...string) *Timer + // AfterFunc waits for the duration to elapse and then calls f in its own goroutine. It returns + // a Timer that can be used to cancel the call using its Stop method. The returned Timer's C + // field is not used and will be nil. + AfterFunc(d time.Duration, f func(), tags ...string) *Timer + + // Now returns the current local time. + Now(tags ...string) time.Time + // Since returns the time elapsed since t. It is shorthand for Clock.Now().Sub(t). + Since(t time.Time, tags ...string) time.Duration + // Until returns the duration until t. It is shorthand for t.Sub(Clock.Now()). + Until(t time.Time, tags ...string) time.Duration +} + +// Waiter can be waited on for an error. +type Waiter interface { + Wait(tags ...string) error +} diff --git a/clock/example_test.go b/clock/example_test.go new file mode 100644 index 0000000000000..de72312d7d036 --- /dev/null +++ b/clock/example_test.go @@ -0,0 +1,149 @@ +package clock_test + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/coder/coder/v2/clock" +) + +type exampleTickCounter struct { + ctx context.Context + mu sync.Mutex + ticks int + clock clock.Clock +} + +func (c *exampleTickCounter) Ticks() int { + c.mu.Lock() + defer c.mu.Unlock() + return c.ticks +} + +func (c *exampleTickCounter) count() { + _ = c.clock.TickerFunc(c.ctx, time.Hour, func() error { + c.mu.Lock() + defer c.mu.Unlock() + c.ticks++ + return nil + }, "mytag") +} + +func newExampleTickCounter(ctx context.Context, clk clock.Clock) *exampleTickCounter { + tc := &exampleTickCounter{ctx: ctx, clock: clk} + go tc.count() + return tc +} + +// TestExampleTickerFunc demonstrates how to test the use of TickerFunc. +func TestExampleTickerFunc(t *testing.T) { + t.Parallel() + // nolint:gocritic // trying to avoid Coder-specific stuff with an eye toward spinning this out + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + mClock := clock.NewMock(t) + + // Because the ticker is started on a goroutine, we can't immediately start + // advancing the clock, or we will race with the start of the ticker. If we + // win that race, the clock gets advanced _before_ the ticker starts, and + // our ticker will not get a tick. + // + // To handle this, we set a trap for the call to TickerFunc(), so that we + // can assert it has been called before advancing the clock. 
+ trap := mClock.Trap().TickerFunc("mytag") + defer trap.Close() + + tc := newExampleTickCounter(ctx, mClock) + + // Here, we wait for our trap to be triggered. + call, err := trap.Wait(ctx) + if err != nil { + t.Fatal("ticker never started") + } + // it's good practice to release calls before any possible t.Fatal() calls + // so that we don't leave dangling goroutines waiting for the call to be + // released. + call.Release() + if call.Duration != time.Hour { + t.Fatal("unexpected duration") + } + + if tks := tc.Ticks(); tks != 0 { + t.Fatalf("expected 0 got %d ticks", tks) + } + + // Now that we know the ticker is started, we can advance the time. + mClock.Advance(time.Hour).MustWait(ctx) + + if tks := tc.Ticks(); tks != 1 { + t.Fatalf("expected 1 got %d ticks", tks) + } +} + +type exampleLatencyMeasurer struct { + mu sync.Mutex + lastLatency time.Duration +} + +func newExampleLatencyMeasurer(ctx context.Context, clk clock.Clock) *exampleLatencyMeasurer { + m := &exampleLatencyMeasurer{} + clk.TickerFunc(ctx, 10*time.Second, func() error { + start := clk.Now() + // m.doSomething() + latency := clk.Since(start) + m.mu.Lock() + defer m.mu.Unlock() + m.lastLatency = latency + return nil + }) + return m +} + +func (m *exampleLatencyMeasurer) LastLatency() time.Duration { + m.mu.Lock() + defer m.mu.Unlock() + return m.lastLatency +} + +func TestExampleLatencyMeasurer(t *testing.T) { + t.Parallel() + + // nolint:gocritic // trying to avoid Coder-specific stuff with an eye toward spinning this out + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + mClock := clock.NewMock(t) + trap := mClock.Trap().Since() + defer trap.Close() + + lm := newExampleLatencyMeasurer(ctx, mClock) + + w := mClock.Advance(10 * time.Second) // triggers first tick + c := trap.MustWait(ctx) // call to Since() + mClock.Advance(33 * time.Millisecond) + c.Release() + w.MustWait(ctx) + + if l := lm.LastLatency(); l != 33*time.Millisecond { + t.Fatalf("expected 33ms got %s", l.String()) + } + + // Next tick is in 10s - 33ms, but if we don't want to calculate, we can use: + d, w2 := mClock.AdvanceNext() + c = trap.MustWait(ctx) + mClock.Advance(17 * time.Millisecond) + c.Release() + w2.MustWait(ctx) + + expectedD := 10*time.Second - 33*time.Millisecond + if d != expectedD { + t.Fatalf("expected %s got %s", expectedD.String(), d.String()) + } + + if l := lm.LastLatency(); l != 17*time.Millisecond { + t.Fatalf("expected 17ms got %s", l.String()) + } +} diff --git a/clock/mock.go b/clock/mock.go new file mode 100644 index 0000000000000..650d65a6b2128 --- /dev/null +++ b/clock/mock.go @@ -0,0 +1,647 @@ +package clock + +import ( + "context" + "fmt" + "slices" + "sync" + "testing" + "time" + + "golang.org/x/xerrors" +) + +// Mock is the testing implementation of Clock. It tracks a time that monotonically increases +// during a test, triggering any timers or tickers automatically. 
+type Mock struct { + tb testing.TB + mu sync.Mutex + + // cur is the current time + cur time.Time + + all []event + nextTime time.Time + nextEvents []event + traps []*Trap +} + +type event interface { + next() time.Time + fire(t time.Time) +} + +func (m *Mock) TickerFunc(ctx context.Context, d time.Duration, f func() error, tags ...string) Waiter { + if d <= 0 { + panic("TickerFunc called with negative or zero duration") + } + m.mu.Lock() + defer m.mu.Unlock() + c := newCall(clockFunctionTickerFunc, tags, withDuration(d)) + m.matchCallLocked(c) + defer close(c.complete) + t := &mockTickerFunc{ + ctx: ctx, + d: d, + f: f, + nxt: m.cur.Add(d), + mock: m, + cond: sync.NewCond(&m.mu), + } + m.all = append(m.all, t) + m.recomputeNextLocked() + go t.waitForCtx() + return t +} + +func (m *Mock) NewTicker(d time.Duration, tags ...string) *Ticker { + if d <= 0 { + panic("NewTicker called with negative or zero duration") + } + m.mu.Lock() + defer m.mu.Unlock() + c := newCall(clockFunctionNewTicker, tags, withDuration(d)) + m.matchCallLocked(c) + defer close(c.complete) + // 1 element buffer follows standard library implementation + ticks := make(chan time.Time, 1) + t := &Ticker{ + C: ticks, + c: ticks, + d: d, + nxt: m.cur.Add(d), + mock: m, + } + m.addEventLocked(t) + return t +} + +func (m *Mock) NewTimer(d time.Duration, tags ...string) *Timer { + m.mu.Lock() + defer m.mu.Unlock() + c := newCall(clockFunctionNewTimer, tags, withDuration(d)) + defer close(c.complete) + m.matchCallLocked(c) + ch := make(chan time.Time, 1) + t := &Timer{ + C: ch, + c: ch, + nxt: m.cur.Add(d), + mock: m, + } + if d <= 0 { + // zero or negative duration timer means we should immediately fire + // it, rather than add it. + go t.fire(t.mock.cur) + return t + } + m.addEventLocked(t) + return t +} + +func (m *Mock) AfterFunc(d time.Duration, f func(), tags ...string) *Timer { + m.mu.Lock() + defer m.mu.Unlock() + c := newCall(clockFunctionAfterFunc, tags, withDuration(d)) + defer close(c.complete) + m.matchCallLocked(c) + t := &Timer{ + nxt: m.cur.Add(d), + fn: f, + mock: m, + } + if d <= 0 { + // zero or negative duration timer means we should immediately fire + // it, rather than add it. 
+ go t.fire(t.mock.cur) + return t + } + m.addEventLocked(t) + return t +} + +func (m *Mock) Now(tags ...string) time.Time { + m.mu.Lock() + defer m.mu.Unlock() + c := newCall(clockFunctionNow, tags) + defer close(c.complete) + m.matchCallLocked(c) + return m.cur +} + +func (m *Mock) Since(t time.Time, tags ...string) time.Duration { + m.mu.Lock() + defer m.mu.Unlock() + c := newCall(clockFunctionSince, tags, withTime(t)) + defer close(c.complete) + m.matchCallLocked(c) + return m.cur.Sub(t) +} + +func (m *Mock) Until(t time.Time, tags ...string) time.Duration { + m.mu.Lock() + defer m.mu.Unlock() + c := newCall(clockFunctionUntil, tags, withTime(t)) + defer close(c.complete) + m.matchCallLocked(c) + return t.Sub(m.cur) +} + +func (m *Mock) addEventLocked(e event) { + m.all = append(m.all, e) + m.recomputeNextLocked() +} + +func (m *Mock) recomputeNextLocked() { + var best time.Time + var events []event + for _, e := range m.all { + if best.IsZero() || e.next().Before(best) { + best = e.next() + events = []event{e} + continue + } + if e.next().Equal(best) { + events = append(events, e) + continue + } + } + m.nextTime = best + m.nextEvents = events +} + +func (m *Mock) removeTimer(t *Timer) { + m.mu.Lock() + defer m.mu.Unlock() + m.removeTimerLocked(t) +} + +func (m *Mock) removeTimerLocked(t *Timer) { + t.stopped = true + m.removeEventLocked(t) +} + +func (m *Mock) removeEventLocked(e event) { + defer m.recomputeNextLocked() + for i := range m.all { + if m.all[i] == e { + m.all = append(m.all[:i], m.all[i+1:]...) + return + } + } +} + +func (m *Mock) matchCallLocked(c *Call) { + var traps []*Trap + for _, t := range m.traps { + if t.matches(c) { + traps = append(traps, t) + } + } + if len(traps) == 0 { + return + } + c.releases.Add(len(traps)) + m.mu.Unlock() + for _, t := range traps { + go t.catch(c) + } + c.releases.Wait() + m.mu.Lock() +} + +// AdvanceWaiter is returned from Advance and Set calls and allows you to wait for ticks and timers +// to complete. In the case of functions passed to AfterFunc or TickerFunc, it waits for the +// functions to return. For other ticks & timers, it just waits for the tick to be delivered to +// the channel. +// +// If multiple timers or tickers trigger simultaneously, they are all run on separate +// go routines. +type AdvanceWaiter struct { + tb testing.TB + ch chan struct{} +} + +// Wait for all timers and ticks to complete, or until context expires. +func (w AdvanceWaiter) Wait(ctx context.Context) error { + select { + case <-w.ch: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// MustWait waits for all timers and ticks to complete, and fails the test immediately if the +// context completes first. MustWait must be called from the goroutine running the test or +// benchmark, similar to `t.FailNow()`. +func (w AdvanceWaiter) MustWait(ctx context.Context) { + w.tb.Helper() + select { + case <-w.ch: + return + case <-ctx.Done(): + w.tb.Fatalf("context expired while waiting for clock to advance: %s", ctx.Err()) + } +} + +// Done returns a channel that is closed when all timers and ticks complete. +func (w AdvanceWaiter) Done() <-chan struct{} { + return w.ch +} + +// Advance moves the clock forward by d, triggering any timers or tickers. The returned value can +// be used to wait for all timers and ticks to complete. Advance sets the clock forward before +// returning, and can only advance up to the next timer or tick event. It will fail the test if you +// attempt to advance beyond. 
+// +// If you need to advance exactly to the next event, and don't know or don't wish to calculate it, +// consider AdvanceNext(). +func (m *Mock) Advance(d time.Duration) AdvanceWaiter { + m.tb.Helper() + w := AdvanceWaiter{tb: m.tb, ch: make(chan struct{})} + m.mu.Lock() + fin := m.cur.Add(d) + // nextTime.IsZero implies no events scheduled. + if m.nextTime.IsZero() || fin.Before(m.nextTime) { + m.cur = fin + m.mu.Unlock() + close(w.ch) + return w + } + if fin.After(m.nextTime) { + m.tb.Errorf(fmt.Sprintf("cannot advance %s which is beyond next timer/ticker event in %s", + d.String(), m.nextTime.Sub(m.cur))) + m.mu.Unlock() + close(w.ch) + return w + } + + m.cur = m.nextTime + go m.advanceLocked(w) + return w +} + +func (m *Mock) advanceLocked(w AdvanceWaiter) { + defer close(w.ch) + wg := sync.WaitGroup{} + for i := range m.nextEvents { + e := m.nextEvents[i] + t := m.cur + wg.Add(1) + go func() { + e.fire(t) + wg.Done() + }() + } + // release the lock and let the events resolve. This allows them to call back into the + // Mock to query the time or set new timers. Each event should remove or reschedule + // itself from nextEvents. + m.mu.Unlock() + wg.Wait() +} + +// Set the time to t. If the time is after the current mocked time, then this is equivalent to +// Advance() with the difference. You may only Set the time earlier than the current time before +// starting tickers and timers (e.g. at the start of your test case). +func (m *Mock) Set(t time.Time) AdvanceWaiter { + m.tb.Helper() + w := AdvanceWaiter{tb: m.tb, ch: make(chan struct{})} + m.mu.Lock() + if t.Before(m.cur) { + defer close(w.ch) + defer m.mu.Unlock() + // past + if !m.nextTime.IsZero() { + m.tb.Error("Set mock clock to the past after timers/tickers started") + } + m.cur = t + return w + } + // future + // nextTime.IsZero implies no events scheduled. + if m.nextTime.IsZero() || t.Before(m.nextTime) { + defer close(w.ch) + defer m.mu.Unlock() + m.cur = t + return w + } + if t.After(m.nextTime) { + defer close(w.ch) + defer m.mu.Unlock() + m.tb.Errorf("cannot Set time to %s which is beyond next timer/ticker event at %s", + t.String(), m.nextTime) + return w + } + + m.cur = m.nextTime + go m.advanceLocked(w) + return w +} + +// AdvanceNext advances the clock to the next timer or tick event. It fails the test if there are +// none scheduled. It returns the duration the clock was advanced and a waiter that can be used to +// wait for the timer/tick event(s) to finish. +func (m *Mock) AdvanceNext() (time.Duration, AdvanceWaiter) { + m.mu.Lock() + m.tb.Helper() + w := AdvanceWaiter{tb: m.tb, ch: make(chan struct{})} + if m.nextTime.IsZero() { + defer close(w.ch) + defer m.mu.Unlock() + m.tb.Error("cannot AdvanceNext because there are no timers or tickers running") + } + d := m.nextTime.Sub(m.cur) + m.cur = m.nextTime + go m.advanceLocked(w) + return d, w +} + +// Peek returns the duration until the next ticker or timer event and the value +// true, or, if there are no running tickers or timers, it returns zero and +// false. +func (m *Mock) Peek() (d time.Duration, ok bool) { + m.mu.Lock() + defer m.mu.Unlock() + if m.nextTime.IsZero() { + return 0, false + } + return m.nextTime.Sub(m.cur), true +} + +// Trapper allows the creation of Traps +type Trapper struct { + // mock is the underlying Mock. 
This is a thin wrapper around Mock so that + // we can have our interface look like mClock.Trap().NewTimer("foo") + mock *Mock +} + +func (t Trapper) NewTimer(tags ...string) *Trap { + return t.mock.newTrap(clockFunctionNewTimer, tags) +} + +func (t Trapper) AfterFunc(tags ...string) *Trap { + return t.mock.newTrap(clockFunctionAfterFunc, tags) +} + +func (t Trapper) TimerStop(tags ...string) *Trap { + return t.mock.newTrap(clockFunctionTimerStop, tags) +} + +func (t Trapper) TimerReset(tags ...string) *Trap { + return t.mock.newTrap(clockFunctionTimerReset, tags) +} + +func (t Trapper) TickerFunc(tags ...string) *Trap { + return t.mock.newTrap(clockFunctionTickerFunc, tags) +} + +func (t Trapper) TickerFuncWait(tags ...string) *Trap { + return t.mock.newTrap(clockFunctionTickerFuncWait, tags) +} + +func (t Trapper) NewTicker(tags ...string) *Trap { + return t.mock.newTrap(clockFunctionNewTicker, tags) +} + +func (t Trapper) TickerStop(tags ...string) *Trap { + return t.mock.newTrap(clockFunctionTickerStop, tags) +} + +func (t Trapper) TickerReset(tags ...string) *Trap { + return t.mock.newTrap(clockFunctionTickerReset, tags) +} + +func (t Trapper) Now(tags ...string) *Trap { + return t.mock.newTrap(clockFunctionNow, tags) +} + +func (t Trapper) Since(tags ...string) *Trap { + return t.mock.newTrap(clockFunctionSince, tags) +} + +func (t Trapper) Until(tags ...string) *Trap { + return t.mock.newTrap(clockFunctionUntil, tags) +} + +func (m *Mock) Trap() Trapper { + return Trapper{m} +} + +func (m *Mock) newTrap(fn clockFunction, tags []string) *Trap { + m.mu.Lock() + defer m.mu.Unlock() + tr := &Trap{ + fn: fn, + tags: tags, + mock: m, + calls: make(chan *Call), + done: make(chan struct{}), + } + m.traps = append(m.traps, tr) + return tr +} + +// NewMock creates a new Mock with the time set to midnight UTC on Jan 1, 2024. +// You may re-set the time earlier than this, but only before timers or tickers +// are created. 
+func NewMock(tb testing.TB) *Mock { + cur, err := time.Parse(time.RFC3339, "2024-01-01T00:00:00Z") + if err != nil { + panic(err) + } + return &Mock{ + tb: tb, + cur: cur, + } +} + +var _ Clock = &Mock{} + +type mockTickerFunc struct { + ctx context.Context + d time.Duration + f func() error + nxt time.Time + mock *Mock + + // cond is a condition Locked on the main Mock.mu + cond *sync.Cond + // done is true when the ticker exits + done bool + // err holds the error when the ticker exits + err error +} + +func (m *mockTickerFunc) next() time.Time { + return m.nxt +} + +func (m *mockTickerFunc) fire(_ time.Time) { + m.mock.mu.Lock() + defer m.mock.mu.Unlock() + if m.done { + return + } + m.nxt = m.nxt.Add(m.d) + m.mock.recomputeNextLocked() + + m.mock.mu.Unlock() + err := m.f() + m.mock.mu.Lock() + if err != nil { + m.exitLocked(err) + } +} + +func (m *mockTickerFunc) exitLocked(err error) { + if m.done { + return + } + m.done = true + m.err = err + m.mock.removeEventLocked(m) + m.cond.Broadcast() +} + +func (m *mockTickerFunc) waitForCtx() { + <-m.ctx.Done() + m.mock.mu.Lock() + defer m.mock.mu.Unlock() + m.exitLocked(m.ctx.Err()) +} + +func (m *mockTickerFunc) Wait(tags ...string) error { + m.mock.mu.Lock() + defer m.mock.mu.Unlock() + c := newCall(clockFunctionTickerFuncWait, tags) + m.mock.matchCallLocked(c) + defer close(c.complete) + for !m.done { + m.cond.Wait() + } + return m.err +} + +var _ Waiter = &mockTickerFunc{} + +type clockFunction int + +const ( + clockFunctionNewTimer clockFunction = iota + clockFunctionAfterFunc + clockFunctionTimerStop + clockFunctionTimerReset + clockFunctionTickerFunc + clockFunctionTickerFuncWait + clockFunctionNewTicker + clockFunctionTickerReset + clockFunctionTickerStop + clockFunctionNow + clockFunctionSince + clockFunctionUntil +) + +type callArg func(c *Call) + +type Call struct { + Time time.Time + Duration time.Duration + Tags []string + + fn clockFunction + releases sync.WaitGroup + complete chan struct{} +} + +func (c *Call) Release() { + c.releases.Done() + <-c.complete +} + +func withTime(t time.Time) callArg { + return func(c *Call) { + c.Time = t + } +} + +func withDuration(d time.Duration) callArg { + return func(c *Call) { + c.Duration = d + } +} + +func newCall(fn clockFunction, tags []string, args ...callArg) *Call { + c := &Call{ + fn: fn, + Tags: tags, + complete: make(chan struct{}), + } + for _, a := range args { + a(c) + } + return c +} + +type Trap struct { + fn clockFunction + tags []string + mock *Mock + calls chan *Call + done chan struct{} +} + +func (t *Trap) catch(c *Call) { + select { + case t.calls <- c: + case <-t.done: + c.Release() + } +} + +func (t *Trap) matches(c *Call) bool { + if t.fn != c.fn { + return false + } + for _, tag := range t.tags { + if !slices.Contains(c.Tags, tag) { + return false + } + } + return true +} + +func (t *Trap) Close() { + t.mock.mu.Lock() + defer t.mock.mu.Unlock() + for i, tr := range t.mock.traps { + if t == tr { + t.mock.traps = append(t.mock.traps[:i], t.mock.traps[i+1:]...) 
+ } + } + close(t.done) +} + +var ErrTrapClosed = xerrors.New("trap closed") + +func (t *Trap) Wait(ctx context.Context) (*Call, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-t.done: + return nil, ErrTrapClosed + case c := <-t.calls: + return c, nil + } +} + +// MustWait calls Wait() and then if there is an error, immediately fails the +// test via tb.Fatalf() +func (t *Trap) MustWait(ctx context.Context) *Call { + t.mock.tb.Helper() + c, err := t.Wait(ctx) + if err != nil { + t.mock.tb.Fatalf("context expired while waiting for trap: %s", err.Error()) + } + return c +} diff --git a/clock/mock_test.go b/clock/mock_test.go new file mode 100644 index 0000000000000..69aa683fded4a --- /dev/null +++ b/clock/mock_test.go @@ -0,0 +1,216 @@ +package clock_test + +import ( + "context" + "testing" + "time" + + "github.com/coder/coder/v2/clock" +) + +func TestTimer_NegativeDuration(t *testing.T) { + t.Parallel() + // nolint:gocritic // trying to avoid Coder-specific stuff with an eye toward spinning this out + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + mClock := clock.NewMock(t) + start := mClock.Now() + trap := mClock.Trap().NewTimer() + defer trap.Close() + + timers := make(chan *clock.Timer, 1) + go func() { + timers <- mClock.NewTimer(-time.Second) + }() + c := trap.MustWait(ctx) + c.Release() + // trap returns the actual passed value + if c.Duration != -time.Second { + t.Fatalf("expected -time.Second, got: %v", c.Duration) + } + + tmr := <-timers + select { + case <-ctx.Done(): + t.Fatal("timeout waiting for timer") + case tme := <-tmr.C: + // the tick is the current time, not the past + if !tme.Equal(start) { + t.Fatalf("expected time %v, got %v", start, tme) + } + } + if tmr.Stop() { + t.Fatal("timer still running") + } +} + +func TestAfterFunc_NegativeDuration(t *testing.T) { + t.Parallel() + // nolint:gocritic // trying to avoid Coder-specific stuff with an eye toward spinning this out + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + mClock := clock.NewMock(t) + trap := mClock.Trap().AfterFunc() + defer trap.Close() + + timers := make(chan *clock.Timer, 1) + done := make(chan struct{}) + go func() { + timers <- mClock.AfterFunc(-time.Second, func() { + close(done) + }) + }() + c := trap.MustWait(ctx) + c.Release() + // trap returns the actual passed value + if c.Duration != -time.Second { + t.Fatalf("expected -time.Second, got: %v", c.Duration) + } + + tmr := <-timers + select { + case <-ctx.Done(): + t.Fatal("timeout waiting for timer") + case <-done: + // OK! 
+ } + if tmr.Stop() { + t.Fatal("timer still running") + } +} + +func TestNewTicker(t *testing.T) { + t.Parallel() + // nolint:gocritic // trying to avoid Coder-specific stuff with an eye toward spinning this out + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + mClock := clock.NewMock(t) + start := mClock.Now() + trapNT := mClock.Trap().NewTicker("new") + defer trapNT.Close() + trapStop := mClock.Trap().TickerStop("stop") + defer trapStop.Close() + trapReset := mClock.Trap().TickerReset("reset") + defer trapReset.Close() + + tickers := make(chan *clock.Ticker, 1) + go func() { + tickers <- mClock.NewTicker(time.Hour, "new") + }() + c := trapNT.MustWait(ctx) + c.Release() + if c.Duration != time.Hour { + t.Fatalf("expected time.Hour, got: %v", c.Duration) + } + tkr := <-tickers + + for i := 0; i < 3; i++ { + mClock.Advance(time.Hour).MustWait(ctx) + } + + // should get first tick, rest dropped + tTime := start.Add(time.Hour) + select { + case <-ctx.Done(): + t.Fatal("timeout waiting for ticker") + case tick := <-tkr.C: + if !tick.Equal(tTime) { + t.Fatalf("expected time %v, got %v", tTime, tick) + } + } + + go tkr.Reset(time.Minute, "reset") + c = trapReset.MustWait(ctx) + mClock.Advance(time.Second).MustWait(ctx) + c.Release() + if c.Duration != time.Minute { + t.Fatalf("expected time.Minute, got: %v", c.Duration) + } + mClock.Advance(time.Minute).MustWait(ctx) + + // tick should show present time, ensuring the 2 hour ticks got dropped when + // we didn't read from the channel. + tTime = mClock.Now() + select { + case <-ctx.Done(): + t.Fatal("timeout waiting for ticker") + case tick := <-tkr.C: + if !tick.Equal(tTime) { + t.Fatalf("expected time %v, got %v", tTime, tick) + } + } + + go tkr.Stop("stop") + trapStop.MustWait(ctx).Release() + mClock.Advance(time.Hour).MustWait(ctx) + select { + case <-tkr.C: + t.Fatal("ticker still running") + default: + // OK + } + + // Resetting after stop + go tkr.Reset(time.Minute, "reset") + trapReset.MustWait(ctx).Release() + mClock.Advance(time.Minute).MustWait(ctx) + tTime = mClock.Now() + select { + case <-ctx.Done(): + t.Fatal("timeout waiting for ticker") + case tick := <-tkr.C: + if !tick.Equal(tTime) { + t.Fatalf("expected time %v, got %v", tTime, tick) + } + } +} + +func TestPeek(t *testing.T) { + t.Parallel() + // nolint:gocritic // trying to avoid Coder-specific stuff with an eye toward spinning this out + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + mClock := clock.NewMock(t) + d, ok := mClock.Peek() + if d != 0 { + t.Fatal("expected Peek() to return 0") + } + if ok { + t.Fatal("expected Peek() to return false") + } + + tmr := mClock.NewTimer(time.Second) + d, ok = mClock.Peek() + if d != time.Second { + t.Fatal("expected Peek() to return 1s") + } + if !ok { + t.Fatal("expected Peek() to return true") + } + + mClock.Advance(999 * time.Millisecond).MustWait(ctx) + d, ok = mClock.Peek() + if d != time.Millisecond { + t.Fatal("expected Peek() to return 1ms") + } + if !ok { + t.Fatal("expected Peek() to return true") + } + + stopped := tmr.Stop() + if !stopped { + t.Fatal("expected Stop() to return true") + } + + d, ok = mClock.Peek() + if d != 0 { + t.Fatal("expected Peek() to return 0") + } + if ok { + t.Fatal("expected Peek() to return false") + } +} diff --git a/clock/real.go b/clock/real.go new file mode 100644 index 0000000000000..55800c87c58ba --- /dev/null +++ b/clock/real.go @@ -0,0 +1,80 @@ +package clock + +import ( + "context" + "time" +) + 
+type realClock struct{} + +func NewReal() Clock { + return realClock{} +} + +func (realClock) NewTicker(d time.Duration, _ ...string) *Ticker { + tkr := time.NewTicker(d) + return &Ticker{ticker: tkr, C: tkr.C} +} + +func (realClock) TickerFunc(ctx context.Context, d time.Duration, f func() error, _ ...string) Waiter { + ct := &realContextTicker{ + ctx: ctx, + tkr: time.NewTicker(d), + f: f, + err: make(chan error, 1), + } + go ct.run() + return ct +} + +type realContextTicker struct { + ctx context.Context + tkr *time.Ticker + f func() error + err chan error +} + +func (t *realContextTicker) Wait(_ ...string) error { + return <-t.err +} + +func (t *realContextTicker) run() { + defer t.tkr.Stop() + for { + select { + case <-t.ctx.Done(): + t.err <- t.ctx.Err() + return + case <-t.tkr.C: + err := t.f() + if err != nil { + t.err <- err + return + } + } + } +} + +func (realClock) NewTimer(d time.Duration, _ ...string) *Timer { + rt := time.NewTimer(d) + return &Timer{C: rt.C, timer: rt} +} + +func (realClock) AfterFunc(d time.Duration, f func(), _ ...string) *Timer { + rt := time.AfterFunc(d, f) + return &Timer{C: rt.C, timer: rt} +} + +func (realClock) Now(_ ...string) time.Time { + return time.Now() +} + +func (realClock) Since(t time.Time, _ ...string) time.Duration { + return time.Since(t) +} + +func (realClock) Until(t time.Time, _ ...string) time.Duration { + return time.Until(t) +} + +var _ Clock = realClock{} diff --git a/clock/ticker.go b/clock/ticker.go new file mode 100644 index 0000000000000..43700f31d4635 --- /dev/null +++ b/clock/ticker.go @@ -0,0 +1,75 @@ +package clock + +import "time" + +// A Ticker holds a channel that delivers “ticks” of a clock at intervals. +type Ticker struct { + C <-chan time.Time + //nolint: revive + c chan time.Time + ticker *time.Ticker // realtime impl, if set + d time.Duration // period, if set + nxt time.Time // next tick time + mock *Mock // mock clock, if set + stopped bool // true if the ticker is not running +} + +func (t *Ticker) fire(tt time.Time) { + t.mock.mu.Lock() + defer t.mock.mu.Unlock() + if t.stopped { + return + } + for !t.nxt.After(t.mock.cur) { + t.nxt = t.nxt.Add(t.d) + } + t.mock.recomputeNextLocked() + select { + case t.c <- tt: + default: + } +} + +func (t *Ticker) next() time.Time { + return t.nxt +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. Stop does +// not close the channel, to prevent a concurrent goroutine reading from the +// channel from seeing an erroneous "tick". +func (t *Ticker) Stop(tags ...string) { + if t.ticker != nil { + t.ticker.Stop() + return + } + t.mock.mu.Lock() + defer t.mock.mu.Unlock() + c := newCall(clockFunctionTickerStop, tags) + t.mock.matchCallLocked(c) + defer close(c.complete) + t.mock.removeEventLocked(t) + t.stopped = true +} + +// Reset stops a ticker and resets its period to the specified duration. The +// next tick will arrive after the new period elapses. The duration d must be +// greater than zero; if not, Reset will panic. 
+func (t *Ticker) Reset(d time.Duration, tags ...string) { + if t.ticker != nil { + t.ticker.Reset(d) + return + } + t.mock.mu.Lock() + defer t.mock.mu.Unlock() + c := newCall(clockFunctionTickerReset, tags, withDuration(d)) + t.mock.matchCallLocked(c) + defer close(c.complete) + t.nxt = t.mock.cur.Add(d) + t.d = d + if t.stopped { + t.stopped = false + t.mock.addEventLocked(t) + } else { + t.mock.recomputeNextLocked() + } +} diff --git a/clock/timer.go b/clock/timer.go new file mode 100644 index 0000000000000..b0cf0b33ac07d --- /dev/null +++ b/clock/timer.go @@ -0,0 +1,81 @@ +package clock + +import "time" + +// The Timer type represents a single event. When the Timer expires, the current time will be sent +// on C, unless the Timer was created by AfterFunc. A Timer must be created with NewTimer or +// AfterFunc. +type Timer struct { + C <-chan time.Time + //nolint: revive + c chan time.Time + timer *time.Timer // realtime impl, if set + nxt time.Time // next tick time + mock *Mock // mock clock, if set + fn func() // AfterFunc function, if set + stopped bool // True if stopped, false if running +} + +func (t *Timer) fire(tt time.Time) { + t.mock.removeTimer(t) + if t.fn != nil { + t.fn() + } else { + t.c <- tt + } +} + +func (t *Timer) next() time.Time { + return t.nxt +} + +// Stop prevents the Timer from firing. It returns true if the call stops the timer, false if the +// timer has already expired or been stopped. Stop does not close the channel, to prevent a read +// from the channel succeeding incorrectly. +// +// See https://pkg.go.dev/time#Timer.Stop for more information. +func (t *Timer) Stop(tags ...string) bool { + if t.timer != nil { + return t.timer.Stop() + } + t.mock.mu.Lock() + defer t.mock.mu.Unlock() + c := newCall(clockFunctionTimerStop, tags) + t.mock.matchCallLocked(c) + defer close(c.complete) + result := !t.stopped + t.mock.removeTimerLocked(t) + return result +} + +// Reset changes the timer to expire after duration d. It returns true if the timer had been active, +// false if the timer had expired or been stopped. +// +// See https://pkg.go.dev/time#Timer.Reset for more information. +func (t *Timer) Reset(d time.Duration, tags ...string) bool { + if t.timer != nil { + return t.timer.Reset(d) + } + t.mock.mu.Lock() + defer t.mock.mu.Unlock() + c := newCall(clockFunctionTimerReset, tags, withDuration(d)) + t.mock.matchCallLocked(c) + defer close(c.complete) + result := !t.stopped + select { + case <-t.c: + default: + } + if d <= 0 { + // zero or negative duration timer means we should immediately re-fire + // it, rather than remove and re-add it. 
+ t.stopped = false + go t.fire(t.mock.cur) + return result + } + t.mock.removeTimerLocked(t) + t.stopped = false + t.nxt = t.mock.cur.Add(d) + t.mock.addEventLocked(t) + return result +} diff --git a/coderd/agentapi/api.go b/coderd/agentapi/api.go index ae0d594314e66..4e5e30ad9c761 100644 --- a/coderd/agentapi/api.go +++ b/coderd/agentapi/api.go @@ -25,6 +25,7 @@ import ( "github.com/coder/coder/v2/coderd/schedule" "github.com/coder/coder/v2/coderd/tracing" "github.com/coder/coder/v2/coderd/workspacestats" + "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/codersdk/agentsdk" "github.com/coder/coder/v2/tailnet" tailnetproto "github.com/coder/coder/v2/tailnet/proto" @@ -72,6 +73,7 @@ type Options struct { DerpForceWebSockets bool DerpMapUpdateFrequency time.Duration ExternalAuthConfigs []*externalauth.Config + Experiments codersdk.Experiments // Optional: // WorkspaceID avoids a future lookup to find the workspace ID by setting @@ -118,6 +120,7 @@ func New(opts Options) *API { Log: opts.Log, StatsReporter: opts.StatsReporter, AgentStatsRefreshInterval: opts.AgentStatsRefreshInterval, + Experiments: opts.Experiments, } api.LifecycleAPI = &LifecycleAPI{ diff --git a/coderd/agentapi/stats.go b/coderd/agentapi/stats.go index ee17897572f3d..4f6a6da1c8c66 100644 --- a/coderd/agentapi/stats.go +++ b/coderd/agentapi/stats.go @@ -7,25 +7,21 @@ import ( "golang.org/x/xerrors" "google.golang.org/protobuf/types/known/durationpb" - "github.com/google/uuid" - "cdr.dev/slog" agentproto "github.com/coder/coder/v2/agent/proto" "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/dbtime" "github.com/coder/coder/v2/coderd/workspacestats" + "github.com/coder/coder/v2/codersdk" ) -type StatsBatcher interface { - Add(now time.Time, agentID uuid.UUID, templateID uuid.UUID, userID uuid.UUID, workspaceID uuid.UUID, st *agentproto.Stats) error -} - type StatsAPI struct { AgentFn func(context.Context) (database.WorkspaceAgent, error) Database database.Store Log slog.Logger StatsReporter *workspacestats.Reporter AgentStatsRefreshInterval time.Duration + Experiments codersdk.Experiments TimeNowFn func() time.Time // defaults to dbtime.Now() } @@ -61,6 +57,16 @@ func (a *StatsAPI) UpdateStats(ctx context.Context, req *agentproto.UpdateStatsR slog.F("payload", req), ) + if a.Experiments.Enabled(codersdk.ExperimentWorkspaceUsage) { + // while the experiment is enabled we will not report + // session stats from the agent. This is because it is + // being handled by the CLI and the postWorkspaceUsage route. 
+ req.Stats.SessionCountSsh = 0 + req.Stats.SessionCountJetbrains = 0 + req.Stats.SessionCountVscode = 0 + req.Stats.SessionCountReconnectingPty = 0 + } + err = a.StatsReporter.ReportAgentStats( ctx, a.now(), diff --git a/coderd/agentapi/stats_test.go b/coderd/agentapi/stats_test.go index c304dea93ecc9..57534208be110 100644 --- a/coderd/agentapi/stats_test.go +++ b/coderd/agentapi/stats_test.go @@ -3,7 +3,6 @@ package agentapi_test import ( "context" "database/sql" - "sync" "sync/atomic" "testing" "time" @@ -23,37 +22,11 @@ import ( "github.com/coder/coder/v2/coderd/prometheusmetrics" "github.com/coder/coder/v2/coderd/schedule" "github.com/coder/coder/v2/coderd/workspacestats" + "github.com/coder/coder/v2/coderd/workspacestats/workspacestatstest" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/testutil" ) -type statsBatcher struct { - mu sync.Mutex - - called int64 - lastTime time.Time - lastAgentID uuid.UUID - lastTemplateID uuid.UUID - lastUserID uuid.UUID - lastWorkspaceID uuid.UUID - lastStats *agentproto.Stats -} - -var _ agentapi.StatsBatcher = &statsBatcher{} - -func (b *statsBatcher) Add(now time.Time, agentID uuid.UUID, templateID uuid.UUID, userID uuid.UUID, workspaceID uuid.UUID, st *agentproto.Stats) error { - b.mu.Lock() - defer b.mu.Unlock() - b.called++ - b.lastTime = now - b.lastAgentID = agentID - b.lastTemplateID = templateID - b.lastUserID = userID - b.lastWorkspaceID = workspaceID - b.lastStats = st - return nil -} - func TestUpdateStates(t *testing.T) { t.Parallel() @@ -94,7 +67,7 @@ func TestUpdateStates(t *testing.T) { panic("not implemented") }, } - batcher = &statsBatcher{} + batcher = &workspacestatstest.StatsBatcher{} updateAgentMetricsFnCalled = false req = &agentproto.UpdateStatsRequest{ @@ -188,15 +161,15 @@ func TestUpdateStates(t *testing.T) { ReportInterval: durationpb.New(10 * time.Second), }, resp) - batcher.mu.Lock() - defer batcher.mu.Unlock() - require.Equal(t, int64(1), batcher.called) - require.Equal(t, now, batcher.lastTime) - require.Equal(t, agent.ID, batcher.lastAgentID) - require.Equal(t, template.ID, batcher.lastTemplateID) - require.Equal(t, user.ID, batcher.lastUserID) - require.Equal(t, workspace.ID, batcher.lastWorkspaceID) - require.Equal(t, req.Stats, batcher.lastStats) + batcher.Mu.Lock() + defer batcher.Mu.Unlock() + require.Equal(t, int64(1), batcher.Called) + require.Equal(t, now, batcher.LastTime) + require.Equal(t, agent.ID, batcher.LastAgentID) + require.Equal(t, template.ID, batcher.LastTemplateID) + require.Equal(t, user.ID, batcher.LastUserID) + require.Equal(t, workspace.ID, batcher.LastWorkspaceID) + require.Equal(t, req.Stats, batcher.LastStats) ctx := testutil.Context(t, testutil.WaitShort) select { case <-ctx.Done(): @@ -222,7 +195,7 @@ func TestUpdateStates(t *testing.T) { panic("not implemented") }, } - batcher = &statsBatcher{} + batcher = &workspacestatstest.StatsBatcher{} req = &agentproto.UpdateStatsRequest{ Stats: &agentproto.Stats{ @@ -336,7 +309,7 @@ func TestUpdateStates(t *testing.T) { panic("not implemented") }, } - batcher = &statsBatcher{} + batcher = &workspacestatstest.StatsBatcher{} updateAgentMetricsFnCalled = false req = &agentproto.UpdateStatsRequest{ @@ -406,6 +379,138 @@ func TestUpdateStates(t *testing.T) { require.True(t, updateAgentMetricsFnCalled) }) + + t.Run("WorkspaceUsageExperiment", func(t *testing.T) { + t.Parallel() + + var ( + now = dbtime.Now() + dbM = dbmock.NewMockStore(gomock.NewController(t)) + ps = pubsub.NewInMemory() + + templateScheduleStore = 
schedule.MockTemplateScheduleStore{ + GetFn: func(context.Context, database.Store, uuid.UUID) (schedule.TemplateScheduleOptions, error) { + t.Fatal("getfn should not be called") + return schedule.TemplateScheduleOptions{}, nil + }, + SetFn: func(context.Context, database.Store, database.Template, schedule.TemplateScheduleOptions) (database.Template, error) { + t.Fatal("setfn not implemented") + return database.Template{}, nil + }, + } + batcher = &workspacestatstest.StatsBatcher{} + updateAgentMetricsFnCalled = false + + req = &agentproto.UpdateStatsRequest{ + Stats: &agentproto.Stats{ + ConnectionsByProto: map[string]int64{ + "tcp": 1, + "dean": 2, + }, + ConnectionCount: 3, + ConnectionMedianLatencyMs: 23, + RxPackets: 120, + RxBytes: 1000, + TxPackets: 130, + TxBytes: 2000, + SessionCountVscode: 1, + SessionCountJetbrains: 2, + SessionCountReconnectingPty: 3, + SessionCountSsh: 4, + Metrics: []*agentproto.Stats_Metric{ + { + Name: "awesome metric", + Value: 42, + }, + { + Name: "uncool metric", + Value: 0, + }, + }, + }, + } + ) + api := agentapi.StatsAPI{ + AgentFn: func(context.Context) (database.WorkspaceAgent, error) { + return agent, nil + }, + Database: dbM, + StatsReporter: workspacestats.NewReporter(workspacestats.ReporterOptions{ + Database: dbM, + Pubsub: ps, + StatsBatcher: batcher, + TemplateScheduleStore: templateScheduleStorePtr(templateScheduleStore), + UpdateAgentMetricsFn: func(ctx context.Context, labels prometheusmetrics.AgentMetricLabels, metrics []*agentproto.Stats_Metric) { + updateAgentMetricsFnCalled = true + assert.Equal(t, prometheusmetrics.AgentMetricLabels{ + Username: user.Username, + WorkspaceName: workspace.Name, + AgentName: agent.Name, + TemplateName: template.Name, + }, labels) + assert.Equal(t, req.Stats.Metrics, metrics) + }, + }), + AgentStatsRefreshInterval: 10 * time.Second, + TimeNowFn: func() time.Time { + return now + }, + Experiments: codersdk.Experiments{ + codersdk.ExperimentWorkspaceUsage, + }, + } + + // Workspace gets fetched. + dbM.EXPECT().GetWorkspaceByAgentID(gomock.Any(), agent.ID).Return(database.GetWorkspaceByAgentIDRow{ + Workspace: workspace, + TemplateName: template.Name, + }, nil) + + // We expect an activity bump because ConnectionCount > 0. + dbM.EXPECT().ActivityBumpWorkspace(gomock.Any(), database.ActivityBumpWorkspaceParams{ + WorkspaceID: workspace.ID, + NextAutostart: time.Time{}.UTC(), + }).Return(nil) + + // Workspace last used at gets bumped. + dbM.EXPECT().UpdateWorkspaceLastUsedAt(gomock.Any(), database.UpdateWorkspaceLastUsedAtParams{ + ID: workspace.ID, + LastUsedAt: now, + }).Return(nil) + + // User gets fetched to hit the UpdateAgentMetricsFn. + dbM.EXPECT().GetUserByID(gomock.Any(), user.ID).Return(user, nil) + + // Ensure that pubsub notifications are sent. 
+ notifyDescription := make(chan []byte) + ps.Subscribe(codersdk.WorkspaceNotifyChannel(workspace.ID), func(_ context.Context, description []byte) { + go func() { + notifyDescription <- description + }() + }) + + resp, err := api.UpdateStats(context.Background(), req) + require.NoError(t, err) + require.Equal(t, &agentproto.UpdateStatsResponse{ + ReportInterval: durationpb.New(10 * time.Second), + }, resp) + + batcher.Mu.Lock() + defer batcher.Mu.Unlock() + require.EqualValues(t, 1, batcher.Called) + require.EqualValues(t, 0, batcher.LastStats.SessionCountSsh) + require.EqualValues(t, 0, batcher.LastStats.SessionCountJetbrains) + require.EqualValues(t, 0, batcher.LastStats.SessionCountVscode) + require.EqualValues(t, 0, batcher.LastStats.SessionCountReconnectingPty) + ctx := testutil.Context(t, testutil.WaitShort) + select { + case <-ctx.Done(): + t.Error("timed out while waiting for pubsub notification") + case description := <-notifyDescription: + require.Equal(t, description, []byte{}) + } + require.True(t, updateAgentMetricsFnCalled) + }) } func templateScheduleStorePtr(store schedule.TemplateScheduleStore) *atomic.Pointer[schedule.TemplateScheduleStore] { diff --git a/coderd/apidoc/docs.go b/coderd/apidoc/docs.go index a284e46d0a0bb..0d923db69d8fc 100644 --- a/coderd/apidoc/docs.go +++ b/coderd/apidoc/docs.go @@ -1158,6 +1158,15 @@ const docTemplate = `{ ], "summary": "Get deployment DAUs", "operationId": "get-deployment-daus", + "parameters": [ + { + "type": "integer", + "description": "Time-zone offset (e.g. -2)", + "name": "tz_offset", + "in": "query", + "required": true + } + ], "responses": { "200": { "description": "OK", @@ -1185,18 +1194,41 @@ const docTemplate = `{ "operationId": "get-insights-about-templates", "parameters": [ { - "type": "integer", + "type": "string", + "format": "date-time", "description": "Start time", - "name": "before", + "name": "start_time", "in": "query", "required": true }, { - "type": "integer", + "type": "string", + "format": "date-time", "description": "End time", - "name": "after", + "name": "end_time", + "in": "query", + "required": true + }, + { + "enum": [ + "week", + "day" + ], + "type": "string", + "description": "Interval", + "name": "interval", "in": "query", "required": true + }, + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "csv", + "description": "Template IDs", + "name": "template_ids", + "in": "query" } ], "responses": { @@ -1226,18 +1258,30 @@ const docTemplate = `{ "operationId": "get-insights-about-user-activity", "parameters": [ { - "type": "integer", + "type": "string", + "format": "date-time", "description": "Start time", - "name": "before", + "name": "start_time", "in": "query", "required": true }, { - "type": "integer", + "type": "string", + "format": "date-time", "description": "End time", - "name": "after", + "name": "end_time", "in": "query", "required": true + }, + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "csv", + "description": "Template IDs", + "name": "template_ids", + "in": "query" } ], "responses": { @@ -1267,18 +1311,30 @@ const docTemplate = `{ "operationId": "get-insights-about-user-latency", "parameters": [ { - "type": "integer", + "type": "string", + "format": "date-time", "description": "Start time", - "name": "before", + "name": "start_time", "in": "query", "required": true }, { - "type": "integer", + "type": "string", + "format": "date-time", "description": "End time", - "name": "after", + "name": "end_time", "in": "query", "required": 
true + }, + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "csv", + "description": "Template IDs", + "name": "template_ids", + "in": "query" } ], "responses": { @@ -2189,6 +2245,43 @@ const docTemplate = `{ } } }, + "/organizations/{organization}/members": { + "get": { + "security": [ + { + "CoderSessionToken": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Members" + ], + "summary": "List organization members", + "operationId": "list-organization-members", + "parameters": [ + { + "type": "string", + "description": "Organization ID", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/codersdk.OrganizationMemberWithName" + } + } + } + } + } + }, "/organizations/{organization}/members/roles": { "get": { "security": [ @@ -2263,6 +2356,86 @@ const docTemplate = `{ } } }, + "/organizations/{organization}/members/{user}": { + "post": { + "security": [ + { + "CoderSessionToken": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Members" + ], + "summary": "Add organization member", + "operationId": "add-organization-member", + "parameters": [ + { + "type": "string", + "description": "Organization ID", + "name": "organization", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "User ID, name, or me", + "name": "user", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/codersdk.OrganizationMember" + } + } + } + }, + "delete": { + "security": [ + { + "CoderSessionToken": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Members" + ], + "summary": "Remove organization member", + "operationId": "remove-organization-member", + "parameters": [ + { + "type": "string", + "description": "Organization ID", + "name": "organization", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "User ID, name, or me", + "name": "user", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/codersdk.OrganizationMember" + } + } + } + } + }, "/organizations/{organization}/members/{user}/roles": { "put": { "security": [ @@ -2928,6 +3101,34 @@ const docTemplate = `{ } } }, + "/templates": { + "get": { + "security": [ + { + "CoderSessionToken": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Templates" + ], + "summary": "Get all templates", + "operationId": "get-all-templates", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/codersdk.Template" + } + } + } + } + } + }, "/templates/{template}": { "get": { "security": [ @@ -5640,62 +5841,6 @@ const docTemplate = `{ } } }, - "/workspaceagents/me/app-health": { - "post": { - "security": [ - { - "CoderSessionToken": [] - } - ], - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "Agents" - ], - "summary": "Submit workspace agent application health", - "operationId": "submit-workspace-agent-application-health", - "parameters": [ - { - "description": "Application health request", - "name": "request", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/agentsdk.PostAppHealthsRequest" - } - } - ], - "responses": { - "200": { - "description": "OK" - } - } - } - }, - 
"/workspaceagents/me/coordinate": { - "get": { - "security": [ - { - "CoderSessionToken": [] - } - ], - "description": "It accepts a WebSocket connection to an agent that listens to\nincoming connections and publishes node updates.", - "tags": [ - "Agents" - ], - "summary": "Coordinate workspace agent via Tailnet", - "operationId": "coordinate-workspace-agent-via-tailnet", - "responses": { - "101": { - "description": "Switching Protocols" - } - } - } - }, "/workspaceagents/me/external-auth": { "get": { "security": [ @@ -5815,8 +5960,8 @@ const docTemplate = `{ } } }, - "/workspaceagents/me/logs": { - "patch": { + "/workspaceagents/me/log-source": { + "post": { "security": [ { "CoderSessionToken": [] @@ -5831,16 +5976,16 @@ const docTemplate = `{ "tags": [ "Agents" ], - "summary": "Patch workspace agent logs", - "operationId": "patch-workspace-agent-logs", + "summary": "Post workspace agent log source", + "operationId": "post-workspace-agent-log-source", "parameters": [ { - "description": "logs", + "description": "Log source request", "name": "request", "in": "body", "required": true, "schema": { - "$ref": "#/definitions/agentsdk.PatchLogs" + "$ref": "#/definitions/agentsdk.PostLogSourceRequest" } } ], @@ -5848,183 +5993,38 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/codersdk.Response" + "$ref": "#/definitions/codersdk.WorkspaceAgentLogSource" } } } } }, - "/workspaceagents/me/manifest": { - "get": { + "/workspaceagents/me/logs": { + "patch": { "security": [ { "CoderSessionToken": [] } ], + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ "Agents" ], - "summary": "Get authorized workspace agent manifest", - "operationId": "get-authorized-workspace-agent-manifest", - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/agentsdk.Manifest" - } - } - } - } - }, - "/workspaceagents/me/metadata": { - "post": { - "security": [ - { - "CoderSessionToken": [] - } - ], - "consumes": [ - "application/json" - ], - "tags": [ - "Agents" - ], - "summary": "Submit workspace agent metadata", - "operationId": "submit-workspace-agent-metadata", - "parameters": [ - { - "description": "Workspace agent metadata request", - "name": "request", - "in": "body", - "required": true, - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/agentsdk.PostMetadataRequest" - } - } - } - ], - "responses": { - "204": { - "description": "Success" - } - }, - "x-apidocgen": { - "skip": true - } - } - }, - "/workspaceagents/me/metadata/{key}": { - "post": { - "security": [ - { - "CoderSessionToken": [] - } - ], - "consumes": [ - "application/json" - ], - "tags": [ - "Agents" - ], - "summary": "Removed: Submit workspace agent metadata", - "operationId": "removed-submit-workspace-agent-metadata", - "parameters": [ - { - "description": "Workspace agent metadata request", - "name": "request", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/agentsdk.PostMetadataRequestDeprecated" - } - }, - { - "type": "string", - "format": "string", - "description": "metadata key", - "name": "key", - "in": "path", - "required": true - } - ], - "responses": { - "204": { - "description": "Success" - } - }, - "x-apidocgen": { - "skip": true - } - } - }, - "/workspaceagents/me/report-lifecycle": { - "post": { - "security": [ - { - "CoderSessionToken": [] - } - ], - "consumes": [ - "application/json" - ], - "tags": [ - "Agents" - ], - "summary": "Submit workspace agent lifecycle state", - 
"operationId": "submit-workspace-agent-lifecycle-state", - "parameters": [ - { - "description": "Workspace agent lifecycle request", - "name": "request", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/agentsdk.PostLifecycleRequest" - } - } - ], - "responses": { - "204": { - "description": "Success" - } - }, - "x-apidocgen": { - "skip": true - } - } - }, - "/workspaceagents/me/report-stats": { - "post": { - "security": [ - { - "CoderSessionToken": [] - } - ], - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "Agents" - ], - "summary": "Submit workspace agent stats", - "operationId": "submit-workspace-agent-stats", - "deprecated": true, + "summary": "Patch workspace agent logs", + "operationId": "patch-workspace-agent-logs", "parameters": [ { - "description": "Stats request", + "description": "logs", "name": "request", "in": "body", "required": true, "schema": { - "$ref": "#/definitions/agentsdk.Stats" + "$ref": "#/definitions/agentsdk.PatchLogs" } } ], @@ -6032,7 +6032,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/agentsdk.StatsResponse" + "$ref": "#/definitions/codersdk.Response" } } } @@ -6060,84 +6060,6 @@ const docTemplate = `{ } } }, - "/workspaceagents/me/startup": { - "post": { - "security": [ - { - "CoderSessionToken": [] - } - ], - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "Agents" - ], - "summary": "Submit workspace agent startup", - "operationId": "submit-workspace-agent-startup", - "parameters": [ - { - "description": "Startup request", - "name": "request", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/agentsdk.PostStartupRequest" - } - } - ], - "responses": { - "200": { - "description": "OK" - } - }, - "x-apidocgen": { - "skip": true - } - } - }, - "/workspaceagents/me/startup-logs": { - "patch": { - "security": [ - { - "CoderSessionToken": [] - } - ], - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "Agents" - ], - "summary": "Removed: Patch workspace agent logs", - "operationId": "removed-patch-workspace-agent-logs", - "parameters": [ - { - "description": "logs", - "name": "request", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/agentsdk.PatchLogs" - } - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/codersdk.Response" - } - } - } - } - }, "/workspaceagents/{workspaceagent}": { "get": { "security": [ @@ -7713,6 +7635,9 @@ const docTemplate = `{ "CoderSessionToken": [] } ], + "consumes": [ + "application/json" + ], "tags": [ "Workspaces" ], @@ -7726,6 +7651,14 @@ const docTemplate = `{ "name": "workspace", "in": "path", "required": true + }, + { + "description": "Post workspace usage request", + "name": "request", + "in": "body", + "schema": { + "$ref": "#/definitions/codersdk.PostWorkspaceUsageRequest" + } } ], "responses": { @@ -7772,402 +7705,131 @@ const docTemplate = `{ } }, "definitions": { - "agentsdk.AWSInstanceIdentityToken": { - "type": "object", - "required": [ - "document", - "signature" - ], - "properties": { - "document": { - "type": "string" - }, - "signature": { - "type": "string" - } - } - }, - "agentsdk.AgentMetric": { - "type": "object", - "required": [ - "name", - "type", - "value" - ], - "properties": { - "labels": { - "type": "array", - "items": { - "$ref": "#/definitions/agentsdk.AgentMetricLabel" - } - }, - "name": { - 
"type": "string" - }, - "type": { - "enum": [ - "counter", - "gauge" - ], - "allOf": [ - { - "$ref": "#/definitions/agentsdk.AgentMetricType" - } - ] - }, - "value": { - "type": "number" - } - } - }, - "agentsdk.AgentMetricLabel": { - "type": "object", - "required": [ - "name", - "value" - ], - "properties": { - "name": { - "type": "string" - }, - "value": { - "type": "string" - } - } - }, - "agentsdk.AgentMetricType": { - "type": "string", - "enum": [ - "counter", - "gauge" - ], - "x-enum-varnames": [ - "AgentMetricTypeCounter", - "AgentMetricTypeGauge" - ] - }, - "agentsdk.AuthenticateResponse": { - "type": "object", - "properties": { - "session_token": { - "type": "string" - } - } - }, - "agentsdk.AzureInstanceIdentityToken": { - "type": "object", - "required": [ - "encoding", - "signature" - ], - "properties": { - "encoding": { - "type": "string" - }, - "signature": { - "type": "string" - } - } - }, - "agentsdk.ExternalAuthResponse": { - "type": "object", - "properties": { - "access_token": { - "type": "string" - }, - "password": { - "type": "string" - }, - "token_extra": { - "type": "object", - "additionalProperties": true - }, - "type": { - "type": "string" - }, - "url": { - "type": "string" - }, - "username": { - "description": "Deprecated: Only supported on ` + "`" + `/workspaceagents/me/gitauth` + "`" + `\nfor backwards compatibility.", - "type": "string" - } - } - }, - "agentsdk.GitSSHKey": { - "type": "object", - "properties": { - "private_key": { - "type": "string" - }, - "public_key": { - "type": "string" - } - } - }, - "agentsdk.GoogleInstanceIdentityToken": { - "type": "object", - "required": [ - "json_web_token" - ], - "properties": { - "json_web_token": { - "type": "string" - } - } - }, - "agentsdk.Log": { - "type": "object", - "properties": { - "created_at": { - "type": "string" - }, - "level": { - "$ref": "#/definitions/codersdk.LogLevel" - }, - "output": { - "type": "string" - } - } - }, - "agentsdk.Manifest": { - "type": "object", - "properties": { - "agent_id": { - "type": "string" - }, - "agent_name": { - "type": "string" - }, - "apps": { - "type": "array", - "items": { - "$ref": "#/definitions/codersdk.WorkspaceApp" - } - }, - "derp_force_websockets": { - "type": "boolean" - }, - "derpmap": { - "$ref": "#/definitions/tailcfg.DERPMap" - }, - "directory": { - "type": "string" - }, - "disable_direct_connections": { - "type": "boolean" - }, - "environment_variables": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "git_auth_configs": { - "description": "GitAuthConfigs stores the number of Git configurations\nthe Coder deployment has. 
If this number is \u003e0, we\nset up special configuration in the workspace.", - "type": "integer" - }, - "metadata": { - "type": "array", - "items": { - "$ref": "#/definitions/codersdk.WorkspaceAgentMetadataDescription" - } - }, - "motd_file": { - "type": "string" - }, - "owner_name": { - "description": "OwnerName and WorkspaceID are used by an open-source user to identify the workspace.\nWe do not provide insurance that this will not be removed in the future,\nbut if it's easy to persist lets keep it around.", - "type": "string" - }, - "scripts": { - "type": "array", - "items": { - "$ref": "#/definitions/codersdk.WorkspaceAgentScript" - } - }, - "vscode_port_proxy_uri": { - "type": "string" - }, - "workspace_id": { - "type": "string" - }, - "workspace_name": { - "type": "string" - } - } - }, - "agentsdk.Metadata": { - "type": "object", - "properties": { - "age": { - "description": "Age is the number of seconds since the metadata was collected.\nIt is provided in addition to CollectedAt to protect against clock skew.", - "type": "integer" - }, - "collected_at": { - "type": "string", - "format": "date-time" - }, - "error": { - "type": "string" - }, - "key": { + "agentsdk.AWSInstanceIdentityToken": { + "type": "object", + "required": [ + "document", + "signature" + ], + "properties": { + "document": { "type": "string" }, - "value": { + "signature": { "type": "string" } } }, - "agentsdk.PatchLogs": { + "agentsdk.AuthenticateResponse": { "type": "object", "properties": { - "log_source_id": { + "session_token": { "type": "string" - }, - "logs": { - "type": "array", - "items": { - "$ref": "#/definitions/agentsdk.Log" - } } } }, - "agentsdk.PostAppHealthsRequest": { + "agentsdk.AzureInstanceIdentityToken": { "type": "object", + "required": [ + "encoding", + "signature" + ], "properties": { - "healths": { - "description": "Healths is a map of the workspace app name and the health of the app.", - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/codersdk.WorkspaceAppHealth" - } + "encoding": { + "type": "string" + }, + "signature": { + "type": "string" } } }, - "agentsdk.PostLifecycleRequest": { + "agentsdk.ExternalAuthResponse": { "type": "object", "properties": { - "changed_at": { + "access_token": { "type": "string" }, - "state": { - "$ref": "#/definitions/codersdk.WorkspaceAgentLifecycle" + "password": { + "type": "string" + }, + "token_extra": { + "type": "object", + "additionalProperties": true + }, + "type": { + "type": "string" + }, + "url": { + "type": "string" + }, + "username": { + "description": "Deprecated: Only supported on ` + "`" + `/workspaceagents/me/gitauth` + "`" + `\nfor backwards compatibility.", + "type": "string" } } }, - "agentsdk.PostMetadataRequest": { + "agentsdk.GitSSHKey": { "type": "object", "properties": { - "metadata": { - "type": "array", - "items": { - "$ref": "#/definitions/agentsdk.Metadata" - } + "private_key": { + "type": "string" + }, + "public_key": { + "type": "string" } } }, - "agentsdk.PostMetadataRequestDeprecated": { + "agentsdk.GoogleInstanceIdentityToken": { "type": "object", + "required": [ + "json_web_token" + ], "properties": { - "age": { - "description": "Age is the number of seconds since the metadata was collected.\nIt is provided in addition to CollectedAt to protect against clock skew.", - "type": "integer" - }, - "collected_at": { - "type": "string", - "format": "date-time" - }, - "error": { - "type": "string" - }, - "value": { + "json_web_token": { "type": "string" } } }, - "agentsdk.PostStartupRequest": { + 
"agentsdk.Log": { "type": "object", "properties": { - "expanded_directory": { + "created_at": { "type": "string" }, - "subsystems": { - "type": "array", - "items": { - "$ref": "#/definitions/codersdk.AgentSubsystem" - } + "level": { + "$ref": "#/definitions/codersdk.LogLevel" }, - "version": { + "output": { "type": "string" } } }, - "agentsdk.Stats": { + "agentsdk.PatchLogs": { "type": "object", "properties": { - "connection_count": { - "description": "ConnectionCount is the number of connections received by an agent.", - "type": "integer" - }, - "connection_median_latency_ms": { - "description": "ConnectionMedianLatencyMS is the median latency of all connections in milliseconds.", - "type": "number" - }, - "connections_by_proto": { - "description": "ConnectionsByProto is a count of connections by protocol.", - "type": "object", - "additionalProperties": { - "type": "integer" - } + "log_source_id": { + "type": "string" }, - "metrics": { - "description": "Metrics collected by the agent", + "logs": { "type": "array", "items": { - "$ref": "#/definitions/agentsdk.AgentMetric" + "$ref": "#/definitions/agentsdk.Log" } - }, - "rx_bytes": { - "description": "RxBytes is the number of received bytes.", - "type": "integer" - }, - "rx_packets": { - "description": "RxPackets is the number of received packets.", - "type": "integer" - }, - "session_count_jetbrains": { - "description": "SessionCountJetBrains is the number of connections received by an agent\nthat are from our JetBrains extension.", - "type": "integer" - }, - "session_count_reconnecting_pty": { - "description": "SessionCountReconnectingPTY is the number of connections received by an agent\nthat are from the reconnecting web terminal.", - "type": "integer" - }, - "session_count_ssh": { - "description": "SessionCountSSH is the number of connections received by an agent\nthat are normal, non-tagged SSH sessions.", - "type": "integer" - }, - "session_count_vscode": { - "description": "SessionCountVSCode is the number of connections received by an agent\nthat are from our VS Code extension.", - "type": "integer" - }, - "tx_bytes": { - "description": "TxBytes is the number of transmitted bytes.", - "type": "integer" - }, - "tx_packets": { - "description": "TxPackets is the number of transmitted bytes.", - "type": "integer" } } }, - "agentsdk.StatsResponse": { + "agentsdk.PostLogSourceRequest": { "type": "object", "properties": { - "report_interval": { - "description": "ReportInterval is the duration after which the agent should send stats\nagain.", - "type": "integer" + "display_name": { + "type": "string" + }, + "icon": { + "type": "string" + }, + "id": { + "description": "ID is a unique identifier for the log source.\nIt is scoped to a workspace agent, and can be statically\ndefined inside code to prevent duplicate sources from being\ncreated for the same agent.", + "type": "string" } } }, @@ -8717,6 +8379,10 @@ const docTemplate = `{ "description": "ExternalURL references the current Coder version.\nFor production builds, this will link directly to a release. 
For development builds, this will link to a commit.", "type": "string" }, + "telemetry": { + "description": "Telemetry is a boolean that indicates whether telemetry is enabled.", + "type": "boolean" + }, "upgrade_message": { "description": "UpgradeMessage is the message displayed to users when an outdated client\nis detected.", "type": "string" @@ -8787,6 +8453,9 @@ const docTemplate = `{ "email": { "type": "string" }, + "name": { + "type": "string" + }, "password": { "type": "string" }, @@ -8842,6 +8511,9 @@ const docTemplate = `{ }, "codersdk.CreateGroupRequest": { "type": "object", + "required": [ + "name" + ], "properties": { "avatar_url": { "type": "string" @@ -8863,6 +8535,16 @@ const docTemplate = `{ "name" ], "properties": { + "description": { + "type": "string" + }, + "display_name": { + "description": "DisplayName will default to the same value as ` + "`" + `Name` + "`" + ` if not provided.", + "type": "string" + }, + "icon": { + "type": "string" + }, "name": { "type": "string" } @@ -9065,6 +8747,10 @@ const docTemplate = `{ } ] }, + "organization_id": { + "type": "string", + "format": "uuid" + }, "resource_id": { "type": "string", "format": "uuid" @@ -9136,6 +8822,9 @@ const docTemplate = `{ } ] }, + "name": { + "type": "string" + }, "organization_id": { "type": "string", "format": "uuid" @@ -9687,19 +9376,22 @@ const docTemplate = `{ "example", "auto-fill-parameters", "multi-organization", - "custom-roles" + "custom-roles", + "workspace-usage" ], "x-enum-comments": { "ExperimentAutoFillParameters": "This should not be taken out of experiments until we have redesigned the feature.", "ExperimentCustomRoles": "Allows creating runtime custom roles", "ExperimentExample": "This isn't used for anything.", - "ExperimentMultiOrganization": "Requires organization context for interactions, default org is assumed." 
+ "ExperimentMultiOrganization": "Requires organization context for interactions, default org is assumed.", + "ExperimentWorkspaceUsage": "Enables the new workspace usage tracking" }, "x-enum-varnames": [ "ExperimentExample", "ExperimentAutoFillParameters", "ExperimentMultiOrganization", - "ExperimentCustomRoles" + "ExperimentCustomRoles", + "ExperimentWorkspaceUsage" ] }, "codersdk.ExternalAuth": { @@ -9782,12 +9474,6 @@ const docTemplate = `{ "description": "DisplayName is shown in the UI to identify the auth config.", "type": "string" }, - "extra_token_keys": { - "type": "array", - "items": { - "type": "string" - } - }, "id": { "description": "ID is a unique identifier for the auth config.\nIt defaults to ` + "`" + `type` + "`" + ` when not provided.", "type": "string" @@ -10441,6 +10127,9 @@ const docTemplate = `{ "issuer_url": { "type": "string" }, + "name_field": { + "type": "string" + }, "scopes": { "type": "array", "items": { @@ -10476,7 +10165,6 @@ const docTemplate = `{ "created_at", "id", "is_default", - "name", "updated_at" ], "properties": { @@ -10484,6 +10172,15 @@ const docTemplate = `{ "type": "string", "format": "date-time" }, + "description": { + "type": "string" + }, + "display_name": { + "type": "string" + }, + "icon": { + "type": "string" + }, "id": { "type": "string", "format": "uuid" @@ -10527,6 +10224,36 @@ const docTemplate = `{ } } }, + "codersdk.OrganizationMemberWithName": { + "type": "object", + "properties": { + "created_at": { + "type": "string", + "format": "date-time" + }, + "organization_id": { + "type": "string", + "format": "uuid" + }, + "roles": { + "type": "array", + "items": { + "$ref": "#/definitions/codersdk.SlimRole" + } + }, + "updated_at": { + "type": "string", + "format": "date-time" + }, + "user_id": { + "type": "string", + "format": "uuid" + }, + "username": { + "type": "string" + } + } + }, "codersdk.PatchGroupRequest": { "type": "object", "properties": { @@ -10627,6 +10354,18 @@ const docTemplate = `{ } } }, + "codersdk.PostWorkspaceUsageRequest": { + "type": "object", + "properties": { + "agent_id": { + "type": "string", + "format": "uuid" + }, + "app_name": { + "$ref": "#/definitions/codersdk.UsageAppName" + } + } + }, "codersdk.PprofConfig": { "type": "object", "properties": { @@ -11203,7 +10942,8 @@ const docTemplate = `{ "workspace_proxy", "organization", "oauth2_provider_app", - "oauth2_provider_app_secret" + "oauth2_provider_app_secret", + "custom_role" ], "x-enum-varnames": [ "ResourceTypeTemplate", @@ -11220,7 +10960,8 @@ const docTemplate = `{ "ResourceTypeWorkspaceProxy", "ResourceTypeOrganization", "ResourceTypeOAuth2ProviderApp", - "ResourceTypeOAuth2ProviderAppSecret" + "ResourceTypeOAuth2ProviderAppSecret", + "ResourceTypeCustomRole" ] }, "codersdk.Response": { @@ -11348,6 +11089,9 @@ const docTemplate = `{ }, "name": { "type": "string" + }, + "organization_id": { + "type": "string" } } }, @@ -12189,10 +11933,16 @@ const docTemplate = `{ }, "codersdk.UpdateOrganizationRequest": { "type": "object", - "required": [ - "name" - ], "properties": { + "description": { + "type": "string" + }, + "display_name": { + "type": "string" + }, + "icon": { + "type": "string" + }, "name": { "type": "string" } @@ -12371,6 +12121,21 @@ const docTemplate = `{ } } }, + "codersdk.UsageAppName": { + "type": "string", + "enum": [ + "vscode", + "jetbrains", + "reconnecting-pty", + "ssh" + ], + "x-enum-varnames": [ + "UsageAppNameVscode", + "UsageAppNameJetbrains", + "UsageAppNameReconnectingPty", + "UsageAppNameSSH" + ] + }, "codersdk.User": { "type": 
"object", "required": [ @@ -13031,26 +12796,6 @@ const docTemplate = `{ } } }, - "codersdk.WorkspaceAgentMetadataDescription": { - "type": "object", - "properties": { - "display_name": { - "type": "string" - }, - "interval": { - "type": "integer" - }, - "key": { - "type": "string" - }, - "script": { - "type": "string" - }, - "timeout": { - "type": "integer" - } - } - }, "codersdk.WorkspaceAgentPortShare": { "type": "object", "properties": { @@ -14060,13 +13805,6 @@ const docTemplate = `{ "derp": { "$ref": "#/definitions/healthsdk.DERPHealthReport" }, - "failing_sections": { - "description": "FailingSections is a list of sections that have failed their healthcheck.", - "type": "array", - "items": { - "$ref": "#/definitions/healthsdk.HealthSection" - } - }, "healthy": { "description": "Healthy is true if the report returns no errors.\nDeprecated: use ` + "`" + `Severity` + "`" + ` instead", "type": "boolean" diff --git a/coderd/apidoc/swagger.json b/coderd/apidoc/swagger.json index 28212bdaa8342..46caa7d6146da 100644 --- a/coderd/apidoc/swagger.json +++ b/coderd/apidoc/swagger.json @@ -998,6 +998,15 @@ "tags": ["Insights"], "summary": "Get deployment DAUs", "operationId": "get-deployment-daus", + "parameters": [ + { + "type": "integer", + "description": "Time-zone offset (e.g. -2)", + "name": "tz_offset", + "in": "query", + "required": true + } + ], "responses": { "200": { "description": "OK", @@ -1021,18 +1030,38 @@ "operationId": "get-insights-about-templates", "parameters": [ { - "type": "integer", + "type": "string", + "format": "date-time", "description": "Start time", - "name": "before", + "name": "start_time", "in": "query", "required": true }, { - "type": "integer", + "type": "string", + "format": "date-time", "description": "End time", - "name": "after", + "name": "end_time", + "in": "query", + "required": true + }, + { + "enum": ["week", "day"], + "type": "string", + "description": "Interval", + "name": "interval", "in": "query", "required": true + }, + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "csv", + "description": "Template IDs", + "name": "template_ids", + "in": "query" } ], "responses": { @@ -1058,18 +1087,30 @@ "operationId": "get-insights-about-user-activity", "parameters": [ { - "type": "integer", + "type": "string", + "format": "date-time", "description": "Start time", - "name": "before", + "name": "start_time", "in": "query", "required": true }, { - "type": "integer", + "type": "string", + "format": "date-time", "description": "End time", - "name": "after", + "name": "end_time", "in": "query", "required": true + }, + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "csv", + "description": "Template IDs", + "name": "template_ids", + "in": "query" } ], "responses": { @@ -1095,18 +1136,30 @@ "operationId": "get-insights-about-user-latency", "parameters": [ { - "type": "integer", + "type": "string", + "format": "date-time", "description": "Start time", - "name": "before", + "name": "start_time", "in": "query", "required": true }, { - "type": "integer", + "type": "string", + "format": "date-time", "description": "End time", - "name": "after", + "name": "end_time", "in": "query", "required": true + }, + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "csv", + "description": "Template IDs", + "name": "template_ids", + "in": "query" } ], "responses": { @@ -1910,6 +1963,39 @@ } } }, + "/organizations/{organization}/members": { + "get": { + "security": [ + { + "CoderSessionToken": 
[] + } + ], + "produces": ["application/json"], + "tags": ["Members"], + "summary": "List organization members", + "operationId": "list-organization-members", + "parameters": [ + { + "type": "string", + "description": "Organization ID", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/codersdk.OrganizationMemberWithName" + } + } + } + } + } + }, "/organizations/{organization}/members/roles": { "get": { "security": [ @@ -1976,6 +2062,78 @@ } } }, + "/organizations/{organization}/members/{user}": { + "post": { + "security": [ + { + "CoderSessionToken": [] + } + ], + "produces": ["application/json"], + "tags": ["Members"], + "summary": "Add organization member", + "operationId": "add-organization-member", + "parameters": [ + { + "type": "string", + "description": "Organization ID", + "name": "organization", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "User ID, name, or me", + "name": "user", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/codersdk.OrganizationMember" + } + } + } + }, + "delete": { + "security": [ + { + "CoderSessionToken": [] + } + ], + "produces": ["application/json"], + "tags": ["Members"], + "summary": "Remove organization member", + "operationId": "remove-organization-member", + "parameters": [ + { + "type": "string", + "description": "Organization ID", + "name": "organization", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "User ID, name, or me", + "name": "user", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/codersdk.OrganizationMember" + } + } + } + } + }, "/organizations/{organization}/members/{user}/roles": { "put": { "security": [ @@ -2567,6 +2725,30 @@ } } }, + "/templates": { + "get": { + "security": [ + { + "CoderSessionToken": [] + } + ], + "produces": ["application/json"], + "tags": ["Templates"], + "summary": "Get all templates", + "operationId": "get-all-templates", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/codersdk.Template" + } + } + } + } + } + }, "/templates/{template}": { "get": { "security": [ @@ -4971,54 +5153,6 @@ } } }, - "/workspaceagents/me/app-health": { - "post": { - "security": [ - { - "CoderSessionToken": [] - } - ], - "consumes": ["application/json"], - "produces": ["application/json"], - "tags": ["Agents"], - "summary": "Submit workspace agent application health", - "operationId": "submit-workspace-agent-application-health", - "parameters": [ - { - "description": "Application health request", - "name": "request", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/agentsdk.PostAppHealthsRequest" - } - } - ], - "responses": { - "200": { - "description": "OK" - } - } - } - }, - "/workspaceagents/me/coordinate": { - "get": { - "security": [ - { - "CoderSessionToken": [] - } - ], - "description": "It accepts a WebSocket connection to an agent that listens to\nincoming connections and publishes node updates.", - "tags": ["Agents"], - "summary": "Coordinate workspace agent via Tailnet", - "operationId": "coordinate-workspace-agent-via-tailnet", - "responses": { - "101": { - "description": "Switching Protocols" - } - } - } - }, "/workspaceagents/me/external-auth": { 
"get": { "security": [ @@ -5126,6 +5260,39 @@ } } }, + "/workspaceagents/me/log-source": { + "post": { + "security": [ + { + "CoderSessionToken": [] + } + ], + "consumes": ["application/json"], + "produces": ["application/json"], + "tags": ["Agents"], + "summary": "Post workspace agent log source", + "operationId": "post-workspace-agent-log-source", + "parameters": [ + { + "description": "Log source request", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/agentsdk.PostLogSourceRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/codersdk.WorkspaceAgentLogSource" + } + } + } + } + }, "/workspaceagents/me/logs": { "patch": { "security": [ @@ -5159,256 +5326,28 @@ } } }, - "/workspaceagents/me/manifest": { + "/workspaceagents/me/rpc": { "get": { "security": [ { "CoderSessionToken": [] } ], - "produces": ["application/json"], "tags": ["Agents"], - "summary": "Get authorized workspace agent manifest", - "operationId": "get-authorized-workspace-agent-manifest", + "summary": "Workspace agent RPC API", + "operationId": "workspace-agent-rpc-api", "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/agentsdk.Manifest" - } + "101": { + "description": "Switching Protocols" } + }, + "x-apidocgen": { + "skip": true } } }, - "/workspaceagents/me/metadata": { - "post": { - "security": [ - { - "CoderSessionToken": [] - } - ], - "consumes": ["application/json"], - "tags": ["Agents"], - "summary": "Submit workspace agent metadata", - "operationId": "submit-workspace-agent-metadata", - "parameters": [ - { - "description": "Workspace agent metadata request", - "name": "request", - "in": "body", - "required": true, - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/agentsdk.PostMetadataRequest" - } - } - } - ], - "responses": { - "204": { - "description": "Success" - } - }, - "x-apidocgen": { - "skip": true - } - } - }, - "/workspaceagents/me/metadata/{key}": { - "post": { - "security": [ - { - "CoderSessionToken": [] - } - ], - "consumes": ["application/json"], - "tags": ["Agents"], - "summary": "Removed: Submit workspace agent metadata", - "operationId": "removed-submit-workspace-agent-metadata", - "parameters": [ - { - "description": "Workspace agent metadata request", - "name": "request", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/agentsdk.PostMetadataRequestDeprecated" - } - }, - { - "type": "string", - "format": "string", - "description": "metadata key", - "name": "key", - "in": "path", - "required": true - } - ], - "responses": { - "204": { - "description": "Success" - } - }, - "x-apidocgen": { - "skip": true - } - } - }, - "/workspaceagents/me/report-lifecycle": { - "post": { - "security": [ - { - "CoderSessionToken": [] - } - ], - "consumes": ["application/json"], - "tags": ["Agents"], - "summary": "Submit workspace agent lifecycle state", - "operationId": "submit-workspace-agent-lifecycle-state", - "parameters": [ - { - "description": "Workspace agent lifecycle request", - "name": "request", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/agentsdk.PostLifecycleRequest" - } - } - ], - "responses": { - "204": { - "description": "Success" - } - }, - "x-apidocgen": { - "skip": true - } - } - }, - "/workspaceagents/me/report-stats": { - "post": { - "security": [ - { - "CoderSessionToken": [] - } - ], - "consumes": ["application/json"], - "produces": ["application/json"], - "tags": 
["Agents"], - "summary": "Submit workspace agent stats", - "operationId": "submit-workspace-agent-stats", - "deprecated": true, - "parameters": [ - { - "description": "Stats request", - "name": "request", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/agentsdk.Stats" - } - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/agentsdk.StatsResponse" - } - } - } - } - }, - "/workspaceagents/me/rpc": { - "get": { - "security": [ - { - "CoderSessionToken": [] - } - ], - "tags": ["Agents"], - "summary": "Workspace agent RPC API", - "operationId": "workspace-agent-rpc-api", - "responses": { - "101": { - "description": "Switching Protocols" - } - }, - "x-apidocgen": { - "skip": true - } - } - }, - "/workspaceagents/me/startup": { - "post": { - "security": [ - { - "CoderSessionToken": [] - } - ], - "consumes": ["application/json"], - "produces": ["application/json"], - "tags": ["Agents"], - "summary": "Submit workspace agent startup", - "operationId": "submit-workspace-agent-startup", - "parameters": [ - { - "description": "Startup request", - "name": "request", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/agentsdk.PostStartupRequest" - } - } - ], - "responses": { - "200": { - "description": "OK" - } - }, - "x-apidocgen": { - "skip": true - } - } - }, - "/workspaceagents/me/startup-logs": { - "patch": { - "security": [ - { - "CoderSessionToken": [] - } - ], - "consumes": ["application/json"], - "produces": ["application/json"], - "tags": ["Agents"], - "summary": "Removed: Patch workspace agent logs", - "operationId": "removed-patch-workspace-agent-logs", - "parameters": [ - { - "description": "logs", - "name": "request", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/agentsdk.PatchLogs" - } - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/codersdk.Response" - } - } - } - } - }, - "/workspaceagents/{workspaceagent}": { - "get": { + "/workspaceagents/{workspaceagent}": { + "get": { "security": [ { "CoderSessionToken": [] @@ -6818,6 +6757,7 @@ "CoderSessionToken": [] } ], + "consumes": ["application/json"], "tags": ["Workspaces"], "summary": "Post Workspace Usage by ID", "operationId": "post-workspace-usage-by-id", @@ -6829,6 +6769,14 @@ "name": "workspace", "in": "path", "required": true + }, + { + "description": "Post workspace usage request", + "name": "request", + "in": "body", + "schema": { + "$ref": "#/definitions/codersdk.PostWorkspaceUsageRequest" + } } ], "responses": { @@ -6862,387 +6810,132 @@ "responses": { "200": { "description": "OK", - "schema": { - "$ref": "#/definitions/codersdk.Response" - } - } - } - } - } - }, - "definitions": { - "agentsdk.AWSInstanceIdentityToken": { - "type": "object", - "required": ["document", "signature"], - "properties": { - "document": { - "type": "string" - }, - "signature": { - "type": "string" - } - } - }, - "agentsdk.AgentMetric": { - "type": "object", - "required": ["name", "type", "value"], - "properties": { - "labels": { - "type": "array", - "items": { - "$ref": "#/definitions/agentsdk.AgentMetricLabel" - } - }, - "name": { - "type": "string" - }, - "type": { - "enum": ["counter", "gauge"], - "allOf": [ - { - "$ref": "#/definitions/agentsdk.AgentMetricType" - } - ] - }, - "value": { - "type": "number" - } - } - }, - "agentsdk.AgentMetricLabel": { - "type": "object", - "required": ["name", "value"], - "properties": { - "name": { - "type": "string" - }, - "value": { - 
"type": "string" - } - } - }, - "agentsdk.AgentMetricType": { - "type": "string", - "enum": ["counter", "gauge"], - "x-enum-varnames": ["AgentMetricTypeCounter", "AgentMetricTypeGauge"] - }, - "agentsdk.AuthenticateResponse": { - "type": "object", - "properties": { - "session_token": { - "type": "string" - } - } - }, - "agentsdk.AzureInstanceIdentityToken": { - "type": "object", - "required": ["encoding", "signature"], - "properties": { - "encoding": { - "type": "string" - }, - "signature": { - "type": "string" - } - } - }, - "agentsdk.ExternalAuthResponse": { - "type": "object", - "properties": { - "access_token": { - "type": "string" - }, - "password": { - "type": "string" - }, - "token_extra": { - "type": "object", - "additionalProperties": true - }, - "type": { - "type": "string" - }, - "url": { - "type": "string" - }, - "username": { - "description": "Deprecated: Only supported on `/workspaceagents/me/gitauth`\nfor backwards compatibility.", - "type": "string" - } - } - }, - "agentsdk.GitSSHKey": { - "type": "object", - "properties": { - "private_key": { - "type": "string" - }, - "public_key": { - "type": "string" - } - } - }, - "agentsdk.GoogleInstanceIdentityToken": { - "type": "object", - "required": ["json_web_token"], - "properties": { - "json_web_token": { - "type": "string" - } - } - }, - "agentsdk.Log": { - "type": "object", - "properties": { - "created_at": { - "type": "string" - }, - "level": { - "$ref": "#/definitions/codersdk.LogLevel" - }, - "output": { - "type": "string" - } - } - }, - "agentsdk.Manifest": { - "type": "object", - "properties": { - "agent_id": { - "type": "string" - }, - "agent_name": { - "type": "string" - }, - "apps": { - "type": "array", - "items": { - "$ref": "#/definitions/codersdk.WorkspaceApp" - } - }, - "derp_force_websockets": { - "type": "boolean" - }, - "derpmap": { - "$ref": "#/definitions/tailcfg.DERPMap" - }, - "directory": { - "type": "string" - }, - "disable_direct_connections": { - "type": "boolean" - }, - "environment_variables": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "git_auth_configs": { - "description": "GitAuthConfigs stores the number of Git configurations\nthe Coder deployment has. 
If this number is \u003e0, we\nset up special configuration in the workspace.", - "type": "integer" - }, - "metadata": { - "type": "array", - "items": { - "$ref": "#/definitions/codersdk.WorkspaceAgentMetadataDescription" - } - }, - "motd_file": { - "type": "string" - }, - "owner_name": { - "description": "OwnerName and WorkspaceID are used by an open-source user to identify the workspace.\nWe do not provide insurance that this will not be removed in the future,\nbut if it's easy to persist lets keep it around.", - "type": "string" - }, - "scripts": { - "type": "array", - "items": { - "$ref": "#/definitions/codersdk.WorkspaceAgentScript" - } - }, - "vscode_port_proxy_uri": { - "type": "string" - }, - "workspace_id": { - "type": "string" - }, - "workspace_name": { - "type": "string" + "schema": { + "$ref": "#/definitions/codersdk.Response" + } + } } } - }, - "agentsdk.Metadata": { + } + }, + "definitions": { + "agentsdk.AWSInstanceIdentityToken": { "type": "object", + "required": ["document", "signature"], "properties": { - "age": { - "description": "Age is the number of seconds since the metadata was collected.\nIt is provided in addition to CollectedAt to protect against clock skew.", - "type": "integer" - }, - "collected_at": { - "type": "string", - "format": "date-time" - }, - "error": { - "type": "string" - }, - "key": { + "document": { "type": "string" }, - "value": { + "signature": { "type": "string" } } }, - "agentsdk.PatchLogs": { + "agentsdk.AuthenticateResponse": { "type": "object", "properties": { - "log_source_id": { + "session_token": { "type": "string" - }, - "logs": { - "type": "array", - "items": { - "$ref": "#/definitions/agentsdk.Log" - } } } }, - "agentsdk.PostAppHealthsRequest": { + "agentsdk.AzureInstanceIdentityToken": { "type": "object", + "required": ["encoding", "signature"], "properties": { - "healths": { - "description": "Healths is a map of the workspace app name and the health of the app.", - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/codersdk.WorkspaceAppHealth" - } + "encoding": { + "type": "string" + }, + "signature": { + "type": "string" } } }, - "agentsdk.PostLifecycleRequest": { + "agentsdk.ExternalAuthResponse": { "type": "object", "properties": { - "changed_at": { + "access_token": { "type": "string" }, - "state": { - "$ref": "#/definitions/codersdk.WorkspaceAgentLifecycle" + "password": { + "type": "string" + }, + "token_extra": { + "type": "object", + "additionalProperties": true + }, + "type": { + "type": "string" + }, + "url": { + "type": "string" + }, + "username": { + "description": "Deprecated: Only supported on `/workspaceagents/me/gitauth`\nfor backwards compatibility.", + "type": "string" } } }, - "agentsdk.PostMetadataRequest": { + "agentsdk.GitSSHKey": { "type": "object", "properties": { - "metadata": { - "type": "array", - "items": { - "$ref": "#/definitions/agentsdk.Metadata" - } + "private_key": { + "type": "string" + }, + "public_key": { + "type": "string" } } }, - "agentsdk.PostMetadataRequestDeprecated": { + "agentsdk.GoogleInstanceIdentityToken": { "type": "object", + "required": ["json_web_token"], "properties": { - "age": { - "description": "Age is the number of seconds since the metadata was collected.\nIt is provided in addition to CollectedAt to protect against clock skew.", - "type": "integer" - }, - "collected_at": { - "type": "string", - "format": "date-time" - }, - "error": { - "type": "string" - }, - "value": { + "json_web_token": { "type": "string" } } }, - "agentsdk.PostStartupRequest": { + 
"agentsdk.Log": { "type": "object", "properties": { - "expanded_directory": { + "created_at": { "type": "string" }, - "subsystems": { - "type": "array", - "items": { - "$ref": "#/definitions/codersdk.AgentSubsystem" - } + "level": { + "$ref": "#/definitions/codersdk.LogLevel" }, - "version": { + "output": { "type": "string" } } }, - "agentsdk.Stats": { + "agentsdk.PatchLogs": { "type": "object", "properties": { - "connection_count": { - "description": "ConnectionCount is the number of connections received by an agent.", - "type": "integer" - }, - "connection_median_latency_ms": { - "description": "ConnectionMedianLatencyMS is the median latency of all connections in milliseconds.", - "type": "number" - }, - "connections_by_proto": { - "description": "ConnectionsByProto is a count of connections by protocol.", - "type": "object", - "additionalProperties": { - "type": "integer" - } + "log_source_id": { + "type": "string" }, - "metrics": { - "description": "Metrics collected by the agent", + "logs": { "type": "array", "items": { - "$ref": "#/definitions/agentsdk.AgentMetric" + "$ref": "#/definitions/agentsdk.Log" } - }, - "rx_bytes": { - "description": "RxBytes is the number of received bytes.", - "type": "integer" - }, - "rx_packets": { - "description": "RxPackets is the number of received packets.", - "type": "integer" - }, - "session_count_jetbrains": { - "description": "SessionCountJetBrains is the number of connections received by an agent\nthat are from our JetBrains extension.", - "type": "integer" - }, - "session_count_reconnecting_pty": { - "description": "SessionCountReconnectingPTY is the number of connections received by an agent\nthat are from the reconnecting web terminal.", - "type": "integer" - }, - "session_count_ssh": { - "description": "SessionCountSSH is the number of connections received by an agent\nthat are normal, non-tagged SSH sessions.", - "type": "integer" - }, - "session_count_vscode": { - "description": "SessionCountVSCode is the number of connections received by an agent\nthat are from our VS Code extension.", - "type": "integer" - }, - "tx_bytes": { - "description": "TxBytes is the number of transmitted bytes.", - "type": "integer" - }, - "tx_packets": { - "description": "TxPackets is the number of transmitted bytes.", - "type": "integer" } } }, - "agentsdk.StatsResponse": { + "agentsdk.PostLogSourceRequest": { "type": "object", "properties": { - "report_interval": { - "description": "ReportInterval is the duration after which the agent should send stats\nagain.", - "type": "integer" + "display_name": { + "type": "string" + }, + "icon": { + "type": "string" + }, + "id": { + "description": "ID is a unique identifier for the log source.\nIt is scoped to a workspace agent, and can be statically\ndefined inside code to prevent duplicate sources from being\ncreated for the same agent.", + "type": "string" } } }, @@ -7761,6 +7454,10 @@ "description": "ExternalURL references the current Coder version.\nFor production builds, this will link directly to a release. 
For development builds, this will link to a commit.", "type": "string" }, + "telemetry": { + "description": "Telemetry is a boolean that indicates whether telemetry is enabled.", + "type": "boolean" + }, "upgrade_message": { "description": "UpgradeMessage is the message displayed to users when an outdated client\nis detected.", "type": "string" @@ -7820,6 +7517,9 @@ "email": { "type": "string" }, + "name": { + "type": "string" + }, "password": { "type": "string" }, @@ -7875,6 +7575,7 @@ }, "codersdk.CreateGroupRequest": { "type": "object", + "required": ["name"], "properties": { "avatar_url": { "type": "string" @@ -7894,6 +7595,16 @@ "type": "object", "required": ["name"], "properties": { + "description": { + "type": "string" + }, + "display_name": { + "description": "DisplayName will default to the same value as `Name` if not provided.", + "type": "string" + }, + "icon": { + "type": "string" + }, "name": { "type": "string" } @@ -8075,6 +7786,10 @@ } ] }, + "organization_id": { + "type": "string", + "format": "uuid" + }, "resource_id": { "type": "string", "format": "uuid" @@ -8140,6 +7855,9 @@ } ] }, + "name": { + "type": "string" + }, "organization_id": { "type": "string", "format": "uuid" @@ -8674,19 +8392,22 @@ "example", "auto-fill-parameters", "multi-organization", - "custom-roles" + "custom-roles", + "workspace-usage" ], "x-enum-comments": { "ExperimentAutoFillParameters": "This should not be taken out of experiments until we have redesigned the feature.", "ExperimentCustomRoles": "Allows creating runtime custom roles", "ExperimentExample": "This isn't used for anything.", - "ExperimentMultiOrganization": "Requires organization context for interactions, default org is assumed." + "ExperimentMultiOrganization": "Requires organization context for interactions, default org is assumed.", + "ExperimentWorkspaceUsage": "Enables the new workspace usage tracking" }, "x-enum-varnames": [ "ExperimentExample", "ExperimentAutoFillParameters", "ExperimentMultiOrganization", - "ExperimentCustomRoles" + "ExperimentCustomRoles", + "ExperimentWorkspaceUsage" ] }, "codersdk.ExternalAuth": { @@ -8769,12 +8490,6 @@ "description": "DisplayName is shown in the UI to identify the auth config.", "type": "string" }, - "extra_token_keys": { - "type": "array", - "items": { - "type": "string" - } - }, "id": { "description": "ID is a unique identifier for the auth config.\nIt defaults to `type` when not provided.", "type": "string" @@ -9381,6 +9096,9 @@ "issuer_url": { "type": "string" }, + "name_field": { + "type": "string" + }, "scopes": { "type": "array", "items": { @@ -9412,12 +9130,21 @@ }, "codersdk.Organization": { "type": "object", - "required": ["created_at", "id", "is_default", "name", "updated_at"], + "required": ["created_at", "id", "is_default", "updated_at"], "properties": { "created_at": { "type": "string", "format": "date-time" }, + "description": { + "type": "string" + }, + "display_name": { + "type": "string" + }, + "icon": { + "type": "string" + }, "id": { "type": "string", "format": "uuid" @@ -9461,6 +9188,36 @@ } } }, + "codersdk.OrganizationMemberWithName": { + "type": "object", + "properties": { + "created_at": { + "type": "string", + "format": "date-time" + }, + "organization_id": { + "type": "string", + "format": "uuid" + }, + "roles": { + "type": "array", + "items": { + "$ref": "#/definitions/codersdk.SlimRole" + } + }, + "updated_at": { + "type": "string", + "format": "date-time" + }, + "user_id": { + "type": "string", + "format": "uuid" + }, + "username": { + "type": "string" + } + } 
+ }, "codersdk.PatchGroupRequest": { "type": "object", "properties": { @@ -9553,6 +9310,18 @@ } } }, + "codersdk.PostWorkspaceUsageRequest": { + "type": "object", + "properties": { + "agent_id": { + "type": "string", + "format": "uuid" + }, + "app_name": { + "$ref": "#/definitions/codersdk.UsageAppName" + } + } + }, "codersdk.PprofConfig": { "type": "object", "properties": { @@ -10095,7 +9864,8 @@ "workspace_proxy", "organization", "oauth2_provider_app", - "oauth2_provider_app_secret" + "oauth2_provider_app_secret", + "custom_role" ], "x-enum-varnames": [ "ResourceTypeTemplate", @@ -10112,7 +9882,8 @@ "ResourceTypeWorkspaceProxy", "ResourceTypeOrganization", "ResourceTypeOAuth2ProviderApp", - "ResourceTypeOAuth2ProviderAppSecret" + "ResourceTypeOAuth2ProviderAppSecret", + "ResourceTypeCustomRole" ] }, "codersdk.Response": { @@ -10240,6 +10011,9 @@ }, "name": { "type": "string" + }, + "organization_id": { + "type": "string" } } }, @@ -11038,8 +10812,16 @@ }, "codersdk.UpdateOrganizationRequest": { "type": "object", - "required": ["name"], "properties": { + "description": { + "type": "string" + }, + "display_name": { + "type": "string" + }, + "icon": { + "type": "string" + }, "name": { "type": "string" } @@ -11203,6 +10985,16 @@ } } }, + "codersdk.UsageAppName": { + "type": "string", + "enum": ["vscode", "jetbrains", "reconnecting-pty", "ssh"], + "x-enum-varnames": [ + "UsageAppNameVscode", + "UsageAppNameJetbrains", + "UsageAppNameReconnectingPty", + "UsageAppNameSSH" + ] + }, "codersdk.User": { "type": "object", "required": ["created_at", "email", "id", "username"], @@ -11842,26 +11634,6 @@ } } }, - "codersdk.WorkspaceAgentMetadataDescription": { - "type": "object", - "properties": { - "display_name": { - "type": "string" - }, - "interval": { - "type": "integer" - }, - "key": { - "type": "string" - }, - "script": { - "type": "string" - }, - "timeout": { - "type": "integer" - } - } - }, "codersdk.WorkspaceAgentPortShare": { "type": "object", "properties": { @@ -12792,13 +12564,6 @@ "derp": { "$ref": "#/definitions/healthsdk.DERPHealthReport" }, - "failing_sections": { - "description": "FailingSections is a list of sections that have failed their healthcheck.", - "type": "array", - "items": { - "$ref": "#/definitions/healthsdk.HealthSection" - } - }, "healthy": { "description": "Healthy is true if the report returns no errors.\nDeprecated: use `Severity` instead", "type": "boolean" diff --git a/coderd/apikey/apikey_test.go b/coderd/apikey/apikey_test.go index 734a187219bf5..41f64fe0d866f 100644 --- a/coderd/apikey/apikey_test.go +++ b/coderd/apikey/apikey_test.go @@ -128,7 +128,7 @@ func TestGenerate(t *testing.T) { // Assert that the hashed secret is correct. 
hashed := sha256.Sum256([]byte(keytokens[1])) - assert.ElementsMatch(t, hashed, key.HashedSecret[:]) + assert.ElementsMatch(t, hashed, key.HashedSecret) assert.Equal(t, tc.params.UserID, key.UserID) assert.WithinDuration(t, dbtime.Now(), key.CreatedAt, time.Second*5) diff --git a/coderd/audit.go b/coderd/audit.go index 315913dff49c2..ae0d63f543438 100644 --- a/coderd/audit.go +++ b/coderd/audit.go @@ -18,9 +18,9 @@ import ( "github.com/coder/coder/v2/coderd/audit" "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/db2sdk" + "github.com/coder/coder/v2/coderd/database/dbauthz" "github.com/coder/coder/v2/coderd/httpapi" "github.com/coder/coder/v2/coderd/httpmw" - "github.com/coder/coder/v2/coderd/rbac" "github.com/coder/coder/v2/coderd/searchquery" "github.com/coder/coder/v2/codersdk" ) @@ -45,7 +45,7 @@ func (api *API) auditLogs(rw http.ResponseWriter, r *http.Request) { } queryStr := r.URL.Query().Get("q") - filter, errs := searchquery.AuditLogs(queryStr) + filter, errs := searchquery.AuditLogs(ctx, api.Database, queryStr) if len(errs) > 0 { httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ Message: "Invalid audit search query.", @@ -53,8 +53,8 @@ func (api *API) auditLogs(rw http.ResponseWriter, r *http.Request) { }) return } - filter.Offset = int32(page.Offset) - filter.Limit = int32(page.Limit) + filter.OffsetOpt = int32(page.Offset) + filter.LimitOpt = int32(page.Limit) if filter.Username == "me" { filter.UserID = apiKey.UserID @@ -62,6 +62,10 @@ func (api *API) auditLogs(rw http.ResponseWriter, r *http.Request) { } dblogs, err := api.Database.GetAuditLogsOffset(ctx, filter) + if dbauthz.IsNotAuthorizedError(err) { + httpapi.Forbidden(rw) + return + } if err != nil { httpapi.InternalServerError(rw, err) return @@ -140,6 +144,9 @@ func (api *API) generateFakeAuditLog(rw http.ResponseWriter, r *http.Request) { if len(params.AdditionalFields) == 0 { params.AdditionalFields = json.RawMessage("{}") } + if params.OrganizationID == uuid.Nil { + params.OrganizationID = uuid.New() + } _, err = api.Database.InsertAuditLog(ctx, database.InsertAuditLogParams{ ID: uuid.New(), @@ -156,7 +163,7 @@ func (api *API) generateFakeAuditLog(rw http.ResponseWriter, r *http.Request) { AdditionalFields: params.AdditionalFields, RequestID: uuid.Nil, // no request ID to attach this to ResourceIcon: "", - OrganizationID: uuid.New(), + OrganizationID: params.OrganizationID, }) if err != nil { httpapi.InternalServerError(rw, err) @@ -183,26 +190,26 @@ func (api *API) convertAuditLog(ctx context.Context, dblog database.GetAuditLogs _ = json.Unmarshal(dblog.Diff, &diff) var user *codersdk.User - if dblog.UserUsername.Valid { - user = &codersdk.User{ - ReducedUser: codersdk.ReducedUser{ - MinimalUser: codersdk.MinimalUser{ - ID: dblog.UserID, - Username: dblog.UserUsername.String, - AvatarURL: dblog.UserAvatarUrl.String, - }, - Email: dblog.UserEmail.String, - CreatedAt: dblog.UserCreatedAt.Time, - Status: codersdk.UserStatus(dblog.UserStatus.UserStatus), - }, - Roles: []codersdk.SlimRole{}, - } - - for _, roleName := range dblog.UserRoles { - rbacRole, _ := rbac.RoleByName(roleName) - user.Roles = append(user.Roles, db2sdk.SlimRole(rbacRole)) - } + // Leaving the organization IDs blank for now; not sure they are useful for + // the audit query anyway? 
+ sdkUser := db2sdk.User(database.User{ + ID: dblog.UserID, + Email: dblog.UserEmail.String, + Username: dblog.UserUsername.String, + CreatedAt: dblog.UserCreatedAt.Time, + UpdatedAt: dblog.UserUpdatedAt.Time, + Status: dblog.UserStatus.UserStatus, + RBACRoles: dblog.UserRoles, + LoginType: dblog.UserLoginType.LoginType, + AvatarURL: dblog.UserAvatarUrl.String, + Deleted: dblog.UserDeleted.Bool, + LastSeenAt: dblog.UserLastSeenAt.Time, + QuietHoursSchedule: dblog.UserQuietHoursSchedule.String, + ThemePreference: dblog.UserThemePreference.String, + Name: dblog.UserName.String, + }, []uuid.UUID{}) + user = &sdkUser } var ( diff --git a/coderd/audit/diff.go b/coderd/audit/diff.go index a6835014d4fe2..09ae80c9ddf90 100644 --- a/coderd/audit/diff.go +++ b/coderd/audit/diff.go @@ -21,7 +21,10 @@ type Auditable interface { database.AuditOAuthConvertState | database.HealthSettings | database.OAuth2ProviderApp | - database.OAuth2ProviderAppSecret + database.OAuth2ProviderAppSecret | + database.CustomRole | + database.AuditableOrganizationMember | + database.Organization } // Map is a map of changed fields in an audited resource. It maps field names to diff --git a/coderd/audit/request.go b/coderd/audit/request.go index e6d9d01fbfd27..1c027fc85527f 100644 --- a/coderd/audit/request.go +++ b/coderd/audit/request.go @@ -31,7 +31,7 @@ type RequestParams struct { OrganizationID uuid.UUID Request *http.Request Action database.AuditAction - AdditionalFields json.RawMessage + AdditionalFields interface{} } type Request[T Auditable] struct { @@ -103,6 +103,12 @@ func ResourceTarget[T Auditable](tgt T) string { return typed.Name case database.OAuth2ProviderAppSecret: return typed.DisplaySecret + case database.CustomRole: + return typed.Name + case database.AuditableOrganizationMember: + return typed.Username + case database.Organization: + return typed.Name default: panic(fmt.Sprintf("unknown resource %T for ResourceTarget", tgt)) } @@ -140,6 +146,12 @@ func ResourceID[T Auditable](tgt T) uuid.UUID { return typed.ID case database.OAuth2ProviderAppSecret: return typed.ID + case database.CustomRole: + return typed.ID + case database.AuditableOrganizationMember: + return typed.UserID + case database.Organization: + return typed.ID default: panic(fmt.Sprintf("unknown resource %T for ResourceID", tgt)) } @@ -175,6 +187,12 @@ func ResourceType[T Auditable](tgt T) database.ResourceType { return database.ResourceTypeOauth2ProviderApp case database.OAuth2ProviderAppSecret: return database.ResourceTypeOauth2ProviderAppSecret + case database.CustomRole: + return database.ResourceTypeCustomRole + case database.AuditableOrganizationMember: + return database.ResourceTypeOrganizationMember + case database.Organization: + return database.ResourceTypeOrganization default: panic(fmt.Sprintf("unknown resource %T for ResourceType", typed)) } @@ -211,6 +229,12 @@ func ResourceRequiresOrgID[T Auditable]() bool { return false case database.OAuth2ProviderAppSecret: return false + case database.CustomRole: + return true + case database.AuditableOrganizationMember: + return true + case database.Organization: + return true default: panic(fmt.Sprintf("unknown resource %T for ResourceRequiresOrgID", tgt)) } @@ -275,8 +299,15 @@ func InitRequest[T Auditable](w http.ResponseWriter, p *RequestParams) (*Request } } - if p.AdditionalFields == nil { - p.AdditionalFields = json.RawMessage("{}") + additionalFieldsRaw := json.RawMessage("{}") + + if p.AdditionalFields != nil { + data, err := json.Marshal(p.AdditionalFields) + if err != nil { + 
p.Log.Warn(logCtx, "marshal additional fields", slog.Error(err)) + } else { + additionalFieldsRaw = json.RawMessage(data) + } } var userID uuid.UUID @@ -311,7 +342,7 @@ func InitRequest[T Auditable](w http.ResponseWriter, p *RequestParams) (*Request Diff: diffRaw, StatusCode: int32(sw.Status), RequestID: httpmw.RequestID(p.Request), - AdditionalFields: p.AdditionalFields, + AdditionalFields: additionalFieldsRaw, OrganizationID: requireOrgID[T](logCtx, p.OrganizationID, p.Log), } err := p.Audit.Export(ctx, auditLog) diff --git a/coderd/audit_test.go b/coderd/audit_test.go index b8b62cf27ecf0..9a810a2fce9a0 100644 --- a/coderd/audit_test.go +++ b/coderd/audit_test.go @@ -4,15 +4,19 @@ import ( "context" "encoding/json" "fmt" + "net/http" "strconv" "testing" "time" + "github.com/google/uuid" "github.com/stretchr/testify/require" + "cdr.dev/slog/sloggers/slogtest" "github.com/coder/coder/v2/coderd/audit" "github.com/coder/coder/v2/coderd/coderdtest" "github.com/coder/coder/v2/coderd/database" + "github.com/coder/coder/v2/coderd/rbac" "github.com/coder/coder/v2/codersdk" ) @@ -42,6 +46,55 @@ func TestAuditLogs(t *testing.T) { require.Len(t, alogs.AuditLogs, 1) }) + t.Run("User", func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + client := coderdtest.New(t, nil) + user := coderdtest.CreateFirstUser(t, client) + client2, user2 := coderdtest.CreateAnotherUser(t, client, user.OrganizationID, rbac.RoleOwner()) + + err := client2.CreateTestAuditLog(ctx, codersdk.CreateTestAuditLogRequest{ + ResourceID: user2.ID, + }) + require.NoError(t, err) + + alogs, err := client.AuditLogs(ctx, codersdk.AuditLogsRequest{ + Pagination: codersdk.Pagination{ + Limit: 1, + }, + }) + require.NoError(t, err) + require.Equal(t, int64(1), alogs.Count) + require.Len(t, alogs.AuditLogs, 1) + + // Make sure the returned user is fully populated. + foundUser, err := client.User(ctx, user2.ID.String()) + foundUser.OrganizationIDs = []uuid.UUID{} // Not included. + require.NoError(t, err) + require.Equal(t, foundUser, *alogs.AuditLogs[0].User) + + // Delete the user and try again. This is a soft delete so nothing should + // change. If users are hard deleted we should get nil, but there is no way + // to test this at the moment. + err = client.DeleteUser(ctx, user2.ID) + require.NoError(t, err) + + alogs, err = client.AuditLogs(ctx, codersdk.AuditLogsRequest{ + Pagination: codersdk.Pagination{ + Limit: 1, + }, + }) + require.NoError(t, err) + require.Equal(t, int64(1), alogs.Count) + require.Len(t, alogs.AuditLogs, 1) + + foundUser, err = client.User(ctx, user2.ID.String()) + foundUser.OrganizationIDs = []uuid.UUID{} // Not included. 
+ require.NoError(t, err) + require.Equal(t, foundUser, *alogs.AuditLogs[0].User) + }) + t.Run("WorkspaceBuildAuditLink", func(t *testing.T) { t.Parallel() @@ -84,6 +137,89 @@ func TestAuditLogs(t *testing.T) { require.Equal(t, auditLogs.AuditLogs[0].ResourceLink, fmt.Sprintf("/@%s/%s/builds/%s", workspace.OwnerName, workspace.Name, buildNumberString)) }) + + t.Run("Organization", func(t *testing.T) { + t.Parallel() + + logger := slogtest.Make(t, &slogtest.Options{ + IgnoreErrors: true, + }) + ctx := context.Background() + client := coderdtest.New(t, &coderdtest.Options{ + Logger: &logger, + }) + owner := coderdtest.CreateFirstUser(t, client) + orgAdmin, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID, rbac.ScopedRoleOrgAdmin(owner.OrganizationID)) + + err := client.CreateTestAuditLog(ctx, codersdk.CreateTestAuditLogRequest{ + ResourceID: owner.UserID, + OrganizationID: owner.OrganizationID, + }) + require.NoError(t, err) + + // Add an extra audit log in another organization + err = client.CreateTestAuditLog(ctx, codersdk.CreateTestAuditLogRequest{ + ResourceID: owner.UserID, + OrganizationID: uuid.New(), + }) + require.NoError(t, err) + + // Fetching audit logs without an organization selector should fail + _, err = orgAdmin.AuditLogs(ctx, codersdk.AuditLogsRequest{ + Pagination: codersdk.Pagination{ + Limit: 5, + }, + }) + var sdkError *codersdk.Error + require.Error(t, err) + require.ErrorAsf(t, err, &sdkError, "error should be of type *codersdk.Error") + require.Equal(t, http.StatusForbidden, sdkError.StatusCode()) + + // Using the organization selector allows the org admin to fetch audit logs + alogs, err := orgAdmin.AuditLogs(ctx, codersdk.AuditLogsRequest{ + SearchQuery: fmt.Sprintf("organization:%s", owner.OrganizationID.String()), + Pagination: codersdk.Pagination{ + Limit: 5, + }, + }) + require.NoError(t, err) + require.Len(t, alogs.AuditLogs, 1) + + // Also try fetching by organization name + organization, err := orgAdmin.Organization(ctx, owner.OrganizationID) + require.NoError(t, err) + + alogs, err = orgAdmin.AuditLogs(ctx, codersdk.AuditLogsRequest{ + SearchQuery: fmt.Sprintf("organization:%s", organization.Name), + Pagination: codersdk.Pagination{ + Limit: 5, + }, + }) + require.NoError(t, err) + require.Len(t, alogs.AuditLogs, 1) + }) + + t.Run("Organization404", func(t *testing.T) { + t.Parallel() + + logger := slogtest.Make(t, &slogtest.Options{ + IgnoreErrors: true, + }) + ctx := context.Background() + client := coderdtest.New(t, &coderdtest.Options{ + Logger: &logger, + }) + owner := coderdtest.CreateFirstUser(t, client) + orgAdmin, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID, rbac.ScopedRoleOrgAdmin(owner.OrganizationID)) + + _, err := orgAdmin.AuditLogs(ctx, codersdk.AuditLogsRequest{ + SearchQuery: fmt.Sprintf("organization:%s", "random-name"), + Pagination: codersdk.Pagination{ + Limit: 5, + }, + }) + require.Error(t, err) + }) } func TestAuditLogsFilter(t *testing.T) { @@ -242,9 +378,6 @@ func TestAuditLogsFilter(t *testing.T) { t.Parallel() auditLogs, err := client.AuditLogs(ctx, codersdk.AuditLogsRequest{ SearchQuery: testCase.SearchQuery, - Pagination: codersdk.Pagination{ - Limit: 25, - }, }) if testCase.ExpectedError { require.Error(t, err, "expected error") diff --git a/coderd/authorize_test.go b/coderd/authorize_test.go index 3fcb2f6c8e64f..f720f90c09206 100644 --- a/coderd/authorize_test.go +++ b/coderd/authorize_test.go @@ -27,7 +27,7 @@ func TestCheckPermissions(t *testing.T) { memberClient, _ := 
coderdtest.CreateAnotherUser(t, adminClient, adminUser.OrganizationID) memberUser, err := memberClient.User(ctx, codersdk.Me) require.NoError(t, err) - orgAdminClient, _ := coderdtest.CreateAnotherUser(t, adminClient, adminUser.OrganizationID, rbac.RoleOrgAdmin(adminUser.OrganizationID)) + orgAdminClient, _ := coderdtest.CreateAnotherUser(t, adminClient, adminUser.OrganizationID, rbac.ScopedRoleOrgAdmin(adminUser.OrganizationID)) orgAdminUser, err := orgAdminClient.User(ctx, codersdk.Me) require.NoError(t, err) diff --git a/coderd/autobuild/notify/notifier.go b/coderd/autobuild/notify/notifier.go index e0db12af35475..d8226161507ef 100644 --- a/coderd/autobuild/notify/notifier.go +++ b/coderd/autobuild/notify/notifier.go @@ -5,9 +5,16 @@ import ( "sort" "sync" "time" + + "github.com/coder/coder/v2/clock" ) -// Notifier calls a Condition at most once for each count in countdown. +// Notifier triggers callbacks at given intervals until some event happens. The +// intervals (e.g. 10 minute warning, 5 minute warning) are given in the +// countdown. The Notifier periodically polls the condition to get the time of +// the event (the Condition's deadline) and the callback. The callback is +// called at most once per entry in the countdown, the first time the time to +// the deadline is shorter than the duration. type Notifier struct { ctx context.Context cancel context.CancelFunc @@ -17,12 +24,15 @@ type Notifier struct { condition Condition notifiedAt map[time.Duration]bool countdown []time.Duration + + // for testing + clock clock.Clock } -// Condition is a function that gets executed with a certain time. +// Condition is a function that gets executed periodically, and receives the +// current time as an argument. // - It should return the deadline for the notification, as well as a -// callback function to execute once the time to the deadline is -// less than one of the notify attempts. If deadline is the zero +// callback function to execute. If deadline is the zero // time, callback will not be executed. // - Callback is executed once for every time the difference between deadline // and the current time is less than an element of countdown. @@ -30,23 +40,19 @@ type Notifier struct { // the returned deadline to the minimum interval. type Condition func(now time.Time) (deadline time.Time, callback func()) -// Notify is a convenience function that initializes a new Notifier -// with the given condition, interval, and countdown. -// It is the responsibility of the caller to call close to stop polling. -func Notify(cond Condition, interval time.Duration, countdown ...time.Duration) (closeFunc func()) { - notifier := New(cond, countdown...) - ticker := time.NewTicker(interval) - go notifier.Poll(ticker.C) - return func() { - ticker.Stop() - _ = notifier.Close() +type Option func(*Notifier) + +// WithTestClock is used in tests to inject a mock Clock +func WithTestClock(clk clock.Clock) Option { + return func(n *Notifier) { + n.clock = clk } } // New returns a Notifier that calls cond once every time it polls. // - Duplicate values are removed from countdown, and it is sorted in // descending order. -func New(cond Condition, countdown ...time.Duration) *Notifier { +func New(cond Condition, interval time.Duration, countdown []time.Duration, opts ...Option) *Notifier { // Ensure countdown is sorted in descending order and contains no duplicates. 
ct := unique(countdown) sort.Slice(ct, func(i, j int) bool { @@ -61,38 +67,36 @@ func New(cond Condition, countdown ...time.Duration) *Notifier { countdown: ct, condition: cond, notifiedAt: make(map[time.Duration]bool), + clock: clock.NewReal(), } + for _, opt := range opts { + opt(n) + } + go n.poll(interval) return n } -// Poll polls once immediately, and then once for every value from ticker. +// poll polls once immediately, and then periodically according to the interval. // Poll exits when ticker is closed. -func (n *Notifier) Poll(ticker <-chan time.Time) { +func (n *Notifier) poll(interval time.Duration) { defer close(n.pollDone) // poll once immediately - n.pollOnce(time.Now()) - for { - select { - case <-n.ctx.Done(): - return - case t, ok := <-ticker: - if !ok { - return - } - n.pollOnce(t) - } - } + _ = n.pollOnce() + tkr := n.clock.TickerFunc(n.ctx, interval, n.pollOnce, "notifier", "poll") + _ = tkr.Wait() } -func (n *Notifier) Close() error { +func (n *Notifier) Close() { n.cancel() <-n.pollDone - return nil } -func (n *Notifier) pollOnce(tick time.Time) { +// pollOnce only returns an error so it matches the signature expected of TickerFunc +// nolint: revive // bare returns are fine here +func (n *Notifier) pollOnce() (_ error) { + tick := n.clock.Now() n.lock.Lock() defer n.lock.Unlock() @@ -113,6 +117,7 @@ func (n *Notifier) pollOnce(tick time.Time) { n.notifiedAt[tock] = true return } + return } func unique(ds []time.Duration) []time.Duration { diff --git a/coderd/autobuild/notify/notifier_test.go b/coderd/autobuild/notify/notifier_test.go index 09e8158abaa99..d53b06c1a2133 100644 --- a/coderd/autobuild/notify/notifier_test.go +++ b/coderd/autobuild/notify/notifier_test.go @@ -1,34 +1,36 @@ package notify_test import ( - "sync" "testing" "time" "github.com/stretchr/testify/require" - "go.uber.org/atomic" "go.uber.org/goleak" + "github.com/coder/coder/v2/clock" "github.com/coder/coder/v2/coderd/autobuild/notify" + "github.com/coder/coder/v2/testutil" ) func TestNotifier(t *testing.T) { t.Parallel() - now := time.Now() + now := time.Date(2022, 5, 13, 0, 0, 0, 0, time.UTC) testCases := []struct { Name string Countdown []time.Duration - Ticks []time.Time + PollInterval time.Duration + NTicks int ConditionDeadline time.Time - NumConditions int64 - NumCallbacks int64 + NumConditions int + NumCallbacks int }{ { Name: "zero deadline", Countdown: durations(), - Ticks: fakeTicker(now, time.Second, 0), + PollInterval: time.Second, + NTicks: 0, ConditionDeadline: time.Time{}, NumConditions: 1, NumCallbacks: 0, @@ -36,7 +38,8 @@ func TestNotifier(t *testing.T) { { Name: "no calls", Countdown: durations(), - Ticks: fakeTicker(now, time.Second, 0), + PollInterval: time.Second, + NTicks: 0, ConditionDeadline: now, NumConditions: 1, NumCallbacks: 0, @@ -44,7 +47,8 @@ func TestNotifier(t *testing.T) { { Name: "exactly one call", Countdown: durations(time.Second), - Ticks: fakeTicker(now, time.Second, 1), + PollInterval: time.Second, + NTicks: 1, ConditionDeadline: now.Add(time.Second), NumConditions: 2, NumCallbacks: 1, @@ -52,7 +56,8 @@ func TestNotifier(t *testing.T) { { Name: "two calls", Countdown: durations(4*time.Second, 2*time.Second), - Ticks: fakeTicker(now, time.Second, 5), + PollInterval: time.Second, + NTicks: 5, ConditionDeadline: now.Add(5 * time.Second), NumConditions: 6, NumCallbacks: 2, @@ -60,7 +65,8 @@ func TestNotifier(t *testing.T) { { Name: "wrong order should not matter", Countdown: durations(2*time.Second, 4*time.Second), - Ticks: fakeTicker(now, time.Second, 5), + 
PollInterval: time.Second, + NTicks: 5, ConditionDeadline: now.Add(5 * time.Second), NumConditions: 6, NumCallbacks: 2, @@ -68,7 +74,8 @@ func TestNotifier(t *testing.T) { { Name: "ssh autostop notify", Countdown: durations(5*time.Minute, time.Minute), - Ticks: fakeTicker(now, 30*time.Second, 120), + PollInterval: 30 * time.Second, + NTicks: 120, ConditionDeadline: now.Add(30 * time.Minute), NumConditions: 121, NumCallbacks: 2, @@ -79,30 +86,33 @@ func TestNotifier(t *testing.T) { testCase := testCase t.Run(testCase.Name, func(t *testing.T) { t.Parallel() - ch := make(chan time.Time) - numConditions := atomic.NewInt64(0) - numCalls := atomic.NewInt64(0) + ctx := testutil.Context(t, testutil.WaitShort) + mClock := clock.NewMock(t) + mClock.Set(now).MustWait(ctx) + numConditions := 0 + numCalls := 0 cond := func(time.Time) (time.Time, func()) { - numConditions.Inc() + numConditions++ return testCase.ConditionDeadline, func() { - numCalls.Inc() + numCalls++ } } - var wg sync.WaitGroup - go func() { - defer wg.Done() - n := notify.New(cond, testCase.Countdown...) - defer n.Close() - n.Poll(ch) - }() - wg.Add(1) - for _, tick := range testCase.Ticks { - ch <- tick + + trap := mClock.Trap().TickerFunc("notifier", "poll") + defer trap.Close() + + n := notify.New(cond, testCase.PollInterval, testCase.Countdown, notify.WithTestClock(mClock)) + defer n.Close() + + trap.MustWait(ctx).Release() // ensure ticker started + for i := 0; i < testCase.NTicks; i++ { + interval, w := mClock.AdvanceNext() + w.MustWait(ctx) + require.Equal(t, testCase.PollInterval, interval) } - close(ch) - wg.Wait() - require.Equal(t, testCase.NumCallbacks, numCalls.Load()) - require.Equal(t, testCase.NumConditions, numConditions.Load()) + + require.Equal(t, testCase.NumCallbacks, numCalls) + require.Equal(t, testCase.NumConditions, numConditions) }) } } @@ -111,14 +121,6 @@ func durations(ds ...time.Duration) []time.Duration { return ds } -func fakeTicker(t time.Time, d time.Duration, n int) []time.Time { - var ts []time.Time - for i := 1; i <= n; i++ { - ts = append(ts, t.Add(time.Duration(n)*d)) - } - return ts -} - func TestMain(m *testing.M) { goleak.VerifyTestMain(m) } diff --git a/coderd/coderd.go b/coderd/coderd.go index 25763530db702..288eca9a4dbaf 100644 --- a/coderd/coderd.go +++ b/coderd/coderd.go @@ -43,7 +43,6 @@ import ( "github.com/coder/coder/v2/coderd/appearance" "github.com/coder/coder/v2/coderd/audit" "github.com/coder/coder/v2/coderd/awsidentity" - "github.com/coder/coder/v2/coderd/batchstats" "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/dbauthz" "github.com/coder/coder/v2/coderd/database/dbrollup" @@ -69,7 +68,6 @@ import ( "github.com/coder/coder/v2/coderd/util/slice" "github.com/coder/coder/v2/coderd/workspaceapps" "github.com/coder/coder/v2/coderd/workspacestats" - "github.com/coder/coder/v2/coderd/workspaceusage" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/codersdk/drpc" "github.com/coder/coder/v2/codersdk/healthsdk" @@ -189,7 +187,7 @@ type Options struct { HTTPClient *http.Client UpdateAgentMetrics func(ctx context.Context, labels prometheusmetrics.AgentMetricLabels, metrics []*agentproto.Stats_Metric) - StatsBatcher *batchstats.Batcher + StatsBatcher workspacestats.Batcher WorkspaceAppsStatsCollectorOptions workspaceapps.StatsCollectorOptions @@ -206,7 +204,7 @@ type Options struct { // stats. This is used to provide insights in the WebUI. DatabaseRolluper *dbrollup.Rolluper // WorkspaceUsageTracker tracks workspace usage by the CLI. 
- WorkspaceUsageTracker *workspaceusage.Tracker + WorkspaceUsageTracker *workspacestats.UsageTracker } // @title Coder API @@ -384,8 +382,8 @@ func New(options *Options) *API { } if options.WorkspaceUsageTracker == nil { - options.WorkspaceUsageTracker = workspaceusage.New(options.Database, - workspaceusage.WithLogger(options.Logger.Named("workspace_usage_tracker")), + options.WorkspaceUsageTracker = workspacestats.NewTracker(options.Database, + workspacestats.TrackerWithLogger(options.Logger.Named("workspace_usage_tracker")), ) } @@ -434,8 +432,7 @@ func New(options *Options) *API { options.Database, options.Pubsub, ), - dbRolluper: options.DatabaseRolluper, - workspaceUsageTracker: options.WorkspaceUsageTracker, + dbRolluper: options.DatabaseRolluper, } var customRoleHandler CustomRoleHandler = &agplCustomRoleHandler{} @@ -450,6 +447,7 @@ func New(options *Options) *API { WorkspaceProxy: false, UpgradeMessage: api.DeploymentValues.CLIUpgradeMessage.String(), DeploymentID: api.DeploymentID, + Telemetry: api.Telemetry.Enabled(), } api.SiteHandler = site.New(&site.Options{ BinFS: binFS, @@ -557,6 +555,7 @@ func New(options *Options) *API { Pubsub: options.Pubsub, TemplateScheduleStore: options.TemplateScheduleStore, StatsBatcher: options.StatsBatcher, + UsageTracker: options.WorkspaceUsageTracker, UpdateAgentMetricsFn: options.UpdateAgentMetrics, AppStatBatchSize: workspaceapps.DefaultStatsDBReporterBatchSize, }) @@ -828,7 +827,7 @@ func New(options *Options) *API { r.Post("/templateversions", api.postTemplateVersionsByOrganization) r.Route("/templates", func(r chi.Router) { r.Post("/", api.postTemplateByOrganization) - r.Get("/", api.templatesByOrganization) + r.Get("/", api.templatesByOrganization()) r.Get("/examples", api.templateExamples) r.Route("/{templatename}", func(r chi.Router) { r.Get("/", api.templateByOrganizationAndName) @@ -839,6 +838,7 @@ func New(options *Options) *API { }) }) r.Route("/members", func(r chi.Router) { + r.Get("/", api.listMembers) r.Route("/roles", func(r chi.Router) { r.Get("/", api.assignableOrgRoles) r.With(httpmw.RequireExperiment(api.Experiments, codersdk.ExperimentCustomRoles)). @@ -846,29 +846,48 @@ func New(options *Options) *API { }) r.Route("/{user}", func(r chi.Router) { - r.Use( - httpmw.ExtractOrganizationMemberParam(options.Database), - ) - r.Put("/roles", api.putMemberRoles) - r.Post("/workspaces", api.postWorkspacesByOrganization) + r.Group(func(r chi.Router) { + r.Use( + // Adding a member requires "read" permission + // on the site user. So limited to owners and user-admins. + // TODO: Allow org-admins to add users via some new permission? Or give them + // read on site users. 
+ httpmw.ExtractUserParam(options.Database), + ) + r.Post("/", api.postOrganizationMember) + }) + + r.Group(func(r chi.Router) { + r.Use( + httpmw.ExtractOrganizationMemberParam(options.Database), + ) + r.Delete("/", api.deleteOrganizationMember) + r.Put("/roles", api.putMemberRoles) + r.Post("/workspaces", api.postWorkspacesByOrganization) + }) }) }) }) }) - r.Route("/templates/{template}", func(r chi.Router) { + r.Route("/templates", func(r chi.Router) { r.Use( apiKeyMiddleware, - httpmw.ExtractTemplateParam(options.Database), ) - r.Get("/daus", api.templateDAUs) - r.Get("/", api.template) - r.Delete("/", api.deleteTemplate) - r.Patch("/", api.patchTemplateMeta) - r.Route("/versions", func(r chi.Router) { - r.Post("/archive", api.postArchiveTemplateVersions) - r.Get("/", api.templateVersionsByTemplate) - r.Patch("/", api.patchActiveTemplateVersion) - r.Get("/{templateversionname}", api.templateVersionByName) + r.Get("/", api.fetchTemplates(nil)) + r.Route("/{template}", func(r chi.Router) { + r.Use( + httpmw.ExtractTemplateParam(options.Database), + ) + r.Get("/daus", api.templateDAUs) + r.Get("/", api.template) + r.Delete("/", api.deleteTemplate) + r.Patch("/", api.patchTemplateMeta) + r.Route("/versions", func(r chi.Router) { + r.Post("/archive", api.postArchiveTemplateVersions) + r.Get("/", api.templateVersionsByTemplate) + r.Patch("/", api.patchActiveTemplateVersion) + r.Get("/{templateversionname}", api.templateVersionByName) + }) }) }) r.Route("/templateversions/{templateversion}", func(r chi.Router) { @@ -1004,23 +1023,12 @@ func New(options *Options) *API { Optional: false, })) r.Get("/rpc", api.workspaceAgentRPC) - r.Get("/manifest", api.workspaceAgentManifest) - // This route is deprecated and will be removed in a future release. - // New agents will use /me/manifest instead. - r.Get("/metadata", api.workspaceAgentManifest) - r.Post("/startup", api.postWorkspaceAgentStartup) - r.Patch("/startup-logs", api.patchWorkspaceAgentLogsDeprecated) r.Patch("/logs", api.patchWorkspaceAgentLogs) - r.Post("/app-health", api.postWorkspaceAppHealth) // Deprecated: Required to support legacy agents r.Get("/gitauth", api.workspaceAgentsGitAuth) r.Get("/external-auth", api.workspaceAgentsExternalAuth) r.Get("/gitsshkey", api.agentGitSSHKey) - r.Get("/coordinate", api.workspaceAgentCoordinate) - r.Post("/report-stats", api.workspaceAgentReportStats) - r.Post("/report-lifecycle", api.workspaceAgentReportLifecycle) - r.Post("/metadata", api.workspaceAgentPostMetadata) - r.Post("/metadata/{key}", api.workspaceAgentPostMetadataDeprecated) + r.Post("/log-source", api.workspaceAgentPostLogSource) }) r.Route("/{workspaceagent}", func(r chi.Router) { r.Use( @@ -1207,7 +1215,7 @@ func New(options *Options) *API { // Add CSP headers to all static assets and pages. CSP headers only affect // browsers, so these don't make sense on api routes. - cspMW := httpmw.CSPHeaders(func() []string { + cspMW := httpmw.CSPHeaders(options.Telemetry.Enabled(), func() []string { if api.DeploymentValues.Dangerous.AllowAllCors { // In this mode, allow all external requests return []string{"*"} @@ -1300,8 +1308,7 @@ type API struct { Acquirer *provisionerdserver.Acquirer // dbRolluper rolls up template usage stats from raw agent and app // stats. This is used to provide insights in the WebUI. - dbRolluper *dbrollup.Rolluper - workspaceUsageTracker *workspaceusage.Tracker + dbRolluper *dbrollup.Rolluper } // Close waits for all WebSocket connections to drain before returning. 
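The member routes above are split with chi's Group so that creating a member, which only needs the target site user, runs different middleware than the endpoints that act on an existing membership. Stripped down to the routing pattern, it looks roughly like the sketch below; the route prefix, stub handler, and no-op middleware are placeholders rather than the production wiring:

package main

import (
    "net/http"

    "github.com/go-chi/chi/v5"
)

func main() {
    stub := func(http.ResponseWriter, *http.Request) {}          // placeholder handler
    noop := func(next http.Handler) http.Handler { return next } // placeholder middleware

    r := chi.NewRouter()
    r.Route("/organizations/{organization}/members/{user}", func(r chi.Router) {
        r.Group(func(r chi.Router) {
            r.Use(noop) // stands in for httpmw.ExtractUserParam
            r.Post("/", stub)
        })
        r.Group(func(r chi.Router) {
            r.Use(noop) // stands in for httpmw.ExtractOrganizationMemberParam
            r.Delete("/", stub)
            r.Put("/roles", stub)
            r.Post("/workspaces", stub)
        })
    })
    _ = r
}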
@@ -1340,7 +1347,7 @@ func (api *API) Close() error { _ = (*coordinator).Close() } _ = api.agentProvider.Close() - api.workspaceUsageTracker.Close() + _ = api.statsReporter.Close() return nil } @@ -1372,6 +1379,10 @@ func compressHandler(h http.Handler) http.Handler { // CreateInMemoryProvisionerDaemon is an in-memory connection to a provisionerd. // Useful when starting coderd and provisionerd in the same process. func (api *API) CreateInMemoryProvisionerDaemon(dialCtx context.Context, name string, provisionerTypes []codersdk.ProvisionerType) (client proto.DRPCProvisionerDaemonClient, err error) { + return api.CreateInMemoryTaggedProvisionerDaemon(dialCtx, name, provisionerTypes, nil) +} + +func (api *API) CreateInMemoryTaggedProvisionerDaemon(dialCtx context.Context, name string, provisionerTypes []codersdk.ProvisionerType, provisionerTags map[string]string) (client proto.DRPCProvisionerDaemonClient, err error) { tracer := api.TracerProvider.Tracer(tracing.TracerName) clientSession, serverSession := drpc.MemTransportPipe() defer func() { @@ -1399,7 +1410,7 @@ func (api *API) CreateInMemoryProvisionerDaemon(dialCtx context.Context, name st OrganizationID: defaultOrg.ID, CreatedAt: dbtime.Now(), Provisioners: dbTypes, - Tags: provisionersdk.MutateTags(uuid.Nil, nil), + Tags: provisionersdk.MutateTags(uuid.Nil, provisionerTags), LastSeenAt: sql.NullTime{Time: dbtime.Now(), Valid: true}, Version: buildinfo.Version(), APIVersion: proto.CurrentVersion.String(), diff --git a/coderd/coderdtest/authorize.go b/coderd/coderdtest/authorize.go index e753e66f2d2f6..9586289d60025 100644 --- a/coderd/coderdtest/authorize.go +++ b/coderd/coderdtest/authorize.go @@ -60,10 +60,13 @@ func AssertRBAC(t *testing.T, api *coderd.API, client *codersdk.Client) RBACAsse roles, err := api.Database.GetAuthorizationUserRoles(ctx, key.UserID) require.NoError(t, err, "fetch user roles") + roleNames, err := roles.RoleNames() + require.NoError(t, err) + return RBACAsserter{ Subject: rbac.Subject{ ID: key.UserID.String(), - Roles: rbac.RoleNames(roles.Roles), + Roles: rbac.RoleIdentifiers(roleNames), Groups: roles.Groups, Scope: rbac.ScopeName(key.Scope), }, @@ -435,7 +438,7 @@ func randomRBACType() string { func RandomRBACSubject() rbac.Subject { return rbac.Subject{ ID: uuid.NewString(), - Roles: rbac.RoleNames{rbac.RoleMember()}, + Roles: rbac.RoleIdentifiers{rbac.RoleMember()}, Groups: []string{namesgenerator.GetRandomName(1)}, Scope: rbac.ScopeAll, } diff --git a/coderd/coderdtest/coderdtest.go b/coderd/coderdtest/coderdtest.go index 6153f1a68abcb..472c380926ec4 100644 --- a/coderd/coderdtest/coderdtest.go +++ b/coderd/coderdtest/coderdtest.go @@ -29,6 +29,7 @@ import ( "sync/atomic" "testing" "time" + "unicode" "cloud.google.com/go/compute/metadata" "github.com/fullsailor/pkcs7" @@ -54,8 +55,8 @@ import ( "github.com/coder/coder/v2/coderd/audit" "github.com/coder/coder/v2/coderd/autobuild" "github.com/coder/coder/v2/coderd/awsidentity" - "github.com/coder/coder/v2/coderd/batchstats" "github.com/coder/coder/v2/coderd/database" + "github.com/coder/coder/v2/coderd/database/db2sdk" "github.com/coder/coder/v2/coderd/database/dbauthz" "github.com/coder/coder/v2/coderd/database/dbrollup" "github.com/coder/coder/v2/coderd/database/dbtestutil" @@ -71,7 +72,7 @@ import ( "github.com/coder/coder/v2/coderd/util/ptr" "github.com/coder/coder/v2/coderd/workspaceapps" "github.com/coder/coder/v2/coderd/workspaceapps/appurl" - "github.com/coder/coder/v2/coderd/workspaceusage" + "github.com/coder/coder/v2/coderd/workspacestats" 
"github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/codersdk/agentsdk" "github.com/coder/coder/v2/codersdk/drpc" @@ -125,6 +126,7 @@ type Options struct { // IncludeProvisionerDaemon when true means to start an in-memory provisionerD IncludeProvisionerDaemon bool + ProvisionerDaemonTags map[string]string MetricsCacheRefreshInterval time.Duration AgentStatsRefreshInterval time.Duration DeploymentValues *codersdk.DeploymentValues @@ -144,7 +146,7 @@ type Options struct { // Logger should only be overridden if you expect errors // as part of your test. Logger *slog.Logger - StatsBatcher *batchstats.Batcher + StatsBatcher workspacestats.Batcher WorkspaceAppsStatsCollectorOptions workspaceapps.StatsCollectorOptions AllowWorkspaceRenames bool @@ -271,10 +273,10 @@ func NewOptions(t testing.TB, options *Options) (func(http.Handler), context.Can if options.StatsBatcher == nil { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) - batcher, closeBatcher, err := batchstats.New(ctx, - batchstats.WithStore(options.Database), + batcher, closeBatcher, err := workspacestats.NewBatcher(ctx, + workspacestats.BatcherWithStore(options.Database), // Avoid cluttering up test output. - batchstats.WithLogger(slog.Make(sloghuman.Sink(io.Discard))), + workspacestats.BatcherWithLogger(slog.Make(sloghuman.Sink(io.Discard))), ) require.NoError(t, err, "create stats batcher") options.StatsBatcher = batcher @@ -336,10 +338,10 @@ func NewOptions(t testing.TB, options *Options) (func(http.Handler), context.Can options.WorkspaceUsageTrackerTick = make(chan time.Time, 1) // buffering just in case } // Close is called by API.Close() - wuTracker := workspaceusage.New( + wuTracker := workspacestats.NewTracker( options.Database, - workspaceusage.WithLogger(options.Logger.Named("workspace_usage_tracker")), - workspaceusage.WithTickFlush(options.WorkspaceUsageTrackerTick, options.WorkspaceUsageTrackerFlush), + workspacestats.TrackerWithLogger(options.Logger.Named("workspace_usage_tracker")), + workspacestats.TrackerWithTickFlush(options.WorkspaceUsageTrackerTick, options.WorkspaceUsageTrackerFlush), ) var mutex sync.RWMutex @@ -512,7 +514,7 @@ func NewWithAPI(t testing.TB, options *Options) (*codersdk.Client, io.Closer, *c setHandler(coderAPI.RootHandler) var provisionerCloser io.Closer = nopcloser{} if options.IncludeProvisionerDaemon { - provisionerCloser = NewProvisionerDaemon(t, coderAPI) + provisionerCloser = NewTaggedProvisionerDaemon(t, coderAPI, "test", options.ProvisionerDaemonTags) } client := codersdk.New(serverURL) t.Cleanup(func() { @@ -552,6 +554,10 @@ func (c *provisionerdCloser) Close() error { // well with coderd testing. It registers the "echo" provisioner for // quick testing. func NewProvisionerDaemon(t testing.TB, coderAPI *coderd.API) io.Closer { + return NewTaggedProvisionerDaemon(t, coderAPI, "test", nil) +} + +func NewTaggedProvisionerDaemon(t testing.TB, coderAPI *coderd.API, name string, provisionerTags map[string]string) io.Closer { t.Helper() // t.Cleanup runs in last added, first called order. 
t.TempDir() will delete @@ -578,7 +584,7 @@ func NewProvisionerDaemon(t testing.TB, coderAPI *coderd.API) io.Closer { }() daemon := provisionerd.New(func(dialCtx context.Context) (provisionerdproto.DRPCProvisionerDaemonClient, error) { - return coderAPI.CreateInMemoryProvisionerDaemon(dialCtx, "test", []codersdk.ProvisionerType{codersdk.ProvisionerTypeEcho}) + return coderAPI.CreateInMemoryTaggedProvisionerDaemon(dialCtx, name, []codersdk.ProvisionerType{codersdk.ProvisionerTypeEcho}, provisionerTags) }, &provisionerd.Options{ Logger: coderAPI.Logger.Named("provisionerd").Leveled(slog.LevelDebug), UpdateInterval: 250 * time.Millisecond, @@ -595,6 +601,18 @@ func NewProvisionerDaemon(t testing.TB, coderAPI *coderd.API) io.Closer { } func NewExternalProvisionerDaemon(t testing.TB, client *codersdk.Client, org uuid.UUID, tags map[string]string) io.Closer { + t.Helper() + + // Without this check, the provisioner will silently fail. + entitlements, err := client.Entitlements(context.Background()) + if err == nil { + feature := entitlements.Features[codersdk.FeatureExternalProvisionerDaemons] + if !feature.Enabled || feature.Entitlement != codersdk.EntitlementEntitled { + require.NoError(t, xerrors.Errorf("external provisioner daemons require an entitled license")) + return nil + } + } + echoClient, echoServer := drpc.MemTransportPipe() ctx, cancelFunc := context.WithCancel(context.Background()) serveDone := make(chan struct{}) @@ -633,6 +651,7 @@ func NewExternalProvisionerDaemon(t testing.TB, client *codersdk.Client, org uui t.Cleanup(func() { _ = closer.Close() }) + return closer } @@ -640,6 +659,7 @@ var FirstUserParams = codersdk.CreateFirstUserRequest{ Email: "testuser@coder.com", Username: "testuser", Password: "SomeSecurePassword!", + Name: "Test User", } // CreateFirstUser creates a user with preset credentials and authenticates @@ -658,24 +678,29 @@ func CreateFirstUser(t testing.TB, client *codersdk.Client) codersdk.CreateFirst } // CreateAnotherUser creates and authenticates a new user. -func CreateAnotherUser(t testing.TB, client *codersdk.Client, organizationID uuid.UUID, roles ...string) (*codersdk.Client, codersdk.User) { +// Roles can include org scoped roles with 'roleName:' +func CreateAnotherUser(t testing.TB, client *codersdk.Client, organizationID uuid.UUID, roles ...rbac.RoleIdentifier) (*codersdk.Client, codersdk.User) { return createAnotherUserRetry(t, client, organizationID, 5, roles) } -func CreateAnotherUserMutators(t testing.TB, client *codersdk.Client, organizationID uuid.UUID, roles []string, mutators ...func(r *codersdk.CreateUserRequest)) (*codersdk.Client, codersdk.User) { +func CreateAnotherUserMutators(t testing.TB, client *codersdk.Client, organizationID uuid.UUID, roles []rbac.RoleIdentifier, mutators ...func(r *codersdk.CreateUserRequest)) (*codersdk.Client, codersdk.User) { return createAnotherUserRetry(t, client, organizationID, 5, roles, mutators...) } // AuthzUserSubject does not include the user's groups. 
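Because CreateAnotherUser now accepts rbac.RoleIdentifier values, org-scoped roles carry their organization ID explicitly instead of a string suffix, and site-wide and org roles can be mixed in one call. A sketch of a call site, mirroring how the audit-log tests earlier in this diff use it; the helper name and package are hypothetical:

package example

import (
    "testing"

    "github.com/coder/coder/v2/coderd/coderdtest"
    "github.com/coder/coder/v2/coderd/rbac"
)

// setupOrgAdmin is a hypothetical helper, not part of this diff.
func setupOrgAdmin(t *testing.T) {
    client := coderdtest.New(t, nil)
    owner := coderdtest.CreateFirstUser(t, client)

    // Site-wide and org-scoped roles can be passed together; the helper splits
    // them and applies site roles and org membership roles separately.
    orgAdminClient, orgAdmin := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID,
        rbac.RoleTemplateAdmin(),
        rbac.ScopedRoleOrgAdmin(owner.OrganizationID),
    )
    _, _ = orgAdminClient, orgAdmin
}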
func AuthzUserSubject(user codersdk.User, orgID uuid.UUID) rbac.Subject { - roles := make(rbac.RoleNames, 0, len(user.Roles)) + roles := make(rbac.RoleIdentifiers, 0, len(user.Roles)) // Member role is always implied roles = append(roles, rbac.RoleMember()) for _, r := range user.Roles { - roles = append(roles, r.Name) + orgID, _ := uuid.Parse(r.OrganizationID) // defaults to nil + roles = append(roles, rbac.RoleIdentifier{ + Name: r.Name, + OrganizationID: orgID, + }) } // We assume only 1 org exists - roles = append(roles, rbac.RoleOrgMember(orgID)) + roles = append(roles, rbac.ScopedRoleOrgMember(orgID)) return rbac.Subject{ ID: user.ID.String(), @@ -685,10 +710,11 @@ func AuthzUserSubject(user codersdk.User, orgID uuid.UUID) rbac.Subject { } } -func createAnotherUserRetry(t testing.TB, client *codersdk.Client, organizationID uuid.UUID, retries int, roles []string, mutators ...func(r *codersdk.CreateUserRequest)) (*codersdk.Client, codersdk.User) { +func createAnotherUserRetry(t testing.TB, client *codersdk.Client, organizationID uuid.UUID, retries int, roles []rbac.RoleIdentifier, mutators ...func(r *codersdk.CreateUserRequest)) (*codersdk.Client, codersdk.User) { req := codersdk.CreateUserRequest{ Email: namesgenerator.GetRandomName(10) + "@coder.com", Username: RandomUsername(t), + Name: RandomName(t), Password: "SomeSecurePassword!", OrganizationID: organizationID, } @@ -743,38 +769,74 @@ func createAnotherUserRetry(t testing.TB, client *codersdk.Client, organizationI if len(roles) > 0 { // Find the roles for the org vs the site wide roles - orgRoles := make(map[string][]string) - var siteRoles []string + orgRoles := make(map[uuid.UUID][]rbac.RoleIdentifier) + var siteRoles []rbac.RoleIdentifier for _, roleName := range roles { - roleName := roleName - orgID, ok := rbac.IsOrgRole(roleName) + ok := roleName.IsOrgRole() if ok { - orgRoles[orgID] = append(orgRoles[orgID], roleName) + orgRoles[roleName.OrganizationID] = append(orgRoles[roleName.OrganizationID], roleName) } else { siteRoles = append(siteRoles, roleName) } } // Update the roles for _, r := range user.Roles { - siteRoles = append(siteRoles, r.Name) + orgID, _ := uuid.Parse(r.OrganizationID) + siteRoles = append(siteRoles, rbac.RoleIdentifier{ + Name: r.Name, + OrganizationID: orgID, + }) + } + + onlyName := func(role rbac.RoleIdentifier) string { + return role.Name } - user, err = client.UpdateUserRoles(context.Background(), user.ID.String(), codersdk.UpdateRoles{Roles: siteRoles}) + user, err = client.UpdateUserRoles(context.Background(), user.ID.String(), codersdk.UpdateRoles{Roles: db2sdk.List(siteRoles, onlyName)}) require.NoError(t, err, "update site roles") // Update org roles for orgID, roles := range orgRoles { - organizationID, err := uuid.Parse(orgID) - require.NoError(t, err, fmt.Sprintf("parse org id %q", orgID)) - _, err = client.UpdateOrganizationMemberRoles(context.Background(), organizationID, user.ID.String(), - codersdk.UpdateRoles{Roles: roles}) + _, err = client.UpdateOrganizationMemberRoles(context.Background(), orgID, user.ID.String(), + codersdk.UpdateRoles{Roles: db2sdk.List(roles, onlyName)}) require.NoError(t, err, "update org membership roles") } } return other, user } +type CreateOrganizationOptions struct { + // IncludeProvisionerDaemon will spin up an external provisioner for the organization. 
+ // This requires enterprise and the feature 'codersdk.FeatureExternalProvisionerDaemons' + IncludeProvisionerDaemon bool +} + +func CreateOrganization(t *testing.T, client *codersdk.Client, opts CreateOrganizationOptions, mutators ...func(*codersdk.CreateOrganizationRequest)) codersdk.Organization { + ctx := testutil.Context(t, testutil.WaitMedium) + req := codersdk.CreateOrganizationRequest{ + Name: strings.ReplaceAll(strings.ToLower(namesgenerator.GetRandomName(0)), "_", "-"), + DisplayName: namesgenerator.GetRandomName(1), + Description: namesgenerator.GetRandomName(1), + Icon: "", + } + for _, mutator := range mutators { + mutator(&req) + } + + org, err := client.CreateOrganization(ctx, req) + require.NoError(t, err) + + if opts.IncludeProvisionerDaemon { + closer := NewExternalProvisionerDaemon(t, client, org.ID, map[string]string{}) + t.Cleanup(func() { + _ = closer.Close() + }) + } + + return org +} + // CreateTemplateVersion creates a template import provisioner job // with the responses provided. It uses the "echo" provisioner for compatibility // with testing. @@ -1018,7 +1080,7 @@ func (w WorkspaceAgentWaiter) Wait() []codersdk.WorkspaceResource { require.Eventually(w.t, func() bool { var err error workspace, err := w.client.Workspace(ctx, w.workspaceID) - if !assert.NoError(w.t, err) { + if err != nil { return false } if workspace.LatestBuild.Job.CompletedAt == nil { @@ -1331,6 +1393,28 @@ func RandomUsername(t testing.TB) string { return n } +func RandomName(t testing.TB) string { + var sb strings.Builder + var err error + ss := strings.Split(namesgenerator.GetRandomName(10), "_") + for si, s := range ss { + for ri, r := range s { + if ri == 0 { + _, err = sb.WriteRune(unicode.ToTitle(r)) + require.NoError(t, err) + } else { + _, err = sb.WriteRune(r) + require.NoError(t, err) + } + } + if si < len(ss)-1 { + _, err = sb.WriteRune(' ') + require.NoError(t, err) + } + } + return sb.String() +} + // Used to easily create an HTTP transport! type roundTripper func(req *http.Request) (*http.Response, error) diff --git a/coderd/coderdtest/oidctest/idp.go b/coderd/coderdtest/oidctest/idp.go index c0b95619d46b7..844c4df1d2664 100644 --- a/coderd/coderdtest/oidctest/idp.go +++ b/coderd/coderdtest/oidctest/idp.go @@ -1255,7 +1255,9 @@ type ExternalAuthConfigOptions struct { // ValidatePayload is the payload that is used when the user calls the // equivalent of "userinfo" for oauth2. This is not standardized, so is // different for each provider type. - ValidatePayload func(email string) interface{} + // + // The int,error payload can control the response if set. + ValidatePayload func(email string) (interface{}, int, error) // routes is more advanced usage. This allows the caller to // completely customize the response. 
It captures all routes under the /external-auth-validate/* @@ -1292,7 +1294,20 @@ func (f *FakeIDP) ExternalAuthConfig(t testing.TB, id string, custom *ExternalAu case "/user", "/", "": var payload interface{} = "OK" if custom.ValidatePayload != nil { - payload = custom.ValidatePayload(email) + var err error + var code int + payload, code, err = custom.ValidatePayload(email) + if code == 0 && err == nil { + code = http.StatusOK + } + if code == 0 && err != nil { + code = http.StatusUnauthorized + } + if err != nil { + http.Error(rw, fmt.Sprintf("failed validation via custom method: %s", err.Error()), code) + return + } + rw.WriteHeader(code) } _ = json.NewEncoder(rw).Encode(payload) default: diff --git a/coderd/database/db2sdk/db2sdk.go b/coderd/database/db2sdk/db2sdk.go index 2fe9ac9af7a3d..6734dac38d8c3 100644 --- a/coderd/database/db2sdk/db2sdk.go +++ b/coderd/database/db2sdk/db2sdk.go @@ -18,7 +18,6 @@ import ( "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/parameter" "github.com/coder/coder/v2/coderd/rbac" - "github.com/coder/coder/v2/coderd/rbac/policy" "github.com/coder/coder/v2/coderd/workspaceapps/appurl" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/provisionersdk/proto" @@ -171,7 +170,12 @@ func User(user database.User, organizationIDs []uuid.UUID) codersdk.User { } for _, roleName := range user.RBACRoles { - rbacRole, err := rbac.RoleByName(roleName) + // TODO: Currently the api only returns site wide roles. + // Should it return organization roles? + rbacRole, err := rbac.RoleByName(rbac.RoleIdentifier{ + Name: roleName, + OrganizationID: uuid.Nil, + }) if err == nil { convertedUser.Roles = append(convertedUser.Roles, SlimRole(rbacRole)) } else { @@ -205,13 +209,6 @@ func Group(group database.Group, members []database.User) codersdk.Group { } } -func SlimRole(role rbac.Role) codersdk.SlimRole { - return codersdk.SlimRole{ - DisplayName: role.DisplayName, - Name: role.Name, - } -} - func TemplateInsightsParameters(parameterRows []database.GetTemplateParameterInsightsRow) ([]codersdk.TemplateParameterUsage, error) { // Use a stable sort, similarly to how we would sort in the query, note that // we don't sort in the query because order varies depending on the table @@ -526,54 +523,61 @@ func ProvisionerDaemon(dbDaemon database.ProvisionerDaemon) codersdk.Provisioner return result } -func Role(role rbac.Role) codersdk.Role { - roleName, orgIDStr, err := rbac.RoleSplit(role.Name) - if err != nil { - roleName = role.Name +func SlimRole(role rbac.Role) codersdk.SlimRole { + orgID := "" + if role.Identifier.OrganizationID != uuid.Nil { + orgID = role.Identifier.OrganizationID.String() } - return codersdk.Role{ - Name: roleName, - OrganizationID: orgIDStr, - DisplayName: role.DisplayName, - SitePermissions: List(role.Site, Permission), - // This is not perfect. If there are organization permissions in another - // organization, they will be omitted. This should not be allowed, so - // should never happen. 
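Returning to the FakeIDP change above: ValidatePayload now returns the HTTP status and an error alongside the payload, and the handler fills in sensible defaults when the code is left at zero. A hedged sketch of configuring it; the field values and function name are illustrative only:

package example

import (
    "net/http"

    "golang.org/x/xerrors"

    "github.com/coder/coder/v2/coderd/coderdtest/oidctest"
)

func exampleExternalAuthOptions() *oidctest.ExternalAuthConfigOptions {
    return &oidctest.ExternalAuthConfigOptions{
        ValidatePayload: func(email string) (interface{}, int, error) {
            if email == "" {
                // A non-nil error makes the fake IDP reply with this status
                // and the error text (401 if the code were left at zero).
                return nil, http.StatusForbidden, xerrors.New("missing email")
            }
            // A zero code with a nil error falls back to http.StatusOK.
            return map[string]string{"login": email}, 0, nil
        },
    }
}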
- OrganizationPermissions: List(role.Org[orgIDStr], Permission), - UserPermissions: List(role.User, Permission), + return codersdk.SlimRole{ + DisplayName: role.DisplayName, + Name: role.Identifier.Name, + OrganizationID: orgID, } } -func Permission(permission rbac.Permission) codersdk.Permission { - return codersdk.Permission{ - Negate: permission.Negate, - ResourceType: codersdk.RBACResource(permission.ResourceType), - Action: codersdk.RBACAction(permission.Action), +func RBACRole(role rbac.Role) codersdk.Role { + slim := SlimRole(role) + + orgPerms := role.Org[slim.OrganizationID] + return codersdk.Role{ + Name: slim.Name, + OrganizationID: slim.OrganizationID, + DisplayName: slim.DisplayName, + SitePermissions: List(role.Site, RBACPermission), + OrganizationPermissions: List(orgPerms, RBACPermission), + UserPermissions: List(role.User, RBACPermission), } } -func RoleToRBAC(role codersdk.Role) rbac.Role { - orgPerms := map[string][]rbac.Permission{} - if role.OrganizationID != "" { - orgPerms = map[string][]rbac.Permission{ - role.OrganizationID: List(role.OrganizationPermissions, PermissionToRBAC), - } +func Role(role database.CustomRole) codersdk.Role { + orgID := "" + if role.OrganizationID.UUID != uuid.Nil { + orgID = role.OrganizationID.UUID.String() } - return rbac.Role{ - Name: rbac.RoleName(role.Name, role.OrganizationID), - DisplayName: role.DisplayName, - Site: List(role.SitePermissions, PermissionToRBAC), - Org: orgPerms, - User: List(role.UserPermissions, PermissionToRBAC), + return codersdk.Role{ + Name: role.Name, + OrganizationID: orgID, + DisplayName: role.DisplayName, + SitePermissions: List(role.SitePermissions, Permission), + OrganizationPermissions: List(role.OrgPermissions, Permission), + UserPermissions: List(role.UserPermissions, Permission), + } +} + +func Permission(permission database.CustomRolePermission) codersdk.Permission { + return codersdk.Permission{ + Negate: permission.Negate, + ResourceType: codersdk.RBACResource(permission.ResourceType), + Action: codersdk.RBACAction(permission.Action), } } -func PermissionToRBAC(permission codersdk.Permission) rbac.Permission { - return rbac.Permission{ +func RBACPermission(permission rbac.Permission) codersdk.Permission { + return codersdk.Permission{ Negate: permission.Negate, - ResourceType: string(permission.ResourceType), - Action: policy.Action(permission.Action), + ResourceType: codersdk.RBACResource(permission.ResourceType), + Action: codersdk.RBACAction(permission.Action), } } diff --git a/coderd/database/dbauthz/customroles_test.go b/coderd/database/dbauthz/customroles_test.go index aaa2c7a34bbf3..4a544989c599e 100644 --- a/coderd/database/dbauthz/customroles_test.go +++ b/coderd/database/dbauthz/customroles_test.go @@ -1,7 +1,6 @@ package dbauthz_test import ( - "encoding/json" "testing" "github.com/google/uuid" @@ -11,10 +10,12 @@ import ( "cdr.dev/slog" "github.com/coder/coder/v2/coderd/coderdtest" "github.com/coder/coder/v2/coderd/database" + "github.com/coder/coder/v2/coderd/database/db2sdk" "github.com/coder/coder/v2/coderd/database/dbauthz" "github.com/coder/coder/v2/coderd/database/dbmem" "github.com/coder/coder/v2/coderd/rbac" "github.com/coder/coder/v2/coderd/rbac/policy" + "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/testutil" ) @@ -34,10 +35,10 @@ func TestUpsertCustomRoles(t *testing.T) { } canAssignRole := rbac.Role{ - Name: "can-assign", + Identifier: rbac.RoleIdentifier{Name: "can-assign"}, DisplayName: "", Site: rbac.Permissions(map[string][]policy.Action{ - 
rbac.ResourceAssignRole.Type: {policy.ActionCreate}, + rbac.ResourceAssignRole.Type: {policy.ActionRead, policy.ActionCreate}, }), } @@ -50,7 +51,7 @@ func TestUpsertCustomRoles(t *testing.T) { all = append(all, t) case rbac.ExpandableRoles: all = append(all, must(t.Expand())...) - case string: + case rbac.RoleIdentifier: all = append(all, must(rbac.RoleByName(t))) default: panic("unknown type") @@ -60,22 +61,26 @@ func TestUpsertCustomRoles(t *testing.T) { return all } - orgID := uuid.New() + orgID := uuid.NullUUID{ + UUID: uuid.New(), + Valid: true, + } testCases := []struct { name string subject rbac.ExpandableRoles // Perms to create on new custom role - site []rbac.Permission - org map[string][]rbac.Permission - user []rbac.Permission - errorContains string + organizationID uuid.NullUUID + site []codersdk.Permission + org []codersdk.Permission + user []codersdk.Permission + errorContains string }{ { // No roles, so no assign role name: "no-roles", - subject: rbac.RoleNames([]string{}), + subject: rbac.RoleIdentifiers{}, errorContains: "forbidden", }, { @@ -84,45 +89,31 @@ func TestUpsertCustomRoles(t *testing.T) { subject: merge(canAssignRole), }, { - name: "mixed-scopes", - subject: merge(canAssignRole, rbac.RoleOwner()), - site: rbac.Permissions(map[string][]policy.Action{ - rbac.ResourceWorkspace.Type: {policy.ActionRead}, + name: "mixed-scopes", + subject: merge(canAssignRole, rbac.RoleOwner()), + organizationID: orgID, + site: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{ + codersdk.ResourceWorkspace: {codersdk.ActionRead}, + }), + org: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{ + codersdk.ResourceWorkspace: {codersdk.ActionRead}, }), - org: map[string][]rbac.Permission{ - uuid.New().String(): rbac.Permissions(map[string][]policy.Action{ - rbac.ResourceWorkspace.Type: {policy.ActionRead}, - }), - }, errorContains: "cannot assign both org and site permissions", }, - { - name: "multiple-org", - subject: merge(canAssignRole, rbac.RoleOwner()), - org: map[string][]rbac.Permission{ - uuid.New().String(): rbac.Permissions(map[string][]policy.Action{ - rbac.ResourceWorkspace.Type: {policy.ActionRead}, - }), - uuid.New().String(): rbac.Permissions(map[string][]policy.Action{ - rbac.ResourceWorkspace.Type: {policy.ActionRead}, - }), - }, - errorContains: "cannot assign permissions to more than 1", - }, { name: "invalid-action", subject: merge(canAssignRole, rbac.RoleOwner()), - site: rbac.Permissions(map[string][]policy.Action{ + site: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{ // Action does not go with resource - rbac.ResourceWorkspace.Type: {policy.ActionViewInsights}, + codersdk.ResourceWorkspace: {codersdk.ActionViewInsights}, }), errorContains: "invalid action", }, { name: "invalid-resource", subject: merge(canAssignRole, rbac.RoleOwner()), - site: rbac.Permissions(map[string][]policy.Action{ - "foobar": {policy.ActionViewInsights}, + site: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{ + "foobar": {codersdk.ActionViewInsights}, }), errorContains: "invalid resource", }, @@ -130,11 +121,11 @@ func TestUpsertCustomRoles(t *testing.T) { // Not allowing these at this time. 
name: "negative-permission", subject: merge(canAssignRole, rbac.RoleOwner()), - site: []rbac.Permission{ + site: []codersdk.Permission{ { Negate: true, - ResourceType: rbac.ResourceWorkspace.Type, - Action: policy.ActionRead, + ResourceType: codersdk.ResourceWorkspace, + Action: codersdk.ActionRead, }, }, errorContains: "no negative permissions", @@ -142,8 +133,8 @@ func TestUpsertCustomRoles(t *testing.T) { { name: "wildcard", // not allowed subject: merge(canAssignRole, rbac.RoleOwner()), - site: rbac.Permissions(map[string][]policy.Action{ - rbac.ResourceWorkspace.Type: {policy.WildcardSymbol}, + site: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{ + codersdk.ResourceWorkspace: {"*"}, }), errorContains: "no wildcard symbols", }, @@ -151,40 +142,41 @@ func TestUpsertCustomRoles(t *testing.T) { { name: "read-workspace-escalation", subject: merge(canAssignRole), - site: rbac.Permissions(map[string][]policy.Action{ - rbac.ResourceWorkspace.Type: {policy.ActionRead}, + site: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{ + codersdk.ResourceWorkspace: {codersdk.ActionRead}, }), errorContains: "not allowed to grant this permission", }, { - name: "read-workspace-outside-org", - subject: merge(canAssignRole, rbac.RoleOrgAdmin(orgID)), - org: map[string][]rbac.Permission{ - // The org admin is for a different org - uuid.NewString(): rbac.Permissions(map[string][]policy.Action{ - rbac.ResourceWorkspace.Type: {policy.ActionRead}, - }), + name: "read-workspace-outside-org", + organizationID: uuid.NullUUID{ + UUID: uuid.New(), + Valid: true, }, - errorContains: "not allowed to grant this permission", + subject: merge(canAssignRole, rbac.ScopedRoleOrgAdmin(orgID.UUID)), + org: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{ + codersdk.ResourceWorkspace: {codersdk.ActionRead}, + }), + errorContains: "forbidden", }, { name: "user-escalation", // These roles do not grant user perms - subject: merge(canAssignRole, rbac.RoleOrgAdmin(orgID)), - user: rbac.Permissions(map[string][]policy.Action{ - rbac.ResourceWorkspace.Type: {policy.ActionRead}, + subject: merge(canAssignRole, rbac.ScopedRoleOrgAdmin(orgID.UUID)), + user: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{ + codersdk.ResourceWorkspace: {codersdk.ActionRead}, }), errorContains: "not allowed to grant this permission", }, { name: "template-admin-escalation", subject: merge(canAssignRole, rbac.RoleTemplateAdmin()), - site: rbac.Permissions(map[string][]policy.Action{ - rbac.ResourceWorkspace.Type: {policy.ActionRead}, // ok! - rbac.ResourceDeploymentConfig.Type: {policy.ActionUpdate}, // not ok! + site: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{ + codersdk.ResourceWorkspace: {codersdk.ActionRead}, // ok! + codersdk.ResourceDeploymentConfig: {codersdk.ActionUpdate}, // not ok! }), - user: rbac.Permissions(map[string][]policy.Action{ - rbac.ResourceWorkspace.Type: {policy.ActionRead}, // ok! + user: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{ + codersdk.ResourceWorkspace: {codersdk.ActionRead}, // ok! 
}), errorContains: "deployment_config", }, @@ -192,36 +184,34 @@ func TestUpsertCustomRoles(t *testing.T) { { name: "read-workspace-template-admin", subject: merge(canAssignRole, rbac.RoleTemplateAdmin()), - site: rbac.Permissions(map[string][]policy.Action{ - rbac.ResourceWorkspace.Type: {policy.ActionRead}, + site: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{ + codersdk.ResourceWorkspace: {codersdk.ActionRead}, }), }, { - name: "read-workspace-in-org", - subject: merge(canAssignRole, rbac.RoleOrgAdmin(orgID)), - org: map[string][]rbac.Permission{ - // Org admin of this org, this is ok! - orgID.String(): rbac.Permissions(map[string][]policy.Action{ - rbac.ResourceWorkspace.Type: {policy.ActionRead}, - }), - }, + name: "read-workspace-in-org", + subject: merge(canAssignRole, rbac.ScopedRoleOrgAdmin(orgID.UUID)), + organizationID: orgID, + org: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{ + codersdk.ResourceWorkspace: {codersdk.ActionRead}, + }), }, { name: "user-perms", // This is weird, but is ok subject: merge(canAssignRole, rbac.RoleMember()), - user: rbac.Permissions(map[string][]policy.Action{ - rbac.ResourceWorkspace.Type: {policy.ActionRead}, + user: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{ + codersdk.ResourceWorkspace: {codersdk.ActionRead}, }), }, { name: "site+user-perms", subject: merge(canAssignRole, rbac.RoleMember(), rbac.RoleTemplateAdmin()), - site: rbac.Permissions(map[string][]policy.Action{ - rbac.ResourceWorkspace.Type: {policy.ActionRead}, + site: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{ + codersdk.ResourceWorkspace: {codersdk.ActionRead}, }), - user: rbac.Permissions(map[string][]policy.Action{ - rbac.ResourceWorkspace.Type: {policy.ActionRead}, + user: codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{ + codersdk.ResourceWorkspace: {codersdk.ActionRead}, }), }, } @@ -244,15 +234,38 @@ func TestUpsertCustomRoles(t *testing.T) { _, err := az.UpsertCustomRole(ctx, database.UpsertCustomRoleParams{ Name: "test-role", DisplayName: "", - SitePermissions: must(json.Marshal(tc.site)), - OrgPermissions: must(json.Marshal(tc.org)), - UserPermissions: must(json.Marshal(tc.user)), + OrganizationID: tc.organizationID, + SitePermissions: db2sdk.List(tc.site, convertSDKPerm), + OrgPermissions: db2sdk.List(tc.org, convertSDKPerm), + UserPermissions: db2sdk.List(tc.user, convertSDKPerm), }) if tc.errorContains != "" { require.ErrorContains(t, err, tc.errorContains) } else { require.NoError(t, err) + + // Verify the role is fetched with the lookup filter. 
+ roles, err := az.CustomRoles(ctx, database.CustomRolesParams{ + LookupRoles: []database.NameOrganizationPair{ + { + Name: "test-role", + OrganizationID: tc.organizationID.UUID, + }, + }, + ExcludeOrgRoles: false, + OrganizationID: uuid.UUID{}, + }) + require.NoError(t, err) + require.Len(t, roles, 1) } }) } } + +func convertSDKPerm(perm codersdk.Permission) database.CustomRolePermission { + return database.CustomRolePermission{ + Negate: perm.Negate, + ResourceType: string(perm.ResourceType), + Action: policy.Action(perm.Action), + } +} diff --git a/coderd/database/dbauthz/dbauthz.go b/coderd/database/dbauthz/dbauthz.go index 3a814cfed88d2..098922527c81f 100644 --- a/coderd/database/dbauthz/dbauthz.go +++ b/coderd/database/dbauthz/dbauthz.go @@ -162,7 +162,7 @@ var ( ID: uuid.Nil.String(), Roles: rbac.Roles([]rbac.Role{ { - Name: "provisionerd", + Identifier: rbac.RoleIdentifier{Name: "provisionerd"}, DisplayName: "Provisioner Daemon", Site: rbac.Permissions(map[string][]policy.Action{ // TODO: Add ProvisionerJob resource type. @@ -191,7 +191,7 @@ var ( ID: uuid.Nil.String(), Roles: rbac.Roles([]rbac.Role{ { - Name: "autostart", + Identifier: rbac.RoleIdentifier{Name: "autostart"}, DisplayName: "Autostart Daemon", Site: rbac.Permissions(map[string][]policy.Action{ rbac.ResourceSystem.Type: {policy.WildcardSymbol}, @@ -213,7 +213,7 @@ var ( ID: uuid.Nil.String(), Roles: rbac.Roles([]rbac.Role{ { - Name: "hangdetector", + Identifier: rbac.RoleIdentifier{Name: "hangdetector"}, DisplayName: "Hang Detector Daemon", Site: rbac.Permissions(map[string][]policy.Action{ rbac.ResourceSystem.Type: {policy.WildcardSymbol}, @@ -232,17 +232,17 @@ var ( ID: uuid.Nil.String(), Roles: rbac.Roles([]rbac.Role{ { - Name: "system", + Identifier: rbac.RoleIdentifier{Name: "system"}, DisplayName: "Coder", Site: rbac.Permissions(map[string][]policy.Action{ rbac.ResourceWildcard.Type: {policy.ActionRead}, rbac.ResourceApiKey.Type: rbac.ResourceApiKey.AvailableActions(), rbac.ResourceGroup.Type: {policy.ActionCreate, policy.ActionUpdate}, rbac.ResourceAssignRole.Type: rbac.ResourceAssignRole.AvailableActions(), + rbac.ResourceAssignOrgRole.Type: rbac.ResourceAssignOrgRole.AvailableActions(), rbac.ResourceSystem.Type: {policy.WildcardSymbol}, rbac.ResourceOrganization.Type: {policy.ActionCreate, policy.ActionRead}, rbac.ResourceOrganizationMember.Type: {policy.ActionCreate}, - rbac.ResourceAssignOrgRole.Type: {policy.ActionRead, policy.ActionCreate, policy.ActionDelete}, rbac.ResourceProvisionerDaemon.Type: {policy.ActionCreate, policy.ActionUpdate}, rbac.ResourceUser.Type: rbac.ResourceUser.AvailableActions(), rbac.ResourceWorkspaceDormant.Type: {policy.ActionUpdate, policy.ActionDelete, policy.ActionWorkspaceStop}, @@ -582,8 +582,38 @@ func (q *querier) authorizeUpdateFileTemplate(ctx context.Context, file database } } +// convertToOrganizationRoles converts a set of scoped role names to their unique +// scoped names. The database stores roles as an array of strings, and needs to be +// converted. +// TODO: Maybe make `[]rbac.RoleIdentifier` a custom type that implements a sql scanner +// to remove the need for these converters? +func (*querier) convertToOrganizationRoles(organizationID uuid.UUID, names []string) ([]rbac.RoleIdentifier, error) { + uniques := make([]rbac.RoleIdentifier, 0, len(names)) + for _, name := range names { + // This check is a developer safety check. Old code might try to invoke this code path with + // organization id suffixes. Catch this and return a nice error so it can be fixed. 
+ if strings.Contains(name, ":") { + return nil, xerrors.Errorf("attempt to assign a role %q, remove the ': suffix", name) + } + + uniques = append(uniques, rbac.RoleIdentifier{Name: name, OrganizationID: organizationID}) + } + + return uniques, nil +} + +// convertToDeploymentRoles converts string role names into deployment wide roles. +func (*querier) convertToDeploymentRoles(names []string) []rbac.RoleIdentifier { + uniques := make([]rbac.RoleIdentifier, 0, len(names)) + for _, name := range names { + uniques = append(uniques, rbac.RoleIdentifier{Name: name}) + } + + return uniques +} + // canAssignRoles handles assigning built in and custom roles. -func (q *querier) canAssignRoles(ctx context.Context, orgID *uuid.UUID, added, removed []string) error { +func (q *querier) canAssignRoles(ctx context.Context, orgID *uuid.UUID, added, removed []rbac.RoleIdentifier) error { actor, ok := ActorFromContext(ctx) if !ok { return NoActorError @@ -592,33 +622,29 @@ func (q *querier) canAssignRoles(ctx context.Context, orgID *uuid.UUID, added, r roleAssign := rbac.ResourceAssignRole shouldBeOrgRoles := false if orgID != nil { - roleAssign = roleAssign.InOrg(*orgID) + roleAssign = rbac.ResourceAssignOrgRole.InOrg(*orgID) shouldBeOrgRoles = true } grantedRoles := append(added, removed...) - customRoles := make([]string, 0) + customRoles := make([]rbac.RoleIdentifier, 0) // Validate that the roles being assigned are valid. for _, r := range grantedRoles { - roleOrgIDStr, isOrgRole := rbac.IsOrgRole(r) + isOrgRole := r.OrganizationID != uuid.Nil if shouldBeOrgRoles && !isOrgRole { return xerrors.Errorf("Must only update org roles") } + if !shouldBeOrgRoles && isOrgRole { return xerrors.Errorf("Must only update site wide roles") } if shouldBeOrgRoles { - roleOrgID, err := uuid.Parse(roleOrgIDStr) - if err != nil { - return xerrors.Errorf("role %q has invalid uuid for org: %w", r, err) - } - if orgID == nil { return xerrors.Errorf("should never happen, orgID is nil, but trying to assign an organization role") } - if roleOrgID != *orgID { + if r.OrganizationID != *orgID { return xerrors.Errorf("attempted to assign role from a different org, role %q to %q", r, orgID.String()) } } @@ -629,7 +655,7 @@ func (q *querier) canAssignRoles(ctx context.Context, orgID *uuid.UUID, added, r } } - customRolesMap := make(map[string]struct{}, len(customRoles)) + customRolesMap := make(map[rbac.RoleIdentifier]struct{}, len(customRoles)) for _, r := range customRoles { customRolesMap[r] = struct{}{} } @@ -649,7 +675,7 @@ func (q *querier) canAssignRoles(ctx context.Context, orgID *uuid.UUID, added, r // returns them all, but then someone could pass in a large list to make us do // a lot of loop iterations. if !slices.ContainsFunc(expandedCustomRoles, func(customRole rbac.Role) bool { - return strings.EqualFold(customRole.Name, role) + return strings.EqualFold(customRole.Identifier.Name, role.Name) && customRole.Identifier.OrganizationID == role.OrganizationID }) { return xerrors.Errorf("%q is not a supported role", role) } @@ -671,8 +697,14 @@ func (q *querier) canAssignRoles(ctx context.Context, orgID *uuid.UUID, added, r for _, roleName := range grantedRoles { if _, isCustom := customRolesMap[roleName]; isCustom { - // For now, use a constant name so our static assign map still works. - roleName = rbac.CustomSiteRole() + // To support a dynamic mapping of what roles can assign what, we need + // to store this in the database. For now, just use a static role so + // owners and org admins can assign roles. 
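The net effect of these helpers is that roles stop being passed around as "name:<organization id>" strings: the database still stores bare names, scoped rbac.RoleIdentifier values are rebuilt before any authorization check, and custom roles are collapsed onto static placeholders for the assign check. A small illustrative sketch; the role names are made up:

package example

import (
    "github.com/google/uuid"

    "github.com/coder/coder/v2/coderd/rbac"
)

func exampleIdentifiers(orgID uuid.UUID) {
    // A site-wide role has no organization ID...
    site := rbac.RoleIdentifier{Name: "auditor"}
    // ...while an org-scoped role carries it explicitly instead of a ":<id>" suffix.
    org := rbac.RoleIdentifier{Name: "my-custom-role", OrganizationID: orgID}

    _ = site.IsOrgRole() // false
    _ = org.IsOrgRole()  // true

    // Custom roles map onto static placeholders when checking who may assign them.
    _ = rbac.CustomSiteRole()
    _ = rbac.CustomOrganizationRole(orgID)
}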
+ if roleName.IsOrgRole() { + roleName = rbac.CustomOrganizationRole(roleName.OrganizationID) + } else { + roleName = rbac.CustomSiteRole() + } } if !rbac.CanAssignRole(actor.Roles, roleName) { @@ -785,6 +817,13 @@ func (q *querier) AcquireLock(ctx context.Context, id int64) error { return q.db.AcquireLock(ctx, id) } +func (q *querier) AcquireNotificationMessages(ctx context.Context, arg database.AcquireNotificationMessagesParams) ([]database.AcquireNotificationMessagesRow, error) { + if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceSystem); err != nil { + return nil, err + } + return q.db.AcquireNotificationMessages(ctx, arg) +} + // TODO: We need to create a ProvisionerJob resource type func (q *querier) AcquireProvisionerJob(ctx context.Context, arg database.AcquireProvisionerJobParams) (database.ProvisionerJob, error) { // if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceSystem); err != nil { @@ -829,6 +868,20 @@ func (q *querier) BatchUpdateWorkspaceLastUsedAt(ctx context.Context, arg databa return q.db.BatchUpdateWorkspaceLastUsedAt(ctx, arg) } +func (q *querier) BulkMarkNotificationMessagesFailed(ctx context.Context, arg database.BulkMarkNotificationMessagesFailedParams) (int64, error) { + if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceSystem); err != nil { + return 0, err + } + return q.db.BulkMarkNotificationMessagesFailed(ctx, arg) +} + +func (q *querier) BulkMarkNotificationMessagesSent(ctx context.Context, arg database.BulkMarkNotificationMessagesSentParams) (int64, error) { + if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceSystem); err != nil { + return 0, err + } + return q.db.BulkMarkNotificationMessagesSent(ctx, arg) +} + func (q *querier) CleanTailnetCoordinators(ctx context.Context) error { if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceTailnetCoordinator); err != nil { return err @@ -978,6 +1031,13 @@ func (q *querier) DeleteOAuth2ProviderAppTokensByAppAndUserID(ctx context.Contex return q.db.DeleteOAuth2ProviderAppTokensByAppAndUserID(ctx, arg) } +func (q *querier) DeleteOldNotificationMessages(ctx context.Context) error { + if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceSystem); err != nil { + return err + } + return q.db.DeleteOldNotificationMessages(ctx) +} + func (q *querier) DeleteOldProvisionerDaemons(ctx context.Context) error { if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceSystem); err != nil { return err @@ -1003,6 +1063,16 @@ func (q *querier) DeleteOrganization(ctx context.Context, id uuid.UUID) error { return deleteQ(q.log, q.auth, q.db.GetOrganizationByID, q.db.DeleteOrganization)(ctx, id) } +func (q *querier) DeleteOrganizationMember(ctx context.Context, arg database.DeleteOrganizationMemberParams) error { + return deleteQ[database.OrganizationMember](q.log, q.auth, func(ctx context.Context, arg database.DeleteOrganizationMemberParams) (database.OrganizationMember, error) { + member, err := database.ExpectOne(q.OrganizationMembers(ctx, database.OrganizationMembersParams(arg))) + if err != nil { + return database.OrganizationMember{}, err + } + return member.OrganizationMember, nil + }, q.db.DeleteOrganizationMember)(ctx, arg) +} + func (q *querier) DeleteReplicasUpdatedBefore(ctx context.Context, updatedAt time.Time) error { if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceSystem); err != nil { return err @@ -1072,6 +1142,13 @@ func (q *querier) DeleteWorkspaceAgentPortSharesByTemplate(ctx 
context.Context, return q.db.DeleteWorkspaceAgentPortSharesByTemplate(ctx, templateID) } +func (q *querier) EnqueueNotificationMessage(ctx context.Context, arg database.EnqueueNotificationMessageParams) (database.NotificationMessage, error) { + if err := q.authorizeContext(ctx, policy.ActionCreate, rbac.ResourceSystem); err != nil { + return database.NotificationMessage{}, err + } + return q.db.EnqueueNotificationMessage(ctx, arg) +} + func (q *querier) FavoriteWorkspace(ctx context.Context, id uuid.UUID) error { fetch := func(ctx context.Context, id uuid.UUID) (database.Workspace, error) { return q.db.GetWorkspaceByID(ctx, id) @@ -1079,6 +1156,13 @@ func (q *querier) FavoriteWorkspace(ctx context.Context, id uuid.UUID) error { return update(q.log, q.auth, fetch, q.db.FavoriteWorkspace)(ctx, id) } +func (q *querier) FetchNewMessageMetadata(ctx context.Context, arg database.FetchNewMessageMetadataParams) (database.FetchNewMessageMetadataRow, error) { + if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceSystem); err != nil { + return database.FetchNewMessageMetadataRow{}, err + } + return q.db.FetchNewMessageMetadata(ctx, arg) +} + func (q *querier) GetAPIKeyByID(ctx context.Context, id string) (database.APIKey, error) { return fetch(q.log, q.auth, q.db.GetAPIKeyByID)(ctx, id) } @@ -1158,12 +1242,21 @@ func (q *querier) GetApplicationName(ctx context.Context) (string, error) { } func (q *querier) GetAuditLogsOffset(ctx context.Context, arg database.GetAuditLogsOffsetParams) ([]database.GetAuditLogsOffsetRow, error) { - // To optimize audit logs, we only check the global audit log permission once. - // This is because we expect a large unbounded set of audit logs, and applying a SQL - // filter would slow down the query for no benefit. - if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceAuditLog); err != nil { + // To optimize the authz checks for audit logs, do not run an authorize + // check on each individual audit log row. In practice, audit logs are either + // fetched from a global or an organization scope. + // Applying a SQL filter would slow down the query for no benefit on how this query is + // actually used. 
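This is the server-side counterpart of the organization: search filter exercised by the audit-log tests earlier in this diff; when the parsed request carries an organization ID, the single authorize call targets the org-scoped audit-log object instead of the site-wide one. A sketch of the client-side query that reaches this path, using only fields shown elsewhere in this diff:

package example

import (
    "context"
    "fmt"

    "github.com/google/uuid"

    "github.com/coder/coder/v2/codersdk"
)

func orgScopedAuditLogs(ctx context.Context, client *codersdk.Client, orgID uuid.UUID) error {
    // Without this filter, a user who only holds org-scoped audit permissions
    // gets a 403, as the Organization test earlier in this diff demonstrates.
    _, err := client.AuditLogs(ctx, codersdk.AuditLogsRequest{
        SearchQuery: fmt.Sprintf("organization:%s", orgID),
        Pagination:  codersdk.Pagination{Limit: 25},
    })
    return err
}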
+ + object := rbac.ResourceAuditLog + if arg.OrganizationID != uuid.Nil { + object = object.InOrg(arg.OrganizationID) + } + + if err := q.authorizeContext(ctx, policy.ActionRead, object); err != nil { return nil, err } + return q.db.GetAuditLogsOffset(ctx, arg) } @@ -1279,11 +1372,25 @@ func (q *querier) GetGroupByOrgAndName(ctx context.Context, arg database.GetGrou return fetch(q.log, q.auth, q.db.GetGroupByOrgAndName)(ctx, arg) } -func (q *querier) GetGroupMembers(ctx context.Context, id uuid.UUID) ([]database.User, error) { +func (q *querier) GetGroupMembers(ctx context.Context) ([]database.GroupMember, error) { + if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceSystem); err != nil { + return nil, err + } + return q.db.GetGroupMembers(ctx) +} + +func (q *querier) GetGroupMembersByGroupID(ctx context.Context, id uuid.UUID) ([]database.User, error) { if _, err := q.GetGroupByID(ctx, id); err != nil { // AuthZ check return nil, err } - return q.db.GetGroupMembers(ctx, id) + return q.db.GetGroupMembersByGroupID(ctx, id) +} + +func (q *querier) GetGroups(ctx context.Context) ([]database.Group, error) { + if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceSystem); err != nil { + return nil, err + } + return q.db.GetGroups(ctx) } func (q *querier) GetGroupsByOrganizationAndUserID(ctx context.Context, arg database.GetGroupsByOrganizationAndUserIDParams) ([]database.Group, error) { @@ -1450,14 +1557,6 @@ func (q *querier) GetOrganizationIDsByMemberIDs(ctx context.Context, ids []uuid. return fetchWithPostFilter(q.auth, policy.ActionRead, q.db.GetOrganizationIDsByMemberIDs)(ctx, ids) } -func (q *querier) GetOrganizationMemberByUserID(ctx context.Context, arg database.GetOrganizationMemberByUserIDParams) (database.OrganizationMember, error) { - return fetch(q.log, q.auth, q.db.GetOrganizationMemberByUserID)(ctx, arg) -} - -func (q *querier) GetOrganizationMembershipsByUserID(ctx context.Context, userID uuid.UUID) ([]database.OrganizationMember, error) { - return fetchWithPostFilter(q.auth, policy.ActionRead, q.db.GetOrganizationMembershipsByUserID)(ctx, userID) -} - func (q *querier) GetOrganizations(ctx context.Context) ([]database.Organization, error) { fetch := func(ctx context.Context, _ interface{}) ([]database.Organization, error) { return q.db.GetOrganizations(ctx) @@ -2471,9 +2570,14 @@ func (q *querier) InsertOrganization(ctx context.Context, arg database.InsertOrg } func (q *querier) InsertOrganizationMember(ctx context.Context, arg database.InsertOrganizationMemberParams) (database.OrganizationMember, error) { + orgRoles, err := q.convertToOrganizationRoles(arg.OrganizationID, arg.Roles) + if err != nil { + return database.OrganizationMember{}, xerrors.Errorf("converting to organization roles: %w", err) + } + // All roles are added roles. Org member is always implied. - addedRoles := append(arg.Roles, rbac.RoleOrgMember(arg.OrganizationID)) - err := q.canAssignRoles(ctx, &arg.OrganizationID, addedRoles, []string{}) + addedRoles := append(orgRoles, rbac.ScopedRoleOrgMember(arg.OrganizationID)) + err = q.canAssignRoles(ctx, &arg.OrganizationID, addedRoles, []rbac.RoleIdentifier{}) if err != nil { return database.OrganizationMember{}, err } @@ -2559,8 +2663,8 @@ func (q *querier) InsertTemplateVersionWorkspaceTag(ctx context.Context, arg dat func (q *querier) InsertUser(ctx context.Context, arg database.InsertUserParams) (database.User, error) { // Always check if the assigned roles can actually be assigned by this actor. 
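The role-mutation paths that follow all share one shape now that roles travel as `rbac.RoleIdentifier` values instead of `name:org` strings: convert whatever is stored, append the implied base role, diff against the current set, and require that the actor can assign what was added and revoke what was removed. A condensed sketch of that pattern, assuming this repository's `rbac` package (the function name is illustrative):

```go
package example

import "github.com/coder/coder/v2/coderd/rbac"

// roleDelta mirrors the pattern used by UpdateUserRoles and UpdateMemberRoles
// in this diff: the implied member role is always part of the desired set, and
// rbac.ChangeRoleSet reports which roles were actually added and removed so
// both sets can be checked (via canAssignRoles) before anything is written.
func roleDelta(current, desired []rbac.RoleIdentifier) (added, removed []rbac.RoleIdentifier) {
	desired = append(desired, rbac.RoleMember()) // every user implicitly keeps the member role
	return rbac.ChangeRoleSet(current, desired)
}
```

For organization members the implied role is `rbac.ScopedRoleOrgMember(orgID)` rather than `rbac.RoleMember()`, as the UpdateMemberRoles hunk below shows.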
- impliedRoles := append([]string{rbac.RoleMember()}, arg.RBACRoles...) - err := q.canAssignRoles(ctx, nil, impliedRoles, []string{}) + impliedRoles := append([]rbac.RoleIdentifier{rbac.RoleMember()}, q.convertToDeploymentRoles(arg.RBACRoles)...) + err := q.canAssignRoles(ctx, nil, impliedRoles, []rbac.RoleIdentifier{}) if err != nil { return database.User{}, err } @@ -2740,6 +2844,10 @@ func (q *querier) ListWorkspaceAgentPortShares(ctx context.Context, workspaceID return q.db.ListWorkspaceAgentPortShares(ctx, workspaceID) } +func (q *querier) OrganizationMembers(ctx context.Context, arg database.OrganizationMembersParams) ([]database.OrganizationMembersRow, error) { + return fetchWithPostFilter(q.auth, policy.ActionRead, q.db.OrganizationMembers)(ctx, arg) +} + func (q *querier) ReduceWorkspaceAgentShareLevelToAuthenticatedByTemplate(ctx context.Context, templateID uuid.UUID) error { template, err := q.db.GetTemplateByID(ctx, templateID) if err != nil { @@ -2839,17 +2947,30 @@ func (q *querier) UpdateInactiveUsersToDormant(ctx context.Context, lastSeenAfte func (q *querier) UpdateMemberRoles(ctx context.Context, arg database.UpdateMemberRolesParams) (database.OrganizationMember, error) { // Authorized fetch will check that the actor has read access to the org member since the org member is returned. - member, err := q.GetOrganizationMemberByUserID(ctx, database.GetOrganizationMemberByUserIDParams{ + member, err := database.ExpectOne(q.OrganizationMembers(ctx, database.OrganizationMembersParams{ OrganizationID: arg.OrgID, UserID: arg.UserID, - }) + })) + if err != nil { + return database.OrganizationMember{}, err + } + + originalRoles, err := q.convertToOrganizationRoles(member.OrganizationMember.OrganizationID, member.OrganizationMember.Roles) + if err != nil { + return database.OrganizationMember{}, xerrors.Errorf("convert original roles: %w", err) + } + + // The 'rbac' package expects role names to be scoped. + // Convert the argument roles for validation. + scopedGranted, err := q.convertToOrganizationRoles(arg.OrgID, arg.GrantedRoles) if err != nil { return database.OrganizationMember{}, err } // The org member role is always implied. - impliedTypes := append(arg.GrantedRoles, rbac.RoleOrgMember(arg.OrgID)) - added, removed := rbac.ChangeRoleSet(member.Roles, impliedTypes) + impliedTypes := append(scopedGranted, rbac.ScopedRoleOrgMember(arg.OrgID)) + + added, removed := rbac.ChangeRoleSet(originalRoles, impliedTypes) err = q.canAssignRoles(ctx, &arg.OrgID, added, removed) if err != nil { return database.OrganizationMember{}, err @@ -3190,9 +3311,9 @@ func (q *querier) UpdateUserRoles(ctx context.Context, arg database.UpdateUserRo } // The member role is always implied. - impliedTypes := append(arg.GrantedRoles, rbac.RoleMember()) + impliedTypes := append(q.convertToDeploymentRoles(arg.GrantedRoles), rbac.RoleMember()) // If the changeset is nothing, less rbac checks need to be done. - added, removed := rbac.ChangeRoleSet(user.RBACRoles, impliedTypes) + added, removed := rbac.ChangeRoleSet(q.convertToDeploymentRoles(user.RBACRoles), impliedTypes) err = q.canAssignRoles(ctx, nil, added, removed) if err != nil { return database.User{}, err @@ -3436,18 +3557,31 @@ func (q *querier) UpsertCustomRole(ctx context.Context, arg database.UpsertCusto return database.CustomRole{}, NoActorError } - // TODO: If this is an org role, check the org assign role type. 
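The UpsertCustomRole hunk just below switches the authorization object on whether an organization ID is present, and adds a guard so organization-scoped permissions cannot be attached to a site-level role. A hedged sketch of the rejected shape, using only fields visible in this diff; the values and the helper name are illustrative:

```go
package example

import (
	"context"

	"github.com/coder/coder/v2/coderd/database"
)

// rejectOrgPermsWithoutOrg shows the guard added below: organization
// permissions with no OrganizationID are rejected before the role is
// validated or written.
func rejectOrgPermsWithoutOrg(ctx context.Context, store database.Store) error {
	_, err := store.UpsertCustomRole(ctx, database.UpsertCustomRoleParams{
		Name:        "org-auditor",
		DisplayName: "Org Auditor",
		// Any organization permission triggers the guard when OrganizationID
		// is left as the zero uuid.NullUUID.
		OrgPermissions: []database.CustomRolePermission{{}},
	})
	return err // wraps "organization permissions require specifying an organization id"
}
```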
- if err := q.authorizeContext(ctx, policy.ActionCreate, rbac.ResourceAssignRole); err != nil { - return database.CustomRole{}, err + // Org and site role upsert share the same query. So switch the assertion based on the org uuid. + if arg.OrganizationID.UUID != uuid.Nil { + if err := q.authorizeContext(ctx, policy.ActionCreate, rbac.ResourceAssignOrgRole.InOrg(arg.OrganizationID.UUID)); err != nil { + return database.CustomRole{}, err + } + } else { + if err := q.authorizeContext(ctx, policy.ActionCreate, rbac.ResourceAssignRole); err != nil { + return database.CustomRole{}, err + } + } + + if arg.OrganizationID.UUID == uuid.Nil && len(arg.OrgPermissions) > 0 { + return database.CustomRole{}, xerrors.Errorf("organization permissions require specifying an organization id") } - // There is quite a bit of validation we should do here. First, let's make sure the json data is correct. + // There is quite a bit of validation we should do here. + // The rbac.Role has a 'Valid()' function on it that will do a lot + // of checks. rbacRole, err := rolestore.ConvertDBRole(database.CustomRole{ Name: arg.Name, DisplayName: arg.DisplayName, SitePermissions: arg.SitePermissions, OrgPermissions: arg.OrgPermissions, UserPermissions: arg.UserPermissions, + OrganizationID: arg.OrganizationID, }) if err != nil { return database.CustomRole{}, xerrors.Errorf("invalid args: %w", err) diff --git a/coderd/database/dbauthz/dbauthz_test.go b/coderd/database/dbauthz/dbauthz_test.go index 9507e1b83c00e..3b663d3fa9561 100644 --- a/coderd/database/dbauthz/dbauthz_test.go +++ b/coderd/database/dbauthz/dbauthz_test.go @@ -13,7 +13,9 @@ import ( "golang.org/x/xerrors" "cdr.dev/slog" + "github.com/coder/coder/v2/coderd/database/db2sdk" "github.com/coder/coder/v2/coderd/rbac/policy" + "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/coderd/coderdtest" "github.com/coder/coder/v2/coderd/database" @@ -80,7 +82,7 @@ func TestInTX(t *testing.T) { }, slog.Make(), coderdtest.AccessControlStorePointer()) actor := rbac.Subject{ ID: uuid.NewString(), - Roles: rbac.RoleNames{rbac.RoleOwner()}, + Roles: rbac.RoleIdentifiers{rbac.RoleOwner()}, Groups: []string{}, Scope: rbac.ScopeAll, } @@ -134,7 +136,7 @@ func TestDBAuthzRecursive(t *testing.T) { }, slog.Make(), coderdtest.AccessControlStorePointer()) actor := rbac.Subject{ ID: uuid.NewString(), - Roles: rbac.RoleNames{rbac.RoleOwner()}, + Roles: rbac.RoleIdentifiers{rbac.RoleOwner()}, Groups: []string{}, Scope: rbac.ScopeAll, } @@ -261,7 +263,7 @@ func (s *MethodTestSuite) TestAuditLogs() { _ = dbgen.AuditLog(s.T(), db, database.AuditLog{}) _ = dbgen.AuditLog(s.T(), db, database.AuditLog{}) check.Args(database.GetAuditLogsOffsetParams{ - Limit: 10, + LimitOpt: 10, }).Asserts(rbac.ResourceAuditLog, policy.ActionRead) })) } @@ -312,11 +314,19 @@ func (s *MethodTestSuite) TestGroup() { Name: g.Name, }).Asserts(g, policy.ActionRead).Returns(g) })) - s.Run("GetGroupMembers", s.Subtest(func(db database.Store, check *expects) { + s.Run("GetGroupMembersByGroupID", s.Subtest(func(db database.Store, check *expects) { g := dbgen.Group(s.T(), db, database.Group{}) _ = dbgen.GroupMember(s.T(), db, database.GroupMember{}) check.Args(g.ID).Asserts(g, policy.ActionRead) })) + s.Run("GetGroupMembers", s.Subtest(func(db database.Store, check *expects) { + _ = dbgen.GroupMember(s.T(), db, database.GroupMember{}) + check.Asserts(rbac.ResourceSystem, policy.ActionRead) + })) + s.Run("GetGroups", s.Subtest(func(db database.Store, check *expects) { + _ = dbgen.Group(s.T(), db, 
database.Group{}) + check.Asserts(rbac.ResourceSystem, policy.ActionRead) + })) s.Run("GetGroupsByOrganizationAndUserID", s.Subtest(func(db database.Store, check *expects) { g := dbgen.Group(s.T(), db, database.Group{}) gm := dbgen.GroupMember(s.T(), db, database.GroupMember{GroupID: g.ID}) @@ -594,19 +604,6 @@ func (s *MethodTestSuite) TestOrganization() { check.Args([]uuid.UUID{ma.UserID, mb.UserID}). Asserts(rbac.ResourceUserObject(ma.UserID), policy.ActionRead, rbac.ResourceUserObject(mb.UserID), policy.ActionRead) })) - s.Run("GetOrganizationMemberByUserID", s.Subtest(func(db database.Store, check *expects) { - mem := dbgen.OrganizationMember(s.T(), db, database.OrganizationMember{}) - check.Args(database.GetOrganizationMemberByUserIDParams{ - OrganizationID: mem.OrganizationID, - UserID: mem.UserID, - }).Asserts(mem, policy.ActionRead).Returns(mem) - })) - s.Run("GetOrganizationMembershipsByUserID", s.Subtest(func(db database.Store, check *expects) { - u := dbgen.User(s.T(), db, database.User{}) - a := dbgen.OrganizationMember(s.T(), db, database.OrganizationMember{UserID: u.ID}) - b := dbgen.OrganizationMember(s.T(), db, database.OrganizationMember{UserID: u.ID}) - check.Args(u.ID).Asserts(a, policy.ActionRead, b, policy.ActionRead).Returns(slice.New(a, b)) - })) s.Run("GetOrganizations", s.Subtest(func(db database.Store, check *expects) { def, _ := db.GetDefaultOrganization(context.Background()) a := dbgen.Organization(s.T(), db, database.Organization{}) @@ -634,11 +631,28 @@ func (s *MethodTestSuite) TestOrganization() { check.Args(database.InsertOrganizationMemberParams{ OrganizationID: o.ID, UserID: u.ID, - Roles: []string{rbac.RoleOrgAdmin(o.ID)}, + Roles: []string{codersdk.RoleOrganizationAdmin}, }).Asserts( - rbac.ResourceAssignRole.InOrg(o.ID), policy.ActionAssign, + rbac.ResourceAssignOrgRole.InOrg(o.ID), policy.ActionAssign, rbac.ResourceOrganizationMember.InOrg(o.ID).WithID(u.ID), policy.ActionCreate) })) + s.Run("DeleteOrganizationMember", s.Subtest(func(db database.Store, check *expects) { + o := dbgen.Organization(s.T(), db, database.Organization{}) + u := dbgen.User(s.T(), db, database.User{}) + member := dbgen.OrganizationMember(s.T(), db, database.OrganizationMember{UserID: u.ID, OrganizationID: o.ID}) + + check.Args(database.DeleteOrganizationMemberParams{ + OrganizationID: o.ID, + UserID: u.ID, + }).Asserts( + // Reads the org member before it tries to delete it + member, policy.ActionRead, + member, policy.ActionDelete). + // SQL Filter returns a 404 + WithNotAuthorized("no rows"). + WithCancelled("no rows"). 
+ Errors(sql.ErrNoRows) + })) s.Run("UpdateOrganization", s.Subtest(func(db database.Store, check *expects) { o := dbgen.Organization(s.T(), db, database.Organization{ Name: "something-unique", @@ -656,13 +670,29 @@ func (s *MethodTestSuite) TestOrganization() { o.ID, ).Asserts(o, policy.ActionDelete) })) + s.Run("OrganizationMembers", s.Subtest(func(db database.Store, check *expects) { + o := dbgen.Organization(s.T(), db, database.Organization{}) + u := dbgen.User(s.T(), db, database.User{}) + mem := dbgen.OrganizationMember(s.T(), db, database.OrganizationMember{ + OrganizationID: o.ID, + UserID: u.ID, + Roles: []string{rbac.RoleOrgAdmin()}, + }) + + check.Args(database.OrganizationMembersParams{ + OrganizationID: uuid.UUID{}, + UserID: uuid.UUID{}, + }).Asserts( + mem, policy.ActionRead, + ) + })) s.Run("UpdateMemberRoles", s.Subtest(func(db database.Store, check *expects) { o := dbgen.Organization(s.T(), db, database.Organization{}) u := dbgen.User(s.T(), db, database.User{}) mem := dbgen.OrganizationMember(s.T(), db, database.OrganizationMember{ OrganizationID: o.ID, UserID: u.ID, - Roles: []string{rbac.RoleOrgAdmin(o.ID)}, + Roles: []string{codersdk.RoleOrganizationAdmin}, }) out := mem out.Roles = []string{} @@ -671,11 +701,14 @@ func (s *MethodTestSuite) TestOrganization() { GrantedRoles: []string{}, UserID: u.ID, OrgID: o.ID, - }).Asserts( - mem, policy.ActionRead, - rbac.ResourceAssignRole.InOrg(o.ID), policy.ActionAssign, // org-mem - rbac.ResourceAssignRole.InOrg(o.ID), policy.ActionDelete, // org-admin - ).Returns(out) + }). + WithNotAuthorized(sql.ErrNoRows.Error()). + WithCancelled(sql.ErrNoRows.Error()). + Asserts( + mem, policy.ActionRead, + rbac.ResourceAssignOrgRole.InOrg(o.ID), policy.ActionAssign, // org-mem + rbac.ResourceAssignOrgRole.InOrg(o.ID), policy.ActionDelete, // org-admin + ).Returns(out) })) } @@ -1089,6 +1122,7 @@ func (s *MethodTestSuite) TestUser() { ID: u.ID, Email: u.Email, Username: u.Username, + Name: u.Name, UpdatedAt: u.UpdatedAt, }).Asserts(u, policy.ActionUpdatePersonal).Returns(u) })) @@ -1177,11 +1211,11 @@ func (s *MethodTestSuite) TestUser() { }).Asserts(rbac.ResourceUserObject(link.UserID), policy.ActionUpdatePersonal).Returns(link) })) s.Run("UpdateUserRoles", s.Subtest(func(db database.Store, check *expects) { - u := dbgen.User(s.T(), db, database.User{RBACRoles: []string{rbac.RoleTemplateAdmin()}}) + u := dbgen.User(s.T(), db, database.User{RBACRoles: []string{codersdk.RoleTemplateAdmin}}) o := u - o.RBACRoles = []string{rbac.RoleUserAdmin()} + o.RBACRoles = []string{codersdk.RoleUserAdmin} check.Args(database.UpdateUserRolesParams{ - GrantedRoles: []string{rbac.RoleUserAdmin()}, + GrantedRoles: []string{codersdk.RoleUserAdmin}, ID: u.ID, }).Asserts( u, policy.ActionRead, @@ -1202,22 +1236,22 @@ func (s *MethodTestSuite) TestUser() { check.Args(database.UpsertCustomRoleParams{ Name: "test", DisplayName: "Test Name", - SitePermissions: []byte(`[]`), - OrgPermissions: []byte(`{}`), - UserPermissions: []byte(`[]`), + SitePermissions: nil, + OrgPermissions: nil, + UserPermissions: nil, }).Asserts(rbac.ResourceAssignRole, policy.ActionCreate) })) s.Run("SitePermissions/UpsertCustomRole", s.Subtest(func(db database.Store, check *expects) { check.Args(database.UpsertCustomRoleParams{ Name: "test", DisplayName: "Test Name", - SitePermissions: must(json.Marshal(rbac.Permissions(map[string][]policy.Action{ - rbac.ResourceTemplate.Type: {policy.ActionCreate, policy.ActionRead, policy.ActionUpdate, policy.ActionDelete, policy.ActionViewInsights}, - 
}))), - OrgPermissions: []byte(`{}`), - UserPermissions: must(json.Marshal(rbac.Permissions(map[string][]policy.Action{ - rbac.ResourceWorkspace.Type: {policy.ActionRead}, - }))), + SitePermissions: db2sdk.List(codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{ + codersdk.ResourceTemplate: {codersdk.ActionCreate, codersdk.ActionRead, codersdk.ActionUpdate, codersdk.ActionDelete, codersdk.ActionViewInsights}, + }), convertSDKPerm), + OrgPermissions: nil, + UserPermissions: db2sdk.List(codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{ + codersdk.ResourceWorkspace: {codersdk.ActionRead}, + }), convertSDKPerm), }).Asserts( // First check rbac.ResourceAssignRole, policy.ActionCreate, @@ -1234,20 +1268,22 @@ func (s *MethodTestSuite) TestUser() { s.Run("OrgPermissions/UpsertCustomRole", s.Subtest(func(db database.Store, check *expects) { orgID := uuid.New() check.Args(database.UpsertCustomRoleParams{ - Name: "test", - DisplayName: "Test Name", - SitePermissions: []byte(`[]`), - OrgPermissions: must(json.Marshal(map[string][]rbac.Permission{ - orgID.String(): rbac.Permissions(map[string][]policy.Action{ - rbac.ResourceTemplate.Type: {policy.ActionCreate, policy.ActionRead}, - }), - })), - UserPermissions: must(json.Marshal(rbac.Permissions(map[string][]policy.Action{ - rbac.ResourceWorkspace.Type: {policy.ActionRead}, - }))), + Name: "test", + DisplayName: "Test Name", + OrganizationID: uuid.NullUUID{ + UUID: orgID, + Valid: true, + }, + SitePermissions: nil, + OrgPermissions: db2sdk.List(codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{ + codersdk.ResourceTemplate: {codersdk.ActionCreate, codersdk.ActionRead}, + }), convertSDKPerm), + UserPermissions: db2sdk.List(codersdk.CreatePermissions(map[codersdk.RBACResource][]codersdk.RBACAction{ + codersdk.ResourceWorkspace: {codersdk.ActionRead}, + }), convertSDKPerm), }).Asserts( // First check - rbac.ResourceAssignRole, policy.ActionCreate, + rbac.ResourceAssignOrgRole.InOrg(orgID), policy.ActionCreate, // Escalation checks rbac.ResourceTemplate.InOrg(orgID), policy.ActionCreate, rbac.ResourceTemplate.InOrg(orgID), policy.ActionRead, @@ -2431,6 +2467,32 @@ func (s *MethodTestSuite) TestSystemFunctions() { AgentID: uuid.New(), }).Asserts(tpl, policy.ActionCreate) })) + s.Run("AcquireNotificationMessages", s.Subtest(func(db database.Store, check *expects) { + // TODO: update this test once we have a specific role for notifications + check.Args(database.AcquireNotificationMessagesParams{}).Asserts(rbac.ResourceSystem, policy.ActionUpdate) + })) + s.Run("BulkMarkNotificationMessagesFailed", s.Subtest(func(db database.Store, check *expects) { + // TODO: update this test once we have a specific role for notifications + check.Args(database.BulkMarkNotificationMessagesFailedParams{}).Asserts(rbac.ResourceSystem, policy.ActionUpdate) + })) + s.Run("BulkMarkNotificationMessagesSent", s.Subtest(func(db database.Store, check *expects) { + // TODO: update this test once we have a specific role for notifications + check.Args(database.BulkMarkNotificationMessagesSentParams{}).Asserts(rbac.ResourceSystem, policy.ActionUpdate) + })) + s.Run("DeleteOldNotificationMessages", s.Subtest(func(db database.Store, check *expects) { + // TODO: update this test once we have a specific role for notifications + check.Args().Asserts(rbac.ResourceSystem, policy.ActionDelete) + })) + s.Run("EnqueueNotificationMessage", s.Subtest(func(db database.Store, check *expects) { + // TODO: update this test once we 
have a specific role for notifications + check.Args(database.EnqueueNotificationMessageParams{ + Method: database.NotificationMethodWebhook, + }).Asserts(rbac.ResourceSystem, policy.ActionCreate) + })) + s.Run("FetchNewMessageMetadata", s.Subtest(func(db database.Store, check *expects) { + // TODO: update this test once we have a specific role for notifications + check.Args(database.FetchNewMessageMetadataParams{}).Asserts(rbac.ResourceSystem, policy.ActionRead) + })) } func (s *MethodTestSuite) TestOAuth2ProviderApps() { diff --git a/coderd/database/dbauthz/setup_test.go b/coderd/database/dbauthz/setup_test.go index 95d8b70a42b40..4df38a3ca4b98 100644 --- a/coderd/database/dbauthz/setup_test.go +++ b/coderd/database/dbauthz/setup_test.go @@ -123,7 +123,7 @@ func (s *MethodTestSuite) Subtest(testCaseF func(db database.Store, check *expec az := dbauthz.New(db, rec, slog.Make(), coderdtest.AccessControlStorePointer()) actor := rbac.Subject{ ID: testActorID.String(), - Roles: rbac.RoleNames{rbac.RoleOwner()}, + Roles: rbac.RoleIdentifiers{rbac.RoleOwner()}, Groups: []string{}, Scope: rbac.ScopeAll, } @@ -157,7 +157,7 @@ func (s *MethodTestSuite) Subtest(testCaseF func(db database.Store, check *expec if len(testCase.assertions) > 0 { // Only run these tests if we know the underlying call makes // rbac assertions. - s.NotAuthorizedErrorTest(ctx, fakeAuthorizer, callMethod) + s.NotAuthorizedErrorTest(ctx, fakeAuthorizer, testCase, callMethod) } if len(testCase.assertions) > 0 || @@ -230,7 +230,7 @@ func (s *MethodTestSuite) NoActorErrorTest(callMethod func(ctx context.Context) // NotAuthorizedErrorTest runs the given method with an authorizer that will fail authz. // Asserts that the error returned is a NotAuthorizedError. -func (s *MethodTestSuite) NotAuthorizedErrorTest(ctx context.Context, az *coderdtest.FakeAuthorizer, callMethod func(ctx context.Context) ([]reflect.Value, error)) { +func (s *MethodTestSuite) NotAuthorizedErrorTest(ctx context.Context, az *coderdtest.FakeAuthorizer, testCase expects, callMethod func(ctx context.Context) ([]reflect.Value, error)) { s.Run("NotAuthorized", func() { az.AlwaysReturn = rbac.ForbiddenWithInternal(xerrors.New("Always fail authz"), rbac.Subject{}, "", rbac.Object{}, nil) @@ -242,9 +242,14 @@ func (s *MethodTestSuite) NotAuthorizedErrorTest(ctx context.Context, az *coderd // This is unfortunate, but if we are using `Filter` the error returned will be nil. So filter out // any case where the error is nil and the response is an empty slice. if err != nil || !hasEmptySliceResponse(resp) { - s.ErrorContainsf(err, "unauthorized", "error string should have a good message") - s.Errorf(err, "method should an error with disallow authz") - s.ErrorAs(err, &dbauthz.NotAuthorizedError{}, "error should be NotAuthorizedError") + // Expect the default error + if testCase.notAuthorizedExpect == "" { + s.ErrorContainsf(err, "unauthorized", "error string should have a good message") + s.Errorf(err, "method should an error with disallow authz") + s.ErrorAs(err, &dbauthz.NotAuthorizedError{}, "error should be NotAuthorizedError") + } else { + s.ErrorContains(err, testCase.notAuthorizedExpect) + } } }) @@ -263,8 +268,12 @@ func (s *MethodTestSuite) NotAuthorizedErrorTest(ctx context.Context, az *coderd // This is unfortunate, but if we are using `Filter` the error returned will be nil. So filter out // any case where the error is nil and the response is an empty slice. 
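The `notAuthorizedExpect`/`cancelledCtxExpect` overrides being wired in here exist because some methods in this diff, such as DeleteOrganizationMember and UpdateMemberRoles, fetch the affected row through an authorized, SQL-filtered query: an actor without access simply sees no rows, so the surfaced error is `sql.ErrNoRows` rather than a `dbauthz.NotAuthorizedError`. A fragment in the suite's builder style showing how a case opts into that; `args` and `obj` stand for the case's own arguments and asserted object, and the surrounding suite plumbing is assumed:

```go
// When the authorized fetch is backed by a SQL filter, declare the error text
// expected by the unauthorized and cancelled-context sub-tests instead of the
// default NotAuthorizedError assertions.
check.Args(args).
	Asserts(obj, policy.ActionRead).
	WithNotAuthorized(sql.ErrNoRows.Error()).
	WithCancelled(sql.ErrNoRows.Error())
```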
if err != nil || !hasEmptySliceResponse(resp) { - s.Errorf(err, "method should an error with cancellation") - s.ErrorIsf(err, context.Canceled, "error should match context.Canceled") + if testCase.cancelledCtxExpect == "" { + s.Errorf(err, "method should an error with cancellation") + s.ErrorIsf(err, context.Canceled, "error should match context.Canceled") + } else { + s.ErrorContains(err, testCase.cancelledCtxExpect) + } } }) } @@ -308,6 +317,13 @@ type expects struct { // outputs is optional. Can assert non-error return values. outputs []reflect.Value err error + + // Optional override of the default error checks. + // By default, we search for the expected error strings. + // If these strings are present, these strings will be searched + // instead. + notAuthorizedExpect string + cancelledCtxExpect string } // Asserts is required. Asserts the RBAC authorize calls that should be made. @@ -338,6 +354,16 @@ func (m *expects) Errors(err error) *expects { return m } +func (m *expects) WithNotAuthorized(contains string) *expects { + m.notAuthorizedExpect = contains + return m +} + +func (m *expects) WithCancelled(contains string) *expects { + m.cancelledCtxExpect = contains + return m +} + // AssertRBAC contains the object and actions to be asserted. type AssertRBAC struct { Object rbac.Object diff --git a/coderd/database/dbfake/dbfake.go b/coderd/database/dbfake/dbfake.go index 6cb2d94429eb1..4f9d6ddc5b28c 100644 --- a/coderd/database/dbfake/dbfake.go +++ b/coderd/database/dbfake/dbfake.go @@ -26,7 +26,7 @@ import ( var ownerCtx = dbauthz.As(context.Background(), rbac.Subject{ ID: "owner", - Roles: rbac.Roles(must(rbac.RoleNames{rbac.RoleOwner()}.Expand())), + Roles: rbac.Roles(must(rbac.RoleIdentifiers{rbac.RoleOwner()}.Expand())), Groups: []string{}, Scope: rbac.ExpandableScope(rbac.ScopeAll), }) diff --git a/coderd/database/dbgen/dbgen.go b/coderd/database/dbgen/dbgen.go index be612abc333f9..d2b66e5d4b6df 100644 --- a/coderd/database/dbgen/dbgen.go +++ b/coderd/database/dbgen/dbgen.go @@ -33,7 +33,7 @@ import ( // genCtx is to give all generator functions permission if the db is a dbauthz db. 
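Related to both the dbauthz hunks above and the dbgen tests below: with GetOrganizationMemberByUserID removed, this diff reads a single membership by filtering OrganizationMembers and collapsing the result with `database.ExpectOne`. A hedged sketch of that lookup; the helper name and error handling are illustrative:

```go
package example

import (
	"context"

	"github.com/google/uuid"

	"github.com/coder/coder/v2/coderd/database"
)

// memberOrErr reduces the filtered OrganizationMembers result to one row.
// database.ExpectOne errors when no row comes back (sql.ErrNoRows, which is
// also what an unauthorized caller sees once the SQL filter drops the row).
func memberOrErr(ctx context.Context, store database.Store, orgID, userID uuid.UUID) (database.OrganizationMember, error) {
	row, err := database.ExpectOne(store.OrganizationMembers(ctx, database.OrganizationMembersParams{
		OrganizationID: orgID,
		UserID:         userID,
	}))
	if err != nil {
		return database.OrganizationMember{}, err
	}
	return row.OrganizationMember, nil
}
```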
var genCtx = dbauthz.As(context.Background(), rbac.Subject{ ID: "owner", - Roles: rbac.Roles(must(rbac.RoleNames{rbac.RoleOwner()}.Expand())), + Roles: rbac.Roles(must(rbac.RoleIdentifiers{rbac.RoleOwner()}.Expand())), Groups: []string{}, Scope: rbac.ExpandableScope(rbac.ScopeAll), }) @@ -289,6 +289,7 @@ func User(t testing.TB, db database.Store, orig database.User) database.User { ID: takeFirst(orig.ID, uuid.New()), Email: takeFirst(orig.Email, namesgenerator.GetRandomName(1)), Username: takeFirst(orig.Username, namesgenerator.GetRandomName(1)), + Name: takeFirst(orig.Name, namesgenerator.GetRandomName(1)), HashedPassword: takeFirstSlice(orig.HashedPassword, []byte(must(cryptorand.String(32)))), CreatedAt: takeFirst(orig.CreatedAt, dbtime.Now()), UpdatedAt: takeFirst(orig.UpdatedAt, dbtime.Now()), @@ -336,7 +337,9 @@ func Organization(t testing.TB, db database.Store, orig database.Organization) d org, err := db.InsertOrganization(genCtx, database.InsertOrganizationParams{ ID: takeFirst(orig.ID, uuid.New()), Name: takeFirst(orig.Name, namesgenerator.GetRandomName(1)), + DisplayName: takeFirst(orig.Name, namesgenerator.GetRandomName(1)), Description: takeFirst(orig.Description, namesgenerator.GetRandomName(1)), + Icon: takeFirst(orig.Icon, ""), CreatedAt: takeFirst(orig.CreatedAt, dbtime.Now()), UpdatedAt: takeFirst(orig.UpdatedAt, dbtime.Now()), }) @@ -823,9 +826,9 @@ func CustomRole(t testing.TB, db database.Store, seed database.CustomRole) datab Name: takeFirst(seed.Name, strings.ToLower(namesgenerator.GetRandomName(1))), DisplayName: namesgenerator.GetRandomName(1), OrganizationID: seed.OrganizationID, - SitePermissions: takeFirstSlice(seed.SitePermissions, []byte("[]")), - OrgPermissions: takeFirstSlice(seed.SitePermissions, []byte("{}")), - UserPermissions: takeFirstSlice(seed.SitePermissions, []byte("[]")), + SitePermissions: takeFirstSlice(seed.SitePermissions, []database.CustomRolePermission{}), + OrgPermissions: takeFirstSlice(seed.SitePermissions, []database.CustomRolePermission{}), + UserPermissions: takeFirstSlice(seed.SitePermissions, []database.CustomRolePermission{}), }) require.NoError(t, err, "insert custom role") return role diff --git a/coderd/database/dbgen/dbgen_test.go b/coderd/database/dbgen/dbgen_test.go index eaf5a0e764482..5f9c235f312db 100644 --- a/coderd/database/dbgen/dbgen_test.go +++ b/coderd/database/dbgen/dbgen_test.go @@ -19,7 +19,7 @@ func TestGenerator(t *testing.T) { t.Parallel() db := dbmem.New() _ = dbgen.AuditLog(t, db, database.AuditLog{}) - logs := must(db.GetAuditLogsOffset(context.Background(), database.GetAuditLogsOffsetParams{Limit: 1})) + logs := must(db.GetAuditLogsOffset(context.Background(), database.GetAuditLogsOffsetParams{LimitOpt: 1})) require.Len(t, logs, 1) }) @@ -105,7 +105,7 @@ func TestGenerator(t *testing.T) { exp := []database.User{u} dbgen.GroupMember(t, db, database.GroupMember{GroupID: g.ID, UserID: u.ID}) - require.Equal(t, exp, must(db.GetGroupMembers(context.Background(), g.ID))) + require.Equal(t, exp, must(db.GetGroupMembersByGroupID(context.Background(), g.ID))) }) t.Run("Organization", func(t *testing.T) { @@ -119,10 +119,10 @@ func TestGenerator(t *testing.T) { t.Parallel() db := dbmem.New() exp := dbgen.OrganizationMember(t, db, database.OrganizationMember{}) - require.Equal(t, exp, must(db.GetOrganizationMemberByUserID(context.Background(), database.GetOrganizationMemberByUserIDParams{ + require.Equal(t, exp, must(database.ExpectOne(db.OrganizationMembers(context.Background(), database.OrganizationMembersParams{ 
OrganizationID: exp.OrganizationID, UserID: exp.UserID, - }))) + }))).OrganizationMember) }) t.Run("Workspace", func(t *testing.T) { diff --git a/coderd/database/dbmem/dbmem.go b/coderd/database/dbmem/dbmem.go index fe9b56e35ebdb..c37003f7cb96a 100644 --- a/coderd/database/dbmem/dbmem.go +++ b/coderd/database/dbmem/dbmem.go @@ -86,7 +86,9 @@ func New() database.Store { defaultOrg, err := q.InsertOrganization(context.Background(), database.InsertOrganizationParams{ ID: uuid.New(), Name: "first-organization", + DisplayName: "first-organization", Description: "Builtin default organization.", + Icon: "", CreatedAt: dbtime.Now(), UpdatedAt: dbtime.Now(), }) @@ -320,6 +322,7 @@ func convertUsers(users []database.User, count int64) []database.GetUsersRow { ID: u.ID, Email: u.Email, Username: u.Username, + Name: u.Name, HashedPassword: u.HashedPassword, CreatedAt: u.CreatedAt, UpdatedAt: u.UpdatedAt, @@ -904,6 +907,15 @@ func (*FakeQuerier) AcquireLock(_ context.Context, _ int64) error { return xerrors.New("AcquireLock must only be called within a transaction") } +func (*FakeQuerier) AcquireNotificationMessages(_ context.Context, arg database.AcquireNotificationMessagesParams) ([]database.AcquireNotificationMessagesRow, error) { + err := validateDatabaseType(arg) + if err != nil { + return nil, err + } + // nolint:nilnil // Irrelevant. + return nil, nil +} + func (q *FakeQuerier) AcquireProvisionerJob(_ context.Context, arg database.AcquireProvisionerJobParams) (database.ProvisionerJob, error) { if err := validateDatabaseType(arg); err != nil { return database.ProvisionerJob{}, err @@ -1166,6 +1178,22 @@ func (q *FakeQuerier) BatchUpdateWorkspaceLastUsedAt(_ context.Context, arg data return nil } +func (*FakeQuerier) BulkMarkNotificationMessagesFailed(_ context.Context, arg database.BulkMarkNotificationMessagesFailedParams) (int64, error) { + err := validateDatabaseType(arg) + if err != nil { + return 0, err + } + return -1, nil +} + +func (*FakeQuerier) BulkMarkNotificationMessagesSent(_ context.Context, arg database.BulkMarkNotificationMessagesSentParams) (int64, error) { + err := validateDatabaseType(arg) + if err != nil { + return 0, err + } + return -1, nil +} + func (*FakeQuerier) CleanTailnetCoordinators(_ context.Context) error { return ErrUnimplemented } @@ -1186,12 +1214,17 @@ func (q *FakeQuerier) CustomRoles(_ context.Context, arg database.CustomRolesPar for _, role := range q.data.customRoles { role := role if len(arg.LookupRoles) > 0 { - if !slices.ContainsFunc(arg.LookupRoles, func(s string) bool { - roleName := rbac.RoleName(role.Name, "") - if role.OrganizationID.UUID != uuid.Nil { - roleName = rbac.RoleName(role.Name, role.OrganizationID.UUID.String()) + if !slices.ContainsFunc(arg.LookupRoles, func(pair database.NameOrganizationPair) bool { + if pair.Name != role.Name { + return false + } + + if role.OrganizationID.Valid { + // Expect org match + return role.OrganizationID.UUID == pair.OrganizationID } - return strings.EqualFold(s, roleName) + // Expect no org + return pair.OrganizationID == uuid.Nil }) { continue } @@ -1496,6 +1529,10 @@ func (q *FakeQuerier) DeleteOAuth2ProviderAppTokensByAppAndUserID(_ context.Cont return nil } +func (*FakeQuerier) DeleteOldNotificationMessages(_ context.Context) error { + return nil +} + func (q *FakeQuerier) DeleteOldProvisionerDaemons(_ context.Context) error { q.mutex.Lock() defer q.mutex.Unlock() @@ -1625,6 +1662,24 @@ func (q *FakeQuerier) DeleteOrganization(_ context.Context, id uuid.UUID) error return sql.ErrNoRows } +func (q 
*FakeQuerier) DeleteOrganizationMember(_ context.Context, arg database.DeleteOrganizationMemberParams) error { + err := validateDatabaseType(arg) + if err != nil { + return err + } + + q.mutex.Lock() + defer q.mutex.Unlock() + + deleted := slices.DeleteFunc(q.data.organizationMembers, func(member database.OrganizationMember) bool { + return member.OrganizationID == arg.OrganizationID && member.UserID == arg.UserID + }) + if len(deleted) == 0 { + return sql.ErrNoRows + } + return nil +} + func (q *FakeQuerier) DeleteReplicasUpdatedBefore(_ context.Context, before time.Time) error { q.mutex.Lock() defer q.mutex.Unlock() @@ -1711,6 +1766,14 @@ func (q *FakeQuerier) DeleteWorkspaceAgentPortSharesByTemplate(_ context.Context return nil } +func (*FakeQuerier) EnqueueNotificationMessage(_ context.Context, arg database.EnqueueNotificationMessageParams) (database.NotificationMessage, error) { + err := validateDatabaseType(arg) + if err != nil { + return database.NotificationMessage{}, err + } + return database.NotificationMessage{}, nil +} + func (q *FakeQuerier) FavoriteWorkspace(_ context.Context, arg uuid.UUID) error { err := validateDatabaseType(arg) if err != nil { @@ -1730,6 +1793,14 @@ func (q *FakeQuerier) FavoriteWorkspace(_ context.Context, arg uuid.UUID) error return nil } +func (*FakeQuerier) FetchNewMessageMetadata(_ context.Context, arg database.FetchNewMessageMetadataParams) (database.FetchNewMessageMetadataRow, error) { + err := validateDatabaseType(arg) + if err != nil { + return database.FetchNewMessageMetadataRow{}, err + } + return database.FetchNewMessageMetadataRow{}, nil +} + func (q *FakeQuerier) GetAPIKeyByID(_ context.Context, id string) (database.APIKey, error) { q.mutex.RLock() defer q.mutex.RUnlock() @@ -1894,12 +1965,20 @@ func (q *FakeQuerier) GetAuditLogsOffset(_ context.Context, arg database.GetAudi q.mutex.RLock() defer q.mutex.RUnlock() - logs := make([]database.GetAuditLogsOffsetRow, 0, arg.Limit) + if arg.LimitOpt == 0 { + // Default to 100 is set in the SQL query. + arg.LimitOpt = 100 + } + + logs := make([]database.GetAuditLogsOffsetRow, 0, arg.LimitOpt) // q.auditLogs are already sorted by time DESC, so no need to sort after the fact. 
for _, alog := range q.auditLogs { - if arg.Offset > 0 { - arg.Offset-- + if arg.OffsetOpt > 0 { + arg.OffsetOpt-- + continue + } + if arg.OrganizationID != uuid.Nil && arg.OrganizationID != alog.OrganizationID { continue } if arg.Action != "" && !strings.Contains(string(alog.Action), arg.Action) { @@ -1944,29 +2023,36 @@ func (q *FakeQuerier) GetAuditLogsOffset(_ context.Context, arg database.GetAudi userValid := err == nil logs = append(logs, database.GetAuditLogsOffsetRow{ - ID: alog.ID, - RequestID: alog.RequestID, - OrganizationID: alog.OrganizationID, - Ip: alog.Ip, - UserAgent: alog.UserAgent, - ResourceType: alog.ResourceType, - ResourceID: alog.ResourceID, - ResourceTarget: alog.ResourceTarget, - ResourceIcon: alog.ResourceIcon, - Action: alog.Action, - Diff: alog.Diff, - StatusCode: alog.StatusCode, - AdditionalFields: alog.AdditionalFields, - UserID: alog.UserID, - UserUsername: sql.NullString{String: user.Username, Valid: userValid}, - UserEmail: sql.NullString{String: user.Email, Valid: userValid}, - UserCreatedAt: sql.NullTime{Time: user.CreatedAt, Valid: userValid}, - UserStatus: database.NullUserStatus{UserStatus: user.Status, Valid: userValid}, - UserRoles: user.RBACRoles, - Count: 0, + ID: alog.ID, + RequestID: alog.RequestID, + OrganizationID: alog.OrganizationID, + Ip: alog.Ip, + UserAgent: alog.UserAgent, + ResourceType: alog.ResourceType, + ResourceID: alog.ResourceID, + ResourceTarget: alog.ResourceTarget, + ResourceIcon: alog.ResourceIcon, + Action: alog.Action, + Diff: alog.Diff, + StatusCode: alog.StatusCode, + AdditionalFields: alog.AdditionalFields, + UserID: alog.UserID, + UserUsername: sql.NullString{String: user.Username, Valid: userValid}, + UserName: sql.NullString{String: user.Name, Valid: userValid}, + UserEmail: sql.NullString{String: user.Email, Valid: userValid}, + UserCreatedAt: sql.NullTime{Time: user.CreatedAt, Valid: userValid}, + UserUpdatedAt: sql.NullTime{Time: user.UpdatedAt, Valid: userValid}, + UserLastSeenAt: sql.NullTime{Time: user.LastSeenAt, Valid: userValid}, + UserLoginType: database.NullLoginType{LoginType: user.LoginType, Valid: userValid}, + UserDeleted: sql.NullBool{Bool: user.Deleted, Valid: userValid}, + UserThemePreference: sql.NullString{String: user.ThemePreference, Valid: userValid}, + UserQuietHoursSchedule: sql.NullString{String: user.QuietHoursSchedule, Valid: userValid}, + UserStatus: database.NullUserStatus{UserStatus: user.Status, Valid: userValid}, + UserRoles: user.RBACRoles, + Count: 0, }) - if len(logs) >= int(arg.Limit) { + if len(logs) >= int(arg.LimitOpt) { break } } @@ -1997,7 +2083,9 @@ func (q *FakeQuerier) GetAuthorizationUserRoles(_ context.Context, userID uuid.U for _, mem := range q.organizationMembers { if mem.UserID == userID { - roles = append(roles, mem.Roles...) 
+ for _, orgRole := range mem.Roles { + roles = append(roles, orgRole+":"+mem.OrganizationID.String()) + } roles = append(roles, "organization-member:"+mem.OrganizationID.String()) } } @@ -2336,7 +2424,16 @@ func (q *FakeQuerier) GetGroupByOrgAndName(_ context.Context, arg database.GetGr return database.Group{}, sql.ErrNoRows } -func (q *FakeQuerier) GetGroupMembers(_ context.Context, id uuid.UUID) ([]database.User, error) { +func (q *FakeQuerier) GetGroupMembers(_ context.Context) ([]database.GroupMember, error) { + q.mutex.RLock() + defer q.mutex.RUnlock() + + out := make([]database.GroupMember, len(q.groupMembers)) + copy(out, q.groupMembers) + return out, nil +} + +func (q *FakeQuerier) GetGroupMembersByGroupID(_ context.Context, id uuid.UUID) ([]database.User, error) { q.mutex.RLock() defer q.mutex.RUnlock() @@ -2365,6 +2462,15 @@ func (q *FakeQuerier) GetGroupMembers(_ context.Context, id uuid.UUID) ([]databa return users, nil } +func (q *FakeQuerier) GetGroups(_ context.Context) ([]database.Group, error) { + q.mutex.RLock() + defer q.mutex.RUnlock() + + out := make([]database.Group, len(q.groups)) + copy(out, q.groups) + return out, nil +} + func (q *FakeQuerier) GetGroupsByOrganizationAndUserID(_ context.Context, arg database.GetGroupsByOrganizationAndUserIDParams) ([]database.Group, error) { err := validateDatabaseType(arg) if err != nil { @@ -2751,41 +2857,6 @@ func (q *FakeQuerier) GetOrganizationIDsByMemberIDs(_ context.Context, ids []uui return getOrganizationIDsByMemberIDRows, nil } -func (q *FakeQuerier) GetOrganizationMemberByUserID(_ context.Context, arg database.GetOrganizationMemberByUserIDParams) (database.OrganizationMember, error) { - if err := validateDatabaseType(arg); err != nil { - return database.OrganizationMember{}, err - } - - q.mutex.RLock() - defer q.mutex.RUnlock() - - for _, organizationMember := range q.organizationMembers { - if organizationMember.OrganizationID != arg.OrganizationID { - continue - } - if organizationMember.UserID != arg.UserID { - continue - } - return organizationMember, nil - } - return database.OrganizationMember{}, sql.ErrNoRows -} - -func (q *FakeQuerier) GetOrganizationMembershipsByUserID(_ context.Context, userID uuid.UUID) ([]database.OrganizationMember, error) { - q.mutex.RLock() - defer q.mutex.RUnlock() - - var memberships []database.OrganizationMember - for _, organizationMember := range q.organizationMembers { - mem := organizationMember - if mem.UserID != userID { - continue - } - memberships = append(memberships, mem) - } - return memberships, nil -} - func (q *FakeQuerier) GetOrganizations(_ context.Context) ([]database.Organization, error) { q.mutex.RLock() defer q.mutex.RUnlock() @@ -4800,7 +4871,7 @@ func (q *FakeQuerier) GetUsers(_ context.Context, params database.GetUsersParams users = usersFilteredByStatus } - if len(params.RbacRole) > 0 && !slice.Contains(params.RbacRole, rbac.RoleMember()) { + if len(params.RbacRole) > 0 && !slice.Contains(params.RbacRole, rbac.RoleMember().String()) { usersFilteredByRole := make([]database.User, 0, len(users)) for i, user := range users { if slice.OverlapCompare(params.RbacRole, user.RBACRoles, strings.EqualFold) { @@ -6177,11 +6248,14 @@ func (q *FakeQuerier) InsertOrganization(_ context.Context, arg database.InsertO defer q.mutex.Unlock() organization := database.Organization{ - ID: arg.ID, - Name: arg.Name, - CreatedAt: arg.CreatedAt, - UpdatedAt: arg.UpdatedAt, - IsDefault: len(q.organizations) == 0, + ID: arg.ID, + Name: arg.Name, + DisplayName: arg.DisplayName, + 
Description: arg.Description, + Icon: arg.Icon, + CreatedAt: arg.CreatedAt, + UpdatedAt: arg.UpdatedAt, + IsDefault: len(q.organizations) == 0, } q.organizations = append(q.organizations, organization) return organization, nil @@ -6195,6 +6269,20 @@ func (q *FakeQuerier) InsertOrganizationMember(_ context.Context, arg database.I q.mutex.Lock() defer q.mutex.Unlock() + if slices.IndexFunc(q.data.organizationMembers, func(member database.OrganizationMember) bool { + return member.OrganizationID == arg.OrganizationID && member.UserID == arg.UserID + }) >= 0 { + // Error pulled from a live db error + return database.OrganizationMember{}, &pq.Error{ + Severity: "ERROR", + Code: "23505", + Message: "duplicate key value violates unique constraint \"organization_members_pkey\"", + Detail: "Key (organization_id, user_id)=(f7de1f4e-5833-4410-a28d-0a105f96003f, 36052a80-4a7f-4998-a7ca-44cefa608d3e) already exists.", + Table: "organization_members", + Constraint: "organization_members_pkey", + } + } + //nolint:gosimple organizationMember := database.OrganizationMember{ OrganizationID: arg.OrganizationID, @@ -6458,6 +6546,7 @@ func (q *FakeQuerier) InsertUser(_ context.Context, arg database.InsertUserParam CreatedAt: arg.CreatedAt, UpdatedAt: arg.UpdatedAt, Username: arg.Username, + Name: arg.Name, Status: database.UserStatusDormant, RBACRoles: arg.RBACRoles, LoginType: arg.LoginType, @@ -6953,6 +7042,34 @@ func (q *FakeQuerier) ListWorkspaceAgentPortShares(_ context.Context, workspaceI return shares, nil } +func (q *FakeQuerier) OrganizationMembers(_ context.Context, arg database.OrganizationMembersParams) ([]database.OrganizationMembersRow, error) { + if err := validateDatabaseType(arg); err != nil { + return []database.OrganizationMembersRow{}, err + } + + q.mutex.RLock() + defer q.mutex.RUnlock() + + tmp := make([]database.OrganizationMembersRow, 0) + for _, organizationMember := range q.organizationMembers { + if arg.OrganizationID != uuid.Nil && organizationMember.OrganizationID != arg.OrganizationID { + continue + } + + if arg.UserID != uuid.Nil && organizationMember.UserID != arg.UserID { + continue + } + + organizationMember := organizationMember + user, _ := q.getUserByIDNoLock(organizationMember.UserID) + tmp = append(tmp, database.OrganizationMembersRow{ + OrganizationMember: organizationMember, + Username: user.Username, + }) + } + return tmp, nil +} + func (q *FakeQuerier) ReduceWorkspaceAgentShareLevelToAuthenticatedByTemplate(_ context.Context, templateID uuid.UUID) error { err := validateDatabaseType(templateID) if err != nil { @@ -7322,6 +7439,9 @@ func (q *FakeQuerier) UpdateOrganization(_ context.Context, arg database.UpdateO for i, org := range q.organizations { if org.ID == arg.ID { org.Name = arg.Name + org.DisplayName = arg.DisplayName + org.Description = arg.Description + org.Icon = arg.Icon q.organizations[i] = org return org, nil } @@ -8403,6 +8523,7 @@ func (q *FakeQuerier) UpsertCustomRole(_ context.Context, arg database.UpsertCus } role := database.CustomRole{ + ID: uuid.New(), Name: arg.Name, DisplayName: arg.DisplayName, OrganizationID: arg.OrganizationID, diff --git a/coderd/database/dbmetrics/dbmetrics.go b/coderd/database/dbmetrics/dbmetrics.go index aff562fcdb89f..fbaf7d4fc0b4e 100644 --- a/coderd/database/dbmetrics/dbmetrics.go +++ b/coderd/database/dbmetrics/dbmetrics.go @@ -88,6 +88,13 @@ func (m metricsStore) AcquireLock(ctx context.Context, pgAdvisoryXactLock int64) return err } +func (m metricsStore) AcquireNotificationMessages(ctx context.Context, arg 
database.AcquireNotificationMessagesParams) ([]database.AcquireNotificationMessagesRow, error) { + start := time.Now() + r0, r1 := m.s.AcquireNotificationMessages(ctx, arg) + m.queryLatencies.WithLabelValues("AcquireNotificationMessages").Observe(time.Since(start).Seconds()) + return r0, r1 +} + func (m metricsStore) AcquireProvisionerJob(ctx context.Context, arg database.AcquireProvisionerJobParams) (database.ProvisionerJob, error) { start := time.Now() provisionerJob, err := m.s.AcquireProvisionerJob(ctx, arg) @@ -123,6 +130,20 @@ func (m metricsStore) BatchUpdateWorkspaceLastUsedAt(ctx context.Context, arg da return r0 } +func (m metricsStore) BulkMarkNotificationMessagesFailed(ctx context.Context, arg database.BulkMarkNotificationMessagesFailedParams) (int64, error) { + start := time.Now() + r0, r1 := m.s.BulkMarkNotificationMessagesFailed(ctx, arg) + m.queryLatencies.WithLabelValues("BulkMarkNotificationMessagesFailed").Observe(time.Since(start).Seconds()) + return r0, r1 +} + +func (m metricsStore) BulkMarkNotificationMessagesSent(ctx context.Context, arg database.BulkMarkNotificationMessagesSentParams) (int64, error) { + start := time.Now() + r0, r1 := m.s.BulkMarkNotificationMessagesSent(ctx, arg) + m.queryLatencies.WithLabelValues("BulkMarkNotificationMessagesSent").Observe(time.Since(start).Seconds()) + return r0, r1 +} + func (m metricsStore) CleanTailnetCoordinators(ctx context.Context) error { start := time.Now() err := m.s.CleanTailnetCoordinators(ctx) @@ -263,6 +284,13 @@ func (m metricsStore) DeleteOAuth2ProviderAppTokensByAppAndUserID(ctx context.Co return r0 } +func (m metricsStore) DeleteOldNotificationMessages(ctx context.Context) error { + start := time.Now() + r0 := m.s.DeleteOldNotificationMessages(ctx) + m.queryLatencies.WithLabelValues("DeleteOldNotificationMessages").Observe(time.Since(start).Seconds()) + return r0 +} + func (m metricsStore) DeleteOldProvisionerDaemons(ctx context.Context) error { start := time.Now() r0 := m.s.DeleteOldProvisionerDaemons(ctx) @@ -291,6 +319,13 @@ func (m metricsStore) DeleteOrganization(ctx context.Context, id uuid.UUID) erro return r0 } +func (m metricsStore) DeleteOrganizationMember(ctx context.Context, arg database.DeleteOrganizationMemberParams) error { + start := time.Now() + r0 := m.s.DeleteOrganizationMember(ctx, arg) + m.queryLatencies.WithLabelValues("DeleteOrganizationMember").Observe(time.Since(start).Seconds()) + return r0 +} + func (m metricsStore) DeleteReplicasUpdatedBefore(ctx context.Context, updatedAt time.Time) error { start := time.Now() err := m.s.DeleteReplicasUpdatedBefore(ctx, updatedAt) @@ -347,6 +382,13 @@ func (m metricsStore) DeleteWorkspaceAgentPortSharesByTemplate(ctx context.Conte return r0 } +func (m metricsStore) EnqueueNotificationMessage(ctx context.Context, arg database.EnqueueNotificationMessageParams) (database.NotificationMessage, error) { + start := time.Now() + r0, r1 := m.s.EnqueueNotificationMessage(ctx, arg) + m.queryLatencies.WithLabelValues("EnqueueNotificationMessage").Observe(time.Since(start).Seconds()) + return r0, r1 +} + func (m metricsStore) FavoriteWorkspace(ctx context.Context, arg uuid.UUID) error { start := time.Now() r0 := m.s.FavoriteWorkspace(ctx, arg) @@ -354,6 +396,13 @@ func (m metricsStore) FavoriteWorkspace(ctx context.Context, arg uuid.UUID) erro return r0 } +func (m metricsStore) FetchNewMessageMetadata(ctx context.Context, arg database.FetchNewMessageMetadataParams) (database.FetchNewMessageMetadataRow, error) { + start := time.Now() + r0, r1 := 
m.s.FetchNewMessageMetadata(ctx, arg) + m.queryLatencies.WithLabelValues("FetchNewMessageMetadata").Observe(time.Since(start).Seconds()) + return r0, r1 +} + func (m metricsStore) GetAPIKeyByID(ctx context.Context, id string) (database.APIKey, error) { start := time.Now() apiKey, err := m.s.GetAPIKeyByID(ctx, id) @@ -578,13 +627,27 @@ func (m metricsStore) GetGroupByOrgAndName(ctx context.Context, arg database.Get return group, err } -func (m metricsStore) GetGroupMembers(ctx context.Context, groupID uuid.UUID) ([]database.User, error) { +func (m metricsStore) GetGroupMembers(ctx context.Context) ([]database.GroupMember, error) { start := time.Now() - users, err := m.s.GetGroupMembers(ctx, groupID) + r0, r1 := m.s.GetGroupMembers(ctx) m.queryLatencies.WithLabelValues("GetGroupMembers").Observe(time.Since(start).Seconds()) + return r0, r1 +} + +func (m metricsStore) GetGroupMembersByGroupID(ctx context.Context, groupID uuid.UUID) ([]database.User, error) { + start := time.Now() + users, err := m.s.GetGroupMembersByGroupID(ctx, groupID) + m.queryLatencies.WithLabelValues("GetGroupMembersByGroupID").Observe(time.Since(start).Seconds()) return users, err } +func (m metricsStore) GetGroups(ctx context.Context) ([]database.Group, error) { + start := time.Now() + r0, r1 := m.s.GetGroups(ctx) + m.queryLatencies.WithLabelValues("GetGroups").Observe(time.Since(start).Seconds()) + return r0, r1 +} + func (m metricsStore) GetGroupsByOrganizationAndUserID(ctx context.Context, arg database.GetGroupsByOrganizationAndUserIDParams) ([]database.Group, error) { start := time.Now() r0, r1 := m.s.GetGroupsByOrganizationAndUserID(ctx, arg) @@ -760,20 +823,6 @@ func (m metricsStore) GetOrganizationIDsByMemberIDs(ctx context.Context, ids []u return organizations, err } -func (m metricsStore) GetOrganizationMemberByUserID(ctx context.Context, arg database.GetOrganizationMemberByUserIDParams) (database.OrganizationMember, error) { - start := time.Now() - member, err := m.s.GetOrganizationMemberByUserID(ctx, arg) - m.queryLatencies.WithLabelValues("GetOrganizationMemberByUserID").Observe(time.Since(start).Seconds()) - return member, err -} - -func (m metricsStore) GetOrganizationMembershipsByUserID(ctx context.Context, userID uuid.UUID) ([]database.OrganizationMember, error) { - start := time.Now() - memberships, err := m.s.GetOrganizationMembershipsByUserID(ctx, userID) - m.queryLatencies.WithLabelValues("GetOrganizationMembershipsByUserID").Observe(time.Since(start).Seconds()) - return memberships, err -} - func (m metricsStore) GetOrganizations(ctx context.Context) ([]database.Organization, error) { start := time.Now() organizations, err := m.s.GetOrganizations(ctx) @@ -1747,6 +1796,13 @@ func (m metricsStore) ListWorkspaceAgentPortShares(ctx context.Context, workspac return r0, r1 } +func (m metricsStore) OrganizationMembers(ctx context.Context, arg database.OrganizationMembersParams) ([]database.OrganizationMembersRow, error) { + start := time.Now() + r0, r1 := m.s.OrganizationMembers(ctx, arg) + m.queryLatencies.WithLabelValues("OrganizationMembers").Observe(time.Since(start).Seconds()) + return r0, r1 +} + func (m metricsStore) ReduceWorkspaceAgentShareLevelToAuthenticatedByTemplate(ctx context.Context, templateID uuid.UUID) error { start := time.Now() r0 := m.s.ReduceWorkspaceAgentShareLevelToAuthenticatedByTemplate(ctx, templateID) diff --git a/coderd/database/dbmock/dbmock.go b/coderd/database/dbmock/dbmock.go index 3ef96d13f8b33..7f00a57587216 100644 --- a/coderd/database/dbmock/dbmock.go +++ 
b/coderd/database/dbmock/dbmock.go @@ -58,6 +58,21 @@ func (mr *MockStoreMockRecorder) AcquireLock(arg0, arg1 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AcquireLock", reflect.TypeOf((*MockStore)(nil).AcquireLock), arg0, arg1) } +// AcquireNotificationMessages mocks base method. +func (m *MockStore) AcquireNotificationMessages(arg0 context.Context, arg1 database.AcquireNotificationMessagesParams) ([]database.AcquireNotificationMessagesRow, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AcquireNotificationMessages", arg0, arg1) + ret0, _ := ret[0].([]database.AcquireNotificationMessagesRow) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AcquireNotificationMessages indicates an expected call of AcquireNotificationMessages. +func (mr *MockStoreMockRecorder) AcquireNotificationMessages(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AcquireNotificationMessages", reflect.TypeOf((*MockStore)(nil).AcquireNotificationMessages), arg0, arg1) +} + // AcquireProvisionerJob mocks base method. func (m *MockStore) AcquireProvisionerJob(arg0 context.Context, arg1 database.AcquireProvisionerJobParams) (database.ProvisionerJob, error) { m.ctrl.T.Helper() @@ -131,6 +146,36 @@ func (mr *MockStoreMockRecorder) BatchUpdateWorkspaceLastUsedAt(arg0, arg1 any) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchUpdateWorkspaceLastUsedAt", reflect.TypeOf((*MockStore)(nil).BatchUpdateWorkspaceLastUsedAt), arg0, arg1) } +// BulkMarkNotificationMessagesFailed mocks base method. +func (m *MockStore) BulkMarkNotificationMessagesFailed(arg0 context.Context, arg1 database.BulkMarkNotificationMessagesFailedParams) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BulkMarkNotificationMessagesFailed", arg0, arg1) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BulkMarkNotificationMessagesFailed indicates an expected call of BulkMarkNotificationMessagesFailed. +func (mr *MockStoreMockRecorder) BulkMarkNotificationMessagesFailed(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BulkMarkNotificationMessagesFailed", reflect.TypeOf((*MockStore)(nil).BulkMarkNotificationMessagesFailed), arg0, arg1) +} + +// BulkMarkNotificationMessagesSent mocks base method. +func (m *MockStore) BulkMarkNotificationMessagesSent(arg0 context.Context, arg1 database.BulkMarkNotificationMessagesSentParams) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BulkMarkNotificationMessagesSent", arg0, arg1) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BulkMarkNotificationMessagesSent indicates an expected call of BulkMarkNotificationMessagesSent. +func (mr *MockStoreMockRecorder) BulkMarkNotificationMessagesSent(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BulkMarkNotificationMessagesSent", reflect.TypeOf((*MockStore)(nil).BulkMarkNotificationMessagesSent), arg0, arg1) +} + // CleanTailnetCoordinators mocks base method. 
func (m *MockStore) CleanTailnetCoordinators(arg0 context.Context) error { m.ctrl.T.Helper() @@ -413,6 +458,20 @@ func (mr *MockStoreMockRecorder) DeleteOAuth2ProviderAppTokensByAppAndUserID(arg return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOAuth2ProviderAppTokensByAppAndUserID", reflect.TypeOf((*MockStore)(nil).DeleteOAuth2ProviderAppTokensByAppAndUserID), arg0, arg1) } +// DeleteOldNotificationMessages mocks base method. +func (m *MockStore) DeleteOldNotificationMessages(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteOldNotificationMessages", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteOldNotificationMessages indicates an expected call of DeleteOldNotificationMessages. +func (mr *MockStoreMockRecorder) DeleteOldNotificationMessages(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOldNotificationMessages", reflect.TypeOf((*MockStore)(nil).DeleteOldNotificationMessages), arg0) +} + // DeleteOldProvisionerDaemons mocks base method. func (m *MockStore) DeleteOldProvisionerDaemons(arg0 context.Context) error { m.ctrl.T.Helper() @@ -469,6 +528,20 @@ func (mr *MockStoreMockRecorder) DeleteOrganization(arg0, arg1 any) *gomock.Call return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOrganization", reflect.TypeOf((*MockStore)(nil).DeleteOrganization), arg0, arg1) } +// DeleteOrganizationMember mocks base method. +func (m *MockStore) DeleteOrganizationMember(arg0 context.Context, arg1 database.DeleteOrganizationMemberParams) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteOrganizationMember", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteOrganizationMember indicates an expected call of DeleteOrganizationMember. +func (mr *MockStoreMockRecorder) DeleteOrganizationMember(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOrganizationMember", reflect.TypeOf((*MockStore)(nil).DeleteOrganizationMember), arg0, arg1) +} + // DeleteReplicasUpdatedBefore mocks base method. func (m *MockStore) DeleteReplicasUpdatedBefore(arg0 context.Context, arg1 time.Time) error { m.ctrl.T.Helper() @@ -585,6 +658,21 @@ func (mr *MockStoreMockRecorder) DeleteWorkspaceAgentPortSharesByTemplate(arg0, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteWorkspaceAgentPortSharesByTemplate", reflect.TypeOf((*MockStore)(nil).DeleteWorkspaceAgentPortSharesByTemplate), arg0, arg1) } +// EnqueueNotificationMessage mocks base method. +func (m *MockStore) EnqueueNotificationMessage(arg0 context.Context, arg1 database.EnqueueNotificationMessageParams) (database.NotificationMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EnqueueNotificationMessage", arg0, arg1) + ret0, _ := ret[0].(database.NotificationMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EnqueueNotificationMessage indicates an expected call of EnqueueNotificationMessage. +func (mr *MockStoreMockRecorder) EnqueueNotificationMessage(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnqueueNotificationMessage", reflect.TypeOf((*MockStore)(nil).EnqueueNotificationMessage), arg0, arg1) +} + // FavoriteWorkspace mocks base method. 
func (m *MockStore) FavoriteWorkspace(arg0 context.Context, arg1 uuid.UUID) error { m.ctrl.T.Helper() @@ -599,6 +687,21 @@ func (mr *MockStoreMockRecorder) FavoriteWorkspace(arg0, arg1 any) *gomock.Call return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FavoriteWorkspace", reflect.TypeOf((*MockStore)(nil).FavoriteWorkspace), arg0, arg1) } +// FetchNewMessageMetadata mocks base method. +func (m *MockStore) FetchNewMessageMetadata(arg0 context.Context, arg1 database.FetchNewMessageMetadataParams) (database.FetchNewMessageMetadataRow, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchNewMessageMetadata", arg0, arg1) + ret0, _ := ret[0].(database.FetchNewMessageMetadataRow) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchNewMessageMetadata indicates an expected call of FetchNewMessageMetadata. +func (mr *MockStoreMockRecorder) FetchNewMessageMetadata(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchNewMessageMetadata", reflect.TypeOf((*MockStore)(nil).FetchNewMessageMetadata), arg0, arg1) +} + // GetAPIKeyByID mocks base method. func (m *MockStore) GetAPIKeyByID(arg0 context.Context, arg1 string) (database.APIKey, error) { m.ctrl.T.Helper() @@ -1125,18 +1228,48 @@ func (mr *MockStoreMockRecorder) GetGroupByOrgAndName(arg0, arg1 any) *gomock.Ca } // GetGroupMembers mocks base method. -func (m *MockStore) GetGroupMembers(arg0 context.Context, arg1 uuid.UUID) ([]database.User, error) { +func (m *MockStore) GetGroupMembers(arg0 context.Context) ([]database.GroupMember, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetGroupMembers", arg0, arg1) - ret0, _ := ret[0].([]database.User) + ret := m.ctrl.Call(m, "GetGroupMembers", arg0) + ret0, _ := ret[0].([]database.GroupMember) ret1, _ := ret[1].(error) return ret0, ret1 } // GetGroupMembers indicates an expected call of GetGroupMembers. -func (mr *MockStoreMockRecorder) GetGroupMembers(arg0, arg1 any) *gomock.Call { +func (mr *MockStoreMockRecorder) GetGroupMembers(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupMembers", reflect.TypeOf((*MockStore)(nil).GetGroupMembers), arg0) +} + +// GetGroupMembersByGroupID mocks base method. +func (m *MockStore) GetGroupMembersByGroupID(arg0 context.Context, arg1 uuid.UUID) ([]database.User, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGroupMembersByGroupID", arg0, arg1) + ret0, _ := ret[0].([]database.User) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetGroupMembersByGroupID indicates an expected call of GetGroupMembersByGroupID. +func (mr *MockStoreMockRecorder) GetGroupMembersByGroupID(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupMembersByGroupID", reflect.TypeOf((*MockStore)(nil).GetGroupMembersByGroupID), arg0, arg1) +} + +// GetGroups mocks base method. +func (m *MockStore) GetGroups(arg0 context.Context) ([]database.Group, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGroups", arg0) + ret0, _ := ret[0].([]database.Group) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetGroups indicates an expected call of GetGroups. 
+func (mr *MockStoreMockRecorder) GetGroups(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupMembers", reflect.TypeOf((*MockStore)(nil).GetGroupMembers), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroups", reflect.TypeOf((*MockStore)(nil).GetGroups), arg0) } // GetGroupsByOrganizationAndUserID mocks base method. @@ -1514,36 +1647,6 @@ func (mr *MockStoreMockRecorder) GetOrganizationIDsByMemberIDs(arg0, arg1 any) * return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOrganizationIDsByMemberIDs", reflect.TypeOf((*MockStore)(nil).GetOrganizationIDsByMemberIDs), arg0, arg1) } -// GetOrganizationMemberByUserID mocks base method. -func (m *MockStore) GetOrganizationMemberByUserID(arg0 context.Context, arg1 database.GetOrganizationMemberByUserIDParams) (database.OrganizationMember, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetOrganizationMemberByUserID", arg0, arg1) - ret0, _ := ret[0].(database.OrganizationMember) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetOrganizationMemberByUserID indicates an expected call of GetOrganizationMemberByUserID. -func (mr *MockStoreMockRecorder) GetOrganizationMemberByUserID(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOrganizationMemberByUserID", reflect.TypeOf((*MockStore)(nil).GetOrganizationMemberByUserID), arg0, arg1) -} - -// GetOrganizationMembershipsByUserID mocks base method. -func (m *MockStore) GetOrganizationMembershipsByUserID(arg0 context.Context, arg1 uuid.UUID) ([]database.OrganizationMember, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetOrganizationMembershipsByUserID", arg0, arg1) - ret0, _ := ret[0].([]database.OrganizationMember) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetOrganizationMembershipsByUserID indicates an expected call of GetOrganizationMembershipsByUserID. -func (mr *MockStoreMockRecorder) GetOrganizationMembershipsByUserID(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOrganizationMembershipsByUserID", reflect.TypeOf((*MockStore)(nil).GetOrganizationMembershipsByUserID), arg0, arg1) -} - // GetOrganizations mocks base method. func (m *MockStore) GetOrganizations(arg0 context.Context) ([]database.Organization, error) { m.ctrl.T.Helper() @@ -3661,6 +3764,21 @@ func (mr *MockStoreMockRecorder) ListWorkspaceAgentPortShares(arg0, arg1 any) *g return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListWorkspaceAgentPortShares", reflect.TypeOf((*MockStore)(nil).ListWorkspaceAgentPortShares), arg0, arg1) } +// OrganizationMembers mocks base method. +func (m *MockStore) OrganizationMembers(arg0 context.Context, arg1 database.OrganizationMembersParams) ([]database.OrganizationMembersRow, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "OrganizationMembers", arg0, arg1) + ret0, _ := ret[0].([]database.OrganizationMembersRow) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// OrganizationMembers indicates an expected call of OrganizationMembers. +func (mr *MockStoreMockRecorder) OrganizationMembers(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OrganizationMembers", reflect.TypeOf((*MockStore)(nil).OrganizationMembers), arg0, arg1) +} + // Ping mocks base method. 
func (m *MockStore) Ping(arg0 context.Context) (time.Duration, error) { m.ctrl.T.Helper() diff --git a/coderd/database/dbpurge/dbpurge.go b/coderd/database/dbpurge/dbpurge.go index a6ad0a125d5f2..2bcfefdca79ff 100644 --- a/coderd/database/dbpurge/dbpurge.go +++ b/coderd/database/dbpurge/dbpurge.go @@ -58,6 +58,9 @@ func New(ctx context.Context, logger slog.Logger, db database.Store) io.Closer { if err := tx.DeleteOldProvisionerDaemons(ctx); err != nil { return xerrors.Errorf("failed to delete old provisioner daemons: %w", err) } + if err := tx.DeleteOldNotificationMessages(ctx); err != nil { + return xerrors.Errorf("failed to delete old notification messages: %w", err) + } logger.Info(ctx, "purged old database entries", slog.F("duration", time.Since(start))) diff --git a/coderd/database/dbtestutil/postgres.go b/coderd/database/dbtestutil/postgres.go index 33e0350821099..3a559778b6968 100644 --- a/coderd/database/dbtestutil/postgres.go +++ b/coderd/database/dbtestutil/postgres.go @@ -28,6 +28,7 @@ func Open() (string, func(), error) { if err != nil { return "", nil, xerrors.Errorf("connect to ci postgres: %w", err) } + defer db.Close() dbName, err := cryptorand.StringCharset(cryptorand.Lower, 10) diff --git a/coderd/database/dump.sql b/coderd/database/dump.sql index fde9c9556ac84..0b51a6c300205 100644 --- a/coderd/database/dump.sql +++ b/coderd/database/dump.sql @@ -73,6 +73,25 @@ CREATE TYPE login_type AS ENUM ( COMMENT ON TYPE login_type IS 'Specifies the method of authentication. "none" is a special case in which no authentication method is allowed.'; +CREATE TYPE name_organization_pair AS ( + name text, + organization_id uuid +); + +CREATE TYPE notification_message_status AS ENUM ( + 'pending', + 'leased', + 'sent', + 'permanent_failure', + 'temporary_failure', + 'unknown' +); + +CREATE TYPE notification_method AS ENUM ( + 'smtp', + 'webhook' +); + CREATE TYPE parameter_destination_scheme AS ENUM ( 'none', 'environment_variable', @@ -142,7 +161,9 @@ CREATE TYPE resource_type AS ENUM ( 'convert_login', 'health_settings', 'oauth2_provider_app', - 'oauth2_provider_app_secret' + 'oauth2_provider_app_secret', + 'custom_role', + 'organization_member' ); CREATE TYPE startup_script_behavior AS ENUM ( @@ -412,13 +433,16 @@ CREATE TABLE custom_roles ( user_permissions jsonb DEFAULT '[]'::jsonb NOT NULL, created_at timestamp with time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, updated_at timestamp with time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, - organization_id uuid + organization_id uuid, + id uuid DEFAULT gen_random_uuid() NOT NULL ); COMMENT ON TABLE custom_roles IS 'Custom roles allow dynamic roles expanded at runtime'; COMMENT ON COLUMN custom_roles.organization_id IS 'Roles can optionally be scoped to an organization'; +COMMENT ON COLUMN custom_roles.id IS 'Custom roles ID is used purely for auditing purposes. 
Name is a better unique identifier.'; + CREATE TABLE dbcrypt_keys ( number integer NOT NULL, active_key_digest text, @@ -524,6 +548,34 @@ CREATE SEQUENCE licenses_id_seq ALTER SEQUENCE licenses_id_seq OWNED BY licenses.id; +CREATE TABLE notification_messages ( + id uuid NOT NULL, + notification_template_id uuid NOT NULL, + user_id uuid NOT NULL, + method notification_method NOT NULL, + status notification_message_status DEFAULT 'pending'::notification_message_status NOT NULL, + status_reason text, + created_by text NOT NULL, + payload jsonb NOT NULL, + attempt_count integer DEFAULT 0, + targets uuid[], + created_at timestamp with time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp with time zone, + leased_until timestamp with time zone, + next_retry_after timestamp with time zone +); + +CREATE TABLE notification_templates ( + id uuid NOT NULL, + name text NOT NULL, + title_template text NOT NULL, + body_template text NOT NULL, + actions jsonb, + "group" text +); + +COMMENT ON TABLE notification_templates IS 'Templates from which to create notification messages.'; + CREATE TABLE oauth2_provider_app_codes ( id uuid NOT NULL, created_at timestamp with time zone NOT NULL, @@ -576,7 +628,7 @@ CREATE TABLE organization_members ( organization_id uuid NOT NULL, created_at timestamp with time zone NOT NULL, updated_at timestamp with time zone NOT NULL, - roles text[] DEFAULT '{organization-member}'::text[] NOT NULL + roles text[] DEFAULT '{}'::text[] NOT NULL ); CREATE TABLE organizations ( @@ -585,7 +637,9 @@ CREATE TABLE organizations ( description text NOT NULL, created_at timestamp with time zone NOT NULL, updated_at timestamp with time zone NOT NULL, - is_default boolean DEFAULT false NOT NULL + is_default boolean DEFAULT false NOT NULL, + display_name text NOT NULL, + icon text DEFAULT ''::text NOT NULL ); CREATE TABLE parameter_schemas ( @@ -1461,6 +1515,15 @@ ALTER TABLE ONLY licenses ALTER TABLE ONLY licenses ADD CONSTRAINT licenses_pkey PRIMARY KEY (id); +ALTER TABLE ONLY notification_messages + ADD CONSTRAINT notification_messages_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY notification_templates + ADD CONSTRAINT notification_templates_name_key UNIQUE (name); + +ALTER TABLE ONLY notification_templates + ADD CONSTRAINT notification_templates_pkey PRIMARY KEY (id); + ALTER TABLE ONLY oauth2_provider_app_codes ADD CONSTRAINT oauth2_provider_app_codes_pkey PRIMARY KEY (id); @@ -1636,8 +1699,12 @@ CREATE INDEX idx_audit_log_user_id ON audit_logs USING btree (user_id); CREATE INDEX idx_audit_logs_time_desc ON audit_logs USING btree ("time" DESC); +CREATE INDEX idx_custom_roles_id ON custom_roles USING btree (id); + CREATE UNIQUE INDEX idx_custom_roles_name_lower ON custom_roles USING btree (lower(name)); +CREATE INDEX idx_notification_messages_status ON notification_messages USING btree (status); + CREATE INDEX idx_organization_member_organization_id_uuid ON organization_members USING btree (organization_id); CREATE INDEX idx_organization_member_user_id_uuid ON organization_members USING btree (user_id); @@ -1755,6 +1822,12 @@ ALTER TABLE ONLY jfrog_xray_scans ALTER TABLE ONLY jfrog_xray_scans ADD CONSTRAINT jfrog_xray_scans_workspace_id_fkey FOREIGN KEY (workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE; +ALTER TABLE ONLY notification_messages + ADD CONSTRAINT notification_messages_notification_template_id_fkey FOREIGN KEY (notification_template_id) REFERENCES notification_templates(id) ON DELETE CASCADE; + +ALTER TABLE ONLY notification_messages + ADD CONSTRAINT 
notification_messages_user_id_fkey FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE; + ALTER TABLE ONLY oauth2_provider_app_codes ADD CONSTRAINT oauth2_provider_app_codes_app_id_fkey FOREIGN KEY (app_id) REFERENCES oauth2_provider_apps(id) ON DELETE CASCADE; diff --git a/coderd/database/foreign_key_constraint.go b/coderd/database/foreign_key_constraint.go index 2a8f1738d3cb8..3a9557a9758dd 100644 --- a/coderd/database/foreign_key_constraint.go +++ b/coderd/database/foreign_key_constraint.go @@ -15,6 +15,8 @@ const ( ForeignKeyGroupsOrganizationID ForeignKeyConstraint = "groups_organization_id_fkey" // ALTER TABLE ONLY groups ADD CONSTRAINT groups_organization_id_fkey FOREIGN KEY (organization_id) REFERENCES organizations(id) ON DELETE CASCADE; ForeignKeyJfrogXrayScansAgentID ForeignKeyConstraint = "jfrog_xray_scans_agent_id_fkey" // ALTER TABLE ONLY jfrog_xray_scans ADD CONSTRAINT jfrog_xray_scans_agent_id_fkey FOREIGN KEY (agent_id) REFERENCES workspace_agents(id) ON DELETE CASCADE; ForeignKeyJfrogXrayScansWorkspaceID ForeignKeyConstraint = "jfrog_xray_scans_workspace_id_fkey" // ALTER TABLE ONLY jfrog_xray_scans ADD CONSTRAINT jfrog_xray_scans_workspace_id_fkey FOREIGN KEY (workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE; + ForeignKeyNotificationMessagesNotificationTemplateID ForeignKeyConstraint = "notification_messages_notification_template_id_fkey" // ALTER TABLE ONLY notification_messages ADD CONSTRAINT notification_messages_notification_template_id_fkey FOREIGN KEY (notification_template_id) REFERENCES notification_templates(id) ON DELETE CASCADE; + ForeignKeyNotificationMessagesUserID ForeignKeyConstraint = "notification_messages_user_id_fkey" // ALTER TABLE ONLY notification_messages ADD CONSTRAINT notification_messages_user_id_fkey FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE; ForeignKeyOauth2ProviderAppCodesAppID ForeignKeyConstraint = "oauth2_provider_app_codes_app_id_fkey" // ALTER TABLE ONLY oauth2_provider_app_codes ADD CONSTRAINT oauth2_provider_app_codes_app_id_fkey FOREIGN KEY (app_id) REFERENCES oauth2_provider_apps(id) ON DELETE CASCADE; ForeignKeyOauth2ProviderAppCodesUserID ForeignKeyConstraint = "oauth2_provider_app_codes_user_id_fkey" // ALTER TABLE ONLY oauth2_provider_app_codes ADD CONSTRAINT oauth2_provider_app_codes_user_id_fkey FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE; ForeignKeyOauth2ProviderAppSecretsAppID ForeignKeyConstraint = "oauth2_provider_app_secrets_app_id_fkey" // ALTER TABLE ONLY oauth2_provider_app_secrets ADD CONSTRAINT oauth2_provider_app_secrets_app_id_fkey FOREIGN KEY (app_id) REFERENCES oauth2_provider_apps(id) ON DELETE CASCADE; diff --git a/coderd/database/migrations/000214_org_custom_role_array.down.sql b/coderd/database/migrations/000214_org_custom_role_array.down.sql new file mode 100644 index 0000000000000..099389eac58ce --- /dev/null +++ b/coderd/database/migrations/000214_org_custom_role_array.down.sql @@ -0,0 +1 @@ +UPDATE custom_roles SET org_permissions = '{}'; diff --git a/coderd/database/migrations/000214_org_custom_role_array.up.sql b/coderd/database/migrations/000214_org_custom_role_array.up.sql new file mode 100644 index 0000000000000..294d2826fe5f3 --- /dev/null +++ b/coderd/database/migrations/000214_org_custom_role_array.up.sql @@ -0,0 +1,4 @@ +-- Previous custom roles are now invalid, as the json changed. Since this is an +-- experimental feature, there is no point in trying to save the perms. +-- This does not elevate any permissions, so it is not a security issue. 
+UPDATE custom_roles SET org_permissions = '[]'; diff --git a/coderd/database/migrations/000215_scoped_org_db_roles.down.sql b/coderd/database/migrations/000215_scoped_org_db_roles.down.sql new file mode 100644 index 0000000000000..68a43a8fe8c7a --- /dev/null +++ b/coderd/database/migrations/000215_scoped_org_db_roles.down.sql @@ -0,0 +1 @@ +ALTER TABLE ONLY organization_members ALTER COLUMN roles SET DEFAULT '{organization-member}'; diff --git a/coderd/database/migrations/000215_scoped_org_db_roles.up.sql b/coderd/database/migrations/000215_scoped_org_db_roles.up.sql new file mode 100644 index 0000000000000..aecd19b8da668 --- /dev/null +++ b/coderd/database/migrations/000215_scoped_org_db_roles.up.sql @@ -0,0 +1,7 @@ +-- The default was 'organization-member', but we imply that in the +-- 'GetAuthorizationUserRoles' query. +ALTER TABLE ONLY organization_members ALTER COLUMN roles SET DEFAULT '{}'; + +-- No one should be using organization roles yet. If they are, the names in the +-- database are now incorrect. Just remove them all. +UPDATE organization_members SET roles = '{}'; diff --git a/coderd/database/migrations/000216_organization_display_name.down.sql b/coderd/database/migrations/000216_organization_display_name.down.sql new file mode 100644 index 0000000000000..4dea440465b11 --- /dev/null +++ b/coderd/database/migrations/000216_organization_display_name.down.sql @@ -0,0 +1,2 @@ +alter table organizations + drop column display_name; diff --git a/coderd/database/migrations/000216_organization_display_name.up.sql b/coderd/database/migrations/000216_organization_display_name.up.sql new file mode 100644 index 0000000000000..26245f03fc525 --- /dev/null +++ b/coderd/database/migrations/000216_organization_display_name.up.sql @@ -0,0 +1,10 @@ +-- This default is just a temporary thing to avoid null errors when first creating the column. +alter table organizations + add column display_name text not null default ''; + +update organizations + set display_name = name; + +-- We can remove the default now that everything has been copied. 
+alter table organizations + alter column display_name drop default; diff --git a/coderd/database/migrations/000217_custom_role_pair_parameter.down.sql b/coderd/database/migrations/000217_custom_role_pair_parameter.down.sql new file mode 100644 index 0000000000000..7322a09ee26b8 --- /dev/null +++ b/coderd/database/migrations/000217_custom_role_pair_parameter.down.sql @@ -0,0 +1 @@ +DROP TYPE name_organization_pair; diff --git a/coderd/database/migrations/000217_custom_role_pair_parameter.up.sql b/coderd/database/migrations/000217_custom_role_pair_parameter.up.sql new file mode 100644 index 0000000000000..b131054fc8dfb --- /dev/null +++ b/coderd/database/migrations/000217_custom_role_pair_parameter.up.sql @@ -0,0 +1 @@ +CREATE TYPE name_organization_pair AS (name text, organization_id uuid); diff --git a/coderd/database/migrations/000218_org_custom_role_audit.down.sql b/coderd/database/migrations/000218_org_custom_role_audit.down.sql new file mode 100644 index 0000000000000..5ad6106f2fc26 --- /dev/null +++ b/coderd/database/migrations/000218_org_custom_role_audit.down.sql @@ -0,0 +1,2 @@ +DROP INDEX idx_custom_roles_id; +ALTER TABLE custom_roles DROP COLUMN id; diff --git a/coderd/database/migrations/000218_org_custom_role_audit.up.sql b/coderd/database/migrations/000218_org_custom_role_audit.up.sql new file mode 100644 index 0000000000000..a780f34960907 --- /dev/null +++ b/coderd/database/migrations/000218_org_custom_role_audit.up.sql @@ -0,0 +1,8 @@ +-- (name) is the primary key, this column is almost exclusively for auditing. +-- Audit logs require a uuid as the unique identifier for a resource. +ALTER TABLE custom_roles ADD COLUMN id uuid DEFAULT gen_random_uuid() NOT NULL; +COMMENT ON COLUMN custom_roles.id IS 'Custom roles ID is used purely for auditing purposes. Name is a better unique identifier.'; + +-- Ensure unique uuids. 
+CREATE INDEX idx_custom_roles_id ON custom_roles (id); +ALTER TYPE resource_type ADD VALUE IF NOT EXISTS 'custom_role'; diff --git a/coderd/database/migrations/000219_organization_icon.down.sql b/coderd/database/migrations/000219_organization_icon.down.sql new file mode 100644 index 0000000000000..99b32ec8dab41 --- /dev/null +++ b/coderd/database/migrations/000219_organization_icon.down.sql @@ -0,0 +1,2 @@ +alter table organizations + drop column icon; diff --git a/coderd/database/migrations/000219_organization_icon.up.sql b/coderd/database/migrations/000219_organization_icon.up.sql new file mode 100644 index 0000000000000..6690301a3b549 --- /dev/null +++ b/coderd/database/migrations/000219_organization_icon.up.sql @@ -0,0 +1,2 @@ +alter table organizations + add column icon text not null default ''; diff --git a/coderd/database/migrations/000220_audit_org_member.down.sql b/coderd/database/migrations/000220_audit_org_member.down.sql new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/coderd/database/migrations/000220_audit_org_member.up.sql b/coderd/database/migrations/000220_audit_org_member.up.sql new file mode 100644 index 0000000000000..c6f0f799a367d --- /dev/null +++ b/coderd/database/migrations/000220_audit_org_member.up.sql @@ -0,0 +1 @@ +ALTER TYPE resource_type ADD VALUE IF NOT EXISTS 'organization_member'; diff --git a/coderd/database/migrations/000221_notifications.down.sql b/coderd/database/migrations/000221_notifications.down.sql new file mode 100644 index 0000000000000..a7cd8a5f6a4c3 --- /dev/null +++ b/coderd/database/migrations/000221_notifications.down.sql @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS notification_messages; +DROP TABLE IF EXISTS notification_templates; +DROP TYPE IF EXISTS notification_method; +DROP TYPE IF EXISTS notification_message_status; \ No newline at end of file diff --git a/coderd/database/migrations/000221_notifications.up.sql b/coderd/database/migrations/000221_notifications.up.sql new file mode 100644 index 0000000000000..567ed87d80764 --- /dev/null +++ b/coderd/database/migrations/000221_notifications.up.sql @@ -0,0 +1,65 @@ +CREATE TYPE notification_message_status AS ENUM ( + 'pending', + 'leased', + 'sent', + 'permanent_failure', + 'temporary_failure', + 'unknown' + ); + +CREATE TYPE notification_method AS ENUM ( + 'smtp', + 'webhook' + ); + +CREATE TABLE notification_templates +( + id uuid NOT NULL, + name text NOT NULL, + title_template text NOT NULL, + body_template text NOT NULL, + actions jsonb, + "group" text, + PRIMARY KEY (id), + UNIQUE (name) +); + +COMMENT ON TABLE notification_templates IS 'Templates from which to create notification messages.'; + +CREATE TABLE notification_messages +( + id uuid NOT NULL, + notification_template_id uuid NOT NULL, + user_id uuid NOT NULL, + method notification_method NOT NULL, + status notification_message_status NOT NULL DEFAULT 'pending'::notification_message_status, + status_reason text, + created_by text NOT NULL, + payload jsonb NOT NULL, + attempt_count int DEFAULT 0, + targets uuid[], + created_at timestamp with time zone NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at timestamp with time zone, + leased_until timestamp with time zone, + next_retry_after timestamp with time zone, + PRIMARY KEY (id), + FOREIGN KEY (notification_template_id) REFERENCES notification_templates (id) ON DELETE CASCADE, + FOREIGN KEY (user_id) REFERENCES users (id) ON DELETE CASCADE +); + +CREATE INDEX idx_notification_messages_status ON notification_messages (status); + +-- TODO: autogenerate constants 
which reference the UUIDs +INSERT INTO notification_templates (id, name, title_template, body_template, "group", actions) +VALUES ('f517da0b-cdc9-410f-ab89-a86107c420ed', 'Workspace Deleted', E'Workspace "{{.Labels.name}}" deleted', + E'Hi {{.UserName}}\n\nYour workspace **{{.Labels.name}}** was deleted.\nThe specified reason was "**{{.Labels.reason}}**".', + 'Workspace Events', '[ + { + "label": "View workspaces", + "url": "{{ base_url }}/workspaces" + }, + { + "label": "View templates", + "url": "{{ base_url }}/templates" + } + ]'::jsonb); diff --git a/coderd/database/migrations/testdata/fixtures/000221_notifications.up.sql b/coderd/database/migrations/testdata/fixtures/000221_notifications.up.sql new file mode 100644 index 0000000000000..a3bd8a73f2566 --- /dev/null +++ b/coderd/database/migrations/testdata/fixtures/000221_notifications.up.sql @@ -0,0 +1,21 @@ +DO +$$ + DECLARE + template text; + BEGIN + SELECT 'You successfully did {{.thing}}!' INTO template; + + INSERT INTO notification_templates (id, name, title_template, body_template, "group") + VALUES ('a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11', 'A', template, template, 'Group 1'), + ('b0eebc99-9c0b-4ef8-bb6d-6bb9bd380a12', 'B', template, template, 'Group 1'), + ('c0eebc99-9c0b-4ef8-bb6d-6bb9bd380a13', 'C', template, template, 'Group 2'); + + INSERT INTO users(id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, deleted) + VALUES ('fc1511ef-4fcf-4a3b-98a1-8df64160e35a', 'githubuser@coder.com', 'githubuser', '\x', '2022-11-02 13:05:21.445455+02', '2022-11-02 13:05:21.445455+02', 'active', '{}', false) ON CONFLICT DO NOTHING; + + INSERT INTO notification_messages (id, notification_template_id, user_id, method, created_by, payload) + VALUES ( + gen_random_uuid(), 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11', 'fc1511ef-4fcf-4a3b-98a1-8df64160e35a', 'smtp'::notification_method, 'test', '{}' + ); + END +$$; diff --git a/coderd/database/modelmethods.go b/coderd/database/modelmethods.go index d71c63b089556..f8a3fc2c537b1 100644 --- a/coderd/database/modelmethods.go +++ b/coderd/database/modelmethods.go @@ -7,6 +7,7 @@ import ( "golang.org/x/exp/maps" "golang.org/x/oauth2" + "golang.org/x/xerrors" "github.com/coder/coder/v2/coderd/database/dbtime" "github.com/coder/coder/v2/coderd/rbac" @@ -59,6 +60,18 @@ func (s WorkspaceAgentStatus) Valid() bool { } } +type AuditableOrganizationMember struct { + OrganizationMember + Username string `json:"username"` +} + +func (m OrganizationMember) Auditable(username string) AuditableOrganizationMember { + return AuditableOrganizationMember{ + OrganizationMember: m, + Username: username, + } +} + type AuditableGroup struct { Group Members []GroupMember `json:"members"` @@ -178,6 +191,10 @@ func (m OrganizationMember) RBACObject() rbac.Object { WithOwner(m.UserID.String()) } +func (m OrganizationMembersRow) RBACObject() rbac.Object { + return m.OrganizationMember.RBACObject() +} + func (m GetOrganizationIDsByMemberIDsRow) RBACObject() rbac.Object { // TODO: This feels incorrect as we are really returning a list of orgmembers. 
// This return type should be refactored to return a list of orgmembers, not this @@ -313,6 +330,7 @@ func ConvertUserRows(rows []GetUsersRow) []User { ID: r.ID, Email: r.Email, Username: r.Username, + Name: r.Name, HashedPassword: r.HashedPassword, CreatedAt: r.CreatedAt, UpdatedAt: r.UpdatedAt, @@ -373,3 +391,22 @@ func (p ProvisionerJob) FinishedAt() time.Time { return time.Time{} } + +func (r CustomRole) RoleIdentifier() rbac.RoleIdentifier { + return rbac.RoleIdentifier{ + Name: r.Name, + OrganizationID: r.OrganizationID.UUID, + } +} + +func (r GetAuthorizationUserRolesRow) RoleNames() ([]rbac.RoleIdentifier, error) { + names := make([]rbac.RoleIdentifier, 0, len(r.Roles)) + for _, role := range r.Roles { + value, err := rbac.RoleNameFromString(role) + if err != nil { + return nil, xerrors.Errorf("convert role %q: %w", role, err) + } + names = append(names, value) + } + return names, nil +} diff --git a/coderd/database/modelqueries.go b/coderd/database/modelqueries.go index ca38505b28ef0..9cc5d7792101c 100644 --- a/coderd/database/modelqueries.go +++ b/coderd/database/modelqueries.go @@ -2,6 +2,7 @@ package database import ( "context" + "database/sql" "fmt" "strings" @@ -17,6 +18,29 @@ const ( authorizedQueryPlaceholder = "-- @authorize_filter" ) +// ExpectOne can be used to convert a ':many:' query into a ':one' +// query. To reduce the quantity of SQL queries, a :many with a filter is used. +// These filters sometimes are expected to return just 1 row. +// +// A :many query will never return a sql.ErrNoRows, but a :one does. +// This function will correct the error for the empty set. +func ExpectOne[T any](ret []T, err error) (T, error) { + var empty T + if err != nil { + return empty, err + } + + if len(ret) == 0 { + return empty, sql.ErrNoRows + } + + if len(ret) > 1 { + return empty, xerrors.Errorf("too many rows returned, expected 1") + } + + return ret[0], nil +} + // customQuerier encompasses all non-generated queries. // It provides a flexible way to write queries for cases // where sqlc proves inadequate. diff --git a/coderd/database/models.go b/coderd/database/models.go index 42c41c83bd5dc..d7f1ab9972a61 100644 --- a/coderd/database/models.go +++ b/coderd/database/models.go @@ -660,6 +660,134 @@ func AllLoginTypeValues() []LoginType { } } +type NotificationMessageStatus string + +const ( + NotificationMessageStatusPending NotificationMessageStatus = "pending" + NotificationMessageStatusLeased NotificationMessageStatus = "leased" + NotificationMessageStatusSent NotificationMessageStatus = "sent" + NotificationMessageStatusPermanentFailure NotificationMessageStatus = "permanent_failure" + NotificationMessageStatusTemporaryFailure NotificationMessageStatus = "temporary_failure" + NotificationMessageStatusUnknown NotificationMessageStatus = "unknown" +) + +func (e *NotificationMessageStatus) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = NotificationMessageStatus(s) + case string: + *e = NotificationMessageStatus(s) + default: + return fmt.Errorf("unsupported scan type for NotificationMessageStatus: %T", src) + } + return nil +} + +type NullNotificationMessageStatus struct { + NotificationMessageStatus NotificationMessageStatus `json:"notification_message_status"` + Valid bool `json:"valid"` // Valid is true if NotificationMessageStatus is not NULL +} + +// Scan implements the Scanner interface. 
+func (ns *NullNotificationMessageStatus) Scan(value interface{}) error { + if value == nil { + ns.NotificationMessageStatus, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.NotificationMessageStatus.Scan(value) +} + +// Value implements the driver Valuer interface. +func (ns NullNotificationMessageStatus) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.NotificationMessageStatus), nil +} + +func (e NotificationMessageStatus) Valid() bool { + switch e { + case NotificationMessageStatusPending, + NotificationMessageStatusLeased, + NotificationMessageStatusSent, + NotificationMessageStatusPermanentFailure, + NotificationMessageStatusTemporaryFailure, + NotificationMessageStatusUnknown: + return true + } + return false +} + +func AllNotificationMessageStatusValues() []NotificationMessageStatus { + return []NotificationMessageStatus{ + NotificationMessageStatusPending, + NotificationMessageStatusLeased, + NotificationMessageStatusSent, + NotificationMessageStatusPermanentFailure, + NotificationMessageStatusTemporaryFailure, + NotificationMessageStatusUnknown, + } +} + +type NotificationMethod string + +const ( + NotificationMethodSmtp NotificationMethod = "smtp" + NotificationMethodWebhook NotificationMethod = "webhook" +) + +func (e *NotificationMethod) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = NotificationMethod(s) + case string: + *e = NotificationMethod(s) + default: + return fmt.Errorf("unsupported scan type for NotificationMethod: %T", src) + } + return nil +} + +type NullNotificationMethod struct { + NotificationMethod NotificationMethod `json:"notification_method"` + Valid bool `json:"valid"` // Valid is true if NotificationMethod is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullNotificationMethod) Scan(value interface{}) error { + if value == nil { + ns.NotificationMethod, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.NotificationMethod.Scan(value) +} + +// Value implements the driver Valuer interface. 
+func (ns NullNotificationMethod) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.NotificationMethod), nil +} + +func (e NotificationMethod) Valid() bool { + switch e { + case NotificationMethodSmtp, + NotificationMethodWebhook: + return true + } + return false +} + +func AllNotificationMethodValues() []NotificationMethod { + return []NotificationMethod{ + NotificationMethodSmtp, + NotificationMethodWebhook, + } +} + type ParameterDestinationScheme string const ( @@ -1222,6 +1350,8 @@ const ( ResourceTypeHealthSettings ResourceType = "health_settings" ResourceTypeOauth2ProviderApp ResourceType = "oauth2_provider_app" ResourceTypeOauth2ProviderAppSecret ResourceType = "oauth2_provider_app_secret" + ResourceTypeCustomRole ResourceType = "custom_role" + ResourceTypeOrganizationMember ResourceType = "organization_member" ) func (e *ResourceType) Scan(src interface{}) error { @@ -1275,7 +1405,9 @@ func (e ResourceType) Valid() bool { ResourceTypeConvertLogin, ResourceTypeHealthSettings, ResourceTypeOauth2ProviderApp, - ResourceTypeOauth2ProviderAppSecret: + ResourceTypeOauth2ProviderAppSecret, + ResourceTypeCustomRole, + ResourceTypeOrganizationMember: return true } return false @@ -1298,6 +1430,8 @@ func AllResourceTypeValues() []ResourceType { ResourceTypeHealthSettings, ResourceTypeOauth2ProviderApp, ResourceTypeOauth2ProviderAppSecret, + ResourceTypeCustomRole, + ResourceTypeOrganizationMember, } } @@ -1783,15 +1917,17 @@ type AuditLog struct { // Custom roles allow dynamic roles expanded at runtime type CustomRole struct { - Name string `db:"name" json:"name"` - DisplayName string `db:"display_name" json:"display_name"` - SitePermissions json.RawMessage `db:"site_permissions" json:"site_permissions"` - OrgPermissions json.RawMessage `db:"org_permissions" json:"org_permissions"` - UserPermissions json.RawMessage `db:"user_permissions" json:"user_permissions"` - CreatedAt time.Time `db:"created_at" json:"created_at"` - UpdatedAt time.Time `db:"updated_at" json:"updated_at"` + Name string `db:"name" json:"name"` + DisplayName string `db:"display_name" json:"display_name"` + SitePermissions CustomRolePermissions `db:"site_permissions" json:"site_permissions"` + OrgPermissions CustomRolePermissions `db:"org_permissions" json:"org_permissions"` + UserPermissions CustomRolePermissions `db:"user_permissions" json:"user_permissions"` + CreatedAt time.Time `db:"created_at" json:"created_at"` + UpdatedAt time.Time `db:"updated_at" json:"updated_at"` // Roles can optionally be scoped to an organization OrganizationID uuid.NullUUID `db:"organization_id" json:"organization_id"` + // Custom roles ID is used purely for auditing purposes. Name is a better unique identifier. + ID uuid.UUID `db:"id" json:"id"` } // A table used to store the keys used to encrypt the database. 
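Note on the ExpectOne helper added to modelqueries.go above: it converts a :many query result into :one semantics, returning sql.ErrNoRows for an empty set and an explicit error when more than one row matches. A minimal sketch of a hypothetical caller combining it with the new OrganizationMembers query follows; the OrganizationMembersParams field names (OrganizationID, UserID) are assumptions, since the params struct is not shown in this diff.

    package example // hypothetical caller, not part of this change

    import (
        "context"

        "github.com/google/uuid"

        "github.com/coder/coder/v2/coderd/database"
    )

    // getMember fetches a single org member through the :many
    // OrganizationMembers query, then recovers :one semantics with ExpectOne.
    func getMember(ctx context.Context, store database.Store, orgID, userID uuid.UUID) (database.OrganizationMember, error) {
        // ExpectOne returns sql.ErrNoRows when no row matches, and an error
        // when more than one row is returned.
        row, err := database.ExpectOne(store.OrganizationMembers(ctx, database.OrganizationMembersParams{
            OrganizationID: orgID, // field names assumed; params struct not shown in this diff
            UserID:         userID,
        }))
        if err != nil {
            return database.OrganizationMember{}, err
        }
        // OrganizationMembersRow embeds OrganizationMember (see modelmethods.go above).
        return row.OrganizationMember, nil
    }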
@@ -1877,6 +2013,33 @@ type License struct { UUID uuid.UUID `db:"uuid" json:"uuid"` } +type NotificationMessage struct { + ID uuid.UUID `db:"id" json:"id"` + NotificationTemplateID uuid.UUID `db:"notification_template_id" json:"notification_template_id"` + UserID uuid.UUID `db:"user_id" json:"user_id"` + Method NotificationMethod `db:"method" json:"method"` + Status NotificationMessageStatus `db:"status" json:"status"` + StatusReason sql.NullString `db:"status_reason" json:"status_reason"` + CreatedBy string `db:"created_by" json:"created_by"` + Payload []byte `db:"payload" json:"payload"` + AttemptCount sql.NullInt32 `db:"attempt_count" json:"attempt_count"` + Targets []uuid.UUID `db:"targets" json:"targets"` + CreatedAt time.Time `db:"created_at" json:"created_at"` + UpdatedAt sql.NullTime `db:"updated_at" json:"updated_at"` + LeasedUntil sql.NullTime `db:"leased_until" json:"leased_until"` + NextRetryAfter sql.NullTime `db:"next_retry_after" json:"next_retry_after"` +} + +// Templates from which to create notification messages. +type NotificationTemplate struct { + ID uuid.UUID `db:"id" json:"id"` + Name string `db:"name" json:"name"` + TitleTemplate string `db:"title_template" json:"title_template"` + BodyTemplate string `db:"body_template" json:"body_template"` + Actions []byte `db:"actions" json:"actions"` + Group sql.NullString `db:"group" json:"group"` +} + // A table used to configure apps that can use Coder as an OAuth2 provider, the reverse of what we are calling external authentication. type OAuth2ProviderApp struct { ID uuid.UUID `db:"id" json:"id"` @@ -1927,6 +2090,8 @@ type Organization struct { CreatedAt time.Time `db:"created_at" json:"created_at"` UpdatedAt time.Time `db:"updated_at" json:"updated_at"` IsDefault bool `db:"is_default" json:"is_default"` + DisplayName string `db:"display_name" json:"display_name"` + Icon string `db:"icon" json:"icon"` } type OrganizationMember struct { diff --git a/coderd/database/pubsub/pubsub_linux_test.go b/coderd/database/pubsub/pubsub_linux_test.go index 203287eb71637..f208af921b441 100644 --- a/coderd/database/pubsub/pubsub_linux_test.go +++ b/coderd/database/pubsub/pubsub_linux_test.go @@ -351,7 +351,7 @@ func TestMeasureLatency(t *testing.T) { send, recv, err := pubsub.NewLatencyMeasurer(logger).Measure(ctx, ps) require.ErrorContains(t, err, context.Canceled.Error()) - require.Greater(t, send.Nanoseconds(), int64(0)) + require.GreaterOrEqual(t, send.Nanoseconds(), int64(0)) require.EqualValues(t, recv, time.Duration(-1)) }) diff --git a/coderd/database/pubsub/watchdog.go b/coderd/database/pubsub/watchdog.go index 687129fc5bcc2..df54019bb49b2 100644 --- a/coderd/database/pubsub/watchdog.go +++ b/coderd/database/pubsub/watchdog.go @@ -7,9 +7,8 @@ import ( "sync" "time" - "github.com/benbjohnson/clock" - "cdr.dev/slog" + "github.com/coder/coder/v2/clock" ) const ( @@ -36,7 +35,7 @@ type Watchdog struct { } func NewWatchdog(ctx context.Context, logger slog.Logger, ps Pubsub) *Watchdog { - return NewWatchdogWithClock(ctx, logger, ps, clock.New()) + return NewWatchdogWithClock(ctx, logger, ps, clock.NewReal()) } // NewWatchdogWithClock returns a watchdog with the given clock. Product code should always call NewWatchDog. @@ -79,32 +78,23 @@ func (w *Watchdog) Timeout() <-chan struct{} { func (w *Watchdog) publishLoop() { defer w.wg.Done() - tkr := w.clock.Ticker(periodHeartbeat) - defer tkr.Stop() - // immediate publish after starting the ticker. This helps testing so that we can tell from - // the outside that the ticker is started. 
- err := w.ps.Publish(EventPubsubWatchdog, []byte{}) - if err != nil { - w.logger.Warn(w.ctx, "failed to publish heartbeat on pubsub watchdog", slog.Error(err)) - } - for { - select { - case <-w.ctx.Done(): - w.logger.Debug(w.ctx, "context done; exiting publishLoop") - return - case <-tkr.C: - err := w.ps.Publish(EventPubsubWatchdog, []byte{}) - if err != nil { - w.logger.Warn(w.ctx, "failed to publish heartbeat on pubsub watchdog", slog.Error(err)) - } + tkr := w.clock.TickerFunc(w.ctx, periodHeartbeat, func() error { + err := w.ps.Publish(EventPubsubWatchdog, []byte{}) + if err != nil { + w.logger.Warn(w.ctx, "failed to publish heartbeat on pubsub watchdog", slog.Error(err)) + } else { + w.logger.Debug(w.ctx, "published heartbeat on pubsub watchdog") } - } + return err + }, "publish") + // ignore the error, since we log before returning the error + _ = tkr.Wait() } func (w *Watchdog) subscribeMonitor() { defer w.wg.Done() - tmr := w.clock.Timer(periodTimeout) - defer tmr.Stop() + tmr := w.clock.NewTimer(periodTimeout) + defer tmr.Stop("subscribe") beats := make(chan struct{}) unsub, err := w.ps.Subscribe(EventPubsubWatchdog, func(context.Context, []byte) { w.logger.Debug(w.ctx, "got heartbeat for pubsub watchdog") diff --git a/coderd/database/pubsub/watchdog_test.go b/coderd/database/pubsub/watchdog_test.go index ddd5a864e2c66..942f9eeb849c4 100644 --- a/coderd/database/pubsub/watchdog_test.go +++ b/coderd/database/pubsub/watchdog_test.go @@ -4,36 +4,51 @@ import ( "testing" "time" - "github.com/benbjohnson/clock" "github.com/stretchr/testify/require" "cdr.dev/slog" "cdr.dev/slog/sloggers/slogtest" + "github.com/coder/coder/v2/clock" "github.com/coder/coder/v2/coderd/database/pubsub" "github.com/coder/coder/v2/testutil" ) func TestWatchdog_NoTimeout(t *testing.T) { t.Parallel() - ctx := testutil.Context(t, time.Hour) - mClock := clock.NewMock() - start := time.Date(2024, 2, 5, 8, 7, 6, 5, time.UTC) - mClock.Set(start) + ctx := testutil.Context(t, testutil.WaitShort) + mClock := clock.NewMock(t) logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug) fPS := newFakePubsub() + + // trap the ticker and timer.Stop() calls + pubTrap := mClock.Trap().TickerFunc("publish") + defer pubTrap.Close() + subTrap := mClock.Trap().TimerStop("subscribe") + defer subTrap.Close() + uut := pubsub.NewWatchdogWithClock(ctx, logger, fPS, mClock) + // wait for the ticker to be created so that we know it starts from the + // right baseline time. + pc, err := pubTrap.Wait(ctx) + require.NoError(t, err) + pc.Release() + require.Equal(t, 15*time.Second, pc.Duration) + + // we subscribe after starting the timer, so we know the timer also starts + // from the baseline. sub := testutil.RequireRecvCtx(ctx, t, fPS.subs) require.Equal(t, pubsub.EventPubsubWatchdog, sub.event) - p := testutil.RequireRecvCtx(ctx, t, fPS.pubs) - require.Equal(t, pubsub.EventPubsubWatchdog, p) // 5 min / 15 sec = 20, so do 21 ticks for i := 0; i < 21; i++ { - mClock.Add(15 * time.Second) - p = testutil.RequireRecvCtx(ctx, t, fPS.pubs) + d, w := mClock.AdvanceNext() + w.MustWait(ctx) + require.LessOrEqual(t, d, 15*time.Second) + p := testutil.RequireRecvCtx(ctx, t, fPS.pubs) require.Equal(t, pubsub.EventPubsubWatchdog, p) - mClock.Add(30 * time.Millisecond) // reasonable round-trip + mClock.Advance(30 * time.Millisecond). 
// reasonable round-trip + MustWait(ctx) // forward the beat sub.listener(ctx, []byte{}) // we shouldn't time out @@ -45,31 +60,51 @@ func TestWatchdog_NoTimeout(t *testing.T) { } } - err := uut.Close() + errCh := make(chan error, 1) + go func() { + errCh <- uut.Close() + }() + sc, err := subTrap.Wait(ctx) // timer.Stop() called + require.NoError(t, err) + sc.Release() + err = testutil.RequireRecvCtx(ctx, t, errCh) require.NoError(t, err) } func TestWatchdog_Timeout(t *testing.T) { t.Parallel() ctx := testutil.Context(t, testutil.WaitShort) - mClock := clock.NewMock() - start := time.Date(2024, 2, 5, 8, 7, 6, 5, time.UTC) - mClock.Set(start) + mClock := clock.NewMock(t) logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug) fPS := newFakePubsub() + + // trap the ticker calls + pubTrap := mClock.Trap().TickerFunc("publish") + defer pubTrap.Close() + uut := pubsub.NewWatchdogWithClock(ctx, logger, fPS, mClock) + // wait for the ticker to be created so that we know it starts from the + // right baseline time. + pc, err := pubTrap.Wait(ctx) + require.NoError(t, err) + pc.Release() + require.Equal(t, 15*time.Second, pc.Duration) + + // we subscribe after starting the timer, so we know the timer also starts + // from the baseline. sub := testutil.RequireRecvCtx(ctx, t, fPS.subs) require.Equal(t, pubsub.EventPubsubWatchdog, sub.event) - p := testutil.RequireRecvCtx(ctx, t, fPS.pubs) - require.Equal(t, pubsub.EventPubsubWatchdog, p) // 5 min / 15 sec = 20, so do 19 ticks without timing out for i := 0; i < 19; i++ { - mClock.Add(15 * time.Second) - p = testutil.RequireRecvCtx(ctx, t, fPS.pubs) + d, w := mClock.AdvanceNext() + w.MustWait(ctx) + require.LessOrEqual(t, d, 15*time.Second) + p := testutil.RequireRecvCtx(ctx, t, fPS.pubs) require.Equal(t, pubsub.EventPubsubWatchdog, p) - mClock.Add(30 * time.Millisecond) // reasonable round-trip + mClock.Advance(30 * time.Millisecond). // reasonable round-trip + MustWait(ctx) // we DO NOT forward the heartbeat // we shouldn't time out select { @@ -79,12 +114,14 @@ func TestWatchdog_Timeout(t *testing.T) { // OK! } } - mClock.Add(15 * time.Second) - p = testutil.RequireRecvCtx(ctx, t, fPS.pubs) + d, w := mClock.AdvanceNext() + w.MustWait(ctx) + require.LessOrEqual(t, d, 15*time.Second) + p := testutil.RequireRecvCtx(ctx, t, fPS.pubs) require.Equal(t, pubsub.EventPubsubWatchdog, p) testutil.RequireRecvCtx(ctx, t, uut.Timeout()) - err := uut.Close() + err = uut.Close() require.NoError(t, err) } @@ -118,7 +155,7 @@ func (f *fakePubsub) Publish(event string, _ []byte) error { func newFakePubsub() *fakePubsub { return &fakePubsub{ - pubs: make(chan string), + pubs: make(chan string, 1), subs: make(chan subscribe), } } diff --git a/coderd/database/querier.go b/coderd/database/querier.go index 6e2b1ff60cfdf..179a5e06039ff 100644 --- a/coderd/database/querier.go +++ b/coderd/database/querier.go @@ -17,6 +17,18 @@ type sqlcQuerier interface { // This must be called from within a transaction. The lock will be automatically // released when the transaction ends. AcquireLock(ctx context.Context, pgAdvisoryXactLock int64) error + // Acquires the lease for a given count of notification messages, to enable concurrent dequeuing and subsequent sending. + // Only rows that aren't already leased (or ones which are leased but have exceeded their lease period) are returned. + // + // A "lease" here refers to a notifier taking ownership of a notification_messages row. 
A lease survives for the duration + // of CODER_NOTIFICATIONS_LEASE_PERIOD. Once a message is delivered, its status is updated and the lease expires (set to NULL). + // If a message exceeds its lease, that implies the notifier did not shutdown cleanly, or the table update failed somehow, + // and the row will then be eligible to be dequeued by another notifier. + // + // SKIP LOCKED is used to jump over locked rows. This prevents multiple notifiers from acquiring the same messages. + // See: https://www.postgresql.org/docs/9.5/sql-select.html#SQL-FOR-UPDATE-SHARE + // + AcquireNotificationMessages(ctx context.Context, arg AcquireNotificationMessagesParams) ([]AcquireNotificationMessagesRow, error) // Acquires the lock for a single job that isn't started, completed, // canceled, and that matches an array of provisioner types. // @@ -45,6 +57,8 @@ type sqlcQuerier interface { // referenced by the latest build of a workspace. ArchiveUnusedTemplateVersions(ctx context.Context, arg ArchiveUnusedTemplateVersionsParams) ([]uuid.UUID, error) BatchUpdateWorkspaceLastUsedAt(ctx context.Context, arg BatchUpdateWorkspaceLastUsedAtParams) error + BulkMarkNotificationMessagesFailed(ctx context.Context, arg BulkMarkNotificationMessagesFailedParams) (int64, error) + BulkMarkNotificationMessagesSent(ctx context.Context, arg BulkMarkNotificationMessagesSentParams) (int64, error) CleanTailnetCoordinators(ctx context.Context) error CleanTailnetLostPeers(ctx context.Context) error CleanTailnetTunnels(ctx context.Context) error @@ -65,6 +79,8 @@ type sqlcQuerier interface { DeleteOAuth2ProviderAppCodesByAppAndUserID(ctx context.Context, arg DeleteOAuth2ProviderAppCodesByAppAndUserIDParams) error DeleteOAuth2ProviderAppSecretByID(ctx context.Context, id uuid.UUID) error DeleteOAuth2ProviderAppTokensByAppAndUserID(ctx context.Context, arg DeleteOAuth2ProviderAppTokensByAppAndUserIDParams) error + // Delete all notification messages which have not been updated for over a week. + DeleteOldNotificationMessages(ctx context.Context) error // Delete provisioner daemons that have been created at least a week ago // and have not connected to coderd since a week. // A provisioner daemon with "zeroed" last_seen_at column indicates possible @@ -75,6 +91,7 @@ type sqlcQuerier interface { DeleteOldWorkspaceAgentLogs(ctx context.Context) error DeleteOldWorkspaceAgentStats(ctx context.Context) error DeleteOrganization(ctx context.Context, id uuid.UUID) error + DeleteOrganizationMember(ctx context.Context, arg DeleteOrganizationMemberParams) error DeleteReplicasUpdatedBefore(ctx context.Context, updatedAt time.Time) error DeleteTailnetAgent(ctx context.Context, arg DeleteTailnetAgentParams) (DeleteTailnetAgentRow, error) DeleteTailnetClient(ctx context.Context, arg DeleteTailnetClientParams) (DeleteTailnetClientRow, error) @@ -83,7 +100,10 @@ type sqlcQuerier interface { DeleteTailnetTunnel(ctx context.Context, arg DeleteTailnetTunnelParams) (DeleteTailnetTunnelRow, error) DeleteWorkspaceAgentPortShare(ctx context.Context, arg DeleteWorkspaceAgentPortShareParams) error DeleteWorkspaceAgentPortSharesByTemplate(ctx context.Context, templateID uuid.UUID) error + EnqueueNotificationMessage(ctx context.Context, arg EnqueueNotificationMessageParams) (NotificationMessage, error) FavoriteWorkspace(ctx context.Context, id uuid.UUID) error + // This is used to build up the notification_message's JSON payload. 
+ FetchNewMessageMetadata(ctx context.Context, arg FetchNewMessageMetadataParams) (FetchNewMessageMetadataRow, error) GetAPIKeyByID(ctx context.Context, id string) (APIKey, error) // there is no unique constraint on empty token names GetAPIKeyByName(ctx context.Context, arg GetAPIKeyByNameParams) (APIKey, error) @@ -123,9 +143,11 @@ type sqlcQuerier interface { GetGitSSHKey(ctx context.Context, userID uuid.UUID) (GitSSHKey, error) GetGroupByID(ctx context.Context, id uuid.UUID) (Group, error) GetGroupByOrgAndName(ctx context.Context, arg GetGroupByOrgAndNameParams) (Group, error) + GetGroupMembers(ctx context.Context) ([]GroupMember, error) // If the group is a user made group, then we need to check the group_members table. // If it is the "Everyone" group, then we need to check the organization_members table. - GetGroupMembers(ctx context.Context, groupID uuid.UUID) ([]User, error) + GetGroupMembersByGroupID(ctx context.Context, groupID uuid.UUID) ([]User, error) + GetGroups(ctx context.Context) ([]Group, error) GetGroupsByOrganizationAndUserID(ctx context.Context, arg GetGroupsByOrganizationAndUserIDParams) ([]Group, error) GetGroupsByOrganizationID(ctx context.Context, organizationID uuid.UUID) ([]Group, error) GetHealthSettings(ctx context.Context) (string, error) @@ -151,8 +173,6 @@ type sqlcQuerier interface { GetOrganizationByID(ctx context.Context, id uuid.UUID) (Organization, error) GetOrganizationByName(ctx context.Context, name string) (Organization, error) GetOrganizationIDsByMemberIDs(ctx context.Context, ids []uuid.UUID) ([]GetOrganizationIDsByMemberIDsRow, error) - GetOrganizationMemberByUserID(ctx context.Context, arg GetOrganizationMemberByUserIDParams) (OrganizationMember, error) - GetOrganizationMembershipsByUserID(ctx context.Context, userID uuid.UUID) ([]OrganizationMember, error) GetOrganizations(ctx context.Context) ([]Organization, error) GetOrganizationsByUserID(ctx context.Context, userID uuid.UUID) ([]Organization, error) GetParameterSchemasByJobID(ctx context.Context, jobID uuid.UUID) ([]ParameterSchema, error) @@ -349,6 +369,11 @@ type sqlcQuerier interface { InsertWorkspaceResource(ctx context.Context, arg InsertWorkspaceResourceParams) (WorkspaceResource, error) InsertWorkspaceResourceMetadata(ctx context.Context, arg InsertWorkspaceResourceMetadataParams) ([]WorkspaceResourceMetadatum, error) ListWorkspaceAgentPortShares(ctx context.Context, workspaceID uuid.UUID) ([]WorkspaceAgentPortShare, error) + // Arguments are optional with uuid.Nil to ignore. 
+ // - Use just 'organization_id' to get all members of an org + // - Use just 'user_id' to get all orgs a user is a member of + // - Use both to get a specific org member row + OrganizationMembers(ctx context.Context, arg OrganizationMembersParams) ([]OrganizationMembersRow, error) ReduceWorkspaceAgentShareLevelToAuthenticatedByTemplate(ctx context.Context, templateID uuid.UUID) error RegisterWorkspaceProxy(ctx context.Context, arg RegisterWorkspaceProxyParams) (WorkspaceProxy, error) RemoveUserFromAllGroups(ctx context.Context, userID uuid.UUID) error diff --git a/coderd/database/querier_test.go b/coderd/database/querier_test.go index c3e1f2e46b3db..544f7e55ed2c5 100644 --- a/coderd/database/querier_test.go +++ b/coderd/database/querier_test.go @@ -6,6 +6,7 @@ import ( "context" "database/sql" "encoding/json" + "fmt" "sort" "testing" "time" @@ -14,6 +15,7 @@ import ( "github.com/stretchr/testify/require" "github.com/coder/coder/v2/coderd/database" + "github.com/coder/coder/v2/coderd/database/db2sdk" "github.com/coder/coder/v2/coderd/database/dbgen" "github.com/coder/coder/v2/coderd/database/dbtime" "github.com/coder/coder/v2/coderd/database/migrations" @@ -514,6 +516,257 @@ func TestDefaultOrg(t *testing.T) { require.True(t, all[0].IsDefault, "first org should always be default") } +func TestAuditLogDefaultLimit(t *testing.T) { + t.Parallel() + if testing.Short() { + t.SkipNow() + } + + sqlDB := testSQLDB(t) + err := migrations.Up(sqlDB) + require.NoError(t, err) + db := database.New(sqlDB) + + for i := 0; i < 110; i++ { + dbgen.AuditLog(t, db, database.AuditLog{}) + } + + ctx := testutil.Context(t, testutil.WaitShort) + rows, err := db.GetAuditLogsOffset(ctx, database.GetAuditLogsOffsetParams{}) + require.NoError(t, err) + // The length should match the default limit of the SQL query. + // Updating the sql query requires changing the number below to match. + require.Len(t, rows, 100) +} + +// TestReadCustomRoles tests the input params returns the correct set of roles. +func TestReadCustomRoles(t *testing.T) { + t.Parallel() + + if testing.Short() { + t.SkipNow() + } + + sqlDB := testSQLDB(t) + err := migrations.Up(sqlDB) + require.NoError(t, err) + + db := database.New(sqlDB) + ctx := testutil.Context(t, testutil.WaitLong) + + // Make a few site roles, and a few org roles + orgIDs := make([]uuid.UUID, 3) + for i := range orgIDs { + orgIDs[i] = uuid.New() + } + + allRoles := make([]database.CustomRole, 0) + siteRoles := make([]database.CustomRole, 0) + orgRoles := make([]database.CustomRole, 0) + for i := 0; i < 15; i++ { + orgID := uuid.NullUUID{ + UUID: orgIDs[i%len(orgIDs)], + Valid: true, + } + if i%4 == 0 { + // Some should be site wide + orgID = uuid.NullUUID{} + } + + role, err := db.UpsertCustomRole(ctx, database.UpsertCustomRoleParams{ + Name: fmt.Sprintf("role-%d", i), + OrganizationID: orgID, + }) + require.NoError(t, err) + allRoles = append(allRoles, role) + if orgID.Valid { + orgRoles = append(orgRoles, role) + } else { + siteRoles = append(siteRoles, role) + } + } + + // normalizedRoleName allows for the simple ElementsMatch to work properly. 
+ normalizedRoleName := func(role database.CustomRole) string { + return role.Name + ":" + role.OrganizationID.UUID.String() + } + + roleToLookup := func(role database.CustomRole) database.NameOrganizationPair { + return database.NameOrganizationPair{ + Name: role.Name, + OrganizationID: role.OrganizationID.UUID, + } + } + + testCases := []struct { + Name string + Params database.CustomRolesParams + Match func(role database.CustomRole) bool + }{ + { + Name: "NilRoles", + Params: database.CustomRolesParams{ + LookupRoles: nil, + ExcludeOrgRoles: false, + OrganizationID: uuid.UUID{}, + }, + Match: func(role database.CustomRole) bool { + return true + }, + }, + { + // Empty params should return all roles + Name: "Empty", + Params: database.CustomRolesParams{ + LookupRoles: []database.NameOrganizationPair{}, + ExcludeOrgRoles: false, + OrganizationID: uuid.UUID{}, + }, + Match: func(role database.CustomRole) bool { + return true + }, + }, + { + Name: "Organization", + Params: database.CustomRolesParams{ + LookupRoles: []database.NameOrganizationPair{}, + ExcludeOrgRoles: false, + OrganizationID: orgIDs[1], + }, + Match: func(role database.CustomRole) bool { + return role.OrganizationID.UUID == orgIDs[1] + }, + }, + { + Name: "SpecificOrgRole", + Params: database.CustomRolesParams{ + LookupRoles: []database.NameOrganizationPair{ + { + Name: orgRoles[0].Name, + OrganizationID: orgRoles[0].OrganizationID.UUID, + }, + }, + }, + Match: func(role database.CustomRole) bool { + return role.Name == orgRoles[0].Name && role.OrganizationID.UUID == orgRoles[0].OrganizationID.UUID + }, + }, + { + Name: "SpecificSiteRole", + Params: database.CustomRolesParams{ + LookupRoles: []database.NameOrganizationPair{ + { + Name: siteRoles[0].Name, + OrganizationID: siteRoles[0].OrganizationID.UUID, + }, + }, + }, + Match: func(role database.CustomRole) bool { + return role.Name == siteRoles[0].Name && role.OrganizationID.UUID == siteRoles[0].OrganizationID.UUID + }, + }, + { + Name: "FewSpecificRoles", + Params: database.CustomRolesParams{ + LookupRoles: []database.NameOrganizationPair{ + { + Name: orgRoles[0].Name, + OrganizationID: orgRoles[0].OrganizationID.UUID, + }, + { + Name: orgRoles[1].Name, + OrganizationID: orgRoles[1].OrganizationID.UUID, + }, + { + Name: siteRoles[0].Name, + OrganizationID: siteRoles[0].OrganizationID.UUID, + }, + }, + }, + Match: func(role database.CustomRole) bool { + return (role.Name == orgRoles[0].Name && role.OrganizationID.UUID == orgRoles[0].OrganizationID.UUID) || + (role.Name == orgRoles[1].Name && role.OrganizationID.UUID == orgRoles[1].OrganizationID.UUID) || + (role.Name == siteRoles[0].Name && role.OrganizationID.UUID == siteRoles[0].OrganizationID.UUID) + }, + }, + { + Name: "AllRolesByLookup", + Params: database.CustomRolesParams{ + LookupRoles: db2sdk.List(allRoles, roleToLookup), + }, + Match: func(role database.CustomRole) bool { + return true + }, + }, + { + Name: "NotExists", + Params: database.CustomRolesParams{ + LookupRoles: []database.NameOrganizationPair{ + { + Name: "not-exists", + OrganizationID: uuid.New(), + }, + { + Name: "not-exists", + OrganizationID: uuid.Nil, + }, + }, + }, + Match: func(role database.CustomRole) bool { + return false + }, + }, + { + Name: "Mixed", + Params: database.CustomRolesParams{ + LookupRoles: []database.NameOrganizationPair{ + { + Name: "not-exists", + OrganizationID: uuid.New(), + }, + { + Name: "not-exists", + OrganizationID: uuid.Nil, + }, + { + Name: orgRoles[0].Name, + OrganizationID: orgRoles[0].OrganizationID.UUID, + }, + 
{ + Name: siteRoles[0].Name, + }, + }, + }, + Match: func(role database.CustomRole) bool { + return (role.Name == orgRoles[0].Name && role.OrganizationID.UUID == orgRoles[0].OrganizationID.UUID) || + (role.Name == siteRoles[0].Name && role.OrganizationID.UUID == siteRoles[0].OrganizationID.UUID) + }, + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.Name, func(t *testing.T) { + t.Parallel() + + ctx := testutil.Context(t, testutil.WaitLong) + found, err := db.CustomRoles(ctx, tc.Params) + require.NoError(t, err) + filtered := make([]database.CustomRole, 0) + for _, role := range allRoles { + if tc.Match(role) { + filtered = append(filtered, role) + } + } + + a := db2sdk.List(filtered, normalizedRoleName) + b := db2sdk.List(found, normalizedRoleName) + require.Equal(t, a, b) + }) + } +} + type tvArgs struct { Status database.ProvisionerJobStatus // CreateWorkspace is true if we should create a workspace for the template version @@ -673,6 +926,42 @@ func TestArchiveVersions(t *testing.T) { }) } +func TestExpectOne(t *testing.T) { + t.Parallel() + if testing.Short() { + t.SkipNow() + } + + t.Run("ErrNoRows", func(t *testing.T) { + t.Parallel() + sqlDB := testSQLDB(t) + err := migrations.Up(sqlDB) + require.NoError(t, err) + db := database.New(sqlDB) + ctx := context.Background() + + _, err = database.ExpectOne(db.GetUsers(ctx, database.GetUsersParams{})) + require.ErrorIs(t, err, sql.ErrNoRows) + }) + + t.Run("TooMany", func(t *testing.T) { + t.Parallel() + sqlDB := testSQLDB(t) + err := migrations.Up(sqlDB) + require.NoError(t, err) + db := database.New(sqlDB) + ctx := context.Background() + + // Create 2 organizations so the query returns >1 + dbgen.Organization(t, db, database.Organization{}) + dbgen.Organization(t, db, database.Organization{}) + + // Organizations is an easy table without foreign key dependencies + _, err = database.ExpectOne(db.GetOrganizations(ctx)) + require.ErrorContains(t, err, "too many rows returned") + }) +} + func requireUsersMatch(t testing.TB, expected []database.User, found []database.GetUsersRow, msg string) { t.Helper() require.ElementsMatch(t, expected, database.ConvertUserRows(found), msg) diff --git a/coderd/database/queries.sql.go b/coderd/database/queries.sql.go index 56fcfaf998e4f..55db74634c740 100644 --- a/coderd/database/queries.sql.go +++ b/coderd/database/queries.sql.go @@ -444,12 +444,21 @@ func (q *sqlQuerier) UpdateAPIKeyByID(ctx context.Context, arg UpdateAPIKeyByIDP const getAuditLogsOffset = `-- name: GetAuditLogsOffset :many SELECT audit_logs.id, audit_logs.time, audit_logs.user_id, audit_logs.organization_id, audit_logs.ip, audit_logs.user_agent, audit_logs.resource_type, audit_logs.resource_id, audit_logs.resource_target, audit_logs.action, audit_logs.diff, audit_logs.status_code, audit_logs.additional_fields, audit_logs.request_id, audit_logs.resource_icon, + -- sqlc.embed(users) would be nice but it does not seem to play well with + -- left joins. 
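A note on the TestExpectOne cases added above: the test only pins down the helper's observable behaviour (sql.ErrNoRows for zero rows, a "too many rows returned" error for more than one). A minimal sketch of a generic ExpectOne consistent with those assertions, assuming a (rows, err) pass-through signature; the actual coderd/database implementation may differ in detail:

```go
package example // illustrative sketch, not the actual coderd/database implementation

import (
	"database/sql"

	"golang.org/x/xerrors"
)

// ExpectOne collapses a "many" query result into exactly one row, matching
// the assertions in TestExpectOne: sql.ErrNoRows for zero rows and a
// "too many rows returned" error for more than one.
func ExpectOne[T any](rows []T, err error) (T, error) {
	var empty T
	if err != nil {
		return empty, err
	}
	switch len(rows) {
	case 0:
		return empty, sql.ErrNoRows
	case 1:
		return rows[0], nil
	default:
		return empty, xerrors.Errorf("too many rows returned, expected 1 and got %d", len(rows))
	}
}
```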
users.username AS user_username, + users.name AS user_name, users.email AS user_email, users.created_at AS user_created_at, + users.updated_at AS user_updated_at, + users.last_seen_at AS user_last_seen_at, users.status AS user_status, + users.login_type AS user_login_type, users.rbac_roles AS user_roles, users.avatar_url AS user_avatar_url, + users.deleted AS user_deleted, + users.theme_preference AS user_theme_preference, + users.quiet_hours_schedule AS user_quiet_hours_schedule, COUNT(audit_logs.*) OVER () AS count FROM audit_logs @@ -481,77 +490,85 @@ FROM WHERE -- Filter resource_type CASE - WHEN $3 :: text != '' THEN - resource_type = $3 :: resource_type + WHEN $1 :: text != '' THEN + resource_type = $1 :: resource_type ELSE true END -- Filter resource_id AND CASE - WHEN $4 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN - resource_id = $4 + WHEN $2 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + resource_id = $2 + ELSE true + END + -- Filter organization_id + AND CASE + WHEN $3 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + audit_logs.organization_id = $3 ELSE true END -- Filter by resource_target AND CASE - WHEN $5 :: text != '' THEN - resource_target = $5 + WHEN $4 :: text != '' THEN + resource_target = $4 ELSE true END -- Filter action AND CASE - WHEN $6 :: text != '' THEN - action = $6 :: audit_action + WHEN $5 :: text != '' THEN + action = $5 :: audit_action ELSE true END -- Filter by user_id AND CASE - WHEN $7 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN - user_id = $7 + WHEN $6 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + user_id = $6 ELSE true END -- Filter by username AND CASE - WHEN $8 :: text != '' THEN - user_id = (SELECT id FROM users WHERE lower(username) = lower($8) AND deleted = false) + WHEN $7 :: text != '' THEN + user_id = (SELECT id FROM users WHERE lower(username) = lower($7) AND deleted = false) ELSE true END -- Filter by user_email AND CASE - WHEN $9 :: text != '' THEN - users.email = $9 + WHEN $8 :: text != '' THEN + users.email = $8 ELSE true END -- Filter by date_from AND CASE - WHEN $10 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN - "time" >= $10 + WHEN $9 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN + "time" >= $9 ELSE true END -- Filter by date_to AND CASE - WHEN $11 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN - "time" <= $11 + WHEN $10 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN + "time" <= $10 ELSE true END -- Filter by build_reason AND CASE - WHEN $12::text != '' THEN - workspace_builds.reason::text = $12 + WHEN $11::text != '' THEN + workspace_builds.reason::text = $11 ELSE true END ORDER BY "time" DESC LIMIT - $1 + -- a limit of 0 means "no limit". The audit log table is unbounded + -- in size, and is expected to be quite large. Implement a default + -- limit of 100 to prevent accidental excessively large queries. 
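That default matters for callers: requesting a limit of 0 now yields up to 100 rows instead of zero. A hedged usage sketch, assuming database.Store exposes the generated GetAuditLogsOffset as it does elsewhere in coderd:

```go
package example // illustrative usage sketch

import (
	"context"

	"github.com/google/uuid"

	"github.com/coder/coder/v2/coderd/database"
)

// listRecentAuditLogs relies on the new paging defaults: a zero LimitOpt is
// coalesced to 100 by the query, and uuid.Nil (the zero UUID) disables the
// new organization_id filter.
func listRecentAuditLogs(ctx context.Context, db database.Store) ([]database.GetAuditLogsOffsetRow, error) {
	return db.GetAuditLogsOffset(ctx, database.GetAuditLogsOffsetParams{
		OrganizationID: uuid.Nil,
		LimitOpt:       0, // "no limit" requested; the query substitutes 100
		OffsetOpt:      0,
	})
}
```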
+ COALESCE(NULLIF($13 :: int, 0), 100) OFFSET - $2 + $12 ` type GetAuditLogsOffsetParams struct { - Limit int32 `db:"limit" json:"limit"` - Offset int32 `db:"offset" json:"offset"` ResourceType string `db:"resource_type" json:"resource_type"` ResourceID uuid.UUID `db:"resource_id" json:"resource_id"` + OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"` ResourceTarget string `db:"resource_target" json:"resource_target"` Action string `db:"action" json:"action"` UserID uuid.UUID `db:"user_id" json:"user_id"` @@ -560,41 +577,49 @@ type GetAuditLogsOffsetParams struct { DateFrom time.Time `db:"date_from" json:"date_from"` DateTo time.Time `db:"date_to" json:"date_to"` BuildReason string `db:"build_reason" json:"build_reason"` + OffsetOpt int32 `db:"offset_opt" json:"offset_opt"` + LimitOpt int32 `db:"limit_opt" json:"limit_opt"` } type GetAuditLogsOffsetRow struct { - ID uuid.UUID `db:"id" json:"id"` - Time time.Time `db:"time" json:"time"` - UserID uuid.UUID `db:"user_id" json:"user_id"` - OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"` - Ip pqtype.Inet `db:"ip" json:"ip"` - UserAgent sql.NullString `db:"user_agent" json:"user_agent"` - ResourceType ResourceType `db:"resource_type" json:"resource_type"` - ResourceID uuid.UUID `db:"resource_id" json:"resource_id"` - ResourceTarget string `db:"resource_target" json:"resource_target"` - Action AuditAction `db:"action" json:"action"` - Diff json.RawMessage `db:"diff" json:"diff"` - StatusCode int32 `db:"status_code" json:"status_code"` - AdditionalFields json.RawMessage `db:"additional_fields" json:"additional_fields"` - RequestID uuid.UUID `db:"request_id" json:"request_id"` - ResourceIcon string `db:"resource_icon" json:"resource_icon"` - UserUsername sql.NullString `db:"user_username" json:"user_username"` - UserEmail sql.NullString `db:"user_email" json:"user_email"` - UserCreatedAt sql.NullTime `db:"user_created_at" json:"user_created_at"` - UserStatus NullUserStatus `db:"user_status" json:"user_status"` - UserRoles pq.StringArray `db:"user_roles" json:"user_roles"` - UserAvatarUrl sql.NullString `db:"user_avatar_url" json:"user_avatar_url"` - Count int64 `db:"count" json:"count"` + ID uuid.UUID `db:"id" json:"id"` + Time time.Time `db:"time" json:"time"` + UserID uuid.UUID `db:"user_id" json:"user_id"` + OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"` + Ip pqtype.Inet `db:"ip" json:"ip"` + UserAgent sql.NullString `db:"user_agent" json:"user_agent"` + ResourceType ResourceType `db:"resource_type" json:"resource_type"` + ResourceID uuid.UUID `db:"resource_id" json:"resource_id"` + ResourceTarget string `db:"resource_target" json:"resource_target"` + Action AuditAction `db:"action" json:"action"` + Diff json.RawMessage `db:"diff" json:"diff"` + StatusCode int32 `db:"status_code" json:"status_code"` + AdditionalFields json.RawMessage `db:"additional_fields" json:"additional_fields"` + RequestID uuid.UUID `db:"request_id" json:"request_id"` + ResourceIcon string `db:"resource_icon" json:"resource_icon"` + UserUsername sql.NullString `db:"user_username" json:"user_username"` + UserName sql.NullString `db:"user_name" json:"user_name"` + UserEmail sql.NullString `db:"user_email" json:"user_email"` + UserCreatedAt sql.NullTime `db:"user_created_at" json:"user_created_at"` + UserUpdatedAt sql.NullTime `db:"user_updated_at" json:"user_updated_at"` + UserLastSeenAt sql.NullTime `db:"user_last_seen_at" json:"user_last_seen_at"` + UserStatus NullUserStatus `db:"user_status" 
json:"user_status"` + UserLoginType NullLoginType `db:"user_login_type" json:"user_login_type"` + UserRoles pq.StringArray `db:"user_roles" json:"user_roles"` + UserAvatarUrl sql.NullString `db:"user_avatar_url" json:"user_avatar_url"` + UserDeleted sql.NullBool `db:"user_deleted" json:"user_deleted"` + UserThemePreference sql.NullString `db:"user_theme_preference" json:"user_theme_preference"` + UserQuietHoursSchedule sql.NullString `db:"user_quiet_hours_schedule" json:"user_quiet_hours_schedule"` + Count int64 `db:"count" json:"count"` } // GetAuditLogsBefore retrieves `row_limit` number of audit logs before the provided // ID. func (q *sqlQuerier) GetAuditLogsOffset(ctx context.Context, arg GetAuditLogsOffsetParams) ([]GetAuditLogsOffsetRow, error) { rows, err := q.db.QueryContext(ctx, getAuditLogsOffset, - arg.Limit, - arg.Offset, arg.ResourceType, arg.ResourceID, + arg.OrganizationID, arg.ResourceTarget, arg.Action, arg.UserID, @@ -603,6 +628,8 @@ func (q *sqlQuerier) GetAuditLogsOffset(ctx context.Context, arg GetAuditLogsOff arg.DateFrom, arg.DateTo, arg.BuildReason, + arg.OffsetOpt, + arg.LimitOpt, ) if err != nil { return nil, err @@ -628,11 +655,18 @@ func (q *sqlQuerier) GetAuditLogsOffset(ctx context.Context, arg GetAuditLogsOff &i.RequestID, &i.ResourceIcon, &i.UserUsername, + &i.UserName, &i.UserEmail, &i.UserCreatedAt, + &i.UserUpdatedAt, + &i.UserLastSeenAt, &i.UserStatus, + &i.UserLoginType, &i.UserRoles, &i.UserAvatarUrl, + &i.UserDeleted, + &i.UserThemePreference, + &i.UserQuietHoursSchedule, &i.Count, ); err != nil { return nil, err @@ -1289,6 +1323,33 @@ func (q *sqlQuerier) DeleteGroupMemberFromGroup(ctx context.Context, arg DeleteG } const getGroupMembers = `-- name: GetGroupMembers :many +SELECT user_id, group_id FROM group_members +` + +func (q *sqlQuerier) GetGroupMembers(ctx context.Context) ([]GroupMember, error) { + rows, err := q.db.QueryContext(ctx, getGroupMembers) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GroupMember + for rows.Next() { + var i GroupMember + if err := rows.Scan(&i.UserID, &i.GroupID); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const getGroupMembersByGroupID = `-- name: GetGroupMembersByGroupID :many SELECT users.id, users.email, users.username, users.hashed_password, users.created_at, users.updated_at, users.status, users.rbac_roles, users.login_type, users.avatar_url, users.deleted, users.last_seen_at, users.quiet_hours_schedule, users.theme_preference, users.name FROM @@ -1314,8 +1375,8 @@ AND // If the group is a user made group, then we need to check the group_members table. // If it is the "Everyone" group, then we need to check the organization_members table. 
-func (q *sqlQuerier) GetGroupMembers(ctx context.Context, groupID uuid.UUID) ([]User, error) { - rows, err := q.db.QueryContext(ctx, getGroupMembers, groupID) +func (q *sqlQuerier) GetGroupMembersByGroupID(ctx context.Context, groupID uuid.UUID) ([]User, error) { + rows, err := q.db.QueryContext(ctx, getGroupMembersByGroupID, groupID) if err != nil { return nil, err } @@ -1484,6 +1545,41 @@ func (q *sqlQuerier) GetGroupByOrgAndName(ctx context.Context, arg GetGroupByOrg return i, err } +const getGroups = `-- name: GetGroups :many +SELECT id, name, organization_id, avatar_url, quota_allowance, display_name, source FROM groups +` + +func (q *sqlQuerier) GetGroups(ctx context.Context) ([]Group, error) { + rows, err := q.db.QueryContext(ctx, getGroups) + if err != nil { + return nil, err + } + defer rows.Close() + var items []Group + for rows.Next() { + var i Group + if err := rows.Scan( + &i.ID, + &i.Name, + &i.OrganizationID, + &i.AvatarURL, + &i.QuotaAllowance, + &i.DisplayName, + &i.Source, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const getGroupsByOrganizationAndUserID = `-- name: GetGroupsByOrganizationAndUserID :many SELECT groups.id, groups.name, groups.organization_id, groups.avatar_url, groups.quota_allowance, groups.display_name, groups.source @@ -3192,6 +3288,297 @@ func (q *sqlQuerier) TryAcquireLock(ctx context.Context, pgTryAdvisoryXactLock i return pg_try_advisory_xact_lock, err } +const acquireNotificationMessages = `-- name: AcquireNotificationMessages :many +WITH acquired AS ( + UPDATE + notification_messages + SET updated_at = NOW(), + status = 'leased'::notification_message_status, + status_reason = 'Leased by notifier ' || $1::uuid, + leased_until = NOW() + CONCAT($2::int, ' seconds')::interval + WHERE id IN (SELECT nm.id + FROM notification_messages AS nm + WHERE ( + ( + -- message is in acquirable states + nm.status IN ( + 'pending'::notification_message_status, + 'temporary_failure'::notification_message_status + ) + ) + -- or somehow the message was left in leased for longer than its lease period + OR ( + nm.status = 'leased'::notification_message_status + AND nm.leased_until < NOW() + ) + ) + AND ( + -- exclude all messages which have exceeded the max attempts; these will be purged later + nm.attempt_count IS NULL OR nm.attempt_count < $3::int + ) + -- if set, do not retry until we've exceeded the wait time + AND ( + CASE + WHEN nm.next_retry_after IS NOT NULL THEN nm.next_retry_after < NOW() + ELSE true + END + ) + ORDER BY nm.created_at ASC + -- Ensure that multiple concurrent readers cannot retrieve the same rows + FOR UPDATE OF nm + SKIP LOCKED + LIMIT $4) + RETURNING id, notification_template_id, user_id, method, status, status_reason, created_by, payload, attempt_count, targets, created_at, updated_at, leased_until, next_retry_after) +SELECT + -- message + nm.id, + nm.payload, + nm.method, + nm.created_by, + -- template + nt.title_template, + nt.body_template +FROM acquired nm + JOIN notification_templates nt ON nm.notification_template_id = nt.id +` + +type AcquireNotificationMessagesParams struct { + NotifierID uuid.UUID `db:"notifier_id" json:"notifier_id"` + LeaseSeconds int32 `db:"lease_seconds" json:"lease_seconds"` + MaxAttemptCount int32 `db:"max_attempt_count" json:"max_attempt_count"` + Count int32 `db:"count" json:"count"` +} + +type AcquireNotificationMessagesRow struct { + ID 
uuid.UUID `db:"id" json:"id"` + Payload json.RawMessage `db:"payload" json:"payload"` + Method NotificationMethod `db:"method" json:"method"` + CreatedBy string `db:"created_by" json:"created_by"` + TitleTemplate string `db:"title_template" json:"title_template"` + BodyTemplate string `db:"body_template" json:"body_template"` +} + +// Acquires the lease for a given count of notification messages, to enable concurrent dequeuing and subsequent sending. +// Only rows that aren't already leased (or ones which are leased but have exceeded their lease period) are returned. +// +// A "lease" here refers to a notifier taking ownership of a notification_messages row. A lease survives for the duration +// of CODER_NOTIFICATIONS_LEASE_PERIOD. Once a message is delivered, its status is updated and the lease expires (set to NULL). +// If a message exceeds its lease, that implies the notifier did not shutdown cleanly, or the table update failed somehow, +// and the row will then be eligible to be dequeued by another notifier. +// +// SKIP LOCKED is used to jump over locked rows. This prevents multiple notifiers from acquiring the same messages. +// See: https://www.postgresql.org/docs/9.5/sql-select.html#SQL-FOR-UPDATE-SHARE +func (q *sqlQuerier) AcquireNotificationMessages(ctx context.Context, arg AcquireNotificationMessagesParams) ([]AcquireNotificationMessagesRow, error) { + rows, err := q.db.QueryContext(ctx, acquireNotificationMessages, + arg.NotifierID, + arg.LeaseSeconds, + arg.MaxAttemptCount, + arg.Count, + ) + if err != nil { + return nil, err + } + defer rows.Close() + var items []AcquireNotificationMessagesRow + for rows.Next() { + var i AcquireNotificationMessagesRow + if err := rows.Scan( + &i.ID, + &i.Payload, + &i.Method, + &i.CreatedBy, + &i.TitleTemplate, + &i.BodyTemplate, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const bulkMarkNotificationMessagesFailed = `-- name: BulkMarkNotificationMessagesFailed :execrows +UPDATE notification_messages +SET updated_at = subquery.failed_at, + attempt_count = attempt_count + 1, + status = CASE + WHEN attempt_count + 1 < $1::int THEN subquery.status + ELSE 'permanent_failure'::notification_message_status END, + status_reason = subquery.status_reason, + leased_until = NULL, + next_retry_after = CASE + WHEN (attempt_count + 1 < $1::int) + THEN NOW() + CONCAT($2::int, ' seconds')::interval END +FROM (SELECT UNNEST($3::uuid[]) AS id, + UNNEST($4::timestamptz[]) AS failed_at, + UNNEST($5::notification_message_status[]) AS status, + UNNEST($6::text[]) AS status_reason) AS subquery +WHERE notification_messages.id = subquery.id +` + +type BulkMarkNotificationMessagesFailedParams struct { + MaxAttempts int32 `db:"max_attempts" json:"max_attempts"` + RetryInterval int32 `db:"retry_interval" json:"retry_interval"` + IDs []uuid.UUID `db:"ids" json:"ids"` + FailedAts []time.Time `db:"failed_ats" json:"failed_ats"` + Statuses []NotificationMessageStatus `db:"statuses" json:"statuses"` + StatusReasons []string `db:"status_reasons" json:"status_reasons"` +} + +func (q *sqlQuerier) BulkMarkNotificationMessagesFailed(ctx context.Context, arg BulkMarkNotificationMessagesFailedParams) (int64, error) { + result, err := q.db.ExecContext(ctx, bulkMarkNotificationMessagesFailed, + arg.MaxAttempts, + arg.RetryInterval, + pq.Array(arg.IDs), + pq.Array(arg.FailedAts), + pq.Array(arg.Statuses), + 
pq.Array(arg.StatusReasons), + ) + if err != nil { + return 0, err + } + return result.RowsAffected() +} + +const bulkMarkNotificationMessagesSent = `-- name: BulkMarkNotificationMessagesSent :execrows +UPDATE notification_messages +SET updated_at = new_values.sent_at, + attempt_count = attempt_count + 1, + status = 'sent'::notification_message_status, + status_reason = NULL, + leased_until = NULL, + next_retry_after = NULL +FROM (SELECT UNNEST($1::uuid[]) AS id, + UNNEST($2::timestamptz[]) AS sent_at) + AS new_values +WHERE notification_messages.id = new_values.id +` + +type BulkMarkNotificationMessagesSentParams struct { + IDs []uuid.UUID `db:"ids" json:"ids"` + SentAts []time.Time `db:"sent_ats" json:"sent_ats"` +} + +func (q *sqlQuerier) BulkMarkNotificationMessagesSent(ctx context.Context, arg BulkMarkNotificationMessagesSentParams) (int64, error) { + result, err := q.db.ExecContext(ctx, bulkMarkNotificationMessagesSent, pq.Array(arg.IDs), pq.Array(arg.SentAts)) + if err != nil { + return 0, err + } + return result.RowsAffected() +} + +const deleteOldNotificationMessages = `-- name: DeleteOldNotificationMessages :exec +DELETE +FROM notification_messages +WHERE id IN + (SELECT id + FROM notification_messages AS nested + WHERE nested.updated_at < NOW() - INTERVAL '7 days') +` + +// Delete all notification messages which have not been updated for over a week. +func (q *sqlQuerier) DeleteOldNotificationMessages(ctx context.Context) error { + _, err := q.db.ExecContext(ctx, deleteOldNotificationMessages) + return err +} + +const enqueueNotificationMessage = `-- name: EnqueueNotificationMessage :one +INSERT INTO notification_messages (id, notification_template_id, user_id, method, payload, targets, created_by) +VALUES ($1, + $2, + $3, + $4::notification_method, + $5::jsonb, + $6, + $7) +RETURNING id, notification_template_id, user_id, method, status, status_reason, created_by, payload, attempt_count, targets, created_at, updated_at, leased_until, next_retry_after +` + +type EnqueueNotificationMessageParams struct { + ID uuid.UUID `db:"id" json:"id"` + NotificationTemplateID uuid.UUID `db:"notification_template_id" json:"notification_template_id"` + UserID uuid.UUID `db:"user_id" json:"user_id"` + Method NotificationMethod `db:"method" json:"method"` + Payload json.RawMessage `db:"payload" json:"payload"` + Targets []uuid.UUID `db:"targets" json:"targets"` + CreatedBy string `db:"created_by" json:"created_by"` +} + +func (q *sqlQuerier) EnqueueNotificationMessage(ctx context.Context, arg EnqueueNotificationMessageParams) (NotificationMessage, error) { + row := q.db.QueryRowContext(ctx, enqueueNotificationMessage, + arg.ID, + arg.NotificationTemplateID, + arg.UserID, + arg.Method, + arg.Payload, + pq.Array(arg.Targets), + arg.CreatedBy, + ) + var i NotificationMessage + err := row.Scan( + &i.ID, + &i.NotificationTemplateID, + &i.UserID, + &i.Method, + &i.Status, + &i.StatusReason, + &i.CreatedBy, + &i.Payload, + &i.AttemptCount, + pq.Array(&i.Targets), + &i.CreatedAt, + &i.UpdatedAt, + &i.LeasedUntil, + &i.NextRetryAfter, + ) + return i, err +} + +const fetchNewMessageMetadata = `-- name: FetchNewMessageMetadata :one +SELECT nt.name AS notification_name, + nt.actions AS actions, + u.id AS user_id, + u.email AS user_email, + COALESCE(NULLIF(u.name, ''), NULLIF(u.username, ''))::text AS user_name +FROM notification_templates nt, + users u +WHERE nt.id = $1 + AND u.id = $2 +` + +type FetchNewMessageMetadataParams struct { + NotificationTemplateID uuid.UUID `db:"notification_template_id" 
json:"notification_template_id"` + UserID uuid.UUID `db:"user_id" json:"user_id"` +} + +type FetchNewMessageMetadataRow struct { + NotificationName string `db:"notification_name" json:"notification_name"` + Actions []byte `db:"actions" json:"actions"` + UserID uuid.UUID `db:"user_id" json:"user_id"` + UserEmail string `db:"user_email" json:"user_email"` + UserName string `db:"user_name" json:"user_name"` +} + +// This is used to build up the notification_message's JSON payload. +func (q *sqlQuerier) FetchNewMessageMetadata(ctx context.Context, arg FetchNewMessageMetadataParams) (FetchNewMessageMetadataRow, error) { + row := q.db.QueryRowContext(ctx, fetchNewMessageMetadata, arg.NotificationTemplateID, arg.UserID) + var i FetchNewMessageMetadataRow + err := row.Scan( + &i.NotificationName, + &i.Actions, + &i.UserID, + &i.UserEmail, + &i.UserName, + ) + return i, err +} + const deleteOAuth2ProviderAppByID = `-- name: DeleteOAuth2ProviderAppByID :exec DELETE FROM oauth2_provider_apps WHERE id = $1 ` @@ -3756,6 +4143,25 @@ func (q *sqlQuerier) UpdateOAuth2ProviderAppSecretByID(ctx context.Context, arg return i, err } +const deleteOrganizationMember = `-- name: DeleteOrganizationMember :exec +DELETE + FROM + organization_members + WHERE + organization_id = $1 AND + user_id = $2 +` + +type DeleteOrganizationMemberParams struct { + OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"` + UserID uuid.UUID `db:"user_id" json:"user_id"` +} + +func (q *sqlQuerier) DeleteOrganizationMember(ctx context.Context, arg DeleteOrganizationMemberParams) error { + _, err := q.db.ExecContext(ctx, deleteOrganizationMember, arg.OrganizationID, arg.UserID) + return err +} + const getOrganizationIDsByMemberIDs = `-- name: GetOrganizationIDsByMemberIDs :many SELECT user_id, array_agg(organization_id) :: uuid [ ] AS "organization_IDs" @@ -3795,25 +4201,35 @@ func (q *sqlQuerier) GetOrganizationIDsByMemberIDs(ctx context.Context, ids []uu return items, nil } -const getOrganizationMemberByUserID = `-- name: GetOrganizationMemberByUserID :one -SELECT - user_id, organization_id, created_at, updated_at, roles -FROM - organization_members -WHERE - organization_id = $1 - AND user_id = $2 -LIMIT - 1 +const insertOrganizationMember = `-- name: InsertOrganizationMember :one +INSERT INTO + organization_members ( + organization_id, + user_id, + created_at, + updated_at, + roles + ) +VALUES + ($1, $2, $3, $4, $5) RETURNING user_id, organization_id, created_at, updated_at, roles ` -type GetOrganizationMemberByUserIDParams struct { +type InsertOrganizationMemberParams struct { OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"` UserID uuid.UUID `db:"user_id" json:"user_id"` + CreatedAt time.Time `db:"created_at" json:"created_at"` + UpdatedAt time.Time `db:"updated_at" json:"updated_at"` + Roles []string `db:"roles" json:"roles"` } -func (q *sqlQuerier) GetOrganizationMemberByUserID(ctx context.Context, arg GetOrganizationMemberByUserIDParams) (OrganizationMember, error) { - row := q.db.QueryRowContext(ctx, getOrganizationMemberByUserID, arg.OrganizationID, arg.UserID) +func (q *sqlQuerier) InsertOrganizationMember(ctx context.Context, arg InsertOrganizationMemberParams) (OrganizationMember, error) { + row := q.db.QueryRowContext(ctx, insertOrganizationMember, + arg.OrganizationID, + arg.UserID, + arg.CreatedAt, + arg.UpdatedAt, + pq.Array(arg.Roles), + ) var i OrganizationMember err := row.Scan( &i.UserID, @@ -3825,30 +4241,59 @@ func (q *sqlQuerier) GetOrganizationMemberByUserID(ctx 
context.Context, arg GetO return i, err } -const getOrganizationMembershipsByUserID = `-- name: GetOrganizationMembershipsByUserID :many +const organizationMembers = `-- name: OrganizationMembers :many SELECT - user_id, organization_id, created_at, updated_at, roles + organization_members.user_id, organization_members.organization_id, organization_members.created_at, organization_members.updated_at, organization_members.roles, + users.username FROM organization_members + INNER JOIN + users ON organization_members.user_id = users.id WHERE - user_id = $1 + -- Filter by organization id + CASE + WHEN $1 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + organization_id = $1 + ELSE true + END + -- Filter by user id + AND CASE + WHEN $2 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + user_id = $2 + ELSE true + END ` -func (q *sqlQuerier) GetOrganizationMembershipsByUserID(ctx context.Context, userID uuid.UUID) ([]OrganizationMember, error) { - rows, err := q.db.QueryContext(ctx, getOrganizationMembershipsByUserID, userID) +type OrganizationMembersParams struct { + OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"` + UserID uuid.UUID `db:"user_id" json:"user_id"` +} + +type OrganizationMembersRow struct { + OrganizationMember OrganizationMember `db:"organization_member" json:"organization_member"` + Username string `db:"username" json:"username"` +} + +// Arguments are optional with uuid.Nil to ignore. +// - Use just 'organization_id' to get all members of an org +// - Use just 'user_id' to get all orgs a user is a member of +// - Use both to get a specific org member row +func (q *sqlQuerier) OrganizationMembers(ctx context.Context, arg OrganizationMembersParams) ([]OrganizationMembersRow, error) { + rows, err := q.db.QueryContext(ctx, organizationMembers, arg.OrganizationID, arg.UserID) if err != nil { return nil, err } defer rows.Close() - var items []OrganizationMember + var items []OrganizationMembersRow for rows.Next() { - var i OrganizationMember + var i OrganizationMembersRow if err := rows.Scan( - &i.UserID, - &i.OrganizationID, - &i.CreatedAt, - &i.UpdatedAt, - pq.Array(&i.Roles), + &i.OrganizationMember.UserID, + &i.OrganizationMember.OrganizationID, + &i.OrganizationMember.CreatedAt, + &i.OrganizationMember.UpdatedAt, + pq.Array(&i.OrganizationMember.Roles), + &i.Username, ); err != nil { return nil, err } @@ -3863,46 +4308,6 @@ func (q *sqlQuerier) GetOrganizationMembershipsByUserID(ctx context.Context, use return items, nil } -const insertOrganizationMember = `-- name: InsertOrganizationMember :one -INSERT INTO - organization_members ( - organization_id, - user_id, - created_at, - updated_at, - roles - ) -VALUES - ($1, $2, $3, $4, $5) RETURNING user_id, organization_id, created_at, updated_at, roles -` - -type InsertOrganizationMemberParams struct { - OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"` - UserID uuid.UUID `db:"user_id" json:"user_id"` - CreatedAt time.Time `db:"created_at" json:"created_at"` - UpdatedAt time.Time `db:"updated_at" json:"updated_at"` - Roles []string `db:"roles" json:"roles"` -} - -func (q *sqlQuerier) InsertOrganizationMember(ctx context.Context, arg InsertOrganizationMemberParams) (OrganizationMember, error) { - row := q.db.QueryRowContext(ctx, insertOrganizationMember, - arg.OrganizationID, - arg.UserID, - arg.CreatedAt, - arg.UpdatedAt, - pq.Array(arg.Roles), - ) - var i OrganizationMember - err := row.Scan( - &i.UserID, - &i.OrganizationID, - &i.CreatedAt, - &i.UpdatedAt, - 
pq.Array(&i.Roles), - ) - return i, err -} - const updateMemberRoles = `-- name: UpdateMemberRoles :one UPDATE organization_members @@ -3949,7 +4354,7 @@ func (q *sqlQuerier) DeleteOrganization(ctx context.Context, id uuid.UUID) error const getDefaultOrganization = `-- name: GetDefaultOrganization :one SELECT - id, name, description, created_at, updated_at, is_default + id, name, description, created_at, updated_at, is_default, display_name, icon FROM organizations WHERE @@ -3968,13 +4373,15 @@ func (q *sqlQuerier) GetDefaultOrganization(ctx context.Context) (Organization, &i.CreatedAt, &i.UpdatedAt, &i.IsDefault, + &i.DisplayName, + &i.Icon, ) return i, err } const getOrganizationByID = `-- name: GetOrganizationByID :one SELECT - id, name, description, created_at, updated_at, is_default + id, name, description, created_at, updated_at, is_default, display_name, icon FROM organizations WHERE @@ -3991,13 +4398,15 @@ func (q *sqlQuerier) GetOrganizationByID(ctx context.Context, id uuid.UUID) (Org &i.CreatedAt, &i.UpdatedAt, &i.IsDefault, + &i.DisplayName, + &i.Icon, ) return i, err } const getOrganizationByName = `-- name: GetOrganizationByName :one SELECT - id, name, description, created_at, updated_at, is_default + id, name, description, created_at, updated_at, is_default, display_name, icon FROM organizations WHERE @@ -4016,13 +4425,15 @@ func (q *sqlQuerier) GetOrganizationByName(ctx context.Context, name string) (Or &i.CreatedAt, &i.UpdatedAt, &i.IsDefault, + &i.DisplayName, + &i.Icon, ) return i, err } const getOrganizations = `-- name: GetOrganizations :many SELECT - id, name, description, created_at, updated_at, is_default + id, name, description, created_at, updated_at, is_default, display_name, icon FROM organizations ` @@ -4043,6 +4454,8 @@ func (q *sqlQuerier) GetOrganizations(ctx context.Context) ([]Organization, erro &i.CreatedAt, &i.UpdatedAt, &i.IsDefault, + &i.DisplayName, + &i.Icon, ); err != nil { return nil, err } @@ -4059,7 +4472,7 @@ func (q *sqlQuerier) GetOrganizations(ctx context.Context) ([]Organization, erro const getOrganizationsByUserID = `-- name: GetOrganizationsByUserID :many SELECT - id, name, description, created_at, updated_at, is_default + id, name, description, created_at, updated_at, is_default, display_name, icon FROM organizations WHERE @@ -4089,6 +4502,8 @@ func (q *sqlQuerier) GetOrganizationsByUserID(ctx context.Context, userID uuid.U &i.CreatedAt, &i.UpdatedAt, &i.IsDefault, + &i.DisplayName, + &i.Icon, ); err != nil { return nil, err } @@ -4105,16 +4520,18 @@ func (q *sqlQuerier) GetOrganizationsByUserID(ctx context.Context, userID uuid.U const insertOrganization = `-- name: InsertOrganization :one INSERT INTO - organizations (id, "name", description, created_at, updated_at, is_default) + organizations (id, "name", display_name, description, icon, created_at, updated_at, is_default) VALUES -- If no organizations exist, and this is the first, make it the default. 
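The widened InsertOrganization below adds display_name and icon to the insert. A hedged usage sketch with illustrative values:

```go
package example // illustrative usage sketch

import (
	"context"
	"time"

	"github.com/google/uuid"

	"github.com/coder/coder/v2/coderd/database"
)

// createOrganization fills the two new columns. The values are illustrative;
// the first organization inserted still becomes the default via the
// "(SELECT TRUE FROM organizations LIMIT 1) IS NULL" guard in the query.
func createOrganization(ctx context.Context, db database.Store) (database.Organization, error) {
	now := time.Now() // production code would likely use dbtime.Now()
	return db.InsertOrganization(ctx, database.InsertOrganizationParams{
		ID:          uuid.New(),
		Name:        "acme",
		DisplayName: "Acme Inc.",
		Description: "Example organization",
		Icon:        "/emojis/1f3e2.png",
		CreatedAt:   now,
		UpdatedAt:   now,
	})
}
```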
- ($1, $2, $3, $4, $5, (SELECT TRUE FROM organizations LIMIT 1) IS NULL) RETURNING id, name, description, created_at, updated_at, is_default + ($1, $2, $3, $4, $5, $6, $7, (SELECT TRUE FROM organizations LIMIT 1) IS NULL) RETURNING id, name, description, created_at, updated_at, is_default, display_name, icon ` type InsertOrganizationParams struct { ID uuid.UUID `db:"id" json:"id"` Name string `db:"name" json:"name"` + DisplayName string `db:"display_name" json:"display_name"` Description string `db:"description" json:"description"` + Icon string `db:"icon" json:"icon"` CreatedAt time.Time `db:"created_at" json:"created_at"` UpdatedAt time.Time `db:"updated_at" json:"updated_at"` } @@ -4123,7 +4540,9 @@ func (q *sqlQuerier) InsertOrganization(ctx context.Context, arg InsertOrganizat row := q.db.QueryRowContext(ctx, insertOrganization, arg.ID, arg.Name, + arg.DisplayName, arg.Description, + arg.Icon, arg.CreatedAt, arg.UpdatedAt, ) @@ -4135,6 +4554,8 @@ func (q *sqlQuerier) InsertOrganization(ctx context.Context, arg InsertOrganizat &i.CreatedAt, &i.UpdatedAt, &i.IsDefault, + &i.DisplayName, + &i.Icon, ) return i, err } @@ -4144,20 +4565,33 @@ UPDATE organizations SET updated_at = $1, - name = $2 + name = $2, + display_name = $3, + description = $4, + icon = $5 WHERE - id = $3 -RETURNING id, name, description, created_at, updated_at, is_default + id = $6 +RETURNING id, name, description, created_at, updated_at, is_default, display_name, icon ` type UpdateOrganizationParams struct { - UpdatedAt time.Time `db:"updated_at" json:"updated_at"` - Name string `db:"name" json:"name"` - ID uuid.UUID `db:"id" json:"id"` + UpdatedAt time.Time `db:"updated_at" json:"updated_at"` + Name string `db:"name" json:"name"` + DisplayName string `db:"display_name" json:"display_name"` + Description string `db:"description" json:"description"` + Icon string `db:"icon" json:"icon"` + ID uuid.UUID `db:"id" json:"id"` } func (q *sqlQuerier) UpdateOrganization(ctx context.Context, arg UpdateOrganizationParams) (Organization, error) { - row := q.db.QueryRowContext(ctx, updateOrganization, arg.UpdatedAt, arg.Name, arg.ID) + row := q.db.QueryRowContext(ctx, updateOrganization, + arg.UpdatedAt, + arg.Name, + arg.DisplayName, + arg.Description, + arg.Icon, + arg.ID, + ) var i Organization err := row.Scan( &i.ID, @@ -4166,6 +4600,8 @@ func (q *sqlQuerier) UpdateOrganization(ctx context.Context, arg UpdateOrganizat &i.CreatedAt, &i.UpdatedAt, &i.IsDefault, + &i.DisplayName, + &i.Icon, ) return i, err } @@ -5599,25 +6035,25 @@ func (q *sqlQuerier) UpdateReplica(ctx context.Context, arg UpdateReplicaParams) const customRoles = `-- name: CustomRoles :many SELECT - name, display_name, site_permissions, org_permissions, user_permissions, created_at, updated_at, organization_id + name, display_name, site_permissions, org_permissions, user_permissions, created_at, updated_at, organization_id, id FROM custom_roles WHERE true - -- Lookup roles filter expects the role names to be in the rbac package - -- format. Eg: name[:] - AND CASE WHEN array_length($1 :: text[], 1) > 0 THEN - -- Case insensitive lookup with org_id appended (if non-null). - -- This will return just the name if org_id is null. 
It'll append - -- the org_id if not null - concat(name, NULLIF(concat(':', organization_id), ':')) ILIKE ANY($1 :: text []) + -- @lookup_roles will filter for exact (role_name, org_id) pairs + -- To do this manually in SQL, you can construct an array and cast it: + -- cast(ARRAY[('customrole','ece79dac-926e-44ca-9790-2ff7c5eb6e0c')] AS name_organization_pair[]) + AND CASE WHEN array_length($1 :: name_organization_pair[], 1) > 0 THEN + -- Using 'coalesce' to avoid troubles with null literals being an empty string. + (name, coalesce(organization_id, '00000000-0000-0000-0000-000000000000' ::uuid)) = ANY ($1::name_organization_pair[]) ELSE true END - -- Org scoping filter, to only fetch site wide roles + -- This allows fetching all roles, or just site wide roles AND CASE WHEN $2 :: boolean THEN organization_id IS null ELSE true END + -- Allows fetching all roles to a particular organization AND CASE WHEN $3 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN organization_id = $3 ELSE true @@ -5625,9 +6061,9 @@ WHERE ` type CustomRolesParams struct { - LookupRoles []string `db:"lookup_roles" json:"lookup_roles"` - ExcludeOrgRoles bool `db:"exclude_org_roles" json:"exclude_org_roles"` - OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"` + LookupRoles []NameOrganizationPair `db:"lookup_roles" json:"lookup_roles"` + ExcludeOrgRoles bool `db:"exclude_org_roles" json:"exclude_org_roles"` + OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"` } func (q *sqlQuerier) CustomRoles(ctx context.Context, arg CustomRolesParams) ([]CustomRole, error) { @@ -5648,6 +6084,7 @@ func (q *sqlQuerier) CustomRoles(ctx context.Context, arg CustomRolesParams) ([] &i.CreatedAt, &i.UpdatedAt, &i.OrganizationID, + &i.ID, ); err != nil { return nil, err } @@ -5692,16 +6129,16 @@ ON CONFLICT (name) org_permissions = $5, user_permissions = $6, updated_at = now() -RETURNING name, display_name, site_permissions, org_permissions, user_permissions, created_at, updated_at, organization_id +RETURNING name, display_name, site_permissions, org_permissions, user_permissions, created_at, updated_at, organization_id, id ` type UpsertCustomRoleParams struct { - Name string `db:"name" json:"name"` - DisplayName string `db:"display_name" json:"display_name"` - OrganizationID uuid.NullUUID `db:"organization_id" json:"organization_id"` - SitePermissions json.RawMessage `db:"site_permissions" json:"site_permissions"` - OrgPermissions json.RawMessage `db:"org_permissions" json:"org_permissions"` - UserPermissions json.RawMessage `db:"user_permissions" json:"user_permissions"` + Name string `db:"name" json:"name"` + DisplayName string `db:"display_name" json:"display_name"` + OrganizationID uuid.NullUUID `db:"organization_id" json:"organization_id"` + SitePermissions CustomRolePermissions `db:"site_permissions" json:"site_permissions"` + OrgPermissions CustomRolePermissions `db:"org_permissions" json:"org_permissions"` + UserPermissions CustomRolePermissions `db:"user_permissions" json:"user_permissions"` } func (q *sqlQuerier) UpsertCustomRole(ctx context.Context, arg UpsertCustomRoleParams) (CustomRole, error) { @@ -5723,6 +6160,7 @@ func (q *sqlQuerier) UpsertCustomRole(ctx context.Context, arg UpsertCustomRoleP &i.CreatedAt, &i.UpdatedAt, &i.OrganizationID, + &i.ID, ) return i, err } @@ -8432,12 +8870,14 @@ SELECT array_append(users.rbac_roles, 'member'), ( SELECT - array_agg(org_roles) + -- The roles are returned as a flat array, org scoped and site side. 
+ -- Concatenating the organization id scopes the organization roles. + array_agg(org_roles || ':' || organization_members.organization_id::text) FROM organization_members, - -- All org_members get the org-member role for their orgs + -- All org_members get the organization-member role for their orgs unnest( - array_append(roles, 'organization-member:' || organization_members.organization_id::text) + array_append(roles, 'organization-member') ) AS org_roles WHERE user_id = users.id @@ -8779,6 +9219,7 @@ INSERT INTO id, email, username, + name, hashed_password, created_at, updated_at, @@ -8786,13 +9227,14 @@ INSERT INTO login_type ) VALUES - ($1, $2, $3, $4, $5, $6, $7, $8) RETURNING id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, theme_preference, name + ($1, $2, $3, $4, $5, $6, $7, $8, $9) RETURNING id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, theme_preference, name ` type InsertUserParams struct { ID uuid.UUID `db:"id" json:"id"` Email string `db:"email" json:"email"` Username string `db:"username" json:"username"` + Name string `db:"name" json:"name"` HashedPassword []byte `db:"hashed_password" json:"hashed_password"` CreatedAt time.Time `db:"created_at" json:"created_at"` UpdatedAt time.Time `db:"updated_at" json:"updated_at"` @@ -8805,6 +9247,7 @@ func (q *sqlQuerier) InsertUser(ctx context.Context, arg InsertUserParams) (User arg.ID, arg.Email, arg.Username, + arg.Name, arg.HashedPassword, arg.CreatedAt, arg.UpdatedAt, diff --git a/coderd/database/queries/auditlogs.sql b/coderd/database/queries/auditlogs.sql index fc48489ca2104..aa62b71d1a002 100644 --- a/coderd/database/queries/auditlogs.sql +++ b/coderd/database/queries/auditlogs.sql @@ -3,12 +3,21 @@ -- name: GetAuditLogsOffset :many SELECT audit_logs.*, + -- sqlc.embed(users) would be nice but it does not seem to play well with + -- left joins. users.username AS user_username, + users.name AS user_name, users.email AS user_email, users.created_at AS user_created_at, + users.updated_at AS user_updated_at, + users.last_seen_at AS user_last_seen_at, users.status AS user_status, + users.login_type AS user_login_type, users.rbac_roles AS user_roles, users.avatar_url AS user_avatar_url, + users.deleted AS user_deleted, + users.theme_preference AS user_theme_preference, + users.quiet_hours_schedule AS user_quiet_hours_schedule, COUNT(audit_logs.*) OVER () AS count FROM audit_logs @@ -50,6 +59,12 @@ WHERE resource_id = @resource_id ELSE true END + -- Filter organization_id + AND CASE + WHEN @organization_id :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + audit_logs.organization_id = @organization_id + ELSE true + END -- Filter by resource_target AND CASE WHEN @resource_target :: text != '' THEN @@ -101,9 +116,12 @@ WHERE ORDER BY "time" DESC LIMIT - $1 + -- a limit of 0 means "no limit". The audit log table is unbounded + -- in size, and is expected to be quite large. Implement a default + -- limit of 100 to prevent accidental excessively large queries. 
+ COALESCE(NULLIF(@limit_opt :: int, 0), 100) OFFSET - $2; + @offset_opt; -- name: InsertAuditLog :one INSERT INTO diff --git a/coderd/database/queries/groupmembers.sql b/coderd/database/queries/groupmembers.sql index d755212132383..8f4770eff112e 100644 --- a/coderd/database/queries/groupmembers.sql +++ b/coderd/database/queries/groupmembers.sql @@ -1,4 +1,7 @@ -- name: GetGroupMembers :many +SELECT * FROM group_members; + +-- name: GetGroupMembersByGroupID :many SELECT users.* FROM diff --git a/coderd/database/queries/groups.sql b/coderd/database/queries/groups.sql index 53d0b25874987..9dea20f0fa6e6 100644 --- a/coderd/database/queries/groups.sql +++ b/coderd/database/queries/groups.sql @@ -1,3 +1,6 @@ +-- name: GetGroups :many +SELECT * FROM groups; + -- name: GetGroupByID :one SELECT * diff --git a/coderd/database/queries/notifications.sql b/coderd/database/queries/notifications.sql new file mode 100644 index 0000000000000..8cc31e0661927 --- /dev/null +++ b/coderd/database/queries/notifications.sql @@ -0,0 +1,127 @@ +-- name: FetchNewMessageMetadata :one +-- This is used to build up the notification_message's JSON payload. +SELECT nt.name AS notification_name, + nt.actions AS actions, + u.id AS user_id, + u.email AS user_email, + COALESCE(NULLIF(u.name, ''), NULLIF(u.username, ''))::text AS user_name +FROM notification_templates nt, + users u +WHERE nt.id = @notification_template_id + AND u.id = @user_id; + +-- name: EnqueueNotificationMessage :one +INSERT INTO notification_messages (id, notification_template_id, user_id, method, payload, targets, created_by) +VALUES (@id, + @notification_template_id, + @user_id, + @method::notification_method, + @payload::jsonb, + @targets, + @created_by) +RETURNING *; + +-- Acquires the lease for a given count of notification messages, to enable concurrent dequeuing and subsequent sending. +-- Only rows that aren't already leased (or ones which are leased but have exceeded their lease period) are returned. +-- +-- A "lease" here refers to a notifier taking ownership of a notification_messages row. A lease survives for the duration +-- of CODER_NOTIFICATIONS_LEASE_PERIOD. Once a message is delivered, its status is updated and the lease expires (set to NULL). +-- If a message exceeds its lease, that implies the notifier did not shutdown cleanly, or the table update failed somehow, +-- and the row will then be eligible to be dequeued by another notifier. +-- +-- SKIP LOCKED is used to jump over locked rows. This prevents multiple notifiers from acquiring the same messages. 
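Taken together with the Go bindings earlier in this diff, the lease query supports a simple acquire/deliver/mark loop. A sketch under the assumption that database.Store exposes the generated notification methods; the lease period, retry cap, and batch size shown are illustrative, not the CODER_NOTIFICATIONS_* defaults:

```go
package example // illustrative usage sketch

import (
	"context"
	"time"

	"github.com/google/uuid"

	"github.com/coder/coder/v2/coderd/database"
)

// notifyOnce is one pass of a hypothetical notifier: acquire a leased batch,
// attempt delivery, and mark successes as sent. Failure handling via
// BulkMarkNotificationMessagesFailed is omitted for brevity.
func notifyOnce(ctx context.Context, db database.Store, notifierID uuid.UUID,
	deliver func(context.Context, database.AcquireNotificationMessagesRow) error,
) error {
	msgs, err := db.AcquireNotificationMessages(ctx, database.AcquireNotificationMessagesParams{
		NotifierID:      notifierID,
		LeaseSeconds:    120, // illustrative lease period
		MaxAttemptCount: 5,   // illustrative retry cap
		Count:           10,  // batch size per pass
	})
	if err != nil {
		return err
	}
	var sentIDs []uuid.UUID
	var sentAts []time.Time
	for _, msg := range msgs {
		if deliver(ctx, msg) != nil {
			continue // a real notifier would record these for the failed bulk update
		}
		sentIDs = append(sentIDs, msg.ID)
		sentAts = append(sentAts, time.Now())
	}
	if len(sentIDs) == 0 {
		return nil
	}
	_, err = db.BulkMarkNotificationMessagesSent(ctx, database.BulkMarkNotificationMessagesSentParams{
		IDs:     sentIDs,
		SentAts: sentAts,
	})
	return err
}
```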
+-- See: https://www.postgresql.org/docs/9.5/sql-select.html#SQL-FOR-UPDATE-SHARE +-- +-- name: AcquireNotificationMessages :many +WITH acquired AS ( + UPDATE + notification_messages + SET updated_at = NOW(), + status = 'leased'::notification_message_status, + status_reason = 'Leased by notifier ' || sqlc.arg('notifier_id')::uuid, + leased_until = NOW() + CONCAT(sqlc.arg('lease_seconds')::int, ' seconds')::interval + WHERE id IN (SELECT nm.id + FROM notification_messages AS nm + WHERE ( + ( + -- message is in acquirable states + nm.status IN ( + 'pending'::notification_message_status, + 'temporary_failure'::notification_message_status + ) + ) + -- or somehow the message was left in leased for longer than its lease period + OR ( + nm.status = 'leased'::notification_message_status + AND nm.leased_until < NOW() + ) + ) + AND ( + -- exclude all messages which have exceeded the max attempts; these will be purged later + nm.attempt_count IS NULL OR nm.attempt_count < sqlc.arg('max_attempt_count')::int + ) + -- if set, do not retry until we've exceeded the wait time + AND ( + CASE + WHEN nm.next_retry_after IS NOT NULL THEN nm.next_retry_after < NOW() + ELSE true + END + ) + ORDER BY nm.created_at ASC + -- Ensure that multiple concurrent readers cannot retrieve the same rows + FOR UPDATE OF nm + SKIP LOCKED + LIMIT sqlc.arg('count')) + RETURNING *) +SELECT + -- message + nm.id, + nm.payload, + nm.method, + nm.created_by, + -- template + nt.title_template, + nt.body_template +FROM acquired nm + JOIN notification_templates nt ON nm.notification_template_id = nt.id; + +-- name: BulkMarkNotificationMessagesFailed :execrows +UPDATE notification_messages +SET updated_at = subquery.failed_at, + attempt_count = attempt_count + 1, + status = CASE + WHEN attempt_count + 1 < @max_attempts::int THEN subquery.status + ELSE 'permanent_failure'::notification_message_status END, + status_reason = subquery.status_reason, + leased_until = NULL, + next_retry_after = CASE + WHEN (attempt_count + 1 < @max_attempts::int) + THEN NOW() + CONCAT(@retry_interval::int, ' seconds')::interval END +FROM (SELECT UNNEST(@ids::uuid[]) AS id, + UNNEST(@failed_ats::timestamptz[]) AS failed_at, + UNNEST(@statuses::notification_message_status[]) AS status, + UNNEST(@status_reasons::text[]) AS status_reason) AS subquery +WHERE notification_messages.id = subquery.id; + +-- name: BulkMarkNotificationMessagesSent :execrows +UPDATE notification_messages +SET updated_at = new_values.sent_at, + attempt_count = attempt_count + 1, + status = 'sent'::notification_message_status, + status_reason = NULL, + leased_until = NULL, + next_retry_after = NULL +FROM (SELECT UNNEST(@ids::uuid[]) AS id, + UNNEST(@sent_ats::timestamptz[]) AS sent_at) + AS new_values +WHERE notification_messages.id = new_values.id; + +-- Delete all notification messages which have not been updated for over a week. +-- name: DeleteOldNotificationMessages :exec +DELETE +FROM notification_messages +WHERE id IN + (SELECT id + FROM notification_messages AS nested + WHERE nested.updated_at < NOW() - INTERVAL '7 days'); + diff --git a/coderd/database/queries/organizationmembers.sql b/coderd/database/queries/organizationmembers.sql index 10a45d25eb2c5..4722973d38589 100644 --- a/coderd/database/queries/organizationmembers.sql +++ b/coderd/database/queries/organizationmembers.sql @@ -1,13 +1,28 @@ --- name: GetOrganizationMemberByUserID :one +-- name: OrganizationMembers :many +-- Arguments are optional with uuid.Nil to ignore. 
+-- - Use just 'organization_id' to get all members of an org +-- - Use just 'user_id' to get all orgs a user is a member of +-- - Use both to get a specific org member row SELECT - * + sqlc.embed(organization_members), + users.username FROM organization_members + INNER JOIN + users ON organization_members.user_id = users.id WHERE - organization_id = $1 - AND user_id = $2 -LIMIT - 1; + -- Filter by organization id + CASE + WHEN @organization_id :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + organization_id = @organization_id + ELSE true + END + -- Filter by user id + AND CASE + WHEN @user_id :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN + user_id = @user_id + ELSE true + END; -- name: InsertOrganizationMember :one INSERT INTO @@ -21,14 +36,15 @@ INSERT INTO VALUES ($1, $2, $3, $4, $5) RETURNING *; +-- name: DeleteOrganizationMember :exec +DELETE + FROM + organization_members + WHERE + organization_id = @organization_id AND + user_id = @user_id +; --- name: GetOrganizationMembershipsByUserID :many -SELECT - * -FROM - organization_members -WHERE - user_id = $1; -- name: GetOrganizationIDsByMemberIDs :many SELECT diff --git a/coderd/database/queries/organizations.sql b/coderd/database/queries/organizations.sql index 9d5cec1324fe6..787985c3bdbbc 100644 --- a/coderd/database/queries/organizations.sql +++ b/coderd/database/queries/organizations.sql @@ -49,17 +49,20 @@ WHERE -- name: InsertOrganization :one INSERT INTO - organizations (id, "name", description, created_at, updated_at, is_default) + organizations (id, "name", display_name, description, icon, created_at, updated_at, is_default) VALUES -- If no organizations exist, and this is the first, make it the default. - ($1, $2, $3, $4, $5, (SELECT TRUE FROM organizations LIMIT 1) IS NULL) RETURNING *; + (@id, @name, @display_name, @description, @icon, @created_at, @updated_at, (SELECT TRUE FROM organizations LIMIT 1) IS NULL) RETURNING *; -- name: UpdateOrganization :one UPDATE organizations SET updated_at = @updated_at, - name = @name + name = @name, + display_name = @display_name, + description = @description, + icon = @icon WHERE id = @id RETURNING *; diff --git a/coderd/database/queries/roles.sql b/coderd/database/queries/roles.sql index dd8816d40eecc..ec5566a3d0dbb 100644 --- a/coderd/database/queries/roles.sql +++ b/coderd/database/queries/roles.sql @@ -5,26 +5,27 @@ FROM custom_roles WHERE true - -- Lookup roles filter expects the role names to be in the rbac package - -- format. Eg: name[:] - AND CASE WHEN array_length(@lookup_roles :: text[], 1) > 0 THEN - -- Case insensitive lookup with org_id appended (if non-null). - -- This will return just the name if org_id is null. It'll append - -- the org_id if not null - concat(name, NULLIF(concat(':', organization_id), ':')) ILIKE ANY(@lookup_roles :: text []) + -- @lookup_roles will filter for exact (role_name, org_id) pairs + -- To do this manually in SQL, you can construct an array and cast it: + -- cast(ARRAY[('customrole','ece79dac-926e-44ca-9790-2ff7c5eb6e0c')] AS name_organization_pair[]) + AND CASE WHEN array_length(@lookup_roles :: name_organization_pair[], 1) > 0 THEN + -- Using 'coalesce' to avoid troubles with null literals being an empty string. 
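From Go, the same (name, organization_id) tuples are built as database.NameOrganizationPair values, whose Value() renders the composite literal this comparison expects. A hedged sketch; the role names are illustrative:

```go
package example // illustrative usage sketch

import (
	"context"

	"github.com/google/uuid"

	"github.com/coder/coder/v2/coderd/database"
)

// lookupRoles mirrors the tuple literal described in the SQL comment from the
// Go side. uuid.Nil in a pair matches site-wide roles, because the query
// coalesces a NULL organization_id to the zero UUID.
func lookupRoles(ctx context.Context, db database.Store, orgID uuid.UUID) ([]database.CustomRole, error) {
	return db.CustomRoles(ctx, database.CustomRolesParams{
		LookupRoles: []database.NameOrganizationPair{
			{Name: "customrole", OrganizationID: orgID},      // organization-scoped role
			{Name: "auditor-lite", OrganizationID: uuid.Nil}, // site-wide role (illustrative name)
		},
		ExcludeOrgRoles: false,
		OrganizationID:  uuid.Nil, // no extra organization filter
	})
}
```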
+ (name, coalesce(organization_id, '00000000-0000-0000-0000-000000000000' ::uuid)) = ANY (@lookup_roles::name_organization_pair[]) ELSE true END - -- Org scoping filter, to only fetch site wide roles + -- This allows fetching all roles, or just site wide roles AND CASE WHEN @exclude_org_roles :: boolean THEN organization_id IS null ELSE true END + -- Allows fetching all roles to a particular organization AND CASE WHEN @organization_id :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN organization_id = @organization_id ELSE true END ; + -- name: UpsertCustomRole :one INSERT INTO custom_roles ( diff --git a/coderd/database/queries/users.sql b/coderd/database/queries/users.sql index 5062b14429427..6bbfdac112d7a 100644 --- a/coderd/database/queries/users.sql +++ b/coderd/database/queries/users.sql @@ -62,6 +62,7 @@ INSERT INTO id, email, username, + name, hashed_password, created_at, updated_at, @@ -69,7 +70,7 @@ INSERT INTO login_type ) VALUES - ($1, $2, $3, $4, $5, $6, $7, $8) RETURNING *; + ($1, $2, $3, $4, $5, $6, $7, $8, $9) RETURNING *; -- name: UpdateUserProfile :one UPDATE @@ -227,12 +228,14 @@ SELECT array_append(users.rbac_roles, 'member'), ( SELECT - array_agg(org_roles) + -- The roles are returned as a flat array, org scoped and site side. + -- Concatenating the organization id scopes the organization roles. + array_agg(org_roles || ':' || organization_members.organization_id::text) FROM organization_members, - -- All org_members get the org-member role for their orgs + -- All org_members get the organization-member role for their orgs unnest( - array_append(roles, 'organization-member:' || organization_members.organization_id::text) + array_append(roles, 'organization-member') ) AS org_roles WHERE user_id = users.id diff --git a/coderd/database/sqlc.yaml b/coderd/database/sqlc.yaml index 7913a9acf1627..5d6f4419d5b8b 100644 --- a/coderd/database/sqlc.yaml +++ b/coderd/database/sqlc.yaml @@ -28,6 +28,19 @@ sql: emit_enum_valid_method: true emit_all_enum_values: true overrides: + # Used in 'CustomRoles' query to filter by (name,organization_id) + - db_type: "name_organization_pair" + go_type: + type: "NameOrganizationPair" + - column: "custom_roles.site_permissions" + go_type: + type: "CustomRolePermissions" + - column: "custom_roles.org_permissions" + go_type: + type: "CustomRolePermissions" + - column: "custom_roles.user_permissions" + go_type: + type: "CustomRolePermissions" - column: "provisioner_daemons.tags" go_type: type: "StringMap" @@ -51,6 +64,12 @@ sql: - column: "template_usage_stats.app_usage_mins" go_type: type: "StringMapOfInt" + - column: "notification_templates.actions" + go_type: + type: "[]byte" + - column: "notification_messages.payload" + go_type: + type: "[]byte" rename: template: TemplateTable template_with_user: Template diff --git a/coderd/database/types.go b/coderd/database/types.go index 497446b25abfa..fd7a2fed82300 100644 --- a/coderd/database/types.go +++ b/coderd/database/types.go @@ -3,6 +3,7 @@ package database import ( "database/sql/driver" "encoding/json" + "fmt" "time" "github.com/google/uuid" @@ -112,3 +113,60 @@ func (m *StringMapOfInt) Scan(src interface{}) error { func (m StringMapOfInt) Value() (driver.Value, error) { return json.Marshal(m) } + +type CustomRolePermissions []CustomRolePermission + +func (a *CustomRolePermissions) Scan(src interface{}) error { + switch v := src.(type) { + case string: + return json.Unmarshal([]byte(v), &a) + case []byte: + return json.Unmarshal(v, &a) + } + return xerrors.Errorf("unexpected type %T", 
src) +} + +func (a CustomRolePermissions) Value() (driver.Value, error) { + return json.Marshal(a) +} + +type CustomRolePermission struct { + Negate bool `json:"negate"` + ResourceType string `json:"resource_type"` + Action policy.Action `json:"action"` +} + +func (a CustomRolePermission) String() string { + str := a.ResourceType + "." + string(a.Action) + if a.Negate { + return "-" + str + } + return str +} + +// NameOrganizationPair is used as a lookup tuple for custom role rows. +type NameOrganizationPair struct { + Name string `db:"name" json:"name"` + // OrganizationID if unset will assume a null column value + OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"` +} + +func (*NameOrganizationPair) Scan(_ interface{}) error { + return xerrors.Errorf("this should never happen, type 'NameOrganizationPair' should only be used as a parameter") +} + +// Value returns the tuple **literal** +// To get the literal value to return, you can use the expression syntax in a psql +// shell. +// +// SELECT ('customrole'::text,'ece79dac-926e-44ca-9790-2ff7c5eb6e0c'::uuid); +// To see 'null' option. Using the nil uuid as null to avoid empty string literals for null. +// SELECT ('customrole',00000000-0000-0000-0000-000000000000); +// +// This value is usually used as an array, NameOrganizationPair[]. You can see +// what that literal is as well, with proper quoting. +// +// SELECT ARRAY[('customrole'::text,'ece79dac-926e-44ca-9790-2ff7c5eb6e0c'::uuid)]; +func (a NameOrganizationPair) Value() (driver.Value, error) { + return fmt.Sprintf(`(%s,%s)`, a.Name, a.OrganizationID.String()), nil +} diff --git a/coderd/database/unique_constraint.go b/coderd/database/unique_constraint.go index cbae30279c5e9..d090af80626b8 100644 --- a/coderd/database/unique_constraint.go +++ b/coderd/database/unique_constraint.go @@ -23,6 +23,9 @@ const ( UniqueJfrogXrayScansPkey UniqueConstraint = "jfrog_xray_scans_pkey" // ALTER TABLE ONLY jfrog_xray_scans ADD CONSTRAINT jfrog_xray_scans_pkey PRIMARY KEY (agent_id, workspace_id); UniqueLicensesJWTKey UniqueConstraint = "licenses_jwt_key" // ALTER TABLE ONLY licenses ADD CONSTRAINT licenses_jwt_key UNIQUE (jwt); UniqueLicensesPkey UniqueConstraint = "licenses_pkey" // ALTER TABLE ONLY licenses ADD CONSTRAINT licenses_pkey PRIMARY KEY (id); + UniqueNotificationMessagesPkey UniqueConstraint = "notification_messages_pkey" // ALTER TABLE ONLY notification_messages ADD CONSTRAINT notification_messages_pkey PRIMARY KEY (id); + UniqueNotificationTemplatesNameKey UniqueConstraint = "notification_templates_name_key" // ALTER TABLE ONLY notification_templates ADD CONSTRAINT notification_templates_name_key UNIQUE (name); + UniqueNotificationTemplatesPkey UniqueConstraint = "notification_templates_pkey" // ALTER TABLE ONLY notification_templates ADD CONSTRAINT notification_templates_pkey PRIMARY KEY (id); UniqueOauth2ProviderAppCodesPkey UniqueConstraint = "oauth2_provider_app_codes_pkey" // ALTER TABLE ONLY oauth2_provider_app_codes ADD CONSTRAINT oauth2_provider_app_codes_pkey PRIMARY KEY (id); UniqueOauth2ProviderAppCodesSecretPrefixKey UniqueConstraint = "oauth2_provider_app_codes_secret_prefix_key" // ALTER TABLE ONLY oauth2_provider_app_codes ADD CONSTRAINT oauth2_provider_app_codes_secret_prefix_key UNIQUE (secret_prefix); UniqueOauth2ProviderAppSecretsPkey UniqueConstraint = "oauth2_provider_app_secrets_pkey" // ALTER TABLE ONLY oauth2_provider_app_secrets ADD CONSTRAINT oauth2_provider_app_secrets_pkey PRIMARY KEY (id); diff --git a/coderd/deprecated.go 
b/coderd/deprecated.go index 762b5bc931e38..6dc03e540ce33 100644 --- a/coderd/deprecated.go +++ b/coderd/deprecated.go @@ -3,13 +3,9 @@ package coderd import ( "net/http" - "github.com/go-chi/chi/v5" - - "cdr.dev/slog" "github.com/coder/coder/v2/coderd/httpapi" "github.com/coder/coder/v2/coderd/httpmw" "github.com/coder/coder/v2/codersdk" - "github.com/coder/coder/v2/codersdk/agentsdk" ) // @Summary Removed: Get parameters by template version @@ -34,19 +30,6 @@ func templateVersionSchemaDeprecated(rw http.ResponseWriter, r *http.Request) { httpapi.Write(r.Context(), rw, http.StatusOK, []struct{}{}) } -// @Summary Removed: Patch workspace agent logs -// @ID removed-patch-workspace-agent-logs -// @Security CoderSessionToken -// @Accept json -// @Produce json -// @Tags Agents -// @Param request body agentsdk.PatchLogs true "logs" -// @Success 200 {object} codersdk.Response -// @Router /workspaceagents/me/startup-logs [patch] -func (api *API) patchWorkspaceAgentLogsDeprecated(rw http.ResponseWriter, r *http.Request) { - api.patchWorkspaceAgentLogs(rw, r) -} - // @Summary Removed: Get logs by workspace agent // @ID removed-get-logs-by-workspace-agent // @Security CoderSessionToken @@ -77,45 +60,6 @@ func (api *API) workspaceAgentsGitAuth(rw http.ResponseWriter, r *http.Request) api.workspaceAgentsExternalAuth(rw, r) } -// @Summary Removed: Submit workspace agent metadata -// @ID removed-submit-workspace-agent-metadata -// @Security CoderSessionToken -// @Accept json -// @Tags Agents -// @Param request body agentsdk.PostMetadataRequestDeprecated true "Workspace agent metadata request" -// @Param key path string true "metadata key" format(string) -// @Success 204 "Success" -// @Router /workspaceagents/me/metadata/{key} [post] -// @x-apidocgen {"skip": true} -func (api *API) workspaceAgentPostMetadataDeprecated(rw http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - var req agentsdk.PostMetadataRequestDeprecated - if !httpapi.Read(ctx, rw, r, &req) { - return - } - - workspaceAgent := httpmw.WorkspaceAgent(r) - - key := chi.URLParam(r, "key") - - err := api.workspaceAgentUpdateMetadata(ctx, workspaceAgent, agentsdk.PostMetadataRequest{ - Metadata: []agentsdk.Metadata{ - { - Key: key, - WorkspaceAgentMetadataResult: req, - }, - }, - }) - if err != nil { - api.Logger.Error(ctx, "failed to handle metadata request", slog.Error(err)) - httpapi.InternalServerError(rw, err) - return - } - - httpapi.Write(ctx, rw, http.StatusNoContent, nil) -} - // @Summary Removed: Get workspace resources for workspace build // @ID removed-get-workspace-resources-for-workspace-build // @Security CoderSessionToken diff --git a/coderd/externalauth.go b/coderd/externalauth.go index a2d017ed43e0e..8f8514fa17442 100644 --- a/coderd/externalauth.go +++ b/coderd/externalauth.go @@ -351,15 +351,17 @@ func (api *API) listUserExternalAuths(rw http.ResponseWriter, r *http.Request) { if link.OAuthAccessToken != "" { cfg, ok := configs[link.ProviderID] if ok { - newLink, valid, err := cfg.RefreshToken(ctx, api.Database, link) + newLink, err := cfg.RefreshToken(ctx, api.Database, link) meta := db2sdk.ExternalAuthMeta{ - Authenticated: valid, + Authenticated: err == nil, } if err != nil { meta.ValidateError = err.Error() } + linkMeta[link.ProviderID] = meta + // Update the link if it was potentially refreshed. 
- if err == nil && valid { + if err == nil { links[i] = newLink } } diff --git a/coderd/externalauth/externalauth.go b/coderd/externalauth/externalauth.go index 85e53f2e91f33..b626a5e28fb1f 100644 --- a/coderd/externalauth/externalauth.go +++ b/coderd/externalauth/externalauth.go @@ -95,9 +95,23 @@ func (c *Config) GenerateTokenExtra(token *oauth2.Token) (pqtype.NullRawMessage, }, nil } +// InvalidTokenError is a case where the "RefreshToken" failed to complete +// as a result of invalid credentials. Error contains the reason of the failure. +type InvalidTokenError string + +func (e InvalidTokenError) Error() string { + return string(e) +} + +func IsInvalidTokenError(err error) bool { + var invalidTokenError InvalidTokenError + return xerrors.As(err, &invalidTokenError) +} + // RefreshToken automatically refreshes the token if expired and permitted. -// It returns the token and a bool indicating if the token is valid. -func (c *Config) RefreshToken(ctx context.Context, db database.Store, externalAuthLink database.ExternalAuthLink) (database.ExternalAuthLink, bool, error) { +// If an error is returned, the token is either invalid, or an error occurred. +// Use 'IsInvalidTokenError(err)' to determine the difference. +func (c *Config) RefreshToken(ctx context.Context, db database.Store, externalAuthLink database.ExternalAuthLink) (database.ExternalAuthLink, error) { // If the token is expired and refresh is disabled, we prompt // the user to authenticate again. if c.NoRefresh && @@ -105,7 +119,7 @@ func (c *Config) RefreshToken(ctx context.Context, db database.Store, externalAu // This is true for github, which has no expiry. !externalAuthLink.OAuthExpiry.IsZero() && externalAuthLink.OAuthExpiry.Before(dbtime.Now()) { - return externalAuthLink, false, nil + return externalAuthLink, InvalidTokenError("token expired, refreshing is disabled") } // This is additional defensive programming. Because TokenSource is an interface, @@ -123,14 +137,16 @@ func (c *Config) RefreshToken(ctx context.Context, db database.Store, externalAu Expiry: externalAuthLink.OAuthExpiry, }).Token() if err != nil { - // Even if the token fails to be obtained, we still return false because - // we aren't trying to surface an error, we're just trying to obtain a valid token. - return externalAuthLink, false, nil + // Even if the token fails to be obtained, do not return the error as an error. + // TokenSource(...).Token() will always return the current token if the token is not expired. + // If it is expired, it will attempt to refresh the token, and if it cannot, it will fail with + // an error. This error is a reason the token is invalid. + return externalAuthLink, InvalidTokenError(fmt.Sprintf("refresh token: %s", err.Error())) } extra, err := c.GenerateTokenExtra(token) if err != nil { - return externalAuthLink, false, xerrors.Errorf("generate token extra: %w", err) + return externalAuthLink, xerrors.Errorf("generate token extra: %w", err) } r := retry.New(50*time.Millisecond, 200*time.Millisecond) @@ -140,7 +156,7 @@ func (c *Config) RefreshToken(ctx context.Context, db database.Store, externalAu validate: valid, _, err := c.ValidateToken(ctx, token) if err != nil { - return externalAuthLink, false, xerrors.Errorf("validate external auth token: %w", err) + return externalAuthLink, xerrors.Errorf("validate external auth token: %w", err) } if !valid { // A customer using GitHub in Australia reported that validating immediately @@ -154,7 +170,7 @@ validate: goto validate } // The token is no longer valid! 
- return externalAuthLink, false, nil + return externalAuthLink, InvalidTokenError("token failed to validate") } if token.AccessToken != externalAuthLink.OAuthAccessToken { @@ -170,11 +186,11 @@ validate: OAuthExtra: extra, }) if err != nil { - return updatedAuthLink, false, xerrors.Errorf("update external auth link: %w", err) + return updatedAuthLink, xerrors.Errorf("update external auth link: %w", err) } externalAuthLink = updatedAuthLink } - return externalAuthLink, true, nil + return externalAuthLink, nil } // ValidateToken ensures the Git token provided is valid! @@ -202,7 +218,7 @@ func (c *Config) ValidateToken(ctx context.Context, link *oauth2.Token) (bool, * return false, nil, err } defer res.Body.Close() - if res.StatusCode == http.StatusUnauthorized { + if res.StatusCode == http.StatusUnauthorized || res.StatusCode == http.StatusForbidden { // The token is no longer valid! return false, nil, nil } diff --git a/coderd/externalauth/externalauth_test.go b/coderd/externalauth/externalauth_test.go index 88f3b7a3b59e9..fbc1cab4b7091 100644 --- a/coderd/externalauth/externalauth_test.go +++ b/coderd/externalauth/externalauth_test.go @@ -59,9 +59,10 @@ func TestRefreshToken(t *testing.T) { // Expire the link link.OAuthExpiry = expired - _, refreshed, err := config.RefreshToken(ctx, nil, link) - require.NoError(t, err) - require.False(t, refreshed) + _, err := config.RefreshToken(ctx, nil, link) + require.Error(t, err) + require.True(t, externalauth.IsInvalidTokenError(err)) + require.Contains(t, err.Error(), "refreshing is disabled") }) // NoRefreshNoExpiry tests that an oauth token without an expiry is always valid. @@ -90,9 +91,8 @@ func TestRefreshToken(t *testing.T) { // Zero time used link.OAuthExpiry = time.Time{} - _, refreshed, err := config.RefreshToken(ctx, nil, link) + _, err := config.RefreshToken(ctx, nil, link) require.NoError(t, err) - require.True(t, refreshed, "token without expiry is always valid") require.True(t, validated, "token should have been validated") }) @@ -105,11 +105,12 @@ func TestRefreshToken(t *testing.T) { }, }, } - _, refreshed, err := config.RefreshToken(context.Background(), nil, database.ExternalAuthLink{ + _, err := config.RefreshToken(context.Background(), nil, database.ExternalAuthLink{ OAuthExpiry: expired, }) - require.NoError(t, err) - require.False(t, refreshed) + require.Error(t, err) + require.True(t, externalauth.IsInvalidTokenError(err)) + require.Contains(t, err.Error(), "failure") }) t.Run("ValidateServerError", func(t *testing.T) { @@ -131,8 +132,12 @@ func TestRefreshToken(t *testing.T) { ctx := oidc.ClientContext(context.Background(), fake.HTTPClient(nil)) link.OAuthExpiry = expired - _, _, err := config.RefreshToken(ctx, nil, link) + _, err := config.RefreshToken(ctx, nil, link) require.ErrorContains(t, err, staticError) + // Unsure if this should be the correct behavior. It's an invalid token because + // 'ValidateToken()' failed with a runtime error. This was the previous behavior, + // so not going to change it. 
+ require.False(t, externalauth.IsInvalidTokenError(err)) require.True(t, validated, "token should have been attempted to be validated") }) @@ -156,9 +161,9 @@ func TestRefreshToken(t *testing.T) { ctx := oidc.ClientContext(context.Background(), fake.HTTPClient(nil)) link.OAuthExpiry = expired - _, refreshed, err := config.RefreshToken(ctx, nil, link) - require.NoError(t, err, staticError) - require.False(t, refreshed) + _, err := config.RefreshToken(ctx, nil, link) + require.ErrorContains(t, err, "token failed to validate") + require.True(t, externalauth.IsInvalidTokenError(err)) require.True(t, validated, "token should have been attempted to be validated") }) @@ -191,9 +196,8 @@ func TestRefreshToken(t *testing.T) { // Unlimited lifetime, this is what GitHub returns tokens as link.OAuthExpiry = time.Time{} - _, ok, err := config.RefreshToken(ctx, nil, link) + _, err := config.RefreshToken(ctx, nil, link) require.NoError(t, err) - require.True(t, ok) require.Equal(t, 2, validateCalls, "token should have been attempted to be validated more than once") }) @@ -219,9 +223,8 @@ func TestRefreshToken(t *testing.T) { ctx := oidc.ClientContext(context.Background(), fake.HTTPClient(nil)) - _, ok, err := config.RefreshToken(ctx, nil, link) + _, err := config.RefreshToken(ctx, nil, link) require.NoError(t, err) - require.True(t, ok) require.Equal(t, 1, validateCalls, "token is validated") }) @@ -253,9 +256,8 @@ func TestRefreshToken(t *testing.T) { // Force a refresh link.OAuthExpiry = expired - updated, ok, err := config.RefreshToken(ctx, db, link) + updated, err := config.RefreshToken(ctx, db, link) require.NoError(t, err) - require.True(t, ok) require.Equal(t, 1, validateCalls, "token is validated") require.Equal(t, 1, refreshCalls, "token is refreshed") require.NotEqualf(t, link.OAuthAccessToken, updated.OAuthAccessToken, "token is updated") @@ -292,9 +294,9 @@ func TestRefreshToken(t *testing.T) { // Force a refresh link.OAuthExpiry = expired - updated, ok, err := config.RefreshToken(ctx, db, link) + updated, err := config.RefreshToken(ctx, db, link) require.NoError(t, err) - require.True(t, ok) + require.True(t, updated.OAuthExtra.Valid) extra := map[string]interface{}{} require.NoError(t, json.Unmarshal(updated.OAuthExtra.RawMessage, &extra)) diff --git a/coderd/externalauth_test.go b/coderd/externalauth_test.go index db40ccf38a554..916a88460d53c 100644 --- a/coderd/externalauth_test.go +++ b/coderd/externalauth_test.go @@ -79,11 +79,11 @@ func TestExternalAuthByID(t *testing.T) { client := coderdtest.New(t, &coderdtest.Options{ ExternalAuthConfigs: []*externalauth.Config{ fake.ExternalAuthConfig(t, providerID, &oidctest.ExternalAuthConfigOptions{ - ValidatePayload: func(_ string) interface{} { + ValidatePayload: func(_ string) (interface{}, int, error) { return github.User{ Login: github.String("kyle"), AvatarURL: github.String("https://avatars.githubusercontent.com/u/12345678?v=4"), - } + }, 0, nil }, }, func(cfg *externalauth.Config) { cfg.Type = codersdk.EnhancedExternalAuthProviderGitHub.String() @@ -108,11 +108,11 @@ func TestExternalAuthByID(t *testing.T) { // routes includes a route for /install that returns a list of installations routes := (&oidctest.ExternalAuthConfigOptions{ - ValidatePayload: func(_ string) interface{} { + ValidatePayload: func(_ string) (interface{}, int, error) { return github.User{ Login: github.String("kyle"), AvatarURL: github.String("https://avatars.githubusercontent.com/u/12345678?v=4"), - } + }, 0, nil }, }).AddRoute("/installs", func(_ string, rw 
http.ResponseWriter, r *http.Request) { httpapi.Write(r.Context(), rw, http.StatusOK, struct { @@ -556,7 +556,7 @@ func TestExternalAuthCallback(t *testing.T) { // If the validation URL gives a non-OK status code, this // should be treated as an internal server error. srv.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusForbidden) + w.WriteHeader(http.StatusBadRequest) w.Write([]byte("Something went wrong!")) }) _, err = agentClient.ExternalAuth(ctx, agentsdk.ExternalAuthRequest{ @@ -565,7 +565,7 @@ func TestExternalAuthCallback(t *testing.T) { var apiError *codersdk.Error require.ErrorAs(t, err, &apiError) require.Equal(t, http.StatusInternalServerError, apiError.StatusCode()) - require.Equal(t, "validate external auth token: status 403: body: Something went wrong!", apiError.Detail) + require.Equal(t, "validate external auth token: status 400: body: Something went wrong!", apiError.Detail) }) t.Run("ExpiredNoRefresh", func(t *testing.T) { diff --git a/coderd/healthcheck/derphealth/derp.go b/coderd/healthcheck/derphealth/derp.go index 65d905f16917e..f74db243cbc18 100644 --- a/coderd/healthcheck/derphealth/derp.go +++ b/coderd/healthcheck/derphealth/derp.go @@ -236,8 +236,12 @@ func (r *NodeReport) derpURL() *url.URL { } func (r *NodeReport) Run(ctx context.Context) { - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() + // If there already is a deadline set on the context, do not override it. + if _, ok := ctx.Deadline(); !ok { + dCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + ctx = dCtx + } r.Severity = health.SeverityOK r.ClientLogs = [][]string{} diff --git a/coderd/healthcheck/derphealth/derp_test.go b/coderd/healthcheck/derphealth/derp_test.go index 90e5db63c9763..c009ea982d620 100644 --- a/coderd/healthcheck/derphealth/derp_test.go +++ b/coderd/healthcheck/derphealth/derp_test.go @@ -8,6 +8,7 @@ import ( "net/http/httptest" "net/url" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -84,6 +85,45 @@ func TestDERP(t *testing.T) { } }) + t.Run("TimeoutCtx", func(t *testing.T) { + t.Parallel() + + derpSrv := derp.NewServer(key.NewNode(), func(format string, args ...any) { t.Logf(format, args...) 
}) + defer derpSrv.Close() + srv := httptest.NewServer(derphttp.Handler(derpSrv)) + defer srv.Close() + + var ( + // nolint:gocritic // testing a deadline exceeded + ctx, cancel = context.WithTimeout(context.Background(), time.Nanosecond) + report = derphealth.Report{} + derpURL, _ = url.Parse(srv.URL) + opts = &derphealth.ReportOptions{ + DERPMap: &tailcfg.DERPMap{Regions: map[int]*tailcfg.DERPRegion{ + 1: { + EmbeddedRelay: true, + RegionID: 999, + Nodes: []*tailcfg.DERPNode{{ + Name: "1a", + RegionID: 999, + HostName: derpURL.Host, + IPv4: derpURL.Host, + STUNPort: -1, + InsecureForTests: true, + ForceHTTP: true, + }}, + }, + }}, + } + ) + cancel() + + report.Run(ctx, opts) + + assert.False(t, report.Healthy) + assert.Nil(t, report.Error) + }) + t.Run("HealthyWithNodeDegraded", func(t *testing.T) { t.Parallel() diff --git a/coderd/healthcheck/health/model.go b/coderd/healthcheck/health/model.go index ce332a0fe33ad..50f0078db10b2 100644 --- a/coderd/healthcheck/health/model.go +++ b/coderd/healthcheck/health/model.go @@ -43,6 +43,8 @@ const ( CodeProvisionerDaemonsNoProvisionerDaemons Code = `EPD01` CodeProvisionerDaemonVersionMismatch Code = `EPD02` CodeProvisionerDaemonAPIMajorVersionDeprecated Code = `EPD03` + + CodeInterfaceSmallMTU = `EIF01` ) // Default docs URL diff --git a/coderd/healthcheck/healthcheck.go b/coderd/healthcheck/healthcheck.go index c724347721335..f33c318d332d2 100644 --- a/coderd/healthcheck/healthcheck.go +++ b/coderd/healthcheck/healthcheck.go @@ -156,27 +156,27 @@ func Run(ctx context.Context, opts *ReportOptions) *healthsdk.HealthcheckReport wg.Wait() report.Time = time.Now() - report.FailingSections = []healthsdk.HealthSection{} + failingSections := []healthsdk.HealthSection{} if report.DERP.Severity.Value() > health.SeverityWarning.Value() { - report.FailingSections = append(report.FailingSections, healthsdk.HealthSectionDERP) + failingSections = append(failingSections, healthsdk.HealthSectionDERP) } if report.AccessURL.Severity.Value() > health.SeverityOK.Value() { - report.FailingSections = append(report.FailingSections, healthsdk.HealthSectionAccessURL) + failingSections = append(failingSections, healthsdk.HealthSectionAccessURL) } if report.Websocket.Severity.Value() > health.SeverityWarning.Value() { - report.FailingSections = append(report.FailingSections, healthsdk.HealthSectionWebsocket) + failingSections = append(failingSections, healthsdk.HealthSectionWebsocket) } if report.Database.Severity.Value() > health.SeverityWarning.Value() { - report.FailingSections = append(report.FailingSections, healthsdk.HealthSectionDatabase) + failingSections = append(failingSections, healthsdk.HealthSectionDatabase) } if report.WorkspaceProxy.Severity.Value() > health.SeverityWarning.Value() { - report.FailingSections = append(report.FailingSections, healthsdk.HealthSectionWorkspaceProxy) + failingSections = append(failingSections, healthsdk.HealthSectionWorkspaceProxy) } if report.ProvisionerDaemons.Severity.Value() > health.SeverityWarning.Value() { - report.FailingSections = append(report.FailingSections, healthsdk.HealthSectionProvisionerDaemons) + failingSections = append(failingSections, healthsdk.HealthSectionProvisionerDaemons) } - report.Healthy = len(report.FailingSections) == 0 + report.Healthy = len(failingSections) == 0 // Review healthcheck sub-reports. 
report.Severity = health.SeverityOK diff --git a/coderd/healthcheck/healthcheck_test.go b/coderd/healthcheck/healthcheck_test.go index 58fbe7305380d..9c744b42d1dca 100644 --- a/coderd/healthcheck/healthcheck_test.go +++ b/coderd/healthcheck/healthcheck_test.go @@ -49,11 +49,10 @@ func TestHealthcheck(t *testing.T) { t.Parallel() for _, c := range []struct { - name string - checker *testChecker - healthy bool - severity health.Severity - failingSections []healthsdk.HealthSection + name string + checker *testChecker + healthy bool + severity health.Severity }{{ name: "OK", checker: &testChecker{ @@ -93,9 +92,8 @@ func TestHealthcheck(t *testing.T) { }, }, }, - healthy: true, - severity: health.SeverityOK, - failingSections: []healthsdk.HealthSection{}, + healthy: true, + severity: health.SeverityOK, }, { name: "DERPFail", checker: &testChecker{ @@ -135,9 +133,8 @@ func TestHealthcheck(t *testing.T) { }, }, }, - healthy: false, - severity: health.SeverityError, - failingSections: []healthsdk.HealthSection{healthsdk.HealthSectionDERP}, + healthy: false, + severity: health.SeverityError, }, { name: "DERPWarning", checker: &testChecker{ @@ -178,9 +175,8 @@ func TestHealthcheck(t *testing.T) { }, }, }, - healthy: true, - severity: health.SeverityWarning, - failingSections: []healthsdk.HealthSection{}, + healthy: true, + severity: health.SeverityWarning, }, { name: "AccessURLFail", checker: &testChecker{ @@ -220,9 +216,8 @@ func TestHealthcheck(t *testing.T) { }, }, }, - healthy: false, - severity: health.SeverityWarning, - failingSections: []healthsdk.HealthSection{healthsdk.HealthSectionAccessURL}, + healthy: false, + severity: health.SeverityWarning, }, { name: "WebsocketFail", checker: &testChecker{ @@ -262,9 +257,8 @@ func TestHealthcheck(t *testing.T) { }, }, }, - healthy: false, - severity: health.SeverityError, - failingSections: []healthsdk.HealthSection{healthsdk.HealthSectionWebsocket}, + healthy: false, + severity: health.SeverityError, }, { name: "DatabaseFail", checker: &testChecker{ @@ -304,9 +298,8 @@ func TestHealthcheck(t *testing.T) { }, }, }, - healthy: false, - severity: health.SeverityError, - failingSections: []healthsdk.HealthSection{healthsdk.HealthSectionDatabase}, + healthy: false, + severity: health.SeverityError, }, { name: "ProxyFail", checker: &testChecker{ @@ -346,9 +339,8 @@ func TestHealthcheck(t *testing.T) { }, }, }, - severity: health.SeverityError, - healthy: false, - failingSections: []healthsdk.HealthSection{healthsdk.HealthSectionWorkspaceProxy}, + severity: health.SeverityError, + healthy: false, }, { name: "ProxyWarn", checker: &testChecker{ @@ -389,9 +381,8 @@ func TestHealthcheck(t *testing.T) { }, }, }, - severity: health.SeverityWarning, - healthy: true, - failingSections: []healthsdk.HealthSection{}, + severity: health.SeverityWarning, + healthy: true, }, { name: "ProvisionerDaemonsFail", checker: &testChecker{ @@ -431,9 +422,8 @@ func TestHealthcheck(t *testing.T) { }, }, }, - severity: health.SeverityError, - healthy: false, - failingSections: []healthsdk.HealthSection{healthsdk.HealthSectionProvisionerDaemons}, + severity: health.SeverityError, + healthy: false, }, { name: "ProvisionerDaemonsWarn", checker: &testChecker{ @@ -474,9 +464,8 @@ func TestHealthcheck(t *testing.T) { }, }, }, - severity: health.SeverityWarning, - healthy: true, - failingSections: []healthsdk.HealthSection{}, + severity: health.SeverityWarning, + healthy: true, }, { name: "AllFail", healthy: false, @@ -518,14 +507,6 @@ func TestHealthcheck(t *testing.T) { }, }, severity: 
health.SeverityError, - failingSections: []healthsdk.HealthSection{ - healthsdk.HealthSectionDERP, - healthsdk.HealthSectionAccessURL, - healthsdk.HealthSectionWebsocket, - healthsdk.HealthSectionDatabase, - healthsdk.HealthSectionWorkspaceProxy, - healthsdk.HealthSectionProvisionerDaemons, - }, }} { c := c t.Run(c.name, func(t *testing.T) { @@ -537,7 +518,6 @@ func TestHealthcheck(t *testing.T) { assert.Equal(t, c.healthy, report.Healthy) assert.Equal(t, c.severity, report.Severity) - assert.Equal(t, c.failingSections, report.FailingSections) assert.Equal(t, c.checker.DERPReport.Healthy, report.DERP.Healthy) assert.Equal(t, c.checker.DERPReport.Severity, report.DERP.Severity) assert.Equal(t, c.checker.DERPReport.Warnings, report.DERP.Warnings) diff --git a/coderd/httpapi/httpapi.go b/coderd/httpapi/httpapi.go index fb5e4361ec32c..c1267d1720e17 100644 --- a/coderd/httpapi/httpapi.go +++ b/coderd/httpapi/httpapi.go @@ -46,25 +46,27 @@ func init() { valid := NameValid(str) return valid == nil } - for _, tag := range []string{"username", "template_name", "workspace_name", "oauth2_app_name"} { + for _, tag := range []string{"username", "organization_name", "template_name", "group_name", "workspace_name", "oauth2_app_name"} { err := Validate.RegisterValidation(tag, nameValidator) if err != nil { panic(err) } } - templateDisplayNameValidator := func(fl validator.FieldLevel) bool { + displayNameValidator := func(fl validator.FieldLevel) bool { f := fl.Field().Interface() str, ok := f.(string) if !ok { return false } - valid := TemplateDisplayNameValid(str) + valid := DisplayNameValid(str) return valid == nil } - err := Validate.RegisterValidation("template_display_name", templateDisplayNameValidator) - if err != nil { - panic(err) + for _, displayNameTag := range []string{"organization_display_name", "template_display_name", "group_display_name"} { + err := Validate.RegisterValidation(displayNameTag, displayNameValidator) + if err != nil { + panic(err) + } } templateVersionNameValidator := func(fl validator.FieldLevel) bool { @@ -76,7 +78,7 @@ func init() { valid := TemplateVersionNameValid(str) return valid == nil } - err = Validate.RegisterValidation("template_version_name", templateVersionNameValidator) + err := Validate.RegisterValidation("template_version_name", templateVersionNameValidator) if err != nil { panic(err) } diff --git a/coderd/httpapi/name.go b/coderd/httpapi/name.go index d8b64a71bdc44..c9f926d4b3b42 100644 --- a/coderd/httpapi/name.go +++ b/coderd/httpapi/name.go @@ -46,6 +46,10 @@ func NameValid(str string) error { if len(str) < 1 { return xerrors.New("must be >= 1 character") } + // Avoid conflicts with routes like /templates/new and /groups/create. + if str == "new" || str == "create" { + return xerrors.Errorf("cannot use %q as a name", str) + } matched := UsernameValidRegex.MatchString(str) if !matched { return xerrors.New("must be alphanumeric with hyphens") @@ -65,8 +69,8 @@ func TemplateVersionNameValid(str string) error { return nil } -// TemplateDisplayNameValid returns whether the input string is a valid template display name. -func TemplateDisplayNameValid(str string) error { +// DisplayNameValid returns whether the input string is a valid template display name. +func DisplayNameValid(str string) error { if len(str) == 0 { return nil // empty display_name is correct } @@ -91,3 +95,14 @@ func UserRealNameValid(str string) error { } return nil } + +// NormalizeUserRealName normalizes a user name such that it will pass +// validation by UserRealNameValid. 
This is done to avoid blocking +// little Bobby Whitespace from using Coder. +func NormalizeRealUsername(str string) string { + s := strings.TrimSpace(str) + if len(s) > 128 { + s = s[:128] + } + return s +} diff --git a/coderd/httpapi/name_test.go b/coderd/httpapi/name_test.go index a6313c54034f5..f0c83ea2bdb0c 100644 --- a/coderd/httpapi/name_test.go +++ b/coderd/httpapi/name_test.go @@ -1,9 +1,11 @@ package httpapi_test import ( + "strings" "testing" "github.com/moby/moby/pkg/namesgenerator" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/coder/coder/v2/coderd/httpapi" @@ -115,7 +117,7 @@ func TestTemplateDisplayNameValid(t *testing.T) { testCase := testCase t.Run(testCase.Name, func(t *testing.T) { t.Parallel() - valid := httpapi.TemplateDisplayNameValid(testCase.Name) + valid := httpapi.DisplayNameValid(testCase.Name) require.Equal(t, testCase.Valid, valid == nil) }) } @@ -217,6 +219,10 @@ func TestUserRealNameValid(t *testing.T) { Name string Valid bool }{ + {"", true}, + {" a", false}, + {"a ", false}, + {" a ", false}, {"1", true}, {"A", true}, {"A1", true}, @@ -229,17 +235,22 @@ func TestUserRealNameValid(t *testing.T) { {"Małgorzata Kalinowska-Iszkowska", true}, {"成龍", true}, {". .", true}, - {"Lord Voldemort ", false}, {" Bellatrix Lestrange", false}, {" ", false}, + {strings.Repeat("a", 128), true}, + {strings.Repeat("a", 129), false}, } for _, testCase := range testCases { testCase := testCase t.Run(testCase.Name, func(t *testing.T) { t.Parallel() - valid := httpapi.UserRealNameValid(testCase.Name) - require.Equal(t, testCase.Valid, valid == nil) + err := httpapi.UserRealNameValid(testCase.Name) + norm := httpapi.NormalizeRealUsername(testCase.Name) + normErr := httpapi.UserRealNameValid(norm) + assert.NoError(t, normErr) + assert.Equal(t, testCase.Valid, err == nil) + assert.Equal(t, testCase.Valid, norm == testCase.Name, "invalid name should be different after normalization") }) } } diff --git a/coderd/httpmw/apikey.go b/coderd/httpmw/apikey.go index 5bb45424b57f9..c4d1c7f202533 100644 --- a/coderd/httpmw/apikey.go +++ b/coderd/httpmw/apikey.go @@ -406,8 +406,7 @@ func ExtractAPIKey(rw http.ResponseWriter, r *http.Request, cfg ExtractAPIKeyCon // If the key is valid, we also fetch the user roles and status. // The roles are used for RBAC authorize checks, and the status // is to block 'suspended' users from accessing the platform. - //nolint:gocritic // system needs to update user roles - roles, err := cfg.DB.GetAuthorizationUserRoles(dbauthz.AsSystemRestricted(ctx), key.UserID) + actor, userStatus, err := UserRBACSubject(ctx, cfg.DB, key.UserID, rbac.ScopeName(key.Scope)) if err != nil { return write(http.StatusUnauthorized, codersdk.Response{ Message: internalErrorMessage, @@ -415,7 +414,7 @@ func ExtractAPIKey(rw http.ResponseWriter, r *http.Request, cfg ExtractAPIKeyCon }) } - if roles.Status == database.UserStatusDormant { + if userStatus == database.UserStatusDormant { // If coder confirms that the dormant user is valid, it can switch their account to active. 
// nolint:gocritic u, err := cfg.DB.UpdateUserStatus(dbauthz.AsSystemRestricted(ctx), database.UpdateUserStatusParams{ @@ -429,39 +428,50 @@ func ExtractAPIKey(rw http.ResponseWriter, r *http.Request, cfg ExtractAPIKeyCon Detail: fmt.Sprintf("can't activate a dormant user: %s", err.Error()), }) } - roles.Status = u.Status + userStatus = u.Status } - if roles.Status != database.UserStatusActive { + if userStatus != database.UserStatusActive { return write(http.StatusUnauthorized, codersdk.Response{ - Message: fmt.Sprintf("User is not active (status = %q). Contact an admin to reactivate your account.", roles.Status), + Message: fmt.Sprintf("User is not active (status = %q). Contact an admin to reactivate your account.", userStatus), }) } + if cfg.PostAuthAdditionalHeadersFunc != nil { + cfg.PostAuthAdditionalHeadersFunc(actor, rw.Header()) + } + + return key, &actor, true +} + +// UserRBACSubject fetches a user's rbac.Subject from the database. It pulls all roles from both +// site and organization scopes. It also pulls the groups, and the user's status. +func UserRBACSubject(ctx context.Context, db database.Store, userID uuid.UUID, scope rbac.ExpandableScope) (rbac.Subject, database.UserStatus, error) { + //nolint:gocritic // system needs to update user roles + roles, err := db.GetAuthorizationUserRoles(dbauthz.AsSystemRestricted(ctx), userID) + if err != nil { + return rbac.Subject{}, "", xerrors.Errorf("get authorization user roles: %w", err) + } + + roleNames, err := roles.RoleNames() + if err != nil { + return rbac.Subject{}, "", xerrors.Errorf("expand role names: %w", err) + } + //nolint:gocritic // Permission to lookup custom roles the user has assigned. - rbacRoles, err := rolestore.Expand(dbauthz.AsSystemRestricted(ctx), cfg.DB, roles.Roles) + rbacRoles, err := rolestore.Expand(dbauthz.AsSystemRestricted(ctx), db, roleNames) if err != nil { - return write(http.StatusInternalServerError, codersdk.Response{ - Message: "Failed to expand authenticated user roles", - Detail: err.Error(), - Validations: nil, - }) + return rbac.Subject{}, "", xerrors.Errorf("expand role names: %w", err) } - // Actor is the user's authorization context. actor := rbac.Subject{ FriendlyName: roles.Username, - ID: key.UserID.String(), + ID: userID.String(), Roles: rbacRoles, Groups: roles.Groups, - Scope: rbac.ScopeName(key.Scope), + Scope: scope, }.WithCachedASTValue() - - if cfg.PostAuthAdditionalHeadersFunc != nil { - cfg.PostAuthAdditionalHeadersFunc(actor, rw.Header()) - } - - return key, &actor, true + return actor, roles.Status, nil } // APITokenFromRequest returns the api token from the request. 
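For context, here is a minimal sketch of how code outside the middleware might reuse the new UserRBACSubject helper, following the same pattern as the oauth2 token-exchange call sites later in this diff. The actAsUser wrapper and its active-status check are illustrative only, not part of the change.

package example

import (
	"context"

	"github.com/google/uuid"
	"golang.org/x/xerrors"

	"github.com/coder/coder/v2/coderd/database"
	"github.com/coder/coder/v2/coderd/database/dbauthz"
	"github.com/coder/coder/v2/coderd/httpmw"
	"github.com/coder/coder/v2/coderd/rbac"
)

// actAsUser builds the user's rbac.Subject once and runs fn with a context
// authorized as that user, mirroring how the token exchange uses the helper.
func actAsUser(ctx context.Context, db database.Store, userID uuid.UUID, fn func(ctx context.Context) error) error {
	actor, status, err := httpmw.UserRBACSubject(ctx, db, userID, rbac.ScopeAll)
	if err != nil {
		return xerrors.Errorf("fetch user actor: %w", err)
	}
	// The API key middleware rejects non-active users; a sketch caller should too.
	if status != database.UserStatusActive {
		return xerrors.Errorf("user is not active (status = %q)", status)
	}
	return fn(dbauthz.As(ctx, actor))
}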
diff --git a/coderd/httpmw/apikey_test.go b/coderd/httpmw/apikey_test.go index 33ba90a4d728c..c2e69eb7ae686 100644 --- a/coderd/httpmw/apikey_test.go +++ b/coderd/httpmw/apikey_test.go @@ -14,16 +14,20 @@ import ( "testing" "time" + "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" "golang.org/x/oauth2" "github.com/coder/coder/v2/coderd/database" + "github.com/coder/coder/v2/coderd/database/dbauthz" "github.com/coder/coder/v2/coderd/database/dbgen" "github.com/coder/coder/v2/coderd/database/dbmem" "github.com/coder/coder/v2/coderd/database/dbtime" "github.com/coder/coder/v2/coderd/httpapi" "github.com/coder/coder/v2/coderd/httpmw" + "github.com/coder/coder/v2/coderd/rbac" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/cryptorand" "github.com/coder/coder/v2/testutil" @@ -38,6 +42,37 @@ func randomAPIKeyParts() (id string, secret string) { func TestAPIKey(t *testing.T) { t.Parallel() + // assertActorOk asserts all the properties of the user auth are ok. + assertActorOk := func(t *testing.T, r *http.Request) { + t.Helper() + + actor, ok := dbauthz.ActorFromContext(r.Context()) + assert.True(t, ok, "dbauthz actor ok") + if ok { + _, err := actor.Roles.Expand() + assert.NoError(t, err, "actor roles ok") + + _, err = actor.Scope.Expand() + assert.NoError(t, err, "actor scope ok") + + err = actor.RegoValueOk() + assert.NoError(t, err, "actor rego ok") + } + + auth, ok := httpmw.UserAuthorizationOptional(r) + assert.True(t, ok, "httpmw auth ok") + if ok { + _, err := auth.Roles.Expand() + assert.NoError(t, err, "auth roles ok") + + _, err = auth.Scope.Expand() + assert.NoError(t, err, "auth scope ok") + + err = auth.RegoValueOk() + assert.NoError(t, err, "auth rego ok") + } + } + successHandler := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { // Only called if the API key passes through the handler. httpapi.Write(context.Background(), rw, http.StatusOK, codersdk.Response{ @@ -256,6 +291,7 @@ func TestAPIKey(t *testing.T) { })(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { // Checks that it exists on the context! _ = httpmw.APIKey(r) + assertActorOk(t, r) httpapi.Write(r.Context(), rw, http.StatusOK, codersdk.Response{ Message: "It worked!", }) @@ -296,6 +332,7 @@ func TestAPIKey(t *testing.T) { // Checks that it exists on the context! apiKey := httpmw.APIKey(r) assert.Equal(t, database.APIKeyScopeApplicationConnect, apiKey.Scope) + assertActorOk(t, r) httpapi.Write(r.Context(), rw, http.StatusOK, codersdk.Response{ Message: "it worked!", @@ -330,6 +367,8 @@ func TestAPIKey(t *testing.T) { })(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { // Checks that it exists on the context! 
_ = httpmw.APIKey(r) + assertActorOk(t, r) + httpapi.Write(r.Context(), rw, http.StatusOK, codersdk.Response{ Message: "It worked!", }) @@ -633,7 +672,7 @@ func TestAPIKey(t *testing.T) { require.Equal(t, sentAPIKey.LoginType, gotAPIKey.LoginType) }) - t.Run("MissongConfig", func(t *testing.T) { + t.Run("MissingConfig", func(t *testing.T) { t.Parallel() var ( db = dbmem.New() @@ -667,4 +706,133 @@ func TestAPIKey(t *testing.T) { out, _ := io.ReadAll(res.Body) require.Contains(t, string(out), "Unable to refresh") }) + + t.Run("CustomRoles", func(t *testing.T) { + t.Parallel() + var ( + db = dbmem.New() + org = dbgen.Organization(t, db, database.Organization{}) + customRole = dbgen.CustomRole(t, db, database.CustomRole{ + Name: "custom-role", + OrgPermissions: []database.CustomRolePermission{}, + OrganizationID: uuid.NullUUID{ + UUID: org.ID, + Valid: true, + }, + }) + user = dbgen.User(t, db, database.User{ + RBACRoles: []string{}, + }) + _ = dbgen.OrganizationMember(t, db, database.OrganizationMember{ + UserID: user.ID, + OrganizationID: org.ID, + CreatedAt: time.Time{}, + UpdatedAt: time.Time{}, + Roles: []string{ + rbac.RoleOrgAdmin(), + customRole.Name, + }, + }) + _, token = dbgen.APIKey(t, db, database.APIKey{ + UserID: user.ID, + ExpiresAt: dbtime.Now().AddDate(0, 0, 1), + }) + + r = httptest.NewRequest("GET", "/", nil) + rw = httptest.NewRecorder() + ) + r.Header.Set(codersdk.SessionTokenHeader, token) + + httpmw.ExtractAPIKeyMW(httpmw.ExtractAPIKeyConfig{ + DB: db, + RedirectToLogin: false, + })(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + assertActorOk(t, r) + + auth := httpmw.UserAuthorization(r) + + roles, err := auth.Roles.Expand() + assert.NoError(t, err, "expand user roles") + // Assert built in org role + assert.True(t, slices.ContainsFunc(roles, func(role rbac.Role) bool { + return role.Identifier.Name == rbac.RoleOrgAdmin() && role.Identifier.OrganizationID == org.ID + }), "org admin role") + // Assert custom role + assert.True(t, slices.ContainsFunc(roles, func(role rbac.Role) bool { + return role.Identifier.Name == customRole.Name && role.Identifier.OrganizationID == org.ID + }), "custom org role") + + httpapi.Write(r.Context(), rw, http.StatusOK, codersdk.Response{ + Message: "It worked!", + }) + })).ServeHTTP(rw, r) + res := rw.Result() + defer res.Body.Close() + require.Equal(t, http.StatusOK, res.StatusCode) + }) + + // There is no sql foreign key constraint to require all assigned roles + // still exist in the database. We need to handle deleted roles. + t.Run("RoleNotExists", func(t *testing.T) { + t.Parallel() + var ( + roleNotExistsName = "role-not-exists" + db = dbmem.New() + org = dbgen.Organization(t, db, database.Organization{}) + user = dbgen.User(t, db, database.User{ + RBACRoles: []string{ + // Also provide an org not exists. In practice this makes no sense + // to store org roles in the user table, but there is no org to + // store it in. So just throw this here for even more unexpected + // behavior handling! 
+ rbac.RoleIdentifier{Name: roleNotExistsName, OrganizationID: uuid.New()}.String(), + }, + }) + _ = dbgen.OrganizationMember(t, db, database.OrganizationMember{ + UserID: user.ID, + OrganizationID: org.ID, + CreatedAt: time.Time{}, + UpdatedAt: time.Time{}, + Roles: []string{ + rbac.RoleOrgAdmin(), + roleNotExistsName, + }, + }) + _, token = dbgen.APIKey(t, db, database.APIKey{ + UserID: user.ID, + ExpiresAt: dbtime.Now().AddDate(0, 0, 1), + }) + + r = httptest.NewRequest("GET", "/", nil) + rw = httptest.NewRecorder() + ) + r.Header.Set(codersdk.SessionTokenHeader, token) + + httpmw.ExtractAPIKeyMW(httpmw.ExtractAPIKeyConfig{ + DB: db, + RedirectToLogin: false, + })(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + assertActorOk(t, r) + auth := httpmw.UserAuthorization(r) + + roles, err := auth.Roles.Expand() + assert.NoError(t, err, "expand user roles") + // Assert built in org role + assert.True(t, slices.ContainsFunc(roles, func(role rbac.Role) bool { + return role.Identifier.Name == rbac.RoleOrgAdmin() && role.Identifier.OrganizationID == org.ID + }), "org admin role") + + // Assert the role-not-exists is not returned + assert.False(t, slices.ContainsFunc(roles, func(role rbac.Role) bool { + return role.Identifier.Name == roleNotExistsName + }), "role should not exist") + + httpapi.Write(r.Context(), rw, http.StatusOK, codersdk.Response{ + Message: "It worked!", + }) + })).ServeHTTP(rw, r) + res := rw.Result() + defer res.Body.Close() + require.Equal(t, http.StatusOK, res.StatusCode) + }) } diff --git a/coderd/httpmw/authorize_test.go b/coderd/httpmw/authorize_test.go index c67be2ca2bdf7..5d04c5afacdb3 100644 --- a/coderd/httpmw/authorize_test.go +++ b/coderd/httpmw/authorize_test.go @@ -27,27 +27,26 @@ func TestExtractUserRoles(t *testing.T) { t.Parallel() testCases := []struct { Name string - AddUser func(db database.Store) (database.User, []string, string) + AddUser func(db database.Store) (database.User, []rbac.RoleIdentifier, string) }{ { Name: "Member", - AddUser: func(db database.Store) (database.User, []string, string) { - roles := []string{} - user, token := addUser(t, db, roles...) - return user, append(roles, rbac.RoleMember()), token + AddUser: func(db database.Store) (database.User, []rbac.RoleIdentifier, string) { + user, token := addUser(t, db) + return user, []rbac.RoleIdentifier{rbac.RoleMember()}, token }, }, { - Name: "Admin", - AddUser: func(db database.Store) (database.User, []string, string) { - roles := []string{rbac.RoleOwner()} + Name: "Owner", + AddUser: func(db database.Store) (database.User, []rbac.RoleIdentifier, string) { + roles := []string{codersdk.RoleOwner} user, token := addUser(t, db, roles...) - return user, append(roles, rbac.RoleMember()), token + return user, []rbac.RoleIdentifier{rbac.RoleOwner(), rbac.RoleMember()}, token }, }, { Name: "OrgMember", - AddUser: func(db database.Store) (database.User, []string, string) { + AddUser: func(db database.Store) (database.User, []rbac.RoleIdentifier, string) { roles := []string{} user, token := addUser(t, db, roles...) 
org, err := db.InsertOrganization(context.Background(), database.InsertOrganizationParams{ @@ -68,15 +67,15 @@ func TestExtractUserRoles(t *testing.T) { Roles: orgRoles, }) require.NoError(t, err) - return user, append(roles, append(orgRoles, rbac.RoleMember(), rbac.RoleOrgMember(org.ID))...), token + return user, []rbac.RoleIdentifier{rbac.RoleMember(), rbac.ScopedRoleOrgMember(org.ID)}, token }, }, { Name: "MultipleOrgMember", - AddUser: func(db database.Store) (database.User, []string, string) { - roles := []string{} - user, token := addUser(t, db, roles...) - roles = append(roles, rbac.RoleMember()) + AddUser: func(db database.Store) (database.User, []rbac.RoleIdentifier, string) { + expected := []rbac.RoleIdentifier{} + user, token := addUser(t, db) + expected = append(expected, rbac.RoleMember()) for i := 0; i < 3; i++ { organization, err := db.InsertOrganization(context.Background(), database.InsertOrganizationParams{ ID: uuid.New(), @@ -89,7 +88,8 @@ func TestExtractUserRoles(t *testing.T) { orgRoles := []string{} if i%2 == 0 { - orgRoles = append(orgRoles, rbac.RoleOrgAdmin(organization.ID)) + orgRoles = append(orgRoles, codersdk.RoleOrganizationAdmin) + expected = append(expected, rbac.ScopedRoleOrgAdmin(organization.ID)) } _, err = db.InsertOrganizationMember(context.Background(), database.InsertOrganizationMemberParams{ OrganizationID: organization.ID, @@ -99,10 +99,9 @@ func TestExtractUserRoles(t *testing.T) { Roles: orgRoles, }) require.NoError(t, err) - roles = append(roles, orgRoles...) - roles = append(roles, rbac.RoleOrgMember(organization.ID)) + expected = append(expected, rbac.ScopedRoleOrgMember(organization.ID)) } - return user, roles, token + return user, expected, token }, }, } @@ -147,6 +146,9 @@ func addUser(t *testing.T, db database.Store, roles ...string) (database.User, s id, secret = randomAPIKeyParts() hashed = sha256.Sum256([]byte(secret)) ) + if roles == nil { + roles = []string{} + } user, err := db.InsertUser(context.Background(), database.InsertUserParams{ ID: uuid.New(), diff --git a/coderd/httpmw/authz_test.go b/coderd/httpmw/authz_test.go index b469a8f23a5ed..706590e210c1f 100644 --- a/coderd/httpmw/authz_test.go +++ b/coderd/httpmw/authz_test.go @@ -11,6 +11,7 @@ import ( "github.com/coder/coder/v2/coderd/coderdtest" "github.com/coder/coder/v2/coderd/database/dbauthz" "github.com/coder/coder/v2/coderd/httpmw" + "github.com/coder/coder/v2/coderd/rbac" ) func TestAsAuthzSystem(t *testing.T) { @@ -34,7 +35,7 @@ func TestAsAuthzSystem(t *testing.T) { actor, ok := dbauthz.ActorFromContext(req.Context()) assert.True(t, ok, "actor should exist") assert.False(t, userActor.Equal(actor), "systemActor should not be the user actor") - assert.Contains(t, actor.Roles.Names(), "system", "should have system role") + assert.Contains(t, actor.Roles.Names(), rbac.RoleIdentifier{Name: "system"}, "should have system role") }) mwAssertUser := mwAssert(func(req *http.Request) { diff --git a/coderd/httpmw/csp.go b/coderd/httpmw/csp.go index fde5c62d8bd6f..0862a0cd7cb2a 100644 --- a/coderd/httpmw/csp.go +++ b/coderd/httpmw/csp.go @@ -43,7 +43,9 @@ const ( // CSPHeaders returns a middleware that sets the Content-Security-Policy header // for coderd. It takes a function that allows adding supported external websocket // hosts. This is primarily to support the terminal connecting to a workspace proxy. 
-func CSPHeaders(websocketHosts func() []string) func(next http.Handler) http.Handler { +// +//nolint:revive +func CSPHeaders(telemetry bool, websocketHosts func() []string) func(next http.Handler) http.Handler { return func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // Content-Security-Policy disables loading certain content types and can prevent XSS injections. @@ -83,6 +85,11 @@ func CSPHeaders(websocketHosts func() []string) func(next http.Handler) http.Han // "require-trusted-types-for" : []string{"'script'"}, } + if telemetry { + // If telemetry is enabled, we report to coder.com. + cspSrcs.Append(cspDirectiveConnectSrc, "https://coder.com") + } + // This extra connect-src addition is required to support old webkit // based browsers (Safari). // See issue: https://github.com/w3c/webappsec-csp/issues/7 diff --git a/coderd/httpmw/csp_test.go b/coderd/httpmw/csp_test.go index 2dca209faa5c3..d389d778eeba6 100644 --- a/coderd/httpmw/csp_test.go +++ b/coderd/httpmw/csp_test.go @@ -19,7 +19,7 @@ func TestCSPConnect(t *testing.T) { r := httptest.NewRequest(http.MethodGet, "/", nil) rw := httptest.NewRecorder() - httpmw.CSPHeaders(func() []string { + httpmw.CSPHeaders(false, func() []string { return expected })(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { rw.WriteHeader(http.StatusOK) diff --git a/coderd/httpmw/organizationparam.go b/coderd/httpmw/organizationparam.go index 0c8ccae96c519..a72b361b90d71 100644 --- a/coderd/httpmw/organizationparam.go +++ b/coderd/httpmw/organizationparam.go @@ -78,10 +78,6 @@ func ExtractOrganizationParam(db database.Store) func(http.Handler) http.Handler } if httpapi.Is404Error(dbErr) { httpapi.ResourceNotFound(rw) - httpapi.Write(ctx, rw, http.StatusNotFound, codersdk.Response{ - Message: fmt.Sprintf("Organization %q not found.", arg), - Detail: "Provide either the organization id or name.", - }) return } if dbErr != nil { @@ -124,10 +120,10 @@ func ExtractOrganizationMemberParam(db database.Store) func(http.Handler) http.H } organization := OrganizationParam(r) - organizationMember, err := db.GetOrganizationMemberByUserID(ctx, database.GetOrganizationMemberByUserIDParams{ + organizationMember, err := database.ExpectOne(db.OrganizationMembers(ctx, database.OrganizationMembersParams{ OrganizationID: organization.ID, UserID: user.ID, - }) + })) if httpapi.Is404Error(err) { httpapi.ResourceNotFound(rw) return @@ -141,7 +137,7 @@ func ExtractOrganizationMemberParam(db database.Store) func(http.Handler) http.H } ctx = context.WithValue(ctx, organizationMemberParamContextKey{}, OrganizationMember{ - OrganizationMember: organizationMember, + OrganizationMember: organizationMember.OrganizationMember, // Here we're making two exceptions to the rule about not leaking data about the user // to the API handler, which is to include the username and avatar URL. 
// If the caller has permission to read the OrganizationMember, then we're explicitly diff --git a/coderd/httpmw/organizationparam_test.go b/coderd/httpmw/organizationparam_test.go index 02b7ce1e14ad8..ca3adcabbae01 100644 --- a/coderd/httpmw/organizationparam_test.go +++ b/coderd/httpmw/organizationparam_test.go @@ -16,7 +16,6 @@ import ( "github.com/coder/coder/v2/coderd/database/dbmem" "github.com/coder/coder/v2/coderd/database/dbtime" "github.com/coder/coder/v2/coderd/httpmw" - "github.com/coder/coder/v2/coderd/rbac" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/testutil" ) @@ -152,11 +151,11 @@ func TestOrganizationParam(t *testing.T) { _ = dbgen.OrganizationMember(t, db, database.OrganizationMember{ OrganizationID: organization.ID, UserID: user.ID, - Roles: []string{rbac.RoleOrgMember(organization.ID)}, + Roles: []string{codersdk.RoleOrganizationMember}, }) _, err := db.UpdateUserRoles(ctx, database.UpdateUserRolesParams{ ID: user.ID, - GrantedRoles: []string{rbac.RoleTemplateAdmin()}, + GrantedRoles: []string{codersdk.RoleTemplateAdmin}, }) require.NoError(t, err) diff --git a/coderd/httpmw/ratelimit_test.go b/coderd/httpmw/ratelimit_test.go index a320e05af7ffe..1dd12da89df1a 100644 --- a/coderd/httpmw/ratelimit_test.go +++ b/coderd/httpmw/ratelimit_test.go @@ -16,7 +16,6 @@ import ( "github.com/coder/coder/v2/coderd/database/dbgen" "github.com/coder/coder/v2/coderd/database/dbmem" "github.com/coder/coder/v2/coderd/httpmw" - "github.com/coder/coder/v2/coderd/rbac" "github.com/coder/coder/v2/codersdk" ) @@ -117,7 +116,7 @@ func TestRateLimit(t *testing.T) { db := dbmem.New() u := dbgen.User(t, db, database.User{ - RBACRoles: []string{rbac.RoleOwner()}, + RBACRoles: []string{codersdk.RoleOwner}, }) _, key := dbgen.APIKey(t, db, database.APIKey{UserID: u.ID}) diff --git a/coderd/httpmw/workspaceagent.go b/coderd/httpmw/workspaceagent.go index a72d05caecbb2..99889c0bae5fc 100644 --- a/coderd/httpmw/workspaceagent.go +++ b/coderd/httpmw/workspaceagent.go @@ -119,9 +119,18 @@ func ExtractWorkspaceAgentAndLatestBuild(opts ExtractWorkspaceAgentAndLatestBuil return } + roleNames, err := roles.RoleNames() + if err != nil { + httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ + Message: "Internal server error", + Detail: err.Error(), + }) + return + } + subject := rbac.Subject{ ID: row.Workspace.OwnerID.String(), - Roles: rbac.RoleNames(roles.Roles), + Roles: rbac.RoleIdentifiers(roleNames), Groups: roles.Groups, Scope: rbac.WorkspaceAgentScope(rbac.WorkspaceAgentScopeParams{ WorkspaceID: row.Workspace.ID, diff --git a/coderd/identityprovider/tokens.go b/coderd/identityprovider/tokens.go index e9c9e743e7225..0e41ba940298f 100644 --- a/coderd/identityprovider/tokens.go +++ b/coderd/identityprovider/tokens.go @@ -209,21 +209,14 @@ func authorizationCodeGrant(ctx context.Context, db database.Store, app database } // Grab the user roles so we can perform the exchange as the user. - //nolint:gocritic // In the token exchange, there is no user actor. - roles, err := db.GetAuthorizationUserRoles(dbauthz.AsSystemRestricted(ctx), dbCode.UserID) + actor, _, err := httpmw.UserRBACSubject(ctx, db, dbCode.UserID, rbac.ScopeAll) if err != nil { - return oauth2.Token{}, err - } - userSubj := rbac.Subject{ - ID: dbCode.UserID.String(), - Roles: rbac.RoleNames(roles.Roles), - Groups: roles.Groups, - Scope: rbac.ScopeAll, + return oauth2.Token{}, xerrors.Errorf("fetch user actor: %w", err) } // Do the actual token exchange in the database. 
err = db.InTx(func(tx database.Store) error { - ctx := dbauthz.As(ctx, userSubj) + ctx := dbauthz.As(ctx, actor) err = tx.DeleteOAuth2ProviderAppCodeByID(ctx, dbCode.ID) if err != nil { return xerrors.Errorf("delete oauth2 app code: %w", err) @@ -305,16 +298,10 @@ func refreshTokenGrant(ctx context.Context, db database.Store, app database.OAut if err != nil { return oauth2.Token{}, err } - //nolint:gocritic // There is no user yet so we must use the system. - roles, err := db.GetAuthorizationUserRoles(dbauthz.AsSystemRestricted(ctx), prevKey.UserID) + + actor, _, err := httpmw.UserRBACSubject(ctx, db, prevKey.UserID, rbac.ScopeAll) if err != nil { - return oauth2.Token{}, err - } - userSubj := rbac.Subject{ - ID: prevKey.UserID.String(), - Roles: rbac.RoleNames(roles.Roles), - Groups: roles.Groups, - Scope: rbac.ScopeAll, + return oauth2.Token{}, xerrors.Errorf("fetch user actor: %w", err) } // Generate a new refresh token. @@ -339,7 +326,7 @@ func refreshTokenGrant(ctx context.Context, db database.Store, app database.OAut // Replace the token. err = db.InTx(func(tx database.Store) error { - ctx := dbauthz.As(ctx, userSubj) + ctx := dbauthz.As(ctx, actor) err = tx.DeleteAPIKeyByID(ctx, prevKey.ID) // This cascades to the token. if err != nil { return xerrors.Errorf("delete oauth2 app token: %w", err) diff --git a/coderd/insights.go b/coderd/insights.go index a54e79a525644..7234a88d44fe9 100644 --- a/coderd/insights.go +++ b/coderd/insights.go @@ -30,6 +30,7 @@ const insightsTimeLayout = time.RFC3339 // @Security CoderSessionToken // @Produce json // @Tags Insights +// @Param tz_offset query int true "Time-zone offset (e.g. -2)" // @Success 200 {object} codersdk.DAUsResponse // @Router /insights/daus [get] func (api *API) deploymentDAUs(rw http.ResponseWriter, r *http.Request) { @@ -100,8 +101,9 @@ func (api *API) returnDAUsInternal(rw http.ResponseWriter, r *http.Request, temp // @Security CoderSessionToken // @Produce json // @Tags Insights -// @Param before query int true "Start time" -// @Param after query int true "End time" +// @Param start_time query string true "Start time" format(date-time) +// @Param end_time query string true "End time" format(date-time) +// @Param template_ids query []string false "Template IDs" collectionFormat(csv) // @Success 200 {object} codersdk.UserActivityInsightsResponse // @Router /insights/user-activity [get] func (api *API) insightsUserActivity(rw http.ResponseWriter, r *http.Request) { @@ -202,8 +204,9 @@ func (api *API) insightsUserActivity(rw http.ResponseWriter, r *http.Request) { // @Security CoderSessionToken // @Produce json // @Tags Insights -// @Param before query int true "Start time" -// @Param after query int true "End time" +// @Param start_time query string true "Start time" format(date-time) +// @Param end_time query string true "End time" format(date-time) +// @Param template_ids query []string false "Template IDs" collectionFormat(csv) // @Success 200 {object} codersdk.UserLatencyInsightsResponse // @Router /insights/user-latency [get] func (api *API) insightsUserLatency(rw http.ResponseWriter, r *http.Request) { @@ -294,8 +297,10 @@ func (api *API) insightsUserLatency(rw http.ResponseWriter, r *http.Request) { // @Security CoderSessionToken // @Produce json // @Tags Insights -// @Param before query int true "Start time" -// @Param after query int true "End time" +// @Param start_time query string true "Start time" format(date-time) +// @Param end_time query string true "End time" format(date-time) +// @Param interval query string 
true "Interval" enums(week,day) +// @Param template_ids query []string false "Template IDs" collectionFormat(csv) // @Success 200 {object} codersdk.TemplateInsightsResponse // @Router /insights/templates [get] func (api *API) insightsTemplates(rw http.ResponseWriter, r *http.Request) { diff --git a/coderd/insights_test.go b/coderd/insights_test.go index 22e7ed6947bac..2447ec37f3516 100644 --- a/coderd/insights_test.go +++ b/coderd/insights_test.go @@ -21,7 +21,6 @@ import ( "cdr.dev/slog/sloggers/slogtest" "github.com/coder/coder/v2/agent/agenttest" agentproto "github.com/coder/coder/v2/agent/proto" - "github.com/coder/coder/v2/coderd/batchstats" "github.com/coder/coder/v2/coderd/coderdtest" "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/dbauthz" @@ -684,11 +683,11 @@ func TestTemplateInsights_Golden(t *testing.T) { // NOTE(mafredri): Ideally we would pass batcher as a coderd option and // insert using the agentClient, but we have a circular dependency on // the database. - batcher, batcherCloser, err := batchstats.New( + batcher, batcherCloser, err := workspacestats.NewBatcher( ctx, - batchstats.WithStore(db), - batchstats.WithLogger(logger.Named("batchstats")), - batchstats.WithInterval(time.Hour), + workspacestats.BatcherWithStore(db), + workspacestats.BatcherWithLogger(logger.Named("batchstats")), + workspacestats.BatcherWithInterval(time.Hour), ) require.NoError(t, err) defer batcherCloser() // Flushes the stats, this is to ensure they're written. @@ -1583,11 +1582,11 @@ func TestUserActivityInsights_Golden(t *testing.T) { // NOTE(mafredri): Ideally we would pass batcher as a coderd option and // insert using the agentClient, but we have a circular dependency on // the database. - batcher, batcherCloser, err := batchstats.New( + batcher, batcherCloser, err := workspacestats.NewBatcher( ctx, - batchstats.WithStore(db), - batchstats.WithLogger(logger.Named("batchstats")), - batchstats.WithInterval(time.Hour), + workspacestats.BatcherWithStore(db), + workspacestats.BatcherWithLogger(logger.Named("batchstats")), + workspacestats.BatcherWithInterval(time.Hour), ) require.NoError(t, err) defer batcherCloser() // Flushes the stats, this is to ensure they're written. 
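As a usage note, a minimal sketch of constructing the renamed stats batcher with the option functions shown in the updated insights tests; the exampleBatcher wrapper and the coderd/workspacestats import path are assumptions for illustration.

package example

import (
	"context"
	"time"

	"cdr.dev/slog"

	"github.com/coder/coder/v2/coderd/database/dbmem"
	"github.com/coder/coder/v2/coderd/workspacestats"
)

// exampleBatcher shows the batchstats -> workspacestats.NewBatcher call shape
// used above: options configure the store, logger, and flush interval.
func exampleBatcher(ctx context.Context, logger slog.Logger) error {
	db := dbmem.New()

	batcher, closeBatcher, err := workspacestats.NewBatcher(
		ctx,
		workspacestats.BatcherWithStore(db),
		workspacestats.BatcherWithLogger(logger.Named("batchstats")),
		// A long interval means stats are only written when the closer flushes them.
		workspacestats.BatcherWithInterval(time.Hour),
	)
	if err != nil {
		return err
	}
	// Closing flushes any buffered stats, as the test comments note.
	defer closeBatcher()

	_ = batcher // in real code, the batcher would be wired into the agent stats reporter
	return nil
}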
diff --git a/coderd/members.go b/coderd/members.go index beae302ab3124..24f712b8154c7 100644 --- a/coderd/members.go +++ b/coderd/members.go @@ -1,17 +1,162 @@ package coderd import ( + "context" "net/http" - "github.com/coder/coder/v2/coderd/database/db2sdk" - "github.com/coder/coder/v2/coderd/rbac" + "github.com/google/uuid" + "golang.org/x/xerrors" + "github.com/coder/coder/v2/coderd/audit" "github.com/coder/coder/v2/coderd/database" + "github.com/coder/coder/v2/coderd/database/db2sdk" + "github.com/coder/coder/v2/coderd/database/dbtime" "github.com/coder/coder/v2/coderd/httpapi" "github.com/coder/coder/v2/coderd/httpmw" + "github.com/coder/coder/v2/coderd/rbac" "github.com/coder/coder/v2/codersdk" ) +// @Summary Add organization member +// @ID add-organization-member +// @Security CoderSessionToken +// @Produce json +// @Tags Members +// @Param organization path string true "Organization ID" +// @Param user path string true "User ID, name, or me" +// @Success 200 {object} codersdk.OrganizationMember +// @Router /organizations/{organization}/members/{user} [post] +func (api *API) postOrganizationMember(rw http.ResponseWriter, r *http.Request) { + var ( + ctx = r.Context() + organization = httpmw.OrganizationParam(r) + user = httpmw.UserParam(r) + auditor = api.Auditor.Load() + aReq, commitAudit = audit.InitRequest[database.AuditableOrganizationMember](rw, &audit.RequestParams{ + Audit: *auditor, + Log: api.Logger, + Request: r, + Action: database.AuditActionCreate, + }) + ) + aReq.Old = database.AuditableOrganizationMember{} + defer commitAudit() + + member, err := api.Database.InsertOrganizationMember(ctx, database.InsertOrganizationMemberParams{ + OrganizationID: organization.ID, + UserID: user.ID, + CreatedAt: dbtime.Now(), + UpdatedAt: dbtime.Now(), + Roles: []string{}, + }) + if httpapi.Is404Error(err) { + httpapi.ResourceNotFound(rw) + return + } + if database.IsUniqueViolation(err, database.UniqueOrganizationMembersPkey) { + httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ + Message: "Organization member already exists in this organization", + }) + return + } + if err != nil { + httpapi.InternalServerError(rw, err) + return + } + + aReq.New = member.Auditable(user.Username) + resp, err := convertOrganizationMembers(ctx, api.Database, []database.OrganizationMember{member}) + if err != nil { + httpapi.InternalServerError(rw, err) + return + } + + if len(resp) == 0 { + httpapi.InternalServerError(rw, xerrors.Errorf("marshal member")) + return + } + + httpapi.Write(ctx, rw, http.StatusOK, resp[0]) +} + +// @Summary Remove organization member +// @ID remove-organization-member +// @Security CoderSessionToken +// @Produce json +// @Tags Members +// @Param organization path string true "Organization ID" +// @Param user path string true "User ID, name, or me" +// @Success 200 {object} codersdk.OrganizationMember +// @Router /organizations/{organization}/members/{user} [delete] +func (api *API) deleteOrganizationMember(rw http.ResponseWriter, r *http.Request) { + var ( + ctx = r.Context() + organization = httpmw.OrganizationParam(r) + member = httpmw.OrganizationMemberParam(r) + auditor = api.Auditor.Load() + aReq, commitAudit = audit.InitRequest[database.AuditableOrganizationMember](rw, &audit.RequestParams{ + Audit: *auditor, + Log: api.Logger, + Request: r, + Action: database.AuditActionDelete, + }) + ) + aReq.Old = member.OrganizationMember.Auditable(member.Username) + defer commitAudit() + + err := api.Database.DeleteOrganizationMember(ctx, 
database.DeleteOrganizationMemberParams{ + OrganizationID: organization.ID, + UserID: member.UserID, + }) + if httpapi.Is404Error(err) { + httpapi.ResourceNotFound(rw) + return + } + if err != nil { + httpapi.InternalServerError(rw, err) + return + } + + aReq.New = database.AuditableOrganizationMember{} + httpapi.Write(ctx, rw, http.StatusOK, "organization member removed") +} + +// @Summary List organization members +// @ID list-organization-members +// @Security CoderSessionToken +// @Produce json +// @Tags Members +// @Param organization path string true "Organization ID" +// @Success 200 {object} []codersdk.OrganizationMemberWithName +// @Router /organizations/{organization}/members [get] +func (api *API) listMembers(rw http.ResponseWriter, r *http.Request) { + var ( + ctx = r.Context() + organization = httpmw.OrganizationParam(r) + ) + + members, err := api.Database.OrganizationMembers(ctx, database.OrganizationMembersParams{ + OrganizationID: organization.ID, + UserID: uuid.Nil, + }) + if httpapi.Is404Error(err) { + httpapi.ResourceNotFound(rw) + return + } + if err != nil { + httpapi.InternalServerError(rw, err) + return + } + + resp, err := convertOrganizationMemberRows(ctx, api.Database, members) + if err != nil { + httpapi.InternalServerError(rw, err) + return + } + + httpapi.Write(ctx, rw, http.StatusOK, resp) +} + // @Summary Assign role to organization member // @ID assign-role-to-organization-member // @Security CoderSessionToken @@ -25,13 +170,23 @@ import ( // @Router /organizations/{organization}/members/{user}/roles [put] func (api *API) putMemberRoles(rw http.ResponseWriter, r *http.Request) { var ( - ctx = r.Context() - organization = httpmw.OrganizationParam(r) - member = httpmw.OrganizationMemberParam(r) - apiKey = httpmw.APIKey(r) + ctx = r.Context() + organization = httpmw.OrganizationParam(r) + member = httpmw.OrganizationMemberParam(r) + apiKey = httpmw.APIKey(r) + auditor = api.Auditor.Load() + aReq, commitAudit = audit.InitRequest[database.AuditableOrganizationMember](rw, &audit.RequestParams{ + OrganizationID: organization.ID, + Audit: *auditor, + Log: api.Logger, + Request: r, + Action: database.AuditActionWrite, + }) ) + aReq.Old = member.OrganizationMember.Auditable(member.Username) + defer commitAudit() - if apiKey.UserID == member.UserID { + if apiKey.UserID == member.OrganizationMember.UserID { httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ Message: "You cannot change your own organization roles.", }) @@ -48,28 +203,116 @@ func (api *API) putMemberRoles(rw http.ResponseWriter, r *http.Request) { UserID: member.UserID, OrgID: organization.ID, }) + if httpapi.Is404Error(err) { + httpapi.Forbidden(rw) + return + } if err != nil { httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ Message: err.Error(), }) return } + aReq.New = database.AuditableOrganizationMember{ + OrganizationMember: updatedUser, + Username: member.Username, + } - httpapi.Write(ctx, rw, http.StatusOK, convertOrganizationMember(updatedUser)) + resp, err := convertOrganizationMembers(ctx, api.Database, []database.OrganizationMember{updatedUser}) + if err != nil { + httpapi.InternalServerError(rw, err) + return + } + if len(resp) != 1 { + httpapi.InternalServerError(rw, xerrors.Errorf("failed to serialize member to response, update still succeeded")) + return + } + httpapi.Write(ctx, rw, http.StatusOK, resp[0]) +} + +// convertOrganizationMembers batches the role lookup to make only 1 sql call +// We +func convertOrganizationMembers(ctx context.Context, db 
database.Store, mems []database.OrganizationMember) ([]codersdk.OrganizationMember, error) { + converted := make([]codersdk.OrganizationMember, 0, len(mems)) + roleLookup := make([]database.NameOrganizationPair, 0) + + for _, m := range mems { + converted = append(converted, codersdk.OrganizationMember{ + UserID: m.UserID, + OrganizationID: m.OrganizationID, + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + Roles: db2sdk.List(m.Roles, func(r string) codersdk.SlimRole { + // If it is a built-in role, no lookups are needed. + rbacRole, err := rbac.RoleByName(rbac.RoleIdentifier{Name: r, OrganizationID: m.OrganizationID}) + if err == nil { + return db2sdk.SlimRole(rbacRole) + } + + // We know the role name and the organization ID. We are missing the + // display name. Append the lookup parameter, so we can get the display name + roleLookup = append(roleLookup, database.NameOrganizationPair{ + Name: r, + OrganizationID: m.OrganizationID, + }) + return codersdk.SlimRole{ + Name: r, + DisplayName: "", + OrganizationID: m.OrganizationID.String(), + } + }), + }) + } + + customRoles, err := db.CustomRoles(ctx, database.CustomRolesParams{ + LookupRoles: roleLookup, + ExcludeOrgRoles: false, + OrganizationID: uuid.UUID{}, + }) + if err != nil { + // We are missing the display names, but that is not absolutely required. So just + // return the converted and the names will be used instead of the display names. + return converted, xerrors.Errorf("lookup custom roles: %w", err) + } + + // Now map the customRoles back to the slimRoles for their display name. + customRolesMap := make(map[string]database.CustomRole) + for _, role := range customRoles { + customRolesMap[role.RoleIdentifier().UniqueName()] = role + } + + for i := range converted { + for j, role := range converted[i].Roles { + if cr, ok := customRolesMap[role.UniqueName()]; ok { + converted[i].Roles[j].DisplayName = cr.DisplayName + } + } + } + + return converted, nil } -func convertOrganizationMember(mem database.OrganizationMember) codersdk.OrganizationMember { - convertedMember := codersdk.OrganizationMember{ - UserID: mem.UserID, - OrganizationID: mem.OrganizationID, - CreatedAt: mem.CreatedAt, - UpdatedAt: mem.UpdatedAt, - Roles: make([]codersdk.SlimRole, 0, len(mem.Roles)), +func convertOrganizationMemberRows(ctx context.Context, db database.Store, rows []database.OrganizationMembersRow) ([]codersdk.OrganizationMemberWithName, error) { + members := make([]database.OrganizationMember, 0) + for _, row := range rows { + members = append(members, row.OrganizationMember) } - for _, roleName := range mem.Roles { - rbacRole, _ := rbac.RoleByName(roleName) - convertedMember.Roles = append(convertedMember.Roles, db2sdk.SlimRole(rbacRole)) + convertedMembers, err := convertOrganizationMembers(ctx, db, members) + if err != nil { + return nil, err + } + if len(convertedMembers) != len(rows) { + return nil, xerrors.Errorf("conversion failed, mismatch slice lengths") + } + + converted := make([]codersdk.OrganizationMemberWithName, 0) + for i := range convertedMembers { + converted = append(converted, codersdk.OrganizationMemberWithName{ + Username: rows[i].Username, + OrganizationMember: convertedMembers[i], + }) } - return convertedMember + + return converted, nil } diff --git a/coderd/members_test.go b/coderd/members_test.go new file mode 100644 index 0000000000000..3db296ef6009a --- /dev/null +++ b/coderd/members_test.go @@ -0,0 +1,190 @@ +package coderd_test + +import ( + "net/http" + "testing" + + "github.com/google/uuid" + 
"github.com/stretchr/testify/require" + + "github.com/coder/coder/v2/coderd/coderdtest" + "github.com/coder/coder/v2/coderd/database/db2sdk" + "github.com/coder/coder/v2/coderd/rbac" + "github.com/coder/coder/v2/codersdk" + "github.com/coder/coder/v2/testutil" +) + +func TestAddMember(t *testing.T) { + t.Parallel() + + t.Run("OK", func(t *testing.T) { + t.Parallel() + owner := coderdtest.New(t, nil) + first := coderdtest.CreateFirstUser(t, owner) + ctx := testutil.Context(t, testutil.WaitMedium) + org, err := owner.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ + Name: "other", + DisplayName: "", + Description: "", + Icon: "", + }) + require.NoError(t, err) + + // Make a user not in the second organization + _, user := coderdtest.CreateAnotherUser(t, owner, first.OrganizationID) + + members, err := owner.OrganizationMembers(ctx, org.ID) + require.NoError(t, err) + require.Len(t, members, 1) // Verify just the 1 member + + // Add user to org + _, err = owner.PostOrganizationMember(ctx, org.ID, user.Username) + require.NoError(t, err) + + members, err = owner.OrganizationMembers(ctx, org.ID) + require.NoError(t, err) + // Owner + new member + require.Len(t, members, 2) + require.ElementsMatch(t, + []uuid.UUID{first.UserID, user.ID}, + db2sdk.List(members, onlyIDs)) + }) + + t.Run("AlreadyMember", func(t *testing.T) { + t.Parallel() + owner := coderdtest.New(t, nil) + first := coderdtest.CreateFirstUser(t, owner) + _, user := coderdtest.CreateAnotherUser(t, owner, first.OrganizationID) + + ctx := testutil.Context(t, testutil.WaitMedium) + // Add user to org, even though they already exist + // nolint:gocritic // must be an owner to see the user + _, err := owner.PostOrganizationMember(ctx, first.OrganizationID, user.Username) + require.ErrorContains(t, err, "already exists") + }) + + t.Run("UserNotExists", func(t *testing.T) { + t.Parallel() + owner := coderdtest.New(t, nil) + _ = coderdtest.CreateFirstUser(t, owner) + ctx := testutil.Context(t, testutil.WaitMedium) + + org, err := owner.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ + Name: "other", + DisplayName: "", + Description: "", + Icon: "", + }) + require.NoError(t, err) + + // Add user to org + _, err = owner.PostOrganizationMember(ctx, org.ID, uuid.NewString()) + require.Error(t, err) + var apiErr *codersdk.Error + require.ErrorAs(t, err, &apiErr) + require.Contains(t, apiErr.Message, "must be an existing") + }) +} + +func TestListMembers(t *testing.T) { + t.Parallel() + + t.Run("OK", func(t *testing.T) { + t.Parallel() + owner := coderdtest.New(t, nil) + first := coderdtest.CreateFirstUser(t, owner) + + client, user := coderdtest.CreateAnotherUser(t, owner, first.OrganizationID, rbac.ScopedRoleOrgAdmin(first.OrganizationID)) + + ctx := testutil.Context(t, testutil.WaitShort) + members, err := client.OrganizationMembers(ctx, first.OrganizationID) + require.NoError(t, err) + require.Len(t, members, 2) + require.ElementsMatch(t, + []uuid.UUID{first.UserID, user.ID}, + db2sdk.List(members, onlyIDs)) + }) + + // Calling it from a user without the org access. 
+ t.Run("NotInOrg", func(t *testing.T) { + t.Parallel() + owner := coderdtest.New(t, nil) + first := coderdtest.CreateFirstUser(t, owner) + + client, _ := coderdtest.CreateAnotherUser(t, owner, first.OrganizationID, rbac.ScopedRoleOrgAdmin(first.OrganizationID)) + + ctx := testutil.Context(t, testutil.WaitShort) + org, err := owner.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ + Name: "test", + DisplayName: "", + Description: "", + }) + require.NoError(t, err, "create organization") + + // 404 error is expected instead of a 403/401 to not leak existence of + // an organization. + _, err = client.OrganizationMembers(ctx, org.ID) + require.ErrorContains(t, err, "404") + }) +} + +func TestRemoveMember(t *testing.T) { + t.Parallel() + + t.Run("OK", func(t *testing.T) { + t.Parallel() + owner := coderdtest.New(t, nil) + first := coderdtest.CreateFirstUser(t, owner) + orgAdminClient, orgAdmin := coderdtest.CreateAnotherUser(t, owner, first.OrganizationID, rbac.ScopedRoleOrgAdmin(first.OrganizationID)) + _, user := coderdtest.CreateAnotherUser(t, owner, first.OrganizationID) + + ctx := testutil.Context(t, testutil.WaitMedium) + // Verify the org of 3 members + members, err := orgAdminClient.OrganizationMembers(ctx, first.OrganizationID) + require.NoError(t, err) + require.Len(t, members, 3) + require.ElementsMatch(t, + []uuid.UUID{first.UserID, user.ID, orgAdmin.ID}, + db2sdk.List(members, onlyIDs)) + + // Delete a member + err = orgAdminClient.DeleteOrganizationMember(ctx, first.OrganizationID, user.Username) + require.NoError(t, err) + + members, err = orgAdminClient.OrganizationMembers(ctx, first.OrganizationID) + require.NoError(t, err) + require.Len(t, members, 2) + require.ElementsMatch(t, + []uuid.UUID{first.UserID, orgAdmin.ID}, + db2sdk.List(members, onlyIDs)) + }) + + t.Run("MemberNotInOrg", func(t *testing.T) { + t.Parallel() + owner := coderdtest.New(t, nil) + first := coderdtest.CreateFirstUser(t, owner) + orgAdminClient, _ := coderdtest.CreateAnotherUser(t, owner, first.OrganizationID, rbac.ScopedRoleOrgAdmin(first.OrganizationID)) + + ctx := testutil.Context(t, testutil.WaitMedium) + // nolint:gocritic // requires owner to make a new org + org, _ := owner.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ + Name: "other", + DisplayName: "", + Description: "", + Icon: "", + }) + + _, user := coderdtest.CreateAnotherUser(t, owner, org.ID) + + // Delete a user that is not in the organization + err := orgAdminClient.DeleteOrganizationMember(ctx, first.OrganizationID, user.Username) + require.Error(t, err) + var apiError *codersdk.Error + require.ErrorAs(t, err, &apiError) + require.Equal(t, http.StatusNotFound, apiError.StatusCode()) + }) +} + +func onlyIDs(u codersdk.OrganizationMemberWithName) uuid.UUID { + return u.UserID +} diff --git a/coderd/organizations.go b/coderd/organizations.go index 2a43ed2a7011a..24d55fa950c65 100644 --- a/coderd/organizations.go +++ b/coderd/organizations.go @@ -9,11 +9,11 @@ import ( "github.com/google/uuid" "golang.org/x/xerrors" + "github.com/coder/coder/v2/coderd/audit" "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/dbtime" "github.com/coder/coder/v2/coderd/httpapi" "github.com/coder/coder/v2/coderd/httpmw" - "github.com/coder/coder/v2/coderd/rbac" "github.com/coder/coder/v2/codersdk" ) @@ -42,8 +42,22 @@ func (*API) organization(rw http.ResponseWriter, r *http.Request) { // @Success 201 {object} codersdk.Organization // @Router /organizations [post] func (api *API) postOrganizations(rw 
http.ResponseWriter, r *http.Request) { - ctx := r.Context() - apiKey := httpmw.APIKey(r) + var ( + // organizationID is required before the audit log entry is created. + organizationID = uuid.New() + ctx = r.Context() + apiKey = httpmw.APIKey(r) + auditor = api.Auditor.Load() + aReq, commitAudit = audit.InitRequest[database.Organization](rw, &audit.RequestParams{ + Audit: *auditor, + Log: api.Logger, + Request: r, + Action: database.AuditActionCreate, + OrganizationID: organizationID, + }) + ) + aReq.Old = database.Organization{} + defer commitAudit() var req codersdk.CreateOrganizationRequest if !httpapi.Read(ctx, rw, r, &req) { @@ -74,12 +88,18 @@ func (api *API) postOrganizations(rw http.ResponseWriter, r *http.Request) { var organization database.Organization err = api.Database.InTx(func(tx database.Store) error { + if req.DisplayName == "" { + req.DisplayName = req.Name + } + organization, err = tx.InsertOrganization(ctx, database.InsertOrganizationParams{ - ID: uuid.New(), + ID: organizationID, Name: req.Name, + DisplayName: req.DisplayName, + Description: req.Description, + Icon: req.Icon, CreatedAt: dbtime.Now(), UpdatedAt: dbtime.Now(), - Description: "", }) if err != nil { return xerrors.Errorf("create organization: %w", err) @@ -89,12 +109,11 @@ func (api *API) postOrganizations(rw http.ResponseWriter, r *http.Request) { UserID: apiKey.UserID, CreatedAt: dbtime.Now(), UpdatedAt: dbtime.Now(), - Roles: []string{ + Roles: []string{ // TODO: When organizations are allowed to be created, we should // come back to determining the default role of the person who // creates the org. Until that happens, all users in an organization // should be just regular members. - rbac.RoleOrgMember(organization.ID), }, }) if err != nil { @@ -115,6 +134,7 @@ func (api *API) postOrganizations(rw http.ResponseWriter, r *http.Request) { return } + aReq.New = organization httpapi.Write(ctx, rw, http.StatusCreated, convertOrganization(organization)) } @@ -129,8 +149,20 @@ func (api *API) postOrganizations(rw http.ResponseWriter, r *http.Request) { // @Success 200 {object} codersdk.Organization // @Router /organizations/{organization} [patch] func (api *API) patchOrganization(rw http.ResponseWriter, r *http.Request) { - ctx := r.Context() - organization := httpmw.OrganizationParam(r) + var ( + ctx = r.Context() + organization = httpmw.OrganizationParam(r) + auditor = api.Auditor.Load() + aReq, commitAudit = audit.InitRequest[database.Organization](rw, &audit.RequestParams{ + Audit: *auditor, + Log: api.Logger, + Request: r, + Action: database.AuditActionWrite, + OrganizationID: organization.ID, + }) + ) + aReq.Old = organization + defer commitAudit() var req codersdk.UpdateOrganizationRequest if !httpapi.Read(ctx, rw, r, &req) { @@ -146,11 +178,42 @@ func (api *API) patchOrganization(rw http.ResponseWriter, r *http.Request) { return } - organization, err := api.Database.UpdateOrganization(ctx, database.UpdateOrganizationParams{ - ID: organization.ID, - UpdatedAt: dbtime.Now(), - Name: req.Name, + err := database.ReadModifyUpdate(api.Database, func(tx database.Store) error { + var err error + organization, err = tx.GetOrganizationByID(ctx, organization.ID) + if err != nil { + return err + } + + updateOrgParams := database.UpdateOrganizationParams{ + UpdatedAt: dbtime.Now(), + ID: organization.ID, + Name: organization.Name, + DisplayName: organization.DisplayName, + Description: organization.Description, + Icon: organization.Icon, + } + + if req.Name != "" { + updateOrgParams.Name = req.Name + } + if 
req.DisplayName != "" { + updateOrgParams.DisplayName = req.DisplayName + } + if req.Description != nil { + updateOrgParams.Description = *req.Description + } + if req.Icon != nil { + updateOrgParams.Icon = *req.Icon + } + + organization, err = tx.UpdateOrganization(ctx, updateOrgParams) + if err != nil { + return err + } + return nil }) + if httpapi.Is404Error(err) { httpapi.ResourceNotFound(rw) return @@ -173,6 +236,7 @@ func (api *API) patchOrganization(rw http.ResponseWriter, r *http.Request) { return } + aReq.New = organization httpapi.Write(ctx, rw, http.StatusOK, convertOrganization(organization)) } @@ -185,8 +249,20 @@ func (api *API) patchOrganization(rw http.ResponseWriter, r *http.Request) { // @Success 200 {object} codersdk.Response // @Router /organizations/{organization} [delete] func (api *API) deleteOrganization(rw http.ResponseWriter, r *http.Request) { - ctx := r.Context() - organization := httpmw.OrganizationParam(r) + var ( + ctx = r.Context() + organization = httpmw.OrganizationParam(r) + auditor = api.Auditor.Load() + aReq, commitAudit = audit.InitRequest[database.Organization](rw, &audit.RequestParams{ + Audit: *auditor, + Log: api.Logger, + Request: r, + Action: database.AuditActionDelete, + OrganizationID: organization.ID, + }) + ) + aReq.Old = organization + defer commitAudit() if organization.IsDefault { httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ @@ -204,6 +280,7 @@ func (api *API) deleteOrganization(rw http.ResponseWriter, r *http.Request) { return } + aReq.New = database.Organization{} httpapi.Write(ctx, rw, http.StatusOK, codersdk.Response{ Message: "Organization has been deleted.", }) @@ -212,10 +289,13 @@ func (api *API) deleteOrganization(rw http.ResponseWriter, r *http.Request) { // convertOrganization consumes the database representation and outputs an API friendly representation. func convertOrganization(organization database.Organization) codersdk.Organization { return codersdk.Organization{ - ID: organization.ID, - Name: organization.Name, - CreatedAt: organization.CreatedAt, - UpdatedAt: organization.UpdatedAt, - IsDefault: organization.IsDefault, + ID: organization.ID, + Name: organization.Name, + DisplayName: organization.DisplayName, + Description: organization.Description, + Icon: organization.Icon, + CreatedAt: organization.CreatedAt, + UpdatedAt: organization.UpdatedAt, + IsDefault: organization.IsDefault, } } diff --git a/coderd/organizations_test.go b/coderd/organizations_test.go index 8ce39c5593d90..347048ed67a5c 100644 --- a/coderd/organizations_test.go +++ b/coderd/organizations_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/require" "github.com/coder/coder/v2/coderd/coderdtest" + "github.com/coder/coder/v2/coderd/util/ptr" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/testutil" ) @@ -20,7 +21,8 @@ func TestMultiOrgFetch(t *testing.T) { makeOrgs := []string{"foo", "bar", "baz"} for _, name := range makeOrgs { _, err := client.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ - Name: name, + Name: name, + DisplayName: name, }) require.NoError(t, err) } @@ -45,7 +47,8 @@ func TestOrganizationsByUser(t *testing.T) { // Make an extra org, and it should not be defaulted. 
notDefault, err := client.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ - Name: "another", + Name: "another", + DisplayName: "Another", }) require.NoError(t, err) require.False(t, notDefault.IsDefault, "only 1 default org allowed") @@ -73,7 +76,8 @@ func TestOrganizationByUserAndName(t *testing.T) { ctx := testutil.Context(t, testutil.WaitLong) org, err := client.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ - Name: "another", + Name: "another", + DisplayName: "Another", }) require.NoError(t, err) _, err = other.OrganizationByUserAndName(ctx, codersdk.Me, org.Name) @@ -106,23 +110,60 @@ func TestPostOrganizationsByUser(t *testing.T) { org, err := client.Organization(ctx, user.OrganizationID) require.NoError(t, err) _, err = client.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ - Name: org.Name, + Name: org.Name, + DisplayName: org.DisplayName, }) var apiErr *codersdk.Error require.ErrorAs(t, err, &apiErr) require.Equal(t, http.StatusConflict, apiErr.StatusCode()) }) - t.Run("Create", func(t *testing.T) { + t.Run("InvalidName", func(t *testing.T) { t.Parallel() client := coderdtest.New(t, nil) _ = coderdtest.CreateFirstUser(t, client) ctx := testutil.Context(t, testutil.WaitLong) _, err := client.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ - Name: "new", + Name: "A name which is definitely not url safe", + DisplayName: "New", + }) + var apiErr *codersdk.Error + require.ErrorAs(t, err, &apiErr) + require.Equal(t, http.StatusBadRequest, apiErr.StatusCode()) + }) + + t.Run("Create", func(t *testing.T) { + t.Parallel() + client := coderdtest.New(t, nil) + _ = coderdtest.CreateFirstUser(t, client) + ctx := testutil.Context(t, testutil.WaitLong) + + o, err := client.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ + Name: "new-org", + DisplayName: "New organization", + Description: "A new organization to love and cherish forever.", + Icon: "/emojis/1f48f-1f3ff.png", + }) + require.NoError(t, err) + require.Equal(t, "new-org", o.Name) + require.Equal(t, "New organization", o.DisplayName) + require.Equal(t, "A new organization to love and cherish forever.", o.Description) + require.Equal(t, "/emojis/1f48f-1f3ff.png", o.Icon) + }) + + t.Run("CreateWithoutExplicitDisplayName", func(t *testing.T) { + t.Parallel() + client := coderdtest.New(t, nil) + _ = coderdtest.CreateFirstUser(t, client) + ctx := testutil.Context(t, testutil.WaitLong) + + o, err := client.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ + Name: "new-org", }) require.NoError(t, err) + require.Equal(t, "new-org", o.Name) + require.Equal(t, "new-org", o.DisplayName) // should match the given `Name` }) } @@ -137,7 +178,8 @@ func TestPatchOrganizationsByUser(t *testing.T) { originalOrg, err := client.Organization(ctx, user.OrganizationID) require.NoError(t, err) o, err := client.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ - Name: "something-unique", + Name: "something-unique", + DisplayName: "Something Unique", }) require.NoError(t, err) @@ -156,7 +198,8 @@ func TestPatchOrganizationsByUser(t *testing.T) { ctx := testutil.Context(t, testutil.WaitMedium) o, err := client.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ - Name: "something-unique", + Name: "something-unique", + DisplayName: "Something Unique", }) require.NoError(t, err) @@ -168,6 +211,26 @@ func TestPatchOrganizationsByUser(t *testing.T) { require.Equal(t, http.StatusBadRequest, apiErr.StatusCode()) }) + t.Run("InvalidName", func(t *testing.T) { + t.Parallel() + client 
:= coderdtest.New(t, nil) + _ = coderdtest.CreateFirstUser(t, client) + ctx := testutil.Context(t, testutil.WaitMedium) + + o, err := client.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ + Name: "something-unique", + DisplayName: "Something Unique", + }) + require.NoError(t, err) + + _, err = client.UpdateOrganization(ctx, o.ID.String(), codersdk.UpdateOrganizationRequest{ + Name: "something unique but not url safe", + }) + var apiErr *codersdk.Error + require.ErrorAs(t, err, &apiErr) + require.Equal(t, http.StatusBadRequest, apiErr.StatusCode()) + }) + t.Run("UpdateById", func(t *testing.T) { t.Parallel() client := coderdtest.New(t, nil) @@ -175,15 +238,16 @@ func TestPatchOrganizationsByUser(t *testing.T) { ctx := testutil.Context(t, testutil.WaitMedium) o, err := client.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ - Name: "new", + Name: "new-org", + DisplayName: "New organization", }) require.NoError(t, err) o, err = client.UpdateOrganization(ctx, o.ID.String(), codersdk.UpdateOrganizationRequest{ - Name: "new-new", + Name: "new-new-org", }) require.NoError(t, err) - require.Equal(t, "new-new", o.Name) + require.Equal(t, "new-new-org", o.Name) }) t.Run("UpdateByName", func(t *testing.T) { @@ -193,15 +257,81 @@ func TestPatchOrganizationsByUser(t *testing.T) { ctx := testutil.Context(t, testutil.WaitMedium) o, err := client.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ - Name: "new", + Name: "new-org", + DisplayName: "New organization", + }) + require.NoError(t, err) + + o, err = client.UpdateOrganization(ctx, o.Name, codersdk.UpdateOrganizationRequest{ + Name: "new-new-org", + }) + require.NoError(t, err) + require.Equal(t, "new-new-org", o.Name) + require.Equal(t, "New organization", o.DisplayName) // didn't change + }) + + t.Run("UpdateDisplayName", func(t *testing.T) { + t.Parallel() + client := coderdtest.New(t, nil) + _ = coderdtest.CreateFirstUser(t, client) + ctx := testutil.Context(t, testutil.WaitMedium) + + o, err := client.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ + Name: "new-org", + DisplayName: "New organization", + }) + require.NoError(t, err) + + o, err = client.UpdateOrganization(ctx, o.Name, codersdk.UpdateOrganizationRequest{ + DisplayName: "The Newest One", + }) + require.NoError(t, err) + require.Equal(t, "new-org", o.Name) // didn't change + require.Equal(t, "The Newest One", o.DisplayName) + }) + + t.Run("UpdateDescription", func(t *testing.T) { + t.Parallel() + client := coderdtest.New(t, nil) + _ = coderdtest.CreateFirstUser(t, client) + ctx := testutil.Context(t, testutil.WaitMedium) + + o, err := client.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ + Name: "new-org", + DisplayName: "New organization", }) require.NoError(t, err) o, err = client.UpdateOrganization(ctx, o.Name, codersdk.UpdateOrganizationRequest{ - Name: "new-new", + Description: ptr.Ref("wow, this organization description is so updated!"), }) + + require.NoError(t, err) + require.Equal(t, "new-org", o.Name) // didn't change + require.Equal(t, "New organization", o.DisplayName) // didn't change + require.Equal(t, "wow, this organization description is so updated!", o.Description) + }) + + t.Run("UpdateIcon", func(t *testing.T) { + t.Parallel() + client := coderdtest.New(t, nil) + _ = coderdtest.CreateFirstUser(t, client) + ctx := testutil.Context(t, testutil.WaitMedium) + + o, err := client.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ + Name: "new-org", + DisplayName: "New organization", + }) + 
require.NoError(t, err) + + o, err = client.UpdateOrganization(ctx, o.Name, codersdk.UpdateOrganizationRequest{ + Icon: ptr.Ref("/emojis/1f48f-1f3ff.png"), + }) + require.NoError(t, err) - require.Equal(t, "new-new", o.Name) + require.Equal(t, "new-org", o.Name) // didn't change + require.Equal(t, "New organization", o.DisplayName) // didn't change + require.Equal(t, "/emojis/1f48f-1f3ff.png", o.Icon) }) } @@ -229,7 +359,8 @@ func TestDeleteOrganizationsByUser(t *testing.T) { ctx := testutil.Context(t, testutil.WaitMedium) o, err := client.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ - Name: "doomed", + Name: "doomed", + DisplayName: "Doomed", }) require.NoError(t, err) @@ -244,7 +375,8 @@ func TestDeleteOrganizationsByUser(t *testing.T) { ctx := testutil.Context(t, testutil.WaitMedium) o, err := client.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ - Name: "doomed", + Name: "doomed", + DisplayName: "Doomed", }) require.NoError(t, err) diff --git a/coderd/pagination.go b/coderd/pagination.go index 02199a390ec60..0d01220d195e7 100644 --- a/coderd/pagination.go +++ b/coderd/pagination.go @@ -17,8 +17,10 @@ func parsePagination(w http.ResponseWriter, r *http.Request) (p codersdk.Paginat parser := httpapi.NewQueryParamParser() params := codersdk.Pagination{ AfterID: parser.UUID(queryParams, uuid.Nil, "after_id"), - Limit: int(parser.PositiveInt32(queryParams, 0, "limit")), - Offset: int(parser.PositiveInt32(queryParams, 0, "offset")), + // A limit of 0 should be interpreted by the SQL query as "null" or + // "no limit". Do not make this value anything besides 0. + Limit: int(parser.PositiveInt32(queryParams, 0, "limit")), + Offset: int(parser.PositiveInt32(queryParams, 0, "offset")), } if len(parser.Errors) > 0 { httpapi.Write(ctx, w, http.StatusBadRequest, codersdk.Response{ diff --git a/coderd/prometheusmetrics/insights/metricscollector_test.go b/coderd/prometheusmetrics/insights/metricscollector_test.go index 91ef3c7ee88fa..9179c9896235d 100644 --- a/coderd/prometheusmetrics/insights/metricscollector_test.go +++ b/coderd/prometheusmetrics/insights/metricscollector_test.go @@ -18,6 +18,7 @@ import ( "cdr.dev/slog" "cdr.dev/slog/sloggers/slogtest" + agentproto "github.com/coder/coder/v2/agent/proto" "github.com/coder/coder/v2/coderd/coderdtest" "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/dbauthz" @@ -87,25 +88,37 @@ func TestCollectInsights(t *testing.T) { ) // Start an agent so that we can generate stats. 
- var agentClients []*agentsdk.Client + var agentClients []agentproto.DRPCAgentClient for i, agent := range []database.WorkspaceAgent{agent1, agent2} { agentClient := agentsdk.New(client.URL) agentClient.SetSessionToken(agent.AuthToken.String()) agentClient.SDK.SetLogger(logger.Leveled(slog.LevelDebug).Named(fmt.Sprintf("agent%d", i+1))) - agentClients = append(agentClients, agentClient) + conn, err := agentClient.ConnectRPC(context.Background()) + require.NoError(t, err) + agentAPI := agentproto.NewDRPCAgentClient(conn) + agentClients = append(agentClients, agentAPI) } + defer func() { + for a := range agentClients { + err := agentClients[a].DRPCConn().Close() + require.NoError(t, err) + } + }() + // Fake app stats - _, err = agentClients[0].PostStats(context.Background(), &agentsdk.Stats{ - // ConnectionCount must be positive as database query ignores stats with no active connections at the time frame - ConnectionsByProto: map[string]int64{"TCP": 1}, - ConnectionCount: 1, - ConnectionMedianLatencyMS: 15, - // Session counts must be positive, but the exact value is ignored. - // Database query approximates it to 60s of usage. - SessionCountSSH: 99, - SessionCountJetBrains: 47, - SessionCountVSCode: 34, + _, err = agentClients[0].UpdateStats(context.Background(), &agentproto.UpdateStatsRequest{ + Stats: &agentproto.Stats{ + // ConnectionCount must be positive as database query ignores stats with no active connections at the time frame + ConnectionsByProto: map[string]int64{"TCP": 1}, + ConnectionCount: 1, + ConnectionMedianLatencyMs: 15, + // Session counts must be positive, but the exact value is ignored. + // Database query approximates it to 60s of usage. + SessionCountSsh: 99, + SessionCountJetbrains: 47, + SessionCountVscode: 34, + }, }) require.NoError(t, err, "unable to post fake stats") diff --git a/coderd/prometheusmetrics/prometheusmetrics.go b/coderd/prometheusmetrics/prometheusmetrics.go index fcc6958f39e84..b9a54633a5b13 100644 --- a/coderd/prometheusmetrics/prometheusmetrics.go +++ b/coderd/prometheusmetrics/prometheusmetrics.go @@ -120,9 +120,9 @@ func Workspaces(ctx context.Context, logger slog.Logger, registerer prometheus.R if errors.Is(err, sql.ErrNoRows) { // clear all series if there are no database entries workspaceLatestBuildTotals.Reset() + } else { + logger.Warn(ctx, "failed to load latest workspace builds", slog.Error(err)) } - - logger.Warn(ctx, "failed to load latest workspace builds", slog.Error(err)) return } jobIDs := make([]uuid.UUID, 0, len(builds)) diff --git a/coderd/prometheusmetrics/prometheusmetrics_test.go b/coderd/prometheusmetrics/prometheusmetrics_test.go index 9c4c9fca0b66f..8a4a152a86b4c 100644 --- a/coderd/prometheusmetrics/prometheusmetrics_test.go +++ b/coderd/prometheusmetrics/prometheusmetrics_test.go @@ -20,8 +20,8 @@ import ( "cdr.dev/slog" "cdr.dev/slog/sloggers/slogtest" + agentproto "github.com/coder/coder/v2/agent/proto" "github.com/coder/coder/v2/coderd/agentmetrics" - "github.com/coder/coder/v2/coderd/batchstats" "github.com/coder/coder/v2/coderd/coderdtest" "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/dbgen" @@ -29,6 +29,7 @@ import ( "github.com/coder/coder/v2/coderd/database/dbtestutil" "github.com/coder/coder/v2/coderd/database/dbtime" "github.com/coder/coder/v2/coderd/prometheusmetrics" + "github.com/coder/coder/v2/coderd/workspacestats" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/codersdk/agentsdk" "github.com/coder/coder/v2/cryptorand" @@ -391,14 +392,14 @@ func 
TestAgentStats(t *testing.T) { db, pubsub := dbtestutil.NewDB(t) log := slogtest.Make(t, nil).Leveled(slog.LevelDebug) - batcher, closeBatcher, err := batchstats.New(ctx, + batcher, closeBatcher, err := workspacestats.NewBatcher(ctx, // We had previously set the batch size to 1 here, but that caused // intermittent test flakes due to a race between the batcher completing // its flush and the test asserting that the metrics were collected. // Instead, we close the batcher after all stats have been posted, which // forces a flush. - batchstats.WithStore(db), - batchstats.WithLogger(log), + workspacestats.BatcherWithStore(db), + workspacestats.BatcherWithLogger(log), ) require.NoError(t, err, "create stats batcher failed") t.Cleanup(closeBatcher) @@ -415,36 +416,45 @@ func TestAgentStats(t *testing.T) { user := coderdtest.CreateFirstUser(t, client) - agent1 := prepareWorkspaceAndAgent(t, client, user, 1) - agent2 := prepareWorkspaceAndAgent(t, client, user, 2) - agent3 := prepareWorkspaceAndAgent(t, client, user, 3) + agent1 := prepareWorkspaceAndAgent(ctx, t, client, user, 1) + agent2 := prepareWorkspaceAndAgent(ctx, t, client, user, 2) + agent3 := prepareWorkspaceAndAgent(ctx, t, client, user, 3) + defer agent1.DRPCConn().Close() + defer agent2.DRPCConn().Close() + defer agent3.DRPCConn().Close() registry := prometheus.NewRegistry() // given var i int64 for i = 0; i < 3; i++ { - _, err = agent1.PostStats(ctx, &agentsdk.Stats{ - TxBytes: 1 + i, RxBytes: 2 + i, - SessionCountVSCode: 3 + i, SessionCountJetBrains: 4 + i, SessionCountReconnectingPTY: 5 + i, SessionCountSSH: 6 + i, - ConnectionCount: 7 + i, ConnectionMedianLatencyMS: 8000, - ConnectionsByProto: map[string]int64{"TCP": 1}, + _, err = agent1.UpdateStats(ctx, &agentproto.UpdateStatsRequest{ + Stats: &agentproto.Stats{ + TxBytes: 1 + i, RxBytes: 2 + i, + SessionCountVscode: 3 + i, SessionCountJetbrains: 4 + i, SessionCountReconnectingPty: 5 + i, SessionCountSsh: 6 + i, + ConnectionCount: 7 + i, ConnectionMedianLatencyMs: 8000, + ConnectionsByProto: map[string]int64{"TCP": 1}, + }, }) require.NoError(t, err) - _, err = agent2.PostStats(ctx, &agentsdk.Stats{ - TxBytes: 2 + i, RxBytes: 4 + i, - SessionCountVSCode: 6 + i, SessionCountJetBrains: 8 + i, SessionCountReconnectingPTY: 10 + i, SessionCountSSH: 12 + i, - ConnectionCount: 8 + i, ConnectionMedianLatencyMS: 10000, - ConnectionsByProto: map[string]int64{"TCP": 1}, + _, err = agent2.UpdateStats(ctx, &agentproto.UpdateStatsRequest{ + Stats: &agentproto.Stats{ + TxBytes: 2 + i, RxBytes: 4 + i, + SessionCountVscode: 6 + i, SessionCountJetbrains: 8 + i, SessionCountReconnectingPty: 10 + i, SessionCountSsh: 12 + i, + ConnectionCount: 8 + i, ConnectionMedianLatencyMs: 10000, + ConnectionsByProto: map[string]int64{"TCP": 1}, + }, }) require.NoError(t, err) - _, err = agent3.PostStats(ctx, &agentsdk.Stats{ - TxBytes: 3 + i, RxBytes: 6 + i, - SessionCountVSCode: 12 + i, SessionCountJetBrains: 14 + i, SessionCountReconnectingPTY: 16 + i, SessionCountSSH: 18 + i, - ConnectionCount: 9 + i, ConnectionMedianLatencyMS: 12000, - ConnectionsByProto: map[string]int64{"TCP": 1}, + _, err = agent3.UpdateStats(ctx, &agentproto.UpdateStatsRequest{ + Stats: &agentproto.Stats{ + TxBytes: 3 + i, RxBytes: 6 + i, + SessionCountVscode: 12 + i, SessionCountJetbrains: 14 + i, SessionCountReconnectingPty: 16 + i, SessionCountSsh: 18 + i, + ConnectionCount: 9 + i, ConnectionMedianLatencyMs: 12000, + ConnectionsByProto: map[string]int64{"TCP": 1}, + }, }) require.NoError(t, err) } @@ -596,7 +606,7 @@ func 
TestExperimentsMetric(t *testing.T) { } } -func prepareWorkspaceAndAgent(t *testing.T, client *codersdk.Client, user codersdk.CreateFirstUserResponse, workspaceNum int) *agentsdk.Client { +func prepareWorkspaceAndAgent(ctx context.Context, t *testing.T, client *codersdk.Client, user codersdk.CreateFirstUserResponse, workspaceNum int) agentproto.DRPCAgentClient { authToken := uuid.NewString() version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{ @@ -611,9 +621,12 @@ func prepareWorkspaceAndAgent(t *testing.T, client *codersdk.Client, user coders }) coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID) - agentClient := agentsdk.New(client.URL) - agentClient.SetSessionToken(authToken) - return agentClient + ac := agentsdk.New(client.URL) + ac.SetSessionToken(authToken) + conn, err := ac.ConnectRPC(ctx) + require.NoError(t, err) + agentAPI := agentproto.NewDRPCAgentClient(conn) + return agentAPI } var ( diff --git a/coderd/provisionerdserver/provisionerdserver.go b/coderd/provisionerdserver/provisionerdserver.go index 3f5876d644617..413ed999aa6a6 100644 --- a/coderd/provisionerdserver/provisionerdserver.go +++ b/coderd/provisionerdserver/provisionerdserver.go @@ -559,16 +559,17 @@ func (s *server) acquireProtoJob(ctx context.Context, job database.ProvisionerJo continue } - link, valid, err := config.RefreshToken(ctx, s.Database, link) - if err != nil { + refreshed, err := config.RefreshToken(ctx, s.Database, link) + if err != nil && !externalauth.IsInvalidTokenError(err) { return nil, failJob(fmt.Sprintf("refresh external auth link %q: %s", p.ID, err)) } - if !valid { + if err != nil { + // Invalid tokens are skipped continue } externalAuthProviders = append(externalAuthProviders, &sdkproto.ExternalAuthProvider{ Id: p.ID, - AccessToken: link.OAuthAccessToken, + AccessToken: refreshed.OAuthAccessToken, }) } @@ -597,6 +598,7 @@ func (s *server) acquireProtoJob(ctx context.Context, job database.ProvisionerJo WorkspaceOwnerSessionToken: sessionToken, WorkspaceOwnerSshPublicKey: ownerSSHPublicKey, WorkspaceOwnerSshPrivateKey: ownerSSHPrivateKey, + WorkspaceBuildId: workspaceBuild.ID.String(), }, LogLevel: input.LogLevel, }, diff --git a/coderd/provisionerdserver/provisionerdserver_test.go b/coderd/provisionerdserver/provisionerdserver_test.go index e0403b7c7db2d..36f2ac5f601ce 100644 --- a/coderd/provisionerdserver/provisionerdserver_test.go +++ b/coderd/provisionerdserver/provisionerdserver_test.go @@ -365,6 +365,7 @@ func TestAcquireJob(t *testing.T) { WorkspaceOwnerSessionToken: sessionToken, WorkspaceOwnerSshPublicKey: sshKey.PublicKey, WorkspaceOwnerSshPrivateKey: sshKey.PrivateKey, + WorkspaceBuildId: build.ID.String(), }, }, }) diff --git a/coderd/rbac/authz.go b/coderd/rbac/authz.go index 859782d0286b1..224e153a8b4b7 100644 --- a/coderd/rbac/authz.go +++ b/coderd/rbac/authz.go @@ -75,6 +75,17 @@ type Subject struct { cachedASTValue ast.Value } +// RegoValueOk is only used for unit testing. There is no easy way +// to get the error for the unexported method, and this is intentional. +// Failed rego values can default to the backup json marshal method, +// so errors are not fatal. Unit tests should be aware when the custom +// rego marshaller fails. +func (s Subject) RegoValueOk() error { + tmp := s + _, err := tmp.regoValue() + return err +} + // WithCachedASTValue can be called if the subject is static. This will compute // the ast value once and cache it for future calls. 
func (s Subject) WithCachedASTValue() Subject { @@ -110,13 +121,13 @@ func (s Subject) SafeScopeName() string { if s.Scope == nil { return "no-scope" } - return s.Scope.Name() + return s.Scope.Name().String() } // SafeRoleNames prevent nil pointer dereference. -func (s Subject) SafeRoleNames() []string { +func (s Subject) SafeRoleNames() []RoleIdentifier { if s.Roles == nil { - return []string{} + return []RoleIdentifier{} } return s.Roles.Names() } @@ -707,9 +718,15 @@ func (c *authCache) Prepare(ctx context.Context, subject Subject, action policy. // rbacTraceAttributes are the attributes that are added to all spans created by // the rbac package. These attributes should help to debug slow spans. func rbacTraceAttributes(actor Subject, action policy.Action, objectType string, extra ...attribute.KeyValue) trace.SpanStartOption { + uniqueRoleNames := actor.SafeRoleNames() + roleStrings := make([]string, 0, len(uniqueRoleNames)) + for _, roleName := range uniqueRoleNames { + roleName := roleName + roleStrings = append(roleStrings, roleName.String()) + } return trace.WithAttributes( append(extra, - attribute.StringSlice("subject_roles", actor.SafeRoleNames()), + attribute.StringSlice("subject_roles", roleStrings), attribute.Int("num_subject_roles", len(actor.SafeRoleNames())), attribute.Int("num_groups", len(actor.Groups)), attribute.String("scope", actor.SafeScopeName()), diff --git a/coderd/rbac/authz_internal_test.go b/coderd/rbac/authz_internal_test.go index 7b53939a3651b..79fe9af67a607 100644 --- a/coderd/rbac/authz_internal_test.go +++ b/coderd/rbac/authz_internal_test.go @@ -56,7 +56,7 @@ func TestFilterError(t *testing.T) { auth := NewAuthorizer(prometheus.NewRegistry()) subject := Subject{ ID: uuid.NewString(), - Roles: RoleNames{}, + Roles: RoleIdentifiers{}, Groups: []string{}, Scope: ScopeAll, } @@ -77,7 +77,7 @@ func TestFilterError(t *testing.T) { subject := Subject{ ID: uuid.NewString(), - Roles: RoleNames{ + Roles: RoleIdentifiers{ RoleOwner(), }, Groups: []string{}, @@ -159,7 +159,7 @@ func TestFilter(t *testing.T) { Name: "NoRoles", Actor: Subject{ ID: userIDs[0].String(), - Roles: RoleNames{}, + Roles: RoleIdentifiers{}, }, ObjectType: ResourceWorkspace.Type, Action: policy.ActionRead, @@ -168,7 +168,7 @@ func TestFilter(t *testing.T) { Name: "Admin", Actor: Subject{ ID: userIDs[0].String(), - Roles: RoleNames{RoleOrgMember(orgIDs[0]), "auditor", RoleOwner(), RoleMember()}, + Roles: RoleIdentifiers{ScopedRoleOrgMember(orgIDs[0]), RoleAuditor(), RoleOwner(), RoleMember()}, }, ObjectType: ResourceWorkspace.Type, Action: policy.ActionRead, @@ -177,7 +177,7 @@ func TestFilter(t *testing.T) { Name: "OrgAdmin", Actor: Subject{ ID: userIDs[0].String(), - Roles: RoleNames{RoleOrgMember(orgIDs[0]), RoleOrgAdmin(orgIDs[0]), RoleMember()}, + Roles: RoleIdentifiers{ScopedRoleOrgMember(orgIDs[0]), ScopedRoleOrgAdmin(orgIDs[0]), RoleMember()}, }, ObjectType: ResourceWorkspace.Type, Action: policy.ActionRead, @@ -186,7 +186,7 @@ func TestFilter(t *testing.T) { Name: "OrgMember", Actor: Subject{ ID: userIDs[0].String(), - Roles: RoleNames{RoleOrgMember(orgIDs[0]), RoleOrgMember(orgIDs[1]), RoleMember()}, + Roles: RoleIdentifiers{ScopedRoleOrgMember(orgIDs[0]), ScopedRoleOrgMember(orgIDs[1]), RoleMember()}, }, ObjectType: ResourceWorkspace.Type, Action: policy.ActionRead, @@ -195,12 +195,12 @@ func TestFilter(t *testing.T) { Name: "ManyRoles", Actor: Subject{ ID: userIDs[0].String(), - Roles: RoleNames{ - RoleOrgMember(orgIDs[0]), RoleOrgAdmin(orgIDs[0]), - RoleOrgMember(orgIDs[1]), 
RoleOrgAdmin(orgIDs[1]), - RoleOrgMember(orgIDs[2]), RoleOrgAdmin(orgIDs[2]), - RoleOrgMember(orgIDs[4]), - RoleOrgMember(orgIDs[5]), + Roles: RoleIdentifiers{ + ScopedRoleOrgMember(orgIDs[0]), ScopedRoleOrgAdmin(orgIDs[0]), + ScopedRoleOrgMember(orgIDs[1]), ScopedRoleOrgAdmin(orgIDs[1]), + ScopedRoleOrgMember(orgIDs[2]), ScopedRoleOrgAdmin(orgIDs[2]), + ScopedRoleOrgMember(orgIDs[4]), + ScopedRoleOrgMember(orgIDs[5]), RoleMember(), }, }, @@ -211,7 +211,7 @@ func TestFilter(t *testing.T) { Name: "SiteMember", Actor: Subject{ ID: userIDs[0].String(), - Roles: RoleNames{RoleMember()}, + Roles: RoleIdentifiers{RoleMember()}, }, ObjectType: ResourceUser.Type, Action: policy.ActionRead, @@ -220,11 +220,11 @@ func TestFilter(t *testing.T) { Name: "ReadOrgs", Actor: Subject{ ID: userIDs[0].String(), - Roles: RoleNames{ - RoleOrgMember(orgIDs[0]), - RoleOrgMember(orgIDs[1]), - RoleOrgMember(orgIDs[2]), - RoleOrgMember(orgIDs[3]), + Roles: RoleIdentifiers{ + ScopedRoleOrgMember(orgIDs[0]), + ScopedRoleOrgMember(orgIDs[1]), + ScopedRoleOrgMember(orgIDs[2]), + ScopedRoleOrgMember(orgIDs[3]), RoleMember(), }, }, @@ -235,7 +235,7 @@ func TestFilter(t *testing.T) { Name: "ScopeApplicationConnect", Actor: Subject{ ID: userIDs[0].String(), - Roles: RoleNames{RoleOrgMember(orgIDs[0]), "auditor", RoleOwner(), RoleMember()}, + Roles: RoleIdentifiers{ScopedRoleOrgMember(orgIDs[0]), RoleAuditor(), RoleOwner(), RoleMember()}, }, ObjectType: ResourceWorkspace.Type, Action: policy.ActionRead, @@ -297,7 +297,7 @@ func TestAuthorizeDomain(t *testing.T) { Groups: []string{allUsersGroup}, Roles: Roles{ must(RoleByName(RoleMember())), - must(RoleByName(RoleOrgMember(defOrg))), + must(RoleByName(ScopedRoleOrgMember(defOrg))), }, } @@ -394,7 +394,7 @@ func TestAuthorizeDomain(t *testing.T) { ID: "me", Scope: must(ExpandScope(ScopeAll)), Roles: Roles{{ - Name: "deny-all", + Identifier: RoleIdentifier{Name: "deny-all"}, // List out deny permissions explicitly Site: []Permission{ { @@ -435,7 +435,7 @@ func TestAuthorizeDomain(t *testing.T) { ID: "me", Scope: must(ExpandScope(ScopeAll)), Roles: Roles{ - must(RoleByName(RoleOrgAdmin(defOrg))), + must(RoleByName(ScopedRoleOrgAdmin(defOrg))), must(RoleByName(RoleMember())), }, } @@ -507,7 +507,7 @@ func TestAuthorizeDomain(t *testing.T) { ID: "me", Scope: must(ExpandScope(ScopeApplicationConnect)), Roles: Roles{ - must(RoleByName(RoleOrgMember(defOrg))), + must(RoleByName(ScopedRoleOrgMember(defOrg))), must(RoleByName(RoleMember())), }, } @@ -607,8 +607,8 @@ func TestAuthorizeDomain(t *testing.T) { Scope: must(ExpandScope(ScopeAll)), Roles: Roles{ { - Name: "ReadOnlyOrgAndUser", - Site: []Permission{}, + Identifier: RoleIdentifier{Name: "ReadOnlyOrgAndUser"}, + Site: []Permission{}, Org: map[string][]Permission{ defOrg.String(): {{ Negate: false, @@ -701,7 +701,7 @@ func TestAuthorizeLevels(t *testing.T) { Roles: Roles{ must(RoleByName(RoleOwner())), { - Name: "org-deny:" + defOrg.String(), + Identifier: RoleIdentifier{Name: "org-deny:", OrganizationID: defOrg}, Org: map[string][]Permission{ defOrg.String(): { { @@ -713,7 +713,7 @@ func TestAuthorizeLevels(t *testing.T) { }, }, { - Name: "user-deny-all", + Identifier: RoleIdentifier{Name: "user-deny-all"}, // List out deny permissions explicitly User: []Permission{ { @@ -761,7 +761,7 @@ func TestAuthorizeLevels(t *testing.T) { Scope: must(ExpandScope(ScopeAll)), Roles: Roles{ { - Name: "site-noise", + Identifier: RoleIdentifier{Name: "site-noise"}, Site: []Permission{ { Negate: true, @@ -770,9 +770,9 @@ func 
TestAuthorizeLevels(t *testing.T) { }, }, }, - must(RoleByName(RoleOrgAdmin(defOrg))), + must(RoleByName(ScopedRoleOrgAdmin(defOrg))), { - Name: "user-deny-all", + Identifier: RoleIdentifier{Name: "user-deny-all"}, // List out deny permissions explicitly User: []Permission{ { @@ -856,7 +856,7 @@ func TestAuthorizeScope(t *testing.T) { ID: "me", Roles: Roles{ must(RoleByName(RoleMember())), - must(RoleByName(RoleOrgMember(defOrg))), + must(RoleByName(ScopedRoleOrgMember(defOrg))), }, Scope: must(ExpandScope(ScopeApplicationConnect)), } @@ -892,11 +892,11 @@ func TestAuthorizeScope(t *testing.T) { ID: "me", Roles: Roles{ must(RoleByName(RoleMember())), - must(RoleByName(RoleOrgMember(defOrg))), + must(RoleByName(ScopedRoleOrgMember(defOrg))), }, Scope: Scope{ Role: Role{ - Name: "workspace_agent", + Identifier: RoleIdentifier{Name: "workspace_agent"}, DisplayName: "Workspace Agent", Site: Permissions(map[string][]policy.Action{ // Only read access for workspaces. @@ -981,11 +981,11 @@ func TestAuthorizeScope(t *testing.T) { ID: "me", Roles: Roles{ must(RoleByName(RoleMember())), - must(RoleByName(RoleOrgMember(defOrg))), + must(RoleByName(ScopedRoleOrgMember(defOrg))), }, Scope: Scope{ Role: Role{ - Name: "create_workspace", + Identifier: RoleIdentifier{Name: "create_workspace"}, DisplayName: "Create Workspace", Site: Permissions(map[string][]policy.Action{ // Only read access for workspaces. diff --git a/coderd/rbac/authz_test.go b/coderd/rbac/authz_test.go index 05940856ec583..0c46096c74e6f 100644 --- a/coderd/rbac/authz_test.go +++ b/coderd/rbac/authz_test.go @@ -41,7 +41,7 @@ func benchmarkUserCases() (cases []benchmarkCase, users uuid.UUID, orgs []uuid.U Name: "NoRoles", Actor: rbac.Subject{ ID: user.String(), - Roles: rbac.RoleNames{}, + Roles: rbac.RoleIdentifiers{}, Scope: rbac.ScopeAll, }, }, @@ -49,7 +49,7 @@ func benchmarkUserCases() (cases []benchmarkCase, users uuid.UUID, orgs []uuid.U Name: "Admin", Actor: rbac.Subject{ // Give some extra roles that an admin might have - Roles: rbac.RoleNames{rbac.RoleOrgMember(orgs[0]), "auditor", rbac.RoleOwner(), rbac.RoleMember()}, + Roles: rbac.RoleIdentifiers{rbac.ScopedRoleOrgMember(orgs[0]), rbac.RoleAuditor(), rbac.RoleOwner(), rbac.RoleMember()}, ID: user.String(), Scope: rbac.ScopeAll, Groups: noiseGroups, @@ -58,7 +58,7 @@ func benchmarkUserCases() (cases []benchmarkCase, users uuid.UUID, orgs []uuid.U { Name: "OrgAdmin", Actor: rbac.Subject{ - Roles: rbac.RoleNames{rbac.RoleOrgMember(orgs[0]), rbac.RoleOrgAdmin(orgs[0]), rbac.RoleMember()}, + Roles: rbac.RoleIdentifiers{rbac.ScopedRoleOrgMember(orgs[0]), rbac.ScopedRoleOrgAdmin(orgs[0]), rbac.RoleMember()}, ID: user.String(), Scope: rbac.ScopeAll, Groups: noiseGroups, @@ -68,7 +68,7 @@ func benchmarkUserCases() (cases []benchmarkCase, users uuid.UUID, orgs []uuid.U Name: "OrgMember", Actor: rbac.Subject{ // Member of 2 orgs - Roles: rbac.RoleNames{rbac.RoleOrgMember(orgs[0]), rbac.RoleOrgMember(orgs[1]), rbac.RoleMember()}, + Roles: rbac.RoleIdentifiers{rbac.ScopedRoleOrgMember(orgs[0]), rbac.ScopedRoleOrgMember(orgs[1]), rbac.RoleMember()}, ID: user.String(), Scope: rbac.ScopeAll, Groups: noiseGroups, @@ -78,10 +78,10 @@ func benchmarkUserCases() (cases []benchmarkCase, users uuid.UUID, orgs []uuid.U Name: "ManyRoles", Actor: rbac.Subject{ // Admin of many orgs - Roles: rbac.RoleNames{ - rbac.RoleOrgMember(orgs[0]), rbac.RoleOrgAdmin(orgs[0]), - rbac.RoleOrgMember(orgs[1]), rbac.RoleOrgAdmin(orgs[1]), - rbac.RoleOrgMember(orgs[2]), rbac.RoleOrgAdmin(orgs[2]), + Roles: 
rbac.RoleIdentifiers{ + rbac.ScopedRoleOrgMember(orgs[0]), rbac.ScopedRoleOrgAdmin(orgs[0]), + rbac.ScopedRoleOrgMember(orgs[1]), rbac.ScopedRoleOrgAdmin(orgs[1]), + rbac.ScopedRoleOrgMember(orgs[2]), rbac.ScopedRoleOrgAdmin(orgs[2]), rbac.RoleMember(), }, ID: user.String(), @@ -93,10 +93,10 @@ func benchmarkUserCases() (cases []benchmarkCase, users uuid.UUID, orgs []uuid.U Name: "ManyRolesCachedSubject", Actor: rbac.Subject{ // Admin of many orgs - Roles: rbac.RoleNames{ - rbac.RoleOrgMember(orgs[0]), rbac.RoleOrgAdmin(orgs[0]), - rbac.RoleOrgMember(orgs[1]), rbac.RoleOrgAdmin(orgs[1]), - rbac.RoleOrgMember(orgs[2]), rbac.RoleOrgAdmin(orgs[2]), + Roles: rbac.RoleIdentifiers{ + rbac.ScopedRoleOrgMember(orgs[0]), rbac.ScopedRoleOrgAdmin(orgs[0]), + rbac.ScopedRoleOrgMember(orgs[1]), rbac.ScopedRoleOrgAdmin(orgs[1]), + rbac.ScopedRoleOrgMember(orgs[2]), rbac.ScopedRoleOrgAdmin(orgs[2]), rbac.RoleMember(), }, ID: user.String(), @@ -108,7 +108,7 @@ func benchmarkUserCases() (cases []benchmarkCase, users uuid.UUID, orgs []uuid.U Name: "AdminWithScope", Actor: rbac.Subject{ // Give some extra roles that an admin might have - Roles: rbac.RoleNames{rbac.RoleOrgMember(orgs[0]), "auditor", rbac.RoleOwner(), rbac.RoleMember()}, + Roles: rbac.RoleIdentifiers{rbac.ScopedRoleOrgMember(orgs[0]), rbac.RoleAuditor(), rbac.RoleOwner(), rbac.RoleMember()}, ID: user.String(), Scope: rbac.ScopeApplicationConnect, Groups: noiseGroups, @@ -119,8 +119,8 @@ func benchmarkUserCases() (cases []benchmarkCase, users uuid.UUID, orgs []uuid.U Name: "StaticRoles", Actor: rbac.Subject{ // Give some extra roles that an admin might have - Roles: rbac.RoleNames{ - "auditor", rbac.RoleOwner(), rbac.RoleMember(), + Roles: rbac.RoleIdentifiers{ + rbac.RoleAuditor(), rbac.RoleOwner(), rbac.RoleMember(), rbac.RoleTemplateAdmin(), rbac.RoleUserAdmin(), }, ID: user.String(), @@ -133,8 +133,8 @@ func benchmarkUserCases() (cases []benchmarkCase, users uuid.UUID, orgs []uuid.U Name: "StaticRolesWithCache", Actor: rbac.Subject{ // Give some extra roles that an admin might have - Roles: rbac.RoleNames{ - "auditor", rbac.RoleOwner(), rbac.RoleMember(), + Roles: rbac.RoleIdentifiers{ + rbac.RoleAuditor(), rbac.RoleOwner(), rbac.RoleMember(), rbac.RoleTemplateAdmin(), rbac.RoleUserAdmin(), }, ID: user.String(), diff --git a/coderd/rbac/object_gen.go b/coderd/rbac/object_gen.go index 9ab848d795b1c..5b39b846195dd 100644 --- a/coderd/rbac/object_gen.go +++ b/coderd/rbac/object_gen.go @@ -28,6 +28,7 @@ var ( // ResourceAssignOrgRole // Valid Actions // - "ActionAssign" :: ability to assign org scoped roles + // - "ActionCreate" :: ability to create/delete/edit custom roles within an organization // - "ActionDelete" :: ability to delete org scoped roles // - "ActionRead" :: view what roles are assignable ResourceAssignOrgRole = Object{ diff --git a/coderd/rbac/policy/policy.go b/coderd/rbac/policy/policy.go index 2d3213264a514..eec8865d09317 100644 --- a/coderd/rbac/policy/policy.go +++ b/coderd/rbac/policy/policy.go @@ -218,6 +218,7 @@ var RBACPermissions = map[string]PermissionDefinition{ ActionAssign: actDef("ability to assign org scoped roles"), ActionRead: actDef("view what roles are assignable"), ActionDelete: actDef("ability to delete org scoped roles"), + ActionCreate: actDef("ability to create/delete/edit custom roles within an organization"), }, }, "oauth2_app": { diff --git a/coderd/rbac/roles.go b/coderd/rbac/roles.go index 137d2c0c1258b..4804cdce2eae1 100644 --- a/coderd/rbac/roles.go +++ b/coderd/rbac/roles.go @@ -1,6 +1,7 @@ 
package rbac import ( + "encoding/json" "errors" "sort" "strings" @@ -23,7 +24,8 @@ const ( // customSiteRole is a placeholder for all custom site roles. // This is used for what roles can assign other roles. // TODO: Make this more dynamic to allow other roles to grant. - customSiteRole string = "custom-site-role" + customSiteRole string = "custom-site-role" + customOrganizationRole string = "custom-organization-role" orgAdmin string = "organization-admin" orgMember string = "organization-member" @@ -34,48 +36,126 @@ func init() { ReloadBuiltinRoles(nil) } -// RoleNames is a list of user assignable role names. The role names must be +// RoleIdentifiers is a list of user assignable role names. The role names must be // in the builtInRoles map. Any non-user assignable roles will generate an // error on Expand. -type RoleNames []string +type RoleIdentifiers []RoleIdentifier -func (names RoleNames) Expand() ([]Role, error) { +func (names RoleIdentifiers) Expand() ([]Role, error) { return rolesByNames(names) } -func (names RoleNames) Names() []string { +func (names RoleIdentifiers) Names() []RoleIdentifier { return names } +// RoleIdentifier contains both the name of the role, and any organizational scope. +// Both fields are required to be globally unique and identifiable. +type RoleIdentifier struct { + Name string + // OrganizationID is uuid.Nil for unscoped roles (aka deployment wide) + OrganizationID uuid.UUID +} + +func (r RoleIdentifier) IsOrgRole() bool { + return r.OrganizationID != uuid.Nil +} + +// RoleNameFromString takes a formatted string '[:org_id]'. +func RoleNameFromString(input string) (RoleIdentifier, error) { + var role RoleIdentifier + + arr := strings.Split(input, ":") + if len(arr) > 2 { + return role, xerrors.Errorf("too many colons in role name") + } + + if len(arr) == 0 { + return role, xerrors.Errorf("empty string not a valid role") + } + + if arr[0] == "" { + return role, xerrors.Errorf("role cannot be the empty string") + } + + role.Name = arr[0] + + if len(arr) == 2 { + orgID, err := uuid.Parse(arr[1]) + if err != nil { + return role, xerrors.Errorf("%q not a valid uuid: %w", arr[1], err) + } + role.OrganizationID = orgID + } + return role, nil +} + +func (r RoleIdentifier) String() string { + if r.OrganizationID != uuid.Nil { + return r.Name + ":" + r.OrganizationID.String() + } + return r.Name +} + +func (r RoleIdentifier) UniqueName() string { + return r.String() +} + +func (r *RoleIdentifier) MarshalJSON() ([]byte, error) { + return json.Marshal(r.String()) +} + +func (r *RoleIdentifier) UnmarshalJSON(data []byte) error { + var str string + err := json.Unmarshal(data, &str) + if err != nil { + return err + } + + v, err := RoleNameFromString(str) + if err != nil { + return err + } + + *r = v + return nil +} + // The functions below ONLY need to exist for roles that are "defaulted" in some way. // Any other roles (like auditor), can be listed and let the user select/assigned. // Once we have a database implementation, the "default" roles can be defined on the // site and orgs, and these functions can be removed. 
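// Illustrative sketch (not part of the change): the new RoleIdentifier introduced in this
// hunk round-trips through the "name[:org_id]" string form used by String(),
// RoleNameFromString(), and the JSON marshalling above. The UUID literal below is made up;
// orgAdmin is the "organization-admin" constant shown earlier in this file. The role
// constructor changes continue directly below.
//
//	orgID := uuid.MustParse("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")
//	id := RoleIdentifier{Name: orgAdmin, OrganizationID: orgID}
//	s := id.String() // "organization-admin:aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
//	parsed, err := RoleNameFromString(s)
//	// err == nil, parsed == id, and parsed.IsOrgRole() == true since OrganizationID != uuid.Nil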
-func RoleOwner() string {
-	return RoleName(owner, "")
-}
-
-func CustomSiteRole() string { return RoleName(customSiteRole, "") }
-
-func RoleTemplateAdmin() string {
-	return RoleName(templateAdmin, "")
+func RoleOwner() RoleIdentifier { return RoleIdentifier{Name: owner} }
+func CustomSiteRole() RoleIdentifier { return RoleIdentifier{Name: customSiteRole} }
+func CustomOrganizationRole(orgID uuid.UUID) RoleIdentifier {
+	return RoleIdentifier{Name: customOrganizationRole, OrganizationID: orgID}
 }
+func RoleTemplateAdmin() RoleIdentifier { return RoleIdentifier{Name: templateAdmin} }
+func RoleUserAdmin() RoleIdentifier { return RoleIdentifier{Name: userAdmin} }
+func RoleMember() RoleIdentifier { return RoleIdentifier{Name: member} }
+func RoleAuditor() RoleIdentifier { return RoleIdentifier{Name: auditor} }
 
-func RoleUserAdmin() string {
-	return RoleName(userAdmin, "")
+func RoleOrgAdmin() string {
+	return orgAdmin
 }
 
-func RoleMember() string {
-	return RoleName(member, "")
+func RoleOrgMember() string {
+	return orgMember
 }
 
-func RoleOrgAdmin(organizationID uuid.UUID) string {
-	return RoleName(orgAdmin, organizationID.String())
+// ScopedRoleOrgAdmin is the org role with the organization ID.
+// Deprecated: This was used before organization scope was included as a
+// field in all user facing APIs. Usage of 'RoleOrgAdmin()' is preferred.
+func ScopedRoleOrgAdmin(organizationID uuid.UUID) RoleIdentifier {
+	return RoleIdentifier{Name: orgAdmin, OrganizationID: organizationID}
 }
 
-func RoleOrgMember(organizationID uuid.UUID) string {
-	return RoleName(orgMember, organizationID.String())
+// ScopedRoleOrgMember is the org role with the organization ID.
+// Deprecated: This was used before organization scope was included as a
+// field in all user facing APIs. Usage of 'RoleOrgMember()' is preferred.
+func ScopedRoleOrgMember(organizationID uuid.UUID) RoleIdentifier {
+	return RoleIdentifier{Name: orgMember, OrganizationID: organizationID}
 }
 
 func allPermsExcept(excepts ...Objecter) []Permission {
@@ -113,12 +193,19 @@ func allPermsExcept(excepts ...Objecter) []Permission {
 	//
 	// This map will be replaced by database storage defined by this ticket.
 	// https://github.com/coder/coder/issues/1194
-var builtInRoles map[string]func(orgID string) Role
+var builtInRoles map[string]func(orgID uuid.UUID) Role
 
 type RoleOptions struct {
 	NoOwnerWorkspaceExec bool
 }
 
+// ReservedRoleName exists because the database should only allow unique role
+// names, but some roles are built in. So these names are reserved.
+func ReservedRoleName(name string) bool {
+	_, ok := builtInRoles[name]
+	return ok
+}
+
 // ReloadBuiltinRoles loads the static roles into the builtInRoles map.
 // This can be called again with a different config to change the behavior.
 //
@@ -144,7 +231,7 @@ func ReloadBuiltinRoles(opts *RoleOptions) {
 	// on every authorize call. 'withCachedRegoValue' can be used as well to
 	// preallocate the rego value that is used by the rego eval engine.
 	ownerRole := Role{
-		Name:        owner,
+		Identifier:  RoleOwner(),
 		DisplayName: "Owner",
 		Site: append(
 			// Workspace dormancy and workspace are omitted.
@@ -160,7 +247,7 @@ func ReloadBuiltinRoles(opts *RoleOptions) { }.withCachedRegoValue() memberRole := Role{ - Name: member, + Identifier: RoleMember(), DisplayName: "Member", Site: Permissions(map[string][]policy.Action{ ResourceAssignRole.Type: {policy.ActionRead}, @@ -186,7 +273,7 @@ func ReloadBuiltinRoles(opts *RoleOptions) { }.withCachedRegoValue() auditorRole := Role{ - Name: auditor, + Identifier: RoleAuditor(), DisplayName: "Auditor", Site: Permissions(map[string][]policy.Action{ // Should be able to read all template details, even in orgs they @@ -206,7 +293,7 @@ func ReloadBuiltinRoles(opts *RoleOptions) { }.withCachedRegoValue() templateAdminRole := Role{ - Name: templateAdmin, + Identifier: RoleTemplateAdmin(), DisplayName: "Template Admin", Site: Permissions(map[string][]policy.Action{ ResourceTemplate.Type: {policy.ActionCreate, policy.ActionRead, policy.ActionUpdate, policy.ActionDelete, policy.ActionViewInsights}, @@ -227,10 +314,13 @@ func ReloadBuiltinRoles(opts *RoleOptions) { }.withCachedRegoValue() userAdminRole := Role{ - Name: userAdmin, + Identifier: RoleUserAdmin(), DisplayName: "User Admin", Site: Permissions(map[string][]policy.Action{ ResourceAssignRole.Type: {policy.ActionAssign, policy.ActionDelete, policy.ActionRead}, + // Need organization assign as well to create users. At present, creating a user + // will always assign them to some organization. + ResourceAssignOrgRole.Type: {policy.ActionAssign, policy.ActionDelete, policy.ActionRead}, ResourceUser.Type: { policy.ActionCreate, policy.ActionRead, policy.ActionUpdate, policy.ActionDelete, policy.ActionUpdatePersonal, policy.ActionReadPersonal, @@ -243,42 +333,42 @@ func ReloadBuiltinRoles(opts *RoleOptions) { User: []Permission{}, }.withCachedRegoValue() - builtInRoles = map[string]func(orgID string) Role{ + builtInRoles = map[string]func(orgID uuid.UUID) Role{ // admin grants all actions to all resources. - owner: func(_ string) Role { + owner: func(_ uuid.UUID) Role { return ownerRole }, // member grants all actions to all resources owned by the user - member: func(_ string) Role { + member: func(_ uuid.UUID) Role { return memberRole }, // auditor provides all permissions required to effectively read and understand // audit log events. // TODO: Finish the auditor as we add resources. - auditor: func(_ string) Role { + auditor: func(_ uuid.UUID) Role { return auditorRole }, - templateAdmin: func(_ string) Role { + templateAdmin: func(_ uuid.UUID) Role { return templateAdminRole }, - userAdmin: func(_ string) Role { + userAdmin: func(_ uuid.UUID) Role { return userAdminRole }, // orgAdmin returns a role with all actions allows in a given // organization scope. - orgAdmin: func(organizationID string) Role { + orgAdmin: func(organizationID uuid.UUID) Role { return Role{ - Name: RoleName(orgAdmin, organizationID), + Identifier: RoleIdentifier{Name: orgAdmin, OrganizationID: organizationID}, DisplayName: "Organization Admin", Site: []Permission{}, Org: map[string][]Permission{ // Org admins should not have workspace exec perms. 
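+					// ResourceAssignRole (site-wide role assignment) is excluded here as
+					// well; org admins still receive ResourceAssignOrgRole permissions via
+					// allPermsExcept, which keeps every other org-scoped resource.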
- organizationID: append(allPermsExcept(ResourceWorkspace, ResourceWorkspaceDormant), Permissions(map[string][]policy.Action{ + organizationID.String(): append(allPermsExcept(ResourceWorkspace, ResourceWorkspaceDormant, ResourceAssignRole), Permissions(map[string][]policy.Action{ ResourceWorkspaceDormant.Type: {policy.ActionRead, policy.ActionDelete, policy.ActionCreate, policy.ActionUpdate, policy.ActionWorkspaceStop}, ResourceWorkspace.Type: slice.Omit(ResourceWorkspace.AvailableActions(), policy.ActionApplicationConnect, policy.ActionSSH), })...), @@ -289,13 +379,13 @@ func ReloadBuiltinRoles(opts *RoleOptions) { // orgMember has an empty set of permissions, this just implies their membership // in an organization. - orgMember: func(organizationID string) Role { + orgMember: func(organizationID uuid.UUID) Role { return Role{ - Name: RoleName(orgMember, organizationID), + Identifier: RoleIdentifier{Name: orgMember, OrganizationID: organizationID}, DisplayName: "", Site: []Permission{}, Org: map[string][]Permission{ - organizationID: { + organizationID.String(): { { // All org members can read the organization ResourceType: ResourceOrganization.Type, @@ -326,37 +416,40 @@ func ReloadBuiltinRoles(opts *RoleOptions) { // map[actor_role][assign_role] var assignRoles = map[string]map[string]bool{ "system": { - owner: true, - auditor: true, - member: true, - orgAdmin: true, - orgMember: true, - templateAdmin: true, - userAdmin: true, - customSiteRole: true, + owner: true, + auditor: true, + member: true, + orgAdmin: true, + orgMember: true, + templateAdmin: true, + userAdmin: true, + customSiteRole: true, + customOrganizationRole: true, }, owner: { - owner: true, - auditor: true, - member: true, - orgAdmin: true, - orgMember: true, - templateAdmin: true, - userAdmin: true, - customSiteRole: true, + owner: true, + auditor: true, + member: true, + orgAdmin: true, + orgMember: true, + templateAdmin: true, + userAdmin: true, + customSiteRole: true, + customOrganizationRole: true, }, userAdmin: { member: true, orgMember: true, }, orgAdmin: { - orgAdmin: true, - orgMember: true, + orgAdmin: true, + orgMember: true, + customOrganizationRole: true, }, } // ExpandableRoles is any type that can be expanded into a []Role. This is implemented -// as an interface so we can have RoleNames for user defined roles, and implement +// as an interface so we can have RoleIdentifiers for user defined roles, and implement // custom ExpandableRoles for system type users (eg autostart/autostop system role). // We want a clear divide between the two types of roles so users have no codepath // to interact or assign system roles. @@ -367,7 +460,7 @@ type ExpandableRoles interface { Expand() ([]Role, error) // Names is for logging and tracing purposes, we want to know the human // names of the expanded roles. - Names() []string + Names() []RoleIdentifier } // Permission is the format passed into the rego. @@ -410,7 +503,7 @@ func (perm Permission) Valid() error { // Users of this package should instead **only** use the role names, and // this package will expand the role names into their json payloads. type Role struct { - Name string `json:"name"` + Identifier RoleIdentifier `json:"name"` // DisplayName is used for UI purposes. If the role has no display name, // that means the UI should never display it. 
DisplayName string `json:"display_name"` @@ -460,10 +553,10 @@ func (roles Roles) Expand() ([]Role, error) { return roles, nil } -func (roles Roles) Names() []string { - names := make([]string, 0, len(roles)) +func (roles Roles) Names() []RoleIdentifier { + names := make([]RoleIdentifier, 0, len(roles)) for _, r := range roles { - names = append(names, r.Name) + names = append(names, r.Identifier) } return names } @@ -471,32 +564,22 @@ func (roles Roles) Names() []string { // CanAssignRole is a helper function that returns true if the user can assign // the specified role. This also can be used for removing a role. // This is a simple implementation for now. -func CanAssignRole(expandable ExpandableRoles, assignedRole string) bool { +func CanAssignRole(subjectHasRoles ExpandableRoles, assignedRole RoleIdentifier) bool { // For CanAssignRole, we only care about the names of the roles. - roles := expandable.Names() - - assigned, assignedOrg, err := RoleSplit(assignedRole) - if err != nil { - return false - } - - for _, longRole := range roles { - role, orgID, err := RoleSplit(longRole) - if err != nil { - continue - } + roles := subjectHasRoles.Names() - if orgID != "" && orgID != assignedOrg { + for _, myRole := range roles { + if myRole.OrganizationID != uuid.Nil && myRole.OrganizationID != assignedRole.OrganizationID { // Org roles only apply to the org they are assigned to. continue } - allowed, ok := assignRoles[role] + allowedAssignList, ok := assignRoles[myRole.Name] if !ok { continue } - if allowed[assigned] { + if allowedAssignList[assignedRole.Name] { return true } } @@ -509,29 +592,31 @@ func CanAssignRole(expandable ExpandableRoles, assignedRole string) bool { // This function is exported so that the Display name can be returned to the // api. We should maybe make an exported function that returns just the // human-readable content of the Role struct (name + display name). -func RoleByName(name string) (Role, error) { - roleName, orgID, err := RoleSplit(name) - if err != nil { - return Role{}, xerrors.Errorf("parse role name: %w", err) - } - - roleFunc, ok := builtInRoles[roleName] +func RoleByName(name RoleIdentifier) (Role, error) { + roleFunc, ok := builtInRoles[name.Name] if !ok { // No role found - return Role{}, xerrors.Errorf("role %q not found", roleName) + return Role{}, xerrors.Errorf("role %q not found", name.String()) } // Ensure all org roles are properly scoped a non-empty organization id. // This is just some defensive programming. - role := roleFunc(orgID) - if len(role.Org) > 0 && orgID == "" { - return Role{}, xerrors.Errorf("expect a org id for role %q", roleName) + role := roleFunc(name.OrganizationID) + if len(role.Org) > 0 && name.OrganizationID == uuid.Nil { + return Role{}, xerrors.Errorf("expect a org id for role %q", name.String()) + } + + // This can happen if a custom role shares the same name as a built-in role. + // You could make an org role called "owner", and we should not return the + // owner role itself. 
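+	// For example, RoleByName(RoleIdentifier{Name: "owner", OrganizationID: orgID})
+	// resolves the built-in site-wide owner role above, whose Identifier carries a
+	// nil OrganizationID, so the mismatch below reports the role as not found.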
+ if name.OrganizationID != role.Identifier.OrganizationID { + return Role{}, xerrors.Errorf("role %q not found", name.String()) } return role, nil } -func rolesByNames(roleNames []string) ([]Role, error) { +func rolesByNames(roleNames []RoleIdentifier) ([]Role, error) { roles := make([]Role, 0, len(roleNames)) for _, n := range roleNames { r, err := RoleByName(n) @@ -543,14 +628,6 @@ func rolesByNames(roleNames []string) ([]Role, error) { return roles, nil } -func IsOrgRole(roleName string) (string, bool) { - _, orgID, err := RoleSplit(roleName) - if err == nil && orgID != "" { - return orgID, true - } - return "", false -} - // OrganizationRoles lists all roles that can be applied to an organization user // in the given organization. This is the list of available roles, // and specific to an organization. @@ -560,13 +637,8 @@ func IsOrgRole(roleName string) (string, bool) { func OrganizationRoles(organizationID uuid.UUID) []Role { var roles []Role for _, roleF := range builtInRoles { - role := roleF(organizationID.String()) - _, scope, err := RoleSplit(role.Name) - if err != nil { - // This should never happen - continue - } - if scope == organizationID.String() { + role := roleF(organizationID) + if role.Identifier.OrganizationID == organizationID { roles = append(roles, role) } } @@ -581,13 +653,9 @@ func OrganizationRoles(organizationID uuid.UUID) []Role { func SiteRoles() []Role { var roles []Role for _, roleF := range builtInRoles { - role := roleF("random") - _, scope, err := RoleSplit(role.Name) - if err != nil { - // This should never happen - continue - } - if scope == "" { + // Must provide some non-nil uuid to filter out org roles. + role := roleF(uuid.New()) + if !role.Identifier.IsOrgRole() { roles = append(roles, role) } } @@ -599,8 +667,8 @@ func SiteRoles() []Role { // removing roles. This set determines the changes, so that the appropriate // RBAC checks can be applied using "ActionCreate" and "ActionDelete" for // "added" and "removed" roles respectively. -func ChangeRoleSet(from []string, to []string) (added []string, removed []string) { - has := make(map[string]struct{}) +func ChangeRoleSet(from []RoleIdentifier, to []RoleIdentifier) (added []RoleIdentifier, removed []RoleIdentifier) { + has := make(map[RoleIdentifier]struct{}) for _, exists := range from { has[exists] = struct{}{} } @@ -625,34 +693,6 @@ func ChangeRoleSet(from []string, to []string) (added []string, removed []string return added, removed } -// RoleName is a quick helper function to return -// -// role_name:scopeID -// -// If no scopeID is required, only 'role_name' is returned -func RoleName(name string, orgID string) string { - if orgID == "" { - return name - } - return name + ":" + orgID -} - -func RoleSplit(role string) (name string, orgID string, err error) { - arr := strings.Split(role, ":") - if len(arr) > 2 { - return "", "", xerrors.Errorf("too many colons in role name") - } - - if arr[0] == "" { - return "", "", xerrors.Errorf("role cannot be the empty string") - } - - if len(arr) == 2 { - return arr[0], arr[1], nil - } - return arr[0], "", nil -} - // Permissions is just a helper function to make building roles that list out resources // and actions a bit easier. 
func Permissions(perms map[string][]policy.Action) []Permission { diff --git a/coderd/rbac/roles_internal_test.go b/coderd/rbac/roles_internal_test.go index 07126981081d8..3f2d0d89fe455 100644 --- a/coderd/rbac/roles_internal_test.go +++ b/coderd/rbac/roles_internal_test.go @@ -20,7 +20,7 @@ import ( // A possible large improvement would be to implement the ast.Value interface directly. func BenchmarkRBACValueAllocation(b *testing.B) { actor := Subject{ - Roles: RoleNames{RoleOrgMember(uuid.New()), RoleOrgAdmin(uuid.New()), RoleMember()}, + Roles: RoleIdentifiers{ScopedRoleOrgMember(uuid.New()), ScopedRoleOrgAdmin(uuid.New()), RoleMember()}, ID: uuid.NewString(), Scope: ScopeAll, Groups: []string{uuid.NewString(), uuid.NewString(), uuid.NewString()}, @@ -73,7 +73,7 @@ func TestRegoInputValue(t *testing.T) { // Expand all roles and make sure we have a good copy. // This is because these tests modify the roles, and we don't want to // modify the original roles. - roles, err := RoleNames{RoleOrgMember(uuid.New()), RoleOrgAdmin(uuid.New()), RoleMember()}.Expand() + roles, err := RoleIdentifiers{ScopedRoleOrgMember(uuid.New()), ScopedRoleOrgAdmin(uuid.New()), RoleMember()}.Expand() require.NoError(t, err, "failed to expand roles") for i := range roles { // If all cached values are nil, then the role will not use @@ -213,25 +213,25 @@ func TestRoleByName(t *testing.T) { testCases := []struct { Role Role }{ - {Role: builtInRoles[owner]("")}, - {Role: builtInRoles[member]("")}, - {Role: builtInRoles[templateAdmin]("")}, - {Role: builtInRoles[userAdmin]("")}, - {Role: builtInRoles[auditor]("")}, - - {Role: builtInRoles[orgAdmin]("4592dac5-0945-42fd-828d-a903957d3dbb")}, - {Role: builtInRoles[orgAdmin]("24c100c5-1920-49c0-8c38-1b640ac4b38c")}, - {Role: builtInRoles[orgAdmin]("4a00f697-0040-4079-b3ce-d24470281a62")}, - - {Role: builtInRoles[orgMember]("3293c50e-fa5d-414f-a461-01112a4dfb6f")}, - {Role: builtInRoles[orgMember]("f88dd23d-bdbd-469d-b82e-36ee06c3d1e1")}, - {Role: builtInRoles[orgMember]("02cfd2a5-016c-4d8d-8290-301f5f18023d")}, + {Role: builtInRoles[owner](uuid.Nil)}, + {Role: builtInRoles[member](uuid.Nil)}, + {Role: builtInRoles[templateAdmin](uuid.Nil)}, + {Role: builtInRoles[userAdmin](uuid.Nil)}, + {Role: builtInRoles[auditor](uuid.Nil)}, + + {Role: builtInRoles[orgAdmin](uuid.New())}, + {Role: builtInRoles[orgAdmin](uuid.New())}, + {Role: builtInRoles[orgAdmin](uuid.New())}, + + {Role: builtInRoles[orgMember](uuid.New())}, + {Role: builtInRoles[orgMember](uuid.New())}, + {Role: builtInRoles[orgMember](uuid.New())}, } for _, c := range testCases { c := c - t.Run(c.Role.Name, func(t *testing.T) { - role, err := RoleByName(c.Role.Name) + t.Run(c.Role.Identifier.String(), func(t *testing.T) { + role, err := RoleByName(c.Role.Identifier) require.NoError(t, err, "role exists") equalRoles(t, c.Role, role) }) @@ -242,20 +242,17 @@ func TestRoleByName(t *testing.T) { t.Run("Errors", func(t *testing.T) { var err error - _, err = RoleByName("") + _, err = RoleByName(RoleIdentifier{}) require.Error(t, err, "empty role") - _, err = RoleByName("too:many:colons") - require.Error(t, err, "too many colons") - - _, err = RoleByName(orgMember) + _, err = RoleByName(RoleIdentifier{Name: orgMember}) require.Error(t, err, "expect orgID") }) } // SameAs compares 2 roles for equality. 
func equalRoles(t *testing.T, a, b Role) { - require.Equal(t, a.Name, b.Name, "role names") + require.Equal(t, a.Identifier, b.Identifier, "role names") require.Equal(t, a.DisplayName, b.DisplayName, "role display names") require.ElementsMatch(t, a.Site, b.Site, "site permissions") require.ElementsMatch(t, a.User, b.User, "user permissions") diff --git a/coderd/rbac/roles_test.go b/coderd/rbac/roles_test.go index e6680d4d628cc..c49f161760235 100644 --- a/coderd/rbac/roles_test.go +++ b/coderd/rbac/roles_test.go @@ -26,7 +26,7 @@ func TestBuiltInRoles(t *testing.T) { t.Parallel() for _, r := range rbac.SiteRoles() { r := r - t.Run(r.Name, func(t *testing.T) { + t.Run(r.Identifier.String(), func(t *testing.T) { t.Parallel() require.NoError(t, r.Valid(), "invalid role") }) @@ -34,7 +34,7 @@ func TestBuiltInRoles(t *testing.T) { for _, r := range rbac.OrganizationRoles(uuid.New()) { r := r - t.Run(r.Name, func(t *testing.T) { + t.Run(r.Identifier.String(), func(t *testing.T) { t.Parallel() require.NoError(t, r.Valid(), "invalid role") }) @@ -45,7 +45,7 @@ func TestBuiltInRoles(t *testing.T) { func TestOwnerExec(t *testing.T) { owner := rbac.Subject{ ID: uuid.NewString(), - Roles: rbac.RoleNames{rbac.RoleMember(), rbac.RoleOwner()}, + Roles: rbac.RoleIdentifiers{rbac.RoleMember(), rbac.RoleOwner()}, Scope: rbac.ScopeAll, } @@ -98,17 +98,17 @@ func TestRolePermissions(t *testing.T) { apiKeyID := uuid.New() // Subjects to user - memberMe := authSubject{Name: "member_me", Actor: rbac.Subject{ID: currentUser.String(), Roles: rbac.RoleNames{rbac.RoleMember()}}} - orgMemberMe := authSubject{Name: "org_member_me", Actor: rbac.Subject{ID: currentUser.String(), Roles: rbac.RoleNames{rbac.RoleMember(), rbac.RoleOrgMember(orgID)}}} + memberMe := authSubject{Name: "member_me", Actor: rbac.Subject{ID: currentUser.String(), Roles: rbac.RoleIdentifiers{rbac.RoleMember()}}} + orgMemberMe := authSubject{Name: "org_member_me", Actor: rbac.Subject{ID: currentUser.String(), Roles: rbac.RoleIdentifiers{rbac.RoleMember(), rbac.ScopedRoleOrgMember(orgID)}}} - owner := authSubject{Name: "owner", Actor: rbac.Subject{ID: adminID.String(), Roles: rbac.RoleNames{rbac.RoleMember(), rbac.RoleOwner()}}} - orgAdmin := authSubject{Name: "org_admin", Actor: rbac.Subject{ID: adminID.String(), Roles: rbac.RoleNames{rbac.RoleMember(), rbac.RoleOrgMember(orgID), rbac.RoleOrgAdmin(orgID)}}} + owner := authSubject{Name: "owner", Actor: rbac.Subject{ID: adminID.String(), Roles: rbac.RoleIdentifiers{rbac.RoleMember(), rbac.RoleOwner()}}} + orgAdmin := authSubject{Name: "org_admin", Actor: rbac.Subject{ID: adminID.String(), Roles: rbac.RoleIdentifiers{rbac.RoleMember(), rbac.ScopedRoleOrgMember(orgID), rbac.ScopedRoleOrgAdmin(orgID)}}} - otherOrgMember := authSubject{Name: "org_member_other", Actor: rbac.Subject{ID: uuid.NewString(), Roles: rbac.RoleNames{rbac.RoleMember(), rbac.RoleOrgMember(otherOrg)}}} - otherOrgAdmin := authSubject{Name: "org_admin_other", Actor: rbac.Subject{ID: uuid.NewString(), Roles: rbac.RoleNames{rbac.RoleMember(), rbac.RoleOrgMember(otherOrg), rbac.RoleOrgAdmin(otherOrg)}}} + otherOrgMember := authSubject{Name: "org_member_other", Actor: rbac.Subject{ID: uuid.NewString(), Roles: rbac.RoleIdentifiers{rbac.RoleMember(), rbac.ScopedRoleOrgMember(otherOrg)}}} + otherOrgAdmin := authSubject{Name: "org_admin_other", Actor: rbac.Subject{ID: uuid.NewString(), Roles: rbac.RoleIdentifiers{rbac.RoleMember(), rbac.ScopedRoleOrgMember(otherOrg), rbac.ScopedRoleOrgAdmin(otherOrg)}}} - templateAdmin := authSubject{Name: 
"template-admin", Actor: rbac.Subject{ID: templateAdminID.String(), Roles: rbac.RoleNames{rbac.RoleMember(), rbac.RoleTemplateAdmin()}}} - userAdmin := authSubject{Name: "user-admin", Actor: rbac.Subject{ID: templateAdminID.String(), Roles: rbac.RoleNames{rbac.RoleMember(), rbac.RoleUserAdmin()}}} + templateAdmin := authSubject{Name: "template-admin", Actor: rbac.Subject{ID: templateAdminID.String(), Roles: rbac.RoleIdentifiers{rbac.RoleMember(), rbac.RoleTemplateAdmin()}}} + userAdmin := authSubject{Name: "user-admin", Actor: rbac.Subject{ID: templateAdminID.String(), Roles: rbac.RoleIdentifiers{rbac.RoleMember(), rbac.RoleUserAdmin()}}} // requiredSubjects are required to be asserted in each test case. This is // to make sure one is not forgotten. @@ -279,6 +279,15 @@ func TestRolePermissions(t *testing.T) { Name: "OrgRoleAssignment", Actions: []policy.Action{policy.ActionAssign, policy.ActionDelete}, Resource: rbac.ResourceAssignOrgRole.InOrg(orgID), + AuthorizeMap: map[bool][]authSubject{ + true: {owner, orgAdmin, userAdmin}, + false: {orgMemberMe, otherOrgAdmin, otherOrgMember, memberMe, templateAdmin}, + }, + }, + { + Name: "CreateOrgRoleAssignment", + Actions: []policy.Action{policy.ActionCreate}, + Resource: rbac.ResourceAssignOrgRole.InOrg(orgID), AuthorizeMap: map[bool][]authSubject{ true: {owner, orgAdmin}, false: {orgMemberMe, otherOrgAdmin, otherOrgMember, memberMe, templateAdmin, userAdmin}, @@ -289,8 +298,8 @@ func TestRolePermissions(t *testing.T) { Actions: []policy.Action{policy.ActionRead}, Resource: rbac.ResourceAssignOrgRole.InOrg(orgID), AuthorizeMap: map[bool][]authSubject{ - true: {owner, orgAdmin, orgMemberMe}, - false: {otherOrgAdmin, otherOrgMember, memberMe, templateAdmin, userAdmin}, + true: {owner, orgAdmin, orgMemberMe, userAdmin, userAdmin}, + false: {otherOrgAdmin, otherOrgMember, memberMe, templateAdmin}, }, }, { @@ -616,50 +625,40 @@ func TestIsOrgRole(t *testing.T) { require.NoError(t, err) testCases := []struct { - RoleName string - OrgRole bool - OrgID string + Identifier rbac.RoleIdentifier + OrgRole bool + OrgID uuid.UUID }{ // Not org roles - {RoleName: rbac.RoleOwner()}, - {RoleName: rbac.RoleMember()}, - {RoleName: "auditor"}, - + {Identifier: rbac.RoleOwner()}, + {Identifier: rbac.RoleMember()}, + {Identifier: rbac.RoleAuditor()}, { - RoleName: "a:bad:role", - OrgRole: false, - }, - { - RoleName: "", - OrgRole: false, + Identifier: rbac.RoleIdentifier{}, + OrgRole: false, }, // Org roles { - RoleName: rbac.RoleOrgAdmin(randomUUID), - OrgRole: true, - OrgID: randomUUID.String(), - }, - { - RoleName: rbac.RoleOrgMember(randomUUID), - OrgRole: true, - OrgID: randomUUID.String(), + Identifier: rbac.ScopedRoleOrgAdmin(randomUUID), + OrgRole: true, + OrgID: randomUUID, }, { - RoleName: "test:example", - OrgRole: true, - OrgID: "example", + Identifier: rbac.ScopedRoleOrgMember(randomUUID), + OrgRole: true, + OrgID: randomUUID, }, } // nolint:paralleltest for _, c := range testCases { c := c - t.Run(c.RoleName, func(t *testing.T) { + t.Run(c.Identifier.String(), func(t *testing.T) { t.Parallel() - orgID, ok := rbac.IsOrgRole(c.RoleName) + ok := c.Identifier.IsOrgRole() require.Equal(t, c.OrgRole, ok, "match expected org role") - require.Equal(t, c.OrgID, orgID, "match expected org id") + require.Equal(t, c.OrgID, c.Identifier.OrganizationID, "match expected org id") }) } } @@ -670,7 +669,7 @@ func TestListRoles(t *testing.T) { siteRoles := rbac.SiteRoles() siteRoleNames := make([]string, 0, len(siteRoles)) for _, role := range siteRoles { - siteRoleNames 
= append(siteRoleNames, role.Name) + siteRoleNames = append(siteRoleNames, role.Identifier.Name) } // If this test is ever failing, just update the list to the roles @@ -690,7 +689,7 @@ func TestListRoles(t *testing.T) { orgRoles := rbac.OrganizationRoles(orgID) orgRoleNames := make([]string, 0, len(orgRoles)) for _, role := range orgRoles { - orgRoleNames = append(orgRoleNames, role.Name) + orgRoleNames = append(orgRoleNames, role.Identifier.String()) } require.ElementsMatch(t, []string{ @@ -738,13 +737,22 @@ func TestChangeSet(t *testing.T) { }, } + convert := func(s []string) rbac.RoleIdentifiers { + tmp := make([]rbac.RoleIdentifier, 0, len(s)) + for _, e := range s { + tmp = append(tmp, rbac.RoleIdentifier{Name: e}) + } + return tmp + } + for _, c := range testCases { c := c t.Run(c.Name, func(t *testing.T) { t.Parallel() - add, remove := rbac.ChangeRoleSet(c.From, c.To) - require.ElementsMatch(t, c.ExpAdd, add, "expect added") - require.ElementsMatch(t, c.ExpRemove, remove, "expect removed") + + add, remove := rbac.ChangeRoleSet(convert(c.From), convert(c.To)) + require.ElementsMatch(t, convert(c.ExpAdd), add, "expect added") + require.ElementsMatch(t, convert(c.ExpRemove), remove, "expect removed") }) } } diff --git a/coderd/rbac/rolestore/rolestore.go b/coderd/rbac/rolestore/rolestore.go index e0d199241fc9f..610b04c06aa19 100644 --- a/coderd/rbac/rolestore/rolestore.go +++ b/coderd/rbac/rolestore/rolestore.go @@ -2,7 +2,6 @@ package rolestore import ( "context" - "encoding/json" "net/http" "github.com/google/uuid" @@ -40,14 +39,16 @@ func roleCache(ctx context.Context) *syncmap.Map[string, rbac.Role] { } // Expand will expand built in roles, and fetch custom roles from the database. -func Expand(ctx context.Context, db database.Store, names []string) (rbac.Roles, error) { +// If a custom role is defined, but does not exist, the role will be omitted on +// the response. This means deleted roles are silently dropped. +func Expand(ctx context.Context, db database.Store, names []rbac.RoleIdentifier) (rbac.Roles, error) { if len(names) == 0 { // That was easy return []rbac.Role{}, nil } cache := roleCache(ctx) - lookup := make([]string, 0) + lookup := make([]rbac.RoleIdentifier, 0) roles := make([]rbac.Role, 0, len(names)) for _, name := range names { @@ -59,7 +60,7 @@ func Expand(ctx context.Context, db database.Store, names []string) (rbac.Roles, } // Check custom role cache - customRole, ok := cache.Load(name) + customRole, ok := cache.Load(name.String()) if ok { roles = append(roles, customRole) continue @@ -70,11 +71,19 @@ func Expand(ctx context.Context, db database.Store, names []string) (rbac.Roles, } if len(lookup) > 0 { + lookupArgs := make([]database.NameOrganizationPair, 0, len(lookup)) + for _, name := range lookup { + lookupArgs = append(lookupArgs, database.NameOrganizationPair{ + Name: name.Name, + OrganizationID: name.OrganizationID, + }) + } + // If some roles are missing from the database, they are omitted from // the expansion. These roles are no-ops. Should we raise some kind of // warning when this happens? 
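+		// Each lookup pairs the role name with its organization ID, so custom roles
+		// that share a name across organizations do not collide. Results are cached
+		// under RoleIdentifier.String().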
dbroles, err := db.CustomRoles(ctx, database.CustomRolesParams{ - LookupRoles: lookup, + LookupRoles: lookupArgs, ExcludeOrgRoles: false, OrganizationID: uuid.Nil, }) @@ -89,83 +98,46 @@ func Expand(ctx context.Context, db database.Store, names []string) (rbac.Roles, return nil, xerrors.Errorf("convert db role %q: %w", dbrole.Name, err) } roles = append(roles, converted) - cache.Store(dbrole.Name, converted) + cache.Store(dbrole.RoleIdentifier().String(), converted) } } return roles, nil } -func ConvertDBRole(dbRole database.CustomRole) (rbac.Role, error) { - name := dbRole.Name - if dbRole.OrganizationID.Valid { - name = rbac.RoleName(dbRole.Name, dbRole.OrganizationID.UUID.String()) +func convertPermissions(dbPerms []database.CustomRolePermission) []rbac.Permission { + n := make([]rbac.Permission, 0, len(dbPerms)) + for _, dbPerm := range dbPerms { + n = append(n, rbac.Permission{ + Negate: dbPerm.Negate, + ResourceType: dbPerm.ResourceType, + Action: dbPerm.Action, + }) } + return n +} + +// ConvertDBRole should not be used by any human facing apis. It is used +// for authz purposes. +func ConvertDBRole(dbRole database.CustomRole) (rbac.Role, error) { role := rbac.Role{ - Name: name, + Identifier: dbRole.RoleIdentifier(), DisplayName: dbRole.DisplayName, - Site: nil, + Site: convertPermissions(dbRole.SitePermissions), Org: nil, - User: nil, + User: convertPermissions(dbRole.UserPermissions), } - err := json.Unmarshal(dbRole.SitePermissions, &role.Site) - if err != nil { - return role, xerrors.Errorf("unmarshal site permissions: %w", err) + // Org permissions only make sense if an org id is specified. + if len(dbRole.OrgPermissions) > 0 && dbRole.OrganizationID.UUID == uuid.Nil { + return rbac.Role{}, xerrors.Errorf("role has organization perms without an org id specified") } - err = json.Unmarshal(dbRole.OrgPermissions, &role.Org) - if err != nil { - return role, xerrors.Errorf("unmarshal org permissions: %w", err) - } - - err = json.Unmarshal(dbRole.UserPermissions, &role.User) - if err != nil { - return role, xerrors.Errorf("unmarshal user permissions: %w", err) - } - - return role, nil -} - -func ConvertRoleToDB(role rbac.Role) (database.CustomRole, error) { - roleName, orgIDStr, err := rbac.RoleSplit(role.Name) - if err != nil { - return database.CustomRole{}, xerrors.Errorf("split role %q: %w", role.Name, err) - } - - dbRole := database.CustomRole{ - Name: roleName, - DisplayName: role.DisplayName, - } - - if orgIDStr != "" { - orgID, err := uuid.Parse(orgIDStr) - if err != nil { - return database.CustomRole{}, xerrors.Errorf("parse org id %q: %w", orgIDStr, err) - } - dbRole.OrganizationID = uuid.NullUUID{ - UUID: orgID, - Valid: true, + if dbRole.OrganizationID.UUID != uuid.Nil { + role.Org = map[string][]rbac.Permission{ + dbRole.OrganizationID.UUID.String(): convertPermissions(dbRole.OrgPermissions), } } - siteData, err := json.Marshal(role.Site) - if err != nil { - return dbRole, xerrors.Errorf("marshal site permissions: %w", err) - } - dbRole.SitePermissions = siteData - - orgData, err := json.Marshal(role.Org) - if err != nil { - return dbRole, xerrors.Errorf("marshal org permissions: %w", err) - } - dbRole.OrgPermissions = orgData - - userData, err := json.Marshal(role.User) - if err != nil { - return dbRole, xerrors.Errorf("marshal user permissions: %w", err) - } - dbRole.UserPermissions = userData - - return dbRole, nil + return role, nil } diff --git a/coderd/rbac/rolestore/rolestore_test.go b/coderd/rbac/rolestore/rolestore_test.go index 318f2f579b340..b7712357d0721 
100644 --- a/coderd/rbac/rolestore/rolestore_test.go +++ b/coderd/rbac/rolestore/rolestore_test.go @@ -35,7 +35,7 @@ func TestExpandCustomRoleRoles(t *testing.T) { }) ctx := testutil.Context(t, testutil.WaitShort) - roles, err := rolestore.Expand(ctx, db, []string{rbac.RoleName(roleName, org.ID.String())}) + roles, err := rolestore.Expand(ctx, db, []rbac.RoleIdentifier{{Name: roleName, OrganizationID: org.ID}}) require.NoError(t, err) require.Len(t, roles, 1, "role found") } diff --git a/coderd/rbac/scopes.go b/coderd/rbac/scopes.go index 3eccd8194f31a..d6a95ccec1b35 100644 --- a/coderd/rbac/scopes.go +++ b/coderd/rbac/scopes.go @@ -58,7 +58,7 @@ var builtinScopes = map[ScopeName]Scope{ // authorize checks it is usually not used directly and skips scope checks. ScopeAll: { Role: Role{ - Name: fmt.Sprintf("Scope_%s", ScopeAll), + Identifier: RoleIdentifier{Name: fmt.Sprintf("Scope_%s", ScopeAll)}, DisplayName: "All operations", Site: Permissions(map[string][]policy.Action{ ResourceWildcard.Type: {policy.WildcardSymbol}, @@ -71,7 +71,7 @@ var builtinScopes = map[ScopeName]Scope{ ScopeApplicationConnect: { Role: Role{ - Name: fmt.Sprintf("Scope_%s", ScopeApplicationConnect), + Identifier: RoleIdentifier{Name: fmt.Sprintf("Scope_%s", ScopeApplicationConnect)}, DisplayName: "Ability to connect to applications", Site: Permissions(map[string][]policy.Action{ ResourceWorkspace.Type: {policy.ActionApplicationConnect}, @@ -87,7 +87,7 @@ type ExpandableScope interface { Expand() (Scope, error) // Name is for logging and tracing purposes, we want to know the human // name of the scope. - Name() string + Name() RoleIdentifier } type ScopeName string @@ -96,8 +96,8 @@ func (name ScopeName) Expand() (Scope, error) { return ExpandScope(name) } -func (name ScopeName) Name() string { - return string(name) +func (name ScopeName) Name() RoleIdentifier { + return RoleIdentifier{Name: string(name)} } // Scope acts the exact same as a Role with the addition that is can also @@ -114,8 +114,8 @@ func (s Scope) Expand() (Scope, error) { return s, nil } -func (s Scope) Name() string { - return s.Role.Name +func (s Scope) Name() RoleIdentifier { + return s.Role.Identifier } func ExpandScope(scope ScopeName) (Scope, error) { diff --git a/coderd/rbac/subject_test.go b/coderd/rbac/subject_test.go index 330ad7403797b..e2a2f24932c36 100644 --- a/coderd/rbac/subject_test.go +++ b/coderd/rbac/subject_test.go @@ -24,13 +24,13 @@ func TestSubjectEqual(t *testing.T) { Name: "Same", A: rbac.Subject{ ID: "id", - Roles: rbac.RoleNames{rbac.RoleMember()}, + Roles: rbac.RoleIdentifiers{rbac.RoleMember()}, Groups: []string{"group"}, Scope: rbac.ScopeAll, }, B: rbac.Subject{ ID: "id", - Roles: rbac.RoleNames{rbac.RoleMember()}, + Roles: rbac.RoleIdentifiers{rbac.RoleMember()}, Groups: []string{"group"}, Scope: rbac.ScopeAll, }, @@ -49,7 +49,7 @@ func TestSubjectEqual(t *testing.T) { { Name: "RolesNilVs0", A: rbac.Subject{ - Roles: rbac.RoleNames{}, + Roles: rbac.RoleIdentifiers{}, }, B: rbac.Subject{ Roles: nil, @@ -69,20 +69,20 @@ func TestSubjectEqual(t *testing.T) { { Name: "DifferentRoles", A: rbac.Subject{ - Roles: rbac.RoleNames{rbac.RoleMember()}, + Roles: rbac.RoleIdentifiers{rbac.RoleMember()}, }, B: rbac.Subject{ - Roles: rbac.RoleNames{rbac.RoleOwner()}, + Roles: rbac.RoleIdentifiers{rbac.RoleOwner()}, }, Expected: false, }, { Name: "Different#Roles", A: rbac.Subject{ - Roles: rbac.RoleNames{rbac.RoleMember()}, + Roles: rbac.RoleIdentifiers{rbac.RoleMember()}, }, B: rbac.Subject{ - Roles: rbac.RoleNames{rbac.RoleMember(), 
rbac.RoleOwner()}, + Roles: rbac.RoleIdentifiers{rbac.RoleMember(), rbac.RoleOwner()}, }, Expected: false, }, diff --git a/coderd/roles.go b/coderd/roles.go index e8505baa4d255..8c066f5fecbb3 100644 --- a/coderd/roles.go +++ b/coderd/roles.go @@ -10,7 +10,6 @@ import ( "github.com/coder/coder/v2/coderd/database/db2sdk" "github.com/coder/coder/v2/coderd/httpmw" "github.com/coder/coder/v2/coderd/rbac/policy" - "github.com/coder/coder/v2/coderd/rbac/rolestore" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/coderd/httpapi" @@ -21,12 +20,12 @@ import ( // roles. Ideally only included in the enterprise package, but the routes are // intermixed with AGPL endpoints. type CustomRoleHandler interface { - PatchOrganizationRole(ctx context.Context, db database.Store, rw http.ResponseWriter, orgID uuid.UUID, role codersdk.Role) (codersdk.Role, bool) + PatchOrganizationRole(ctx context.Context, rw http.ResponseWriter, r *http.Request, orgID uuid.UUID, role codersdk.Role) (codersdk.Role, bool) } type agplCustomRoleHandler struct{} -func (agplCustomRoleHandler) PatchOrganizationRole(ctx context.Context, _ database.Store, rw http.ResponseWriter, _ uuid.UUID, _ codersdk.Role) (codersdk.Role, bool) { +func (agplCustomRoleHandler) PatchOrganizationRole(ctx context.Context, rw http.ResponseWriter, _ *http.Request, _ uuid.UUID, _ codersdk.Role) (codersdk.Role, bool) { httpapi.Write(ctx, rw, http.StatusForbidden, codersdk.Response{ Message: "Creating and updating custom roles is an Enterprise feature. Contact sales!", }) @@ -55,7 +54,7 @@ func (api *API) patchOrgRoles(rw http.ResponseWriter, r *http.Request) { return } - updated, ok := handler.PatchOrganizationRole(ctx, api.Database, rw, organization.ID, req) + updated, ok := handler.PatchOrganizationRole(ctx, rw, r, organization.ID, req) if !ok { return } @@ -91,15 +90,7 @@ func (api *API) AssignableSiteRoles(rw http.ResponseWriter, r *http.Request) { return } - customRoles := make([]rbac.Role, 0, len(dbCustomRoles)) - for _, customRole := range dbCustomRoles { - rbacRole, err := rolestore.ConvertDBRole(customRole) - if err == nil { - customRoles = append(customRoles, rbacRole) - } - } - - httpapi.Write(ctx, rw, http.StatusOK, assignableRoles(actorRoles.Roles, rbac.SiteRoles(), customRoles)) + httpapi.Write(ctx, rw, http.StatusOK, assignableRoles(actorRoles.Roles, rbac.SiteRoles(), dbCustomRoles)) } // assignableOrgRoles returns all org wide roles that can be assigned. @@ -133,37 +124,34 @@ func (api *API) assignableOrgRoles(rw http.ResponseWriter, r *http.Request) { return } - customRoles := make([]rbac.Role, 0, len(dbCustomRoles)) - for _, customRole := range dbCustomRoles { - rbacRole, err := rolestore.ConvertDBRole(customRole) - if err == nil { - customRoles = append(customRoles, rbacRole) - } - } - - httpapi.Write(ctx, rw, http.StatusOK, assignableRoles(actorRoles.Roles, roles, customRoles)) + httpapi.Write(ctx, rw, http.StatusOK, assignableRoles(actorRoles.Roles, roles, dbCustomRoles)) } -func assignableRoles(actorRoles rbac.ExpandableRoles, roles []rbac.Role, customRoles []rbac.Role) []codersdk.AssignableRoles { +func assignableRoles(actorRoles rbac.ExpandableRoles, roles []rbac.Role, customRoles []database.CustomRole) []codersdk.AssignableRoles { assignable := make([]codersdk.AssignableRoles, 0) for _, role := range roles { // The member role is implied, and not assignable. // If there is no display name, then the role is also unassigned. // This is not the ideal logic, but works for now. 
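+		// Custom roles are appended below with their own assignability check,
+		// keyed off the CustomSiteRole/CustomOrganizationRole placeholders rather
+		// than the role's own name.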
- if role.Name == rbac.RoleMember() || (role.DisplayName == "") { + if role.Identifier == rbac.RoleMember() || (role.DisplayName == "") { continue } assignable = append(assignable, codersdk.AssignableRoles{ - Role: db2sdk.Role(role), - Assignable: rbac.CanAssignRole(actorRoles, role.Name), + Role: db2sdk.RBACRole(role), + Assignable: rbac.CanAssignRole(actorRoles, role.Identifier), BuiltIn: true, }) } for _, role := range customRoles { + canAssign := rbac.CanAssignRole(actorRoles, rbac.CustomSiteRole()) + if role.RoleIdentifier().IsOrgRole() { + canAssign = rbac.CanAssignRole(actorRoles, rbac.CustomOrganizationRole(role.OrganizationID.UUID)) + } + assignable = append(assignable, codersdk.AssignableRoles{ Role: db2sdk.Role(role), - Assignable: rbac.CanAssignRole(actorRoles, role.Name), + Assignable: canAssign, BuiltIn: false, }) } diff --git a/coderd/roles_test.go b/coderd/roles_test.go index 6d4f4bb6fe789..de9724b4bcb4b 100644 --- a/coderd/roles_test.go +++ b/coderd/roles_test.go @@ -6,14 +6,15 @@ import ( "slices" "testing" + "github.com/google/uuid" "github.com/stretchr/testify/require" "github.com/coder/coder/v2/coderd/coderdtest" + "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/db2sdk" "github.com/coder/coder/v2/coderd/database/dbgen" "github.com/coder/coder/v2/coderd/rbac" "github.com/coder/coder/v2/coderd/rbac/policy" - "github.com/coder/coder/v2/coderd/rbac/rolestore" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/testutil" ) @@ -25,7 +26,7 @@ func TestListRoles(t *testing.T) { // Create owner, member, and org admin owner := coderdtest.CreateFirstUser(t, client) member, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID) - orgAdmin, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID, rbac.RoleOrgAdmin(owner.OrganizationID)) + orgAdmin, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID, rbac.ScopedRoleOrgAdmin(owner.OrganizationID)) ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) t.Cleanup(cancel) @@ -50,11 +51,11 @@ func TestListRoles(t *testing.T) { x, err := member.ListSiteRoles(ctx) return x, err }, - ExpectedRoles: convertRoles(map[string]bool{ - "owner": false, - "auditor": false, - "template-admin": false, - "user-admin": false, + ExpectedRoles: convertRoles(map[rbac.RoleIdentifier]bool{ + {Name: codersdk.RoleOwner}: false, + {Name: codersdk.RoleAuditor}: false, + {Name: codersdk.RoleTemplateAdmin}: false, + {Name: codersdk.RoleUserAdmin}: false, }), }, { @@ -62,8 +63,8 @@ func TestListRoles(t *testing.T) { APICall: func(ctx context.Context) ([]codersdk.AssignableRoles, error) { return member.ListOrganizationRoles(ctx, owner.OrganizationID) }, - ExpectedRoles: convertRoles(map[string]bool{ - rbac.RoleOrgAdmin(owner.OrganizationID): false, + ExpectedRoles: convertRoles(map[rbac.RoleIdentifier]bool{ + {Name: codersdk.RoleOrganizationAdmin, OrganizationID: owner.OrganizationID}: false, }), }, { @@ -79,11 +80,11 @@ func TestListRoles(t *testing.T) { APICall: func(ctx context.Context) ([]codersdk.AssignableRoles, error) { return orgAdmin.ListSiteRoles(ctx) }, - ExpectedRoles: convertRoles(map[string]bool{ - "owner": false, - "auditor": false, - "template-admin": false, - "user-admin": false, + ExpectedRoles: convertRoles(map[rbac.RoleIdentifier]bool{ + {Name: codersdk.RoleOwner}: false, + {Name: codersdk.RoleAuditor}: false, + {Name: codersdk.RoleTemplateAdmin}: false, + {Name: codersdk.RoleUserAdmin}: false, }), }, { @@ -91,8 +92,8 @@ func 
TestListRoles(t *testing.T) { APICall: func(ctx context.Context) ([]codersdk.AssignableRoles, error) { return orgAdmin.ListOrganizationRoles(ctx, owner.OrganizationID) }, - ExpectedRoles: convertRoles(map[string]bool{ - rbac.RoleOrgAdmin(owner.OrganizationID): true, + ExpectedRoles: convertRoles(map[rbac.RoleIdentifier]bool{ + {Name: codersdk.RoleOrganizationAdmin, OrganizationID: owner.OrganizationID}: true, }), }, { @@ -108,11 +109,11 @@ func TestListRoles(t *testing.T) { APICall: func(ctx context.Context) ([]codersdk.AssignableRoles, error) { return client.ListSiteRoles(ctx) }, - ExpectedRoles: convertRoles(map[string]bool{ - "owner": true, - "auditor": true, - "template-admin": true, - "user-admin": true, + ExpectedRoles: convertRoles(map[rbac.RoleIdentifier]bool{ + {Name: codersdk.RoleOwner}: true, + {Name: codersdk.RoleAuditor}: true, + {Name: codersdk.RoleTemplateAdmin}: true, + {Name: codersdk.RoleUserAdmin}: true, }), }, { @@ -120,8 +121,8 @@ func TestListRoles(t *testing.T) { APICall: func(ctx context.Context) ([]codersdk.AssignableRoles, error) { return client.ListOrganizationRoles(ctx, owner.OrganizationID) }, - ExpectedRoles: convertRoles(map[string]bool{ - rbac.RoleOrgAdmin(owner.OrganizationID): true, + ExpectedRoles: convertRoles(map[rbac.RoleIdentifier]bool{ + {Name: codersdk.RoleOrganizationAdmin, OrganizationID: owner.OrganizationID}: true, }), }, } @@ -170,21 +171,23 @@ func TestListCustomRoles(t *testing.T) { owner := coderdtest.CreateFirstUser(t, client) const roleName = "random_role" - dbgen.CustomRole(t, db, must(rolestore.ConvertRoleToDB(rbac.Role{ - Name: rbac.RoleName(roleName, owner.OrganizationID.String()), + dbgen.CustomRole(t, db, database.CustomRole{ + Name: roleName, DisplayName: "Random Role", - Site: nil, - Org: map[string][]rbac.Permission{ - owner.OrganizationID.String(): { - { - Negate: false, - ResourceType: rbac.ResourceWorkspace.Type, - Action: policy.ActionRead, - }, + OrganizationID: uuid.NullUUID{ + UUID: owner.OrganizationID, + Valid: true, + }, + SitePermissions: nil, + OrgPermissions: []database.CustomRolePermission{ + { + Negate: false, + ResourceType: rbac.ResourceWorkspace.Type, + Action: policy.ActionRead, }, }, - User: nil, - }))) + UserPermissions: nil, + }) ctx := testutil.Context(t, testutil.WaitShort) roles, err := client.ListOrganizationRoles(ctx, owner.OrganizationID) @@ -197,12 +200,12 @@ func TestListCustomRoles(t *testing.T) { }) } -func convertRole(roleName string) codersdk.Role { +func convertRole(roleName rbac.RoleIdentifier) codersdk.Role { role, _ := rbac.RoleByName(roleName) - return db2sdk.Role(role) + return db2sdk.RBACRole(role) } -func convertRoles(assignableRoles map[string]bool) []codersdk.AssignableRoles { +func convertRoles(assignableRoles map[rbac.RoleIdentifier]bool) []codersdk.AssignableRoles { converted := make([]codersdk.AssignableRoles, 0, len(assignableRoles)) for roleName, assignable := range assignableRoles { role := convertRole(roleName) diff --git a/coderd/searchquery/search.go b/coderd/searchquery/search.go index cef971a731cbd..98bdded5e98d2 100644 --- a/coderd/searchquery/search.go +++ b/coderd/searchquery/search.go @@ -1,6 +1,7 @@ package searchquery import ( + "context" "database/sql" "fmt" "net/url" @@ -16,7 +17,9 @@ import ( "github.com/coder/coder/v2/codersdk" ) -func AuditLogs(query string) (database.GetAuditLogsOffsetParams, []codersdk.ValidationError) { +// AuditLogs requires the database to fetch an organization by name +// to convert to organization uuid. 
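+// The "organization" filter accepts either an organization UUID or an
+// organization name; names are resolved via db.GetOrganizationByName, and an
+// unknown (or unauthorized) name is reported as a validation error.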
+func AuditLogs(ctx context.Context, db database.Store, query string) (database.GetAuditLogsOffsetParams, []codersdk.ValidationError) { // Always lowercase for all searches. query = strings.ToLower(query) values, errors := searchTerms(query, func(term string, values url.Values) error { @@ -43,6 +46,28 @@ func AuditLogs(query string) (database.GetAuditLogsOffsetParams, []codersdk.Vali if !filter.DateTo.IsZero() { filter.DateTo = filter.DateTo.Add(23*time.Hour + 59*time.Minute + 59*time.Second) } + + // Convert the "organization" parameter to an organization uuid. This can require + // a database lookup. + organizationArg := parser.String(values, "", "organization") + if organizationArg != "" { + organizationID, err := uuid.Parse(organizationArg) + if err == nil { + filter.OrganizationID = organizationID + } else { + // Organization could be a name + organization, err := db.GetOrganizationByName(ctx, organizationArg) + if err != nil { + parser.Errors = append(parser.Errors, codersdk.ValidationError{ + Field: "organization", + Detail: fmt.Sprintf("Organization %q either does not exist, or you are unauthorized to view it", organizationArg), + }) + } else { + filter.OrganizationID = organization.ID + } + } + } + parser.ErrorExcessParams(values) return filter, parser.Errors } diff --git a/coderd/searchquery/search_test.go b/coderd/searchquery/search_test.go index 45f6de2d8bf8a..cbbeed0ee998e 100644 --- a/coderd/searchquery/search_test.go +++ b/coderd/searchquery/search_test.go @@ -1,6 +1,7 @@ package searchquery_test import ( + "context" "database/sql" "fmt" "strings" @@ -11,7 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/coder/coder/v2/coderd/database" - "github.com/coder/coder/v2/coderd/rbac" + "github.com/coder/coder/v2/coderd/database/dbmem" "github.com/coder/coder/v2/coderd/searchquery" "github.com/coder/coder/v2/codersdk" ) @@ -316,7 +317,10 @@ func TestSearchAudit(t *testing.T) { c := c t.Run(c.Name, func(t *testing.T) { t.Parallel() - values, errs := searchquery.AuditLogs(c.Query) + // Do not use a real database, this is only used for an + // organization lookup. + db := dbmem.New() + values, errs := searchquery.AuditLogs(context.Background(), db, c.Query) if c.ExpectedErrorContains != "" { require.True(t, len(errs) > 0, "expect some errors") var s strings.Builder @@ -381,7 +385,7 @@ func TestSearchUsers(t *testing.T) { Expected: database.GetUsersParams{ Search: "user-name", Status: []database.UserStatus{database.UserStatusActive}, - RbacRole: []string{rbac.RoleOwner()}, + RbacRole: []string{codersdk.RoleOwner}, }, }, { @@ -390,7 +394,7 @@ func TestSearchUsers(t *testing.T) { Expected: database.GetUsersParams{ Search: "user name", Status: []database.UserStatus{database.UserStatusSuspended}, - RbacRole: []string{rbac.RoleMember()}, + RbacRole: []string{codersdk.RoleMember}, }, }, { @@ -399,7 +403,7 @@ func TestSearchUsers(t *testing.T) { Expected: database.GetUsersParams{ Search: "user-name", Status: []database.UserStatus{database.UserStatusActive}, - RbacRole: []string{rbac.RoleOwner()}, + RbacRole: []string{codersdk.RoleOwner}, }, }, { diff --git a/coderd/telemetry/telemetry.go b/coderd/telemetry/telemetry.go index 36292179da478..91251053663f5 100644 --- a/coderd/telemetry/telemetry.go +++ b/coderd/telemetry/telemetry.go @@ -41,20 +41,13 @@ type Options struct { // URL is an endpoint to direct telemetry towards! 
URL *url.URL - BuiltinPostgres bool - DeploymentID string - GitHubOAuth bool - OIDCAuth bool - OIDCIssuerURL string - Wildcard bool - DERPServerRelayURL string - GitAuth []GitAuth - Prometheus bool - STUN bool - SnapshotFrequency time.Duration - Tunnel bool - ParseLicenseJWT func(lic *License) error - Experiments []string + DeploymentID string + DeploymentConfig *codersdk.DeploymentValues + BuiltinPostgres bool + Tunnel bool + + SnapshotFrequency time.Duration + ParseLicenseJWT func(lic *License) error } // New constructs a reporter for telemetry data. @@ -100,6 +93,7 @@ type Reporter interface { // database. For example, if a new user is added, a snapshot can // contain just that user entry. Report(snapshot *Snapshot) + Enabled() bool Close() } @@ -116,6 +110,10 @@ type remoteReporter struct { shutdownAt *time.Time } +func (*remoteReporter) Enabled() bool { + return true +} + func (r *remoteReporter) Report(snapshot *Snapshot) { go r.reportSync(snapshot) } @@ -242,31 +240,24 @@ func (r *remoteReporter) deployment() error { } data, err := json.Marshal(&Deployment{ - ID: r.options.DeploymentID, - Architecture: sysInfo.Architecture, - BuiltinPostgres: r.options.BuiltinPostgres, - Containerized: containerized, - Wildcard: r.options.Wildcard, - DERPServerRelayURL: r.options.DERPServerRelayURL, - GitAuth: r.options.GitAuth, - Kubernetes: os.Getenv("KUBERNETES_SERVICE_HOST") != "", - GitHubOAuth: r.options.GitHubOAuth, - OIDCAuth: r.options.OIDCAuth, - OIDCIssuerURL: r.options.OIDCIssuerURL, - Prometheus: r.options.Prometheus, - InstallSource: installSource, - STUN: r.options.STUN, - Tunnel: r.options.Tunnel, - OSType: sysInfo.OS.Type, - OSFamily: sysInfo.OS.Family, - OSPlatform: sysInfo.OS.Platform, - OSName: sysInfo.OS.Name, - OSVersion: sysInfo.OS.Version, - CPUCores: runtime.NumCPU(), - MemoryTotal: mem.Total, - MachineID: sysInfo.UniqueID, - StartedAt: r.startedAt, - ShutdownAt: r.shutdownAt, + ID: r.options.DeploymentID, + Architecture: sysInfo.Architecture, + BuiltinPostgres: r.options.BuiltinPostgres, + Containerized: containerized, + Config: r.options.DeploymentConfig, + Kubernetes: os.Getenv("KUBERNETES_SERVICE_HOST") != "", + InstallSource: installSource, + Tunnel: r.options.Tunnel, + OSType: sysInfo.OS.Type, + OSFamily: sysInfo.OS.Family, + OSPlatform: sysInfo.OS.Platform, + OSName: sysInfo.OS.Name, + OSVersion: sysInfo.OS.Version, + CPUCores: runtime.NumCPU(), + MemoryTotal: mem.Total, + MachineID: sysInfo.UniqueID, + StartedAt: r.startedAt, + ShutdownAt: r.shutdownAt, }) if err != nil { return xerrors.Errorf("marshal deployment: %w", err) @@ -353,9 +344,6 @@ func (r *remoteReporter) createSnapshot() (*Snapshot, error) { users := database.ConvertUserRows(userRows) var firstUser database.User for _, dbUser := range users { - if dbUser.Status != database.UserStatusActive { - continue - } if firstUser.CreatedAt.IsZero() { firstUser = dbUser } @@ -375,6 +363,28 @@ func (r *remoteReporter) createSnapshot() (*Snapshot, error) { } return nil }) + eg.Go(func() error { + groups, err := r.options.Database.GetGroups(ctx) + if err != nil { + return xerrors.Errorf("get groups: %w", err) + } + snapshot.Groups = make([]Group, 0, len(groups)) + for _, group := range groups { + snapshot.Groups = append(snapshot.Groups, ConvertGroup(group)) + } + return nil + }) + eg.Go(func() error { + groupMembers, err := r.options.Database.GetGroupMembers(ctx) + if err != nil { + return xerrors.Errorf("get groups: %w", err) + } + snapshot.GroupMembers = make([]GroupMember, 0, len(groupMembers)) + for _, member := 
range groupMembers { + snapshot.GroupMembers = append(snapshot.GroupMembers, ConvertGroupMember(member)) + } + return nil + }) eg.Go(func() error { workspaceRows, err := r.options.Database.GetWorkspaces(ctx, database.GetWorkspacesParams{}) if err != nil { @@ -481,10 +491,6 @@ func (r *remoteReporter) createSnapshot() (*Snapshot, error) { } return nil }) - eg.Go(func() error { - snapshot.Experiments = ConvertExperiments(r.options.Experiments) - return nil - }) err := eg.Wait() if err != nil { @@ -655,6 +661,26 @@ func ConvertUser(dbUser database.User) User { EmailHashed: emailHashed, RBACRoles: dbUser.RBACRoles, CreatedAt: dbUser.CreatedAt, + Status: dbUser.Status, + } +} + +func ConvertGroup(group database.Group) Group { + return Group{ + ID: group.ID, + Name: group.Name, + OrganizationID: group.OrganizationID, + AvatarURL: group.AvatarURL, + QuotaAllowance: group.QuotaAllowance, + DisplayName: group.DisplayName, + Source: group.Source, + } +} + +func ConvertGroupMember(member database.GroupMember) GroupMember { + return GroupMember{ + GroupID: member.GroupID, + UserID: member.UserID, } } @@ -745,16 +771,6 @@ func ConvertExternalProvisioner(id uuid.UUID, tags map[string]string, provisione } } -func ConvertExperiments(experiments []string) []Experiment { - var out []Experiment - - for _, exp := range experiments { - out = append(out, Experiment{Name: exp}) - } - - return out -} - // Snapshot represents a point-in-time anonymized database dump. // Data is aggregated by latest on the server-side, so partial data // can be sent without issue. @@ -769,6 +785,8 @@ type Snapshot struct { TemplateVersions []TemplateVersion `json:"template_versions"` Templates []Template `json:"templates"` Users []User `json:"users"` + Groups []Group `json:"groups"` + GroupMembers []GroupMember `json:"group_members"` WorkspaceAgentStats []WorkspaceAgentStat `json:"workspace_agent_stats"` WorkspaceAgents []WorkspaceAgent `json:"workspace_agents"` WorkspaceApps []WorkspaceApp `json:"workspace_apps"` @@ -777,40 +795,28 @@ type Snapshot struct { WorkspaceResourceMetadata []WorkspaceResourceMetadata `json:"workspace_resource_metadata"` WorkspaceResources []WorkspaceResource `json:"workspace_resources"` Workspaces []Workspace `json:"workspaces"` - Experiments []Experiment `json:"experiments"` } // Deployment contains information about the host running Coder. 
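+// The Config field carries the full codersdk.DeploymentValues snapshot, replacing
+// the individual fields (GitHubOAuth, OIDCAuth, Prometheus, STUN, Wildcard,
+// GitAuth, and so on) that were previously reported one by one.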
type Deployment struct { - ID string `json:"id"` - Architecture string `json:"architecture"` - BuiltinPostgres bool `json:"builtin_postgres"` - Containerized bool `json:"containerized"` - Kubernetes bool `json:"kubernetes"` - Tunnel bool `json:"tunnel"` - Wildcard bool `json:"wildcard"` - DERPServerRelayURL string `json:"derp_server_relay_url"` - GitAuth []GitAuth `json:"git_auth"` - GitHubOAuth bool `json:"github_oauth"` - OIDCAuth bool `json:"oidc_auth"` - OIDCIssuerURL string `json:"oidc_issuer_url"` - Prometheus bool `json:"prometheus"` - InstallSource string `json:"install_source"` - STUN bool `json:"stun"` - OSType string `json:"os_type"` - OSFamily string `json:"os_family"` - OSPlatform string `json:"os_platform"` - OSName string `json:"os_name"` - OSVersion string `json:"os_version"` - CPUCores int `json:"cpu_cores"` - MemoryTotal uint64 `json:"memory_total"` - MachineID string `json:"machine_id"` - StartedAt time.Time `json:"started_at"` - ShutdownAt *time.Time `json:"shutdown_at"` -} - -type GitAuth struct { - Type string `json:"type"` + ID string `json:"id"` + Architecture string `json:"architecture"` + BuiltinPostgres bool `json:"builtin_postgres"` + Containerized bool `json:"containerized"` + Kubernetes bool `json:"kubernetes"` + Config *codersdk.DeploymentValues `json:"config"` + Tunnel bool `json:"tunnel"` + InstallSource string `json:"install_source"` + OSType string `json:"os_type"` + OSFamily string `json:"os_family"` + OSPlatform string `json:"os_platform"` + OSName string `json:"os_name"` + OSVersion string `json:"os_version"` + CPUCores int `json:"cpu_cores"` + MemoryTotal uint64 `json:"memory_total"` + MachineID string `json:"machine_id"` + StartedAt time.Time `json:"started_at"` + ShutdownAt *time.Time `json:"shutdown_at"` } type APIKey struct { @@ -832,6 +838,21 @@ type User struct { Status database.UserStatus `json:"status"` } +type Group struct { + ID uuid.UUID `json:"id"` + Name string `json:"name"` + OrganizationID uuid.UUID `json:"organization_id"` + AvatarURL string `json:"avatar_url"` + QuotaAllowance int32 `json:"quota_allowance"` + DisplayName string `json:"display_name"` + Source database.GroupSource `json:"source"` +} + +type GroupMember struct { + UserID uuid.UUID `json:"user_id"` + GroupID uuid.UUID `json:"group_id"` +} + type WorkspaceResource struct { ID uuid.UUID `json:"id"` CreatedAt time.Time `json:"created_at"` @@ -985,11 +1006,8 @@ type ExternalProvisioner struct { ShutdownAt *time.Time `json:"shutdown_at"` } -type Experiment struct { - Name string `json:"name"` -} - type noopReporter struct{} func (*noopReporter) Report(_ *Snapshot) {} +func (*noopReporter) Enabled() bool { return false } func (*noopReporter) Close() {} diff --git a/coderd/telemetry/telemetry_test.go b/coderd/telemetry/telemetry_test.go index 4661a4f8f21bf..2eff919ddc63d 100644 --- a/coderd/telemetry/telemetry_test.go +++ b/coderd/telemetry/telemetry_test.go @@ -55,6 +55,8 @@ func TestTelemetry(t *testing.T) { SharingLevel: database.AppSharingLevelOwner, Health: database.WorkspaceAppHealthDisabled, }) + _ = dbgen.Group(t, db, database.Group{}) + _ = dbgen.GroupMember(t, db, database.GroupMember{}) wsagent := dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{}) // Update the workspace agent to have a valid subsystem. 
err = db.UpdateWorkspaceAgentStartupByID(ctx, database.UpdateWorkspaceAgentStartupByIDParams{ @@ -91,6 +93,8 @@ func TestTelemetry(t *testing.T) { require.Len(t, snapshot.Templates, 1) require.Len(t, snapshot.TemplateVersions, 1) require.Len(t, snapshot.Users, 1) + require.Len(t, snapshot.Groups, 2) + require.Len(t, snapshot.GroupMembers, 1) require.Len(t, snapshot.Workspaces, 1) require.Len(t, snapshot.WorkspaceApps, 1) require.Len(t, snapshot.WorkspaceAgents, 1) @@ -114,17 +118,6 @@ func TestTelemetry(t *testing.T) { require.Len(t, snapshot.Users, 1) require.Equal(t, snapshot.Users[0].EmailHashed, "bb44bf07cf9a2db0554bba63a03d822c927deae77df101874496df5a6a3e896d@coder.com") }) - t.Run("Experiments", func(t *testing.T) { - t.Parallel() - - const expName = "my-experiment" - exps := []string{expName} - _, snapshot := collectSnapshot(t, dbmem.New(), func(opts telemetry.Options) telemetry.Options { - opts.Experiments = exps - return opts - }) - require.Equal(t, []telemetry.Experiment{{Name: expName}}, snapshot.Experiments) - }) } // nolint:paralleltest diff --git a/coderd/templates.go b/coderd/templates.go index b4c546814737e..3027321fdbba2 100644 --- a/coderd/templates.go +++ b/coderd/templates.go @@ -435,55 +435,78 @@ func (api *API) postTemplateByOrganization(rw http.ResponseWriter, r *http.Reque // @Param organization path string true "Organization ID" format(uuid) // @Success 200 {array} codersdk.Template // @Router /organizations/{organization}/templates [get] -func (api *API) templatesByOrganization(rw http.ResponseWriter, r *http.Request) { - ctx := r.Context() - organization := httpmw.OrganizationParam(r) +func (api *API) templatesByOrganization() http.HandlerFunc { + // TODO: Should deprecate this endpoint and make it akin to /workspaces with + // a filter. There isn't a need to make the organization filter argument + // part of the query url. + // mutate the filter to only include templates from the given organization. 
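A sketch of the handler-factory pattern being introduced here: one generic list handler plus a per-route callback that narrows the filter. The types below are simplified stand-ins, not coderd's real database or codersdk types, and the in-memory list function replaces the authorized SQL query.

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

type templateFilter struct {
	OrganizationID string
	Deprecated     *bool
}

type template struct {
	Name           string `json:"name"`
	OrganizationID string `json:"organization_id"`
}

// fetchTemplates builds an http.HandlerFunc; mutate (if non-nil) can adjust
// the filter per request, e.g. to pin it to the organization in the URL.
func fetchTemplates(list func(templateFilter) []template, mutate func(r *http.Request, f *templateFilter)) http.HandlerFunc {
	return func(rw http.ResponseWriter, r *http.Request) {
		var filter templateFilter
		if v := r.URL.Query().Get("deprecated"); v != "" {
			dep := v == "true"
			filter.Deprecated = &dep
		}
		if mutate != nil {
			mutate(r, &filter)
		}
		rw.Header().Set("Content-Type", "application/json")
		_ = json.NewEncoder(rw).Encode(list(filter))
	}
}

func main() {
	all := []template{{Name: "base", OrganizationID: "org-1"}, {Name: "gpu", OrganizationID: "org-2"}}
	list := func(f templateFilter) []template {
		out := []template{}
		for _, t := range all {
			if f.OrganizationID == "" || t.OrganizationID == f.OrganizationID {
				out = append(out, t)
			}
		}
		return out
	}

	// The org-scoped route pins the filter; a plain /templates route would
	// pass mutate == nil and list everything the caller may read.
	orgScoped := fetchTemplates(list, func(_ *http.Request, f *templateFilter) {
		f.OrganizationID = "org-1"
	})

	srv := httptest.NewServer(orgScoped)
	defer srv.Close()

	resp, err := http.Get(srv.URL + "/organizations/org-1/templates")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // only org-1 templates
}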
+ return api.fetchTemplates(func(r *http.Request, arg *database.GetTemplatesWithFilterParams) { + organization := httpmw.OrganizationParam(r) + arg.OrganizationID = organization.ID + }) +} - p := httpapi.NewQueryParamParser() - values := r.URL.Query() +// @Summary Get all templates +// @ID get-all-templates +// @Security CoderSessionToken +// @Produce json +// @Tags Templates +// @Success 200 {array} codersdk.Template +// @Router /templates [get] +func (api *API) fetchTemplates(mutate func(r *http.Request, arg *database.GetTemplatesWithFilterParams)) http.HandlerFunc { + return func(rw http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + p := httpapi.NewQueryParamParser() + values := r.URL.Query() + + deprecated := sql.NullBool{} + if values.Has("deprecated") { + deprecated = sql.NullBool{ + Bool: p.Boolean(values, false, "deprecated"), + Valid: true, + } + } + if len(p.Errors) > 0 { + httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ + Message: "Invalid query params.", + Validations: p.Errors, + }) + return + } - deprecated := sql.NullBool{} - if values.Has("deprecated") { - deprecated = sql.NullBool{ - Bool: p.Boolean(values, false, "deprecated"), - Valid: true, + prepared, err := api.HTTPAuth.AuthorizeSQLFilter(r, policy.ActionRead, rbac.ResourceTemplate.Type) + if err != nil { + httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ + Message: "Internal error preparing sql filter.", + Detail: err.Error(), + }) + return } - } - if len(p.Errors) > 0 { - httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ - Message: "Invalid query params.", - Validations: p.Errors, - }) - return - } - prepared, err := api.HTTPAuth.AuthorizeSQLFilter(r, policy.ActionRead, rbac.ResourceTemplate.Type) - if err != nil { - httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ - Message: "Internal error preparing sql filter.", - Detail: err.Error(), - }) - return - } + args := database.GetTemplatesWithFilterParams{ + Deprecated: deprecated, + } + if mutate != nil { + mutate(r, &args) + } - // Filter templates based on rbac permissions - templates, err := api.Database.GetAuthorizedTemplates(ctx, database.GetTemplatesWithFilterParams{ - OrganizationID: organization.ID, - Deprecated: deprecated, - }, prepared) - if errors.Is(err, sql.ErrNoRows) { - err = nil - } + // Filter templates based on rbac permissions + templates, err := api.Database.GetAuthorizedTemplates(ctx, args, prepared) + if errors.Is(err, sql.ErrNoRows) { + err = nil + } - if err != nil { - httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ - Message: "Internal error fetching templates in organization.", - Detail: err.Error(), - }) - return - } + if err != nil { + httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ + Message: "Internal error fetching templates in organization.", + Detail: err.Error(), + }) + return + } - httpapi.Write(ctx, rw, http.StatusOK, api.convertTemplates(templates)) + httpapi.Write(ctx, rw, http.StatusOK, api.convertTemplates(templates)) + } } // @Summary Get templates by organization and template name diff --git a/coderd/templates_test.go b/coderd/templates_test.go index 01b3462f603c3..2813f713f5ea2 100644 --- a/coderd/templates_test.go +++ b/coderd/templates_test.go @@ -37,8 +37,7 @@ func TestTemplate(t *testing.T) { version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil) template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID) - ctx, cancel := 
context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() + ctx := testutil.Context(t, testutil.WaitLong) _, err := client.Template(ctx, template.ID) require.NoError(t, err) @@ -63,8 +62,7 @@ func TestPostTemplateByOrganization(t *testing.T) { }) assert.Equal(t, (3 * time.Hour).Milliseconds(), expected.ActivityBumpMillis) - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() + ctx := testutil.Context(t, testutil.WaitLong) got, err := user.Template(ctx, expected.ID) require.NoError(t, err) @@ -86,8 +84,7 @@ func TestPostTemplateByOrganization(t *testing.T) { version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil) template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID) - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() + ctx := testutil.Context(t, testutil.WaitLong) _, err := client.CreateTemplate(ctx, user.OrganizationID, codersdk.CreateTemplateRequest{ Name: template.Name, @@ -98,15 +95,30 @@ func TestPostTemplateByOrganization(t *testing.T) { require.Equal(t, http.StatusConflict, apiErr.StatusCode()) }) - t.Run("DefaultTTLTooLow", func(t *testing.T) { + t.Run("ReservedName", func(t *testing.T) { t.Parallel() client := coderdtest.New(t, nil) user := coderdtest.CreateFirstUser(t, client) version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil) - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() + ctx := testutil.Context(t, testutil.WaitShort) + _, err := client.CreateTemplate(ctx, user.OrganizationID, codersdk.CreateTemplateRequest{ + Name: "new", + VersionID: version.ID, + }) + var apiErr *codersdk.Error + require.ErrorAs(t, err, &apiErr) + require.Equal(t, http.StatusBadRequest, apiErr.StatusCode()) + }) + + t.Run("DefaultTTLTooLow", func(t *testing.T) { + t.Parallel() + client := coderdtest.New(t, nil) + user := coderdtest.CreateFirstUser(t, client) + version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil) + + ctx := testutil.Context(t, testutil.WaitLong) _, err := client.CreateTemplate(ctx, user.OrganizationID, codersdk.CreateTemplateRequest{ Name: "testing", VersionID: version.ID, @@ -124,9 +136,7 @@ func TestPostTemplateByOrganization(t *testing.T) { user := coderdtest.CreateFirstUser(t, client) version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil) - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() - + ctx := testutil.Context(t, testutil.WaitLong) got, err := client.CreateTemplate(ctx, user.OrganizationID, codersdk.CreateTemplateRequest{ Name: "testing", VersionID: version.ID, @@ -143,15 +153,13 @@ func TestPostTemplateByOrganization(t *testing.T) { owner := coderdtest.CreateFirstUser(t, client) user, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID) version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, nil) - expected := coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID, func(request *codersdk.CreateTemplateRequest) { request.DisableEveryoneGroupAccess = true }) - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() - + ctx := testutil.Context(t, testutil.WaitLong) _, err := user.Template(ctx, expected.ID) + var apiErr *codersdk.Error require.ErrorAs(t, err, &apiErr) require.Equal(t, http.StatusNotFound, apiErr.StatusCode()) @@ -161,9 +169,7 @@ func 
TestPostTemplateByOrganization(t *testing.T) { t.Parallel() client := coderdtest.New(t, nil) - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() - + ctx := testutil.Context(t, testutil.WaitLong) _, err := client.CreateTemplate(ctx, uuid.New(), codersdk.CreateTemplateRequest{ Name: "test", VersionID: uuid.New(), @@ -241,8 +247,7 @@ func TestPostTemplateByOrganization(t *testing.T) { client := coderdtest.New(t, nil) user := coderdtest.CreateFirstUser(t, client) - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() + ctx := testutil.Context(t, testutil.WaitLong) _, err := client.CreateTemplate(ctx, user.OrganizationID, codersdk.CreateTemplateRequest{ Name: "test", @@ -398,8 +403,7 @@ func TestTemplatesByOrganization(t *testing.T) { client := coderdtest.New(t, nil) user := coderdtest.CreateFirstUser(t, client) - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() + ctx := testutil.Context(t, testutil.WaitLong) templates, err := client.TemplatesByOrganization(ctx, user.OrganizationID) require.NoError(t, err) @@ -414,8 +418,7 @@ func TestTemplatesByOrganization(t *testing.T) { version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil) coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID) - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() + ctx := testutil.Context(t, testutil.WaitLong) templates, err := client.TemplatesByOrganization(ctx, user.OrganizationID) require.NoError(t, err) @@ -430,12 +433,47 @@ func TestTemplatesByOrganization(t *testing.T) { coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID) coderdtest.CreateTemplate(t, client, user.OrganizationID, version2.ID) - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() + ctx := testutil.Context(t, testutil.WaitLong) templates, err := client.TemplatesByOrganization(ctx, user.OrganizationID) require.NoError(t, err) require.Len(t, templates, 2) + + // Listing all should match + templates, err = client.Templates(ctx) + require.NoError(t, err) + require.Len(t, templates, 2) + }) + t.Run("MultipleOrganizations", func(t *testing.T) { + t.Parallel() + client := coderdtest.New(t, nil) + owner := coderdtest.CreateFirstUser(t, client) + org2 := coderdtest.CreateOrganization(t, client, coderdtest.CreateOrganizationOptions{}) + user, _ := coderdtest.CreateAnotherUser(t, client, org2.ID) + + // 2 templates in first organization + version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, nil) + version2 := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, nil) + coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID) + coderdtest.CreateTemplate(t, client, owner.OrganizationID, version2.ID) + + // 2 in the second organization + version3 := coderdtest.CreateTemplateVersion(t, client, org2.ID, nil) + version4 := coderdtest.CreateTemplateVersion(t, client, org2.ID, nil) + coderdtest.CreateTemplate(t, client, org2.ID, version3.ID) + coderdtest.CreateTemplate(t, client, org2.ID, version4.ID) + + ctx := testutil.Context(t, testutil.WaitLong) + + // All 4 are viewable by the owner + templates, err := client.Templates(ctx) + require.NoError(t, err) + require.Len(t, templates, 4) + + // Only 2 are viewable by the org user + templates, err = user.Templates(ctx) + require.NoError(t, err) + require.Len(t, templates, 2) }) } @@ -446,8 +484,7 @@ func 
TestTemplateByOrganizationAndName(t *testing.T) { client := coderdtest.New(t, nil) user := coderdtest.CreateFirstUser(t, client) - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() + ctx := testutil.Context(t, testutil.WaitLong) _, err := client.TemplateByName(ctx, user.OrganizationID, "something") var apiErr *codersdk.Error @@ -462,8 +499,7 @@ func TestTemplateByOrganizationAndName(t *testing.T) { version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil) template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID) - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() + ctx := testutil.Context(t, testutil.WaitLong) _, err := client.TemplateByName(ctx, user.OrganizationID, template.Name) require.NoError(t, err) @@ -497,8 +533,7 @@ func TestPatchTemplateMeta(t *testing.T) { // updatedAt is too close together. time.Sleep(time.Millisecond * 5) - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() + ctx := testutil.Context(t, testutil.WaitLong) updated, err := client.UpdateTemplateMeta(ctx, template.ID, req) require.NoError(t, err) @@ -542,8 +577,7 @@ func TestPatchTemplateMeta(t *testing.T) { DeprecationMessage: ptr.Ref("APGL cannot deprecate"), } - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() + ctx := testutil.Context(t, testutil.WaitLong) updated, err := client.UpdateTemplateMeta(ctx, template.ID, req) require.NoError(t, err) @@ -566,8 +600,8 @@ func TestPatchTemplateMeta(t *testing.T) { // updatedAt is too close together. time.Sleep(time.Millisecond * 5) - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() + ctx := testutil.Context(t, testutil.WaitLong) + // nolint:gocritic // Setting up unit test data err := db.UpdateTemplateAccessControlByID(dbauthz.As(ctx, coderdtest.AuthzUserSubject(tplAdmin, user.OrganizationID)), database.UpdateTemplateAccessControlByIDParams{ ID: template.ID, @@ -607,8 +641,7 @@ func TestPatchTemplateMeta(t *testing.T) { MaxPortShareLevel: &level, } - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() + ctx := testutil.Context(t, testutil.WaitLong) _, err := client.UpdateTemplateMeta(ctx, template.ID, req) // AGPL cannot change max port sharing level @@ -643,8 +676,7 @@ func TestPatchTemplateMeta(t *testing.T) { // We're too fast! 
Sleep so we can be sure that updatedAt is greater time.Sleep(time.Millisecond * 5) - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() + ctx := testutil.Context(t, testutil.WaitLong) _, err := client.UpdateTemplateMeta(ctx, template.ID, req) require.NoError(t, err) @@ -675,8 +707,7 @@ func TestPatchTemplateMeta(t *testing.T) { DefaultTTLMillis: -1, } - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() + ctx := testutil.Context(t, testutil.WaitLong) _, err := client.UpdateTemplateMeta(ctx, template.ID, req) require.ErrorContains(t, err, "default_ttl_ms: Must be a positive integer") @@ -886,8 +917,7 @@ func TestPatchTemplateMeta(t *testing.T) { ctr.DefaultTTLMillis = ptr.Ref(24 * time.Hour.Milliseconds()) }) - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() + ctx := testutil.Context(t, testutil.WaitLong) req := codersdk.UpdateTemplateMeta{ Name: template.Name, @@ -921,8 +951,7 @@ func TestPatchTemplateMeta(t *testing.T) { ctr.DefaultTTLMillis = ptr.Ref(24 * time.Hour.Milliseconds()) }) - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() + ctx := testutil.Context(t, testutil.WaitLong) req := codersdk.UpdateTemplateMeta{ DefaultTTLMillis: -int64(time.Hour), @@ -956,8 +985,7 @@ func TestPatchTemplateMeta(t *testing.T) { Icon: "", } - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() + ctx := testutil.Context(t, testutil.WaitLong) updated, err := client.UpdateTemplateMeta(ctx, template.ID, req) require.NoError(t, err) @@ -1164,8 +1192,7 @@ func TestDeleteTemplate(t *testing.T) { template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID) coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID) - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() + ctx := testutil.Context(t, testutil.WaitLong) err := client.DeleteTemplate(ctx, template.ID) require.NoError(t, err) @@ -1183,8 +1210,7 @@ func TestDeleteTemplate(t *testing.T) { coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID) coderdtest.CreateWorkspace(t, client, user.OrganizationID, template.ID) - ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) - defer cancel() + ctx := testutil.Context(t, testutil.WaitLong) err := client.DeleteTemplate(ctx, template.ID) var apiErr *codersdk.Error diff --git a/coderd/templateversions.go b/coderd/templateversions.go index 788a01ba353b1..1c9131ef0d17c 100644 --- a/coderd/templateversions.go +++ b/coderd/templateversions.go @@ -353,21 +353,16 @@ func (api *API) templateVersionExternalAuth(rw http.ResponseWriter, r *http.Requ return } - _, updated, err := config.RefreshToken(ctx, api.Database, authLink) - if err != nil { + _, err = config.RefreshToken(ctx, api.Database, authLink) + if err != nil && !externalauth.IsInvalidTokenError(err) { httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ Message: "Failed to refresh external auth token.", Detail: err.Error(), }) return } - // If the token couldn't be validated, then we assume the user isn't - // authenticated and return early. 
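The test edits above repeatedly replace manual context.WithTimeout/defer cancel pairs with a single testutil.Context call. A hypothetical equivalent of such a helper (the real testutil.Context may differ) ties cancellation to test cleanup:

package testhelpers

import (
	"context"
	"testing"
	"time"
)

// Context returns a context that is cancelled automatically when the test
// finishes, so callers no longer need `defer cancel()` at every call site.
func Context(t testing.TB, timeout time.Duration) context.Context {
	t.Helper()
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	t.Cleanup(cancel)
	return ctx
}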
- if !updated { - providers = append(providers, provider) - continue - } - provider.Authenticated = true + + provider.Authenticated = err == nil providers = append(providers, provider) } diff --git a/coderd/userauth.go b/coderd/userauth.go index 3f341db65bcb1..c7550b89d05f7 100644 --- a/coderd/userauth.go +++ b/coderd/userauth.go @@ -231,7 +231,7 @@ func (api *API) postLogin(rw http.ResponseWriter, r *http.Request) { return } - user, roles, ok := api.loginRequest(ctx, rw, loginWithPassword) + user, actor, ok := api.loginRequest(ctx, rw, loginWithPassword) // 'user.ID' will be empty, or will be an actual value. Either is correct // here. aReq.UserID = user.ID @@ -240,15 +240,8 @@ func (api *API) postLogin(rw http.ResponseWriter, r *http.Request) { return } - userSubj := rbac.Subject{ - ID: user.ID.String(), - Roles: rbac.RoleNames(roles.Roles), - Groups: roles.Groups, - Scope: rbac.ScopeAll, - } - //nolint:gocritic // Creating the API key as the user instead of as system. - cookie, key, err := api.createAPIKey(dbauthz.As(ctx, userSubj), apikey.CreateParams{ + cookie, key, err := api.createAPIKey(dbauthz.As(ctx, actor), apikey.CreateParams{ UserID: user.ID, LoginType: database.LoginTypePassword, RemoteAddr: r.RemoteAddr, @@ -278,7 +271,7 @@ func (api *API) postLogin(rw http.ResponseWriter, r *http.Request) { // // The user struct is always returned, even if authentication failed. This is // to support knowing what user attempted to login. -func (api *API) loginRequest(ctx context.Context, rw http.ResponseWriter, req codersdk.LoginWithPasswordRequest) (database.User, database.GetAuthorizationUserRolesRow, bool) { +func (api *API) loginRequest(ctx context.Context, rw http.ResponseWriter, req codersdk.LoginWithPasswordRequest) (database.User, rbac.Subject, bool) { logger := api.Logger.Named(userAuthLoggerName) //nolint:gocritic // In order to login, we need to get the user first! @@ -290,7 +283,7 @@ func (api *API) loginRequest(ctx context.Context, rw http.ResponseWriter, req co httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ Message: "Internal error.", }) - return user, database.GetAuthorizationUserRolesRow{}, false + return user, rbac.Subject{}, false } // If the user doesn't exist, it will be a default struct. 
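A standalone sketch of the error-classification change in templateVersionExternalAuth above: only unexpected failures become a 500 response, while an invalid or expired token simply leaves the provider marked unauthenticated. The error and helper names here are illustrative stand-ins for the externalauth package's real ones.

package main

import (
	"errors"
	"fmt"
)

var errInvalidToken = errors.New("external auth token is invalid or expired")

func refreshToken(valid bool) error {
	if !valid {
		return fmt.Errorf("refresh: %w", errInvalidToken)
	}
	return nil
}

type provider struct {
	ID            string
	Authenticated bool
}

func checkProvider(id string, tokenValid bool) (provider, error) {
	err := refreshToken(tokenValid)
	if err != nil && !errors.Is(err, errInvalidToken) {
		// Anything other than an invalid token is a real server-side problem.
		return provider{}, err
	}
	// Invalid token => err != nil => Authenticated stays false.
	return provider{ID: id, Authenticated: err == nil}, nil
}

func main() {
	p, _ := checkProvider("github", false)
	fmt.Printf("%s authenticated: %v\n", p.ID, p.Authenticated)
}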
@@ -300,7 +293,7 @@ func (api *API) loginRequest(ctx context.Context, rw http.ResponseWriter, req co httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ Message: "Internal error.", }) - return user, database.GetAuthorizationUserRolesRow{}, false + return user, rbac.Subject{}, false } if !equal { @@ -309,7 +302,7 @@ func (api *API) loginRequest(ctx context.Context, rw http.ResponseWriter, req co httpapi.Write(ctx, rw, http.StatusUnauthorized, codersdk.Response{ Message: "Incorrect email or password.", }) - return user, database.GetAuthorizationUserRolesRow{}, false + return user, rbac.Subject{}, false } // If password authentication is disabled and the user does not have the @@ -318,14 +311,14 @@ func (api *API) loginRequest(ctx context.Context, rw http.ResponseWriter, req co httpapi.Write(ctx, rw, http.StatusForbidden, codersdk.Response{ Message: "Password authentication is disabled.", }) - return user, database.GetAuthorizationUserRolesRow{}, false + return user, rbac.Subject{}, false } if user.LoginType != database.LoginTypePassword { httpapi.Write(ctx, rw, http.StatusForbidden, codersdk.Response{ Message: fmt.Sprintf("Incorrect login type, attempting to use %q but user is of login type %q", database.LoginTypePassword, user.LoginType), }) - return user, database.GetAuthorizationUserRolesRow{}, false + return user, rbac.Subject{}, false } if user.Status == database.UserStatusDormant { @@ -340,29 +333,28 @@ func (api *API) loginRequest(ctx context.Context, rw http.ResponseWriter, req co httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ Message: "Internal error occurred. Try again later, or contact an admin for assistance.", }) - return user, database.GetAuthorizationUserRolesRow{}, false + return user, rbac.Subject{}, false } } - //nolint:gocritic // System needs to fetch user roles in order to login user. - roles, err := api.Database.GetAuthorizationUserRoles(dbauthz.AsSystemRestricted(ctx), user.ID) + subject, userStatus, err := httpmw.UserRBACSubject(ctx, api.Database, user.ID, rbac.ScopeAll) if err != nil { logger.Error(ctx, "unable to fetch authorization user roles", slog.Error(err)) httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ Message: "Internal error.", }) - return user, database.GetAuthorizationUserRolesRow{}, false + return user, rbac.Subject{}, false } // If the user logged into a suspended account, reject the login request. - if roles.Status != database.UserStatusActive { + if userStatus != database.UserStatusActive { httpapi.Write(ctx, rw, http.StatusUnauthorized, codersdk.Response{ - Message: fmt.Sprintf("Your account is %s. Contact an admin to reactivate your account.", roles.Status), + Message: fmt.Sprintf("Your account is %s. Contact an admin to reactivate your account.", userStatus), }) - return user, database.GetAuthorizationUserRolesRow{}, false + return user, rbac.Subject{}, false } - return user, roles, true + return user, subject, true } // Clear the user's session cookie. @@ -607,6 +599,9 @@ func (api *API) userOAuth2Github(rw http.ResponseWriter, r *http.Request) { return } + ghName := ghUser.GetName() + normName := httpapi.NormalizeRealUsername(ghName) + // If we have a nil GitHub ID, that is a big problem. That would mean we link // this user and all other users with this bug to the same uuid. // We should instead throw an error. This should never occur in production. 
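loginRequest now hands back a ready-to-use RBAC subject instead of a raw roles row, with the roles/groups lookup and the suspended-account check centralized in one helper. A simplified sketch of that shape, using stand-in types rather than coderd's rbac and httpmw packages:

package main

import "fmt"

type userStatus string

const statusActive userStatus = "active"

type subject struct {
	ID     string
	Roles  []string
	Groups []string
	Scope  string
}

type authzRow struct {
	Roles  []string
	Groups []string
	Status userStatus
}

// userRBACSubject looks up roles/groups once and returns both the subject
// and the account status so callers can reject suspended users.
func userRBACSubject(lookup func(id string) (authzRow, error), id, scope string) (subject, userStatus, error) {
	row, err := lookup(id)
	if err != nil {
		return subject{}, "", fmt.Errorf("fetch authorization roles: %w", err)
	}
	return subject{ID: id, Roles: row.Roles, Groups: row.Groups, Scope: scope}, row.Status, nil
}

func main() {
	lookup := func(string) (authzRow, error) {
		return authzRow{Roles: []string{"member"}, Status: statusActive}, nil
	}
	subj, status, err := userRBACSubject(lookup, "user-1", "all")
	if err != nil {
		fmt.Println("login failed:", err)
		return
	}
	if status != statusActive {
		fmt.Println("account is", status, "- rejecting login")
		return
	}
	fmt.Printf("issuing API key as subject %+v\n", subj)
}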
@@ -641,7 +636,15 @@ func (api *API) userOAuth2Github(rw http.ResponseWriter, r *http.Request) { if user.ID == uuid.Nil { aReq.Action = database.AuditActionRegister } - + // See: https://github.com/coder/coder/discussions/13340 + // In GitHub Enterprise, admins are permitted to have `_` + // in their usernames. This is janky, but much better + // than changing the username format globally. + username := ghUser.GetLogin() + if strings.Contains(username, "_") { + api.Logger.Warn(ctx, "login associates a github username that contains underscores. underscores are not permitted in usernames, replacing with `-`", slog.F("username", username)) + username = strings.ReplaceAll(username, "_", "-") + } params := (&oauthLoginParams{ User: user, Link: link, @@ -650,8 +653,9 @@ func (api *API) userOAuth2Github(rw http.ResponseWriter, r *http.Request) { LoginType: database.LoginTypeGithub, AllowSignups: api.GithubOAuth2Config.AllowSignups, Email: verifiedEmail.GetEmail(), - Username: ghUser.GetLogin(), + Username: username, AvatarURL: ghUser.GetAvatarURL(), + Name: normName, DebugContext: OauthDebugContext{}, }).SetInitAuditRequest(func(params *audit.RequestParams) (*audit.Request[database.User], func()) { return audit.InitRequest[database.User](rw, params) @@ -701,6 +705,9 @@ type OIDCConfig struct { // EmailField selects the claim field to be used as the created user's // email. EmailField string + // NameField selects the claim field to be used as the created user's + // full / given name. + NameField string // AuthURLParams are additional parameters to be passed to the OIDC provider // when requesting an access token. AuthURLParams map[string]string @@ -939,6 +946,8 @@ func (api *API) userOIDC(rw http.ResponseWriter, r *http.Request) { } userEmailDomain := emailSp[len(emailSp)-1] for _, domain := range api.OIDCConfig.EmailDomain { + // Folks sometimes enter EmailDomain with a leading '@'. + domain = strings.TrimPrefix(domain, "@") if strings.EqualFold(userEmailDomain, domain) { ok = true break @@ -952,13 +961,22 @@ func (api *API) userOIDC(rw http.ResponseWriter, r *http.Request) { } } + // The 'name' is an optional property in Coder. If not specified, + // it will be left blank. + var name string + nameRaw, ok := mergedClaims[api.OIDCConfig.NameField] + if ok { + name, _ = nameRaw.(string) + name = httpapi.NormalizeRealUsername(name) + } + var picture string pictureRaw, ok := mergedClaims["picture"] if ok { picture, _ = pictureRaw.(string) } - ctx = slog.With(ctx, slog.F("email", email), slog.F("username", username)) + ctx = slog.With(ctx, slog.F("email", email), slog.F("username", username), slog.F("name", name)) usingGroups, groups, groupErr := api.oidcGroups(ctx, mergedClaims) if groupErr != nil { groupErr.Write(rw, r) @@ -996,6 +1014,7 @@ func (api *API) userOIDC(rw http.ResponseWriter, r *http.Request) { AllowSignups: api.OIDCConfig.AllowSignups, Email: email, Username: username, + Name: name, AvatarURL: picture, UsingRoles: api.OIDCConfig.RoleSyncEnabled(), Roles: roles, @@ -1222,6 +1241,7 @@ type oauthLoginParams struct { AllowSignups bool Email string Username string + Name string AvatarURL string // Is UsingGroups is true, then the user will be assigned // to the Groups provided. @@ -1486,15 +1506,18 @@ func (api *API) oauthLogin(r *http.Request, params *oauthLoginParams) ([]*http.C } //nolint:gocritic // No user present in the context. 
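The login path above now normalizes identity-provider data before use: GitHub Enterprise logins may contain "_" (mapped to "-"), display names are trimmed and capped rather than rejected, and EmailDomain entries tolerate a leading "@". A sketch with hypothetical helper names (coderd's own versions, such as httpapi.NormalizeRealUsername, may differ in detail, e.g. truncating by runes rather than bytes):

package main

import (
	"fmt"
	"strings"
)

const maxNameLength = 128

// normalizeUsername replaces characters Coder does not allow in usernames.
func normalizeUsername(login string) string {
	return strings.ReplaceAll(login, "_", "-")
}

// normalizeRealName trims surrounding whitespace and truncates overly long
// display names instead of failing the login.
func normalizeRealName(name string) string {
	name = strings.TrimSpace(name)
	if len(name) > maxNameLength {
		name = name[:maxNameLength]
	}
	return name
}

// emailDomainAllowed compares case-insensitively and ignores a leading "@"
// that operators sometimes include in the allow-list.
func emailDomainAllowed(email string, allowed []string) bool {
	parts := strings.Split(email, "@")
	domain := parts[len(parts)-1]
	for _, d := range allowed {
		if strings.EqualFold(domain, strings.TrimPrefix(d, "@")) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(normalizeUsername("mathias_coder"))                           // mathias-coder
	fmt.Println(normalizeRealName("  Bobby Whitespace  "))                    // Bobby Whitespace
	fmt.Println(emailDomainAllowed("cian@coder.com", []string{"@coder.com"})) // true
}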
- memberships, err := tx.GetOrganizationMembershipsByUserID(dbauthz.AsSystemRestricted(ctx), user.ID) + memberships, err := tx.OrganizationMembers(dbauthz.AsSystemRestricted(ctx), database.OrganizationMembersParams{ + UserID: user.ID, + OrganizationID: uuid.Nil, + }) if err != nil { return xerrors.Errorf("get organization memberships: %w", err) } // If the user is not in the default organization, then we can't assign groups. // A user cannot be in groups to an org they are not a member of. - if !slices.ContainsFunc(memberships, func(member database.OrganizationMember) bool { - return member.OrganizationID == defaultOrganization.ID + if !slices.ContainsFunc(memberships, func(member database.OrganizationMembersRow) bool { + return member.OrganizationMember.OrganizationID == defaultOrganization.ID }) { return xerrors.Errorf("user %s is not a member of the default organization, cannot assign to groups in the org", user.ID) } @@ -1513,7 +1536,9 @@ func (api *API) oauthLogin(r *http.Request, params *oauthLoginParams) ([]*http.C ignored := make([]string, 0) filtered := make([]string, 0, len(params.Roles)) for _, role := range params.Roles { - if _, err := rbac.RoleByName(role); err == nil { + // TODO: This only supports mapping deployment wide roles. Organization scoped roles + // are unsupported. + if _, err := rbac.RoleByName(rbac.RoleIdentifier{Name: role}); err == nil { filtered = append(filtered, role) } else { ignored = append(ignored, role) @@ -1544,6 +1569,10 @@ func (api *API) oauthLogin(r *http.Request, params *oauthLoginParams) ([]*http.C user.AvatarURL = params.AvatarURL needsUpdate = true } + if user.Name != params.Name { + user.Name = params.Name + needsUpdate = true + } // If the upstream email or username has changed we should mirror // that in Coder. 
Many enterprises use a user's email/username as diff --git a/coderd/userauth_test.go b/coderd/userauth_test.go index f1adbfe869610..bc556fe604ebe 100644 --- a/coderd/userauth_test.go +++ b/coderd/userauth_test.go @@ -16,6 +16,7 @@ import ( "github.com/google/go-github/v43/github" "github.com/google/uuid" "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/xerrors" @@ -213,6 +214,7 @@ func TestUserOAuth2Github(t *testing.T) { return &github.User{ ID: github.Int64(100), Login: github.String("kyle"), + Name: github.String("Kylium Carbonate"), }, nil }, TeamMembership: func(ctx context.Context, client *http.Client, org, team, username string) (*github.Membership, error) { @@ -272,7 +274,9 @@ func TestUserOAuth2Github(t *testing.T) { }, AuthenticatedUser: func(ctx context.Context, client *http.Client) (*github.User, error) { return &github.User{ - ID: github.Int64(100), + ID: github.Int64(100), + Login: github.String("testuser"), + Name: github.String("The Right Honorable Sir Test McUser"), }, nil }, ListEmails: func(ctx context.Context, client *http.Client) ([]*github.UserEmail, error) { @@ -305,7 +309,9 @@ func TestUserOAuth2Github(t *testing.T) { }, AuthenticatedUser: func(ctx context.Context, client *http.Client) (*github.User, error) { return &github.User{ - ID: github.Int64(100), + ID: github.Int64(100), + Login: github.String("testuser"), + Name: github.String("The Right Honorable Sir Test McUser"), }, nil }, ListEmails: func(ctx context.Context, client *http.Client) ([]*github.UserEmail, error) { @@ -346,9 +352,10 @@ func TestUserOAuth2Github(t *testing.T) { }, AuthenticatedUser: func(ctx context.Context, _ *http.Client) (*github.User, error) { return &github.User{ - Login: github.String("kyle"), - ID: i64ptr(1234), AvatarURL: github.String("/hello-world"), + ID: i64ptr(1234), + Login: github.String("kyle"), + Name: github.String("Kylium Carbonate"), }, nil }, ListEmails: func(ctx context.Context, client *http.Client) ([]*github.UserEmail, error) { @@ -372,6 +379,60 @@ func TestUserOAuth2Github(t *testing.T) { require.NoError(t, err) require.Equal(t, "kyle@coder.com", user.Email) require.Equal(t, "kyle", user.Username) + require.Equal(t, "Kylium Carbonate", user.Name) + require.Equal(t, "/hello-world", user.AvatarURL) + + require.Len(t, auditor.AuditLogs(), numLogs) + require.NotEqual(t, auditor.AuditLogs()[numLogs-1].UserID, uuid.Nil) + require.Equal(t, database.AuditActionRegister, auditor.AuditLogs()[numLogs-1].Action) + }) + t.Run("SignupWeirdName", func(t *testing.T) { + t.Parallel() + auditor := audit.NewMock() + client := coderdtest.New(t, &coderdtest.Options{ + Auditor: auditor, + GithubOAuth2Config: &coderd.GithubOAuth2Config{ + OAuth2Config: &testutil.OAuth2Config{}, + AllowOrganizations: []string{"coder"}, + AllowSignups: true, + ListOrganizationMemberships: func(_ context.Context, _ *http.Client) ([]*github.Membership, error) { + return []*github.Membership{{ + State: &stateActive, + Organization: &github.Organization{ + Login: github.String("coder"), + }, + }}, nil + }, + AuthenticatedUser: func(_ context.Context, _ *http.Client) (*github.User, error) { + return &github.User{ + AvatarURL: github.String("/hello-world"), + ID: i64ptr(1234), + Login: github.String("kyle"), + Name: github.String(" " + strings.Repeat("a", 129) + " "), + }, nil + }, + ListEmails: func(_ context.Context, _ *http.Client) ([]*github.UserEmail, error) { + return []*github.UserEmail{{ + Email: 
github.String("kyle@coder.com"), + Verified: github.Bool(true), + Primary: github.Bool(true), + }}, nil + }, + }, + }) + numLogs := len(auditor.AuditLogs()) + + resp := oauth2Callback(t, client) + numLogs++ // add an audit log for login + + require.Equal(t, http.StatusTemporaryRedirect, resp.StatusCode) + + client.SetSessionToken(authCookieValue(resp.Cookies())) + user, err := client.User(context.Background(), "me") + require.NoError(t, err) + require.Equal(t, "kyle@coder.com", user.Email) + require.Equal(t, "kyle", user.Username) + require.Equal(t, strings.Repeat("a", 128), user.Name) require.Equal(t, "/hello-world", user.AvatarURL) require.Len(t, auditor.AuditLogs(), numLogs) @@ -401,8 +462,10 @@ func TestUserOAuth2Github(t *testing.T) { }, AuthenticatedUser: func(ctx context.Context, client *http.Client) (*github.User, error) { return &github.User{ - ID: github.Int64(100), - Login: github.String("kyle"), + AvatarURL: github.String("/hello-world"), + ID: github.Int64(100), + Login: github.String("kyle"), + Name: github.String("Kylium Carbonate"), }, nil }, ListEmails: func(ctx context.Context, client *http.Client) ([]*github.UserEmail, error) { @@ -419,10 +482,19 @@ func TestUserOAuth2Github(t *testing.T) { resp := oauth2Callback(t, client) numLogs++ // add an audit log for login + client.SetSessionToken(authCookieValue(resp.Cookies())) + user, err := client.User(context.Background(), "me") + require.NoError(t, err) + require.Equal(t, "kyle@coder.com", user.Email) + require.Equal(t, "kyle", user.Username) + require.Equal(t, "Kylium Carbonate", user.Name) + require.Equal(t, "/hello-world", user.AvatarURL) + require.Equal(t, http.StatusTemporaryRedirect, resp.StatusCode) require.Len(t, auditor.AuditLogs(), numLogs) require.Equal(t, database.AuditActionRegister, auditor.AuditLogs()[numLogs-1].Action) }) + // nolint: dupl t.Run("SignupAllowedTeamInFirstOrganization", func(t *testing.T) { t.Parallel() auditor := audit.NewMock() @@ -456,6 +528,7 @@ func TestUserOAuth2Github(t *testing.T) { return &github.User{ ID: github.Int64(100), Login: github.String("mathias"), + Name: github.String("Mathias Mathias"), }, nil }, ListEmails: func(ctx context.Context, client *http.Client) ([]*github.UserEmail, error) { @@ -472,10 +545,18 @@ func TestUserOAuth2Github(t *testing.T) { resp := oauth2Callback(t, client) numLogs++ // add an audit log for login + client.SetSessionToken(authCookieValue(resp.Cookies())) + user, err := client.User(context.Background(), "me") + require.NoError(t, err) + require.Equal(t, "mathias@coder.com", user.Email) + require.Equal(t, "mathias", user.Username) + require.Equal(t, "Mathias Mathias", user.Name) + require.Equal(t, http.StatusTemporaryRedirect, resp.StatusCode) require.Len(t, auditor.AuditLogs(), numLogs) require.Equal(t, database.AuditActionRegister, auditor.AuditLogs()[numLogs-1].Action) }) + // nolint: dupl t.Run("SignupAllowedTeamInSecondOrganization", func(t *testing.T) { t.Parallel() auditor := audit.NewMock() @@ -509,6 +590,7 @@ func TestUserOAuth2Github(t *testing.T) { return &github.User{ ID: github.Int64(100), Login: github.String("mathias"), + Name: github.String("Mathias Mathias"), }, nil }, ListEmails: func(ctx context.Context, client *http.Client) ([]*github.UserEmail, error) { @@ -525,6 +607,13 @@ func TestUserOAuth2Github(t *testing.T) { resp := oauth2Callback(t, client) numLogs++ // add an audit log for login + client.SetSessionToken(authCookieValue(resp.Cookies())) + user, err := client.User(context.Background(), "me") + require.NoError(t, err) + 
require.Equal(t, "mathias@coder.com", user.Email) + require.Equal(t, "mathias", user.Username) + require.Equal(t, "Mathias Mathias", user.Name) + require.Equal(t, http.StatusTemporaryRedirect, resp.StatusCode) require.Len(t, auditor.AuditLogs(), numLogs) require.Equal(t, database.AuditActionRegister, auditor.AuditLogs()[numLogs-1].Action) @@ -548,6 +637,7 @@ func TestUserOAuth2Github(t *testing.T) { return &github.User{ ID: github.Int64(100), Login: github.String("mathias"), + Name: github.String("Mathias Mathias"), }, nil }, ListEmails: func(ctx context.Context, client *http.Client) ([]*github.UserEmail, error) { @@ -564,10 +654,61 @@ func TestUserOAuth2Github(t *testing.T) { resp := oauth2Callback(t, client) numLogs++ // add an audit log for login + client.SetSessionToken(authCookieValue(resp.Cookies())) + user, err := client.User(context.Background(), "me") + require.NoError(t, err) + require.Equal(t, "mathias@coder.com", user.Email) + require.Equal(t, "mathias", user.Username) + require.Equal(t, "Mathias Mathias", user.Name) + require.Equal(t, http.StatusTemporaryRedirect, resp.StatusCode) require.Len(t, auditor.AuditLogs(), numLogs) require.Equal(t, database.AuditActionRegister, auditor.AuditLogs()[numLogs-1].Action) }) + t.Run("SignupReplaceUnderscores", func(t *testing.T) { + t.Parallel() + auditor := audit.NewMock() + client := coderdtest.New(t, &coderdtest.Options{ + Auditor: auditor, + GithubOAuth2Config: &coderd.GithubOAuth2Config{ + AllowSignups: true, + AllowEveryone: true, + OAuth2Config: &testutil.OAuth2Config{}, + ListOrganizationMemberships: func(_ context.Context, _ *http.Client) ([]*github.Membership, error) { + return []*github.Membership{}, nil + }, + TeamMembership: func(_ context.Context, _ *http.Client, _, _, _ string) (*github.Membership, error) { + return nil, xerrors.New("no teams") + }, + AuthenticatedUser: func(_ context.Context, _ *http.Client) (*github.User, error) { + return &github.User{ + ID: github.Int64(100), + Login: github.String("mathias_coder"), + }, nil + }, + ListEmails: func(_ context.Context, _ *http.Client) ([]*github.UserEmail, error) { + return []*github.UserEmail{{ + Email: github.String("mathias@coder.com"), + Verified: github.Bool(true), + Primary: github.Bool(true), + }}, nil + }, + }, + }) + numLogs := len(auditor.AuditLogs()) + + resp := oauth2Callback(t, client) + numLogs++ // add an audit log for login + + require.Equal(t, http.StatusTemporaryRedirect, resp.StatusCode) + require.Len(t, auditor.AuditLogs(), numLogs) + require.Equal(t, database.AuditActionRegister, auditor.AuditLogs()[numLogs-1].Action) + + client.SetSessionToken(authCookieValue(resp.Cookies())) + user, err := client.User(context.Background(), "me") + require.NoError(t, err) + require.Equal(t, "mathias-coder", user.Username) + }) t.Run("SignupFailedInactiveInOrg", func(t *testing.T) { t.Parallel() client := coderdtest.New(t, &coderdtest.Options{ @@ -591,6 +732,7 @@ func TestUserOAuth2Github(t *testing.T) { return &github.User{ ID: github.Int64(100), Login: github.String("kyle"), + Name: github.String("Kylium Carbonate"), }, nil }, ListEmails: func(ctx context.Context, client *http.Client) ([]*github.UserEmail, error) { @@ -652,6 +794,7 @@ func TestUserOAuth2Github(t *testing.T) { return &github.User{ Login: github.String("alice"), ID: github.Int64(ghID), + Name: github.String("Alice Liddell"), }, nil }, ListEmails: func(ctx context.Context, client *http.Client) ([]*github.UserEmail, error) { @@ -739,8 +882,7 @@ func TestUserOIDC(t *testing.T) { UserInfoClaims 
jwt.MapClaims AllowSignups bool EmailDomain []string - Username string - AvatarURL string + AssertUser func(t testing.TB, u codersdk.User) StatusCode int IgnoreEmailVerified bool IgnoreUserInfo bool @@ -752,7 +894,9 @@ func TestUserOIDC(t *testing.T) { }, AllowSignups: true, StatusCode: http.StatusOK, - Username: "kyle", + AssertUser: func(t testing.TB, u codersdk.User) { + assert.Equal(t, "kyle", u.Username) + }, }, { Name: "EmailNotVerified", @@ -778,9 +922,11 @@ func TestUserOIDC(t *testing.T) { "email": "kyle@kwc.io", "email_verified": false, }, - AllowSignups: true, - StatusCode: http.StatusOK, - Username: "kyle", + AllowSignups: true, + StatusCode: http.StatusOK, + AssertUser: func(t testing.TB, u codersdk.User) { + assert.Equal(t, u.Username, "kyle") + }, IgnoreEmailVerified: true, }, { @@ -795,6 +941,30 @@ func TestUserOIDC(t *testing.T) { }, StatusCode: http.StatusForbidden, }, + { + Name: "EmailDomainWithLeadingAt", + IDTokenClaims: jwt.MapClaims{ + "email": "cian@coder.com", + "email_verified": true, + }, + AllowSignups: true, + EmailDomain: []string{ + "@coder.com", + }, + StatusCode: http.StatusOK, + }, + { + Name: "EmailDomainForbiddenWithLeadingAt", + IDTokenClaims: jwt.MapClaims{ + "email": "kyle@kwc.io", + "email_verified": true, + }, + AllowSignups: true, + EmailDomain: []string{ + "@coder.com", + }, + StatusCode: http.StatusForbidden, + }, { Name: "EmailDomainCaseInsensitive", IDTokenClaims: jwt.MapClaims{ @@ -802,6 +972,9 @@ func TestUserOIDC(t *testing.T) { "email_verified": true, }, AllowSignups: true, + AssertUser: func(t testing.TB, u codersdk.User) { + assert.Equal(t, u.Username, "kyle") + }, EmailDomain: []string{ "kwc.io", }, @@ -839,7 +1012,9 @@ func TestUserOIDC(t *testing.T) { "email": "kyle@kwc.io", "email_verified": true, }, - Username: "kyle", + AssertUser: func(t testing.TB, u codersdk.User) { + assert.Equal(t, "kyle", u.Username) + }, AllowSignups: true, StatusCode: http.StatusOK, }, @@ -850,10 +1025,56 @@ func TestUserOIDC(t *testing.T) { "email_verified": true, "preferred_username": "hotdog", }, - Username: "hotdog", + AssertUser: func(t testing.TB, u codersdk.User) { + assert.Equal(t, "hotdog", u.Username) + }, AllowSignups: true, StatusCode: http.StatusOK, }, + { + Name: "FullNameFromClaims", + IDTokenClaims: jwt.MapClaims{ + "email": "kyle@kwc.io", + "email_verified": true, + "name": "Hot Dog", + }, + AssertUser: func(t testing.TB, u codersdk.User) { + assert.Equal(t, "Hot Dog", u.Name) + }, + AllowSignups: true, + StatusCode: http.StatusOK, + }, + { + Name: "InvalidFullNameFromClaims", + IDTokenClaims: jwt.MapClaims{ + "email": "kyle@kwc.io", + "email_verified": true, + // Full names must be less or equal to than 128 characters in length. + // However, we should not fail to log someone in if their name is too long. + // Just truncate it. + "name": strings.Repeat("a", 129), + }, + AllowSignups: true, + StatusCode: http.StatusOK, + AssertUser: func(t testing.TB, u codersdk.User) { + assert.Equal(t, strings.Repeat("a", 128), u.Name) + }, + }, + { + Name: "FullNameWhitespace", + IDTokenClaims: jwt.MapClaims{ + "email": "kyle@kwc.io", + "email_verified": true, + // Full names must not have leading or trailing whitespace, but this is a + // daft reason to fail a login. 
+ "name": " Bobby Whitespace ", + }, + AllowSignups: true, + StatusCode: http.StatusOK, + AssertUser: func(t testing.TB, u codersdk.User) { + assert.Equal(t, "Bobby Whitespace", u.Name) + }, + }, { // Services like Okta return the email as the username: // https://developer.okta.com/docs/reference/api/oidc/#base-claims-always-present @@ -861,9 +1082,12 @@ func TestUserOIDC(t *testing.T) { IDTokenClaims: jwt.MapClaims{ "email": "kyle@kwc.io", "email_verified": true, + "name": "Kylium Carbonate", "preferred_username": "kyle@kwc.io", }, - Username: "kyle", + AssertUser: func(t testing.TB, u codersdk.User) { + assert.Equal(t, "kyle", u.Username) + }, AllowSignups: true, StatusCode: http.StatusOK, }, @@ -873,7 +1097,10 @@ func TestUserOIDC(t *testing.T) { IDTokenClaims: jwt.MapClaims{ "preferred_username": "kyle@kwc.io", }, - Username: "kyle", + AssertUser: func(t testing.TB, u codersdk.User) { + assert.Equal(t, "kyle", u.Username) + assert.Empty(t, u.Name) + }, AllowSignups: true, StatusCode: http.StatusOK, }, @@ -885,9 +1112,11 @@ func TestUserOIDC(t *testing.T) { "preferred_username": "kyle", "picture": "/example.png", }, - Username: "kyle", + AssertUser: func(t testing.TB, u codersdk.User) { + assert.Equal(t, "/example.png", u.AvatarURL) + assert.Equal(t, "kyle", u.Username) + }, AllowSignups: true, - AvatarURL: "/example.png", StatusCode: http.StatusOK, }, { @@ -899,10 +1128,14 @@ func TestUserOIDC(t *testing.T) { UserInfoClaims: jwt.MapClaims{ "preferred_username": "potato", "picture": "/example.png", + "name": "Kylium Carbonate", + }, + AssertUser: func(t testing.TB, u codersdk.User) { + assert.Equal(t, "/example.png", u.AvatarURL) + assert.Equal(t, "Kylium Carbonate", u.Name) + assert.Equal(t, "potato", u.Username) }, - Username: "potato", AllowSignups: true, - AvatarURL: "/example.png", StatusCode: http.StatusOK, }, { @@ -925,7 +1158,9 @@ func TestUserOIDC(t *testing.T) { "email_verified": true, "preferred_username": "user", }, - Username: "user", + AssertUser: func(t testing.TB, u codersdk.User) { + assert.Equal(t, "user", u.Username) + }, AllowSignups: true, IgnoreEmailVerified: false, StatusCode: http.StatusOK, @@ -948,13 +1183,18 @@ func TestUserOIDC(t *testing.T) { IDTokenClaims: jwt.MapClaims{ "email": "user@internal.domain", "email_verified": true, + "name": "User McName", "preferred_username": "user", }, UserInfoClaims: jwt.MapClaims{ "email": "user.mcname@external.domain", + "name": "Mr. User McName", "preferred_username": "Mr. 
User McName", }, - Username: "user", + AssertUser: func(t testing.TB, u codersdk.User) { + assert.Equal(t, "user", u.Username) + assert.Equal(t, "User McName", u.Name) + }, IgnoreUserInfo: true, AllowSignups: true, StatusCode: http.StatusOK, @@ -965,7 +1205,9 @@ func TestUserOIDC(t *testing.T) { "email": "user@domain.tld", "email_verified": true, }, 65536), - Username: "user", + AssertUser: func(t testing.TB, u codersdk.User) { + assert.Equal(t, "user", u.Username) + }, AllowSignups: true, StatusCode: http.StatusOK, }, @@ -976,9 +1218,11 @@ func TestUserOIDC(t *testing.T) { "email_verified": true, }, UserInfoClaims: inflateClaims(t, jwt.MapClaims{}, 65536), - Username: "user", - AllowSignups: true, - StatusCode: http.StatusOK, + AssertUser: func(t testing.TB, u codersdk.User) { + assert.Equal(t, "user", u.Username) + }, + AllowSignups: true, + StatusCode: http.StatusOK, }, } { tc := tc @@ -996,6 +1240,7 @@ func TestUserOIDC(t *testing.T) { cfg.EmailDomain = tc.EmailDomain cfg.IgnoreEmailVerified = tc.IgnoreEmailVerified cfg.IgnoreUserInfo = tc.IgnoreUserInfo + cfg.NameField = "name" }) auditor := audit.NewMock() @@ -1013,22 +1258,13 @@ func TestUserOIDC(t *testing.T) { ctx := testutil.Context(t, testutil.WaitLong) - if tc.Username != "" { - user, err := client.User(ctx, "me") - require.NoError(t, err) - require.Equal(t, tc.Username, user.Username) - - require.Len(t, auditor.AuditLogs(), numLogs) - require.NotEqual(t, auditor.AuditLogs()[numLogs-1].UserID, uuid.Nil) - require.Equal(t, database.AuditActionRegister, auditor.AuditLogs()[numLogs-1].Action) - } - - if tc.AvatarURL != "" { + if tc.AssertUser != nil { user, err := client.User(ctx, "me") require.NoError(t, err) - require.Equal(t, tc.AvatarURL, user.AvatarURL) + tc.AssertUser(t, user) require.Len(t, auditor.AuditLogs(), numLogs) + require.NotEqual(t, uuid.Nil, auditor.AuditLogs()[numLogs-1].UserID) require.Equal(t, database.AuditActionRegister, auditor.AuditLogs()[numLogs-1].Action) } }) diff --git a/coderd/users.go b/coderd/users.go index 8db74cadadc9b..5ef0b2f8316e8 100644 --- a/coderd/users.go +++ b/coderd/users.go @@ -187,6 +187,7 @@ func (api *API) postFirstUser(rw http.ResponseWriter, r *http.Request) { CreateUserRequest: codersdk.CreateUserRequest{ Email: createUser.Email, Username: createUser.Username, + Name: createUser.Name, Password: createUser.Password, OrganizationID: defaultOrg.ID, }, @@ -223,7 +224,7 @@ func (api *API) postFirstUser(rw http.ResponseWriter, r *http.Request) { // Add the admin role to this first user. //nolint:gocritic // needed to create first user _, err = api.Database.UpdateUserRoles(dbauthz.AsSystemRestricted(ctx), database.UpdateUserRolesParams{ - GrantedRoles: []string{rbac.RoleOwner()}, + GrantedRoles: []string{rbac.RoleOwner().String()}, ID: user.ID, }) if err != nil { @@ -805,7 +806,7 @@ func (api *API) putUserStatus(status database.UserStatus) func(rw http.ResponseW Message: "You cannot suspend yourself.", }) return - case slice.Contains(user.RBACRoles, rbac.RoleOwner()): + case slice.Contains(user.RBACRoles, rbac.RoleOwner().String()): // You may not suspend an owner httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ Message: fmt.Sprintf("You cannot suspend a user with the %q role. 
You must remove the role first.", rbac.RoleOwner()), @@ -1027,12 +1028,16 @@ func (api *API) userRoles(rw http.ResponseWriter, r *http.Request) { return } + // TODO: Replace this with "GetAuthorizationUserRoles" resp := codersdk.UserRoles{ Roles: user.RBACRoles, OrganizationRoles: make(map[uuid.UUID][]string), } - memberships, err := api.Database.GetOrganizationMembershipsByUserID(ctx, user.ID) + memberships, err := api.Database.OrganizationMembers(ctx, database.OrganizationMembersParams{ + UserID: user.ID, + OrganizationID: uuid.Nil, + }) if err != nil { httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ Message: "Internal error fetching user's organization memberships.", @@ -1042,7 +1047,7 @@ func (api *API) userRoles(rw http.ResponseWriter, r *http.Request) { } for _, mem := range memberships { - resp.OrganizationRoles[mem.OrganizationID] = mem.Roles + resp.OrganizationRoles[mem.OrganizationMember.OrganizationID] = mem.OrganizationMember.Roles } httpapi.Write(ctx, rw, http.StatusOK, resp) @@ -1220,6 +1225,7 @@ func (api *API) CreateUser(ctx context.Context, store database.Store, req Create ID: uuid.New(), Email: req.Email, Username: req.Username, + Name: httpapi.NormalizeRealUsername(req.Name), CreatedAt: dbtime.Now(), UpdatedAt: dbtime.Now(), HashedPassword: []byte{}, diff --git a/coderd/users_test.go b/coderd/users_test.go index 01cac4d1c8251..758a3ba738b90 100644 --- a/coderd/users_test.go +++ b/coderd/users_test.go @@ -70,8 +70,14 @@ func TestFirstUser(t *testing.T) { t.Run("Create", func(t *testing.T) { t.Parallel() + ctx := testutil.Context(t, testutil.WaitShort) client := coderdtest.New(t, nil) _ = coderdtest.CreateFirstUser(t, client) + u, err := client.User(ctx, codersdk.Me) + require.NoError(t, err) + assert.Equal(t, coderdtest.FirstUserParams.Name, u.Name) + assert.Equal(t, coderdtest.FirstUserParams.Email, u.Email) + assert.Equal(t, coderdtest.FirstUserParams.Username, u.Username) }) t.Run("Trial", func(t *testing.T) { @@ -96,6 +102,7 @@ func TestFirstUser(t *testing.T) { req := codersdk.CreateFirstUserRequest{ Email: "testuser@coder.com", Username: "testuser", + Name: "Test User", Password: "SomeSecurePassword!", Trial: true, } @@ -525,7 +532,7 @@ func TestPostUsers(t *testing.T) { require.Len(t, auditor.AuditLogs(), numLogs) require.Equal(t, database.AuditActionCreate, auditor.AuditLogs()[numLogs-1].Action) - require.Equal(t, database.AuditActionLogin, auditor.AuditLogs()[numLogs-2].Action) + require.Equal(t, database.AuditActionLogin, auditor.AuditLogs()[numLogs-3].Action) require.Len(t, user.OrganizationIDs, 1) assert.Equal(t, firstUser.OrganizationID, user.OrganizationIDs[0]) @@ -692,7 +699,7 @@ func TestUpdateUserProfile(t *testing.T) { require.Equal(t, http.StatusConflict, apiErr.StatusCode()) }) - t.Run("UpdateUser", func(t *testing.T) { + t.Run("UpdateSelf", func(t *testing.T) { t.Parallel() auditor := audit.NewMock() client := coderdtest.New(t, &coderdtest.Options{Auditor: auditor}) @@ -704,15 +711,48 @@ func TestUpdateUserProfile(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) defer cancel() - _, _ = client.User(ctx, codersdk.Me) + me, err := client.User(ctx, codersdk.Me) + require.NoError(t, err) + userProfile, err := client.UpdateUserProfile(ctx, codersdk.Me, codersdk.UpdateUserProfileRequest{ - Username: "newusername", - Name: "Mr User", + Username: me.Username + "1", + Name: me.Name + "1", }) + numLogs++ // add an audit log for user update + require.NoError(t, err) - require.Equal(t, 
userProfile.Username, "newusername") - require.Equal(t, userProfile.Name, "Mr User") + require.Equal(t, me.Username+"1", userProfile.Username) + require.Equal(t, me.Name+"1", userProfile.Name) + + require.Len(t, auditor.AuditLogs(), numLogs) + require.Equal(t, database.AuditActionWrite, auditor.AuditLogs()[numLogs-1].Action) + }) + + t.Run("UpdateSelfAsMember", func(t *testing.T) { + t.Parallel() + auditor := audit.NewMock() + client := coderdtest.New(t, &coderdtest.Options{Auditor: auditor}) + numLogs := len(auditor.AuditLogs()) + + firstUser := coderdtest.CreateFirstUser(t, client) + numLogs++ // add an audit log for login + + memberClient, memberUser := coderdtest.CreateAnotherUser(t, client, firstUser.OrganizationID) + numLogs++ // add an audit log for user creation + + ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) + defer cancel() + + userProfile, err := memberClient.UpdateUserProfile(ctx, codersdk.Me, codersdk.UpdateUserProfileRequest{ + Username: memberUser.Username + "1", + Name: memberUser.Name + "1", + }) numLogs++ // add an audit log for user update + numLogs++ // add an audit log for API key creation + + require.NoError(t, err) + require.Equal(t, memberUser.Username+"1", userProfile.Username) + require.Equal(t, memberUser.Name+"1", userProfile.Name) require.Len(t, auditor.AuditLogs(), numLogs) require.Equal(t, database.AuditActionWrite, auditor.AuditLogs()[numLogs-1].Action) @@ -929,12 +969,12 @@ func TestGrantSiteRoles(t *testing.T) { admin := coderdtest.New(t, nil) first := coderdtest.CreateFirstUser(t, admin) member, _ := coderdtest.CreateAnotherUser(t, admin, first.OrganizationID) - orgAdmin, _ := coderdtest.CreateAnotherUser(t, admin, first.OrganizationID, rbac.RoleOrgAdmin(first.OrganizationID)) + orgAdmin, _ := coderdtest.CreateAnotherUser(t, admin, first.OrganizationID, rbac.ScopedRoleOrgAdmin(first.OrganizationID)) randOrg, err := admin.CreateOrganization(ctx, codersdk.CreateOrganizationRequest{ Name: "random", }) require.NoError(t, err) - _, randOrgUser := coderdtest.CreateAnotherUser(t, admin, randOrg.ID, rbac.RoleOrgAdmin(randOrg.ID)) + _, randOrgUser := coderdtest.CreateAnotherUser(t, admin, randOrg.ID, rbac.ScopedRoleOrgAdmin(randOrg.ID)) userAdmin, _ := coderdtest.CreateAnotherUser(t, admin, first.OrganizationID, rbac.RoleUserAdmin()) const newUser = "newUser" @@ -953,7 +993,7 @@ func TestGrantSiteRoles(t *testing.T) { Name: "OrgRoleInSite", Client: admin, AssignToUser: codersdk.Me, - Roles: []string{rbac.RoleOrgAdmin(first.OrganizationID)}, + Roles: []string{rbac.RoleOrgAdmin()}, Error: true, StatusCode: http.StatusBadRequest, }, @@ -961,7 +1001,7 @@ func TestGrantSiteRoles(t *testing.T) { Name: "UserNotExists", Client: admin, AssignToUser: uuid.NewString(), - Roles: []string{rbac.RoleOwner()}, + Roles: []string{codersdk.RoleOwner}, Error: true, StatusCode: http.StatusBadRequest, }, @@ -987,7 +1027,7 @@ func TestGrantSiteRoles(t *testing.T) { Client: admin, OrgID: first.OrganizationID, AssignToUser: codersdk.Me, - Roles: []string{rbac.RoleOwner()}, + Roles: []string{codersdk.RoleOwner}, Error: true, StatusCode: http.StatusBadRequest, }, @@ -996,7 +1036,7 @@ func TestGrantSiteRoles(t *testing.T) { Client: orgAdmin, OrgID: randOrg.ID, AssignToUser: randOrgUser.ID.String(), - Roles: []string{rbac.RoleOrgMember(randOrg.ID)}, + Roles: []string{rbac.RoleOrgMember()}, Error: true, StatusCode: http.StatusNotFound, }, @@ -1014,9 +1054,9 @@ func TestGrantSiteRoles(t *testing.T) { Client: orgAdmin, OrgID: first.OrganizationID, AssignToUser: 
newUser, - Roles: []string{rbac.RoleOrgAdmin(first.OrganizationID)}, + Roles: []string{rbac.RoleOrgAdmin()}, ExpectedRoles: []string{ - rbac.RoleOrgAdmin(first.OrganizationID), + rbac.RoleOrgAdmin(), }, Error: false, }, @@ -1024,9 +1064,9 @@ func TestGrantSiteRoles(t *testing.T) { Name: "UserAdminMakeMember", Client: userAdmin, AssignToUser: newUser, - Roles: []string{rbac.RoleMember()}, + Roles: []string{codersdk.RoleMember}, ExpectedRoles: []string{ - rbac.RoleMember(), + codersdk.RoleMember, }, Error: false, }, @@ -1091,7 +1131,7 @@ func TestInitialRoles(t *testing.T) { roles, err := client.UserRoles(ctx, codersdk.Me) require.NoError(t, err) require.ElementsMatch(t, roles.Roles, []string{ - rbac.RoleOwner(), + codersdk.RoleOwner, }, "should be a member and admin") require.ElementsMatch(t, roles.OrganizationRoles[first.OrganizationID], []string{}, "should be a member") @@ -1256,12 +1296,12 @@ func TestUsersFilter(t *testing.T) { users := make([]codersdk.User, 0) users = append(users, firstUser) for i := 0; i < 15; i++ { - roles := []string{} + roles := []rbac.RoleIdentifier{} if i%2 == 0 { roles = append(roles, rbac.RoleTemplateAdmin(), rbac.RoleUserAdmin()) } if i%3 == 0 { - roles = append(roles, "auditor") + roles = append(roles, rbac.RoleAuditor()) } userClient, userData := coderdtest.CreateAnotherUser(t, client, first.OrganizationID, roles...) // Set the last seen for each user to a unique day @@ -1346,12 +1386,12 @@ func TestUsersFilter(t *testing.T) { { Name: "Admins", Filter: codersdk.UsersRequest{ - Role: rbac.RoleOwner(), + Role: codersdk.RoleOwner, Status: codersdk.UserStatusSuspended + "," + codersdk.UserStatusActive, }, FilterF: func(_ codersdk.UsersRequest, u codersdk.User) bool { for _, r := range u.Roles { - if r.Name == rbac.RoleOwner() { + if r.Name == codersdk.RoleOwner { return true } } @@ -1366,7 +1406,7 @@ func TestUsersFilter(t *testing.T) { }, FilterF: func(_ codersdk.UsersRequest, u codersdk.User) bool { for _, r := range u.Roles { - if r.Name == rbac.RoleOwner() { + if r.Name == codersdk.RoleOwner { return true } } @@ -1376,7 +1416,7 @@ func TestUsersFilter(t *testing.T) { { Name: "Members", Filter: codersdk.UsersRequest{ - Role: rbac.RoleMember(), + Role: codersdk.RoleMember, Status: codersdk.UserStatusSuspended + "," + codersdk.UserStatusActive, }, FilterF: func(_ codersdk.UsersRequest, u codersdk.User) bool { @@ -1390,7 +1430,7 @@ func TestUsersFilter(t *testing.T) { }, FilterF: func(_ codersdk.UsersRequest, u codersdk.User) bool { for _, r := range u.Roles { - if r.Name == rbac.RoleOwner() { + if r.Name == codersdk.RoleOwner { return (strings.ContainsAny(u.Username, "iI") || strings.ContainsAny(u.Email, "iI")) && u.Status == codersdk.UserStatusActive } @@ -1405,7 +1445,7 @@ func TestUsersFilter(t *testing.T) { }, FilterF: func(_ codersdk.UsersRequest, u codersdk.User) bool { for _, r := range u.Roles { - if r.Name == rbac.RoleOwner() { + if r.Name == codersdk.RoleOwner { return (strings.ContainsAny(u.Username, "iI") || strings.ContainsAny(u.Email, "iI")) && u.Status == codersdk.UserStatusActive } @@ -1453,7 +1493,7 @@ func TestUsersFilter(t *testing.T) { exp = append(exp, made) } } - require.ElementsMatch(t, exp, matched.Users, "expected workspaces returned") + require.ElementsMatch(t, exp, matched.Users, "expected users returned") }) } } diff --git a/coderd/util/slice/slice.go b/coderd/util/slice/slice.go index f06930f373557..9bb1da930ff45 100644 --- a/coderd/util/slice/slice.go +++ b/coderd/util/slice/slice.go @@ -4,6 +4,15 @@ import ( 
"golang.org/x/exp/constraints" ) +// ToStrings works for any type where the base type is a string. +func ToStrings[T ~string](a []T) []string { + tmp := make([]string, 0, len(a)) + for _, v := range a { + tmp = append(tmp, string(v)) + } + return tmp +} + // Omit creates a new slice with the arguments omitted from the list. func Omit[T comparable](a []T, omits ...T) []T { tmp := make([]T, 0, len(a)) diff --git a/coderd/workspaceagents.go b/coderd/workspaceagents.go index 1821948572e29..e9e2ab18027d9 100644 --- a/coderd/workspaceagents.go +++ b/coderd/workspaceagents.go @@ -18,14 +18,12 @@ import ( "github.com/sqlc-dev/pqtype" "golang.org/x/exp/maps" "golang.org/x/exp/slices" - "golang.org/x/mod/semver" "golang.org/x/sync/errgroup" "golang.org/x/xerrors" "nhooyr.io/websocket" "tailscale.com/tailcfg" "cdr.dev/slog" - agentproto "github.com/coder/coder/v2/agent/proto" "github.com/coder/coder/v2/coderd/agentapi" "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/db2sdk" @@ -136,144 +134,8 @@ func (api *API) workspaceAgent(rw http.ResponseWriter, r *http.Request) { httpapi.Write(ctx, rw, http.StatusOK, apiAgent) } -// @Summary Get authorized workspace agent manifest -// @ID get-authorized-workspace-agent-manifest -// @Security CoderSessionToken -// @Produce json -// @Tags Agents -// @Success 200 {object} agentsdk.Manifest -// @Router /workspaceagents/me/manifest [get] -func (api *API) workspaceAgentManifest(rw http.ResponseWriter, r *http.Request) { - ctx := r.Context() - workspaceAgent := httpmw.WorkspaceAgent(r) - - // As this API becomes deprecated, use the new protobuf API and convert the - // types back to the SDK types. - manifestAPI := &agentapi.ManifestAPI{ - AccessURL: api.AccessURL, - AppHostname: api.AppHostname, - ExternalAuthConfigs: api.ExternalAuthConfigs, - DisableDirectConnections: api.DeploymentValues.DERP.Config.BlockDirect.Value(), - DerpForceWebSockets: api.DeploymentValues.DERP.Config.ForceWebSockets.Value(), - - AgentFn: func(_ context.Context) (database.WorkspaceAgent, error) { return workspaceAgent, nil }, - WorkspaceIDFn: func(ctx context.Context, wa *database.WorkspaceAgent) (uuid.UUID, error) { - // Sadly this results in a double query, but it's only temporary for - // now. 
- ws, err := api.Database.GetWorkspaceByAgentID(ctx, wa.ID) - if err != nil { - return uuid.Nil, err - } - return ws.Workspace.ID, nil - }, - Database: api.Database, - DerpMapFn: api.DERPMap, - } - manifest, err := manifestAPI.GetManifest(ctx, &agentproto.GetManifestRequest{}) - if err != nil { - httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ - Message: "Internal error fetching workspace agent manifest.", - Detail: err.Error(), - }) - return - } - sdkManifest, err := agentsdk.ManifestFromProto(manifest) - if err != nil { - httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ - Message: "Internal error converting manifest.", - Detail: err.Error(), - }) - return - } - - httpapi.Write(ctx, rw, http.StatusOK, sdkManifest) -} - const AgentAPIVersionREST = "1.0" -// @Summary Submit workspace agent startup -// @ID submit-workspace-agent-startup -// @Security CoderSessionToken -// @Accept json -// @Produce json -// @Tags Agents -// @Param request body agentsdk.PostStartupRequest true "Startup request" -// @Success 200 -// @Router /workspaceagents/me/startup [post] -// @x-apidocgen {"skip": true} -func (api *API) postWorkspaceAgentStartup(rw http.ResponseWriter, r *http.Request) { - ctx := r.Context() - workspaceAgent := httpmw.WorkspaceAgent(r) - apiAgent, err := db2sdk.WorkspaceAgent( - api.DERPMap(), *api.TailnetCoordinator.Load(), workspaceAgent, nil, nil, nil, api.AgentInactiveDisconnectTimeout, - api.DeploymentValues.AgentFallbackTroubleshootingURL.String(), - ) - if err != nil { - httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ - Message: "Internal error reading workspace agent.", - Detail: err.Error(), - }) - return - } - - var req agentsdk.PostStartupRequest - if !httpapi.Read(ctx, rw, r, &req) { - return - } - - api.Logger.Debug( - ctx, - "post workspace agent version", - slog.F("agent_id", apiAgent.ID), - slog.F("agent_version", req.Version), - slog.F("remote_addr", r.RemoteAddr), - ) - - if !semver.IsValid(req.Version) { - httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ - Message: "Invalid workspace agent version provided.", - Detail: fmt.Sprintf("invalid semver version: %q", req.Version), - }) - return - } - - // Validate subsystems. 
- seen := make(map[codersdk.AgentSubsystem]bool) - for _, s := range req.Subsystems { - if !s.Valid() { - httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ - Message: "Invalid workspace agent subsystem provided.", - Detail: fmt.Sprintf("invalid subsystem: %q", s), - }) - return - } - if seen[s] { - httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ - Message: "Invalid workspace agent subsystem provided.", - Detail: fmt.Sprintf("duplicate subsystem: %q", s), - }) - return - } - seen[s] = true - } - - if err := api.Database.UpdateWorkspaceAgentStartupByID(ctx, database.UpdateWorkspaceAgentStartupByIDParams{ - ID: apiAgent.ID, - Version: req.Version, - ExpandedDirectory: req.ExpandedDirectory, - Subsystems: convertWorkspaceAgentSubsystems(req.Subsystems), - APIVersion: AgentAPIVersionREST, - }); err != nil { - httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ - Message: "Error setting agent version", - Detail: err.Error(), - }) - return - } - - httpapi.Write(ctx, rw, http.StatusOK, nil) -} - // @Summary Patch workspace agent logs // @ID patch-workspace-agent-logs // @Security CoderSessionToken @@ -938,79 +800,6 @@ func (api *API) derpMapUpdates(rw http.ResponseWriter, r *http.Request) { } } -// @Summary Coordinate workspace agent via Tailnet -// @Description It accepts a WebSocket connection to an agent that listens to -// @Description incoming connections and publishes node updates. -// @ID coordinate-workspace-agent-via-tailnet -// @Security CoderSessionToken -// @Tags Agents -// @Success 101 -// @Router /workspaceagents/me/coordinate [get] -func (api *API) workspaceAgentCoordinate(rw http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - api.WebsocketWaitMutex.Lock() - api.WebsocketWaitGroup.Add(1) - api.WebsocketWaitMutex.Unlock() - defer api.WebsocketWaitGroup.Done() - // The middleware only accept agents for resources on the latest build. 
- workspaceAgent := httpmw.WorkspaceAgent(r) - build := httpmw.LatestBuild(r) - - workspace, err := api.Database.GetWorkspaceByID(ctx, build.WorkspaceID) - if err != nil { - httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ - Message: "Internal error fetching workspace.", - Detail: err.Error(), - }) - return - } - - owner, err := api.Database.GetUserByID(ctx, workspace.OwnerID) - if err != nil { - httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ - Message: "Internal error fetching user.", - Detail: err.Error(), - }) - return - } - - conn, err := websocket.Accept(rw, r, nil) - if err != nil { - httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ - Message: "Failed to accept websocket.", - Detail: err.Error(), - }) - return - } - - ctx, wsNetConn := codersdk.WebsocketNetConn(ctx, conn, websocket.MessageBinary) - defer wsNetConn.Close() - - closeCtx, closeCtxCancel := context.WithCancel(ctx) - defer closeCtxCancel() - monitor := api.startAgentWebsocketMonitor(closeCtx, workspaceAgent, build, conn) - defer monitor.close() - - api.Logger.Debug(ctx, "accepting agent", - slog.F("owner", owner.Username), - slog.F("workspace", workspace.Name), - slog.F("name", workspaceAgent.Name), - ) - api.Logger.Debug(ctx, "accepting agent details", slog.F("agent", workspaceAgent)) - - defer conn.Close(websocket.StatusNormalClosure, "") - - err = (*api.TailnetCoordinator.Load()).ServeAgent(wsNetConn, workspaceAgent.ID, - fmt.Sprintf("%s-%s-%s", owner.Username, workspace.Name, workspaceAgent.Name), - ) - if err != nil { - api.Logger.Warn(ctx, "tailnet coordinator agent error", slog.Error(err)) - _ = conn.Close(websocket.StatusInternalError, err.Error()) - return - } -} - // workspaceAgentClientCoordinate accepts a WebSocket that reads node network updates. // After accept a PubSub starts listening for new connection node updates // which are written to the WebSocket. 
@@ -1084,6 +873,56 @@ func (api *API) workspaceAgentClientCoordinate(rw http.ResponseWriter, r *http.R } } +// @Summary Post workspace agent log source +// @ID post-workspace-agent-log-source +// @Security CoderSessionToken +// @Accept json +// @Produce json +// @Tags Agents +// @Param request body agentsdk.PostLogSourceRequest true "Log source request" +// @Success 200 {object} codersdk.WorkspaceAgentLogSource +// @Router /workspaceagents/me/log-source [post] +func (api *API) workspaceAgentPostLogSource(rw http.ResponseWriter, r *http.Request) { + ctx := r.Context() + var req agentsdk.PostLogSourceRequest + if !httpapi.Read(ctx, rw, r, &req) { + return + } + + workspaceAgent := httpmw.WorkspaceAgent(r) + + sources, err := api.Database.InsertWorkspaceAgentLogSources(ctx, database.InsertWorkspaceAgentLogSourcesParams{ + WorkspaceAgentID: workspaceAgent.ID, + CreatedAt: dbtime.Now(), + ID: []uuid.UUID{req.ID}, + DisplayName: []string{req.DisplayName}, + Icon: []string{req.Icon}, + }) + if err != nil { + if database.IsUniqueViolation(err, "workspace_agent_log_sources_pkey") { + httpapi.Write(ctx, rw, http.StatusCreated, codersdk.WorkspaceAgentLogSource{ + WorkspaceAgentID: workspaceAgent.ID, + CreatedAt: dbtime.Now(), + ID: req.ID, + DisplayName: req.DisplayName, + Icon: req.Icon, + }) + return + } + httpapi.InternalServerError(rw, err) + return + } + + if len(sources) != 1 { + httpapi.InternalServerError(rw, xerrors.Errorf("database should've returned 1 row, got %d", len(sources))) + return + } + + apiSource := convertLogSources(sources)[0] + + httpapi.Write(ctx, rw, http.StatusCreated, apiSource) +} + // convertProvisionedApps converts applications that are in the middle of provisioning process. // It means that they may not have an agent or workspace assigned (dry-run job). func convertProvisionedApps(dbApps []database.WorkspaceApp) []codersdk.WorkspaceApp { @@ -1121,214 +960,6 @@ func convertScripts(dbScripts []database.WorkspaceAgentScript) []codersdk.Worksp return scripts } -// @Summary Submit workspace agent stats -// @ID submit-workspace-agent-stats -// @Security CoderSessionToken -// @Accept json -// @Produce json -// @Tags Agents -// @Param request body agentsdk.Stats true "Stats request" -// @Success 200 {object} agentsdk.StatsResponse -// @Router /workspaceagents/me/report-stats [post] -// @Deprecated Uses agent API v2 endpoint instead. -func (api *API) workspaceAgentReportStats(rw http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - workspaceAgent := httpmw.WorkspaceAgent(r) - row, err := api.Database.GetWorkspaceByAgentID(ctx, workspaceAgent.ID) - if err != nil { - httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ - Message: "Failed to get workspace.", - Detail: err.Error(), - }) - return - } - workspace := row.Workspace - - var req agentsdk.Stats - if !httpapi.Read(ctx, rw, r, &req) { - return - } - - // An empty stat means it's just looking for the report interval. 
- if req.ConnectionsByProto == nil { - httpapi.Write(ctx, rw, http.StatusOK, agentsdk.StatsResponse{ - ReportInterval: api.AgentStatsRefreshInterval, - }) - return - } - - api.Logger.Debug(ctx, "read stats report", - slog.F("interval", api.AgentStatsRefreshInterval), - slog.F("workspace_agent_id", workspaceAgent.ID), - slog.F("workspace_id", workspace.ID), - slog.F("payload", req), - ) - - protoStats := &agentproto.Stats{ - ConnectionsByProto: req.ConnectionsByProto, - ConnectionCount: req.ConnectionCount, - ConnectionMedianLatencyMs: req.ConnectionMedianLatencyMS, - RxPackets: req.RxPackets, - RxBytes: req.RxBytes, - TxPackets: req.TxPackets, - TxBytes: req.TxBytes, - SessionCountVscode: req.SessionCountVSCode, - SessionCountJetbrains: req.SessionCountJetBrains, - SessionCountReconnectingPty: req.SessionCountReconnectingPTY, - SessionCountSsh: req.SessionCountSSH, - Metrics: make([]*agentproto.Stats_Metric, len(req.Metrics)), - } - for i, metric := range req.Metrics { - metricType := agentproto.Stats_Metric_TYPE_UNSPECIFIED - switch metric.Type { - case agentsdk.AgentMetricTypeCounter: - metricType = agentproto.Stats_Metric_COUNTER - case agentsdk.AgentMetricTypeGauge: - metricType = agentproto.Stats_Metric_GAUGE - } - - protoStats.Metrics[i] = &agentproto.Stats_Metric{ - Name: metric.Name, - Type: metricType, - Value: metric.Value, - Labels: make([]*agentproto.Stats_Metric_Label, len(metric.Labels)), - } - for j, label := range metric.Labels { - protoStats.Metrics[i].Labels[j] = &agentproto.Stats_Metric_Label{ - Name: label.Name, - Value: label.Value, - } - } - } - err = api.statsReporter.ReportAgentStats( - ctx, - dbtime.Now(), - workspace, - workspaceAgent, - row.TemplateName, - protoStats, - ) - if err != nil { - httpapi.InternalServerError(rw, err) - return - } - - httpapi.Write(ctx, rw, http.StatusOK, agentsdk.StatsResponse{ - ReportInterval: api.AgentStatsRefreshInterval, - }) -} - -func ellipse(v string, n int) string { - if len(v) > n { - return v[:n] + "..." - } - return v -} - -// @Summary Submit workspace agent metadata -// @ID submit-workspace-agent-metadata -// @Security CoderSessionToken -// @Accept json -// @Tags Agents -// @Param request body []agentsdk.PostMetadataRequest true "Workspace agent metadata request" -// @Success 204 "Success" -// @Router /workspaceagents/me/metadata [post] -// @x-apidocgen {"skip": true} -func (api *API) workspaceAgentPostMetadata(rw http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - var req agentsdk.PostMetadataRequest - if !httpapi.Read(ctx, rw, r, &req) { - return - } - - workspaceAgent := httpmw.WorkspaceAgent(r) - - // Split into function to allow call by deprecated handler. - err := api.workspaceAgentUpdateMetadata(ctx, workspaceAgent, req) - if err != nil { - api.Logger.Error(ctx, "failed to handle metadata request", slog.Error(err)) - httpapi.InternalServerError(rw, err) - return - } - - httpapi.Write(ctx, rw, http.StatusNoContent, nil) -} - -func (api *API) workspaceAgentUpdateMetadata(ctx context.Context, workspaceAgent database.WorkspaceAgent, req agentsdk.PostMetadataRequest) error { - const ( - // maxValueLen is set to 2048 to stay under the 8000 byte Postgres - // NOTIFY limit. Since both value and error can be set, the real - // payload limit is 2 * 2048 * 4/3 = 5461 bytes + a few hundred bytes for JSON - // syntax, key names, and metadata. 
- maxValueLen = 2048 - maxErrorLen = maxValueLen - ) - - collectedAt := time.Now() - - datum := database.UpdateWorkspaceAgentMetadataParams{ - WorkspaceAgentID: workspaceAgent.ID, - Key: make([]string, 0, len(req.Metadata)), - Value: make([]string, 0, len(req.Metadata)), - Error: make([]string, 0, len(req.Metadata)), - CollectedAt: make([]time.Time, 0, len(req.Metadata)), - } - - for _, md := range req.Metadata { - metadataError := md.Error - - // We overwrite the error if the provided payload is too long. - if len(md.Value) > maxValueLen { - metadataError = fmt.Sprintf("value of %d bytes exceeded %d bytes", len(md.Value), maxValueLen) - md.Value = md.Value[:maxValueLen] - } - - if len(md.Error) > maxErrorLen { - metadataError = fmt.Sprintf("error of %d bytes exceeded %d bytes", len(md.Error), maxErrorLen) - md.Error = md.Error[:maxErrorLen] - } - - // We don't want a misconfigured agent to fill the database. - datum.Key = append(datum.Key, md.Key) - datum.Value = append(datum.Value, md.Value) - datum.Error = append(datum.Error, metadataError) - // We ignore the CollectedAt from the agent to avoid bugs caused by - // clock skew. - datum.CollectedAt = append(datum.CollectedAt, collectedAt) - - api.Logger.Debug( - ctx, "accepted metadata report", - slog.F("workspace_agent_id", workspaceAgent.ID), - slog.F("collected_at", collectedAt), - slog.F("original_collected_at", md.CollectedAt), - slog.F("key", md.Key), - slog.F("value", ellipse(md.Value, 16)), - ) - } - - payload, err := json.Marshal(agentapi.WorkspaceAgentMetadataChannelPayload{ - CollectedAt: collectedAt, - Keys: datum.Key, - }) - if err != nil { - return err - } - - err = api.Database.UpdateWorkspaceAgentMetadata(ctx, datum) - if err != nil { - return err - } - - err = api.Pubsub.Publish(agentapi.WatchWorkspaceAgentMetadataChannel(workspaceAgent.ID), payload) - if err != nil { - return err - } - - return nil -} - // @Summary Watch for workspace agent metadata updates // @ID watch-for-workspace-agent-metadata-updates // @Security CoderSessionToken @@ -1562,211 +1193,6 @@ func convertWorkspaceAgentMetadata(db []database.WorkspaceAgentMetadatum) []code return result } -// @Summary Submit workspace agent lifecycle state -// @ID submit-workspace-agent-lifecycle-state -// @Security CoderSessionToken -// @Accept json -// @Tags Agents -// @Param request body agentsdk.PostLifecycleRequest true "Workspace agent lifecycle request" -// @Success 204 "Success" -// @Router /workspaceagents/me/report-lifecycle [post] -// @x-apidocgen {"skip": true} -func (api *API) workspaceAgentReportLifecycle(rw http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - workspaceAgent := httpmw.WorkspaceAgent(r) - row, err := api.Database.GetWorkspaceByAgentID(ctx, workspaceAgent.ID) - if err != nil { - httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ - Message: "Failed to get workspace.", - Detail: err.Error(), - }) - return - } - workspace := row.Workspace - - var req agentsdk.PostLifecycleRequest - if !httpapi.Read(ctx, rw, r, &req) { - return - } - - logger := api.Logger.With( - slog.F("workspace_agent_id", workspaceAgent.ID), - slog.F("workspace_id", workspace.ID), - slog.F("payload", req), - ) - logger.Debug(ctx, "workspace agent state report") - - lifecycleState := req.State - dbLifecycleState := database.WorkspaceAgentLifecycleState(lifecycleState) - if !dbLifecycleState.Valid() { - httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ - Message: "Invalid lifecycle state.", - Detail: fmt.Sprintf("Invalid lifecycle state 
%q, must be be one of %q.", lifecycleState, database.AllWorkspaceAgentLifecycleStateValues()), - }) - return - } - - if req.ChangedAt.IsZero() { - // Backwards compatibility with older agents. - req.ChangedAt = dbtime.Now() - } - changedAt := sql.NullTime{Time: req.ChangedAt, Valid: true} - - startedAt := workspaceAgent.StartedAt - readyAt := workspaceAgent.ReadyAt - switch lifecycleState { - case codersdk.WorkspaceAgentLifecycleStarting: - startedAt = changedAt - readyAt.Valid = false // This agent is re-starting, so it's not ready yet. - case codersdk.WorkspaceAgentLifecycleReady, codersdk.WorkspaceAgentLifecycleStartError: - readyAt = changedAt - } - - err = api.Database.UpdateWorkspaceAgentLifecycleStateByID(ctx, database.UpdateWorkspaceAgentLifecycleStateByIDParams{ - ID: workspaceAgent.ID, - LifecycleState: dbLifecycleState, - StartedAt: startedAt, - ReadyAt: readyAt, - }) - if err != nil { - if !xerrors.Is(err, context.Canceled) { - // not an error if we are canceled - logger.Error(ctx, "failed to update lifecycle state", slog.Error(err)) - } - httpapi.InternalServerError(rw, err) - return - } - - api.publishWorkspaceUpdate(ctx, workspace.ID) - - httpapi.Write(ctx, rw, http.StatusNoContent, nil) -} - -// @Summary Submit workspace agent application health -// @ID submit-workspace-agent-application-health -// @Security CoderSessionToken -// @Accept json -// @Produce json -// @Tags Agents -// @Param request body agentsdk.PostAppHealthsRequest true "Application health request" -// @Success 200 -// @Router /workspaceagents/me/app-health [post] -func (api *API) postWorkspaceAppHealth(rw http.ResponseWriter, r *http.Request) { - ctx := r.Context() - workspaceAgent := httpmw.WorkspaceAgent(r) - var req agentsdk.PostAppHealthsRequest - if !httpapi.Read(ctx, rw, r, &req) { - return - } - - if req.Healths == nil || len(req.Healths) == 0 { - httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ - Message: "Health field is empty", - }) - return - } - - apps, err := api.Database.GetWorkspaceAppsByAgentID(ctx, workspaceAgent.ID) - if err != nil { - httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ - Message: "Error getting agent apps", - Detail: err.Error(), - }) - return - } - - var newApps []database.WorkspaceApp - for id, newHealth := range req.Healths { - old := func() *database.WorkspaceApp { - for _, app := range apps { - if app.ID == id { - return &app - } - } - - return nil - }() - if old == nil { - httpapi.Write(ctx, rw, http.StatusNotFound, codersdk.Response{ - Message: "Error setting workspace app health", - Detail: xerrors.Errorf("workspace app name %s not found", id).Error(), - }) - return - } - - if old.HealthcheckUrl == "" { - httpapi.Write(ctx, rw, http.StatusNotFound, codersdk.Response{ - Message: "Error setting workspace app health", - Detail: xerrors.Errorf("health checking is disabled for workspace app %s", id).Error(), - }) - return - } - - switch newHealth { - case codersdk.WorkspaceAppHealthInitializing: - case codersdk.WorkspaceAppHealthHealthy: - case codersdk.WorkspaceAppHealthUnhealthy: - default: - httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ - Message: "Error setting workspace app health", - Detail: xerrors.Errorf("workspace app health %s is not a valid value", newHealth).Error(), - }) - return - } - - // don't save if the value hasn't changed - if old.Health == database.WorkspaceAppHealth(newHealth) { - continue - } - old.Health = database.WorkspaceAppHealth(newHealth) - - newApps = append(newApps, *old) - } - - 
for _, app := range newApps { - err = api.Database.UpdateWorkspaceAppHealthByID(ctx, database.UpdateWorkspaceAppHealthByIDParams{ - ID: app.ID, - Health: app.Health, - }) - if err != nil { - httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ - Message: "Error setting workspace app health", - Detail: err.Error(), - }) - return - } - } - - resource, err := api.Database.GetWorkspaceResourceByID(ctx, workspaceAgent.ResourceID) - if err != nil { - httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ - Message: "Internal error fetching workspace resource.", - Detail: err.Error(), - }) - return - } - job, err := api.Database.GetWorkspaceBuildByJobID(ctx, resource.JobID) - if err != nil { - httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ - Message: "Internal error fetching workspace build.", - Detail: err.Error(), - }) - return - } - workspace, err := api.Database.GetWorkspaceByID(ctx, job.WorkspaceID) - if err != nil { - httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ - Message: "Internal error fetching workspace.", - Detail: err.Error(), - }) - return - } - api.publishWorkspaceUpdate(ctx, workspace.ID) - - httpapi.Write(ctx, rw, http.StatusOK, nil) -} - // workspaceAgentsExternalAuth returns an access token for a given URL // or finds a provider by ID. // @@ -1912,25 +1338,25 @@ func (api *API) workspaceAgentsExternalAuth(rw http.ResponseWriter, r *http.Requ return } - externalAuthLink, valid, err := externalAuthConfig.RefreshToken(ctx, api.Database, externalAuthLink) - if err != nil { + refreshedLink, err := externalAuthConfig.RefreshToken(ctx, api.Database, externalAuthLink) + if err != nil && !externalauth.IsInvalidTokenError(err) { handleRetrying(http.StatusInternalServerError, codersdk.Response{ Message: "Failed to refresh external auth token.", Detail: err.Error(), }) return } - if !valid { + if err != nil { // Set the previous token so the retry logic will skip validating the // same token again. This should only be set if the token is invalid and there // was no error. If it is invalid because of an error, then we should recheck. - previousToken = &externalAuthLink + previousToken = &refreshedLink handleRetrying(http.StatusOK, agentsdk.ExternalAuthResponse{ URL: redirectURL.String(), }) return } - resp, err := createExternalAuthResponse(externalAuthConfig.Type, externalAuthLink.OAuthAccessToken, externalAuthLink.OAuthExtra) + resp, err := createExternalAuthResponse(externalAuthConfig.Type, refreshedLink.OAuthAccessToken, refreshedLink.OAuthExtra) if err != nil { handleRetrying(http.StatusInternalServerError, codersdk.Response{ Message: "Failed to create external auth response.", @@ -2067,24 +1493,3 @@ func convertWorkspaceAgentLog(logEntry database.WorkspaceAgentLog) codersdk.Work SourceID: logEntry.LogSourceID, } } - -func convertWorkspaceAgentSubsystems(ss []codersdk.AgentSubsystem) []database.WorkspaceAgentSubsystem { - out := make([]database.WorkspaceAgentSubsystem, 0, len(ss)) - for _, s := range ss { - switch s { - case codersdk.AgentSubsystemEnvbox: - out = append(out, database.WorkspaceAgentSubsystemEnvbox) - case codersdk.AgentSubsystemEnvbuilder: - out = append(out, database.WorkspaceAgentSubsystemEnvbuilder) - case codersdk.AgentSubsystemExectrace: - out = append(out, database.WorkspaceAgentSubsystemExectrace) - default: - // Invalid, drop it. 
- } - } - - sort.Slice(out, func(i, j int) bool { - return out[i] < out[j] - }) - return out -} diff --git a/coderd/workspaceagents_test.go b/coderd/workspaceagents_test.go index e99b6a297c103..a2915d2633f13 100644 --- a/coderd/workspaceagents_test.go +++ b/coderd/workspaceagents_test.go @@ -17,6 +17,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/xerrors" + "google.golang.org/protobuf/types/known/timestamppb" "tailscale.com/tailcfg" "cdr.dev/slog" @@ -34,7 +35,6 @@ import ( "github.com/coder/coder/v2/coderd/database/dbtime" "github.com/coder/coder/v2/coderd/database/pubsub" "github.com/coder/coder/v2/coderd/externalauth" - "github.com/coder/coder/v2/coderd/rbac" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/codersdk/agentsdk" "github.com/coder/coder/v2/codersdk/workspacesdk" @@ -921,15 +921,15 @@ func TestWorkspaceAgentAppHealth(t *testing.T) { require.EqualValues(t, codersdk.WorkspaceAppHealthUnhealthy, manifest.Apps[1].Health) } -// TestWorkspaceAgentReportStats tests the legacy (agent API v1) report stats endpoint. -func TestWorkspaceAgentReportStats(t *testing.T) { +func TestWorkspaceAgentPostLogSource(t *testing.T) { t.Parallel() t.Run("OK", func(t *testing.T) { t.Parallel() - client, db := coderdtest.NewWithDatabase(t, nil) user := coderdtest.CreateFirstUser(t, client) + ctx := testutil.Context(t, testutil.WaitShort) + r := dbfake.WorkspaceBuild(t, db, database.Workspace{ OrganizationID: user.OrganizationID, OwnerID: user.UserID, @@ -938,85 +938,28 @@ func TestWorkspaceAgentReportStats(t *testing.T) { agentClient := agentsdk.New(client.URL) agentClient.SetSessionToken(r.AgentToken) - _, err := agentClient.PostStats(context.Background(), &agentsdk.Stats{ - ConnectionsByProto: map[string]int64{"TCP": 1}, - ConnectionCount: 1, - RxPackets: 1, - RxBytes: 1, - TxPackets: 1, - TxBytes: 1, - SessionCountVSCode: 1, - SessionCountJetBrains: 0, - SessionCountReconnectingPTY: 0, - SessionCountSSH: 0, - ConnectionMedianLatencyMS: 10, - }) - require.NoError(t, err) - - newWorkspace, err := client.Workspace(context.Background(), r.Workspace.ID) - require.NoError(t, err) - - assert.True(t, - newWorkspace.LastUsedAt.After(r.Workspace.LastUsedAt), - "%s is not after %s", newWorkspace.LastUsedAt, r.Workspace.LastUsedAt, - ) - }) - - t.Run("FailDeleted", func(t *testing.T) { - t.Parallel() - - owner, db := coderdtest.NewWithDatabase(t, nil) - ownerUser := coderdtest.CreateFirstUser(t, owner) - client, admin := coderdtest.CreateAnotherUser(t, owner, ownerUser.OrganizationID, rbac.RoleTemplateAdmin(), rbac.RoleUserAdmin()) - r := dbfake.WorkspaceBuild(t, db, database.Workspace{ - OrganizationID: admin.OrganizationIDs[0], - OwnerID: admin.ID, - }).WithAgent().Do() - - agentClient := agentsdk.New(client.URL) - agentClient.SetSessionToken(r.AgentToken) + req := agentsdk.PostLogSourceRequest{ + ID: uuid.New(), + DisplayName: "colin logs", + Icon: "/emojis/1f42e.png", + } - _, err := agentClient.PostStats(context.Background(), &agentsdk.Stats{ - ConnectionsByProto: map[string]int64{"TCP": 1}, - ConnectionCount: 1, - RxPackets: 1, - RxBytes: 1, - TxPackets: 1, - TxBytes: 1, - SessionCountVSCode: 0, - SessionCountJetBrains: 0, - SessionCountReconnectingPTY: 0, - SessionCountSSH: 0, - ConnectionMedianLatencyMS: 10, - }) + res, err := agentClient.PostLogSource(ctx, req) require.NoError(t, err) - - newWorkspace, err := client.Workspace(context.Background(), r.Workspace.ID) + assert.Equal(t, req.ID, res.ID) + assert.Equal(t, req.DisplayName, 
res.DisplayName) + assert.Equal(t, req.Icon, res.Icon) + assert.NotZero(t, res.WorkspaceAgentID) + assert.NotZero(t, res.CreatedAt) + + // should be idempotent + res, err = agentClient.PostLogSource(ctx, req) require.NoError(t, err) - - // nolint:gocritic // using db directly over creating a delete job - err = db.UpdateWorkspaceDeletedByID(dbauthz.As(context.Background(), - coderdtest.AuthzUserSubject(admin, ownerUser.OrganizationID)), - database.UpdateWorkspaceDeletedByIDParams{ - ID: newWorkspace.ID, - Deleted: true, - }) - require.NoError(t, err) - - _, err = agentClient.PostStats(context.Background(), &agentsdk.Stats{ - ConnectionsByProto: map[string]int64{"TCP": 1}, - ConnectionCount: 1, - RxPackets: 1, - RxBytes: 1, - TxPackets: 1, - TxBytes: 1, - SessionCountVSCode: 1, - SessionCountJetBrains: 0, - SessionCountReconnectingPTY: 0, - SessionCountSSH: 0, - ConnectionMedianLatencyMS: 10, - }) - require.ErrorContains(t, err, "agent is invalid") + assert.Equal(t, req.ID, res.ID) + assert.Equal(t, req.DisplayName, res.DisplayName) + assert.Equal(t, req.Icon, res.Icon) + assert.NotZero(t, res.WorkspaceAgentID) + assert.NotZero(t, res.CreatedAt) }) } @@ -1025,6 +968,7 @@ func TestWorkspaceAgent_LifecycleState(t *testing.T) { t.Run("Set", func(t *testing.T) { t.Parallel() + ctx := testutil.Context(t, testutil.WaitLong) client, db := coderdtest.NewWithDatabase(t, nil) user := coderdtest.CreateFirstUser(t, client) @@ -1040,8 +984,15 @@ func TestWorkspaceAgent_LifecycleState(t *testing.T) { } } - agentClient := agentsdk.New(client.URL) - agentClient.SetSessionToken(r.AgentToken) + ac := agentsdk.New(client.URL) + ac.SetSessionToken(r.AgentToken) + conn, err := ac.ConnectRPC(ctx) + require.NoError(t, err) + defer func() { + cErr := conn.Close() + require.NoError(t, cErr) + }() + agentAPI := agentproto.NewDRPCAgentClient(conn) tests := []struct { state codersdk.WorkspaceAgentLifecycle @@ -1063,16 +1014,17 @@ func TestWorkspaceAgent_LifecycleState(t *testing.T) { for _, tt := range tests { tt := tt t.Run(string(tt.state), func(t *testing.T) { - ctx := testutil.Context(t, testutil.WaitLong) - - err := agentClient.PostLifecycle(ctx, agentsdk.PostLifecycleRequest{ - State: tt.state, - ChangedAt: time.Now(), - }) + state, err := agentsdk.ProtoFromLifecycleState(tt.state) if tt.wantErr { require.Error(t, err) return } + _, err = agentAPI.UpdateLifecycle(ctx, &agentproto.UpdateLifecycleRequest{ + Lifecycle: &agentproto.Lifecycle{ + State: state, + ChangedAt: timestamppb.Now(), + }, + }) require.NoError(t, err, "post lifecycle state %q", tt.state) workspace, err = client.Workspace(ctx, workspace.ID) @@ -1155,11 +1107,11 @@ func TestWorkspaceAgent_Metadata(t *testing.T) { require.EqualValues(t, 3, manifest.Metadata[0].Timeout) post := func(ctx context.Context, key string, mr codersdk.WorkspaceAgentMetadataResult) { - err := agentClient.PostMetadata(ctx, agentsdk.PostMetadataRequest{ - Metadata: []agentsdk.Metadata{ + _, err := aAPI.BatchUpdateMetadata(ctx, &agentproto.BatchUpdateMetadataRequest{ + Metadata: []*agentproto.Metadata{ { - Key: key, - WorkspaceAgentMetadataResult: mr, + Key: key, + Result: agentsdk.ProtoFromMetadataResult(mr), }, }, }) @@ -1398,7 +1350,7 @@ func TestWorkspaceAgent_Metadata_CatchMemoryLeak(t *testing.T) { agentClient := agentsdk.New(client.URL) agentClient.SetSessionToken(r.AgentToken) - ctx, cancel := context.WithCancel(testutil.Context(t, testutil.WaitSuperLong)) + ctx := testutil.Context(t, testutil.WaitSuperLong) conn, err := agentClient.ConnectRPC(ctx) require.NoError(t, 
err) defer func() { @@ -1410,17 +1362,18 @@ func TestWorkspaceAgent_Metadata_CatchMemoryLeak(t *testing.T) { manifest := requireGetManifest(ctx, t, aAPI) post := func(ctx context.Context, key, value string) error { - return agentClient.PostMetadata(ctx, agentsdk.PostMetadataRequest{ - Metadata: []agentsdk.Metadata{ + _, err := aAPI.BatchUpdateMetadata(ctx, &agentproto.BatchUpdateMetadataRequest{ + Metadata: []*agentproto.Metadata{ { Key: key, - WorkspaceAgentMetadataResult: codersdk.WorkspaceAgentMetadataResult{ + Result: agentsdk.ProtoFromMetadataResult(codersdk.WorkspaceAgentMetadataResult{ CollectedAt: time.Now(), Value: value, - }, + }), }, }, }) + return err } workspace, err = client.Workspace(ctx, workspace.ID) @@ -1451,20 +1404,21 @@ func TestWorkspaceAgent_Metadata_CatchMemoryLeak(t *testing.T) { postDone := testutil.Go(t, func() { for { + select { + case <-metadataDone: + return + default: + } // We need to send two separate metadata updates to trigger the // memory leak. foo2 will cause the number of foo1 to be doubled, etc. - err = post(ctx, "foo1", "hi") + err := post(ctx, "foo1", "hi") if err != nil { - if !xerrors.Is(err, context.Canceled) { - assert.NoError(t, err, "post metadata foo1") - } + assert.NoError(t, err, "post metadata foo1") return } err = post(ctx, "foo2", "bye") if err != nil { - if !xerrors.Is(err, context.Canceled) { - assert.NoError(t, err, "post metadata foo1") - } + assert.NoError(t, err, "post metadata foo1") return } } @@ -1483,13 +1437,8 @@ func TestWorkspaceAgent_Metadata_CatchMemoryLeak(t *testing.T) { // testing it is not straightforward. db.err.Store(&wantErr) - select { - case <-ctx.Done(): - t.Fatal("timeout waiting for SSE to close") - case <-metadataDone: - } - cancel() - <-postDone + testutil.RequireRecvCtx(ctx, t, metadataDone) + testutil.RequireRecvCtx(ctx, t, postDone) } func TestWorkspaceAgent_Startup(t *testing.T) { diff --git a/coderd/workspaceagentsrpc.go b/coderd/workspaceagentsrpc.go index 24b6088ddd8f2..b413db264feac 100644 --- a/coderd/workspaceagentsrpc.go +++ b/coderd/workspaceagentsrpc.go @@ -143,6 +143,7 @@ func (api *API) workspaceAgentRPC(rw http.ResponseWriter, r *http.Request) { DerpForceWebSockets: api.DeploymentValues.DERP.Config.ForceWebSockets.Value(), DerpMapUpdateFrequency: api.Options.DERPMapUpdateFrequency, ExternalAuthConfigs: api.ExternalAuthConfigs, + Experiments: api.Experiments, // Optional: WorkspaceID: build.WorkspaceID, // saves the extra lookup later @@ -164,31 +165,6 @@ func (api *API) workspaceAgentRPC(rw http.ResponseWriter, r *http.Request) { } } -func (api *API) startAgentWebsocketMonitor(ctx context.Context, - workspaceAgent database.WorkspaceAgent, workspaceBuild database.WorkspaceBuild, - conn *websocket.Conn, -) *agentConnectionMonitor { - monitor := &agentConnectionMonitor{ - apiCtx: api.ctx, - workspaceAgent: workspaceAgent, - workspaceBuild: workspaceBuild, - conn: conn, - pingPeriod: api.AgentConnectionUpdateFrequency, - db: api.Database, - replicaID: api.ID, - updater: api, - disconnectTimeout: api.AgentInactiveDisconnectTimeout, - logger: api.Logger.With( - slog.F("workspace_id", workspaceBuild.WorkspaceID), - slog.F("agent_id", workspaceAgent.ID), - ), - } - monitor.init() - monitor.start(ctx) - - return monitor -} - type yamuxPingerCloser struct { mux *yamux.Session } diff --git a/coderd/workspaceagentsrpc_test.go b/coderd/workspaceagentsrpc_test.go index a92fbdcd1ca1a..ca8f334d4e766 100644 --- a/coderd/workspaceagentsrpc_test.go +++ b/coderd/workspaceagentsrpc_test.go @@ -1,8 +1,10 @@ 
package coderd_test import ( + "context" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" agentproto "github.com/coder/coder/v2/agent/proto" @@ -14,6 +16,52 @@ import ( "github.com/coder/coder/v2/testutil" ) +// Ported to RPC API from coderd/workspaceagents_test.go +func TestWorkspaceAgentReportStats(t *testing.T) { + t.Parallel() + + client, db := coderdtest.NewWithDatabase(t, nil) + user := coderdtest.CreateFirstUser(t, client) + r := dbfake.WorkspaceBuild(t, db, database.Workspace{ + OrganizationID: user.OrganizationID, + OwnerID: user.UserID, + }).WithAgent().Do() + + ac := agentsdk.New(client.URL) + ac.SetSessionToken(r.AgentToken) + conn, err := ac.ConnectRPC(context.Background()) + require.NoError(t, err) + defer func() { + _ = conn.Close() + }() + agentAPI := agentproto.NewDRPCAgentClient(conn) + + _, err = agentAPI.UpdateStats(context.Background(), &agentproto.UpdateStatsRequest{ + Stats: &agentproto.Stats{ + ConnectionsByProto: map[string]int64{"TCP": 1}, + ConnectionCount: 1, + RxPackets: 1, + RxBytes: 1, + TxPackets: 1, + TxBytes: 1, + SessionCountVscode: 1, + SessionCountJetbrains: 0, + SessionCountReconnectingPty: 0, + SessionCountSsh: 0, + ConnectionMedianLatencyMs: 10, + }, + }) + require.NoError(t, err) + + newWorkspace, err := client.Workspace(context.Background(), r.Workspace.ID) + require.NoError(t, err) + + assert.True(t, + newWorkspace.LastUsedAt.After(r.Workspace.LastUsedAt), + "%s is not after %s", newWorkspace.LastUsedAt, r.Workspace.LastUsedAt, + ) +} + func TestAgentAPI_LargeManifest(t *testing.T) { t.Parallel() ctx := testutil.Context(t, testutil.WaitLong) diff --git a/coderd/workspaceapps/proxy.go b/coderd/workspaceapps/proxy.go index 7bf470a3cc416..69f1aadca49b2 100644 --- a/coderd/workspaceapps/proxy.go +++ b/coderd/workspaceapps/proxy.go @@ -573,7 +573,7 @@ func (s *Server) proxyWorkspaceApp(rw http.ResponseWriter, r *http.Request, appT } // This strips the session token from a workspace app request. 
- cookieHeaders := r.Header.Values("Cookie")[:] + cookieHeaders := r.Header.Values("Cookie") r.Header.Del("Cookie") for _, cookieHeader := range cookieHeaders { r.Header.Add("Cookie", httpapi.StripCoderCookies(cookieHeader)) diff --git a/coderd/workspacebuilds.go b/coderd/workspacebuilds.go index ef5b63a1e5b19..e04e585d4aa53 100644 --- a/coderd/workspacebuilds.go +++ b/coderd/workspacebuilds.go @@ -555,7 +555,7 @@ func (api *API) verifyUserCanCancelWorkspaceBuilds(ctx context.Context, userID u if err != nil { return false, xerrors.New("user does not exist") } - return slices.Contains(user.RBACRoles, rbac.RoleOwner()), nil // only user with "owner" role can cancel workspace builds + return slices.Contains(user.RBACRoles, rbac.RoleOwner().String()), nil // only user with "owner" role can cancel workspace builds } // @Summary Get build parameters for workspace build diff --git a/coderd/workspacebuilds_test.go b/coderd/workspacebuilds_test.go index f8560ff911925..389e0563f46f8 100644 --- a/coderd/workspacebuilds_test.go +++ b/coderd/workspacebuilds_test.go @@ -20,9 +20,11 @@ import ( "cdr.dev/slog/sloggers/slogtest" "github.com/coder/coder/v2/coderd/audit" "github.com/coder/coder/v2/coderd/coderdtest" + "github.com/coder/coder/v2/coderd/coderdtest/oidctest" "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/dbauthz" "github.com/coder/coder/v2/coderd/database/dbtime" + "github.com/coder/coder/v2/coderd/externalauth" "github.com/coder/coder/v2/coderd/rbac" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/provisioner/echo" @@ -222,7 +224,7 @@ func TestWorkspaceBuilds(t *testing.T) { t.Parallel() client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true}) first := coderdtest.CreateFirstUser(t, client) - second, secondUser := coderdtest.CreateAnotherUser(t, client, first.OrganizationID, "owner") + second, secondUser := coderdtest.CreateAnotherUser(t, client, first.OrganizationID, rbac.RoleOwner()) ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) defer cancel() @@ -711,6 +713,78 @@ func TestWorkspaceBuildStatus(t *testing.T) { require.EqualValues(t, codersdk.WorkspaceStatusDeleted, workspace.LatestBuild.Status) } +func TestWorkspaceDeleteSuspendedUser(t *testing.T) { + t.Parallel() + const providerID = "fake-github" + fake := oidctest.NewFakeIDP(t, oidctest.WithServing()) + + validateCalls := 0 + userSuspended := false + owner := coderdtest.New(t, &coderdtest.Options{ + IncludeProvisionerDaemon: true, + ExternalAuthConfigs: []*externalauth.Config{ + fake.ExternalAuthConfig(t, providerID, &oidctest.ExternalAuthConfigOptions{ + ValidatePayload: func(email string) (interface{}, int, error) { + validateCalls++ + if userSuspended { + // Simulate the user being suspended from the IDP too. + return "", http.StatusForbidden, xerrors.New("user is suspended") + } + return "OK", 0, nil + }, + }), + }, + }) + + first := coderdtest.CreateFirstUser(t, owner) + + // New user that we will suspend when we try to delete the workspace. 
+ client, user := coderdtest.CreateAnotherUser(t, owner, first.OrganizationID, rbac.RoleTemplateAdmin()) + fake.ExternalLogin(t, client) + + version := coderdtest.CreateTemplateVersion(t, client, first.OrganizationID, &echo.Responses{ + Parse: echo.ParseComplete, + ProvisionApply: echo.ApplyComplete, + ProvisionPlan: []*proto.Response{{ + Type: &proto.Response_Plan{ + Plan: &proto.PlanComplete{ + Error: "", + Resources: nil, + Parameters: nil, + ExternalAuthProviders: []*proto.ExternalAuthProviderResource{ + { + Id: providerID, + Optional: false, + }, + }, + }, + }, + }}, + }) + + validateCalls = 0 // Reset + coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID) + template := coderdtest.CreateTemplate(t, client, first.OrganizationID, version.ID) + workspace := coderdtest.CreateWorkspace(t, client, first.OrganizationID, template.ID) + coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID) + require.Equal(t, 1, validateCalls) // Ensure the external link is working + + // Suspend the user + ctx := testutil.Context(t, testutil.WaitLong) + _, err := owner.UpdateUserStatus(ctx, user.ID.String(), codersdk.UserStatusSuspended) + require.NoError(t, err, "suspend user") + + // Now delete the workspace build + userSuspended = true + build, err := owner.CreateWorkspaceBuild(ctx, workspace.ID, codersdk.CreateWorkspaceBuildRequest{ + Transition: codersdk.WorkspaceTransitionDelete, + }) + require.NoError(t, err) + build = coderdtest.AwaitWorkspaceBuildJobCompleted(t, owner, build.ID) + require.Equal(t, 2, validateCalls) + require.Equal(t, codersdk.WorkspaceStatusDeleted, build.Status) +} + func TestWorkspaceBuildDebugMode(t *testing.T) { t.Parallel() diff --git a/coderd/workspaces.go b/coderd/workspaces.go index 7d0344be4e321..7e6698736eeb6 100644 --- a/coderd/workspaces.go +++ b/coderd/workspaces.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "net/http" + "slices" "strconv" "time" @@ -15,6 +16,7 @@ import ( "golang.org/x/xerrors" "cdr.dev/slog" + "github.com/coder/coder/v2/agent/proto" "github.com/coder/coder/v2/coderd/audit" "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/db2sdk" @@ -359,17 +361,12 @@ func (api *API) postWorkspacesByOrganization(rw http.ResponseWriter, r *http.Req } ) - wriBytes, err := json.Marshal(workspaceResourceInfo) - if err != nil { - api.Logger.Warn(ctx, "marshal workspace owner name") - } - aReq, commitAudit := audit.InitRequest[database.Workspace](rw, &audit.RequestParams{ Audit: *auditor, Log: api.Logger, Request: r, Action: database.AuditActionCreate, - AdditionalFields: wriBytes, + AdditionalFields: workspaceResourceInfo, OrganizationID: organization.ID, }) @@ -1105,7 +1102,9 @@ func (api *API) putExtendWorkspace(rw http.ResponseWriter, r *http.Request) { // @ID post-workspace-usage-by-id // @Security CoderSessionToken // @Tags Workspaces +// @Accept json // @Param workspace path string true "Workspace ID" format(uuid) +// @Param request body codersdk.PostWorkspaceUsageRequest false "Post workspace usage request" // @Success 204 // @Router /workspaces/{workspace}/usage [post] func (api *API) postWorkspaceUsage(rw http.ResponseWriter, r *http.Request) { @@ -1115,7 +1114,103 @@ func (api *API) postWorkspaceUsage(rw http.ResponseWriter, r *http.Request) { return } - api.workspaceUsageTracker.Add(workspace.ID) + api.statsReporter.TrackUsage(workspace.ID) + + if !api.Experiments.Enabled(codersdk.ExperimentWorkspaceUsage) { + // Continue previous behavior if the experiment is not enabled. 
+ rw.WriteHeader(http.StatusNoContent) + return + } + + if r.Body == http.NoBody { + // Continue previous behavior if no body is present. + rw.WriteHeader(http.StatusNoContent) + return + } + + ctx := r.Context() + var req codersdk.PostWorkspaceUsageRequest + if !httpapi.Read(ctx, rw, r, &req) { + return + } + + if req.AgentID == uuid.Nil && req.AppName == "" { + // Continue previous behavior if body is empty. + rw.WriteHeader(http.StatusNoContent) + return + } + if req.AgentID == uuid.Nil { + httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ + Message: "Invalid request", + Validations: []codersdk.ValidationError{{ + Field: "agent_id", + Detail: "must be set when app_name is set", + }}, + }) + return + } + if req.AppName == "" { + httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ + Message: "Invalid request", + Validations: []codersdk.ValidationError{{ + Field: "app_name", + Detail: "must be set when agent_id is set", + }}, + }) + return + } + if !slices.Contains(codersdk.AllowedAppNames, req.AppName) { + httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ + Message: "Invalid request", + Validations: []codersdk.ValidationError{{ + Field: "app_name", + Detail: fmt.Sprintf("must be one of %v", codersdk.AllowedAppNames), + }}, + }) + return + } + + stat := &proto.Stats{ + ConnectionCount: 1, + } + switch req.AppName { + case codersdk.UsageAppNameVscode: + stat.SessionCountVscode = 1 + case codersdk.UsageAppNameJetbrains: + stat.SessionCountJetbrains = 1 + case codersdk.UsageAppNameReconnectingPty: + stat.SessionCountReconnectingPty = 1 + case codersdk.UsageAppNameSSH: + stat.SessionCountSsh = 1 + default: + // This means the app_name is in the codersdk.AllowedAppNames but not being + // handled by this switch statement. 
+ httpapi.InternalServerError(rw, xerrors.Errorf("unknown app_name %q", req.AppName)) + return + } + + agent, err := api.Database.GetWorkspaceAgentByID(ctx, req.AgentID) + if err != nil { + if httpapi.Is404Error(err) { + httpapi.ResourceNotFound(rw) + return + } + httpapi.InternalServerError(rw, err) + return + } + + template, err := api.Database.GetTemplateByID(ctx, workspace.TemplateID) + if err != nil { + httpapi.InternalServerError(rw, err) + return + } + + err = api.statsReporter.ReportAgentStats(ctx, dbtime.Now(), workspace, agent, template.Name, stat) + if err != nil { + httpapi.InternalServerError(rw, err) + return + } + rw.WriteHeader(http.StatusNoContent) } diff --git a/coderd/workspaces_test.go b/coderd/workspaces_test.go index d91de4a5e26a1..e5a01df9f8edc 100644 --- a/coderd/workspaces_test.go +++ b/coderd/workspaces_test.go @@ -484,7 +484,7 @@ func TestWorkspacesSortOrder(t *testing.T) { client, db := coderdtest.NewWithDatabase(t, nil) firstUser := coderdtest.CreateFirstUser(t, client) - secondUserClient, secondUser := coderdtest.CreateAnotherUserMutators(t, client, firstUser.OrganizationID, []string{"owner"}, func(r *codersdk.CreateUserRequest) { + secondUserClient, secondUser := coderdtest.CreateAnotherUserMutators(t, client, firstUser.OrganizationID, []rbac.RoleIdentifier{rbac.RoleOwner()}, func(r *codersdk.CreateUserRequest) { r.Username = "zzz" }) @@ -3371,3 +3371,127 @@ func TestWorkspaceFavoriteUnfavorite(t *testing.T) { require.ErrorAs(t, err, &sdkErr) require.Equal(t, http.StatusForbidden, sdkErr.StatusCode()) } + +func TestWorkspaceUsageTracking(t *testing.T) { + t.Parallel() + t.Run("NoExperiment", func(t *testing.T) { + t.Parallel() + client, db := coderdtest.NewWithDatabase(t, nil) + user := coderdtest.CreateFirstUser(t, client) + tmpDir := t.TempDir() + r := dbfake.WorkspaceBuild(t, db, database.Workspace{ + OrganizationID: user.OrganizationID, + OwnerID: user.UserID, + }).WithAgent(func(agents []*proto.Agent) []*proto.Agent { + agents[0].Directory = tmpDir + return agents + }).Do() + + ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitMedium) + defer cancel() + + // continue legacy behavior + err := client.PostWorkspaceUsage(ctx, r.Workspace.ID) + require.NoError(t, err) + err = client.PostWorkspaceUsageWithBody(ctx, r.Workspace.ID, codersdk.PostWorkspaceUsageRequest{}) + require.NoError(t, err) + }) + t.Run("Experiment", func(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitMedium) + defer cancel() + dv := coderdtest.DeploymentValues(t) + dv.Experiments = []string{string(codersdk.ExperimentWorkspaceUsage)} + client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{ + DeploymentValues: dv, + }) + user := coderdtest.CreateFirstUser(t, client) + tmpDir := t.TempDir() + org := dbgen.Organization(t, db, database.Organization{}) + _ = dbgen.OrganizationMember(t, db, database.OrganizationMember{ + UserID: user.UserID, + OrganizationID: org.ID, + }) + templateVersion := dbgen.TemplateVersion(t, db, database.TemplateVersion{ + OrganizationID: org.ID, + CreatedBy: user.UserID, + }) + template := dbgen.Template(t, db, database.Template{ + OrganizationID: org.ID, + ActiveVersionID: templateVersion.ID, + CreatedBy: user.UserID, + DefaultTTL: int64(8 * time.Hour), + }) + _, err := client.UpdateTemplateMeta(ctx, template.ID, codersdk.UpdateTemplateMeta{ + ActivityBumpMillis: 8 * time.Hour.Milliseconds(), + }) + require.NoError(t, err) + r := dbfake.WorkspaceBuild(t, db, database.Workspace{ + 
OrganizationID: user.OrganizationID, + OwnerID: user.UserID, + TemplateID: template.ID, + Ttl: sql.NullInt64{Valid: true, Int64: int64(8 * time.Hour)}, + }).WithAgent(func(agents []*proto.Agent) []*proto.Agent { + agents[0].Directory = tmpDir + return agents + }).Do() + + // continue legacy behavior + err = client.PostWorkspaceUsage(ctx, r.Workspace.ID) + require.NoError(t, err) + err = client.PostWorkspaceUsageWithBody(ctx, r.Workspace.ID, codersdk.PostWorkspaceUsageRequest{}) + require.NoError(t, err) + + workspace, err := client.Workspace(ctx, r.Workspace.ID) + require.NoError(t, err) + + // only agent id fails + err = client.PostWorkspaceUsageWithBody(ctx, r.Workspace.ID, codersdk.PostWorkspaceUsageRequest{ + AgentID: workspace.LatestBuild.Resources[0].Agents[0].ID, + }) + require.ErrorContains(t, err, "agent_id") + // only app name fails + err = client.PostWorkspaceUsageWithBody(ctx, r.Workspace.ID, codersdk.PostWorkspaceUsageRequest{ + AppName: "ssh", + }) + require.ErrorContains(t, err, "app_name") + // unknown app name fails + err = client.PostWorkspaceUsageWithBody(ctx, r.Workspace.ID, codersdk.PostWorkspaceUsageRequest{ + AgentID: workspace.LatestBuild.Resources[0].Agents[0].ID, + AppName: "unknown", + }) + require.ErrorContains(t, err, "app_name") + + // vscode works + err = client.PostWorkspaceUsageWithBody(ctx, r.Workspace.ID, codersdk.PostWorkspaceUsageRequest{ + AgentID: workspace.LatestBuild.Resources[0].Agents[0].ID, + AppName: "vscode", + }) + require.NoError(t, err) + // jetbrains works + err = client.PostWorkspaceUsageWithBody(ctx, r.Workspace.ID, codersdk.PostWorkspaceUsageRequest{ + AgentID: workspace.LatestBuild.Resources[0].Agents[0].ID, + AppName: "jetbrains", + }) + require.NoError(t, err) + // reconnecting-pty works + err = client.PostWorkspaceUsageWithBody(ctx, r.Workspace.ID, codersdk.PostWorkspaceUsageRequest{ + AgentID: workspace.LatestBuild.Resources[0].Agents[0].ID, + AppName: "reconnecting-pty", + }) + require.NoError(t, err) + // ssh works + err = client.PostWorkspaceUsageWithBody(ctx, r.Workspace.ID, codersdk.PostWorkspaceUsageRequest{ + AgentID: workspace.LatestBuild.Resources[0].Agents[0].ID, + AppName: "ssh", + }) + require.NoError(t, err) + + // ensure deadline has been bumped + newWorkspace, err := client.Workspace(ctx, r.Workspace.ID) + require.NoError(t, err) + require.True(t, workspace.LatestBuild.Deadline.Valid) + require.True(t, newWorkspace.LatestBuild.Deadline.Valid) + require.Greater(t, newWorkspace.LatestBuild.Deadline.Time, workspace.LatestBuild.Deadline.Time) + }) +} diff --git a/coderd/batchstats/batcher.go b/coderd/workspacestats/batcher.go similarity index 86% rename from coderd/batchstats/batcher.go rename to coderd/workspacestats/batcher.go index bbff38b0413c0..2872c368dc61c 100644 --- a/coderd/batchstats/batcher.go +++ b/coderd/workspacestats/batcher.go @@ -1,4 +1,4 @@ -package batchstats +package workspacestats import ( "context" @@ -24,9 +24,13 @@ const ( defaultFlushInterval = time.Second ) -// Batcher holds a buffer of agent stats and periodically flushes them to -// its configured store. It also updates the workspace's last used time. -type Batcher struct { +type Batcher interface { + Add(now time.Time, agentID uuid.UUID, templateID uuid.UUID, userID uuid.UUID, workspaceID uuid.UUID, st *agentproto.Stats) error +} + +// DBBatcher holds a buffer of agent stats and periodically flushes them to +// its configured store. 
+type DBBatcher struct { store database.Store log slog.Logger @@ -50,39 +54,39 @@ type Batcher struct { } // Option is a functional option for configuring a Batcher. -type Option func(b *Batcher) +type BatcherOption func(b *DBBatcher) -// WithStore sets the store to use for storing stats. -func WithStore(store database.Store) Option { - return func(b *Batcher) { +// BatcherWithStore sets the store to use for storing stats. +func BatcherWithStore(store database.Store) BatcherOption { + return func(b *DBBatcher) { b.store = store } } -// WithBatchSize sets the number of stats to store in a batch. -func WithBatchSize(size int) Option { - return func(b *Batcher) { +// BatcherWithBatchSize sets the number of stats to store in a batch. +func BatcherWithBatchSize(size int) BatcherOption { + return func(b *DBBatcher) { b.batchSize = size } } -// WithInterval sets the interval for flushes. -func WithInterval(d time.Duration) Option { - return func(b *Batcher) { +// BatcherWithInterval sets the interval for flushes. +func BatcherWithInterval(d time.Duration) BatcherOption { + return func(b *DBBatcher) { b.interval = d } } -// WithLogger sets the logger to use for logging. -func WithLogger(log slog.Logger) Option { - return func(b *Batcher) { +// BatcherWithLogger sets the logger to use for logging. +func BatcherWithLogger(log slog.Logger) BatcherOption { + return func(b *DBBatcher) { b.log = log } } -// New creates a new Batcher and starts it. -func New(ctx context.Context, opts ...Option) (*Batcher, func(), error) { - b := &Batcher{} +// NewBatcher creates a new Batcher and starts it. +func NewBatcher(ctx context.Context, opts ...BatcherOption) (*DBBatcher, func(), error) { + b := &DBBatcher{} b.log = slog.Make(sloghuman.Sink(os.Stderr)) b.flushLever = make(chan struct{}, 1) // Buffered so that it doesn't block. for _, opt := range opts { @@ -127,7 +131,7 @@ func New(ctx context.Context, opts ...Option) (*Batcher, func(), error) { } // Add adds a stat to the batcher for the given workspace and agent. -func (b *Batcher) Add( +func (b *DBBatcher) Add( now time.Time, agentID uuid.UUID, templateID uuid.UUID, @@ -174,7 +178,7 @@ func (b *Batcher) Add( } // Run runs the batcher. -func (b *Batcher) run(ctx context.Context) { +func (b *DBBatcher) run(ctx context.Context) { // nolint:gocritic // This is only ever used for one thing - inserting agent stats. authCtx := dbauthz.AsSystemRestricted(ctx) for { @@ -199,7 +203,7 @@ func (b *Batcher) run(ctx context.Context) { } // flush flushes the batcher's buffer. -func (b *Batcher) flush(ctx context.Context, forced bool, reason string) { +func (b *DBBatcher) flush(ctx context.Context, forced bool, reason string) { b.mu.Lock() b.flushForced.Store(true) start := time.Now() @@ -256,7 +260,7 @@ func (b *Batcher) flush(ctx context.Context, forced bool, reason string) { } // initBuf resets the buffer. b MUST be locked. 
-func (b *Batcher) initBuf(size int) { +func (b *DBBatcher) initBuf(size int) { b.buf = &database.InsertWorkspaceAgentStatsParams{ ID: make([]uuid.UUID, 0, b.batchSize), CreatedAt: make([]time.Time, 0, b.batchSize), @@ -280,7 +284,7 @@ func (b *Batcher) initBuf(size int) { b.connectionsByProto = make([]map[string]int64, 0, size) } -func (b *Batcher) resetBuf() { +func (b *DBBatcher) resetBuf() { b.buf.ID = b.buf.ID[:0] b.buf.CreatedAt = b.buf.CreatedAt[:0] b.buf.UserID = b.buf.UserID[:0] diff --git a/coderd/batchstats/batcher_internal_test.go b/coderd/workspacestats/batcher_internal_test.go similarity index 96% rename from coderd/batchstats/batcher_internal_test.go rename to coderd/workspacestats/batcher_internal_test.go index 8954fa5455fcd..97fdaf9f2aec5 100644 --- a/coderd/batchstats/batcher_internal_test.go +++ b/coderd/workspacestats/batcher_internal_test.go @@ -1,4 +1,4 @@ -package batchstats +package workspacestats import ( "context" @@ -9,6 +9,7 @@ import ( "cdr.dev/slog" "cdr.dev/slog/sloggers/slogtest" + "github.com/coder/coder/v2/codersdk" agentproto "github.com/coder/coder/v2/agent/proto" "github.com/coder/coder/v2/coderd/database" @@ -16,7 +17,6 @@ import ( "github.com/coder/coder/v2/coderd/database/dbtestutil" "github.com/coder/coder/v2/coderd/database/dbtime" "github.com/coder/coder/v2/coderd/database/pubsub" - "github.com/coder/coder/v2/coderd/rbac" "github.com/coder/coder/v2/cryptorand" ) @@ -35,10 +35,10 @@ func TestBatchStats(t *testing.T) { tick := make(chan time.Time) flushed := make(chan int, 1) - b, closer, err := New(ctx, - WithStore(store), - WithLogger(log), - func(b *Batcher) { + b, closer, err := NewBatcher(ctx, + BatcherWithStore(store), + BatcherWithLogger(log), + func(b *DBBatcher) { b.tickCh = tick b.flushed = flushed }, @@ -177,7 +177,7 @@ func setupDeps(t *testing.T, store database.Store, ps pubsub.Pubsub) deps { _, err := store.InsertOrganizationMember(context.Background(), database.InsertOrganizationMemberParams{ OrganizationID: org.ID, UserID: user.ID, - Roles: []string{rbac.RoleOrgMember(org.ID)}, + Roles: []string{codersdk.RoleOrganizationMember}, }) require.NoError(t, err) tv := dbgen.TemplateVersion(t, store, database.TemplateVersion{ diff --git a/coderd/workspacestats/reporter.go b/coderd/workspacestats/reporter.go index 8ae4bdd827ac3..c6b7afb3c68ad 100644 --- a/coderd/workspacestats/reporter.go +++ b/coderd/workspacestats/reporter.go @@ -22,16 +22,13 @@ import ( "github.com/coder/coder/v2/codersdk" ) -type StatsBatcher interface { - Add(now time.Time, agentID uuid.UUID, templateID uuid.UUID, userID uuid.UUID, workspaceID uuid.UUID, st *agentproto.Stats) error -} - type ReporterOptions struct { Database database.Store Logger slog.Logger Pubsub pubsub.Pubsub TemplateScheduleStore *atomic.Pointer[schedule.TemplateScheduleStore] - StatsBatcher StatsBatcher + StatsBatcher Batcher + UsageTracker *UsageTracker UpdateAgentMetricsFn func(ctx context.Context, labels prometheusmetrics.AgentMetricLabels, metrics []*agentproto.Stats_Metric) AppStatBatchSize int @@ -205,3 +202,11 @@ func UpdateTemplateWorkspacesLastUsedAt(ctx context.Context, db database.Store, } return nil } + +func (r *Reporter) TrackUsage(workspaceID uuid.UUID) { + r.opts.UsageTracker.Add(workspaceID) +} + +func (r *Reporter) Close() error { + return r.opts.UsageTracker.Close() +} diff --git a/coderd/workspaceusage/tracker.go b/coderd/workspacestats/tracker.go similarity index 86% rename from coderd/workspaceusage/tracker.go rename to coderd/workspacestats/tracker.go index 
118b021d71d52..33532247b36e0 100644 --- a/coderd/workspaceusage/tracker.go +++ b/coderd/workspacestats/tracker.go @@ -1,4 +1,4 @@ -package workspaceusage +package workspacestats import ( "bytes" @@ -25,10 +25,10 @@ type Store interface { BatchUpdateWorkspaceLastUsedAt(context.Context, database.BatchUpdateWorkspaceLastUsedAtParams) error } -// Tracker tracks and de-bounces updates to workspace usage activity. +// UsageTracker tracks and de-bounces updates to workspace usage activity. // It keeps an internal map of workspace IDs that have been used and // periodically flushes this to its configured Store. -type Tracker struct { +type UsageTracker struct { log slog.Logger // you know, for logs flushLock sync.Mutex // protects m flushErrors int // tracks the number of consecutive errors flushing @@ -42,10 +42,10 @@ type Tracker struct { flushCh chan int // used for testing. } -// New returns a new Tracker. It is the caller's responsibility +// NewTracker returns a new Tracker. It is the caller's responsibility // to call Close(). -func New(s Store, opts ...Option) *Tracker { - tr := &Tracker{ +func NewTracker(s Store, opts ...TrackerOption) *UsageTracker { + tr := &UsageTracker{ log: slog.Make(sloghuman.Sink(os.Stderr)), m: &uuidSet{}, s: s, @@ -67,33 +67,33 @@ func New(s Store, opts ...Option) *Tracker { return tr } -type Option func(*Tracker) +type TrackerOption func(*UsageTracker) -// WithLogger sets the logger to be used by Tracker. -func WithLogger(log slog.Logger) Option { - return func(h *Tracker) { +// TrackerWithLogger sets the logger to be used by Tracker. +func TrackerWithLogger(log slog.Logger) TrackerOption { + return func(h *UsageTracker) { h.log = log } } -// WithFlushInterval allows configuring the flush interval of Tracker. -func WithFlushInterval(d time.Duration) Option { - return func(h *Tracker) { +// TrackerWithFlushInterval allows configuring the flush interval of Tracker. +func TrackerWithFlushInterval(d time.Duration) TrackerOption { + return func(h *UsageTracker) { ticker := time.NewTicker(d) h.tickCh = ticker.C h.stopTick = ticker.Stop } } -// WithTickFlush allows passing two channels: one that reads +// TrackerWithTickFlush allows passing two channels: one that reads // a time.Time, and one that returns the number of marked workspaces // every time Tracker flushes. // For testing only and will panic if used outside of tests. -func WithTickFlush(tickCh <-chan time.Time, flushCh chan int) Option { +func TrackerWithTickFlush(tickCh <-chan time.Time, flushCh chan int) TrackerOption { if flag.Lookup("test.v") == nil { panic("developer error: WithTickFlush is not to be used outside of tests.") } - return func(h *Tracker) { + return func(h *UsageTracker) { h.tickCh = tickCh h.stopTick = func() {} h.flushCh = flushCh @@ -102,14 +102,14 @@ func WithTickFlush(tickCh <-chan time.Time, flushCh chan int) Option { // Add marks the workspace with the given ID as having been used recently. // Tracker will periodically flush this to its configured Store. -func (tr *Tracker) Add(workspaceID uuid.UUID) { +func (tr *UsageTracker) Add(workspaceID uuid.UUID) { tr.m.Add(workspaceID) } // flush updates last_used_at of all current workspace IDs. // If this is held while a previous flush is in progress, it will // deadlock until the previous flush has completed. 
-func (tr *Tracker) flush(now time.Time) { +func (tr *UsageTracker) flush(now time.Time) { // Copy our current set of IDs ids := tr.m.UniqueAndClear() count := len(ids) @@ -154,7 +154,7 @@ func (tr *Tracker) flush(now time.Time) { // loop periodically flushes every tick. // If loop is called after Close, it will exit immediately and log an error. -func (tr *Tracker) loop() { +func (tr *UsageTracker) loop() { select { case <-tr.doneCh: tr.log.Error(context.Background(), "developer error: Loop called after Close") @@ -186,7 +186,7 @@ func (tr *Tracker) loop() { // Close stops Tracker and returns once Loop has exited. // After calling Close(), Loop must not be called. -func (tr *Tracker) Close() error { +func (tr *UsageTracker) Close() error { tr.stopOnce.Do(func() { tr.stopCh <- struct{}{} tr.stopTick() diff --git a/coderd/workspaceusage/tracker_test.go b/coderd/workspacestats/tracker_test.go similarity index 96% rename from coderd/workspaceusage/tracker_test.go rename to coderd/workspacestats/tracker_test.go index ae9a9d2162d1c..99e9f9503b645 100644 --- a/coderd/workspaceusage/tracker_test.go +++ b/coderd/workspacestats/tracker_test.go @@ -1,4 +1,4 @@ -package workspaceusage_test +package workspacestats_test import ( "bytes" @@ -21,7 +21,7 @@ import ( "github.com/coder/coder/v2/coderd/database/dbtestutil" "github.com/coder/coder/v2/coderd/database/dbtime" "github.com/coder/coder/v2/coderd/database/pubsub" - "github.com/coder/coder/v2/coderd/workspaceusage" + "github.com/coder/coder/v2/coderd/workspacestats" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/testutil" ) @@ -35,9 +35,9 @@ func TestTracker(t *testing.T) { tickCh := make(chan time.Time) flushCh := make(chan int, 1) - wut := workspaceusage.New(mDB, - workspaceusage.WithLogger(log), - workspaceusage.WithTickFlush(tickCh, flushCh), + wut := workspacestats.NewTracker(mDB, + workspacestats.TrackerWithLogger(log), + workspacestats.TrackerWithTickFlush(tickCh, flushCh), ) defer wut.Close() diff --git a/coderd/workspacestats/workspacestatstest/batcher.go b/coderd/workspacestats/workspacestatstest/batcher.go new file mode 100644 index 0000000000000..ad5ba60ad16d0 --- /dev/null +++ b/coderd/workspacestats/workspacestatstest/batcher.go @@ -0,0 +1,38 @@ +package workspacestatstest + +import ( + "sync" + "time" + + "github.com/google/uuid" + + agentproto "github.com/coder/coder/v2/agent/proto" + "github.com/coder/coder/v2/coderd/workspacestats" +) + +type StatsBatcher struct { + Mu sync.Mutex + + Called int64 + LastTime time.Time + LastAgentID uuid.UUID + LastTemplateID uuid.UUID + LastUserID uuid.UUID + LastWorkspaceID uuid.UUID + LastStats *agentproto.Stats +} + +var _ workspacestats.Batcher = &StatsBatcher{} + +func (b *StatsBatcher) Add(now time.Time, agentID uuid.UUID, templateID uuid.UUID, userID uuid.UUID, workspaceID uuid.UUID, st *agentproto.Stats) error { + b.Mu.Lock() + defer b.Mu.Unlock() + b.Called++ + b.LastTime = now + b.LastAgentID = agentID + b.LastTemplateID = templateID + b.LastUserID = userID + b.LastWorkspaceID = workspaceID + b.LastStats = st + return nil +} diff --git a/codersdk/agentsdk/agentsdk.go b/codersdk/agentsdk/agentsdk.go index 5dcccca09e350..32222479b37ee 100644 --- a/codersdk/agentsdk/agentsdk.go +++ b/codersdk/agentsdk/agentsdk.go @@ -84,23 +84,6 @@ type PostMetadataRequest struct { // performance. type PostMetadataRequestDeprecated = codersdk.WorkspaceAgentMetadataResult -// PostMetadata posts agent metadata to the Coder server. 
-// -// Deprecated: use BatchUpdateMetadata on the agent dRPC API instead -func (c *Client) PostMetadata(ctx context.Context, req PostMetadataRequest) error { - res, err := c.SDK.Request(ctx, http.MethodPost, "/api/v2/workspaceagents/me/metadata", req) - if err != nil { - return xerrors.Errorf("execute request: %w", err) - } - defer res.Body.Close() - - if res.StatusCode != http.StatusNoContent { - return codersdk.ReadBodyAsError(res) - } - - return nil -} - type Manifest struct { AgentID uuid.UUID `json:"agent_id"` AgentName string `json:"agent_name"` @@ -457,49 +440,11 @@ type StatsResponse struct { ReportInterval time.Duration `json:"report_interval"` } -// PostStats sends agent stats to the coder server -// -// Deprecated: uses agent API v1 endpoint -func (c *Client) PostStats(ctx context.Context, stats *Stats) (StatsResponse, error) { - res, err := c.SDK.Request(ctx, http.MethodPost, "/api/v2/workspaceagents/me/report-stats", stats) - if err != nil { - return StatsResponse{}, xerrors.Errorf("send request: %w", err) - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return StatsResponse{}, codersdk.ReadBodyAsError(res) - } - - var interval StatsResponse - err = json.NewDecoder(res.Body).Decode(&interval) - if err != nil { - return StatsResponse{}, xerrors.Errorf("decode stats response: %w", err) - } - - return interval, nil -} - type PostLifecycleRequest struct { State codersdk.WorkspaceAgentLifecycle `json:"state"` ChangedAt time.Time `json:"changed_at"` } -// PostLifecycle posts the agent's lifecycle to the Coder server. -// -// Deprecated: Use UpdateLifecycle on the dRPC API instead -func (c *Client) PostLifecycle(ctx context.Context, req PostLifecycleRequest) error { - res, err := c.SDK.Request(ctx, http.MethodPost, "/api/v2/workspaceagents/me/report-lifecycle", req) - if err != nil { - return xerrors.Errorf("agent state post request: %w", err) - } - defer res.Body.Close() - if res.StatusCode != http.StatusNoContent { - return codersdk.ReadBodyAsError(res) - } - - return nil -} - type PostStartupRequest struct { Version string `json:"version"` ExpandedDirectory string `json:"expanded_directory"` @@ -533,7 +478,7 @@ func (c *Client) PatchLogs(ctx context.Context, req PatchLogs) error { return nil } -type PostLogSource struct { +type PostLogSourceRequest struct { // ID is a unique identifier for the log source. 
// It is scoped to a workspace agent, and can be statically // defined inside code to prevent duplicate sources from being @@ -543,7 +488,7 @@ type PostLogSource struct { Icon string `json:"icon"` } -func (c *Client) PostLogSource(ctx context.Context, req PostLogSource) (codersdk.WorkspaceAgentLogSource, error) { +func (c *Client) PostLogSource(ctx context.Context, req PostLogSourceRequest) (codersdk.WorkspaceAgentLogSource, error) { res, err := c.SDK.Request(ctx, http.MethodPost, "/api/v2/workspaceagents/me/log-source", req) if err != nil { return codersdk.WorkspaceAgentLogSource{}, err diff --git a/codersdk/agentsdk/convert.go b/codersdk/agentsdk/convert.go index adfabd1510768..fcd2dda414165 100644 --- a/codersdk/agentsdk/convert.go +++ b/codersdk/agentsdk/convert.go @@ -348,7 +348,7 @@ func ProtoFromLog(log Log) (*proto.Log, error) { } return &proto.Log{ CreatedAt: timestamppb.New(log.CreatedAt), - Output: log.Output, + Output: strings.ToValidUTF8(log.Output, "❌"), Level: proto.Log_Level(lvl), }, nil } @@ -371,3 +371,11 @@ func LifecycleStateFromProto(s proto.Lifecycle_State) (codersdk.WorkspaceAgentLi } return codersdk.WorkspaceAgentLifecycle(strings.ToLower(caps)), nil } + +func ProtoFromLifecycleState(s codersdk.WorkspaceAgentLifecycle) (proto.Lifecycle_State, error) { + caps, ok := proto.Lifecycle_State_value[strings.ToUpper(string(s))] + if !ok { + return 0, xerrors.Errorf("unknown lifecycle state: %s", s) + } + return proto.Lifecycle_State(caps), nil +} diff --git a/codersdk/agentsdk/logs_internal_test.go b/codersdk/agentsdk/logs_internal_test.go index d942689d31465..da2f0dd86dd38 100644 --- a/codersdk/agentsdk/logs_internal_test.go +++ b/codersdk/agentsdk/logs_internal_test.go @@ -231,6 +231,51 @@ func TestLogSender_SkipHugeLog(t *testing.T) { require.ErrorIs(t, err, context.Canceled) } +func TestLogSender_InvalidUTF8(t *testing.T) { + t.Parallel() + testCtx := testutil.Context(t, testutil.WaitShort) + ctx, cancel := context.WithCancel(testCtx) + logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug) + fDest := newFakeLogDest() + uut := NewLogSender(logger) + + t0 := dbtime.Now() + ls1 := uuid.UUID{0x11} + + uut.Enqueue(ls1, + Log{ + CreatedAt: t0, + Output: "test log 0, src 1\xc3\x28", + Level: codersdk.LogLevelInfo, + }, + Log{ + CreatedAt: t0, + Output: "test log 1, src 1", + Level: codersdk.LogLevelInfo, + }) + + loopErr := make(chan error, 1) + go func() { + err := uut.SendLoop(ctx, fDest) + loopErr <- err + }() + + req := testutil.RequireRecvCtx(ctx, t, fDest.reqs) + require.NotNil(t, req) + require.Len(t, req.Logs, 2, "it should sanitize invalid UTF-8, but still send") + // the 0xc3, 0x28 is an invalid 2-byte sequence in UTF-8. 
The sanitizer replaces 0xc3 with ❌, and then + // interprets 0x28 as a 1-byte sequence "(" + require.Equal(t, "test log 0, src 1❌(", req.Logs[0].GetOutput()) + require.Equal(t, proto.Log_INFO, req.Logs[0].GetLevel()) + require.Equal(t, "test log 1, src 1", req.Logs[1].GetOutput()) + require.Equal(t, proto.Log_INFO, req.Logs[1].GetLevel()) + testutil.RequireSendCtx(ctx, t, fDest.resps, &proto.BatchCreateLogsResponse{}) + + cancel() + err := testutil.RequireRecvCtx(testCtx, t, loopErr) + require.ErrorIs(t, err, context.Canceled) +} + func TestLogSender_Batch(t *testing.T) { t.Parallel() testCtx := testutil.Context(t, testutil.WaitShort) diff --git a/codersdk/audit.go b/codersdk/audit.go index 553bd9cc2dbea..683db5406c13f 100644 --- a/codersdk/audit.go +++ b/codersdk/audit.go @@ -30,6 +30,8 @@ const ( ResourceTypeOAuth2ProviderApp ResourceType = "oauth2_provider_app" // nolint:gosec // This is not a secret. ResourceTypeOAuth2ProviderAppSecret ResourceType = "oauth2_provider_app_secret" + ResourceTypeCustomRole ResourceType = "custom_role" + ResourceTypeOrganizationMember = "organization_member" ) func (r ResourceType) FriendlyString() string { @@ -66,6 +68,10 @@ func (r ResourceType) FriendlyString() string { return "oauth2 app" case ResourceTypeOAuth2ProviderAppSecret: return "oauth2 app secret" + case ResourceTypeCustomRole: + return "custom role" + case ResourceTypeOrganizationMember: + return "organization member" default: return "unknown" } @@ -155,6 +161,7 @@ type CreateTestAuditLogRequest struct { AdditionalFields json.RawMessage `json:"additional_fields,omitempty"` Time time.Time `json:"time,omitempty" format:"date-time"` BuildReason BuildReason `json:"build_reason,omitempty" enums:"autostart,autostop,initiator"` + OrganizationID uuid.UUID `json:"organization_id,omitempty" format:"uuid"` } // AuditLogs retrieves audit logs from the given page. 
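// A standalone sketch (not taken from the patch) of the UTF-8 sanitization that
// ProtoFromLog applies above and that TestLogSender_InvalidUTF8 asserts: the standard
// library's strings.ToValidUTF8 replaces each run of invalid bytes with the replacement
// string, which is why "\xc3\x28" comes out as "❌(" rather than two markers.
package main

import (
	"fmt"
	"strings"
)

func main() {
	// 0xc3 opens a 2-byte UTF-8 sequence, but 0x28 ('(') is not a valid continuation
	// byte, so only 0xc3 is replaced and '(' is kept as-is.
	raw := "test log 0, src 1\xc3\x28"
	fmt.Println(strings.ToValidUTF8(raw, "❌")) // test log 0, src 1❌(
}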
diff --git a/codersdk/deployment.go b/codersdk/deployment.go index c89a78668637d..7b13d083a4435 100644 --- a/codersdk/deployment.go +++ b/codersdk/deployment.go @@ -333,6 +333,7 @@ type OIDCConfig struct { Scopes serpent.StringArray `json:"scopes" typescript:",notnull"` IgnoreEmailVerified serpent.Bool `json:"ignore_email_verified" typescript:",notnull"` UsernameField serpent.String `json:"username_field" typescript:",notnull"` + NameField serpent.String `json:"name_field" typescript:",notnull"` EmailField serpent.String `json:"email_field" typescript:",notnull"` AuthURLParams serpent.Struct[map[string]string] `json:"auth_url_params" typescript:",notnull"` IgnoreUserInfo serpent.Bool `json:"ignore_user_info" typescript:",notnull"` @@ -392,7 +393,7 @@ type ExternalAuthConfig struct { AppInstallationsURL string `json:"app_installations_url" yaml:"app_installations_url"` NoRefresh bool `json:"no_refresh" yaml:"no_refresh"` Scopes []string `json:"scopes" yaml:"scopes"` - ExtraTokenKeys []string `json:"extra_token_keys" yaml:"extra_token_keys"` + ExtraTokenKeys []string `json:"-" yaml:"extra_token_keys"` DeviceFlow bool `json:"device_flow" yaml:"device_flow"` DeviceCodeURL string `json:"device_code_url" yaml:"device_code_url"` // Regex allows API requesters to match an auth config by @@ -1192,6 +1193,16 @@ when required by your organization's security policy.`, Group: &deploymentGroupOIDC, YAML: "usernameField", }, + { + Name: "OIDC Name Field", + Description: "OIDC claim field to use as the name.", + Flag: "oidc-name-field", + Env: "CODER_OIDC_NAME_FIELD", + Default: "name", + Value: &c.OIDC.NameField, + Group: &deploymentGroupOIDC, + YAML: "nameField", + }, { Name: "OIDC Email Field", Description: "OIDC claim field to use as the email.", @@ -2162,11 +2173,12 @@ type BuildInfoResponse struct { ExternalURL string `json:"external_url"` // Version returns the semantic version of the build. Version string `json:"version"` - // DashboardURL is the URL to hit the deployment's dashboard. // For external workspace proxies, this is the coderd they are connected // to. DashboardURL string `json:"dashboard_url"` + // Telemetry is a boolean that indicates whether telemetry is enabled. + Telemetry bool `json:"telemetry"` WorkspaceProxy bool `json:"workspace_proxy"` @@ -2222,6 +2234,7 @@ const ( ExperimentAutoFillParameters Experiment = "auto-fill-parameters" // This should not be taken out of experiments until we have redesigned the feature. ExperimentMultiOrganization Experiment = "multi-organization" // Requires organization context for interactions, default org is assumed. 
ExperimentCustomRoles Experiment = "custom-roles" // Allows creating runtime custom roles + ExperimentWorkspaceUsage Experiment = "workspace-usage" // Enables the new workspace usage tracking ) // ExperimentsAll should include all experiments that are safe for diff --git a/codersdk/groups.go b/codersdk/groups.go index eb76902b013b4..4b5b8f5a5f4e6 100644 --- a/codersdk/groups.go +++ b/codersdk/groups.go @@ -18,8 +18,8 @@ const ( ) type CreateGroupRequest struct { - Name string `json:"name"` - DisplayName string `json:"display_name"` + Name string `json:"name" validate:"required,group_name"` + DisplayName string `json:"display_name" validate:"omitempty,group_display_name"` AvatarURL string `json:"avatar_url"` QuotaAllowance int `json:"quota_allowance"` } @@ -111,8 +111,8 @@ func (c *Client) Group(ctx context.Context, group uuid.UUID) (Group, error) { type PatchGroupRequest struct { AddUsers []string `json:"add_users"` RemoveUsers []string `json:"remove_users"` - Name string `json:"name"` - DisplayName *string `json:"display_name"` + Name string `json:"name" validate:"omitempty,group_name"` + DisplayName *string `json:"display_name" validate:"omitempty,group_display_name"` AvatarURL *string `json:"avatar_url"` QuotaAllowance *int `json:"quota_allowance"` } diff --git a/codersdk/healthsdk/healthsdk.go b/codersdk/healthsdk/healthsdk.go index 8a00a8a3d63a6..007abff5e3277 100644 --- a/codersdk/healthsdk/healthsdk.go +++ b/codersdk/healthsdk/healthsdk.go @@ -105,8 +105,6 @@ type HealthcheckReport struct { Healthy bool `json:"healthy"` // Severity indicates the status of Coder health. Severity health.Severity `json:"severity" enums:"ok,warning,error"` - // FailingSections is a list of sections that have failed their healthcheck. - FailingSections []HealthSection `json:"failing_sections"` DERP DERPHealthReport `json:"derp"` AccessURL AccessURLReport `json:"access_url"` @@ -269,3 +267,9 @@ type WorkspaceProxyReport struct { BaseReport WorkspaceProxies codersdk.RegionsResponse[codersdk.WorkspaceProxy] `json:"workspace_proxies"` } + +// @typescript-ignore ClientNetcheckReport +type ClientNetcheckReport struct { + DERP DERPHealthReport `json:"derp"` + Interfaces InterfacesReport `json:"interfaces"` +} diff --git a/codersdk/healthsdk/interfaces.go b/codersdk/healthsdk/interfaces.go new file mode 100644 index 0000000000000..6f4365aaeefac --- /dev/null +++ b/codersdk/healthsdk/interfaces.go @@ -0,0 +1,80 @@ +package healthsdk + +import ( + "net" + + "tailscale.com/net/interfaces" + + "github.com/coder/coder/v2/coderd/healthcheck/health" +) + +// gVisor is nominally permitted to send packets up to 1280. 
+// Wireguard adds 30 bytes (1310) +// UDP adds 8 bytes (1318) +// IP adds 20-60 bytes (1338-1378) +// So, it really needs to be 1378 to be totally safe +const safeMTU = 1378 + +// @typescript-ignore InterfacesReport +type InterfacesReport struct { + BaseReport + Interfaces []Interface `json:"interfaces"` +} + +// @typescript-ignore Interface +type Interface struct { + Name string `json:"name"` + MTU int `json:"mtu"` + Addresses []string `json:"addresses"` +} + +func RunInterfacesReport() (InterfacesReport, error) { + st, err := interfaces.GetState() + if err != nil { + return InterfacesReport{}, err + } + return generateInterfacesReport(st), nil +} + +func generateInterfacesReport(st *interfaces.State) (report InterfacesReport) { + report.Severity = health.SeverityOK + for name, iface := range st.Interface { + // macOS has a ton of random interfaces, so to keep things helpful, let's filter out any + // that: + // + // - are not enabled + // - don't have any addresses + // - have only link-local addresses (e.g. fe80:...) + if (iface.Flags & net.FlagUp) == 0 { + continue + } + addrs := st.InterfaceIPs[name] + if len(addrs) == 0 { + continue + } + var r bool + healthIface := Interface{ + Name: iface.Name, + MTU: iface.MTU, + } + for _, addr := range addrs { + healthIface.Addresses = append(healthIface.Addresses, addr.String()) + if addr.Addr().IsLinkLocalUnicast() || addr.Addr().IsLinkLocalMulticast() { + continue + } + r = true + } + if !r { + continue + } + report.Interfaces = append(report.Interfaces, healthIface) + if iface.MTU < safeMTU { + report.Severity = health.SeverityWarning + report.Warnings = append(report.Warnings, + health.Messagef(health.CodeInterfaceSmallMTU, + "network interface %s has MTU %d (less than %d), which may cause problems with direct connections", iface.Name, iface.MTU, safeMTU), + ) + } + } + return report +} diff --git a/codersdk/healthsdk/interfaces_internal_test.go b/codersdk/healthsdk/interfaces_internal_test.go new file mode 100644 index 0000000000000..2996c6e1f09e3 --- /dev/null +++ b/codersdk/healthsdk/interfaces_internal_test.go @@ -0,0 +1,192 @@ +package healthsdk + +import ( + "net" + "net/netip" + "strings" + "testing" + + "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" + "tailscale.com/net/interfaces" + + "github.com/coder/coder/v2/coderd/healthcheck/health" +) + +func Test_generateInterfacesReport(t *testing.T) { + t.Parallel() + testCases := []struct { + name string + state interfaces.State + severity health.Severity + expectedInterfaces []string + expectedWarnings []string + }{ + { + name: "Empty", + state: interfaces.State{}, + severity: health.SeverityOK, + expectedInterfaces: []string{}, + }, + { + name: "Normal", + state: interfaces.State{ + Interface: map[string]interfaces.Interface{ + "en0": {Interface: &net.Interface{ + MTU: 1500, + Name: "en0", + Flags: net.FlagUp, + }}, + "lo0": {Interface: &net.Interface{ + MTU: 65535, + Name: "lo0", + Flags: net.FlagUp, + }}, + }, + InterfaceIPs: map[string][]netip.Prefix{ + "en0": { + netip.MustParsePrefix("192.168.100.1/24"), + netip.MustParsePrefix("fe80::c13:1a92:3fa5:dd7e/64"), + }, + "lo0": { + netip.MustParsePrefix("127.0.0.1/8"), + netip.MustParsePrefix("::1/128"), + netip.MustParsePrefix("fe80::1/64"), + }, + }, + }, + severity: health.SeverityOK, + expectedInterfaces: []string{"en0", "lo0"}, + }, + { + name: "IgnoreDisabled", + state: interfaces.State{ + Interface: map[string]interfaces.Interface{ + "en0": {Interface: &net.Interface{ + MTU: 1300, + Name: "en0", + Flags: 0, + 
}}, + "lo0": {Interface: &net.Interface{ + MTU: 65535, + Name: "lo0", + Flags: net.FlagUp, + }}, + }, + InterfaceIPs: map[string][]netip.Prefix{ + "en0": {netip.MustParsePrefix("192.168.100.1/24")}, + "lo0": {netip.MustParsePrefix("127.0.0.1/8")}, + }, + }, + severity: health.SeverityOK, + expectedInterfaces: []string{"lo0"}, + }, + { + name: "IgnoreLinkLocalOnly", + state: interfaces.State{ + Interface: map[string]interfaces.Interface{ + "en0": {Interface: &net.Interface{ + MTU: 1300, + Name: "en0", + Flags: net.FlagUp, + }}, + "lo0": {Interface: &net.Interface{ + MTU: 65535, + Name: "lo0", + Flags: net.FlagUp, + }}, + }, + InterfaceIPs: map[string][]netip.Prefix{ + "en0": {netip.MustParsePrefix("fe80::1:1/64")}, + "lo0": {netip.MustParsePrefix("127.0.0.1/8")}, + }, + }, + severity: health.SeverityOK, + expectedInterfaces: []string{"lo0"}, + }, + { + name: "IgnoreNoAddress", + state: interfaces.State{ + Interface: map[string]interfaces.Interface{ + "en0": {Interface: &net.Interface{ + MTU: 1300, + Name: "en0", + Flags: net.FlagUp, + }}, + "lo0": {Interface: &net.Interface{ + MTU: 65535, + Name: "lo0", + Flags: net.FlagUp, + }}, + }, + InterfaceIPs: map[string][]netip.Prefix{ + "en0": {}, + "lo0": {netip.MustParsePrefix("127.0.0.1/8")}, + }, + }, + severity: health.SeverityOK, + expectedInterfaces: []string{"lo0"}, + }, + { + name: "SmallMTUTunnel", + state: interfaces.State{ + Interface: map[string]interfaces.Interface{ + "en0": {Interface: &net.Interface{ + MTU: 1500, + Name: "en0", + Flags: net.FlagUp, + }}, + "lo0": {Interface: &net.Interface{ + MTU: 65535, + Name: "lo0", + Flags: net.FlagUp, + }}, + "tun0": {Interface: &net.Interface{ + MTU: 1280, + Name: "tun0", + Flags: net.FlagUp, + }}, + }, + InterfaceIPs: map[string][]netip.Prefix{ + "en0": {netip.MustParsePrefix("192.168.100.1/24")}, + "tun0": {netip.MustParsePrefix("10.3.55.9/8")}, + "lo0": {netip.MustParsePrefix("127.0.0.1/8")}, + }, + }, + severity: health.SeverityWarning, + expectedInterfaces: []string{"en0", "lo0", "tun0"}, + expectedWarnings: []string{"tun0"}, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + r := generateInterfacesReport(&tc.state) + require.Equal(t, tc.severity, r.Severity) + gotInterfaces := []string{} + for _, i := range r.Interfaces { + gotInterfaces = append(gotInterfaces, i.Name) + } + slices.Sort(gotInterfaces) + slices.Sort(tc.expectedInterfaces) + require.Equal(t, tc.expectedInterfaces, gotInterfaces) + + require.Len(t, r.Warnings, len(tc.expectedWarnings), + "expected %d warnings, got %d", len(tc.expectedWarnings), len(r.Warnings)) + for _, name := range tc.expectedWarnings { + found := false + for _, w := range r.Warnings { + if strings.Contains(w.String(), name) { + found = true + break + } + } + if !found { + t.Errorf("missing warning for %s", name) + } + } + }) + } +} diff --git a/codersdk/organizations.go b/codersdk/organizations.go index 646eae71d2475..e494018258e48 100644 --- a/codersdk/organizations.go +++ b/codersdk/organizations.go @@ -40,27 +40,49 @@ func ProvisionerTypeValid[T ProvisionerType | string](pt T) error { // Organization is the JSON representation of a Coder organization. 
type Organization struct { - ID uuid.UUID `table:"id" json:"id" validate:"required" format:"uuid"` - Name string `table:"name,default_sort" json:"name" validate:"required"` - CreatedAt time.Time `table:"created_at" json:"created_at" validate:"required" format:"date-time"` - UpdatedAt time.Time `table:"updated_at" json:"updated_at" validate:"required" format:"date-time"` - IsDefault bool `table:"default" json:"is_default" validate:"required"` + ID uuid.UUID `table:"id" json:"id" validate:"required" format:"uuid"` + Name string `table:"name,default_sort" json:"name"` + DisplayName string `table:"display_name" json:"display_name"` + Description string `table:"description" json:"description"` + CreatedAt time.Time `table:"created_at" json:"created_at" validate:"required" format:"date-time"` + UpdatedAt time.Time `table:"updated_at" json:"updated_at" validate:"required" format:"date-time"` + IsDefault bool `table:"default" json:"is_default" validate:"required"` + Icon string `table:"icon" json:"icon"` +} + +func (o Organization) HumanName() string { + if o.DisplayName == "" { + return o.Name + } + return o.DisplayName } type OrganizationMember struct { - UserID uuid.UUID `db:"user_id" json:"user_id" format:"uuid"` - OrganizationID uuid.UUID `db:"organization_id" json:"organization_id" format:"uuid"` - CreatedAt time.Time `db:"created_at" json:"created_at" format:"date-time"` - UpdatedAt time.Time `db:"updated_at" json:"updated_at" format:"date-time"` - Roles []SlimRole `db:"roles" json:"roles"` + UserID uuid.UUID `table:"user id" json:"user_id" format:"uuid"` + OrganizationID uuid.UUID `table:"organization id" json:"organization_id" format:"uuid"` + CreatedAt time.Time `table:"created at" json:"created_at" format:"date-time"` + UpdatedAt time.Time `table:"updated at" json:"updated_at" format:"date-time"` + Roles []SlimRole `table:"organization_roles" json:"roles"` +} + +type OrganizationMemberWithName struct { + Username string `table:"username,default_sort" json:"username"` + OrganizationMember `table:"m,recursive_inline"` } type CreateOrganizationRequest struct { - Name string `json:"name" validate:"required,username"` + Name string `json:"name" validate:"required,organization_name"` + // DisplayName will default to the same value as `Name` if not provided. + DisplayName string `json:"display_name,omitempty" validate:"omitempty,organization_display_name"` + Description string `json:"description,omitempty"` + Icon string `json:"icon,omitempty"` } type UpdateOrganizationRequest struct { - Name string `json:"name" validate:"required,username"` + Name string `json:"name,omitempty" validate:"omitempty,organization_name"` + DisplayName string `json:"display_name,omitempty" validate:"omitempty,organization_display_name"` + Description *string `json:"description,omitempty"` + Icon *string `json:"icon,omitempty"` } // CreateTemplateVersionRequest enables callers to create a new Template Version. 
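// A minimal usage sketch, assuming only the codersdk types shown in the hunk above:
// HumanName falls back to Name when DisplayName is empty, and CreateOrganizationRequest
// may omit DisplayName, in which case the server defaults it to the same value as Name.
// The literal values below are illustrative, not from the patch.
package main

import (
	"fmt"

	"github.com/coder/coder/v2/codersdk"
)

func main() {
	org := codersdk.Organization{Name: "frontend"}
	fmt.Println(org.HumanName()) // "frontend" (DisplayName is empty, so Name is used)

	org.DisplayName = "Frontend Platform"
	fmt.Println(org.HumanName()) // "Frontend Platform"

	// DisplayName omitted on create; it will default to Name on the server.
	_ = codersdk.CreateOrganizationRequest{
		Name:        "frontend",
		Description: "Example organization",
	}
}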
@@ -340,6 +362,25 @@ func (c *Client) TemplatesByOrganization(ctx context.Context, organizationID uui return templates, json.NewDecoder(res.Body).Decode(&templates) } +// Templates lists all viewable templates +func (c *Client) Templates(ctx context.Context) ([]Template, error) { + res, err := c.Request(ctx, http.MethodGet, + "/api/v2/templates", + nil, + ) + if err != nil { + return nil, xerrors.Errorf("execute request: %w", err) + } + defer res.Body.Close() + + if res.StatusCode != http.StatusOK { + return nil, ReadBodyAsError(res) + } + + var templates []Template + return templates, json.NewDecoder(res.Body).Decode(&templates) +} + // TemplateByName finds a template inside the organization provided with a case-insensitive name. func (c *Client) TemplateByName(ctx context.Context, organizationID uuid.UUID, name string) (Template, error) { if name == "" { diff --git a/codersdk/rbacresources_gen.go b/codersdk/rbacresources_gen.go index 9c7d9cc485128..73d784b449535 100644 --- a/codersdk/rbacresources_gen.go +++ b/codersdk/rbacresources_gen.go @@ -48,3 +48,33 @@ const ( ActionWorkspaceStart RBACAction = "start" ActionWorkspaceStop RBACAction = "stop" ) + +// RBACResourceActions is the mapping of resources to which actions are valid for +// said resource type. +var RBACResourceActions = map[RBACResource][]RBACAction{ + ResourceWildcard: {}, + ResourceApiKey: {ActionCreate, ActionDelete, ActionRead, ActionUpdate}, + ResourceAssignOrgRole: {ActionAssign, ActionCreate, ActionDelete, ActionRead}, + ResourceAssignRole: {ActionAssign, ActionCreate, ActionDelete, ActionRead}, + ResourceAuditLog: {ActionCreate, ActionRead}, + ResourceDebugInfo: {ActionRead}, + ResourceDeploymentConfig: {ActionRead, ActionUpdate}, + ResourceDeploymentStats: {ActionRead}, + ResourceFile: {ActionCreate, ActionRead}, + ResourceGroup: {ActionCreate, ActionDelete, ActionRead, ActionUpdate}, + ResourceLicense: {ActionCreate, ActionDelete, ActionRead}, + ResourceOauth2App: {ActionCreate, ActionDelete, ActionRead, ActionUpdate}, + ResourceOauth2AppCodeToken: {ActionCreate, ActionDelete, ActionRead}, + ResourceOauth2AppSecret: {ActionCreate, ActionDelete, ActionRead, ActionUpdate}, + ResourceOrganization: {ActionCreate, ActionDelete, ActionRead, ActionUpdate}, + ResourceOrganizationMember: {ActionCreate, ActionDelete, ActionRead, ActionUpdate}, + ResourceProvisionerDaemon: {ActionCreate, ActionDelete, ActionRead, ActionUpdate}, + ResourceReplicas: {ActionRead}, + ResourceSystem: {ActionCreate, ActionDelete, ActionRead, ActionUpdate}, + ResourceTailnetCoordinator: {ActionCreate, ActionDelete, ActionRead, ActionUpdate}, + ResourceTemplate: {ActionCreate, ActionDelete, ActionRead, ActionUpdate, ActionViewInsights}, + ResourceUser: {ActionCreate, ActionDelete, ActionRead, ActionReadPersonal, ActionUpdate, ActionUpdatePersonal}, + ResourceWorkspace: {ActionApplicationConnect, ActionCreate, ActionDelete, ActionRead, ActionSSH, ActionWorkspaceStart, ActionWorkspaceStop, ActionUpdate}, + ResourceWorkspaceDormant: {ActionApplicationConnect, ActionCreate, ActionDelete, ActionRead, ActionSSH, ActionWorkspaceStart, ActionWorkspaceStop, ActionUpdate}, + ResourceWorkspaceProxy: {ActionCreate, ActionDelete, ActionRead, ActionUpdate}, +} diff --git a/codersdk/rbacroles.go b/codersdk/rbacroles.go new file mode 100644 index 0000000000000..fe90d98f77384 --- /dev/null +++ b/codersdk/rbacroles.go @@ -0,0 +1,13 @@ +package codersdk + +// Ideally these roles would be generated from the rbac/roles.go package.
+const ( + RoleOwner string = "owner" + RoleMember string = "member" + RoleTemplateAdmin string = "template-admin" + RoleUserAdmin string = "user-admin" + RoleAuditor string = "auditor" + + RoleOrganizationAdmin string = "organization-admin" + RoleOrganizationMember string = "organization-member" +) diff --git a/codersdk/roles.go b/codersdk/roles.go index 8b119e935a6c6..7d1f007cc7463 100644 --- a/codersdk/roles.go +++ b/codersdk/roles.go @@ -14,8 +14,25 @@ import ( // and it would require extra db calls to fetch this information. The UI does // not need it, so most api calls will use this structure that omits information. type SlimRole struct { - Name string `json:"name"` - DisplayName string `json:"display_name"` + Name string `json:"name"` + DisplayName string `json:"display_name"` + OrganizationID string `json:"organization_id,omitempty"` +} + +func (s SlimRole) String() string { + if s.DisplayName != "" { + return s.DisplayName + } + return s.Name +} + +// UniqueName concatenates the organization ID to create a globally unique +// string name for the role. +func (s SlimRole) UniqueName() string { + if s.OrganizationID != "" { + return s.Name + ":" + s.OrganizationID + } + return s.Name } type AssignableRoles struct { @@ -36,11 +53,11 @@ type Permission struct { // Role is a longer form of SlimRole used to edit custom roles. type Role struct { Name string `json:"name" table:"name,default_sort" validate:"username"` - OrganizationID string `json:"organization_id" table:"organization_id" format:"uuid"` + OrganizationID string `json:"organization_id,omitempty" table:"organization_id" format:"uuid"` DisplayName string `json:"display_name" table:"display_name"` SitePermissions []Permission `json:"site_permissions" table:"site_permissions"` // OrganizationPermissions are specific for the organization in the field 'OrganizationID' above. - OrganizationPermissions []Permission `json:"organization_permissions" table:"org_permissions"` + OrganizationPermissions []Permission `json:"organization_permissions" table:"organization_permissions"` UserPermissions []Permission `json:"user_permissions" table:"user_permissions"` } diff --git a/codersdk/users.go b/codersdk/users.go index 003ede2f9bd60..dd6779e3a0342 100644 --- a/codersdk/users.go +++ b/codersdk/users.go @@ -90,6 +90,7 @@ type LicensorTrialRequest struct { type CreateFirstUserRequest struct { Email string `json:"email" validate:"required,email"` Username string `json:"username" validate:"required,username"` + Name string `json:"name" validate:"user_real_name"` Password string `json:"password" validate:"required"` Trial bool `json:"trial"` TrialInfo CreateFirstUserTrialInfo `json:"trial_info"` @@ -114,6 +115,7 @@ type CreateFirstUserResponse struct { type CreateUserRequest struct { Email string `json:"email" validate:"required,email" format:"email"` Username string `json:"username" validate:"required,username"` + Name string `json:"name" validate:"user_real_name"` Password string `json:"password"` // UserLoginType defaults to LoginTypePassword. 
UserLoginType LoginType `json:"login_type"` @@ -379,6 +381,47 @@ func (c *Client) UpdateUserPassword(ctx context.Context, user string, req Update return nil } +// PostOrganizationMember adds a user to an organization +func (c *Client) PostOrganizationMember(ctx context.Context, organizationID uuid.UUID, user string) (OrganizationMember, error) { + res, err := c.Request(ctx, http.MethodPost, fmt.Sprintf("/api/v2/organizations/%s/members/%s", organizationID, user), nil) + if err != nil { + return OrganizationMember{}, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return OrganizationMember{}, ReadBodyAsError(res) + } + var member OrganizationMember + return member, json.NewDecoder(res.Body).Decode(&member) +} + +// DeleteOrganizationMember removes a user from an organization +func (c *Client) DeleteOrganizationMember(ctx context.Context, organizationID uuid.UUID, user string) error { + res, err := c.Request(ctx, http.MethodDelete, fmt.Sprintf("/api/v2/organizations/%s/members/%s", organizationID, user), nil) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return ReadBodyAsError(res) + } + return nil +} + +// OrganizationMembers lists all members in an organization +func (c *Client) OrganizationMembers(ctx context.Context, organizationID uuid.UUID) ([]OrganizationMemberWithName, error) { + res, err := c.Request(ctx, http.MethodGet, fmt.Sprintf("/api/v2/organizations/%s/members/", organizationID), nil) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return nil, ReadBodyAsError(res) + } + var members []OrganizationMemberWithName + return members, json.NewDecoder(res.Body).Decode(&members) +} + // UpdateUserRoles grants the userID the specified roles. // Include ALL roles the user has. func (c *Client) UpdateUserRoles(ctx context.Context, user string, req UpdateRoles) (User, error) { diff --git a/codersdk/workspaces.go b/codersdk/workspaces.go index 0007e85de8ee4..69472f8d4579d 100644 --- a/codersdk/workspaces.go +++ b/codersdk/workspaces.go @@ -316,7 +316,43 @@ func (c *Client) PutExtendWorkspace(ctx context.Context, id uuid.UUID, req PutEx return nil } +type PostWorkspaceUsageRequest struct { + AgentID uuid.UUID `json:"agent_id" format:"uuid"` + AppName UsageAppName `json:"app_name"` +} + +type UsageAppName string + +const ( + UsageAppNameVscode UsageAppName = "vscode" + UsageAppNameJetbrains UsageAppName = "jetbrains" + UsageAppNameReconnectingPty UsageAppName = "reconnecting-pty" + UsageAppNameSSH UsageAppName = "ssh" +) + +var AllowedAppNames = []UsageAppName{ + UsageAppNameVscode, + UsageAppNameJetbrains, + UsageAppNameReconnectingPty, + UsageAppNameSSH, +} + +// PostWorkspaceUsage marks the workspace as having been used recently and records an app stat. +func (c *Client) PostWorkspaceUsageWithBody(ctx context.Context, id uuid.UUID, req PostWorkspaceUsageRequest) error { + path := fmt.Sprintf("/api/v2/workspaces/%s/usage", id.String()) + res, err := c.Request(ctx, http.MethodPost, path, req) + if err != nil { + return xerrors.Errorf("post workspace usage: %w", err) + } + defer res.Body.Close() + if res.StatusCode != http.StatusNoContent { + return ReadBodyAsError(res) + } + return nil +} + // PostWorkspaceUsage marks the workspace as having been used recently. 
+// Deprecated: use PostWorkspaceUsageWithBody instead func (c *Client) PostWorkspaceUsage(ctx context.Context, id uuid.UUID) error { path := fmt.Sprintf("/api/v2/workspaces/%s/usage", id.String()) res, err := c.Request(ctx, http.MethodPost, path, nil) @@ -330,14 +366,52 @@ func (c *Client) PostWorkspaceUsage(ctx context.Context, id uuid.UUID) error { return nil } +// UpdateWorkspaceUsageWithBodyContext periodically posts workspace usage for the workspace +// with the given id and app name in the background. +// The caller is responsible for calling the returned function to stop the background +// process. +func (c *Client) UpdateWorkspaceUsageWithBodyContext(ctx context.Context, workspaceID uuid.UUID, req PostWorkspaceUsageRequest) func() { + hbCtx, hbCancel := context.WithCancel(ctx) + // Perform one initial update + err := c.PostWorkspaceUsageWithBody(hbCtx, workspaceID, req) + if err != nil { + c.logger.Warn(ctx, "failed to post workspace usage", slog.Error(err)) + } + ticker := time.NewTicker(time.Minute) + doneCh := make(chan struct{}) + go func() { + defer func() { + ticker.Stop() + close(doneCh) + }() + for { + select { + case <-ticker.C: + err := c.PostWorkspaceUsageWithBody(hbCtx, workspaceID, req) + if err != nil { + c.logger.Warn(ctx, "failed to post workspace usage in background", slog.Error(err)) + } + case <-hbCtx.Done(): + return + } + } + }() + return func() { + hbCancel() + <-doneCh + } +} + // UpdateWorkspaceUsageContext periodically posts workspace usage for the workspace // with the given id in the background. // The caller is responsible for calling the returned function to stop the background // process. -func (c *Client) UpdateWorkspaceUsageContext(ctx context.Context, id uuid.UUID) func() { +// Deprecated: use UpdateWorkspaceUsageContextWithBody instead +func (c *Client) UpdateWorkspaceUsageContext(ctx context.Context, workspaceID uuid.UUID) func() { hbCtx, hbCancel := context.WithCancel(ctx) // Perform one initial update - if err := c.PostWorkspaceUsage(hbCtx, id); err != nil { + err := c.PostWorkspaceUsage(hbCtx, workspaceID) + if err != nil { c.logger.Warn(ctx, "failed to post workspace usage", slog.Error(err)) } ticker := time.NewTicker(time.Minute) @@ -350,7 +424,8 @@ func (c *Client) UpdateWorkspaceUsageContext(ctx context.Context, id uuid.UUID) for { select { case <-ticker.C: - if err := c.PostWorkspaceUsage(hbCtx, id); err != nil { + err := c.PostWorkspaceUsage(hbCtx, workspaceID) + if err != nil { c.logger.Warn(ctx, "failed to post workspace usage in background", slog.Error(err)) } case <-hbCtx.Done(): diff --git a/codersdk/workspacesdk/connector.go b/codersdk/workspacesdk/connector.go index d6349adaf6b40..5ac009af15091 100644 --- a/codersdk/workspacesdk/connector.go +++ b/codersdk/workspacesdk/connector.go @@ -3,8 +3,10 @@ package workspacesdk import ( "context" "errors" + "fmt" "io" "net/http" + "slices" "sync" "time" @@ -14,6 +16,7 @@ import ( "tailscale.com/tailcfg" "cdr.dev/slog" + "github.com/coder/coder/v2/buildinfo" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/tailnet" "github.com/coder/coder/v2/tailnet/proto" @@ -101,6 +104,9 @@ func (tac *tailnetAPIConnector) run() { defer close(tac.closed) for retrier := retry.New(50*time.Millisecond, 10*time.Second); retrier.Wait(tac.ctx); { tailnetClient, err := tac.dial() + if xerrors.Is(err, &codersdk.Error{}) { + return + } if err != nil { continue } @@ -110,13 +116,29 @@ func (tac *tailnetAPIConnector) run() { } } +var permanentErrorStatuses = []int{ + http.StatusConflict, // returned if 
client/agent connections disabled (browser only) + http.StatusBadRequest, // returned if API mismatch + http.StatusNotFound, // returned if user doesn't have permission or agent doesn't exist +} + func (tac *tailnetAPIConnector) dial() (proto.DRPCTailnetClient, error) { tac.logger.Debug(tac.ctx, "dialing Coder tailnet v2+ API") // nolint:bodyclose ws, res, err := websocket.Dial(tac.ctx, tac.coordinateURL, tac.dialOptions) if tac.isFirst { - if res != nil && res.StatusCode == http.StatusConflict { + if res != nil && slices.Contains(permanentErrorStatuses, res.StatusCode) { err = codersdk.ReadBodyAsError(res) + // A bit more human-readable help in the case the API version was rejected + var sdkErr *codersdk.Error + if xerrors.As(err, &sdkErr) { + if sdkErr.Message == AgentAPIMismatchMessage && + sdkErr.StatusCode() == http.StatusBadRequest { + sdkErr.Helper = fmt.Sprintf( + "Ensure your client release version (%s, different than the API version) matches the server release version", + buildinfo.Version()) + } + } tac.connected <- err return nil, err } diff --git a/codersdk/workspacesdk/connector_internal_test.go b/codersdk/workspacesdk/connector_internal_test.go index 06ff3e2c668df..c7fc036ffa2a1 100644 --- a/codersdk/workspacesdk/connector_internal_test.go +++ b/codersdk/workspacesdk/connector_internal_test.go @@ -10,6 +10,7 @@ import ( "time" "github.com/google/uuid" + "github.com/hashicorp/yamux" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "nhooyr.io/websocket" @@ -17,6 +18,8 @@ import ( "cdr.dev/slog" "cdr.dev/slog/sloggers/slogtest" + "github.com/coder/coder/v2/apiversion" + "github.com/coder/coder/v2/coderd/httpapi" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/tailnet" "github.com/coder/coder/v2/tailnet/proto" @@ -34,8 +37,10 @@ func TestTailnetAPIConnector_Disconnects(t *testing.T) { testCtx := testutil.Context(t, testutil.WaitShort) ctx, cancel := context.WithCancel(testCtx) logger := slogtest.Make(t, &slogtest.Options{ - // we get EOF when we simulate a DERPMap error - IgnoredErrorIs: append(slogtest.DefaultIgnoredErrorIs, io.EOF), + IgnoredErrorIs: append(slogtest.DefaultIgnoredErrorIs, + io.EOF, // we get EOF when we simulate a DERPMap error + yamux.ErrSessionShutdown, // coordination can throw these when DERP error tears down session + ), }).Leveled(slog.LevelDebug) agentID := uuid.UUID{0x55} clientID := uuid.UUID{0x66} @@ -94,6 +99,41 @@ func TestTailnetAPIConnector_Disconnects(t *testing.T) { require.NotNil(t, reqDisc.Disconnect) } +func TestTailnetAPIConnector_UplevelVersion(t *testing.T) { + t.Parallel() + ctx := testutil.Context(t, testutil.WaitShort) + logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug) + agentID := uuid.UUID{0x55} + + svr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + sVer := apiversion.New(proto.CurrentMajor, proto.CurrentMinor-1) + + // the following matches what Coderd does; + // c.f. 
coderd/workspaceagents.go: workspaceAgentClientCoordinate + cVer := r.URL.Query().Get("version") + if err := sVer.Validate(cVer); err != nil { + httpapi.Write(ctx, w, http.StatusBadRequest, codersdk.Response{ + Message: AgentAPIMismatchMessage, + Validations: []codersdk.ValidationError{ + {Field: "version", Detail: err.Error()}, + }, + }) + return + } + })) + + fConn := newFakeTailnetConn() + + uut := runTailnetAPIConnector(ctx, logger, agentID, svr.URL, &websocket.DialOptions{}, fConn) + + err := testutil.RequireRecvCtx(ctx, t, uut.connected) + var sdkErr *codersdk.Error + require.ErrorAs(t, err, &sdkErr) + require.Equal(t, http.StatusBadRequest, sdkErr.StatusCode()) + require.Equal(t, AgentAPIMismatchMessage, sdkErr.Message) + require.NotEmpty(t, sdkErr.Helper) +} + type fakeTailnetConn struct{} func (*fakeTailnetConn) UpdatePeers([]*proto.CoordinateResponse_PeerUpdate) error { diff --git a/codersdk/workspacesdk/workspacesdk.go b/codersdk/workspacesdk/workspacesdk.go index f1e3bd67ea3dc..04765c13d9877 100644 --- a/codersdk/workspacesdk/workspacesdk.go +++ b/codersdk/workspacesdk/workspacesdk.go @@ -21,7 +21,6 @@ import ( "cdr.dev/slog" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/tailnet" - "github.com/coder/coder/v2/tailnet/proto" ) // AgentIP is a static IPv6 address with the Tailscale prefix that is used to route @@ -55,6 +54,8 @@ const ( AgentMinimumListeningPort = 9 ) +const AgentAPIMismatchMessage = "Unknown or unsupported API version" + // AgentIgnoredListeningPorts contains a list of ports to ignore when looking for // running applications inside a workspace. We want to ignore non-HTTP servers, // so we pre-populate this list with common ports that are not HTTP servers. @@ -239,7 +240,15 @@ func (c *Client) DialAgent(dialCtx context.Context, agentID uuid.UUID, options * return nil, xerrors.Errorf("parse url: %w", err) } q := coordinateURL.Query() - q.Add("version", proto.CurrentVersion.String()) + // TODO (ethanndickson) - the current version includes 2 additions we don't currently use: + // + // 2.1 GetAnnouncementBanners on the Agent API (version locked to Tailnet API) + // 2.2 PostTelemetry on the Tailnet API + // + // So, asking for API 2.2 just makes us incompatible back level servers, for no real benefit. + // As a temporary measure, we'll specifically ask for API version 2.0 until we implement sending + // telemetry. + q.Add("version", "2.0") coordinateURL.RawQuery = q.Encode() connector := runTailnetAPIConnector(ctx, options.Logger, diff --git a/docker-compose.yaml b/docker-compose.yaml index 9b41c5f47ae61..58692aa73e1f1 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -25,7 +25,9 @@ services: database: condition: service_healthy database: - image: "postgres:14.2" + # Minimum supported version is 13. + # More versions here: https://hub.docker.com/_/postgres + image: "postgres:16" ports: - "5432:5432" environment: diff --git a/docs/about/screenshots.md b/docs/about/screenshots.md new file mode 100644 index 0000000000000..608e92e42ee5e --- /dev/null +++ b/docs/about/screenshots.md @@ -0,0 +1,59 @@ +# Screenshots + +## Log in + +![Install Coder in your cloud or air-gapped on-premises. Developers simply log in via their browser to access their Workspaces.](../images/screenshots/login.png) + +Install Coder in your cloud or air-gapped on-premises. Developers simply log in +via their browser to access their Workspaces. 
+ +## Templates + +![Developers provision their own ephemeral Workspaces in minutes using pre-defined Templates that include approved tooling and infrastructure.](../images/screenshots/templates_listing.png) + +Developers provision their own ephemeral Workspaces in minutes using pre-defined +Templates that include approved tooling and infrastructure. + +![Template administrators can either create a new Template from scratch or choose a Starter Template](../images/screenshots/starter_templates.png) + +Template administrators can either create a new Template from scratch or choose +a Starter Template. + +![Templates define the underlying infrastructure that Coder Workspaces run on.](../images/screenshots/terraform.png) + +Template administrators build Templates using Terraform. Templates define the +underlying infrastructure that Coder Workspaces run on. + +## Workspaces + +![Developers create and delete their own workspaces. Coder administrators can easily enforce Workspace scheduling and autostop policies to ensure idle Workspaces don’t burn unnecessary cloud budget.](../images/screenshots/workspaces_listing.png) + +Developers create and delete their own workspaces. Coder administrators can +easily enforce Workspace scheduling and autostop policies to ensure idle +Workspaces don’t burn unnecessary cloud budget. + +![Developers launch their favorite web-based or desktop IDE, browse files, or access their Workspace’s Terminal.](../images/screenshots/workspace_launch.png) + +Developers launch their favorite web-based or desktop IDE, browse files, or +access their Workspace’s Terminal. + +## Administration + +![Coder administrators can access Template usage insights to understand which Templates are most popular and how well they perform for developers.](../images/screenshots/templates_insights.png) + +Coder administrators can access Template usage insights to understand which +Templates are most popular and how well they perform for developers. + +![Coder administrators can control *every* aspect of their Coder deployment.](../images/screenshots/settings.png) + +Coder administrators can control _every_ aspect of their Coder deployment. + +![Coder administrators and auditor roles can review how users are interacting with their Coder Workspaces and Templates.](../images/screenshots/audit.png) + +Coder administrators and auditor roles can review how users are interacting with +their Coder Workspaces and Templates. + +![Coder administrators can monitor the health of their Coder deployment, including database latency, active provisioners, and more.](../images/screenshots/healthcheck.png) + +Coder administrators can monitor the health of their Coder deployment, including +database latency, active provisioners, and more. diff --git a/docs/admin/appearance.md b/docs/admin/appearance.md index 51710855a80fb..edfd144834254 100644 --- a/docs/admin/appearance.md +++ b/docs/admin/appearance.md @@ -18,16 +18,17 @@ is Coder. Specify a custom URL for your enterprise's logo to be displayed on the sign in page and in the top left corner of the dashboard. The default is the Coder logo. -## Service Banner +## Announcement Banners -![service banner](../images/admin/service-banner-config.png) +![service banner](../images/admin/announcement_banner_settings.png) -A Service Banner lets admins post important messages to all site users. Only -Site Owners may set the service banner. +Announcement Banners let admins post important messages to all site users. Only +Site Owners may set the announcement banners. 
-Example: Notify users of scheduled maintenance of the Coder deployment. +Example: Use multiple announcement banners for concurrent deployment-wide +updates, such as maintenance or new feature rollout. -![service banner maintenance](../images/admin/service-banner-maintenance.png) +![Multiple announcements](../images/admin/multiple-banners.PNG) Example: Adhere to government network classification requirements and notify users of which network their Coder deployment is on. diff --git a/docs/about/architecture.md b/docs/admin/architectures/architecture.md similarity index 93% rename from docs/about/architecture.md rename to docs/admin/architectures/architecture.md index af826ef784145..318e8e7d5356a 100644 --- a/docs/about/architecture.md +++ b/docs/admin/architectures/architecture.md @@ -4,9 +4,6 @@ The Coder deployment model is flexible and offers various components that platform administrators can deploy and scale depending on their use case. This page describes possible deployments, challenges, and risks associated with them. -Learn more about our [Reference Architectures](../admin/architectures/index.md) -and platform scaling capabilities. - ## Primary components ### coderd @@ -29,7 +26,7 @@ _provisionerd_ is the execution context for infrastructure modifying providers. At the moment, the only provider is Terraform (running `terraform`). By default, the Coder server runs multiple provisioner daemons. -[External provisioners](../admin/provisioners.md) can be added for security or +[External provisioners](../provisioners.md) can be added for security or scalability purposes. ### Agents @@ -46,7 +43,7 @@ It offers the following services along with much more: - `startup_script` automation Templates are responsible for -[creating and running agents](../templates/index.md#coder-agent) within +[creating and running agents](../../templates/index.md#coder-agent) within workspaces. ### Service Bundling @@ -76,7 +73,7 @@ they're destroyed on workspace stop. ### Single region architecture -![Architecture Diagram](../images/architecture-single-region.png) +![Architecture Diagram](../../images/architecture-single-region.png) #### Components @@ -121,11 +118,11 @@ and _Coder workspaces_ deployed in the same region. - Integrate with existing Single Sign-On (SSO) solutions used within the organization via the supported OAuth 2.0 or OpenID Connect standards. -- Learn more about [Authentication in Coder](../admin/auth.md). +- Learn more about [Authentication in Coder](../auth.md). ### Multi-region architecture -![Architecture Diagram](../images/architecture-multi-region.png) +![Architecture Diagram](../../images/architecture-multi-region.png) #### Components @@ -171,7 +168,7 @@ disruptions. Additionally, multi-cloud deployment enables organizations to leverage the unique features and capabilities offered by each cloud provider, such as region availability and pricing models. -![Architecture Diagram](../images/architecture-multi-cloud.png) +![Architecture Diagram](../../images/architecture-multi-cloud.png) #### Components @@ -205,7 +202,7 @@ nearest region and technical specifications provided by the cloud providers. **Workspace proxy** - _Security recommendation_: Use `coder` CLI to create - [authentication tokens for every workspace proxy](../admin/workspace-proxies.md#requirements), + [authentication tokens for every workspace proxy](../workspace-proxies.md#requirements), and keep them in regional secret stores. Remember to distribute them using safe, encrypted communication channel. 
@@ -226,8 +223,8 @@ nearest region and technical specifications provided by the cloud providers. See how to deploy [Coder on Azure Kubernetes Service](https://github.com/ericpaulsen/coder-aks). -Learn more about [security requirements](../install/kubernetes.md) for deploying -Coder on Kubernetes. +Learn more about [security requirements](../../install/kubernetes.md) for +deploying Coder on Kubernetes. **Load balancer** @@ -286,9 +283,9 @@ The key features of the air-gapped architecture include: - _Secure data transfer_: Enable encrypted communication channels and robust access controls to safeguard sensitive information. -Learn more about [offline deployments](../install/offline.md) of Coder. +Learn more about [offline deployments](../../install/offline.md) of Coder. -![Architecture Diagram](../images/architecture-air-gapped.png) +![Architecture Diagram](../../images/architecture-air-gapped.png) #### Components @@ -330,8 +327,8 @@ across multiple regions and diverse cloud platforms. - Since the _Registry_ is isolated from the internet, platform engineers are responsible for maintaining Workspace container images and conducting periodic updates of base Docker images. -- It is recommended to keep [Dev Containers](../templates/dev-containers.md) up - to date with the latest released +- It is recommended to keep [Dev Containers](../../templates/dev-containers.md) + up to date with the latest released [Envbuilder](https://github.com/coder/envbuilder) runtime. **Mirror of Terraform Registry** @@ -363,7 +360,7 @@ Learn more about [Dev containers support](https://coder.com/docs/v2/latest/templates/dev-containers) in Coder. -![Architecture Diagram](../images/architecture-devcontainers.png) +![Architecture Diagram](../../images/architecture-devcontainers.png) #### Components diff --git a/docs/admin/architectures/validated-arch.md b/docs/admin/architectures/validated-arch.md new file mode 100644 index 0000000000000..ffb5a1e919ad7 --- /dev/null +++ b/docs/admin/architectures/validated-arch.md @@ -0,0 +1,363 @@ +# Coder Validated Architecture + +Many customers operate Coder in complex organizational environments, consisting +of multiple business units, agencies, and/or subsidiaries. This can lead to +numerous Coder deployments, due to discrepancies in regulatory compliance, data +sovereignty, and level of funding across groups. The Coder Validated +Architecture (CVA) prescribes a Kubernetes-based deployment approach, enabling +your organization to deploy a stable Coder instance that is easier to maintain +and troubleshoot. + +The following sections will detail the components of the Coder Validated +Architecture, provide guidance on how to configure and deploy these components, +and offer insights into how to maintain and troubleshoot your Coder environment. + +- [General concepts](#general-concepts) +- [Kubernetes Infrastructure](#kubernetes-infrastructure) +- [PostgreSQL Database](#postgresql-database) +- [Operational readiness](#operational-readiness) + +## Who is this document for? + +This guide targets the following personas. It assumes a basic understanding of +cloud/on-premise computing, containerization, and the Coder platform. 
+ +| Role | Description | +| ------------------------- | ------------------------------------------------------------------------------ | +| Platform Engineers | Responsible for deploying, operating the Coder deployment and infrastructure | +| Enterprise Architects | Responsible for architecting Coder deployments to meet enterprise requirements | +| Managed Service Providers | Entities that deploy and run Coder software as a service for customers | + +## CVA Guidance + +| CVA provides: | CVA does not provide: | +| ---------------------------------------------- | ---------------------------------------------------------------------------------------- | +| Single and multi-region K8s deployment options | Prescribing OS, or cloud vs. on-premise | +| Reference architectures for up to 3,000 users | An approval of your architecture; the CVA solely provides recommendations and guidelines | +| Best practices for building a Coder deployment | Recommendations for every possible deployment scenario | + +> For higher level design principles and architectural best practices, see +> Coder's +> [Well-Architected Framework](https://coder.com/blog/coder-well-architected-framework). + +## General concepts + +This section outlines core concepts and terminology essential for understanding +Coder's architecture and deployment strategies. + +### Administrator + +An administrator is a user role within the Coder platform with elevated +privileges. Admins have access to administrative functions such as user +management, template definitions, insights, and deployment configuration. + +### Coder control plane + +Coder's control plane, also known as _coderd_, is the main service recommended +for deployment with multiple replicas to ensure high availability. It provides +an API for managing workspaces and templates, and serves the dashboard UI. In +addition, each _coderd_ replica hosts 3 Terraform [provisioners](#provisioner) +by default. + +### User + +A [user](../users.md) is an individual who utilizes the Coder platform to +develop, test, and deploy applications using workspaces. Users can select +available templates to provision workspaces. They interact with Coder using the +web interface, the CLI tool, or directly calling API methods. + +### Workspace + +A [workspace](../../workspaces.md) refers to an isolated development environment +where users can write, build, and run code. Workspaces are fully configurable +and can be tailored to specific project requirements, providing developers with +a consistent and efficient development environment. Workspaces can be +autostarted and autostopped, enabling efficient resource management. + +Users can connect to workspaces using SSH or via workspace applications like +`code-server`, facilitating collaboration and remote access. Additionally, +workspaces can be parameterized, allowing users to customize settings and +configurations based on their unique needs. Workspaces are instantiated using +Coder templates and deployed on resources created by provisioners. + +### Template + +A [template](../../templates/index.md) in Coder is a predefined configuration +for creating workspaces. Templates streamline the process of workspace creation +by providing pre-configured settings, tooling, and dependencies. They are built +by template administrators on top of Terraform, allowing for efficient +management of infrastructure resources. 
Additionally, templates can utilize +Coder modules to leverage existing features shared with other templates, +enhancing flexibility and consistency across deployments. Templates describe +provisioning rules for infrastructure resources offered by Terraform providers. + +### Workspace Proxy + +A [workspace proxy](../workspace-proxies.md) serves as a relay connection option +for developers connecting to their workspace over SSH, a workspace app, or +through port forwarding. It helps reduce network latency for geo-distributed +teams by minimizing the distance network traffic needs to travel. Notably, +workspace proxies do not handle dashboard connections or API calls. + +### Provisioner + +Provisioners in Coder execute Terraform during workspace and template builds. +While the platform includes built-in provisioner daemons by default, there are +advantages to employing external provisioners. These external daemons provide +secure build environments and reduce server load, improving performance and +scalability. Each provisioner can handle a single concurrent workspace build, +allowing for efficient resource allocation and workload management. + +### Registry + +The [Coder Registry](https://registry.coder.com) is a platform where you can +find starter templates and _Modules_ for various cloud services and platforms. + +Templates help create self-service development environments using +Terraform-defined infrastructure, while _Modules_ simplify template creation by +providing common features like workspace applications, third-party integrations, +or helper scripts. + +Please note that the Registry is a hosted service and isn't available for +offline use. + +## Kubernetes Infrastructure + +Kubernetes is the recommended, and supported platform for deploying Coder in the +enterprise. It is the hosting platform of choice for a large majority of Coder's +Fortune 500 customers, and it is the platform in which we build and test against +here at Coder. + +### General recommendations + +In general, it is recommended to deploy Coder into its own respective cluster, +separate from production applications. Keep in mind that Coder runs development +workloads, so the cluster should be deployed as such, without production-level +configurations. + +### Compute + +Deploy your Kubernetes cluster with two node groups, one for Coder's control +plane, and another for user workspaces (if you intend on leveraging K8s for +end-user compute). + +#### Control plane nodes + +The Coder control plane node group must be static, to prevent scale down events +from dropping pods, and thus dropping user connections to the dashboard UI and +their workspaces. + +Coder's Helm Chart supports +[defining nodeSelectors, affinities, and tolerations](https://github.com/coder/coder/blob/e96652ebbcdd7554977594286b32015115c3f5b6/helm/coder/values.yaml#L221-L249) +to schedule the control plane pods on the appropriate node group. + +#### Workspace nodes + +Coder workspaces can be deployed either as Pods or Deployments in Kubernetes. +See our +[example Kubernetes workspace template](https://github.com/coder/coder/tree/main/examples/templates/kubernetes). +Configure the workspace node group to be auto-scaling, to dynamically allocate +compute as users start/stop workspaces at the beginning and end of their day. 
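The template-side settings shown next assume the workspace node group is already labeled and tainted. As a hedged sketch of that cluster-side preparation (the `coder-workload` label key, its value, and the node pool selector are illustrative placeholders, not names defined by Coder):

```shell
# Label the auto-scaling workspace node group so template nodeSelectors can
# target it, and taint it so only pods that tolerate the taint land there.
kubectl label nodes --selector "your-nodepool-label=workspaces" coder-workload=workspaces
kubectl taint nodes --selector "your-nodepool-label=workspaces" coder-workload=workspaces:NoSchedule
```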
+Set nodeSelectors, affinities, and tolerations in Coder templates to assign
+workspaces to the given node group:
+
+```hcl
+resource "kubernetes_deployment" "coder" {
+  spec {
+    template {
+      metadata {
+        labels = {
+          app = "coder-workspace"
+        }
+      }
+
+      spec {
+        affinity {
+          pod_anti_affinity {
+            preferred_during_scheduling_ignored_during_execution {
+              weight = 1
+              pod_affinity_term {
+                label_selector {
+                  match_expressions {
+                    key = "app.kubernetes.io/instance"
+                    operator = "In"
+                    values = ["coder-workspace"]
+                  }
+                }
+                # Replace with your node group label
+                topology_key = "your-node-group-label"
+              }
+            }
+          }
+        }
+
+        # Example toleration; match the taint applied to your workspace node group
+        toleration {
+          key = "coder-workload"
+          operator = "Equal"
+          value = "workspaces"
+          effect = "NoSchedule"
+        }
+
+        # Example node selector; match the label on your workspace node group
+        node_selector = {
+          "coder-workload" = "workspaces"
+        }
+
+        container {
+          image = "coder-workspace:latest"
+          name = "dev"
+        }
+      }
+    }
+  }
+}
+```
+
+#### Node sizing
+
+For sizing recommendations, see the reference architectures below:
+
+- [Up to 1,000 users](1k-users.md)
+
+- [Up to 2,000 users](2k-users.md)
+
+- [Up to 3,000 users](3k-users.md)
+
+### Networking
+
+It is likely your enterprise deploys Kubernetes clusters with various networking
+restrictions. With this in mind, Coder requires the following connectivity:
+
+- Egress from workspace compute to the Coder control plane pods
+- Egress from control plane pods to Coder's PostgreSQL database
+- Egress from control plane pods to git and package repositories
+- Ingress from user devices to the control plane Load Balancer or Ingress
+  controller
+
+We recommend configuring your network policies in accordance with the above.
+Note that Coder workspaces do not require any ports to be open.
+
+### Storage
+
+If running Coder workspaces as Kubernetes Pods or Deployments, you will need to
+assign persistent storage. We recommend leveraging a
+[supported Container Storage Interface (CSI) driver](https://kubernetes-csi.github.io/docs/drivers.html)
+in your cluster, with Dynamic Provisioning and read/write, to provide on-demand
+storage to end-user workspaces.
+
+The following Kubernetes volume types have been validated by Coder internally,
+and/or by our customers:
+
+- [PersistentVolumeClaim](https://kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim)
+- [NFS](https://kubernetes.io/docs/concepts/storage/volumes/#nfs)
+- [subPath](https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath)
+- [cephfs](https://kubernetes.io/docs/concepts/storage/volumes/#cephfs)
+
+Our
+[example Kubernetes workspace template](https://github.com/coder/coder/blob/5b9a65e5c137232351381fc337d9784bc9aeecfc/examples/templates/kubernetes/main.tf#L191-L219)
+provisions a PersistentVolumeClaim block storage device, attached to the
+Deployment.
+
+It is not recommended to mount volumes from the host node(s) into workspaces,
+for security and reliability purposes. The below volume types are _not_
+recommended for use with Coder:
+
+- [Local](https://kubernetes.io/docs/concepts/storage/volumes/#local)
+- [hostPath](https://kubernetes.io/docs/concepts/storage/volumes/#hostpath)
+
+Note that Coder's control plane filesystem is ephemeral, so no persistent
+storage is required.
+
+## PostgreSQL database
+
+Coder requires access to an external PostgreSQL database to store user data,
+workspace state, template files, and more. Depending on the scale of the
+user-base, workspace activity, and High Availability requirements, the amount of
+CPU and memory resources required by Coder's database may differ.
+
+### Disaster recovery
+
+Prepare internal scripts for dumping and restoring your database.
We recommend +scheduling regular database backups, especially before upgrading Coder to a new +release. Coder does not support downgrades without initially restoring the +database to the prior version. + +### Performance efficiency + +We highly recommend deploying the PostgreSQL instance in the same region (and if +possible, same availability zone) as the Coder server to optimize for low +latency connections. We recommend keeping latency under 10ms between the Coder +server and database. + +When determining scaling requirements, take into account the following +considerations: + +- `2 vCPU x 8 GB RAM x 512 GB storage`: A baseline for database requirements for + Coder deployment with less than 1000 users, and low activity level (30% active + users). This capacity should be sufficient to support 100 external + provisioners. +- Storage size depends on user activity, workspace builds, log verbosity, + overhead on database encryption, etc. +- Allocate two additional CPU core to the database instance for every 1000 + active users. +- Enable High Availability mode for database engine for large scale deployments. + +If you enable [database encryption](../encryption.md) in Coder, consider +allocating an additional CPU core to every `coderd` replica. + +#### Resource utilization guidelines + +Below are general recommendations for sizing your PostgreSQL instance: + +- Increase number of vCPU if CPU utilization or database latency is high. +- Allocate extra memory if database performance is poor, CPU utilization is low, + and memory utilization is high. +- Utilize faster disk options (higher IOPS) such as SSDs or NVMe drives for + optimal performance enhancement and possibly reduce database load. + +## Operational readiness + +Operational readiness in Coder is about ensuring that everything is set up +correctly before launching a platform into production. It involves making sure +that the service is reliable, secure, and easily scales accordingly to user-base +needs. Operational readiness is crucial because it helps prevent issues that +could affect workspace users experience once the platform is live. + +### Helm Chart Configuration + +1. Reference our [Helm chart values file](../../../helm/coder/values.yaml) and + identify the required values for deployment. +1. Create a `values.yaml` and add it to your version control system. +1. Determine the necessary environment variables. Here is the + [full list of supported server environment variables](../../cli/server.md). +1. Follow our documented + [steps for installing Coder via Helm](../../install/kubernetes.md). + +### Template configuration + +1. Establish dedicated accounts for users with the _Template Administrator_ + role. +1. Maintain Coder templates using + [version control](../../templates/change-management.md). +1. Consider implementing a GitOps workflow to automatically push new template + versions into Coder from git. For example, on Github, you can use the + [Update Coder Template](https://github.com/marketplace/actions/update-coder-template) + action. +1. Evaluate enabling + [automatic template updates](../../templates/general-settings.md#require-automatic-updates-enterprise) + upon workspace startup. + +### Observability + +1. Enable the Prometheus endpoint (environment variable: + `CODER_PROMETHEUS_ENABLE`). +1. Deploy the + [Coder Observability bundle](https://github.com/coder/observability) to + leverage pre-configured dashboards, alerts, and runbooks for monitoring + Coder. 
This includes integrations between Prometheus, Grafana, Loki, and + Alertmanager. +1. Review the [Prometheus response](../prometheus.md) and set up alarms on + selected metrics. + +### User support + +1. Incorporate [support links](../appearance.md#support-links) into internal + documentation accessible from the user context menu. Ensure that hyperlinks + are valid and lead to up-to-date materials. +1. Encourage the use of `coder support bundle` to allow workspace users to + generate and provide network-related diagnostic data. diff --git a/docs/admin/audit-logs.md b/docs/admin/audit-logs.md index fada57f32065f..52ed2d34e1a97 100644 --- a/docs/admin/audit-logs.md +++ b/docs/admin/audit-logs.md @@ -13,11 +13,14 @@ We track the following resources: | APIKey
<br>login, logout, register, create, delete | created_at: true, expires_at: true, hashed_secret: false, id: false, ip_address: false, last_used: true, lifetime_seconds: false, login_type: false, scope: false, token_name: false, updated_at: false, user_id: true |
| AuditOAuthConvertState | created_at: true, expires_at: true, from_login_type: true, to_login_type: true, user_id: true |
| Group<br>create, write, delete | avatar_url: true, display_name: true, id: true, members: true, name: true, organization_id: false, quota_allowance: true, source: false |
+| AuditableOrganizationMember | created_at: true, organization_id: true, roles: true, updated_at: true, user_id: true, username: true |
+| CustomRole | created_at: false, display_name: true, id: false, name: true, org_permissions: true, organization_id: true, site_permissions: true, updated_at: false, user_permissions: true |
| GitSSHKey<br>create | created_at: false, private_key: true, public_key: true, updated_at: false, user_id: true |
| HealthSettings | dismissed_healthchecks: true, id: false |
| License<br>create, delete | exp: true, id: false, jwt: false, uploaded_at: true, uuid: true |
| OAuth2ProviderApp | callback_url: true, created_at: false, icon: true, id: false, name: true, updated_at: false |
| OAuth2ProviderAppSecret | app_id: false, created_at: false, display_secret: false, hashed_secret: false, id: false, last_used_at: false, secret_prefix: false |
+| Organization | created_at: false, description: true, display_name: true, icon: true, id: false, is_default: true, name: true, updated_at: true |
| Template<br>write, delete | active_version_id: true, activity_bump: true, allow_user_autostart: true, allow_user_autostop: true, allow_user_cancel_workspace_jobs: true, autostart_block_days_of_week: true, autostop_requirement_days_of_week: true, autostop_requirement_weeks: true, created_at: false, created_by: true, created_by_avatar_url: false, created_by_username: false, default_ttl: true, deleted: false, deprecated: true, description: true, display_name: true, failure_ttl: true, group_acl: true, icon: true, id: true, max_port_sharing_level: true, name: true, organization_id: false, provisioner: true, require_active_version: true, time_til_dormant: true, time_til_dormant_autodelete: true, updated_at: false, user_acl: true |
| TemplateVersion<br>create, write | archived: true, created_at: false, created_by: true, created_by_avatar_url: false, created_by_username: false, external_auth_providers: false, id: true, job_id: false, message: false, name: true, organization_id: false, readme: true, template_id: true, updated_at: false |
| User<br>create, write, delete | avatar_url: false, created_at: false, deleted: true, email: true, hashed_password: true, id: true, last_seen_at: false, login_type: true, name: true, quiet_hours_schedule: true, rbac_roles: true, status: true, theme_preference: false, updated_at: false, username: true
| diff --git a/docs/admin/healthcheck.md b/docs/admin/healthcheck.md index 1b3918a3bb253..44d10dadc6862 100644 --- a/docs/admin/healthcheck.md +++ b/docs/admin/healthcheck.md @@ -328,6 +328,17 @@ version of Coder. > Note: This may be a transient issue if you are currently in the process of > updating your deployment. +### EIF01 + +_Interface with Small MTU_ + +**Problem:** One or more local interfaces have MTU smaller than 1378, which is +the minimum MTU for Coder to establish direct connections without fragmentation. + +**Solution:** Since IP fragmentation can be a source of performance problems, we +recommend you disable the interface when using Coder or +[disable direct connections](../../cli#--disable-direct-connections) + ## EUNKNOWN _Unknown Error_ diff --git a/docs/admin/provisioners.md b/docs/admin/provisioners.md index 22f1eccdf1a88..422aa9b29d94c 100644 --- a/docs/admin/provisioners.md +++ b/docs/admin/provisioners.md @@ -18,11 +18,11 @@ sometimes benefits to running external provisioner daemons: - **Reduce server load**: External provisioners reduce load and build queue times from the Coder server. See - [Scaling Coder](./scale.md#concurrent-workspace-builds) for more details. + [Scaling Coder](scaling/scale-utility.md#recent-scale-tests) for more details. Each provisioner can run a single -[concurrent workspace build](./scale.md#concurrent-workspace-builds). For -example, running 30 provisioner containers will allow 30 users to start +[concurrent workspace build](scaling/scale-testing.md#control-plane-provisionerd). +For example, running 30 provisioner containers will allow 30 users to start workspaces at the same time. Provisioners are started with the diff --git a/docs/admin/architectures/index.md b/docs/admin/scaling/scale-testing.md similarity index 51% rename from docs/admin/architectures/index.md rename to docs/admin/scaling/scale-testing.md index 85c06a650dee9..761f22bfcd0e6 100644 --- a/docs/admin/architectures/index.md +++ b/docs/admin/scaling/scale-testing.md @@ -1,104 +1,20 @@ -# Reference Architectures - -This document provides prescriptive solutions and reference architectures to -support successful deployments of up to 3000 users and outlines at a high-level -the methodology currently used to scale-test Coder. - -## General concepts - -This section outlines core concepts and terminology essential for understanding -Coder's architecture and deployment strategies. - -### Administrator - -An administrator is a user role within the Coder platform with elevated -privileges. Admins have access to administrative functions such as user -management, template definitions, insights, and deployment configuration. - -### Coder - -Coder, also known as _coderd_, is the main service recommended for deployment -with multiple replicas to ensure high availability. It provides an API for -managing workspaces and templates. Each _coderd_ replica has the capability to -host multiple [provisioners](#provisioner). - -### User - -A user is an individual who utilizes the Coder platform to develop, test, and -deploy applications using workspaces. Users can select available templates to -provision workspaces. They interact with Coder using the web interface, the CLI -tool, or directly calling API methods. - -### Workspace - -A workspace refers to an isolated development environment where users can write, -build, and run code. Workspaces are fully configurable and can be tailored to -specific project requirements, providing developers with a consistent and -efficient development environment. 
Workspaces can be autostarted and -autostopped, enabling efficient resource management. - -Users can connect to workspaces using SSH or via workspace applications like -`code-server`, facilitating collaboration and remote access. Additionally, -workspaces can be parameterized, allowing users to customize settings and -configurations based on their unique needs. Workspaces are instantiated using -Coder templates and deployed on resources created by provisioners. - -### Template - -A template in Coder is a predefined configuration for creating workspaces. -Templates streamline the process of workspace creation by providing -pre-configured settings, tooling, and dependencies. They are built by template -administrators on top of Terraform, allowing for efficient management of -infrastructure resources. Additionally, templates can utilize Coder modules to -leverage existing features shared with other templates, enhancing flexibility -and consistency across deployments. Templates describe provisioning rules for -infrastructure resources offered by Terraform providers. - -### Workspace Proxy - -A workspace proxy serves as a relay connection option for developers connecting -to their workspace over SSH, a workspace app, or through port forwarding. It -helps reduce network latency for geo-distributed teams by minimizing the -distance network traffic needs to travel. Notably, workspace proxies do not -handle dashboard connections or API calls. - -### Provisioner - -Provisioners in Coder execute Terraform during workspace and template builds. -While the platform includes built-in provisioner daemons by default, there are -advantages to employing external provisioners. These external daemons provide -secure build environments and reduce server load, improving performance and -scalability. Each provisioner can handle a single concurrent workspace build, -allowing for efficient resource allocation and workload management. - -### Registry - -The Coder Registry is a platform where you can find starter templates and -_Modules_ for various cloud services and platforms. - -Templates help create self-service development environments using -Terraform-defined infrastructure, while _Modules_ simplify template creation by -providing common features like workspace applications, third-party integrations, -or helper scripts. - -Please note that the Registry is a hosted service and isn't available for -offline use. - -## Scale-testing methodology +# Scale Testing Scaling Coder involves planning and testing to ensure it can handle more load without compromising service. This process encompasses infrastructure setup, traffic projections, and aggressive testing to identify and mitigate potential bottlenecks. -A dedicated Kubernetes cluster for Coder is Kubernetes cluster specifically -configured to host and manage Coder workloads. Kubernetes provides container -orchestration capabilities, allowing Coder to efficiently deploy, scale, and -manage workspaces across a distributed infrastructure. This ensures high -availability, fault tolerance, and scalability for Coder deployments. Code is -deployed on this cluster using the +A dedicated Kubernetes cluster for Coder is recommended to configure, host and +manage Coder workloads. Kubernetes provides container orchestration +capabilities, allowing Coder to efficiently deploy, scale, and manage workspaces +across a distributed infrastructure. This ensures high availability, fault +tolerance, and scalability for Coder deployments. 
Coder is deployed on this +cluster using the [Helm chart](../../install/kubernetes.md#install-coder-with-helm). +## Methodology + Our scale tests include the following stages: 1. Prepare environment: create expected users and provision workspaces. @@ -119,7 +35,7 @@ Our scale tests include the following stages: 6. Cleanup: delete workspaces and users created in step 1. -### Infrastructure and setup requirements +## Infrastructure and setup requirements The scale tests runner can distribute the workload to overlap single scenarios based on the workflow configuration: @@ -146,7 +62,7 @@ The test is deemed successful if users did not experience interruptions in their workflows, `coderd` did not crash or require restarts, and no other internal errors were observed. -### Traffic Projections +## Traffic Projections In our scale tests, we simulate activity from 2000 users, 2000 workspaces, and 2000 agents, with two items of workspace agent metadata being sent every 10 @@ -174,11 +90,11 @@ Database: ## Available reference architectures -[Up to 1,000 users](1k-users.md) +[Up to 1,000 users](../architectures/1k-users.md) -[Up to 2,000 users](2k-users.md) +[Up to 2,000 users](../architectures/2k-users.md) -[Up to 3,000 users](3k-users.md) +[Up to 3,000 users](../architectures/3k-users.md) ## Hardware recommendation @@ -237,8 +153,8 @@ with a deployment of Coder [workspace proxies](../workspace-proxies.md). **Node Autoscaling** We recommend disabling the autoscaling for `coderd` nodes. Autoscaling can cause -interruptions for user connections, see [Autoscaling](../scale.md#autoscaling) -for more details. +interruptions for user connections, see +[Autoscaling](scale-utility.md#autoscaling) for more details. ### Control plane: Workspace Proxies @@ -315,96 +231,3 @@ Scaling down workspace nodes to zero is not recommended, as it will result in longer wait times for workspace provisioning by users. However, this may be necessary for workspaces with special resource requirements (e.g. GPUs) that incur significant cost overheads. - -### Data plane: External database - -While running in production, Coder requires a access to an external PostgreSQL -database. Depending on the scale of the user-base, workspace activity, and High -Availability requirements, the amount of CPU and memory resources required by -Coder's database may differ. - -#### Scaling formula - -When determining scaling requirements, take into account the following -considerations: - -- `2 vCPU x 8 GB RAM x 512 GB storage`: A baseline for database requirements for - Coder deployment with less than 1000 users, and low activity level (30% active - users). This capacity should be sufficient to support 100 external - provisioners. -- Storage size depends on user activity, workspace builds, log verbosity, - overhead on database encryption, etc. -- Allocate two additional CPU core to the database instance for every 1000 - active users. -- Enable _High Availability_ mode for database engine for large scale - deployments. - -If you enable [database encryption](../encryption.md) in Coder, consider -allocating an additional CPU core to every `coderd` replica. - -#### Performance optimization guidelines - -We provide the following general recommendations for PostgreSQL settings: - -- Increase number of vCPU if CPU utilization or database latency is high. -- Allocate extra memory if database performance is poor, CPU utilization is low, - and memory utilization is high. 
-- Utilize faster disk options (higher IOPS) such as SSDs or NVMe drives for - optimal performance enhancement and possibly reduce database load. - -## Operational readiness - -Operational readiness in Coder is about ensuring that everything is set up -correctly before launching a platform into production. It involves making sure -that the service is reliable, secure, and easily scales accordingly to user-base -needs. Operational readiness is crucial because it helps prevent issues that -could affect workspace users experience once the platform is live. - -Learn about Coder design principles and architectural best practices described -in the -[Well-Architected Framework](https://coder.com/blog/coder-well-architected-framework). - -### Configuration - -1. Identify the required Helm values for configuration. -1. Create `values.yaml` and add it to a version control system. _Note:_ it is - highly recommended that you create a custom `values.yaml` as opposed to - copying the entire default values. -1. Determine the necessary environment variables. - -### Template configuration - -1. Establish a dedicated user account for the _Template Administrator_. -1. Maintain Coder templates using version control. -1. Consider implementing a GitOps workflow to automatically push new template. - For example, on Github, you can use the - [Update Coder Template](https://github.com/marketplace/actions/update-coder-template) - action. -1. Evaluate enabling automatic template updates upon workspace startup. - -### Deployment - -1. Leverage automation tooling to automate deployment and upgrades of Coder. - -### Observability - -1. Enable the Prometheus endpoint (environment variable: - `CODER_PROMETHEUS_ENABLE`). -1. Deploy a visual monitoring system such as Grafana for metrics visualization. -1. Deploy a centralized logs aggregation solution to collect and monitor - application logs. -1. Review the [Prometheus response](../prometheus.md) and set up alarms on - selected metrics. - -### Database backups - -1. Prepare internal scripts for dumping and restoring databases. -1. Schedule regular database backups, especially before release upgrades. - -### User support - -1. Incorporate [support links](../appearance.md#support-links) into internal - documentation accessible from the user context menu. Ensure that hyperlinks - are valid and lead to up-to-date materials. -1. Encourage the use of `coder support bundle` to allow workspace users to - generate and provide network-related diagnostic data. diff --git a/docs/admin/scale.md b/docs/admin/scaling/scale-utility.md similarity index 97% rename from docs/admin/scale.md rename to docs/admin/scaling/scale-utility.md index 883516d9146f7..b841c52f6ee48 100644 --- a/docs/admin/scale.md +++ b/docs/admin/scaling/scale-utility.md @@ -1,18 +1,20 @@ +# Scale Tests and Utilities + We scale-test Coder with [a built-in utility](#scale-testing-utility) that can be used in your environment for insights into how Coder scales with your infrastructure. For scale-testing Kubernetes clusters we recommend to install and use the dedicated Coder template, [scaletest-runner](https://github.com/coder/coder/tree/main/scaletest/templates/scaletest-runner). -Learn more about [Coder’s architecture](../about/architecture.md) and our -[scale-testing methodology](architectures/index.md#scale-testing-methodology). +Learn more about [Coder’s architecture](../architectures/architecture.md) and +our [scale-testing methodology](scale-testing.md). 
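For orientation, a hedged sketch of invoking the built-in utility from the CLI follows. The workspace count and template name are arbitrary examples, and flags can change between releases, so check `coder exp scaletest --help` for the exact interface in your version:

```shell
# Provision a batch of test workspaces from an existing template.
# (count and template name are illustrative)
coder exp scaletest create-workspaces --count 100 --template "kubernetes"

# Tear down everything the scale test created once you have the results.
coder exp scaletest cleanup
```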
## Recent scale tests > Note: the below information is for reference purposes only, and are not > intended to be used as guidelines for infrastructure sizing. Review the -> [Reference Architectures](architectures/index.md) for hardware sizing -> recommendations. +> [Reference Architectures](../architectures/validated-arch.md#node-sizing) for +> hardware sizing recommendations. | Environment | Coder CPU | Coder RAM | Coder Replicas | Database | Users | Concurrent builds | Concurrent connections (Terminal/SSH) | Coder Version | Last tested | | ---------------- | --------- | --------- | -------------- | ----------------- | ----- | ----------------- | ------------------------------------- | ------------- | ------------ | @@ -247,6 +249,6 @@ an annotation on the coderd deployment. ## Troubleshooting If a load test fails or if you are experiencing performance issues during -day-to-day use, you can leverage Coder's [Prometheus metrics](./prometheus.md) +day-to-day use, you can leverage Coder's [Prometheus metrics](../prometheus.md) to identify bottlenecks during scale tests. Additionally, you can use your existing cloud monitoring stack to measure load, view server logs, etc. diff --git a/docs/api/agents.md b/docs/api/agents.md index 0d73ca9262c11..e32fb0ac10f7a 100644 --- a/docs/api/agents.md +++ b/docs/api/agents.md @@ -160,67 +160,6 @@ curl -X POST http://coder-server:8080/api/v2/workspaceagents/google-instance-ide To perform this operation, you must be authenticated. [Learn more](authentication.md). -## Submit workspace agent application health - -### Code samples - -```shell -# Example request using curl -curl -X POST http://coder-server:8080/api/v2/workspaceagents/me/app-health \ - -H 'Content-Type: application/json' \ - -H 'Coder-Session-Token: API_KEY' -``` - -`POST /workspaceagents/me/app-health` - -> Body parameter - -```json -{ - "healths": { - "property1": "disabled", - "property2": "disabled" - } -} -``` - -### Parameters - -| Name | In | Type | Required | Description | -| ------ | ---- | -------------------------------------------------------------------------- | -------- | -------------------------- | -| `body` | body | [agentsdk.PostAppHealthsRequest](schemas.md#agentsdkpostapphealthsrequest) | true | Application health request | - -### Responses - -| Status | Meaning | Description | Schema | -| ------ | ------------------------------------------------------- | ----------- | ------ | -| 200 | [OK](https://tools.ietf.org/html/rfc7231#section-6.3.1) | OK | | - -To perform this operation, you must be authenticated. [Learn more](authentication.md). - -## Coordinate workspace agent via Tailnet - -### Code samples - -```shell -# Example request using curl -curl -X GET http://coder-server:8080/api/v2/workspaceagents/me/coordinate \ - -H 'Coder-Session-Token: API_KEY' -``` - -`GET /workspaceagents/me/coordinate` - -It accepts a WebSocket connection to an agent that listens to -incoming connections and publishes node updates. - -### Responses - -| Status | Meaning | Description | Schema | -| ------ | ------------------------------------------------------------------------ | ------------------- | ------ | -| 101 | [Switching Protocols](https://tools.ietf.org/html/rfc7231#section-6.2.2) | Switching Protocols | | - -To perform this operation, you must be authenticated. [Learn more](authentication.md). 
- ## Get workspace agent external auth ### Code samples @@ -341,78 +280,35 @@ curl -X GET http://coder-server:8080/api/v2/workspaceagents/me/gitsshkey \ To perform this operation, you must be authenticated. [Learn more](authentication.md). -## Patch workspace agent logs +## Post workspace agent log source ### Code samples ```shell # Example request using curl -curl -X PATCH http://coder-server:8080/api/v2/workspaceagents/me/logs \ +curl -X POST http://coder-server:8080/api/v2/workspaceagents/me/log-source \ -H 'Content-Type: application/json' \ -H 'Accept: application/json' \ -H 'Coder-Session-Token: API_KEY' ``` -`PATCH /workspaceagents/me/logs` +`POST /workspaceagents/me/log-source` > Body parameter ```json { - "log_source_id": "string", - "logs": [ - { - "created_at": "string", - "level": "trace", - "output": "string" - } - ] + "display_name": "string", + "icon": "string", + "id": "string" } ``` ### Parameters -| Name | In | Type | Required | Description | -| ------ | ---- | -------------------------------------------------- | -------- | ----------- | -| `body` | body | [agentsdk.PatchLogs](schemas.md#agentsdkpatchlogs) | true | logs | - -### Example responses - -> 200 Response - -```json -{ - "detail": "string", - "message": "string", - "validations": [ - { - "detail": "string", - "field": "string" - } - ] -} -``` - -### Responses - -| Status | Meaning | Description | Schema | -| ------ | ------------------------------------------------------- | ----------- | ------------------------------------------------ | -| 200 | [OK](https://tools.ietf.org/html/rfc7231#section-6.3.1) | OK | [codersdk.Response](schemas.md#codersdkresponse) | - -To perform this operation, you must be authenticated. [Learn more](authentication.md). - -## Get authorized workspace agent manifest - -### Code samples - -```shell -# Example request using curl -curl -X GET http://coder-server:8080/api/v2/workspaceagents/me/manifest \ - -H 'Accept: application/json' \ - -H 'Coder-Session-Token: API_KEY' -``` - -`GET /workspaceagents/me/manifest` +| Name | In | Type | Required | Description | +| ------ | ---- | ------------------------------------------------------------------------ | -------- | ------------------ | +| `body` | body | [agentsdk.PostLogSourceRequest](schemas.md#agentsdkpostlogsourcerequest) | true | Log source request | ### Example responses @@ -420,217 +316,35 @@ curl -X GET http://coder-server:8080/api/v2/workspaceagents/me/manifest \ ```json { - "agent_id": "string", - "agent_name": "string", - "apps": [ - { - "command": "string", - "display_name": "string", - "external": true, - "health": "disabled", - "healthcheck": { - "interval": 0, - "threshold": 0, - "url": "string" - }, - "icon": "string", - "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", - "sharing_level": "owner", - "slug": "string", - "subdomain": true, - "subdomain_name": "string", - "url": "string" - } - ], - "derp_force_websockets": true, - "derpmap": { - "homeParams": { - "regionScore": { - "property1": 0, - "property2": 0 - } - }, - "omitDefaultRegions": true, - "regions": { - "property1": { - "avoid": true, - "embeddedRelay": true, - "nodes": [ - { - "canPort80": true, - "certName": "string", - "derpport": 0, - "forceHTTP": true, - "hostName": "string", - "insecureForTests": true, - "ipv4": "string", - "ipv6": "string", - "name": "string", - "regionID": 0, - "stunonly": true, - "stunport": 0, - "stuntestIP": "string" - } - ], - "regionCode": "string", - "regionID": 0, - "regionName": "string" - }, - "property2": { - "avoid": true, - 
"embeddedRelay": true, - "nodes": [ - { - "canPort80": true, - "certName": "string", - "derpport": 0, - "forceHTTP": true, - "hostName": "string", - "insecureForTests": true, - "ipv4": "string", - "ipv6": "string", - "name": "string", - "regionID": 0, - "stunonly": true, - "stunport": 0, - "stuntestIP": "string" - } - ], - "regionCode": "string", - "regionID": 0, - "regionName": "string" - } - } - }, - "directory": "string", - "disable_direct_connections": true, - "environment_variables": { - "property1": "string", - "property2": "string" - }, - "git_auth_configs": 0, - "metadata": [ - { - "display_name": "string", - "interval": 0, - "key": "string", - "script": "string", - "timeout": 0 - } - ], - "motd_file": "string", - "owner_name": "string", - "scripts": [ - { - "cron": "string", - "log_path": "string", - "log_source_id": "4197ab25-95cf-4b91-9c78-f7f2af5d353a", - "run_on_start": true, - "run_on_stop": true, - "script": "string", - "start_blocks_login": true, - "timeout": 0 - } - ], - "vscode_port_proxy_uri": "string", - "workspace_id": "string", - "workspace_name": "string" -} -``` - -### Responses - -| Status | Meaning | Description | Schema | -| ------ | ------------------------------------------------------- | ----------- | ------------------------------------------------ | -| 200 | [OK](https://tools.ietf.org/html/rfc7231#section-6.3.1) | OK | [agentsdk.Manifest](schemas.md#agentsdkmanifest) | - -To perform this operation, you must be authenticated. [Learn more](authentication.md). - -## Submit workspace agent stats - -### Code samples - -```shell -# Example request using curl -curl -X POST http://coder-server:8080/api/v2/workspaceagents/me/report-stats \ - -H 'Content-Type: application/json' \ - -H 'Accept: application/json' \ - -H 'Coder-Session-Token: API_KEY' -``` - -`POST /workspaceagents/me/report-stats` - -> Body parameter - -```json -{ - "connection_count": 0, - "connection_median_latency_ms": 0, - "connections_by_proto": { - "property1": 0, - "property2": 0 - }, - "metrics": [ - { - "labels": [ - { - "name": "string", - "value": "string" - } - ], - "name": "string", - "type": "counter", - "value": 0 - } - ], - "rx_bytes": 0, - "rx_packets": 0, - "session_count_jetbrains": 0, - "session_count_reconnecting_pty": 0, - "session_count_ssh": 0, - "session_count_vscode": 0, - "tx_bytes": 0, - "tx_packets": 0 -} -``` - -### Parameters - -| Name | In | Type | Required | Description | -| ------ | ---- | ------------------------------------------ | -------- | ------------- | -| `body` | body | [agentsdk.Stats](schemas.md#agentsdkstats) | true | Stats request | - -### Example responses - -> 200 Response - -```json -{ - "report_interval": 0 + "created_at": "2019-08-24T14:15:22Z", + "display_name": "string", + "icon": "string", + "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", + "workspace_agent_id": "7ad2e618-fea7-4c1a-b70a-f501566a72f1" } ``` ### Responses -| Status | Meaning | Description | Schema | -| ------ | ------------------------------------------------------- | ----------- | ---------------------------------------------------------- | -| 200 | [OK](https://tools.ietf.org/html/rfc7231#section-6.3.1) | OK | [agentsdk.StatsResponse](schemas.md#agentsdkstatsresponse) | +| Status | Meaning | Description | Schema | +| ------ | ------------------------------------------------------- | ----------- | ------------------------------------------------------------------------------ | +| 200 | [OK](https://tools.ietf.org/html/rfc7231#section-6.3.1) | OK | 
[codersdk.WorkspaceAgentLogSource](schemas.md#codersdkworkspaceagentlogsource) | To perform this operation, you must be authenticated. [Learn more](authentication.md). -## Removed: Patch workspace agent logs +## Patch workspace agent logs ### Code samples ```shell # Example request using curl -curl -X PATCH http://coder-server:8080/api/v2/workspaceagents/me/startup-logs \ +curl -X PATCH http://coder-server:8080/api/v2/workspaceagents/me/logs \ -H 'Content-Type: application/json' \ -H 'Accept: application/json' \ -H 'Coder-Session-Token: API_KEY' ``` -`PATCH /workspaceagents/me/startup-logs` +`PATCH /workspaceagents/me/logs` > Body parameter diff --git a/docs/api/audit.md b/docs/api/audit.md index a755ed9412bd5..0c2cf32cd2758 100644 --- a/docs/api/audit.md +++ b/docs/api/audit.md @@ -68,7 +68,8 @@ curl -X GET http://coder-server:8080/api/v2/audit?limit=0 \ "roles": [ { "display_name": "string", - "name": "string" + "name": "string", + "organization_id": "string" } ], "status": "active", diff --git a/docs/api/debug.md b/docs/api/debug.md index 0ae74b501210a..317efbd0c0650 100644 --- a/docs/api/debug.md +++ b/docs/api/debug.md @@ -280,7 +280,6 @@ curl -X GET http://coder-server:8080/api/v2/debug/health \ } ] }, - "failing_sections": ["DERP"], "healthy": true, "provisioner_daemons": { "dismissed": true, diff --git a/docs/api/enterprise.md b/docs/api/enterprise.md index 3cf43102e7c77..758489995ccf5 100644 --- a/docs/api/enterprise.md +++ b/docs/api/enterprise.md @@ -1600,7 +1600,8 @@ curl -X PATCH http://coder-server:8080/api/v2/scim/v2/Users/{id} \ "roles": [ { "display_name": "string", - "name": "string" + "name": "string", + "organization_id": "string" } ], "status": "active", @@ -1655,7 +1656,8 @@ curl -X GET http://coder-server:8080/api/v2/templates/{template}/acl \ "roles": [ { "display_name": "string", - "name": "string" + "name": "string", + "organization_id": "string" } ], "status": "active", @@ -1690,6 +1692,7 @@ Status Code **200** | `» roles` | array | false | | | | `»» display_name` | string | false | | | | `»» name` | string | false | | | +| `»» organization_id` | string | false | | | | `» status` | [codersdk.UserStatus](schemas.md#codersdkuserstatus) | false | | | | `» theme_preference` | string | false | | | | `» username` | string | true | | | diff --git a/docs/api/general.md b/docs/api/general.md index 52313409cb02c..620e3b238d7b3 100644 --- a/docs/api/general.md +++ b/docs/api/general.md @@ -57,6 +57,7 @@ curl -X GET http://coder-server:8080/api/v2/buildinfo \ "dashboard_url": "string", "deployment_id": "string", "external_url": "string", + "telemetry": true, "upgrade_message": "string", "version": "string", "workspace_proxy": true @@ -227,7 +228,6 @@ curl -X GET http://coder-server:8080/api/v2/deployment/config \ "device_flow": true, "display_icon": "string", "display_name": "string", - "extra_token_keys": ["string"], "id": "string", "no_refresh": true, "regex": "string", @@ -294,6 +294,7 @@ curl -X GET http://coder-server:8080/api/v2/deployment/config \ "ignore_email_verified": true, "ignore_user_info": true, "issuer_url": "string", + "name_field": "string", "scopes": ["string"], "sign_in_text": "string", "signups_disabled_text": "string", diff --git a/docs/api/insights.md b/docs/api/insights.md index 7dae576b847b8..eb1a7679a6708 100644 --- a/docs/api/insights.md +++ b/docs/api/insights.md @@ -6,13 +6,19 @@ ```shell # Example request using curl -curl -X GET http://coder-server:8080/api/v2/insights/daus \ +curl -X GET http://coder-server:8080/api/v2/insights/daus?tz_offset=0 
\ -H 'Accept: application/json' \ -H 'Coder-Session-Token: API_KEY' ``` `GET /insights/daus` +### Parameters + +| Name | In | Type | Required | Description | +| ----------- | ----- | ------- | -------- | -------------------------- | +| `tz_offset` | query | integer | true | Time-zone offset (e.g. -2) | + ### Example responses > 200 Response @@ -43,7 +49,7 @@ To perform this operation, you must be authenticated. [Learn more](authenticatio ```shell # Example request using curl -curl -X GET http://coder-server:8080/api/v2/insights/templates?before=0&after=0 \ +curl -X GET http://coder-server:8080/api/v2/insights/templates?start_time=2019-08-24T14%3A15%3A22Z&end_time=2019-08-24T14%3A15%3A22Z&interval=week \ -H 'Accept: application/json' \ -H 'Coder-Session-Token: API_KEY' ``` @@ -52,10 +58,19 @@ curl -X GET http://coder-server:8080/api/v2/insights/templates?before=0&after=0 ### Parameters -| Name | In | Type | Required | Description | -| -------- | ----- | ------- | -------- | ----------- | -| `before` | query | integer | true | Start time | -| `after` | query | integer | true | End time | +| Name | In | Type | Required | Description | +| -------------- | ----- | ----------------- | -------- | ------------ | +| `start_time` | query | string(date-time) | true | Start time | +| `end_time` | query | string(date-time) | true | End time | +| `interval` | query | string | true | Interval | +| `template_ids` | query | array[string] | false | Template IDs | + +#### Enumerated Values + +| Parameter | Value | +| ---------- | ------ | +| `interval` | `week` | +| `interval` | `day` | ### Example responses @@ -129,7 +144,7 @@ To perform this operation, you must be authenticated. [Learn more](authenticatio ```shell # Example request using curl -curl -X GET http://coder-server:8080/api/v2/insights/user-activity?before=0&after=0 \ +curl -X GET http://coder-server:8080/api/v2/insights/user-activity?start_time=2019-08-24T14%3A15%3A22Z&end_time=2019-08-24T14%3A15%3A22Z \ -H 'Accept: application/json' \ -H 'Coder-Session-Token: API_KEY' ``` @@ -138,10 +153,11 @@ curl -X GET http://coder-server:8080/api/v2/insights/user-activity?before=0&afte ### Parameters -| Name | In | Type | Required | Description | -| -------- | ----- | ------- | -------- | ----------- | -| `before` | query | integer | true | Start time | -| `after` | query | integer | true | End time | +| Name | In | Type | Required | Description | +| -------------- | ----- | ----------------- | -------- | ------------ | +| `start_time` | query | string(date-time) | true | Start time | +| `end_time` | query | string(date-time) | true | End time | +| `template_ids` | query | array[string] | false | Template IDs | ### Example responses @@ -180,7 +196,7 @@ To perform this operation, you must be authenticated. 
[Learn more](authenticatio ```shell # Example request using curl -curl -X GET http://coder-server:8080/api/v2/insights/user-latency?before=0&after=0 \ +curl -X GET http://coder-server:8080/api/v2/insights/user-latency?start_time=2019-08-24T14%3A15%3A22Z&end_time=2019-08-24T14%3A15%3A22Z \ -H 'Accept: application/json' \ -H 'Coder-Session-Token: API_KEY' ``` @@ -189,10 +205,11 @@ curl -X GET http://coder-server:8080/api/v2/insights/user-latency?before=0&after ### Parameters -| Name | In | Type | Required | Description | -| -------- | ----- | ------- | -------- | ----------- | -| `before` | query | integer | true | Start time | -| `after` | query | integer | true | End time | +| Name | In | Type | Required | Description | +| -------------- | ----- | ----------------- | -------- | ------------ | +| `start_time` | query | string(date-time) | true | Start time | +| `end_time` | query | string(date-time) | true | End time | +| `template_ids` | query | array[string] | false | Template IDs | ### Example responses diff --git a/docs/api/members.md b/docs/api/members.md index 6364b08ca528e..1a9beae285157 100644 --- a/docs/api/members.md +++ b/docs/api/members.md @@ -1,5 +1,72 @@ # Members +## List organization members + +### Code samples + +```shell +# Example request using curl +curl -X GET http://coder-server:8080/api/v2/organizations/{organization}/members \ + -H 'Accept: application/json' \ + -H 'Coder-Session-Token: API_KEY' +``` + +`GET /organizations/{organization}/members` + +### Parameters + +| Name | In | Type | Required | Description | +| -------------- | ---- | ------ | -------- | --------------- | +| `organization` | path | string | true | Organization ID | + +### Example responses + +> 200 Response + +```json +[ + { + "created_at": "2019-08-24T14:15:22Z", + "organization_id": "7c60d51f-b44e-4682-87d6-449835ea4de6", + "roles": [ + { + "display_name": "string", + "name": "string", + "organization_id": "string" + } + ], + "updated_at": "2019-08-24T14:15:22Z", + "user_id": "a169451c-8525-4352-b8ca-070dd449a1a5", + "username": "string" + } +] +``` + +### Responses + +| Status | Meaning | Description | Schema | +| ------ | ------------------------------------------------------- | ----------- | --------------------------------------------------------------------------------------------- | +| 200 | [OK](https://tools.ietf.org/html/rfc7231#section-6.3.1) | OK | array of [codersdk.OrganizationMemberWithName](schemas.md#codersdkorganizationmemberwithname) | + +

+### Response Schema

+ +Status Code **200** + +| Name | Type | Required | Restrictions | Description | +| -------------------- | ----------------- | -------- | ------------ | ----------- | +| `[array item]` | array | false | | | +| `» created_at` | string(date-time) | false | | | +| `» organization_id` | string(uuid) | false | | | +| `» roles` | array | false | | | +| `»» display_name` | string | false | | | +| `»» name` | string | false | | | +| `»» organization_id` | string | false | | | +| `» updated_at` | string(date-time) | false | | | +| `» user_id` | string(uuid) | false | | | +| `» username` | string | false | | | + +To perform this operation, you must be authenticated. [Learn more](authentication.md). + ## Get member roles by organization ### Code samples @@ -248,6 +315,102 @@ Status Code **200** To perform this operation, you must be authenticated. [Learn more](authentication.md). +## Add organization member + +### Code samples + +```shell +# Example request using curl +curl -X POST http://coder-server:8080/api/v2/organizations/{organization}/members/{user} \ + -H 'Accept: application/json' \ + -H 'Coder-Session-Token: API_KEY' +``` + +`POST /organizations/{organization}/members/{user}` + +### Parameters + +| Name | In | Type | Required | Description | +| -------------- | ---- | ------ | -------- | -------------------- | +| `organization` | path | string | true | Organization ID | +| `user` | path | string | true | User ID, name, or me | + +### Example responses + +> 200 Response + +```json +{ + "created_at": "2019-08-24T14:15:22Z", + "organization_id": "7c60d51f-b44e-4682-87d6-449835ea4de6", + "roles": [ + { + "display_name": "string", + "name": "string", + "organization_id": "string" + } + ], + "updated_at": "2019-08-24T14:15:22Z", + "user_id": "a169451c-8525-4352-b8ca-070dd449a1a5" +} +``` + +### Responses + +| Status | Meaning | Description | Schema | +| ------ | ------------------------------------------------------- | ----------- | -------------------------------------------------------------------- | +| 200 | [OK](https://tools.ietf.org/html/rfc7231#section-6.3.1) | OK | [codersdk.OrganizationMember](schemas.md#codersdkorganizationmember) | + +To perform this operation, you must be authenticated. [Learn more](authentication.md). 
+ +## Remove organization member + +### Code samples + +```shell +# Example request using curl +curl -X DELETE http://coder-server:8080/api/v2/organizations/{organization}/members/{user} \ + -H 'Accept: application/json' \ + -H 'Coder-Session-Token: API_KEY' +``` + +`DELETE /organizations/{organization}/members/{user}` + +### Parameters + +| Name | In | Type | Required | Description | +| -------------- | ---- | ------ | -------- | -------------------- | +| `organization` | path | string | true | Organization ID | +| `user` | path | string | true | User ID, name, or me | + +### Example responses + +> 200 Response + +```json +{ + "created_at": "2019-08-24T14:15:22Z", + "organization_id": "7c60d51f-b44e-4682-87d6-449835ea4de6", + "roles": [ + { + "display_name": "string", + "name": "string", + "organization_id": "string" + } + ], + "updated_at": "2019-08-24T14:15:22Z", + "user_id": "a169451c-8525-4352-b8ca-070dd449a1a5" +} +``` + +### Responses + +| Status | Meaning | Description | Schema | +| ------ | ------------------------------------------------------- | ----------- | -------------------------------------------------------------------- | +| 200 | [OK](https://tools.ietf.org/html/rfc7231#section-6.3.1) | OK | [codersdk.OrganizationMember](schemas.md#codersdkorganizationmember) | + +To perform this operation, you must be authenticated. [Learn more](authentication.md). + ## Assign role to organization member ### Code samples @@ -289,7 +452,8 @@ curl -X PUT http://coder-server:8080/api/v2/organizations/{organization}/members "roles": [ { "display_name": "string", - "name": "string" + "name": "string", + "organization_id": "string" } ], "updated_at": "2019-08-24T14:15:22Z", diff --git a/docs/api/organizations.md b/docs/api/organizations.md index c6f4514eb9bad..a1f8273549f80 100644 --- a/docs/api/organizations.md +++ b/docs/api/organizations.md @@ -105,6 +105,9 @@ curl -X POST http://coder-server:8080/api/v2/organizations \ ```json { + "description": "string", + "display_name": "string", + "icon": "string", "name": "string" } ``` @@ -122,6 +125,9 @@ curl -X POST http://coder-server:8080/api/v2/organizations \ ```json { "created_at": "2019-08-24T14:15:22Z", + "description": "string", + "display_name": "string", + "icon": "string", "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", "is_default": true, "name": "string", @@ -163,6 +169,9 @@ curl -X GET http://coder-server:8080/api/v2/organizations/{organization} \ ```json { "created_at": "2019-08-24T14:15:22Z", + "description": "string", + "display_name": "string", + "icon": "string", "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", "is_default": true, "name": "string", @@ -240,6 +249,9 @@ curl -X PATCH http://coder-server:8080/api/v2/organizations/{organization} \ ```json { + "description": "string", + "display_name": "string", + "icon": "string", "name": "string" } ``` @@ -258,6 +270,9 @@ curl -X PATCH http://coder-server:8080/api/v2/organizations/{organization} \ ```json { "created_at": "2019-08-24T14:15:22Z", + "description": "string", + "display_name": "string", + "icon": "string", "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", "is_default": true, "name": "string", diff --git a/docs/api/schemas.md b/docs/api/schemas.md index 82804508b0e96..305b3c0e733f6 100644 --- a/docs/api/schemas.md +++ b/docs/api/schemas.md @@ -16,69 +16,6 @@ | `document` | string | true | | | | `signature` | string | true | | | -## agentsdk.AgentMetric - -```json -{ - "labels": [ - { - "name": "string", - "value": "string" - } - ], - "name": "string", - "type": "counter", - 
"value": 0 -} -``` - -### Properties - -| Name | Type | Required | Restrictions | Description | -| -------- | --------------------------------------------------------------- | -------- | ------------ | ----------- | -| `labels` | array of [agentsdk.AgentMetricLabel](#agentsdkagentmetriclabel) | false | | | -| `name` | string | true | | | -| `type` | [agentsdk.AgentMetricType](#agentsdkagentmetrictype) | true | | | -| `value` | number | true | | | - -#### Enumerated Values - -| Property | Value | -| -------- | --------- | -| `type` | `counter` | -| `type` | `gauge` | - -## agentsdk.AgentMetricLabel - -```json -{ - "name": "string", - "value": "string" -} -``` - -### Properties - -| Name | Type | Required | Restrictions | Description | -| ------- | ------ | -------- | ------------ | ----------- | -| `name` | string | true | | | -| `value` | string | true | | | - -## agentsdk.AgentMetricType - -```json -"counter" -``` - -### Properties - -#### Enumerated Values - -| Value | -| --------- | -| `counter` | -| `gauge` | - ## agentsdk.AuthenticateResponse ```json @@ -181,172 +118,6 @@ | `level` | [codersdk.LogLevel](#codersdkloglevel) | false | | | | `output` | string | false | | | -## agentsdk.Manifest - -```json -{ - "agent_id": "string", - "agent_name": "string", - "apps": [ - { - "command": "string", - "display_name": "string", - "external": true, - "health": "disabled", - "healthcheck": { - "interval": 0, - "threshold": 0, - "url": "string" - }, - "icon": "string", - "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", - "sharing_level": "owner", - "slug": "string", - "subdomain": true, - "subdomain_name": "string", - "url": "string" - } - ], - "derp_force_websockets": true, - "derpmap": { - "homeParams": { - "regionScore": { - "property1": 0, - "property2": 0 - } - }, - "omitDefaultRegions": true, - "regions": { - "property1": { - "avoid": true, - "embeddedRelay": true, - "nodes": [ - { - "canPort80": true, - "certName": "string", - "derpport": 0, - "forceHTTP": true, - "hostName": "string", - "insecureForTests": true, - "ipv4": "string", - "ipv6": "string", - "name": "string", - "regionID": 0, - "stunonly": true, - "stunport": 0, - "stuntestIP": "string" - } - ], - "regionCode": "string", - "regionID": 0, - "regionName": "string" - }, - "property2": { - "avoid": true, - "embeddedRelay": true, - "nodes": [ - { - "canPort80": true, - "certName": "string", - "derpport": 0, - "forceHTTP": true, - "hostName": "string", - "insecureForTests": true, - "ipv4": "string", - "ipv6": "string", - "name": "string", - "regionID": 0, - "stunonly": true, - "stunport": 0, - "stuntestIP": "string" - } - ], - "regionCode": "string", - "regionID": 0, - "regionName": "string" - } - } - }, - "directory": "string", - "disable_direct_connections": true, - "environment_variables": { - "property1": "string", - "property2": "string" - }, - "git_auth_configs": 0, - "metadata": [ - { - "display_name": "string", - "interval": 0, - "key": "string", - "script": "string", - "timeout": 0 - } - ], - "motd_file": "string", - "owner_name": "string", - "scripts": [ - { - "cron": "string", - "log_path": "string", - "log_source_id": "4197ab25-95cf-4b91-9c78-f7f2af5d353a", - "run_on_start": true, - "run_on_stop": true, - "script": "string", - "start_blocks_login": true, - "timeout": 0 - } - ], - "vscode_port_proxy_uri": "string", - "workspace_id": "string", - "workspace_name": "string" -} -``` - -### Properties - -| Name | Type | Required | Restrictions | Description | -| ---------------------------- | 
------------------------------------------------------------------------------------------------- | -------- | ------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `agent_id` | string | false | | | -| `agent_name` | string | false | | | -| `apps` | array of [codersdk.WorkspaceApp](#codersdkworkspaceapp) | false | | | -| `derp_force_websockets` | boolean | false | | | -| `derpmap` | [tailcfg.DERPMap](#tailcfgderpmap) | false | | | -| `directory` | string | false | | | -| `disable_direct_connections` | boolean | false | | | -| `environment_variables` | object | false | | | -| » `[any property]` | string | false | | | -| `git_auth_configs` | integer | false | | Git auth configs stores the number of Git configurations the Coder deployment has. If this number is >0, we set up special configuration in the workspace. | -| `metadata` | array of [codersdk.WorkspaceAgentMetadataDescription](#codersdkworkspaceagentmetadatadescription) | false | | | -| `motd_file` | string | false | | | -| `owner_name` | string | false | | Owner name and WorkspaceID are used by an open-source user to identify the workspace. We do not provide insurance that this will not be removed in the future, but if it's easy to persist lets keep it around. | -| `scripts` | array of [codersdk.WorkspaceAgentScript](#codersdkworkspaceagentscript) | false | | | -| `vscode_port_proxy_uri` | string | false | | | -| `workspace_id` | string | false | | | -| `workspace_name` | string | false | | | - -## agentsdk.Metadata - -```json -{ - "age": 0, - "collected_at": "2019-08-24T14:15:22Z", - "error": "string", - "key": "string", - "value": "string" -} -``` - -### Properties - -| Name | Type | Required | Restrictions | Description | -| -------------- | ------- | -------- | ------------ | --------------------------------------------------------------------------------------------------------------------------------------- | -| `age` | integer | false | | Age is the number of seconds since the metadata was collected. It is provided in addition to CollectedAt to protect against clock skew. | -| `collected_at` | string | false | | | -| `error` | string | false | | | -| `key` | string | false | | | -| `value` | string | false | | | - ## agentsdk.PatchLogs ```json @@ -369,165 +140,23 @@ | `log_source_id` | string | false | | | | `logs` | array of [agentsdk.Log](#agentsdklog) | false | | | -## agentsdk.PostAppHealthsRequest - -```json -{ - "healths": { - "property1": "disabled", - "property2": "disabled" - } -} -``` - -### Properties - -| Name | Type | Required | Restrictions | Description | -| ------------------ | ---------------------------------------------------------- | -------- | ------------ | --------------------------------------------------------------------- | -| `healths` | object | false | | Healths is a map of the workspace app name and the health of the app. 
| -| » `[any property]` | [codersdk.WorkspaceAppHealth](#codersdkworkspaceapphealth) | false | | | - -## agentsdk.PostLifecycleRequest - -```json -{ - "changed_at": "string", - "state": "created" -} -``` - -### Properties - -| Name | Type | Required | Restrictions | Description | -| ------------ | -------------------------------------------------------------------- | -------- | ------------ | ----------- | -| `changed_at` | string | false | | | -| `state` | [codersdk.WorkspaceAgentLifecycle](#codersdkworkspaceagentlifecycle) | false | | | - -## agentsdk.PostMetadataRequest - -```json -{ - "metadata": [ - { - "age": 0, - "collected_at": "2019-08-24T14:15:22Z", - "error": "string", - "key": "string", - "value": "string" - } - ] -} -``` - -### Properties - -| Name | Type | Required | Restrictions | Description | -| ---------- | ----------------------------------------------- | -------- | ------------ | ----------- | -| `metadata` | array of [agentsdk.Metadata](#agentsdkmetadata) | false | | | - -## agentsdk.PostMetadataRequestDeprecated +## agentsdk.PostLogSourceRequest ```json { - "age": 0, - "collected_at": "2019-08-24T14:15:22Z", - "error": "string", - "value": "string" -} -``` - -### Properties - -| Name | Type | Required | Restrictions | Description | -| -------------- | ------- | -------- | ------------ | --------------------------------------------------------------------------------------------------------------------------------------- | -| `age` | integer | false | | Age is the number of seconds since the metadata was collected. It is provided in addition to CollectedAt to protect against clock skew. | -| `collected_at` | string | false | | | -| `error` | string | false | | | -| `value` | string | false | | | - -## agentsdk.PostStartupRequest - -```json -{ - "expanded_directory": "string", - "subsystems": ["envbox"], - "version": "string" -} -``` - -### Properties - -| Name | Type | Required | Restrictions | Description | -| -------------------- | ----------------------------------------------------------- | -------- | ------------ | ----------- | -| `expanded_directory` | string | false | | | -| `subsystems` | array of [codersdk.AgentSubsystem](#codersdkagentsubsystem) | false | | | -| `version` | string | false | | | - -## agentsdk.Stats - -```json -{ - "connection_count": 0, - "connection_median_latency_ms": 0, - "connections_by_proto": { - "property1": 0, - "property2": 0 - }, - "metrics": [ - { - "labels": [ - { - "name": "string", - "value": "string" - } - ], - "name": "string", - "type": "counter", - "value": 0 - } - ], - "rx_bytes": 0, - "rx_packets": 0, - "session_count_jetbrains": 0, - "session_count_reconnecting_pty": 0, - "session_count_ssh": 0, - "session_count_vscode": 0, - "tx_bytes": 0, - "tx_packets": 0 -} -``` - -### Properties - -| Name | Type | Required | Restrictions | Description | -| -------------------------------- | ----------------------------------------------------- | -------- | ------------ | ----------------------------------------------------------------------------------------------------------------------------- | -| `connection_count` | integer | false | | Connection count is the number of connections received by an agent. | -| `connection_median_latency_ms` | number | false | | Connection median latency ms is the median latency of all connections in milliseconds. | -| `connections_by_proto` | object | false | | Connections by proto is a count of connections by protocol. 
| -| » `[any property]` | integer | false | | | -| `metrics` | array of [agentsdk.AgentMetric](#agentsdkagentmetric) | false | | Metrics collected by the agent | -| `rx_bytes` | integer | false | | Rx bytes is the number of received bytes. | -| `rx_packets` | integer | false | | Rx packets is the number of received packets. | -| `session_count_jetbrains` | integer | false | | Session count jetbrains is the number of connections received by an agent that are from our JetBrains extension. | -| `session_count_reconnecting_pty` | integer | false | | Session count reconnecting pty is the number of connections received by an agent that are from the reconnecting web terminal. | -| `session_count_ssh` | integer | false | | Session count ssh is the number of connections received by an agent that are normal, non-tagged SSH sessions. | -| `session_count_vscode` | integer | false | | Session count vscode is the number of connections received by an agent that are from our VS Code extension. | -| `tx_bytes` | integer | false | | Tx bytes is the number of transmitted bytes. | -| `tx_packets` | integer | false | | Tx packets is the number of transmitted bytes. | - -## agentsdk.StatsResponse - -```json -{ - "report_interval": 0 + "display_name": "string", + "icon": "string", + "id": "string" } ``` ### Properties -| Name | Type | Required | Restrictions | Description | -| ----------------- | ------- | -------- | ------------ | ------------------------------------------------------------------------------ | -| `report_interval` | integer | false | | Report interval is the duration after which the agent should send stats again. | +| Name | Type | Required | Restrictions | Description | +| -------------- | ------ | -------- | ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `display_name` | string | false | | | +| `icon` | string | false | | | +| `id` | string | false | | ID is a unique identifier for the log source. It is scoped to a workspace agent, and can be statically defined inside code to prevent duplicate sources from being created for the same agent. | ## coderd.SCIMUser @@ -948,7 +577,8 @@ "roles": [ { "display_name": "string", - "name": "string" + "name": "string", + "organization_id": "string" } ], "status": "active", @@ -1027,7 +657,8 @@ "roles": [ { "display_name": "string", - "name": "string" + "name": "string", + "organization_id": "string" } ], "status": "active", @@ -1234,6 +865,7 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in "dashboard_url": "string", "deployment_id": "string", "external_url": "string", + "telemetry": true, "upgrade_message": "string", "version": "string", "workspace_proxy": true @@ -1248,6 +880,7 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in | `dashboard_url` | string | false | | Dashboard URL is the URL to hit the deployment's dashboard. For external workspace proxies, this is the coderd they are connected to. | | `deployment_id` | string | false | | Deployment ID is the unique identifier for this deployment. | | `external_url` | string | false | | External URL references the current Coder version. For production builds, this will link directly to a release. For development builds, this will link to a commit. | +| `telemetry` | boolean | false | | Telemetry is a boolean that indicates whether telemetry is enabled. 
| | `upgrade_message` | string | false | | Upgrade message is the message displayed to users when an outdated client is detected. | | `version` | string | false | | Version returns the semantic version of the build. | | `workspace_proxy` | boolean | false | | | @@ -1305,6 +938,7 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in ```json { "email": "string", + "name": "string", "password": "string", "trial": true, "trial_info": { @@ -1325,6 +959,7 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in | Name | Type | Required | Restrictions | Description | | ------------ | ---------------------------------------------------------------------- | -------- | ------------ | ----------- | | `email` | string | true | | | +| `name` | string | false | | | | `password` | string | true | | | | `trial` | boolean | false | | | | `trial_info` | [codersdk.CreateFirstUserTrialInfo](#codersdkcreatefirstusertrialinfo) | false | | | @@ -1389,22 +1024,28 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in | ----------------- | ------- | -------- | ------------ | ----------- | | `avatar_url` | string | false | | | | `display_name` | string | false | | | -| `name` | string | false | | | +| `name` | string | true | | | | `quota_allowance` | integer | false | | | ## codersdk.CreateOrganizationRequest ```json { + "description": "string", + "display_name": "string", + "icon": "string", "name": "string" } ``` ### Properties -| Name | Type | Required | Restrictions | Description | -| ------ | ------ | -------- | ------------ | ----------- | -| `name` | string | true | | | +| Name | Type | Required | Restrictions | Description | +| -------------- | ------ | -------- | ------------ | ---------------------------------------------------------------------- | +| `description` | string | false | | | +| `display_name` | string | false | | Display name will default to the same value as `Name` if not provided. | +| `icon` | string | false | | | +| `name` | string | true | | | ## codersdk.CreateTemplateRequest @@ -1540,6 +1181,7 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in "action": "create", "additional_fields": [0], "build_reason": "autostart", + "organization_id": "7c60d51f-b44e-4682-87d6-449835ea4de6", "resource_id": "4d5215ed-38bb-48ed-879a-fdb9ca58522f", "resource_type": "template", "time": "2019-08-24T14:15:22Z" @@ -1553,6 +1195,7 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in | `action` | [codersdk.AuditAction](#codersdkauditaction) | false | | | | `additional_fields` | array of integer | false | | | | `build_reason` | [codersdk.BuildReason](#codersdkbuildreason) | false | | | +| `organization_id` | string | false | | | | `resource_id` | string | false | | | | `resource_type` | [codersdk.ResourceType](#codersdkresourcetype) | false | | | | `time` | string | false | | | @@ -1609,6 +1252,7 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in "disable_login": true, "email": "user@example.com", "login_type": "", + "name": "string", "organization_id": "7c60d51f-b44e-4682-87d6-449835ea4de6", "password": "string", "username": "string" @@ -1622,6 +1266,7 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in | `disable_login` | boolean | false | | Disable login sets the user's login type to 'none'. This prevents the user from being able to use a password or any other authentication method to login. 
Deprecated: Set UserLoginType=LoginTypeDisabled instead. | | `email` | string | true | | | | `login_type` | [codersdk.LoginType](#codersdklogintype) | false | | Login type defaults to LoginTypePassword. | +| `name` | string | false | | | | `organization_id` | string | false | | | | `password` | string | false | | | | `username` | string | true | | | @@ -2009,7 +1654,6 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o "device_flow": true, "display_icon": "string", "display_name": "string", - "extra_token_keys": ["string"], "id": "string", "no_refresh": true, "regex": "string", @@ -2076,6 +1720,7 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o "ignore_email_verified": true, "ignore_user_info": true, "issuer_url": "string", + "name_field": "string", "scopes": ["string"], "sign_in_text": "string", "signups_disabled_text": "string", @@ -2382,7 +2027,6 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o "device_flow": true, "display_icon": "string", "display_name": "string", - "extra_token_keys": ["string"], "id": "string", "no_refresh": true, "regex": "string", @@ -2449,6 +2093,7 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o "ignore_email_verified": true, "ignore_user_info": true, "issuer_url": "string", + "name_field": "string", "scopes": ["string"], "sign_in_text": "string", "signups_disabled_text": "string", @@ -2723,6 +2368,7 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o | `auto-fill-parameters` | | `multi-organization` | | `custom-roles` | +| `workspace-usage` | ## codersdk.ExternalAuth @@ -2801,7 +2447,6 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o "device_flow": true, "display_icon": "string", "display_name": "string", - "extra_token_keys": ["string"], "id": "string", "no_refresh": true, "regex": "string", @@ -2824,7 +2469,6 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o | `device_flow` | boolean | false | | | | `display_icon` | string | false | | Display icon is a URL to an icon to display in the UI. | | `display_name` | string | false | | Display name is shown in the UI to identify the auth config. | -| `extra_token_keys` | array of string | false | | | | `id` | string | false | | ID is a unique identifier for the auth config. It defaults to `type` when not provided. | | `no_refresh` | boolean | false | | | | `regex` | string | false | | Regex allows API requesters to match an auth config by a string (e.g. coder.com) instead of by it's type. | @@ -2954,7 +2598,8 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o "roles": [ { "display_name": "string", - "name": "string" + "name": "string", + "organization_id": "string" } ], "status": "active", @@ -3528,6 +3173,7 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o "ignore_email_verified": true, "ignore_user_info": true, "issuer_url": "string", + "name_field": "string", "scopes": ["string"], "sign_in_text": "string", "signups_disabled_text": "string", @@ -3559,6 +3205,7 @@ CreateWorkspaceRequest provides options for creating a new workspace. 
Only one o | `ignore_email_verified` | boolean | false | | | | `ignore_user_info` | boolean | false | | | | `issuer_url` | string | false | | | +| `name_field` | string | false | | | | `scopes` | array of string | false | | | | `sign_in_text` | string | false | | | | `signups_disabled_text` | string | false | | | @@ -3572,6 +3219,9 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o ```json { "created_at": "2019-08-24T14:15:22Z", + "description": "string", + "display_name": "string", + "icon": "string", "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", "is_default": true, "name": "string", @@ -3581,13 +3231,16 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o ### Properties -| Name | Type | Required | Restrictions | Description | -| ------------ | ------- | -------- | ------------ | ----------- | -| `created_at` | string | true | | | -| `id` | string | true | | | -| `is_default` | boolean | true | | | -| `name` | string | true | | | -| `updated_at` | string | true | | | +| Name | Type | Required | Restrictions | Description | +| -------------- | ------- | -------- | ------------ | ----------- | +| `created_at` | string | true | | | +| `description` | string | false | | | +| `display_name` | string | false | | | +| `icon` | string | false | | | +| `id` | string | true | | | +| `is_default` | boolean | true | | | +| `name` | string | false | | | +| `updated_at` | string | true | | | ## codersdk.OrganizationMember @@ -3598,7 +3251,8 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o "roles": [ { "display_name": "string", - "name": "string" + "name": "string", + "organization_id": "string" } ], "updated_at": "2019-08-24T14:15:22Z", @@ -3616,6 +3270,36 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o | `updated_at` | string | false | | | | `user_id` | string | false | | | +## codersdk.OrganizationMemberWithName + +```json +{ + "created_at": "2019-08-24T14:15:22Z", + "organization_id": "7c60d51f-b44e-4682-87d6-449835ea4de6", + "roles": [ + { + "display_name": "string", + "name": "string", + "organization_id": "string" + } + ], + "updated_at": "2019-08-24T14:15:22Z", + "user_id": "a169451c-8525-4352-b8ca-070dd449a1a5", + "username": "string" +} +``` + +### Properties + +| Name | Type | Required | Restrictions | Description | +| ----------------- | ----------------------------------------------- | -------- | ------------ | ----------- | +| `created_at` | string | false | | | +| `organization_id` | string | false | | | +| `roles` | array of [codersdk.SlimRole](#codersdkslimrole) | false | | | +| `updated_at` | string | false | | | +| `user_id` | string | false | | | +| `username` | string | false | | | + ## codersdk.PatchGroupRequest ```json @@ -3714,6 +3398,22 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o | `icon` | string | false | | | | `name` | string | true | | | +## codersdk.PostWorkspaceUsageRequest + +```json +{ + "agent_id": "2b1e3b65-2c04-4fa2-a2d7-467901e98978", + "app_name": "vscode" +} +``` + +### Properties + +| Name | Type | Required | Restrictions | Description | +| ---------- | ---------------------------------------------- | -------- | ------------ | ----------- | +| `agent_id` | string | false | | | +| `app_name` | [codersdk.UsageAppName](#codersdkusageappname) | false | | | + ## codersdk.PprofConfig ```json @@ -4289,6 +3989,7 @@ CreateWorkspaceRequest provides options for creating a new workspace. 
Only one o | `organization` | | `oauth2_provider_app` | | `oauth2_provider_app_secret` | +| `custom_role` | ## codersdk.Response @@ -4434,16 +4135,18 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o ```json { "display_name": "string", - "name": "string" + "name": "string", + "organization_id": "string" } ``` ### Properties -| Name | Type | Required | Restrictions | Description | -| -------------- | ------ | -------- | ------------ | ----------- | -| `display_name` | string | false | | | -| `name` | string | false | | | +| Name | Type | Required | Restrictions | Description | +| ----------------- | ------ | -------- | ------------ | ----------- | +| `display_name` | string | false | | | +| `name` | string | false | | | +| `organization_id` | string | false | | | ## codersdk.SupportConfig @@ -4984,7 +4687,8 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o "roles": [ { "display_name": "string", - "name": "string" + "name": "string", + "organization_id": "string" } ], "status": "active", @@ -5349,15 +5053,21 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o ```json { + "description": "string", + "display_name": "string", + "icon": "string", "name": "string" } ``` ### Properties -| Name | Type | Required | Restrictions | Description | -| ------ | ------ | -------- | ------------ | ----------- | -| `name` | string | true | | | +| Name | Type | Required | Restrictions | Description | +| -------------- | ------ | -------- | ------------ | ----------- | +| `description` | string | false | | | +| `display_name` | string | false | | | +| `icon` | string | false | | | +| `name` | string | false | | | ## codersdk.UpdateRoles @@ -5574,6 +5284,23 @@ If the schedule is empty, the user will be updated to use the default schedule.| | `share_level` | `authenticated` | | `share_level` | `public` | +## codersdk.UsageAppName + +```json +"vscode" +``` + +### Properties + +#### Enumerated Values + +| Value | +| ------------------ | +| `vscode` | +| `jetbrains` | +| `reconnecting-pty` | +| `ssh` | + ## codersdk.User ```json @@ -5589,7 +5316,8 @@ If the schedule is empty, the user will be updated to use the default schedule.| "roles": [ { "display_name": "string", - "name": "string" + "name": "string", + "organization_id": "string" } ], "status": "active", @@ -6390,28 +6118,6 @@ If the schedule is empty, the user will be updated to use the default schedule.| | `id` | string | false | | | | `workspace_agent_id` | string | false | | | -## codersdk.WorkspaceAgentMetadataDescription - -```json -{ - "display_name": "string", - "interval": 0, - "key": "string", - "script": "string", - "timeout": 0 -} -``` - -### Properties - -| Name | Type | Required | Restrictions | Description | -| -------------- | ------- | -------- | ------------ | ----------- | -| `display_name` | string | false | | | -| `interval` | integer | false | | | -| `key` | string | false | | | -| `script` | string | false | | | -| `timeout` | integer | false | | | - ## codersdk.WorkspaceAgentPortShare ```json @@ -8246,7 +7952,6 @@ If the schedule is empty, the user will be updated to use the default schedule.| } ] }, - "failing_sections": ["DERP"], "healthy": true, "provisioner_daemons": { "dismissed": true, @@ -8348,7 +8053,6 @@ If the schedule is empty, the user will be updated to use the default schedule.| | `coder_version` | string | false | | The Coder version of the server that the report was generated on. 
| | `database` | [healthsdk.DatabaseReport](#healthsdkdatabasereport) | false | | | | `derp` | [healthsdk.DERPHealthReport](#healthsdkderphealthreport) | false | | | -| `failing_sections` | array of [healthsdk.HealthSection](#healthsdkhealthsection) | false | | Failing sections is a list of sections that have failed their healthcheck. | | `healthy` | boolean | false | | Healthy is true if the report returns no errors. Deprecated: use `Severity` instead | | `provisioner_daemons` | [healthsdk.ProvisionerDaemonsReport](#healthsdkprovisionerdaemonsreport) | false | | | | `severity` | [health.Severity](#healthseverity) | false | | Severity indicates the status of Coder health. | @@ -8844,7 +8548,6 @@ _None_ "device_flow": true, "display_icon": "string", "display_name": "string", - "extra_token_keys": ["string"], "id": "string", "no_refresh": true, "regex": "string", diff --git a/docs/api/templates.md b/docs/api/templates.md index de0498c3de87b..b85811f41d0b8 100644 --- a/docs/api/templates.md +++ b/docs/api/templates.md @@ -617,6 +617,132 @@ curl -X POST http://coder-server:8080/api/v2/organizations/{organization}/templa To perform this operation, you must be authenticated. [Learn more](authentication.md). +## Get all templates + +### Code samples + +```shell +# Example request using curl +curl -X GET http://coder-server:8080/api/v2/templates \ + -H 'Accept: application/json' \ + -H 'Coder-Session-Token: API_KEY' +``` + +`GET /templates` + +### Example responses + +> 200 Response + +```json +[ + { + "active_user_count": 0, + "active_version_id": "eae64611-bd53-4a80-bb77-df1e432c0fbc", + "activity_bump_ms": 0, + "allow_user_autostart": true, + "allow_user_autostop": true, + "allow_user_cancel_workspace_jobs": true, + "autostart_requirement": { + "days_of_week": ["monday"] + }, + "autostop_requirement": { + "days_of_week": ["monday"], + "weeks": 0 + }, + "build_time_stats": { + "property1": { + "p50": 123, + "p95": 146 + }, + "property2": { + "p50": 123, + "p95": 146 + } + }, + "created_at": "2019-08-24T14:15:22Z", + "created_by_id": "9377d689-01fb-4abf-8450-3368d2c1924f", + "created_by_name": "string", + "default_ttl_ms": 0, + "deprecated": true, + "deprecation_message": "string", + "description": "string", + "display_name": "string", + "failure_ttl_ms": 0, + "icon": "string", + "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", + "max_port_share_level": "owner", + "name": "string", + "organization_id": "7c60d51f-b44e-4682-87d6-449835ea4de6", + "provisioner": "terraform", + "require_active_version": true, + "time_til_dormant_autodelete_ms": 0, + "time_til_dormant_ms": 0, + "updated_at": "2019-08-24T14:15:22Z" + } +] +``` + +### Responses + +| Status | Meaning | Description | Schema | +| ------ | ------------------------------------------------------- | ----------- | --------------------------------------------------------- | +| 200 | [OK](https://tools.ietf.org/html/rfc7231#section-6.3.1) | OK | array of [codersdk.Template](schemas.md#codersdktemplate) | + +

+### Response Schema
+ +Status Code **200** + +| Name | Type | Required | Restrictions | Description | +| ------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -------- | ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `[array item]` | array | false | | | +| `» active_user_count` | integer | false | | Active user count is set to -1 when loading. | +| `» active_version_id` | string(uuid) | false | | | +| `» activity_bump_ms` | integer | false | | | +| `» allow_user_autostart` | boolean | false | | Allow user autostart and AllowUserAutostop are enterprise-only. Their values are only used if your license is entitled to use the advanced template scheduling feature. | +| `» allow_user_autostop` | boolean | false | | | +| `» allow_user_cancel_workspace_jobs` | boolean | false | | | +| `» autostart_requirement` | [codersdk.TemplateAutostartRequirement](schemas.md#codersdktemplateautostartrequirement) | false | | | +| `»» days_of_week` | array | false | | Days of week is a list of days of the week in which autostart is allowed to happen. If no days are specified, autostart is not allowed. | +| `» autostop_requirement` | [codersdk.TemplateAutostopRequirement](schemas.md#codersdktemplateautostoprequirement) | false | | Autostop requirement and AutostartRequirement are enterprise features. Its value is only used if your license is entitled to use the advanced template scheduling feature. | +| `»» days_of_week` | array | false | | Days of week is a list of days of the week on which restarts are required. Restarts happen within the user's quiet hours (in their configured timezone). If no days are specified, restarts are not required. Weekdays cannot be specified twice. | +| Restarts will only happen on weekdays in this list on weeks which line up with Weeks. | +| `»» weeks` | integer | false | | Weeks is the number of weeks between required restarts. Weeks are synced across all workspaces (and Coder deployments) using modulo math on a hardcoded epoch week of January 2nd, 2023 (the first Monday of 2023). Values of 0 or 1 indicate weekly restarts. Values of 2 indicate fortnightly restarts, etc. | +| `» build_time_stats` | [codersdk.TemplateBuildTimeStats](schemas.md#codersdktemplatebuildtimestats) | false | | | +| `»» [any property]` | [codersdk.TransitionStats](schemas.md#codersdktransitionstats) | false | | | +| `»»» p50` | integer | false | | | +| `»»» p95` | integer | false | | | +| `» created_at` | string(date-time) | false | | | +| `» created_by_id` | string(uuid) | false | | | +| `» created_by_name` | string | false | | | +| `» default_ttl_ms` | integer | false | | | +| `» deprecated` | boolean | false | | | +| `» deprecation_message` | string | false | | | +| `» description` | string | false | | | +| `» display_name` | string | false | | | +| `» failure_ttl_ms` | integer | false | | Failure ttl ms TimeTilDormantMillis, and TimeTilDormantAutoDeleteMillis are enterprise-only. Their values are used if your license is entitled to use the advanced template scheduling feature. 
| +| `» icon` | string | false | | | +| `» id` | string(uuid) | false | | | +| `» max_port_share_level` | [codersdk.WorkspaceAgentPortShareLevel](schemas.md#codersdkworkspaceagentportsharelevel) | false | | | +| `» name` | string | false | | | +| `» organization_id` | string(uuid) | false | | | +| `» provisioner` | string | false | | | +| `» require_active_version` | boolean | false | | Require active version mandates that workspaces are built with the active template version. | +| `» time_til_dormant_autodelete_ms` | integer | false | | | +| `» time_til_dormant_ms` | integer | false | | | +| `» updated_at` | string(date-time) | false | | | + +#### Enumerated Values + +| Property | Value | +| ---------------------- | --------------- | +| `max_port_share_level` | `owner` | +| `max_port_share_level` | `authenticated` | +| `max_port_share_level` | `public` | +| `provisioner` | `terraform` | + +To perform this operation, you must be authenticated. [Learn more](authentication.md). + ## Get template metadata by ID ### Code samples diff --git a/docs/api/users.md b/docs/api/users.md index c9910bf66c1c7..22d1c7b9cfca8 100644 --- a/docs/api/users.md +++ b/docs/api/users.md @@ -42,7 +42,8 @@ curl -X GET http://coder-server:8080/api/v2/users \ "roles": [ { "display_name": "string", - "name": "string" + "name": "string", + "organization_id": "string" } ], "status": "active", @@ -82,6 +83,7 @@ curl -X POST http://coder-server:8080/api/v2/users \ "disable_login": true, "email": "user@example.com", "login_type": "", + "name": "string", "organization_id": "7c60d51f-b44e-4682-87d6-449835ea4de6", "password": "string", "username": "string" @@ -111,7 +113,8 @@ curl -X POST http://coder-server:8080/api/v2/users \ "roles": [ { "display_name": "string", - "name": "string" + "name": "string", + "organization_id": "string" } ], "status": "active", @@ -227,6 +230,7 @@ curl -X POST http://coder-server:8080/api/v2/users/first \ ```json { "email": "string", + "name": "string", "password": "string", "trial": true, "trial_info": { @@ -381,7 +385,8 @@ curl -X GET http://coder-server:8080/api/v2/users/{user} \ "roles": [ { "display_name": "string", - "name": "string" + "name": "string", + "organization_id": "string" } ], "status": "active", @@ -434,7 +439,8 @@ curl -X DELETE http://coder-server:8080/api/v2/users/{user} \ "roles": [ { "display_name": "string", - "name": "string" + "name": "string", + "organization_id": "string" } ], "status": "active", @@ -497,7 +503,8 @@ curl -X PUT http://coder-server:8080/api/v2/users/{user}/appearance \ "roles": [ { "display_name": "string", - "name": "string" + "name": "string", + "organization_id": "string" } ], "status": "active", @@ -993,6 +1000,9 @@ curl -X GET http://coder-server:8080/api/v2/users/{user}/organizations \ [ { "created_at": "2019-08-24T14:15:22Z", + "description": "string", + "display_name": "string", + "icon": "string", "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", "is_default": true, "name": "string", @@ -1011,14 +1021,17 @@ curl -X GET http://coder-server:8080/api/v2/users/{user}/organizations \ Status Code **200** -| Name | Type | Required | Restrictions | Description | -| -------------- | ----------------- | -------- | ------------ | ----------- | -| `[array item]` | array | false | | | -| `» created_at` | string(date-time) | true | | | -| `» id` | string(uuid) | true | | | -| `» is_default` | boolean | true | | | -| `» name` | string | true | | | -| `» updated_at` | string(date-time) | true | | | +| Name | Type | Required | Restrictions | Description | +| 
---------------- | ----------------- | -------- | ------------ | ----------- | +| `[array item]` | array | false | | | +| `» created_at` | string(date-time) | true | | | +| `» description` | string | false | | | +| `» display_name` | string | false | | | +| `» icon` | string | false | | | +| `» id` | string(uuid) | true | | | +| `» is_default` | boolean | true | | | +| `» name` | string | false | | | +| `» updated_at` | string(date-time) | true | | | To perform this operation, you must be authenticated. [Learn more](authentication.md). @@ -1049,6 +1062,9 @@ curl -X GET http://coder-server:8080/api/v2/users/{user}/organizations/{organiza ```json { "created_at": "2019-08-24T14:15:22Z", + "description": "string", + "display_name": "string", + "icon": "string", "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", "is_default": true, "name": "string", @@ -1148,7 +1164,8 @@ curl -X PUT http://coder-server:8080/api/v2/users/{user}/profile \ "roles": [ { "display_name": "string", - "name": "string" + "name": "string", + "organization_id": "string" } ], "status": "active", @@ -1201,7 +1218,8 @@ curl -X GET http://coder-server:8080/api/v2/users/{user}/roles \ "roles": [ { "display_name": "string", - "name": "string" + "name": "string", + "organization_id": "string" } ], "status": "active", @@ -1264,7 +1282,8 @@ curl -X PUT http://coder-server:8080/api/v2/users/{user}/roles \ "roles": [ { "display_name": "string", - "name": "string" + "name": "string", + "organization_id": "string" } ], "status": "active", @@ -1317,7 +1336,8 @@ curl -X PUT http://coder-server:8080/api/v2/users/{user}/status/activate \ "roles": [ { "display_name": "string", - "name": "string" + "name": "string", + "organization_id": "string" } ], "status": "active", @@ -1370,7 +1390,8 @@ curl -X PUT http://coder-server:8080/api/v2/users/{user}/status/suspend \ "roles": [ { "display_name": "string", - "name": "string" + "name": "string", + "organization_id": "string" } ], "status": "active", diff --git a/docs/api/workspaces.md b/docs/api/workspaces.md index 886f8401f7d7e..f16d9be857fef 100644 --- a/docs/api/workspaces.md +++ b/docs/api/workspaces.md @@ -1397,16 +1397,27 @@ To perform this operation, you must be authenticated. 
[Learn more](authenticatio ```shell # Example request using curl curl -X POST http://coder-server:8080/api/v2/workspaces/{workspace}/usage \ + -H 'Content-Type: application/json' \ -H 'Coder-Session-Token: API_KEY' ``` `POST /workspaces/{workspace}/usage` +> Body parameter + +```json +{ + "agent_id": "2b1e3b65-2c04-4fa2-a2d7-467901e98978", + "app_name": "vscode" +} +``` + ### Parameters -| Name | In | Type | Required | Description | -| ----------- | ---- | ------------ | -------- | ------------ | -| `workspace` | path | string(uuid) | true | Workspace ID | +| Name | In | Type | Required | Description | +| ----------- | ---- | ---------------------------------------------------------------------------------- | -------- | ---------------------------- | +| `workspace` | path | string(uuid) | true | Workspace ID | +| `body` | body | [codersdk.PostWorkspaceUsageRequest](schemas.md#codersdkpostworkspaceusagerequest) | false | Post workspace usage request | ### Responses diff --git a/docs/changelogs/v2.0.0.md b/docs/changelogs/v2.0.0.md index fb43de0e9581d..08636be8adb85 100644 --- a/docs/changelogs/v2.0.0.md +++ b/docs/changelogs/v2.0.0.md @@ -4,7 +4,7 @@ we have outgrown development (v0.x) releases: - 1600+ users develop on Coder every day - A single 4-core Coder server can - [happily support](https://coder.com/docs/v2/latest/admin/scale) 1000+ users + [happily support](https://coder.com/docs/admin/scaling/scale-utility#recent-scale-tests) 1000+ users and workspace connections - We have a full suite of [paid features](https://coder.com/docs/v2/latest/enterprise) and enterprise diff --git a/docs/cli/login.md b/docs/cli/login.md index 8dab8a884149c..9a27e4a6357c8 100644 --- a/docs/cli/login.md +++ b/docs/cli/login.md @@ -30,6 +30,15 @@ Specifies an email address to use if creating the first user for the deployment. Specifies a username to use if creating the first user for the deployment. +### --first-user-full-name + +| | | +| ----------- | ---------------------------------------- | +| Type | string | +| Environment | $CODER_FIRST_USER_FULL_NAME | + +Specifies a human-readable name for the first user of the deployment. + ### --first-user-password | | | diff --git a/docs/cli/server.md b/docs/cli/server.md index a7c32c2d78420..ea3672a1cb2d7 100644 --- a/docs/cli/server.md +++ b/docs/cli/server.md @@ -514,6 +514,17 @@ Ignore the email_verified claim from the upstream provider. OIDC claim field to use as the username. +### --oidc-name-field + +| | | +| ----------- | ----------------------------------- | +| Type | string | +| Environment | $CODER_OIDC_NAME_FIELD | +| YAML | oidc.nameField | +| Default | name | + +OIDC claim field to use as the name. + ### --oidc-email-field | | | diff --git a/docs/cli/speedtest.md b/docs/cli/speedtest.md index e2d3a435fb0ea..ab9d9a4f7e49c 100644 --- a/docs/cli/speedtest.md +++ b/docs/cli/speedtest.md @@ -45,3 +45,21 @@ Specifies the duration to monitor traffic. | Type | string | Specifies a file to write a network capture to. + +### -c, --column + +| | | +| ------- | -------------------------------- | +| Type | string-array | +| Default | Interval,Throughput | + +Columns to display in table output. Available columns: Interval, Throughput. + +### -o, --output + +| | | +| ------- | ------------------- | +| Type | string | +| Default | table | + +Output format. Available formats: table, json. 
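The two new `speedtest` flags documented above can be combined on the command line; the sketch below shows typical invocations. The flag names and values (`--column`, `--output`, `Throughput`, `json`) come from the options listed above, while the workspace name `dev` is a hypothetical placeholder.

```shell
# Limit the table output to the Throughput column ("dev" is a placeholder workspace name)
coder speedtest dev --column Throughput

# Emit JSON instead of a table, e.g. for piping into other tooling
coder speedtest dev --output json
```
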
diff --git a/docs/cli/users_create.md b/docs/cli/users_create.md index 3934f2482ac02..1e8e12530939f 100644 --- a/docs/cli/users_create.md +++ b/docs/cli/users_create.md @@ -26,6 +26,14 @@ Specifies an email address for the new user. Specifies a username for the new user. +### -n, --full-name + +| | | +| ---- | ------------------- | +| Type | string | + +Specifies an optional human-readable name for the new user. + ### -p, --password | | | diff --git a/docs/faqs.md b/docs/faqs.md index 9ee9d30ef26e1..bec3b4f66a406 100644 --- a/docs/faqs.md +++ b/docs/faqs.md @@ -501,3 +501,36 @@ Note that the JetBrains Gateway configuration blocks for each host in your SSH config file will be overwritten by the JetBrains Gateway client when it re-authenticates to your Coder deployment so you must add the above config as a separate block and not add it to any existing ones. + +### How can I restrict inbound/outbound file transfers from Coder workspaces? + +In certain environments, it is essential to keep confidential files within +workspaces and prevent users from uploading or downloading resources using tools +like `scp` or `rsync`. + +To achieve this, template admins can use the environment variable +`CODER_AGENT_BLOCK_FILE_TRANSFER` to enable additional SSH command controls. +This variable allows the system to check if the executed application is on the +block list, which includes `scp`, `rsync`, `ftp`, and `nc`. + +```hcl +resource "docker_container" "workspace" { + ... + env = [ + "CODER_AGENT_TOKEN=${coder_agent.main.token}", + "CODER_AGENT_BLOCK_FILE_TRANSFER=true", + ... + ] +} +``` + +#### Important Notice + +This control operates at the `ssh-exec` level or during `sftp` sessions. While +it can help prevent automated file transfers using the specified tools, users +can still SSH into the workspace and manually initiate file transfers. The +primary purpose of this feature is to warn and discourage users from downloading +confidential resources to their local machines. + +For more advanced security needs, consider adopting an endpoint security +solution. diff --git a/docs/ides/remote-desktops.md b/docs/ides/remote-desktops.md index 51ffe4e264cd6..5f654fb5ea8b6 100644 --- a/docs/ides/remote-desktops.md +++ b/docs/ides/remote-desktops.md @@ -33,10 +33,6 @@ To use RDP with Coder, you'll need to install an [RDP client](https://docs.microsoft.com/en-us/windows-server/remote/remote-desktop-services/clients/remote-desktop-clients) on your local machine, and enable RDP on your workspace. -As a starting point, see the -[gcp-windows-rdp](https://github.com/matifali/coder-templates/tree/main/gcp-windows-rdp) -community template. It builds and provisions a Windows Server workspace on GCP. 
- Use the following command to forward the RDP port to your local machine: ```console diff --git a/docs/images/admin/announcement_banner_settings.png b/docs/images/admin/announcement_banner_settings.png new file mode 100644 index 0000000000000..beae02bc693db Binary files /dev/null and b/docs/images/admin/announcement_banner_settings.png differ diff --git a/docs/images/admin/multiple-banners.PNG b/docs/images/admin/multiple-banners.PNG new file mode 100644 index 0000000000000..07272f9116749 Binary files /dev/null and b/docs/images/admin/multiple-banners.PNG differ diff --git a/docs/images/screenshots/audit.png b/docs/images/screenshots/audit.png new file mode 100644 index 0000000000000..5538c67afd8e3 Binary files /dev/null and b/docs/images/screenshots/audit.png differ diff --git a/docs/images/screenshots/healthcheck.png b/docs/images/screenshots/healthcheck.png new file mode 100644 index 0000000000000..5b42f716ca7b6 Binary files /dev/null and b/docs/images/screenshots/healthcheck.png differ diff --git a/docs/images/screenshots/login.png b/docs/images/screenshots/login.png new file mode 100644 index 0000000000000..9bfe85e9f4cea Binary files /dev/null and b/docs/images/screenshots/login.png differ diff --git a/docs/images/screenshots/settings.png b/docs/images/screenshots/settings.png new file mode 100644 index 0000000000000..cf3f19116fb13 Binary files /dev/null and b/docs/images/screenshots/settings.png differ diff --git a/docs/images/screenshots/starter_templates.png b/docs/images/screenshots/starter_templates.png new file mode 100644 index 0000000000000..1eab19f2901cd Binary files /dev/null and b/docs/images/screenshots/starter_templates.png differ diff --git a/docs/images/screenshots/templates_insights.png b/docs/images/screenshots/templates_insights.png new file mode 100644 index 0000000000000..8375661da2603 Binary files /dev/null and b/docs/images/screenshots/templates_insights.png differ diff --git a/docs/images/screenshots/templates_listing.png b/docs/images/screenshots/templates_listing.png new file mode 100644 index 0000000000000..e887de4f4e2aa Binary files /dev/null and b/docs/images/screenshots/templates_listing.png differ diff --git a/docs/images/screenshots/terraform.png b/docs/images/screenshots/terraform.png new file mode 100644 index 0000000000000..d8780d650ea1f Binary files /dev/null and b/docs/images/screenshots/terraform.png differ diff --git a/docs/images/screenshots/workspace_launch.png b/docs/images/screenshots/workspace_launch.png new file mode 100644 index 0000000000000..ab2092e7f5d7d Binary files /dev/null and b/docs/images/screenshots/workspace_launch.png differ diff --git a/docs/images/screenshots/workspaces_listing.png b/docs/images/screenshots/workspaces_listing.png new file mode 100644 index 0000000000000..ee206c100f5ba Binary files /dev/null and b/docs/images/screenshots/workspaces_listing.png differ diff --git a/docs/install/kubernetes.md b/docs/install/kubernetes.md index 0b6d01a150297..f782263d44ad3 100644 --- a/docs/install/kubernetes.md +++ b/docs/install/kubernetes.md @@ -134,7 +134,7 @@ locally in order to log in and manage templates. helm install coder coder-v2/coder \ --namespace coder \ --values values.yaml \ - --version 2.11.2 + --version 2.12.3 ``` For the **stable** Coder release: @@ -145,7 +145,7 @@ locally in order to log in and manage templates. helm install coder coder-v2/coder \ --namespace coder \ --values values.yaml \ - --version 2.10.2 + --version 2.11.4 ``` You can watch Coder start up by running `kubectl get pods -n coder`. 
Once diff --git a/docs/install/offline.md b/docs/install/offline.md index 120aa5c9f76b7..d4d8d24c0c111 100644 --- a/docs/install/offline.md +++ b/docs/install/offline.md @@ -54,7 +54,7 @@ RUN mkdir -p /opt/terraform # The below step is optional if you wish to keep the existing version. # See https://github.com/coder/coder/blob/main/provisioner/terraform/install.go#L23-L24 # for supported Terraform versions. -ARG TERRAFORM_VERSION=1.7.5 +ARG TERRAFORM_VERSION=1.8.4 RUN apk update && \ apk del terraform && \ curl -LOs https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip \ diff --git a/docs/install/releases.md b/docs/install/releases.md index 22dac07f687e3..8f7ffe370095e 100644 --- a/docs/install/releases.md +++ b/docs/install/releases.md @@ -47,11 +47,12 @@ pages. ## Release schedule -| Release name | Release Date | Status | -| ------------ | ------------------ | ---------------- | -| 2.7.x | January 01, 2024 | Not Supported | -| 2.8.x | Februrary 06, 2024 | Not Supported | -| 2.9.x | March 07, 2024 | Security Support | -| 2.10.x | April 03, 2024 | Stable | -| 2.11.x | May 07, 2024 | Mainline | -| 2.12.x | June 04, 2024 | Not Released | +| Release name | Release Date | Status | +| ------------ | ----------------- | ---------------- | +| 2.7.x | January 01, 2024 | Not Supported | +| 2.8.x | February 06, 2024 | Not Supported | +| 2.9.x | March 07, 2024 | Not Supported | +| 2.10.x | April 03, 2024 | Security Support | +| 2.11.x | May 07, 2024 | Stable | +| 2.12.x | June 04, 2024 | Mainline | +| 2.13.x | July 02, 2024 | Not Released | diff --git a/docs/manifest.json b/docs/manifest.json index 067aecac8e69c..bdfb26c4831ae 100644 --- a/docs/manifest.json +++ b/docs/manifest.json @@ -8,10 +8,9 @@ "icon_path": "./images/icons/home.svg", "children": [ { - "title": "Architecture", - "description": "Learn how Coder works", - "path": "./about/architecture.md", - "icon_path": "./images/icons/protractor.svg" + "title": "Screenshots", + "description": "Browse screenshots of the Coder platform", + "path": "./about/screenshots.md" } ] }, @@ -344,6 +343,30 @@ "path": "./admin/README.md", "icon_path": "./images/icons/wrench.svg", "children": [ + { + "title": "Architecture", + "description": "Learn about validated and reference architectures for Coder", + "path": "./admin/architectures/architecture.md", + "icon_path": "./images/icons/container.svg", + "children": [ + { + "title": "Validated Architecture", + "path": "./admin/architectures/validated-arch.md" + }, + { + "title": "Up to 1,000 users", + "path": "./admin/architectures/1k-users.md" + }, + { + "title": "Up to 2,000 users", + "path": "./admin/architectures/2k-users.md" + }, + { + "title": "Up to 3,000 users", + "path": "./admin/architectures/3k-users.md" + } + ] + }, { "title": "Authentication", "description": "Learn how to set up authentication using GitHub or OpenID Connect", @@ -397,26 +420,12 @@ { "title": "Scaling Coder", "description": "Learn how to use load testing tools", - "path": "./admin/scale.md", - "icon_path": "./images/icons/scale.svg" - }, - { - "title": "Reference Architectures", - "description": "Learn about reference architectures for Coder", - "path": "./admin/architectures/index.md", + "path": "./admin/scaling/scale-testing.md", "icon_path": "./images/icons/scale.svg", "children": [ { - "title": "Up to 1,000 users", - "path": "./admin/architectures/1k-users.md" - }, - { - "title": "Up to 2,000 users", - "path": "./admin/architectures/2k-users.md" - }, - { - "title": "Up to 
3,000 users", - "path": "./admin/architectures/3k-users.md" + "title": "Scaling Utility", + "path": "./admin/scaling/scale-utility.md" } ] }, diff --git a/docs/platforms/aws.md b/docs/platforms/aws.md index b5114d720feac..83e0c6c2aa642 100644 --- a/docs/platforms/aws.md +++ b/docs/platforms/aws.md @@ -27,7 +27,7 @@ We recommend keeping the default instance type (`t2.xlarge`, 4 cores and 16 GB memory) if you plan on provisioning Docker containers as workspaces on this EC2 instance. Keep in mind this platforms is intended for proof-of-concept deployments and you should adjust your infrastructure when preparing for -production use. See: [Scaling Coder](../admin/scale.md) +production use. See: [Scaling Coder](../admin/scaling/scale-testing.md) Be sure to add a keypair so that you can connect over SSH to further [configure Coder](../admin/configure.md). diff --git a/docs/platforms/gcp.md b/docs/platforms/gcp.md index 630897fc79d6e..c8c4203314c77 100644 --- a/docs/platforms/gcp.md +++ b/docs/platforms/gcp.md @@ -23,7 +23,7 @@ We recommend keeping the default instance type (`e2-standard-4`, 4 cores and 16 GB memory) if you plan on provisioning Docker containers as workspaces on this VM instance. Keep in mind this platforms is intended for proof-of-concept deployments and you should adjust your infrastructure when preparing for -production use. See: [Scaling Coder](../admin/scale.md) +production use. See: [Scaling Coder](../admin/scaling/scale-testing.md)